author     Devtools Arcadia <[email protected]>  2022-02-07 18:08:42 +0300
committer  Devtools Arcadia <[email protected]>  2022-02-07 18:08:42 +0300
commit     1110808a9d39d4b808aef724c861a2e1a38d2a69 (patch)
tree       e26c9fed0de5d9873cce7e00bc214573dc2195b7 /contrib/restricted/abseil-cpp-tstring
intermediate changes
ref:cde9a383711a11544ce7e107a78147fb96cc4029
Diffstat (limited to 'contrib/restricted/abseil-cpp-tstring')
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/ABSEIL_ISSUE_TEMPLATE.md | 22
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/AUTHORS | 6
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/CONTRIBUTING.md | 141
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/FAQ.md | 167
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/LICENSE | 203
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/README.md | 142
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/UPGRADES.md | 17
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/provides.pbtxt | 66
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/.yandex_meta/licenses.list.txt | 16
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/algorithm.h | 159
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h | 1774
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/ya.make | 14
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h | 735
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/call_once.h | 219
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h | 187
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h | 767
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/const_init.h | 76
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/dynamic_annotations.h | 471
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/atomic_hook.h | 200
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/atomic_hook_test_helper.h | 34
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.cc | 107
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.h | 94
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h | 169
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/dynamic_annotations.h | 398
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h | 327
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/errno_saver.h | 43
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/exception_safety_testing.h | 1109
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/exception_testing.h | 42
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h | 48
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/hide_ptr.h | 51
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/identity.h | 37
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/inline_variable.h | 107
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/inline_variable_testing.h | 46
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h | 187
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc | 620
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.h | 126
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc/ya.make | 33
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_scheduling.h | 134
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/per_thread_tls.h | 52
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/pretty_function.h | 33
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc | 242
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h | 195
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging/ya.make | 30
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scheduling_mode.h | 58
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scoped_set_env.cc | 81
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scoped_set_env.h | 45
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc | 229
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h | 248
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_akaros.inc | 35
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc | 74
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_posix.inc | 46
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.cc | 81
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.h | 95
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait/ya.make | 26
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_win32.inc | 37
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.cc | 88
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.h | 39
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc | 508
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.h | 74
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_annotations.h | 271
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc | 155
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h | 265
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc | 212
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.h | 75
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate/ya.make | 31
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/tsan_mutex_interface.h | 68
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unaligned_access.h | 82
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc | 154
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h | 124
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.cc | 27
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.h | 121
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity/ya.make | 26
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/macros.h | 158
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/optimization.h | 244
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h | 238
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/policy_checks.h | 111
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/port.h | 25
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/thread_annotations.h | 335
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/base/ya.make | 34
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/city/ya.make | 33
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/.yandex_meta/licenses.list.txt | 24
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_map.h | 815
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_set.h | 728
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_test.h | 166
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h | 527
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h | 606
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_set.h | 504
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h | 855
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/absl_hashtablez_sampler/ya.make | 49
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/btree.h | 2641
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/btree_container.h | 683
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common.h | 206
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/compressed_tuple.h | 290
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h | 460
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/counting_allocator.h | 114
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_function_defaults.h | 163
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_generator_testing.h | 182
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_policy_testing.h | 184
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_policy_traits.h | 208
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtable_debug.h | 110
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtable_debug_hooks.h | 85
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc | 190
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.h | 281
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler_force_weak_definition.cc | 31
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/have_sse.h | 50
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h | 932
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/layout.h | 743
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/node_hash_policy.h | 92
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_map.h | 198
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc | 67
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h | 2034
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set/ya.make | 52
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/test_instance_tracker.h | 274
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/tracked.h | 83
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_constructor_test.h | 494
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_lookup_test.h | 117
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_members_test.h | 87
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_modifiers_test.h | 351
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_constructor_test.h | 496
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_lookup_test.h | 91
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_members_test.h | 86
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_modifiers_test.h | 221
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/node_hash_map.h | 597
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/node_hash_set.h | 493
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/container/ya.make | 14
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/.yandex_meta/licenses.list.txt | 24
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc | 388
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.h | 121
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler/ya.make | 43
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/.yandex_meta/licenses.list.txt | 52
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.cc | 139
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.h | 32
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/demangle.cc | 1959
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/demangle.h | 71
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.cc | 383
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h | 138
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc | 203
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.h | 42
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stack_consumption.h | 50
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc | 199
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_arm-inl.inc | 134
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_config.h | 87
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_emscripten-inl.inc | 110
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_generic-inl.inc | 108
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc | 253
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_riscv-inl.inc | 234
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_unimplemented-inl.inc | 24
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_win32-inl.inc | 93
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc | 364
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/symbolize.h | 153
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.cc | 191
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.h | 158
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/ya.make | 40
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc | 69
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.h | 133
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check/ya.make | 26
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable.cc | 20
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable/ya.make | 26
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace.cc | 142
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace.h | 231
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace/ya.make | 32
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.cc | 38
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.h | 99
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize/ya.make | 40
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_darwin.inc | 101
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc | 1574
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_emscripten.inc | 72
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_unimplemented.inc | 40
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_win32.inc | 81
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/debugging/ya.make | 31
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/demangle/ya.make | 33
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/functional/.yandex_meta/licenses.list.txt | 20
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h | 184
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h | 142
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/front_binder.h | 95
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/function_ref.h | 106
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/functional/ya.make | 14
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/.yandex_meta/licenses.list.txt | 16
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h | 347
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash_testing.h | 378
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/.yandex_meta/licenses.list.txt | 34
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/city.cc | 349
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/city.h | 78
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.cc | 69
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h | 1096
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.cc | 123
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.h | 50
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/spy_hash_state.h | 231
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/ya.make | 32
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/hash/ya.make | 41
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/memory/.yandex_meta/licenses.list.txt | 16
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/memory/memory.h | 698
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/memory/ya.make | 18
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/meta/.yandex_meta/licenses.list.txt | 16
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/meta/type_traits.h | 797
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/meta/ya.make | 18
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/numeric/.yandex_meta/licenses.list.txt | 38
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h | 177
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc | 383
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h | 1165
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_have_intrinsic.inc | 296
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_no_intrinsic.inc | 311
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/numeric/internal/bits.h | 358
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/numeric/internal/representation.h | 55
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/numeric/ya.make | 24
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased.cc | 93
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased.h | 130
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased/ya.make | 26
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/periodic_sampler.cc | 53
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/periodic_sampler.h | 211
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/sample_recorder.h | 230
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/.yandex_meta/licenses.list.txt | 20
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h | 69
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/statusor_internal.h | 396
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc | 444
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h | 882
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/status_payload_printer.cc | 38
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/status_payload_printer.h | 51
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.cc | 103
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h | 770
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor/ya.make | 57
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/status/ya.make | 54
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/.yandex_meta/licenses.list.txt | 46
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.cc | 200
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.h | 242
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.cc | 984
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.h | 120
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc | 2047
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h | 1521
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord/ya.make | 58
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_test_helpers.h | 122
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/cordz_test_helpers.h | 151
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.cc | 949
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.h | 164
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal/ya.make | 42
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal/ya.make | 35
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/char_map.h | 156
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.cc | 359
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.h | 423
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_parse.cc | 504
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_parse.h | 99
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc | 89
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h | 620
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc | 1128
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h | 939
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.cc | 185
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.h | 265
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.cc | 68
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.h | 211
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc | 129
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.h | 50
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h | 146
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.cc | 771
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.h | 607
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring_reader.h | 118
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_test_util.h | 220
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions.cc | 96
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions.h | 85
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions/ya.make | 32
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.cc | 139
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.h | 131
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle/ya.make | 47
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc | 445
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.h | 298
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info/ya.make | 51
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.cc | 64
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.h | 97
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token/ya.make | 52
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_statistics.h | 87
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_scope.h | 71
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_tracker.h | 121
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc | 180
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.h | 58
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping_test_common.h | 133
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.cc | 112
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.h | 148
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/numbers_test_common.h | 184
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.cc | 36
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.h | 89
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/pow10_helper.h | 40
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/resize_uninitialized.h | 119
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/stl_type_traits.h | 248
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/.yandex_meta/licenses.list.txt | 20
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc | 488
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h | 528
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.cc | 258
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h | 217
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/checker.h | 333
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.cc | 75
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h | 445
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.cc | 1423
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.h | 37
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.cc | 72
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.h | 96
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.cc | 339
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.h | 357
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/ya.make | 40
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_join_internal.h | 314
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_split_internal.h | 430
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/string_constant.h | 64
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.cc | 53
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.h | 50
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.cc | 43
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.h | 100
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc | 1093
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h | 308
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.cc | 246
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h | 411
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_format.h | 812
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_join.h | 293
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_replace.cc | 82
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_replace.h | 219
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.cc | 139
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.h | 548
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc | 230
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h | 712
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/strip.h | 91
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.cc | 172
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.h | 723
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/strings/ya.make | 46
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/.yandex_meta/licenses.list.txt | 16
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.cc | 52
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.h | 79
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.cc | 67
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.h | 101
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/.yandex_meta/licenses.list.txt | 34
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc | 140
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.h | 60
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h | 154
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc | 698
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.h | 141
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h | 156
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc | 106
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h | 115
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/thread_pool.h | 93
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc | 428
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h | 155
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/ya.make | 32
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc | 2751
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h | 1082
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.cc | 78
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.h | 123
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make | 50
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/.yandex_meta/licenses.list.txt | 42
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time.cc | 173
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time.h | 538
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time/ya.make | 26
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.cc | 585
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.h | 74
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc | 954
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/format.cc | 160
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time.h | 332
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time_detail.h | 628
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/time_zone.h | 459
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/zone_info_source.h | 102
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/civil_time_detail.cc | 94
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.cc | 140
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.h | 52
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_format.cc | 1029
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.cc | 45
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.h | 77
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.cc | 113
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.h | 93
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.cc | 1027
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.h | 137
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.cc | 315
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.h | 55
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_lookup.cc | 236
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.cc | 159
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.h | 132
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/tzfile.h | 122
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/zone_info_source.cc | 115
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/get_current_time_chrono.inc | 31
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/get_current_time_posix.inc | 24
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/test_util.h | 33
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/zoneinfo.inc | 729
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/time.cc | 500
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h | 1616
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone/ya.make | 38
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/time/ya.make | 44
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/.yandex_meta/licenses.list.txt | 20
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/any.h | 528
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast.cc | 46
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast.h | 75
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast/ya.make | 31
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access.cc | 48
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access.h | 78
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access/ya.make | 31
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access.cc | 64
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access.h | 82
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access/ya.make | 31
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/compare.h | 600
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/.yandex_meta/licenses.list.txt | 24
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_aliases.h | 447
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_archetype.h | 978
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_profile.h | 931
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_testing.h | 1386
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_testing_helpers.h | 391
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h | 396
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/parentheses.h | 34
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/span.h | 128
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/transform_args.h | 246
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h | 1646
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/ya.make | 14
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h | 776
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h | 726
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/variant.h | 866
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/types/ya.make | 14
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/utility/.yandex_meta/licenses.list.txt | 16
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/utility/utility.h | 350
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/y_absl/utility/ya.make | 14
-rw-r--r-- contrib/restricted/abseil-cpp-tstring/ya.make | 172
412 files changed, 109623 insertions, 0 deletions
diff --git a/contrib/restricted/abseil-cpp-tstring/ABSEIL_ISSUE_TEMPLATE.md b/contrib/restricted/abseil-cpp-tstring/ABSEIL_ISSUE_TEMPLATE.md
new file mode 100644
index 00000000000..ed5461f166c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/ABSEIL_ISSUE_TEMPLATE.md
@@ -0,0 +1,22 @@
+Please submit a new Abseil Issue using the template below:
+
+## [Short title of proposed API change(s)]
+
+--------------------------------------------------------------------------------
+--------------------------------------------------------------------------------
+
+## Background
+
+[Provide the background information that is required in order to evaluate the
+proposed API changes. No controversial claims should be made here. If there are
+design constraints that need to be considered, they should be presented here
+**along with justification for those constraints**. Linking to other docs is
+good, but please keep the **pertinent information as self contained** as
+possible in this section.]
+
+## Proposed API Change(s)
+
+[Please clearly describe the API change(s) being proposed. If multiple changes,
+please keep them clearly distinguished. When possible, **use example code
+snippets to illustrate before-after API usages**. List pros-n-cons. Highlight
+the main questions that you want to be answered. Given the Abseil project compatibility requirements, describe why the API change is safe.]
diff --git a/contrib/restricted/abseil-cpp-tstring/AUTHORS b/contrib/restricted/abseil-cpp-tstring/AUTHORS
new file mode 100644
index 00000000000..976d31defc2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/AUTHORS
@@ -0,0 +1,6 @@
+# This is the list of Abseil authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder. To see the full list
+# of contributors, see the revision history in source control.
+Google Inc.
diff --git a/contrib/restricted/abseil-cpp-tstring/CONTRIBUTING.md b/contrib/restricted/abseil-cpp-tstring/CONTRIBUTING.md
new file mode 100644
index 00000000000..a252cfac310
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/CONTRIBUTING.md
@@ -0,0 +1,141 @@
+# How to Contribute to Abseil
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+NOTE: If you are new to GitHub, please start by reading [Pull Request
+howto](https://help.github.com/articles/about-pull-requests/).
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Contribution Guidelines
+
+Potential contributors sometimes ask us if the Abseil project is the appropriate
+home for their utility library code or for specific functions implementing
+missing portions of the standard. Often, the answer to this question is "no".
+We’d like to articulate our thinking on this issue so that our choices can be
+understood by everyone and so that contributors can have a better intuition
+about whether Abseil might be interested in adopting a new library.
+
+### Priorities
+
+Although our mission is to augment the C++ standard library, our goal is not to
+provide a full forward-compatible implementation of the latest standard. For us
+to consider a library for inclusion in Abseil, it is not enough that a library
+is useful. We generally choose to release a library when it meets at least one
+of the following criteria:
+
+* **Widespread usage** - Using our internal codebase to help gauge usage, most
+ of the libraries we've released have tens of thousands of users.
+* **Anticipated widespread usage** - Pre-adoption of some standard-compliant
+ APIs may not have broad adoption initially but can be expected to pick up
+ usage when it replaces legacy APIs. `y_absl::from_chars`, for example,
+ replaces existing code that converts strings to numbers and will therefore
+ likely see usage growth.
+* **High impact** - APIs that provide a key solution to a specific problem,
+ such as `y_absl::FixedArray`, have higher impact than usage numbers may signal
+ and are released because of their importance.
+* **Direct support for a library that falls under one of the above** - When we
+ want access to a smaller library as an implementation detail for a
+ higher-priority library we plan to release, we may release it, as we did
+ with portions of `y_absl/meta/type_traits.h`. One consequence of this is that
+ the presence of a library in Abseil does not necessarily mean that other
+ similar libraries would be a high priority.
+
+### API Freeze Consequences
+
+Via the
+[Abseil Compatibility Guidelines](https://abseil.io/about/compatibility), we
+have promised a large degree of API stability. In particular, we will not make
+backward-incompatible changes to released APIs without also shipping a tool or
+process that can upgrade our users' code. We are not yet at the point of easily
+releasing such tools. Therefore, at this time, shipping a library establishes an
+API contract which is borderline unchangeable. (We can add new functionality,
+but we cannot easily change existing behavior.) This constraint forces us to
+very carefully review all APIs that we ship.
+
+
+## Coding Style
+
+To keep the source consistent, readable, diffable and easy to merge, we use a
+fairly rigid coding style, as defined by the
+[google-styleguide](https://github.com/google/styleguide) project. All patches
+will be expected to conform to the style outlined
+[here](https://google.github.io/styleguide/cppguide.html).
+
+## Guidelines for Pull Requests
+
+* If you are a Googler, it is preferable to first create an internal CL and
+ have it reviewed and submitted. The code propagation process will deliver
+ the change to GitHub.
+
+* Create **small PRs** that are narrowly focused on **addressing a single
+ concern**. We often receive PRs that are trying to fix several things at a
+ time, but if only one fix is considered acceptable, nothing gets merged and
+  both the author's & reviewer's time is wasted. Create more PRs to address
+ different concerns and everyone will be happy.
+
+* For speculative changes, consider opening an [Abseil
+ issue](https://github.com/abseil/abseil-cpp/issues) and discussing it first.
+ If you are suggesting a behavioral or API change, consider starting with an
+ [Abseil proposal template](ABSEIL_ISSUE_TEMPLATE.md).
+
+* Provide a good **PR description** as a record of **what** change is being
+ made and **why** it was made. Link to a GitHub issue if it exists.
+
+* Don't fix code style and formatting unless you are already changing that
+ line to address an issue. Formatting of modified lines may be done using
+ `git clang-format`. PRs with irrelevant changes won't be merged. If
+ you do want to fix formatting or style, do that in a separate PR.
+
+* Unless your PR is trivial, you should expect there will be reviewer comments
+ that you'll need to address before merging. We expect you to be reasonably
+ responsive to those comments, otherwise the PR will be closed after 2-3
+ weeks of inactivity.
+
+* Maintain **clean commit history** and use **meaningful commit messages**.
+ PRs with messy commit history are difficult to review and won't be merged.
+ Use `rebase -i upstream/master` to curate your commit history and/or to
+  bring in the latest changes from master (but avoid rebasing in the middle
+  of a code review); a sketch of this workflow follows the list.
+
+* Keep your PR up to date with upstream/master (if there are merge conflicts,
+ we can't really merge your change).
+
+* **All tests need to be passing** before your change can be merged. We
+  recommend you **run tests locally** (see below).
+
+* Exceptions to the rules can be made if there's a compelling reason for doing
+  so. That is, the rules are here to serve us, not the other way around, and
+  they need to serve their intended purpose to be valuable.
+
+* All submissions, including submissions by project members, require review.
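+
+As a rough sketch of the history-curation workflow mentioned above (the remote
+names `upstream` and `origin` and the branch name are assumptions for the sake
+of the example):
+
+```
+git fetch upstream                       # pick up the latest master
+git rebase -i upstream/master            # squash/reword commits interactively
+git push --force-with-lease origin my-feature-branch
+```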
+
+## Running Tests
+
+If you have [Bazel](https://bazel.build/) installed, use `bazel test
+--test_tag_filters="-benchmark" ...` to run the unit tests.
+
+If you are running the Linux operating system and have
+[Docker](https://www.docker.com/) installed, you can also run the `linux_*.sh`
+scripts under the [`ci/`](https://github.com/abseil/abseil-cpp/tree/master/ci)
+directory to test Abseil under a variety of conditions.
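+
+For example (the script name below is illustrative only; check the `ci/`
+directory for the current set of scripts):
+
+```
+# Build and test under Docker with a recent Clang and libc++.
+./ci/linux_clang-latest_libcxx_bazel.sh
+```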
+
+## Abseil Committers
+
+The current members of the Abseil engineering team are the only committers at
+present.
+
+## Release Process
+
+Abseil lives at head, where latest-and-greatest code can be found.
diff --git a/contrib/restricted/abseil-cpp-tstring/FAQ.md b/contrib/restricted/abseil-cpp-tstring/FAQ.md
new file mode 100644
index 00000000000..fc2192a89eb
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/FAQ.md
@@ -0,0 +1,167 @@
+# Abseil FAQ
+
+## Is Abseil the right home for my utility library?
+
+Most often the answer to the question is "no." As both the [About
+Abseil](https://abseil.io/about/) page and our [contributing
+guidelines](https://github.com/abseil/abseil-cpp/blob/master/CONTRIBUTING.md#contribution-guidelines)
+explain, Abseil contains a variety of core C++ library code that is widely used
+at [Google](https://www.google.com/). As such, Abseil's primary purpose is to be
+used as a dependency by Google's open source C++ projects. While we do hope that
+Abseil is also useful to the C++ community at large, this added constraint also
+means that we are unlikely to accept a contribution of utility code that isn't
+already widely used by Google.
+
+## How do I set the C++ dialect used to build Abseil?
+
+The short answer is that whatever mechanism you choose, you need to make sure
+that you set this option consistently at the global level for your entire
+project. If, for example, you want to set the C++ dialect to C++17, with
+[Bazel](https://bazel.build/) as the build system and `gcc` or `clang` as the
+compiler, there are several ways to do this:
+* Pass `--cxxopt=-std=c++17` on the command line (for example, `bazel build
+ --cxxopt=-std=c++17 ...`)
+* Set the environment variable `BAZEL_CXXOPTS` (for example,
+ `BAZEL_CXXOPTS=-std=c++17`)
+* Add `build --cxxopt=-std=c++17` to your [`.bazelrc`
+ file](https://docs.bazel.build/versions/master/guide.html#bazelrc)
+
+If you are using CMake as the build system, you'll need to add a line like
+`set(CMAKE_CXX_STANDARD 17)` to your top level `CMakeLists.txt` file. If you
+are developing a library designed to be used by other clients, you should
+instead leave `CMAKE_CXX_STANDARD` unset and configure the minimum C++ standard
+required by each of your library targets via `target_compile_features`. See the
+[CMake build
+instructions](https://github.com/abseil/abseil-cpp/blob/master/CMake/README.md)
+for more information.
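+
+As a minimal sketch of the `target_compile_features` approach (the target and
+file names here are hypothetical):
+
+```
+# CMakeLists.txt for a library that other projects will consume. Rather than
+# forcing CMAKE_CXX_STANDARD globally, declare the minimum standard this
+# target needs; consumers may still build with a newer dialect.
+add_library(my_lib my_lib.cc)
+target_compile_features(my_lib PUBLIC cxx_std_17)
+target_link_libraries(my_lib PUBLIC absl::strings)
+```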
+
+For a longer answer to this question and to understand why some other approaches
+don't work, see the answer to ["What is ABI and why don't you recommend using a
+pre-compiled version of
+Abseil?"](#what-is-abi-and-why-dont-you-recommend-using-a-pre-compiled-version-of-abseil)
+
+## What is ABI and why don't you recommend using a pre-compiled version of Abseil?
+
+For the purposes of this discussion, you can think of
+[ABI](https://en.wikipedia.org/wiki/Application_binary_interface) as the
+compiled representation of the interfaces in code. This is in contrast to
+[API](https://en.wikipedia.org/wiki/Application_programming_interface), which
+you can think of as the interfaces as defined by the code itself. [Abseil has a
+strong promise of API compatibility, but does not make any promise of ABI
+compatibility](https://abseil.io/about/compatibility). Let's take a look at what
+this means in practice.
+
+You might be tempted to do something like this in a
+[Bazel](https://bazel.build/) `BUILD` file:
+
+```
+# DON'T DO THIS!!!
+cc_library(
+ name = "my_library",
+ srcs = ["my_library.cc"],
+ copts = ["-std=c++17"], # May create a mixed-mode compile!
+ deps = ["@com_google_absl//y_absl/strings"],
+)
+```
+
+Applying `-std=c++17` to an individual target in your `BUILD` file is going to
+compile that specific target in C++17 mode, but it isn't going to ensure the
+Abseil library is built in C++17 mode, since the Abseil library itself is a
+different build target. If your code includes an Abseil header, then your
+program may contain conflicting definitions of the same
+class/function/variable/enum, etc. As a rule, all compile options that affect
+the ABI of a program need to be applied to the entire build on a global basis.
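+
+A safer pattern, sketched below with the same hypothetical target, is to drop
+the per-target dialect flag and set it once for the entire build:
+
+```
+# BUILD: no per-target dialect flags.
+cc_library(
+    name = "my_library",
+    srcs = ["my_library.cc"],
+    deps = ["@com_google_absl//y_absl/strings"],
+)
+```
+
+```
+# .bazelrc: one global setting that applies to every target, Abseil included.
+build --cxxopt=-std=c++17
+```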
+
+C++ has something called the [One Definition
+Rule](https://en.wikipedia.org/wiki/One_Definition_Rule) (ODR). C++ doesn't
+allow multiple definitions of the same class/function/variable/enum, etc. ODR
+violations sometimes result in linker errors, but linkers do not always catch
+violations. Uncaught ODR violations can result in strange runtime behaviors or
+crashes that can be hard to debug.
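+
+As a hedged illustration of how a violation can slip past the linker, consider
+an inline function whose definition depends on a preprocessor flag (the file
+and function names are made up):
+
+```
+// size_config.h, included by both a.cc and b.cc.
+inline int buffer_size() {
+#ifdef NDEBUG
+  return 1024;  // release configuration
+#else
+  return 64;    // debug configuration
+#endif
+}
+// If a.cc is compiled with -DNDEBUG and b.cc without it, the linked program
+// contains two different definitions of buffer_size(). The linker silently
+// keeps one of them, and which one wins can depend on link order.
+```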
+
+If you build the Abseil library and your code using different compile options
+that affect ABI, there is a good chance you will run afoul of the One Definition
+Rule. Examples of GCC compile options that affect ABI include (but aren't
+limited to) language dialect (e.g. `-std=`), optimization level (e.g. `-O2`),
+code generation flags (e.g. `-fexceptions`), and preprocessor defines
+(e.g. `-DNDEBUG`).
+
+If you use a pre-compiled version of Abseil (for example, from your Linux
+distribution package manager or from something like
+[vcpkg](https://github.com/microsoft/vcpkg)) you have to be very careful to
+ensure ABI compatibility across the components of your program. The only way you
+can be sure your program is going to be correct regarding ABI is to ensure
+you've used the exact same compile options as were used to build the
+pre-compiled library. This does not mean that Abseil cannot work as part of a
+Linux distribution since a knowledgeable binary packager will have ensured that
+all packages have been built with consistent compile options. This is one of the
+reasons we warn against - though do not outright reject - using Abseil as a
+pre-compiled library.
+
+Another way that you might run afoul of ABI issues is if you accidentally
+include two versions of Abseil in your program. Multiple versions of Abseil can
+end up within the same binary if your program uses the Abseil library and
+another library also transitively depends on Abseil (resulting in what is
+sometimes called the diamond dependency problem). In cases such as this you must
+structure your build so that all libraries use the same version of Abseil.
+[Abseil's strong promise of API compatibility between
+releases](https://abseil.io/about/compatibility) means the latest "HEAD" release
+of Abseil is almost certainly the right choice if you are doing as we recommend
+and building all of your code from source.
+
+For these reasons we recommend you avoid pre-compiled code and build the Abseil
+library yourself in a consistent manner with the rest of your code.
+
+## What is "live at head" and how do I do it?
+
+From Abseil's point-of-view, "live at head" means that every Abseil source
+release (which happens on an almost daily basis) is either API compatible with
+the previous release, or comes with an automated tool that you can run over code
+to make it compatible. In practice, the need to use an automated tool is
+extremely rare. This means that upgrading from one source release to another
+should be a routine practice that can and should be performed often.
+
+We recommend you update to the [latest commit in the `master` branch of
+Abseil](https://github.com/abseil/abseil-cpp/commits/master) as often as
+possible. Not only will you pick up bug fixes more quickly, but if you have good
+automated testing, you will catch and be able to fix any [Hyrum's
+Law](https://www.hyrumslaw.com/) dependency problems on an incremental basis
+instead of being overwhelmed by them and having difficulty isolating them if you
+wait longer between updates.
+
+If you are using the [Bazel](https://bazel.build/) build system and its
+[external dependencies](https://docs.bazel.build/versions/master/external.html)
+feature, updating the
+[`http_archive`](https://docs.bazel.build/versions/master/repo/http.html#http_archive)
+rule in your
+[`WORKSPACE`](https://docs.bazel.build/versions/master/be/workspace.html) for
+`com_google_absl` to point to the [latest commit in the `master` branch of
+Abseil](https://github.com/abseil/abseil-cpp/commits/master) is all you need to
+do. For example, on February 11, 2020, the latest commit to the master branch
+was `98eb410c93ad059f9bba1bf43f5bb916fc92a5ea`. To update to this commit, you
+would add the following snippet to your `WORKSPACE` file:
+
+```
+http_archive(
+ name = "com_google_absl",
+ urls = ["https://github.com/abseil/abseil-cpp/archive/98eb410c93ad059f9bba1bf43f5bb916fc92a5ea.zip"], # 2020-02-11T18:50:53Z
+ strip_prefix = "abseil-cpp-98eb410c93ad059f9bba1bf43f5bb916fc92a5ea",
+ sha256 = "aabf6c57e3834f8dc3873a927f37eaf69975d4b28117fc7427dfb1c661542a87",
+)
+```
+
+To get the `sha256` of this URL, run `curl -sL --output -
+https://github.com/abseil/abseil-cpp/archive/98eb410c93ad059f9bba1bf43f5bb916fc92a5ea.zip
+| sha256sum -`.
+
+You can commit the updated `WORKSPACE` file to your source control every time
+you update, and if you have good automated testing, you might even consider
+automating this.
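+
+A rough sketch of what such automation might look like (unverified, and the
+step that splices the values into your `WORKSPACE` file is left to you):
+
+```
+# Resolve the latest commit on master and compute the archive checksum.
+commit=$(git ls-remote https://github.com/abseil/abseil-cpp.git master | cut -f1)
+url="https://github.com/abseil/abseil-cpp/archive/${commit}.zip"
+sha=$(curl -sL "$url" | sha256sum - | cut -d' ' -f1)
+echo "commit=${commit} sha256=${sha}"   # splice these into your WORKSPACE
+```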
+
+One thing we don't recommend is using GitHub's `master.zip` files (for example
+[https://github.com/abseil/abseil-cpp/archive/master.zip](https://github.com/abseil/abseil-cpp/archive/master.zip)),
+which are always the latest commit in the `master` branch, to implement live at
+head. Since these `master.zip` URLs are not versioned, you will lose build
+reproducibility. In addition, some build systems, including Bazel, will simply
+cache this file, which means you won't actually be updating to the latest
+release until your cache is cleared or invalidated.
diff --git a/contrib/restricted/abseil-cpp-tstring/LICENSE b/contrib/restricted/abseil-cpp-tstring/LICENSE
new file mode 100644
index 00000000000..ccd61dcfe3d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/LICENSE
@@ -0,0 +1,203 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/contrib/restricted/abseil-cpp-tstring/README.md b/contrib/restricted/abseil-cpp-tstring/README.md
new file mode 100644
index 00000000000..a63901a4224
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/README.md
@@ -0,0 +1,142 @@
+# Abseil - C++ Common Libraries
+
+The repository contains the Abseil C++ library code. Abseil is an open-source
+collection of C++ code (compliant with C++11) designed to augment the C++
+standard library.
+
+## Table of Contents
+
+- [About Abseil](#about)
+- [Quickstart](#quickstart)
+- [Building Abseil](#build)
+- [Support](#support)
+- [Codemap](#codemap)
+- [Releases](#releases)
+- [License](#license)
+- [Links](#links)
+
+<a name="about"></a>
+## About Abseil
+
+Abseil is an open-source collection of C++ library code designed to augment
+the C++ standard library. The Abseil library code is collected from Google's
+own C++ code base, has been extensively tested and used in production, and
+is the same code we depend on in our daily coding lives.
+
+In some cases, Abseil provides pieces missing from the C++ standard; in
+others, Abseil provides alternatives to the standard for special needs
+we've found through usage in the Google code base. We denote those cases
+clearly within the library code we provide you.
+
+Abseil is not meant to be a competitor to the standard library; we've
+just found that many of these utilities serve a purpose within our code
+base, and we now want to provide those resources to the C++ community as
+a whole.
+
+<a name="quickstart"></a>
+## Quickstart
+
+If you just want to get started, make sure you at least run through the
+[Abseil Quickstart](https://abseil.io/docs/cpp/quickstart). The Quickstart
+contains information about setting up your development environment, downloading
+the Abseil code, running tests, and getting a simple binary working.
+
+<a name="build"></a>
+## Building Abseil
+
+[Bazel](https://bazel.build) and [CMake](https://cmake.org/) are the official
+build systems for Abseil.
+
+See the [quickstart](https://abseil.io/docs/cpp/quickstart) for more information
+on building Abseil using the Bazel build system.
+
+If you require CMake support, please check the [CMake build
+instructions](CMake/README.md) and [CMake
+Quickstart](https://abseil.io/docs/cpp/quickstart-cmake).
+
+## Support
+
+Abseil is officially supported on many platforms. See the [Abseil
+platform support
+guide](https://abseil.io/docs/cpp/platforms/platforms) for details on
+supported operating systems, compilers, CPUs, etc.
+
+## Codemap
+
+Abseil contains the following C++ library components:
+
+* [`base`](y_absl/base/) Abseil Fundamentals
+ <br /> The `base` library contains initialization code and other code which
+ all other Abseil code depends on. Code within `base` may not depend on any
+ other code (other than the C++ standard library).
+* [`algorithm`](y_absl/algorithm/)
+ <br /> The `algorithm` library contains additions to the C++ `<algorithm>`
+ library and container-based versions of such algorithms (see the short
+ example after this list).
+* [`cleanup`](y_absl/cleanup/)
+ <br /> The `cleanup` library contains the control-flow-construct-like type
+ `y_absl::Cleanup` which is used for executing a callback on scope exit.
+* [`container`](y_absl/container/)
+ <br /> The `container` library contains additional STL-style containers,
+ including Abseil's unordered "Swiss table" containers.
+* [`debugging`](y_absl/debugging/)
+ <br /> The `debugging` library contains code useful for enabling leak
+ checks, and stacktrace and symbolization utilities.
+* [`hash`](y_absl/hash/)
+ <br /> The `hash` library contains the hashing framework and default hash
+ functor implementations for hashable types in Abseil.
+* [`memory`](y_absl/memory/)
+ <br /> The `memory` library contains C++11-compatible versions of
+ `std::make_unique()` and related memory management facilities.
+* [`meta`](y_absl/meta/)
+ <br /> The `meta` library contains C++11-compatible versions of type checks
+ available within C++14 and C++17 versions of the C++ `<type_traits>` library.
+* [`numeric`](y_absl/numeric/)
+ <br /> The `numeric` library contains C++11-compatible 128-bit integers.
+* [`profiling`](y_absl/profiling/)
+ <br /> The `profiling` library contains utility code for profiling C++
+ entities. It is currently a private dependency of other Abseil libraries.
+* [`status`](y_absl/status/)
+ <br /> The `status` library contains abstractions for error handling,
+ specifically `y_absl::Status` and `y_absl::StatusOr<T>`.
+* [`strings`](y_absl/strings/)
+ <br /> The `strings` library contains a variety of string routines and
+ utilities, including a C++11-compatible version of the C++17
+ `std::string_view` type.
+* [`synchronization`](y_absl/synchronization/)
+ <br /> The `synchronization` library contains concurrency primitives (Abseil's
+ `y_absl::Mutex` class, an alternative to `std::mutex`) and a variety of
+ synchronization abstractions.
+* [`time`](y_absl/time/)
+ <br /> The `time` library contains abstractions for computing with absolute
+ points in time, durations of time, and formatting and parsing time within
+ time zones.
+* [`types`](y_absl/types/)
+ <br /> The `types` library contains non-container utility types, like a
+ C++11-compatible version of the C++17 `std::optional` type.
+* [`utility`](y_absl/utility/)
+ <br /> The `utility` library contains utility and helper code.
+
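+For a flavor of the API, the container-based algorithms in `algorithm` remove
+the usual begin/end iterator boilerplate. The function below is a minimal
+sketch that assumes only the `y_absl/algorithm/container.h` header from this
+tree:
+
+```
+#include <vector>
+
+#include "y_absl/algorithm/container.h"
+
+bool ContainsSeven(const std::vector<int>& values) {
+  // Equivalent to std::find(values.begin(), values.end(), 7) != values.end().
+  return y_absl::c_linear_search(values, 7);
+}
+```
+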
+## Releases
+
+Abseil recommends users "live-at-head" (update to the latest commit from the
+master branch as often as possible). However, we realize this philosophy doesn't
+work for every project, so we also provide [Long Term Support
+Releases](https://github.com/abseil/abseil-cpp/releases) to which we backport
+fixes for severe bugs. See our [release
+management](https://abseil.io/about/releases) document for more details.
+
+## License
+
+The Abseil C++ library is licensed under the terms of the Apache
+license. See [LICENSE](LICENSE) for more information.
+
+## Links
+
+For more information about Abseil:
+
+* Consult our [Abseil Introduction](https://abseil.io/about/intro)
+* Read [Why Adopt Abseil](https://abseil.io/about/philosophy) to understand our
+ design philosophy.
+* Peruse our
+ [Abseil Compatibility Guarantees](https://abseil.io/about/compatibility) to
+ understand both what we promise to you, and what we expect of you in return.
diff --git a/contrib/restricted/abseil-cpp-tstring/UPGRADES.md b/contrib/restricted/abseil-cpp-tstring/UPGRADES.md
new file mode 100644
index 00000000000..35599d0878d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/UPGRADES.md
@@ -0,0 +1,17 @@
+# C++ Upgrade Tools
+
+Abseil may occasionally release API-breaking changes. As noted in our
+[Compatibility Guidelines][compatibility-guide], we will aim to provide a tool
+to do the work of effecting such API-breaking changes when they are absolutely
+necessary.
+
+These tools will be listed on the [C++ Upgrade Tools][upgrade-tools] guide on
+https://abseil.io.
+
+For more information, the [C++ Automated Upgrade Guide][api-upgrades-guide]
+outlines this process.
+
+[compatibility-guide]: https://abseil.io/about/compatibility
+[api-upgrades-guide]: https://abseil.io/docs/cpp/tools/api-upgrades
+[upgrade-tools]: https://abseil.io/docs/cpp/tools/upgrades/
+
diff --git a/contrib/restricted/abseil-cpp-tstring/provides.pbtxt b/contrib/restricted/abseil-cpp-tstring/provides.pbtxt
new file mode 100644
index 00000000000..34db641a554
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/provides.pbtxt
@@ -0,0 +1,66 @@
+p { i: "abseil-cpp" x: "absl_bad_any_cast_impl" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast" }
+p { i: "abseil-cpp" x: "absl_bad_optional_access" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access" }
+p { i: "abseil-cpp" x: "absl_bad_variant_access" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access" }
+p { i: "abseil-cpp" x: "absl_base" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/base" }
+p { i: "abseil-cpp" x: "absl_city" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/city" }
+p { i: "abseil-cpp" x: "absl_civil_time" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time" }
+p { i: "abseil-cpp" x: "absl_cord" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord" }
+p { i: "abseil-cpp" x: "absl_cord_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal" }
+p { i: "abseil-cpp" x: "absl_cordz_functions" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions" }
+p { i: "abseil-cpp" x: "absl_cordz_handle" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle" }
+p { i: "abseil-cpp" x: "absl_cordz_info" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info" }
+p { i: "abseil-cpp" x: "absl_cordz_sample_token" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token" }
+p { i: "abseil-cpp" x: "absl_debugging_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/debugging" }
+p { i: "abseil-cpp" x: "absl_demangle_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/demangle" }
+p { i: "abseil-cpp" x: "absl_examine_stack" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal" }
+p { i: "abseil-cpp" x: "absl_exponential_biased" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased" }
+p { i: "abseil-cpp" x: "absl_failure_signal_handler" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler" }
+p { i: "abseil-cpp" x: "absl_flags" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags" }
+p { i: "abseil-cpp" x: "absl_flags_commandlineflag" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/commandlineflag" }
+p { i: "abseil-cpp" x: "absl_flags_commandlineflag_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/commandlineflag" }
+p { i: "abseil-cpp" x: "absl_flags_config" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/usage_config" }
+p { i: "abseil-cpp" x: "absl_flags_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag" }
+p { i: "abseil-cpp" x: "absl_flags_marshalling" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling" }
+p { i: "abseil-cpp" x: "absl_flags_parse" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse" }
+p { i: "abseil-cpp" x: "absl_flags_private_handle_accessor" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/private_handle_accessor" }
+p { i: "abseil-cpp" x: "absl_flags_program_name" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/program_name" }
+p { i: "abseil-cpp" x: "absl_flags_reflection" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/reflection" }
+p { i: "abseil-cpp" x: "absl_flags_usage" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/usage" }
+p { i: "abseil-cpp" x: "absl_flags_usage_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage" }
+p { i: "abseil-cpp" x: "absl_graphcycles_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal" }
+p { i: "abseil-cpp" x: "absl_hash" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/hash" }
+p { i: "abseil-cpp" x: "absl_hashtablez_sampler" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/absl_hashtablez_sampler" }
+p { i: "abseil-cpp" x: "absl_int128" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/numeric" }
+p { i: "abseil-cpp" x: "absl_leak_check" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check" }
+p { i: "abseil-cpp" x: "absl_leak_check_disable" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable" }
+p { i: "abseil-cpp" x: "absl_log_severity" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity" }
+p { i: "abseil-cpp" x: "absl_low_level_hash" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal" }
+p { i: "abseil-cpp" x: "absl_malloc_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc" }
+p { i: "abseil-cpp" x: "absl_periodic_sampler" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/periodic_sampler" }
+p { i: "abseil-cpp" x: "absl_random_distributions" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/absl_random_distributions" }
+p { i: "abseil-cpp" x: "absl_random_internal_distribution_test_util" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/absl_random_internal_distribution_test_util" }
+p { i: "abseil-cpp" x: "absl_random_internal_platform" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_round_keys" }
+p { i: "abseil-cpp" x: "absl_random_internal_pool_urbg" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/pool_urbg" }
+p { i: "abseil-cpp" x: "absl_random_internal_randen" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen" }
+p { i: "abseil-cpp" x: "absl_random_internal_randen_hwaes" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_detect" }
+p { i: "abseil-cpp" x: "absl_random_internal_randen_hwaes_impl" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_hwaes" }
+p { i: "abseil-cpp" x: "absl_random_internal_randen_slow" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_slow" }
+p { i: "abseil-cpp" x: "absl_random_internal_seed_material" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/seed_material" }
+p { i: "abseil-cpp" x: "absl_random_seed_gen_exception" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/seed_gen_exception" }
+p { i: "abseil-cpp" x: "absl_random_seed_sequences" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/random/seed_sequences" }
+p { i: "abseil-cpp" x: "absl_raw_hash_set" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set" }
+p { i: "abseil-cpp" x: "absl_raw_logging_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging" }
+p { i: "abseil-cpp" x: "absl_scoped_set_env" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scoped_set_env" }
+p { i: "abseil-cpp" x: "absl_spinlock_wait" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait" }
+p { i: "abseil-cpp" x: "absl_stacktrace" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace" }
+p { i: "abseil-cpp" x: "absl_status" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/status" }
+p { i: "abseil-cpp" x: "absl_statusor" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor" }
+p { i: "abseil-cpp" x: "absl_str_format_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format" }
+p { i: "abseil-cpp" x: "absl_strerror" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror" }
+p { i: "abseil-cpp" x: "absl_strings" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/strings" }
+p { i: "abseil-cpp" x: "absl_strings_internal" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal" }
+p { i: "abseil-cpp" x: "absl_symbolize" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize" }
+p { i: "abseil-cpp" x: "absl_synchronization" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/synchronization" }
+p { i: "abseil-cpp" x: "absl_throw_delegate" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate" }
+p { i: "abseil-cpp" x: "absl_time" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/time" }
+p { i: "abseil-cpp" x: "absl_time_zone" ix: true peerdir: "contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone" }
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..7be6b428485
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/.yandex_meta/licenses.list.txt
@@ -0,0 +1,16 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/algorithm.h b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/algorithm.h
new file mode 100644
index 00000000000..b1003bd0a9f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/algorithm.h
@@ -0,0 +1,159 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: algorithm.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains Google extensions to the standard <algorithm> C++
+// header.
+
+#ifndef ABSL_ALGORITHM_ALGORITHM_H_
+#define ABSL_ALGORITHM_ALGORITHM_H_
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace algorithm_internal {
+
+// Performs comparisons with operator==, similar to C++14's `std::equal_to<>`.
+struct EqualTo {
+ template <typename T, typename U>
+ bool operator()(const T& a, const U& b) const {
+ return a == b;
+ }
+};
+
+template <typename InputIter1, typename InputIter2, typename Pred>
+bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+ InputIter2 last2, Pred pred, std::input_iterator_tag,
+ std::input_iterator_tag) {
+ while (true) {
+ if (first1 == last1) return first2 == last2;
+ if (first2 == last2) return false;
+ if (!pred(*first1, *first2)) return false;
+ ++first1;
+ ++first2;
+ }
+}
+
+template <typename InputIter1, typename InputIter2, typename Pred>
+bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+ InputIter2 last2, Pred&& pred, std::random_access_iterator_tag,
+ std::random_access_iterator_tag) {
+ return (last1 - first1 == last2 - first2) &&
+ std::equal(first1, last1, first2, std::forward<Pred>(pred));
+}
+
+// When we are using our own internal predicate that just applies operator==, we
+// forward to the non-predicate form of std::equal. This enables an optimization
+// in libstdc++ that can result in std::memcmp being used for integer types.
+template <typename InputIter1, typename InputIter2>
+bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+ InputIter2 last2, algorithm_internal::EqualTo /* unused */,
+ std::random_access_iterator_tag,
+ std::random_access_iterator_tag) {
+ return (last1 - first1 == last2 - first2) &&
+ std::equal(first1, last1, first2);
+}
+
+template <typename It>
+It RotateImpl(It first, It middle, It last, std::true_type) {
+ return std::rotate(first, middle, last);
+}
+
+template <typename It>
+It RotateImpl(It first, It middle, It last, std::false_type) {
+ std::rotate(first, middle, last);
+ return std::next(first, std::distance(middle, last));
+}
+
+} // namespace algorithm_internal
+
+// equal()
+//
+// Compares two ranges, specified by pairs of iterators, for equality using
+// the given predicate, returning true iff for each corresponding iterator i1
+// and i2 in the first and second range respectively, pred(*i1, *i2) == true.
+//
+// This comparison takes at most min(`last1` - `first1`, `last2` - `first2`)
+// invocations of the predicate. Additionally, if InputIter1 and InputIter2 are
+// both random-access iterators, and `last1` - `first1` != `last2` - `first2`,
+// then the predicate is never invoked and the function returns false.
+//
+// This is a C++11-compatible implementation of C++14 `std::equal`. See
+// https://en.cppreference.com/w/cpp/algorithm/equal for more information.
+template <typename InputIter1, typename InputIter2, typename Pred>
+bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+ InputIter2 last2, Pred&& pred) {
+ return algorithm_internal::EqualImpl(
+ first1, last1, first2, last2, std::forward<Pred>(pred),
+ typename std::iterator_traits<InputIter1>::iterator_category{},
+ typename std::iterator_traits<InputIter2>::iterator_category{});
+}
+
+// Overload of equal() that performs comparison of two ranges specified by pairs
+// of iterators using operator==.
+template <typename InputIter1, typename InputIter2>
+bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+ InputIter2 last2) {
+ return y_absl::equal(first1, last1, first2, last2,
+ algorithm_internal::EqualTo{});
+}
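+
+// Example:
+//
+//   std::vector<int> v1 = {1, 2, 3};
+//   std::vector<int> v2 = {1, 2, 3, 4};
+//   y_absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end());      // false
+//   y_absl::equal(v1.begin(), v1.end(), v2.begin(), v2.begin() + 3);  // true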
+
+// linear_search()
+//
+// Performs a linear search for `value` using the iterator `first` up to
+// but not including `last`, returning true if [`first`, `last`) contains an
+// element equal to `value`.
+//
+// A linear search is of O(n) complexity and is guaranteed to make at most
+// n = (`last` - `first`) comparisons. A linear search over short containers
+// may be faster than a binary search, even when the container is sorted.
+template <typename InputIterator, typename EqualityComparable>
+bool linear_search(InputIterator first, InputIterator last,
+ const EqualityComparable& value) {
+ return std::find(first, last, value) != last;
+}
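+
+// Example:
+//
+//   std::vector<int> v = {5, 1, 4};
+//   y_absl::linear_search(v.begin(), v.end(), 4);  // true
+//   y_absl::linear_search(v.begin(), v.end(), 9);  // false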
+
+// rotate()
+//
+// Performs a left rotation on a range of elements [`first`, `last`) such that
+// `middle` is now the first element. `rotate()` returns an iterator pointing to
+// the first element before rotation. This function is exactly the same as
+// `std::rotate`, but fixes a bug in gcc
+// <= 4.9 where `std::rotate` returns `void` instead of an iterator.
+//
+// The complexity of this algorithm is the same as that of `std::rotate`, but if
+// `ForwardIterator` is not a random-access iterator, then `y_absl::rotate`
+// performs an additional pass over the range to construct the return value.
+template <typename ForwardIterator>
+ForwardIterator rotate(ForwardIterator first, ForwardIterator middle,
+ ForwardIterator last) {
+ return algorithm_internal::RotateImpl(
+ first, middle, last,
+ std::is_same<decltype(std::rotate(first, middle, last)),
+ ForwardIterator>());
+}
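+
+// Example: rotating {1, 2, 3, 4, 5} around the element 3 yields
+// {3, 4, 5, 1, 2}, and the returned iterator points at the displaced former
+// first element.
+//
+//   std::vector<int> v = {1, 2, 3, 4, 5};
+//   auto it = y_absl::rotate(v.begin(), v.begin() + 2, v.end());
+//   // v == {3, 4, 5, 1, 2}, *it == 1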
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_ALGORITHM_ALGORITHM_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h
new file mode 100644
index 00000000000..d6bf3859645
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h
@@ -0,0 +1,1774 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: container.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides Container-based versions of algorithmic functions
+// within the C++ standard library. The following standard library sets of
+// functions are covered within this file:
+//
+// * Algorithmic <iterator> functions
+// * Algorithmic <numeric> functions
+// * <algorithm> functions
+//
+// The standard library functions operate on iterator ranges; the functions
+// within this API operate on containers, though many return iterator ranges.
+//
+// All functions within this API are named with a `c_` prefix. Calls such as
+// `y_absl::c_xx(container, ...)` are equivalent to std:: functions such as
+// `std::xx(std::begin(cont), std::end(cont), ...)`. Functions that act on
+// iterators but not conceptually on iterator ranges (e.g. `std::iter_swap`)
+// have no equivalent here.
+//
+// For template parameter and variable naming, `C` indicates the container type
+// to which the function is applied, `Pred` indicates the predicate object type
+// to be used by the function and `T` indicates the applicable element type.
+
+#ifndef ABSL_ALGORITHM_CONTAINER_H_
+#define ABSL_ALGORITHM_CONTAINER_H_
+
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <numeric>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "y_absl/algorithm/algorithm.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_algorithm_internal {
+
+// NOTE: it is important to defer to ADL lookup for building with C++ modules,
+// especially for headers like <valarray> which are not visible from this file
+// but specialize std::begin and std::end.
+using std::begin;
+using std::end;
+
+// The type of the iterator given by begin(c) (possibly std::begin(c)).
+// ContainerIter<const vector<T>> gives vector<T>::const_iterator,
+// while ContainerIter<vector<T>> gives vector<T>::iterator.
+template <typename C>
+using ContainerIter = decltype(begin(std::declval<C&>()));
+
+// An MSVC bug involving template parameter substitution requires us to use
+// decltype() here instead of just std::pair.
+template <typename C1, typename C2>
+using ContainerIterPairType =
+ decltype(std::make_pair(ContainerIter<C1>(), ContainerIter<C2>()));
+
+template <typename C>
+using ContainerDifferenceType =
+ decltype(std::distance(std::declval<ContainerIter<C>>(),
+ std::declval<ContainerIter<C>>()));
+
+template <typename C>
+using ContainerPointerType =
+ typename std::iterator_traits<ContainerIter<C>>::pointer;
+
+// container_algorithm_internal::c_begin and
+// container_algorithm_internal::c_end are abbreviations for proper ADL
+// lookup of std::begin and std::end, i.e.
+// using std::begin;
+// using std::end;
+// std::foo(begin(c), end(c));
+// becomes
+// std::foo(container_algorithm_internal::begin(c),
+// container_algorithm_internal::end(c));
+// These are meant for internal use only.
+
+template <typename C>
+ContainerIter<C> c_begin(C& c) { return begin(c); }
+
+template <typename C>
+ContainerIter<C> c_end(C& c) { return end(c); }
+
+template <typename T>
+struct IsUnorderedContainer : std::false_type {};
+
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+ std::unordered_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<std::unordered_set<Key, Hash, KeyEqual, Allocator>>
+ : std::true_type {};
+
+// container_algorithm_internal::c_size. It is meant for internal use only.
+
+template <class C>
+auto c_size(C& c) -> decltype(c.size()) {
+ return c.size();
+}
+
+template <class T, std::size_t N>
+constexpr std::size_t c_size(T (&)[N]) {
+ return N;
+}
+
+} // namespace container_algorithm_internal
+
+// PUBLIC API
+
+//------------------------------------------------------------------------------
+// Abseil algorithm.h functions
+//------------------------------------------------------------------------------
+
+// c_linear_search()
+//
+// Container-based version of y_absl::linear_search() for performing a linear
+// search within a container.
+template <typename C, typename EqualityComparable>
+bool c_linear_search(const C& c, EqualityComparable&& value) {
+ return linear_search(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<EqualityComparable>(value));
+}
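+
+// Example:
+//
+//   std::vector<int> v = {1, 2, 3};
+//   y_absl::c_linear_search(v, 3);  // true
+//   y_absl::c_linear_search(v, 4);  // false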
+
+//------------------------------------------------------------------------------
+// <iterator> algorithms
+//------------------------------------------------------------------------------
+
+// c_distance()
+//
+// Container-based version of the <iterator> `std::distance()` function to
+// return the number of elements within a container.
+template <typename C>
+container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
+ const C& c) {
+ return std::distance(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Non-modifying sequence operations
+//------------------------------------------------------------------------------
+
+// c_all_of()
+//
+// Container-based version of the <algorithm> `std::all_of()` function to
+// test a condition on all elements within a container.
+template <typename C, typename Pred>
+bool c_all_of(const C& c, Pred&& pred) {
+ return std::all_of(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred));
+}
+
+// c_any_of()
+//
+// Container-based version of the <algorithm> `std::any_of()` function to
+// test if any element in a container fulfills a condition.
+template <typename C, typename Pred>
+bool c_any_of(const C& c, Pred&& pred) {
+ return std::any_of(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred));
+}
+
+// c_none_of()
+//
+// Container-based version of the <algorithm> `std::none_of()` function to
+// test if no elements in a container fulfill a condition.
+template <typename C, typename Pred>
+bool c_none_of(const C& c, Pred&& pred) {
+ return std::none_of(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred));
+}
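+
+// Example for c_all_of(), c_any_of(), and c_none_of():
+//
+//   std::vector<int> v = {1, 2, 3};
+//   auto is_odd = [](int n) { return n % 2 != 0; };
+//   y_absl::c_all_of(v, is_odd);   // false
+//   y_absl::c_any_of(v, is_odd);   // true
+//   y_absl::c_none_of(v, is_odd);  // false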
+
+// c_for_each()
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+template <typename C, typename Function>
+decay_t<Function> c_for_each(C&& c, Function&& f) {
+ return std::for_each(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Function>(f));
+}
+
+// c_find()
+//
+// Container-based version of the <algorithm> `std::find()` function to find
+// the first element matching the passed value within a container.
+template <typename C, typename T>
+container_algorithm_internal::ContainerIter<C> c_find(C& c, T&& value) {
+ return std::find(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<T>(value));
+}
+
+// c_find_if()
+//
+// Container-based version of the <algorithm> `std::find_if()` function to find
+// the first element in a container matching the given condition.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_find_if(C& c, Pred&& pred) {
+ return std::find_if(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred));
+}
+
+// c_find_if_not()
+//
+// Container-based version of the <algorithm> `std::find_if_not()` function to
+// find the first element in a container not matching the given condition.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_find_if_not(C& c,
+ Pred&& pred) {
+ return std::find_if_not(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred));
+}
+
+// c_find_end()
+//
+// Container-based version of the <algorithm> `std::find_end()` function to
+// find the last subsequence within a container.
+template <typename Sequence1, typename Sequence2>
+container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
+ Sequence1& sequence, Sequence2& subsequence) {
+ return std::find_end(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ container_algorithm_internal::c_begin(subsequence),
+ container_algorithm_internal::c_end(subsequence));
+}
+
+// Overload of c_find_end() for using a predicate evaluation other than `==` as
+// the function's test condition.
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
+ Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
+ return std::find_end(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ container_algorithm_internal::c_begin(subsequence),
+ container_algorithm_internal::c_end(subsequence),
+ std::forward<BinaryPredicate>(pred));
+}
+
+// c_find_first_of()
+//
+// Container-based version of the <algorithm> `std::find_first_of()` function to
+// find the first element within the container that is also within the options
+// container.
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIter<C1> c_find_first_of(C1& container,
+ C2& options) {
+ return std::find_first_of(container_algorithm_internal::c_begin(container),
+ container_algorithm_internal::c_end(container),
+ container_algorithm_internal::c_begin(options),
+ container_algorithm_internal::c_end(options));
+}
+
+// Overload of c_find_first_of() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<C1> c_find_first_of(
+ C1& container, C2& options, BinaryPredicate&& pred) {
+ return std::find_first_of(container_algorithm_internal::c_begin(container),
+ container_algorithm_internal::c_end(container),
+ container_algorithm_internal::c_begin(options),
+ container_algorithm_internal::c_end(options),
+ std::forward<BinaryPredicate>(pred));
+}
+
+// c_adjacent_find()
+//
+// Container-based version of the <algorithm> `std::adjacent_find()` function to
+// find equal adjacent elements within a container.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
+ Sequence& sequence) {
+ return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_adjacent_find() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
+ Sequence& sequence, BinaryPredicate&& pred) {
+ return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<BinaryPredicate>(pred));
+}
+
+// c_count()
+//
+// Container-based version of the <algorithm> `std::count()` function to count
+// values that match within a container.
+template <typename C, typename T>
+container_algorithm_internal::ContainerDifferenceType<const C> c_count(
+ const C& c, T&& value) {
+ return std::count(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<T>(value));
+}
+
+// c_count_if()
+//
+// Container-based version of the <algorithm> `std::count_if()` function to
+// count values matching a condition within a container.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerDifferenceType<const C> c_count_if(
+ const C& c, Pred&& pred) {
+ return std::count_if(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred));
+}
+
+// c_mismatch()
+//
+// Container-based version of the <algorithm> `std::mismatch()` function to
+// return the first element where two ordered containers differ. Applies `==` to
+// the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)).
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIterPairType<C1, C2>
+c_mismatch(C1& c1, C2& c2) {
+ auto first1 = container_algorithm_internal::c_begin(c1);
+ auto last1 = container_algorithm_internal::c_end(c1);
+ auto first2 = container_algorithm_internal::c_begin(c2);
+ auto last2 = container_algorithm_internal::c_end(c2);
+
+ for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) {
+ // Negates equality because Cpp17EqualityComparable doesn't require clients
+ // to overload both `operator==` and `operator!=`.
+ if (!(*first1 == *first2)) {
+ break;
+ }
+ }
+
+ return std::make_pair(first1, first2);
+}
+
+// Overload of c_mismatch() for using a predicate evaluation other than `==` as
+// the function's test condition. Applies `pred` to the first N elements of `c1`
+// and `c2`, where N = min(size(c1), size(c2)).
+template <typename C1, typename C2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIterPairType<C1, C2>
+c_mismatch(C1& c1, C2& c2, BinaryPredicate pred) {
+ auto first1 = container_algorithm_internal::c_begin(c1);
+ auto last1 = container_algorithm_internal::c_end(c1);
+ auto first2 = container_algorithm_internal::c_begin(c2);
+ auto last2 = container_algorithm_internal::c_end(c2);
+
+ for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) {
+ if (!pred(*first1, *first2)) {
+ break;
+ }
+ }
+
+ return std::make_pair(first1, first2);
+}
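+
+// Example: the returned pair holds the first positions at which the two
+// containers differ, or the shorter container's end.
+//
+//   std::vector<int> v1 = {1, 2, 3};
+//   std::vector<int> v2 = {1, 2, 4};
+//   auto diff = y_absl::c_mismatch(v1, v2);
+//   // *diff.first == 3, *diff.second == 4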
+
+// c_equal()
+//
+// Container-based version of the <algorithm> `std::equal()` function to
+// test whether two containers are equal.
+//
+// NOTE: the semantics of c_equal() are slightly different from those of
+// equal(): while the latter iterates over the second container only up to the
+// size of the first container, c_equal() also checks whether the container
+// sizes are equal. This better matches expectations about c_equal() based on
+// its signature.
+//
+// Example:
+//   std::vector<int> v1 = {1, 2, 3};
+//   std::vector<int> v2 = {1, 2, 3, 4};
+// equal(std::begin(v1), std::end(v1), std::begin(v2)) returns true
+// c_equal(v1, v2) returns false
+
+template <typename C1, typename C2>
+bool c_equal(const C1& c1, const C2& c2) {
+ return ((container_algorithm_internal::c_size(c1) ==
+ container_algorithm_internal::c_size(c2)) &&
+ std::equal(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2)));
+}
+
+// Overload of c_equal() for using a predicate evaluation other than `==` as
+// the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
+ return ((container_algorithm_internal::c_size(c1) ==
+ container_algorithm_internal::c_size(c2)) &&
+ std::equal(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ std::forward<BinaryPredicate>(pred)));
+}
+
+// c_is_permutation()
+//
+// Container-based version of the <algorithm> `std::is_permutation()` function
+// to test whether a container is a permutation of another.
+template <typename C1, typename C2>
+bool c_is_permutation(const C1& c1, const C2& c2) {
+ using std::begin;
+ using std::end;
+ return c1.size() == c2.size() &&
+ std::is_permutation(begin(c1), end(c1), begin(c2));
+}
+
+// Overload of c_is_permutation() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
+ using std::begin;
+ using std::end;
+ return c1.size() == c2.size() &&
+ std::is_permutation(begin(c1), end(c1), begin(c2),
+ std::forward<BinaryPredicate>(pred));
+}
+
+// c_search()
+//
+// Container-based version of the <algorithm> `std::search()` function to search
+// a container for a subsequence.
+template <typename Sequence1, typename Sequence2>
+container_algorithm_internal::ContainerIter<Sequence1> c_search(
+ Sequence1& sequence, Sequence2& subsequence) {
+ return std::search(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ container_algorithm_internal::c_begin(subsequence),
+ container_algorithm_internal::c_end(subsequence));
+}
+
+// Overload of c_search() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence1> c_search(
+ Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
+ return std::search(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ container_algorithm_internal::c_begin(subsequence),
+ container_algorithm_internal::c_end(subsequence),
+ std::forward<BinaryPredicate>(pred));
+}
+
+// c_search_n()
+//
+// Container-based version of the <algorithm> `std::search_n()` function to
+// search a container for the first sequence of N elements.
+template <typename Sequence, typename Size, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_search_n(
+ Sequence& sequence, Size count, T&& value) {
+ return std::search_n(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence), count,
+ std::forward<T>(value));
+}
+
+// Overload of c_search_n() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence, typename Size, typename T,
+ typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence> c_search_n(
+ Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred) {
+ return std::search_n(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence), count,
+ std::forward<T>(value),
+ std::forward<BinaryPredicate>(pred));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Modifying sequence operations
+//------------------------------------------------------------------------------
+
+// c_copy()
+//
+// Container-based version of the <algorithm> `std::copy()` function to copy a
+// container's elements into an iterator.
+template <typename InputSequence, typename OutputIterator>
+OutputIterator c_copy(const InputSequence& input, OutputIterator output) {
+ return std::copy(container_algorithm_internal::c_begin(input),
+ container_algorithm_internal::c_end(input), output);
+}
+
+// c_copy_n()
+//
+// Container-based version of the <algorithm> `std::copy_n()` function to copy a
+// container's first N elements into an iterator.
+template <typename C, typename Size, typename OutputIterator>
+OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) {
+ return std::copy_n(container_algorithm_internal::c_begin(input), n, output);
+}
+
+// c_copy_if()
+//
+// Container-based version of the <algorithm> `std::copy_if()` function to copy
+// a container's elements satisfying some condition into an iterator.
+template <typename InputSequence, typename OutputIterator, typename Pred>
+OutputIterator c_copy_if(const InputSequence& input, OutputIterator output,
+ Pred&& pred) {
+ return std::copy_if(container_algorithm_internal::c_begin(input),
+ container_algorithm_internal::c_end(input), output,
+ std::forward<Pred>(pred));
+}
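+
+// Example:
+//
+//   std::vector<int> src = {1, 2, 3, 4};
+//   std::vector<int> evens;
+//   y_absl::c_copy_if(src, std::back_inserter(evens),
+//                     [](int n) { return n % 2 == 0; });
+//   // evens == {2, 4}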
+
+// c_copy_backward()
+//
+// Container-based version of the <algorithm> `std::copy_backward()` function to
+// copy a container's elements in reverse order into an iterator.
+template <typename C, typename BidirectionalIterator>
+BidirectionalIterator c_copy_backward(const C& src,
+ BidirectionalIterator dest) {
+ return std::copy_backward(container_algorithm_internal::c_begin(src),
+ container_algorithm_internal::c_end(src), dest);
+}
+
+// c_move()
+//
+// Container-based version of the <algorithm> `std::move()` function to move
+// a container's elements into an iterator.
+template <typename C, typename OutputIterator>
+OutputIterator c_move(C&& src, OutputIterator dest) {
+ return std::move(container_algorithm_internal::c_begin(src),
+ container_algorithm_internal::c_end(src), dest);
+}
+
+// c_move_backward()
+//
+// Container-based version of the <algorithm> `std::move_backward()` function to
+// move a container's elements into an iterator in reverse order.
+template <typename C, typename BidirectionalIterator>
+BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) {
+ return std::move_backward(container_algorithm_internal::c_begin(src),
+ container_algorithm_internal::c_end(src), dest);
+}
+
+// c_swap_ranges()
+//
+// Container-based version of the <algorithm> `std::swap_ranges()` function to
+// swap a container's elements with another container's elements. Swaps the
+// first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)).
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIter<C2> c_swap_ranges(C1& c1, C2& c2) {
+ auto first1 = container_algorithm_internal::c_begin(c1);
+ auto last1 = container_algorithm_internal::c_end(c1);
+ auto first2 = container_algorithm_internal::c_begin(c2);
+ auto last2 = container_algorithm_internal::c_end(c2);
+
+ using std::swap;
+ for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) {
+ swap(*first1, *first2);
+ }
+ return first2;
+}
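+
+// Example:
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {4, 5, 6, 7};
+//   y_absl::c_swap_ranges(a, b);
+//   // a == {4, 5, 6}, b == {1, 2, 3, 7}; the returned iterator points at 7.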
+
+// c_transform()
+//
+// Container-based version of the <algorithm> `std::transform()` function to
+// transform a container's elements using the unary operation, writing the
+// results through the output iterator and returning an iterator pointing past
+// the last transformed element in the output range.
+template <typename InputSequence, typename OutputIterator, typename UnaryOp>
+OutputIterator c_transform(const InputSequence& input, OutputIterator output,
+ UnaryOp&& unary_op) {
+ return std::transform(container_algorithm_internal::c_begin(input),
+ container_algorithm_internal::c_end(input), output,
+ std::forward<UnaryOp>(unary_op));
+}
+
+// Overload of c_transform() for performing a transformation using a binary
+// operation. Applies `binary_op` to the first N elements of `input1` and
+// `input2`, where N = min(size(input1), size(input2)).
+template <typename InputSequence1, typename InputSequence2,
+ typename OutputIterator, typename BinaryOp>
+OutputIterator c_transform(const InputSequence1& input1,
+ const InputSequence2& input2, OutputIterator output,
+ BinaryOp&& binary_op) {
+ auto first1 = container_algorithm_internal::c_begin(input1);
+ auto last1 = container_algorithm_internal::c_end(input1);
+ auto first2 = container_algorithm_internal::c_begin(input2);
+ auto last2 = container_algorithm_internal::c_end(input2);
+ for (; first1 != last1 && first2 != last2;
+ ++first1, (void)++first2, ++output) {
+ *output = binary_op(*first1, *first2);
+ }
+
+ return output;
+}
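+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {10, 20};
+//   std::vector<int> sums;
+//   y_absl::c_transform(a, b, std::back_inserter(sums),
+//                       [](int x, int y) { return x + y; });
+//   // sums == {11, 22}; only min(size(a), size(b)) element pairs are used.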
+
+// c_replace()
+//
+// Container-based version of the <algorithm> `std::replace()` function to
+// replace each of a container's elements equal to some value with a new value.
+// The container is modified in place.
+template <typename Sequence, typename T>
+void c_replace(Sequence& sequence, const T& old_value, const T& new_value) {
+ std::replace(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence), old_value,
+ new_value);
+}
+
+// c_replace_if()
+//
+// Container-based version of the <algorithm> `std::replace_if()` function to
+// replace each of a container's elements for which `pred` returns `true` with
+// a new value. The container is modified in place.
+template <typename C, typename Pred, typename T>
+void c_replace_if(C& c, Pred&& pred, T&& new_value) {
+ std::replace_if(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred), std::forward<T>(new_value));
+}
+
+// c_replace_copy()
+//
+// Container-based version of the <algorithm> `std::replace_copy()` function to
+// copy a container's elements, replacing those equal to `old_value` with
+// `new_value`, and write the results through an output iterator.
+template <typename C, typename OutputIterator, typename T>
+OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value,
+ T&& new_value) {
+ return std::replace_copy(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c), result,
+ std::forward<T>(old_value),
+ std::forward<T>(new_value));
+}
+
+// c_replace_copy_if()
+//
+// Container-based version of the <algorithm> `std::replace_copy_if()` function
+// to copy a container's elements, replacing those for which `pred` returns
+// `true` with `new_value`, and write the results through an output iterator.
+template <typename C, typename OutputIterator, typename Pred, typename T>
+OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred,
+ T&& new_value) {
+ return std::replace_copy_if(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c), result,
+ std::forward<Pred>(pred),
+ std::forward<T>(new_value));
+}
+
+// c_fill()
+//
+// Container-based version of the <algorithm> `std::fill()` function to fill a
+// container with some value.
+template <typename C, typename T>
+void c_fill(C& c, T&& value) {
+ std::fill(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c), std::forward<T>(value));
+}
+
+// c_fill_n()
+//
+// Container-based version of the <algorithm> `std::fill_n()` function to fill
+// the first N elements in a container with some value.
+template <typename C, typename Size, typename T>
+void c_fill_n(C& c, Size n, T&& value) {
+ std::fill_n(container_algorithm_internal::c_begin(c), n,
+ std::forward<T>(value));
+}
+
+// c_generate()
+//
+// Container-based version of the <algorithm> `std::generate()` function to
+// assign a container's elements to the values provided by the given generator.
+template <typename C, typename Generator>
+void c_generate(C& c, Generator&& gen) {
+ std::generate(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Generator>(gen));
+}
+
+// c_generate_n()
+//
+// Container-based version of the <algorithm> `std::generate_n()` function to
+// assign a container's first N elements to the values provided by the given
+// generator.
+template <typename C, typename Size, typename Generator>
+container_algorithm_internal::ContainerIter<C> c_generate_n(C& c, Size n,
+ Generator&& gen) {
+ return std::generate_n(container_algorithm_internal::c_begin(c), n,
+ std::forward<Generator>(gen));
+}
+
+// Note: `c_xx()` <algorithm> container versions for `remove()`, `remove_if()`,
+// and `unique()` are omitted, because it's not clear whether or not such
+// functions should call erase on their supplied sequences afterwards. Either
+// behavior would be surprising for a different set of users.
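+//
+// Callers that do want the erasure can combine `std::remove()` with the
+// container's own `erase()` explicitly; a minimal sketch (values are
+// hypothetical):
+//
+//   std::vector<int> v = {1, 2, 1, 3};
+//   v.erase(std::remove(v.begin(), v.end(), 1), v.end());  // v == {2, 3}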
+
+// c_remove_copy()
+//
+// Container-based version of the <algorithm> `std::remove_copy()` function to
+// copy a container's elements while removing any elements matching the given
+// `value`.
+template <typename C, typename OutputIterator, typename T>
+OutputIterator c_remove_copy(const C& c, OutputIterator result, T&& value) {
+ return std::remove_copy(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c), result,
+ std::forward<T>(value));
+}
+
+// c_remove_copy_if()
+//
+// Container-based version of the <algorithm> `std::remove_copy_if()` function
+// to copy a container's elements while removing any elements matching the given
+// condition.
+template <typename C, typename OutputIterator, typename Pred>
+OutputIterator c_remove_copy_if(const C& c, OutputIterator result,
+ Pred&& pred) {
+ return std::remove_copy_if(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c), result,
+ std::forward<Pred>(pred));
+}
+
+// c_unique_copy()
+//
+// Container-based version of the <algorithm> `std::unique_copy()` function to
+// copy a container's elements while removing consecutive elements with
+// duplicate values.
+template <typename C, typename OutputIterator>
+OutputIterator c_unique_copy(const C& c, OutputIterator result) {
+ return std::unique_copy(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c), result);
+}
+
+// Overload of c_unique_copy() for using a predicate evaluation other than
+// `==` for comparing uniqueness of the element values.
+template <typename C, typename OutputIterator, typename BinaryPredicate>
+OutputIterator c_unique_copy(const C& c, OutputIterator result,
+ BinaryPredicate&& pred) {
+ return std::unique_copy(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c), result,
+ std::forward<BinaryPredicate>(pred));
+}
+
+// c_reverse()
+//
+// Container-based version of the <algorithm> `std::reverse()` function to
+// reverse a container's elements.
+template <typename Sequence>
+void c_reverse(Sequence& sequence) {
+ std::reverse(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence));
+}
+
+// c_reverse_copy()
+//
+// Container-based version of the <algorithm> `std::reverse_copy()` function to
+// write a container's elements to an output iterator in reverse order.
+template <typename C, typename OutputIterator>
+OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) {
+ return std::reverse_copy(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ result);
+}
+
+// c_rotate()
+//
+// Container-based version of the <algorithm> `std::rotate()` function to
+// shift a container's elements leftward such that the `middle` element becomes
+// the first element in the container.
+template <typename C,
+ typename Iterator = container_algorithm_internal::ContainerIter<C>>
+Iterator c_rotate(C& sequence, Iterator middle) {
+ return y_absl::rotate(container_algorithm_internal::c_begin(sequence), middle,
+ container_algorithm_internal::c_end(sequence));
+}
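+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> v = {1, 2, 3, 4};
+//   y_absl::c_rotate(v, v.begin() + 2);  // v == {3, 4, 1, 2}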
+
+// c_rotate_copy()
+//
+// Container-based version of the <algorithm> `std::rotate_copy()` function to
+// shift a container's elements leftward such that the `middle` element becomes
+// the first element in a new iterator range.
+template <typename C, typename OutputIterator>
+OutputIterator c_rotate_copy(
+ const C& sequence,
+ container_algorithm_internal::ContainerIter<const C> middle,
+ OutputIterator result) {
+ return std::rotate_copy(container_algorithm_internal::c_begin(sequence),
+ middle, container_algorithm_internal::c_end(sequence),
+ result);
+}
+
+// c_shuffle()
+//
+// Container-based version of the <algorithm> `std::shuffle()` function to
+// randomly shuffle elements within the container using a `gen()` uniform random
+// number generator.
+template <typename RandomAccessContainer, typename UniformRandomBitGenerator>
+void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) {
+ std::shuffle(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<UniformRandomBitGenerator>(gen));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Partition functions
+//------------------------------------------------------------------------------
+
+// c_is_partitioned()
+//
+// Container-based version of the <algorithm> `std::is_partitioned()` function
+// to test whether all elements in the container for which `pred` returns `true`
+// precede those for which `pred` is `false`.
+template <typename C, typename Pred>
+bool c_is_partitioned(const C& c, Pred&& pred) {
+ return std::is_partitioned(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred));
+}
+
+// c_partition()
+//
+// Container-based version of the <algorithm> `std::partition()` function
+// to rearrange all elements in a container in such a way that all elements for
+// which `pred` returns `true` precede all those for which it returns `false`,
+// returning an iterator to the first element of the second group.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_partition(C& c, Pred&& pred) {
+ return std::partition(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred));
+}
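+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> v = {1, 2, 3, 4, 5};
+//   auto it = y_absl::c_partition(v, [](int x) { return x % 2 == 0; });
+//   // The even values now precede the odd values; `it` points at the first
+//   // odd value. The order within each group is unspecified.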
+
+// c_stable_partition()
+//
+// Container-based version of the <algorithm> `std::stable_partition()` function
+// to rearrange all elements in a container in such a way that all elements for
+// which `pred` returns `true` precede all those for which it returns `false`,
+// preserving the relative ordering between the two groups. The function returns
+// an iterator to the first element of the second group.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_stable_partition(C& c,
+ Pred&& pred) {
+ return std::stable_partition(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred));
+}
+
+// c_partition_copy()
+//
+// Container-based version of the <algorithm> `std::partition_copy()` function
+// to partition a container's elements, writing them through two output
+// iterators: one receiving the elements for which `pred` returns `true`, and
+// one receiving those for which `pred` returns `false`.
+template <typename C, typename OutputIterator1, typename OutputIterator2,
+ typename Pred>
+std::pair<OutputIterator1, OutputIterator2> c_partition_copy(
+ const C& c, OutputIterator1 out_true, OutputIterator2 out_false,
+ Pred&& pred) {
+ return std::partition_copy(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c), out_true,
+ out_false, std::forward<Pred>(pred));
+}
+
+// c_partition_point()
+//
+// Container-based version of the <algorithm> `std::partition_point()` function
+// to return an iterator to the first element of an already partitioned
+// container for which the given `pred` returns `false`.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_partition_point(C& c,
+ Pred&& pred) {
+ return std::partition_point(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<Pred>(pred));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Sorting functions
+//------------------------------------------------------------------------------
+
+// c_sort()
+//
+// Container-based version of the <algorithm> `std::sort()` function
+// to sort elements in ascending order of their values.
+template <typename C>
+void c_sort(C& c) {
+ std::sort(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_sort() for performing a `comp` comparison other than the
+// default `operator<`.
+template <typename C, typename LessThan>
+void c_sort(C& c, LessThan&& comp) {
+ std::sort(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<LessThan>(comp));
+}
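+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> v = {2, 3, 1};
+//   y_absl::c_sort(v);                       // v == {1, 2, 3}
+//   y_absl::c_sort(v, std::greater<int>());  // v == {3, 2, 1}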
+
+// c_stable_sort()
+//
+// Container-based version of the <algorithm> `std::stable_sort()` function
+// to sort elements in ascending order of their values, preserving the order
+// of equivalents.
+template <typename C>
+void c_stable_sort(C& c) {
+ std::stable_sort(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_stable_sort() for performing a `comp` comparison other than the
+// default `operator<`.
+template <typename C, typename LessThan>
+void c_stable_sort(C& c, LessThan&& comp) {
+ std::stable_sort(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<LessThan>(comp));
+}
+
+// c_is_sorted()
+//
+// Container-based version of the <algorithm> `std::is_sorted()` function
+// to evaluate whether the given container is sorted in ascending order.
+template <typename C>
+bool c_is_sorted(const C& c) {
+ return std::is_sorted(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c));
+}
+
+// c_is_sorted() overload for performing a `comp` comparison other than the
+// default `operator<`.
+template <typename C, typename LessThan>
+bool c_is_sorted(const C& c, LessThan&& comp) {
+ return std::is_sorted(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<LessThan>(comp));
+}
+
+// c_partial_sort()
+//
+// Container-based version of the <algorithm> `std::partial_sort()` function
+// to rearrange elements within a container such that elements before `middle`
+// are sorted in ascending order.
+template <typename RandomAccessContainer>
+void c_partial_sort(
+ RandomAccessContainer& sequence,
+ container_algorithm_internal::ContainerIter<RandomAccessContainer> middle) {
+ std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
+ container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_partial_sort() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+void c_partial_sort(
+ RandomAccessContainer& sequence,
+ container_algorithm_internal::ContainerIter<RandomAccessContainer> middle,
+ LessThan&& comp) {
+ std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
+ container_algorithm_internal::c_end(sequence),
+ std::forward<LessThan>(comp));
+}
+
+// c_partial_sort_copy()
+//
+// Container-based version of the <algorithm> `std::partial_sort_copy()`
+// function to copy the smallest elements of `sequence` into `result`, sorted
+// in ascending order. At most
+// min(result.last - result.first, sequence.last - sequence.first)
+// elements from the sequence will be stored in the result.
+template <typename C, typename RandomAccessContainer>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) {
+ return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ container_algorithm_internal::c_begin(result),
+ container_algorithm_internal::c_end(result));
+}
+
+// Overload of c_partial_sort_copy() for performing a `comp` comparison other
+// than the default `operator<`.
+template <typename C, typename RandomAccessContainer, typename LessThan>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_partial_sort_copy(const C& sequence, RandomAccessContainer& result,
+ LessThan&& comp) {
+ return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ container_algorithm_internal::c_begin(result),
+ container_algorithm_internal::c_end(result),
+ std::forward<LessThan>(comp));
+}
+
+// c_is_sorted_until()
+//
+// Container-based version of the <algorithm> `std::is_sorted_until()` function
+// to return the first element within a container that is not sorted in
+// ascending order as an iterator.
+template <typename C>
+container_algorithm_internal::ContainerIter<C> c_is_sorted_until(C& c) {
+ return std::is_sorted_until(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_is_sorted_until() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename C, typename LessThan>
+container_algorithm_internal::ContainerIter<C> c_is_sorted_until(
+ C& c, LessThan&& comp) {
+ return std::is_sorted_until(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<LessThan>(comp));
+}
+
+// c_nth_element()
+//
+// Container-based version of the <algorithm> `std::nth_element()` function
+// to rearrange the elements within a container such that the `nth` element
+// would be in that position in an ordered sequence; other elements may be in
+// any order, except that all elements preceding `nth` will be no greater than
+// that element, and all elements following `nth` will be no less than it.
+template <typename RandomAccessContainer>
+void c_nth_element(
+ RandomAccessContainer& sequence,
+ container_algorithm_internal::ContainerIter<RandomAccessContainer> nth) {
+ std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
+ container_algorithm_internal::c_end(sequence));
+}
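+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> v = {5, 1, 4, 2, 3};
+//   y_absl::c_nth_element(v, v.begin() + 2);
+//   // v[2] == 3, the median; the elements on either side are unordered.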
+
+// Overload of c_nth_element() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+void c_nth_element(
+ RandomAccessContainer& sequence,
+ container_algorithm_internal::ContainerIter<RandomAccessContainer> nth,
+ LessThan&& comp) {
+ std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
+ container_algorithm_internal::c_end(sequence),
+ std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Binary Search
+//------------------------------------------------------------------------------
+
+// c_lower_bound()
+//
+// Container-based version of the <algorithm> `std::lower_bound()` function
+// to return an iterator pointing to the first element in a sorted container
+// which does not compare less than `value`.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
+ Sequence& sequence, T&& value) {
+ return std::lower_bound(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(value));
+}
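+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> v = {1, 3, 3, 5};  // must already be sorted
+//   auto it = y_absl::c_lower_bound(v, 3);  // it points at v[1]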
+
+// Overload of c_lower_bound() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename LessThan>
+container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
+ Sequence& sequence, T&& value, LessThan&& comp) {
+ return std::lower_bound(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(value), std::forward<LessThan>(comp));
+}
+
+// c_upper_bound()
+//
+// Container-based version of the <algorithm> `std::upper_bound()` function
+// to return an iterator pointing to the first element in a sorted container
+// which is greater than `value`.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
+ Sequence& sequence, T&& value) {
+ return std::upper_bound(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(value));
+}
+
+// Overload of c_upper_bound() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename LessThan>
+container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
+ Sequence& sequence, T&& value, LessThan&& comp) {
+ return std::upper_bound(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(value), std::forward<LessThan>(comp));
+}
+
+// c_equal_range()
+//
+// Container-based version of the <algorithm> `std::equal_range()` function
+// to return a pair of iterators delimiting the subrange of elements in a
+// sorted container that compare equal to `value`; the second iterator points
+// one past the last such element.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
+c_equal_range(Sequence& sequence, T&& value) {
+ return std::equal_range(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(value));
+}
+
+// Overload of c_equal_range() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename LessThan>
+container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
+c_equal_range(Sequence& sequence, T&& value, LessThan&& comp) {
+ return std::equal_range(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(value), std::forward<LessThan>(comp));
+}
+
+// c_binary_search()
+//
+// Container-based version of the <algorithm> `std::binary_search()` function
+// to test whether a sorted container contains an element with a value
+// equivalent to `value`.
+template <typename Sequence, typename T>
+bool c_binary_search(Sequence&& sequence, T&& value) {
+ return std::binary_search(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(value));
+}
+
+// Overload of c_binary_search() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename LessThan>
+bool c_binary_search(Sequence&& sequence, T&& value, LessThan&& comp) {
+ return std::binary_search(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(value),
+ std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Merge functions
+//------------------------------------------------------------------------------
+
+// c_merge()
+//
+// Container-based version of the <algorithm> `std::merge()` function
+// to merge two sorted containers into a single sorted range, written through
+// the supplied output iterator.
+template <typename C1, typename C2, typename OutputIterator>
+OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) {
+ return std::merge(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2), result);
+}
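+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> a = {1, 3, 5};
+//   std::vector<int> b = {2, 4};
+//   std::vector<int> out;
+//   y_absl::c_merge(a, b, std::back_inserter(out));  // out == {1, 2, 3, 4, 5}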
+
+// Overload of c_merge() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename LessThan>
+OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result,
+ LessThan&& comp) {
+ return std::merge(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2), result,
+ std::forward<LessThan>(comp));
+}
+
+// c_inplace_merge()
+//
+// Container-based version of the <algorithm> `std::inplace_merge()` function
+// to merge the two consecutive sorted ranges [begin, middle) and [middle, end)
+// of a container into a single sorted range in place.
+template <typename C>
+void c_inplace_merge(C& c,
+ container_algorithm_internal::ContainerIter<C> middle) {
+ std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
+ container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_inplace_merge() for performing a merge using a `comp` other
+// than `operator<`.
+template <typename C, typename LessThan>
+void c_inplace_merge(C& c,
+ container_algorithm_internal::ContainerIter<C> middle,
+ LessThan&& comp) {
+ std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
+ container_algorithm_internal::c_end(c),
+ std::forward<LessThan>(comp));
+}
+
+// c_includes()
+//
+// Container-based version of the <algorithm> `std::includes()` function
+// to test whether a sorted container `c1` entirely contains another sorted
+// container `c2`.
+template <typename C1, typename C2>
+bool c_includes(const C1& c1, const C2& c2) {
+ return std::includes(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2));
+}
+
+// Overload of c_includes() for performing the comparison using a `comp` other
+// than `operator<`.
+template <typename C1, typename C2, typename LessThan>
+bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) {
+ return std::includes(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2),
+ std::forward<LessThan>(comp));
+}
+
+// c_set_union()
+//
+// Container-based version of the <algorithm> `std::set_union()` function
+// to write the union of two sorted containers through an output iterator;
+// duplicate values are not copied into the output.
+template <typename C1, typename C2, typename OutputIterator,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+ void>::type,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+ void>::type>
+OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) {
+ return std::set_union(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_union() for performing the comparison using a `comp`
+// other than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+ void>::type,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+ void>::type>
+OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output,
+ LessThan&& comp) {
+ return std::set_union(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2), output,
+ std::forward<LessThan>(comp));
+}
+
+// c_set_intersection()
+//
+// Container-based version of the <algorithm> `std::set_intersection()` function
+// to write the intersection of two sorted containers through an output
+// iterator.
+template <typename C1, typename C2, typename OutputIterator,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+ void>::type,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+ void>::type>
+OutputIterator c_set_intersection(const C1& c1, const C2& c2,
+ OutputIterator output) {
+ // In debug builds, ensure that both containers are sorted with respect to the
+ // default comparator. std::set_intersection requires the containers be sorted
+ // using operator<.
+ assert(y_absl::c_is_sorted(c1));
+ assert(y_absl::c_is_sorted(c2));
+ return std::set_intersection(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2), output);
+}
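+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> a = {1, 2, 3, 4};  // both inputs must be sorted
+//   std::vector<int> b = {2, 4, 6};
+//   std::vector<int> out;
+//   y_absl::c_set_intersection(a, b, std::back_inserter(out));
+//   // out == {2, 4}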
+
+// Overload of c_set_intersection() for performing the comparison using a
+// `comp` other than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+ void>::type,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+ void>::type>
+OutputIterator c_set_intersection(const C1& c1, const C2& c2,
+ OutputIterator output, LessThan&& comp) {
+  // In debug builds, ensure that both containers are sorted with respect to
+  // `comp`. std::set_intersection requires the containers be sorted using the
+  // same comparator that it is given.
+ assert(y_absl::c_is_sorted(c1, comp));
+ assert(y_absl::c_is_sorted(c2, comp));
+ return std::set_intersection(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2), output,
+ std::forward<LessThan>(comp));
+}
+
+// c_set_difference()
+//
+// Container-based version of the <algorithm> `std::set_difference()` function
+// to write the elements present in the first sorted container but not in the
+// second through an output iterator.
+template <typename C1, typename C2, typename OutputIterator,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+ void>::type,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+ void>::type>
+OutputIterator c_set_difference(const C1& c1, const C2& c2,
+ OutputIterator output) {
+ return std::set_difference(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_difference() for performing the comparison using a `comp`
+// other than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+ void>::type,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+ void>::type>
+OutputIterator c_set_difference(const C1& c1, const C2& c2,
+ OutputIterator output, LessThan&& comp) {
+ return std::set_difference(container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2), output,
+ std::forward<LessThan>(comp));
+}
+
+// c_set_symmetric_difference()
+//
+// Container-based version of the <algorithm> `std::set_symmetric_difference()`
+// function to write the elements present in either one sorted container or
+// the other, but not both, through an output iterator.
+template <typename C1, typename C2, typename OutputIterator,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+ void>::type,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+ void>::type>
+OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
+ OutputIterator output) {
+ return std::set_symmetric_difference(
+ container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_symmetric_difference() for performing the comparison
+// using a `comp` other than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+ void>::type,
+ typename = typename std::enable_if<
+ !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+ void>::type>
+OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
+ OutputIterator output,
+ LessThan&& comp) {
+ return std::set_symmetric_difference(
+ container_algorithm_internal::c_begin(c1),
+ container_algorithm_internal::c_end(c1),
+ container_algorithm_internal::c_begin(c2),
+ container_algorithm_internal::c_end(c2), output,
+ std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Heap functions
+//------------------------------------------------------------------------------
+
+// c_push_heap()
+//
+// Container-based version of the <algorithm> `std::push_heap()` function
+// to push the container's last element onto the heap formed by the preceding
+// elements.
+template <typename RandomAccessContainer>
+void c_push_heap(RandomAccessContainer& sequence) {
+ std::push_heap(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_push_heap() for performing a push operation on a heap using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+ std::push_heap(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<LessThan>(comp));
+}
+
+// c_pop_heap()
+//
+// Container-based version of the <algorithm> `std::pop_heap()` function
+// to pop the top element off a heap, moving it to the container's last
+// position and leaving the preceding elements as a heap.
+template <typename RandomAccessContainer>
+void c_pop_heap(RandomAccessContainer& sequence) {
+ std::pop_heap(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_pop_heap() for performing a pop operation on a heap using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+ std::pop_heap(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<LessThan>(comp));
+}
+
+// c_make_heap()
+//
+// Container-based version of the <algorithm> `std::make_heap()` function
+// to make a container a heap.
+template <typename RandomAccessContainer>
+void c_make_heap(RandomAccessContainer& sequence) {
+ std::make_heap(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence));
+}
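+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> v = {3, 1, 4, 1, 5};
+//   y_absl::c_make_heap(v);  // v.front() == 5: a max-heap by default
+//   y_absl::c_pop_heap(v);   // moves 5 to v.back(); v.front() == 4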
+
+// Overload of c_make_heap() for performing heap comparisons using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+ std::make_heap(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<LessThan>(comp));
+}
+
+// c_sort_heap()
+//
+// Container-based version of the <algorithm> `std::sort_heap()` function
+// to sort a heap into ascending order (after which it is no longer a heap).
+template <typename RandomAccessContainer>
+void c_sort_heap(RandomAccessContainer& sequence) {
+ std::sort_heap(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_sort_heap() for performing heap comparisons using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+ std::sort_heap(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<LessThan>(comp));
+}
+
+// c_is_heap()
+//
+// Container-based version of the <algorithm> `std::is_heap()` function
+// to check whether the given container is a heap.
+template <typename RandomAccessContainer>
+bool c_is_heap(const RandomAccessContainer& sequence) {
+ return std::is_heap(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_is_heap() for performing heap comparisons using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) {
+ return std::is_heap(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<LessThan>(comp));
+}
+
+// c_is_heap_until()
+//
+// Container-based version of the <algorithm> `std::is_heap_until()` function
+// to find the first element in a given container which is not in heap order.
+template <typename RandomAccessContainer>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_is_heap_until(RandomAccessContainer& sequence) {
+ return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_is_heap_until() for performing heap comparisons using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) {
+ return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Min/max
+//------------------------------------------------------------------------------
+
+// c_min_element()
+//
+// Container-based version of the <algorithm> `std::min_element()` function
+// to return an iterator pointing to the element with the smallest value, using
+// `operator<` to make the comparisons.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_min_element(
+ Sequence& sequence) {
+ return std::min_element(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_min_element() for performing a `comp` comparison other than
+// `operator<`.
+template <typename Sequence, typename LessThan>
+container_algorithm_internal::ContainerIter<Sequence> c_min_element(
+ Sequence& sequence, LessThan&& comp) {
+ return std::min_element(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<LessThan>(comp));
+}
+
+// c_max_element()
+//
+// Container-based version of the <algorithm> `std::max_element()` function
+// to return an iterator pointing to the element with the largest value, using
+// `operator<` to make the comparisons.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_max_element(
+ Sequence& sequence) {
+ return std::max_element(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_max_element() for performing a `comp` comparison other than
+// `operator<`.
+template <typename Sequence, typename LessThan>
+container_algorithm_internal::ContainerIter<Sequence> c_max_element(
+ Sequence& sequence, LessThan&& comp) {
+ return std::max_element(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<LessThan>(comp));
+}
+
+// c_minmax_element()
+//
+// Container-based version of the <algorithm> `std::minmax_element()` function
+// to return a pair of iterators pointing to the elements containing the
+// smallest and largest values, respectively, using `operator<` to make the
+// comparisons.
+template <typename C>
+container_algorithm_internal::ContainerIterPairType<C, C>
+c_minmax_element(C& c) {
+ return std::minmax_element(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_minmax_element() for performing `comp` comparisons other than
+// `operator<`.
+template <typename C, typename LessThan>
+container_algorithm_internal::ContainerIterPairType<C, C>
+c_minmax_element(C& c, LessThan&& comp) {
+ return std::minmax_element(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Lexicographical Comparisons
+//------------------------------------------------------------------------------
+
+// c_lexicographical_compare()
+//
+// Container-based version of the <algorithm> `std::lexicographical_compare()`
+// function to lexicographically compare (e.g. sort words alphabetically) two
+// container sequences. The comparison is performed using `operator<`. Note
+// that capital letters ("A-Z") have ASCII values less than lowercase letters
+// ("a-z").
+template <typename Sequence1, typename Sequence2>
+bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2) {
+ return std::lexicographical_compare(
+ container_algorithm_internal::c_begin(sequence1),
+ container_algorithm_internal::c_end(sequence1),
+ container_algorithm_internal::c_begin(sequence2),
+ container_algorithm_internal::c_end(sequence2));
+}
+
+// Overload of c_lexicographical_compare() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename Sequence1, typename Sequence2, typename LessThan>
+bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2,
+ LessThan&& comp) {
+ return std::lexicographical_compare(
+ container_algorithm_internal::c_begin(sequence1),
+ container_algorithm_internal::c_end(sequence1),
+ container_algorithm_internal::c_begin(sequence2),
+ container_algorithm_internal::c_end(sequence2),
+ std::forward<LessThan>(comp));
+}
+
+// c_next_permutation()
+//
+// Container-based version of the <algorithm> `std::next_permutation()` function
+// to rearrange a container's elements into the next lexicographically greater
+// permutation.
+template <typename C>
+bool c_next_permutation(C& c) {
+ return std::next_permutation(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_next_permutation() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename C, typename LessThan>
+bool c_next_permutation(C& c, LessThan&& comp) {
+ return std::next_permutation(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<LessThan>(comp));
+}
+
+// c_prev_permutation()
+//
+// Container-based version of the <algorithm> `std::prev_permutation()` function
+// to rearrange a container's elements into the next lexicographically lesser
+// permutation.
+template <typename C>
+bool c_prev_permutation(C& c) {
+ return std::prev_permutation(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_prev_permutation() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename C, typename LessThan>
+bool c_prev_permutation(C& c, LessThan&& comp) {
+ return std::prev_permutation(container_algorithm_internal::c_begin(c),
+ container_algorithm_internal::c_end(c),
+ std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <numeric> algorithms
+//------------------------------------------------------------------------------
+
+// c_iota()
+//
+// Container-based version of the <numeric> `std::iota()` function
+// to fill a container with successive values of `value`, as if incremented
+// with `++value` after each element is written.
+template <typename Sequence, typename T>
+void c_iota(Sequence& sequence, T&& value) {
+ std::iota(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(value));
+}
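+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> v(4);
+//   y_absl::c_iota(v, 10);  // v == {10, 11, 12, 13}
+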
+// c_accumulate()
+//
+// Container-based version of the <numeric> `std::accumulate()` function
+// to accumulate the element values of a container to `init` and return that
+// accumulation by value.
+//
+// Note: Due to a language technicality this function has return type
+// y_absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
+template <typename Sequence, typename T>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init) {
+ return std::accumulate(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(init));
+}
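+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> v = {1, 2, 3};
+//   int sum = y_absl::c_accumulate(v, 0);  // sum == 6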
+
+// Overload of c_accumulate() for using a binary operation other than
+// addition for computing the accumulation.
+template <typename Sequence, typename T, typename BinaryOp>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init,
+ BinaryOp&& binary_op) {
+ return std::accumulate(container_algorithm_internal::c_begin(sequence),
+ container_algorithm_internal::c_end(sequence),
+ std::forward<T>(init),
+ std::forward<BinaryOp>(binary_op));
+}
+
+// c_inner_product()
+//
+// Container-based version of the <numeric> `std::inner_product()` function
+// to compute the cumulative inner product of container element pairs.
+//
+// Note: Due to a language technicality this function has return type
+// y_absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
+template <typename Sequence1, typename Sequence2, typename T>
+decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
+ T&& sum) {
+ return std::inner_product(container_algorithm_internal::c_begin(factors1),
+ container_algorithm_internal::c_end(factors1),
+ container_algorithm_internal::c_begin(factors2),
+ std::forward<T>(sum));
+}
+
+// Overload of c_inner_product() for using binary operations other than
+// `operator+` (for computing the accumulation) and `operator*` (for computing
+// the product of each pair of elements from the two containers).
+template <typename Sequence1, typename Sequence2, typename T,
+ typename BinaryOp1, typename BinaryOp2>
+decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
+ T&& sum, BinaryOp1&& op1, BinaryOp2&& op2) {
+ return std::inner_product(container_algorithm_internal::c_begin(factors1),
+ container_algorithm_internal::c_end(factors1),
+ container_algorithm_internal::c_begin(factors2),
+ std::forward<T>(sum), std::forward<BinaryOp1>(op1),
+ std::forward<BinaryOp2>(op2));
+}
+
+// c_adjacent_difference()
+//
+// Container-based version of the <numeric> `std::adjacent_difference()`
+// function to compute the difference between each element and the one preceding
+// it and write it to an iterator.
+template <typename InputSequence, typename OutputIt>
+OutputIt c_adjacent_difference(const InputSequence& input,
+ OutputIt output_first) {
+ return std::adjacent_difference(container_algorithm_internal::c_begin(input),
+ container_algorithm_internal::c_end(input),
+ output_first);
+}
+
+// Overload of c_adjacent_difference() for using a binary operation other than
+// subtraction to compute the adjacent difference.
+template <typename InputSequence, typename OutputIt, typename BinaryOp>
+OutputIt c_adjacent_difference(const InputSequence& input,
+ OutputIt output_first, BinaryOp&& op) {
+ return std::adjacent_difference(container_algorithm_internal::c_begin(input),
+ container_algorithm_internal::c_end(input),
+ output_first, std::forward<BinaryOp>(op));
+}
+
+// c_partial_sum()
+//
+// Container-based version of the <numeric> `std::partial_sum()` function
+// to compute the partial sum of the elements in a sequence and write them
+// to an iterator. The partial sum is the sum of all element values so far in
+// the sequence.
+template <typename InputSequence, typename OutputIt>
+OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) {
+ return std::partial_sum(container_algorithm_internal::c_begin(input),
+ container_algorithm_internal::c_end(input),
+ output_first);
+}
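+
+// Example (an illustrative sketch; the values shown are hypothetical):
+//
+//   std::vector<int> v = {1, 2, 3, 4};
+//   std::vector<int> sums;
+//   y_absl::c_partial_sum(v, std::back_inserter(sums));
+//   // sums == {1, 3, 6, 10}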
+
+// Overload of c_partial_sum() for using a binary operation other than addition
+// to compute the "partial sum".
+template <typename InputSequence, typename OutputIt, typename BinaryOp>
+OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first,
+ BinaryOp&& op) {
+ return std::partial_sum(container_algorithm_internal::c_begin(input),
+ container_algorithm_internal::c_end(input),
+ output_first, std::forward<BinaryOp>(op));
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_ALGORITHM_CONTAINER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/ya.make
new file mode 100644
index 00000000000..b5ead458565
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/ya.make
@@ -0,0 +1,14 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h
new file mode 100644
index 00000000000..8321acda516
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h
@@ -0,0 +1,735 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This header file defines macros for declaring attributes for functions,
+// types, and variables.
+//
+// These macros are used within Abseil and allow the compiler to optimize, where
+// applicable, certain function calls.
+//
+// Most macros here are exposing GCC or Clang features, and are stubbed out for
+// other compilers.
+//
+// GCC attributes documentation:
+// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html
+//
+// Most attributes in this file are already supported by GCC 4.7. However, some
+// of them are not supported in older version of Clang. Thus, we check
+// `__has_attribute()` first. If the check fails, we check if we are on GCC and
+// assume the attribute exists on GCC (which is verified on GCC 4.7).
+
+#ifndef ABSL_BASE_ATTRIBUTES_H_
+#define ABSL_BASE_ATTRIBUTES_H_
+
+#include "y_absl/base/config.h"
+
+// ABSL_HAVE_ATTRIBUTE
+//
+// A function-like feature checking macro that is a wrapper around
+// `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a
+// nonzero constant integer if the attribute is supported or 0 if not.
+//
+// It evaluates to zero if `__has_attribute` is not defined by the compiler.
+//
+// GCC: https://gcc.gnu.org/gcc-5/changes.html
+// Clang: https://clang.llvm.org/docs/LanguageExtensions.html
+#ifdef __has_attribute
+#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define ABSL_HAVE_ATTRIBUTE(x) 0
+#endif
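+
+// For illustration, a typical feature check (a sketch; `MY_NOINLINE` is a
+// hypothetical user macro, not part of Abseil):
+//
+//   #if ABSL_HAVE_ATTRIBUTE(noinline)
+//   #define MY_NOINLINE __attribute__((noinline))
+//   #else
+//   #define MY_NOINLINE
+//   #endif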
+
+// ABSL_HAVE_CPP_ATTRIBUTE
+//
+// A function-like feature checking macro that accepts C++11 style attributes.
+// It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6
+// (https://en.cppreference.com/w/cpp/experimental/feature_test). If the
+// compiler does not define `__has_cpp_attribute`, this macro evaluates to 0.
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+// NOTE: requiring __cplusplus above should not be necessary, but
+// works around https://bugs.llvm.org/show_bug.cgi?id=23435.
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) 0
+#endif
+
+// -----------------------------------------------------------------------------
+// Function Attributes
+// -----------------------------------------------------------------------------
+//
+// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+// Clang: https://clang.llvm.org/docs/AttributeReference.html
+
+// ABSL_PRINTF_ATTRIBUTE
+// ABSL_SCANF_ATTRIBUTE
+//
+// Tells the compiler to perform `printf` format string checking if the
+// compiler supports it; see the 'format' attribute in
+// <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html>.
+//
+// Note: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \
+ __attribute__((__format__(__printf__, string_index, first_to_check)))
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \
+ __attribute__((__format__(__scanf__, string_index, first_to_check)))
+#else
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check)
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check)
+#endif
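+
+// For illustration, a hypothetical declaration (the function `LogF` is made
+// up): the format string is parameter 1 and checking starts at parameter 2.
+//
+//   void LogF(const char* format, ...) ABSL_PRINTF_ATTRIBUTE(1, 2);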
+
+// ABSL_ATTRIBUTE_ALWAYS_INLINE
+// ABSL_ATTRIBUTE_NOINLINE
+//
+// Forces functions either to be inlined or to never be inlined. Introduced in
+// GCC 3.1.
+#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
+ (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1
+#else
+#define ABSL_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline))
+#define ABSL_HAVE_ATTRIBUTE_NOINLINE 1
+#else
+#define ABSL_ATTRIBUTE_NOINLINE
+#endif
+
+// ABSL_ATTRIBUTE_NO_TAIL_CALL
+//
+// Prevents the compiler from optimizing away stack frames for functions which
+// end in a call to another function.
+#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls)
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls))
+#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__)
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL \
+ __attribute__((optimize("no-optimize-sibling-calls")))
+#else
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0
+#endif
+
+// ABSL_ATTRIBUTE_WEAK
+//
+// Tags a function as weak for the purposes of compilation and linking.
+// Weak attributes did not work properly in LLVM's Windows backend before
+// 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598
+// for further information.
+// The MinGW compiler doesn't complain about the weak attribute until the link
+// step, presumably because Windows doesn't use ELF binaries.
+#if (ABSL_HAVE_ATTRIBUTE(weak) || \
+ (defined(__GNUC__) && !defined(__clang__))) && \
+    (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \
+    !defined(__MINGW32__)
+#undef ABSL_ATTRIBUTE_WEAK
+#define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
+#define ABSL_HAVE_ATTRIBUTE_WEAK 1
+#else
+#define ABSL_ATTRIBUTE_WEAK
+#define ABSL_HAVE_ATTRIBUTE_WEAK 0
+#endif
+
+// ABSL_ATTRIBUTE_NONNULL
+//
+// Tells the compiler either (a) that a particular function parameter
+// should be a non-null pointer, or (b) that all pointer arguments should
+// be non-null.
+//
+// Note: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+//
+// Args are indexed starting at 1.
+//
+// For non-static class member functions, the implicit `this` argument
+// is arg 1, and the first explicit argument is arg 2. For static class member
+// functions, there is no implicit `this`, and the first explicit argument is
+// arg 1.
+//
+// Example:
+//
+// /* arg_a cannot be null, but arg_b can */
+// void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1);
+//
+// class C {
+// /* arg_a cannot be null, but arg_b can */
+// void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2);
+//
+// /* arg_a cannot be null, but arg_b can */
+// static void StaticMethod(void* arg_a, void* arg_b)
+// ABSL_ATTRIBUTE_NONNULL(1);
+// };
+//
+// If no arguments are provided, then all pointer arguments should be non-null.
+//
+// /* No pointer arguments may be null. */
+// void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL();
+//
+// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but
+// ABSL_ATTRIBUTE_NONNULL does not.
+#if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index)))
+#else
+#define ABSL_ATTRIBUTE_NONNULL(...)
+#endif
+
+// ABSL_ATTRIBUTE_NORETURN
+//
+// Tells the compiler that a given function never returns.
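+//
+// Example (illustrative; `FatalError` is hypothetical):
+//
+//   ABSL_ATTRIBUTE_NORETURN void FatalError(const char* msg);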
+#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn))
+#elif defined(_MSC_VER)
+#define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn)
+#else
+#define ABSL_ATTRIBUTE_NORETURN
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
+//
+// Tells the AddressSanitizer (or other memory testing tools) to ignore a given
+// function. Useful for cases when a function reads random locations on stack,
+// calls _exit from a cloned subprocess, deliberately accesses buffer
+// out of bounds or does other scary things with memory.
+// NOTE: GCC has supported AddressSanitizer (asan) since 4.8:
+// https://gcc.gnu.org/gcc-4.8/changes.html
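+//
+// Example (a sketch; `ProbePastEnd` is hypothetical):
+//
+//   ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS void ProbePastEnd(const char* p);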
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+//
+// Tells the MemorySanitizer to relax the handling of a given function. All "Use
+// of uninitialized value" warnings from such functions will be suppressed, and
+// all values loaded from memory will be considered fully initialized. This
+// attribute is similar to the ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS attribute
+// above, but deals with initialized-ness rather than addressability issues.
+// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC.
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize_memory)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
+//
+// Tells the ThreadSanitizer to not instrument a given function.
+// NOTE: GCC has supported ThreadSanitizer (tsan) since 4.8:
+// https://gcc.gnu.org/gcc-4.8/changes.html
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize_thread)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
+//
+// Tells the UndefinedSanitizer to ignore a given function. Useful for cases
+// where certain behavior (e.g. division by zero) is being used intentionally.
+// NOTE: GCC has supported UndefinedBehaviorSanitizer (ubsan) since 4.9:
+// https://gcc.gnu.org/gcc-4.9/changes.html
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
+ __attribute__((no_sanitize_undefined))
+#elif ABSL_HAVE_ATTRIBUTE(no_sanitize)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
+ __attribute__((no_sanitize("undefined")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_CFI
+//
+// Tells the ControlFlowIntegrity sanitizer to not instrument a given function.
+// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details.
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK
+//
+// Tells the SafeStack to not instrument a given function.
+// See https://clang.llvm.org/docs/SafeStack.html for details.
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \
+ __attribute__((no_sanitize("safe-stack")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK
+#endif
+
+// ABSL_ATTRIBUTE_RETURNS_NONNULL
+//
+// Tells the compiler that a particular function never returns a null pointer.
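+//
+// Example (illustrative; `AllocateOrDie` is hypothetical):
+//
+//   ABSL_ATTRIBUTE_RETURNS_NONNULL void* AllocateOrDie(size_t n);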
+#if ABSL_HAVE_ATTRIBUTE(returns_nonnull)
+#define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
+#else
+#define ABSL_ATTRIBUTE_RETURNS_NONNULL
+#endif
+
+// ABSL_HAVE_ATTRIBUTE_SECTION
+//
+// Indicates whether labeled sections are supported. Weak symbol support is
+// a prerequisite. Labeled sections are not supported on Darwin/iOS.
+#ifdef ABSL_HAVE_ATTRIBUTE_SECTION
+#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set
+#elif (ABSL_HAVE_ATTRIBUTE(section) || \
+ (defined(__GNUC__) && !defined(__clang__))) && \
+ !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK
+#define ABSL_HAVE_ATTRIBUTE_SECTION 1
+
+// ABSL_ATTRIBUTE_SECTION
+//
+// Tells the compiler/linker to put a given function into a section and define
+// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
+// This functionality is supported by the GNU linker. Any function annotated
+// with `ABSL_ATTRIBUTE_SECTION` must not be inlined, or it will be placed
+// into whatever section its caller is placed into.
+//
+#ifndef ABSL_ATTRIBUTE_SECTION
+#define ABSL_ATTRIBUTE_SECTION(name) \
+ __attribute__((section(#name))) __attribute__((noinline))
+#endif
+
+// ABSL_ATTRIBUTE_SECTION_VARIABLE
+//
+// Tells the compiler/linker to put a given variable into a section and define
+// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
+// This functionality is supported by the GNU linker.
+#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
+#ifdef _AIX
+// __attribute__((section(#name))) on AIX is achieved by using the `.csect`
+// pseudo op, which includes an additional integer as part of its syntax
+// indicating alignment. If data falls under different alignments, you might
+// get a compilation error indicating a `Section type conflict`.
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#else
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
+#endif
+#endif
+
+// ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+//
+// A weak section declaration to be used as a global declaration
+// for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link
+// even without functions with ABSL_ATTRIBUTE_SECTION(name).
+// ABSL_DEFINE_ATTRIBUTE_SECTION should be used in exactly one file; it's
+// a no-op on ELF but not on Mach-O.
+//
+#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
+ extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
+ extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
+#endif
+#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#endif
+
+// ABSL_ATTRIBUTE_SECTION_START
+//
+// Returns `void*` pointers to start/end of a section of code with
+// functions having ABSL_ATTRIBUTE_SECTION(name).
+// Returns 0 if no such functions exist.
+// One must use ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile
+// and link.
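+//
+// Example (a sketch; `mytag` and the function names are hypothetical):
+//
+//   ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(mytag);
+//   void Tagged() ABSL_ATTRIBUTE_SECTION(mytag);
+//
+//   bool InTaggedSection(void* pc) {
+//     return pc >= ABSL_ATTRIBUTE_SECTION_START(mytag) &&
+//            pc < ABSL_ATTRIBUTE_SECTION_STOP(mytag);
+//   }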
+//
+#define ABSL_ATTRIBUTE_SECTION_START(name) \
+ (reinterpret_cast<void *>(__start_##name))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) \
+ (reinterpret_cast<void *>(__stop_##name))
+
+#else // !ABSL_HAVE_ATTRIBUTE_SECTION
+
+#define ABSL_HAVE_ATTRIBUTE_SECTION 0
+
+// provide dummy definitions
+#define ABSL_ATTRIBUTE_SECTION(name)
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void *>(0))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void *>(0))
+
+#endif // ABSL_ATTRIBUTE_SECTION
+
+// ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+//
+// Support for aligning the stack on 32-bit x86.
+#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \
+ (defined(__GNUC__) && !defined(__clang__))
+#if defined(__i386__)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \
+ __attribute__((force_align_arg_pointer))
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#elif defined(__x86_64__)
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#else // !__i386__ && !__x86_64__
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#endif // __i386__
+#else
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#endif
+
+// ABSL_MUST_USE_RESULT
+//
+// Tells the compiler to warn about unused results.
+//
+// When annotating a function, it must appear as the first part of the
+// declaration or definition. The compiler will warn if the return value from
+// such a function is unused:
+//
+// ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket();
+// AllocateSprocket(); // Triggers a warning.
+//
+// When annotating a class, it is equivalent to annotating every function which
+// returns an instance.
+//
+// class ABSL_MUST_USE_RESULT Sprocket {};
+// Sprocket(); // Triggers a warning.
+//
+// Sprocket MakeSprocket();
+// MakeSprocket(); // Triggers a warning.
+//
+// Note that references and pointers are not instances:
+//
+// Sprocket* SprocketPointer();
+// SprocketPointer(); // Does *not* trigger a warning.
+//
+// ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result
+// warning. To keep that suppression working, `warn_unused_result` is used only
+// for clang but not for gcc, whose implementation ignores the cast to void:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425
+//
+// Note: past advice was to place the macro after the argument list.
+#if ABSL_HAVE_ATTRIBUTE(nodiscard)
+#define ABSL_MUST_USE_RESULT [[nodiscard]]
+#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
+#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result))
+#else
+#define ABSL_MUST_USE_RESULT
+#endif
+
+// ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD
+//
+// Tells GCC that a function is hot or cold. GCC can use this information to
+// improve static analysis: for example, a conditional branch to a cold
+// function is assumed to be unlikely to be taken.
+// This annotation is used for function declarations.
+//
+// Example:
+//
+// int foo() ABSL_ATTRIBUTE_HOT;
+#if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_HOT __attribute__((hot))
+#else
+#define ABSL_ATTRIBUTE_HOT
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_COLD __attribute__((cold))
+#else
+#define ABSL_ATTRIBUTE_COLD
+#endif
+
+// ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS
+//
+// We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT
+// macros, used as attributes to mark functions that must always or never be
+// instrumented by XRay. Currently, this is only supported in Clang/LLVM.
+//
+// For reference on the LLVM XRay instrumentation, see
+// http://llvm.org/docs/XRay.html.
+//
+// A function with the ABSL_XRAY_ALWAYS_INSTRUMENT macro attribute in its
+// declaration will always get the XRay instrumentation sleds. These sleds may
+// introduce some binary size and runtime overhead, so the attribute should be
+// used sparingly.
+//
+// These attributes only take effect when the following conditions are met:
+//
+// * The file/target is built in at least C++11 mode, with a Clang compiler
+// that supports XRay attributes.
+// * The file/target is built with the -fxray-instrument flag set for the
+// Clang/LLVM compiler.
+// * The function is defined in the translation unit (the compiler honors the
+// attribute in either the definition or the declaration, and must match).
+//
+// There are cases when, even when building with XRay instrumentation, users
+// might want to control specifically which functions are instrumented for a
+// particular build using special-case lists provided to the compiler. These
+// special case lists are provided to Clang via the
+// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The
+// attributes in source take precedence over these special-case lists.
+//
+// To disable the XRay attributes at build-time, users may define
+// ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific
+// packages/targets, as this may lead to conflicting definitions of functions at
+// link-time.
+//
+// XRay isn't currently supported on Android:
+// https://github.com/android/ndk/issues/368
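+//
+// Example (illustrative; the function names are hypothetical):
+//
+//   ABSL_XRAY_ALWAYS_INSTRUMENT void HandleRequest();
+//   ABSL_XRAY_NEVER_INSTRUMENT void TightInnerLoop();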
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \
+ !defined(ABSL_NO_XRAY_ATTRIBUTES) && !defined(__ANDROID__)
+#define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]]
+#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]]
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args)
+#define ABSL_XRAY_LOG_ARGS(N) \
+ [[clang::xray_always_instrument, clang::xray_log_args(N)]]
+#else
+#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]]
+#endif
+#else
+#define ABSL_XRAY_ALWAYS_INSTRUMENT
+#define ABSL_XRAY_NEVER_INSTRUMENT
+#define ABSL_XRAY_LOG_ARGS(N)
+#endif
+
+// ABSL_ATTRIBUTE_REINITIALIZES
+//
+// Indicates that a member function reinitializes the entire object to a known
+// state, independent of the previous state of the object.
+//
+// The clang-tidy check bugprone-use-after-move allows member functions marked
+// with this attribute to be called on objects that have been moved from;
+// without the attribute, this would result in a use-after-move warning.
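+//
+// Example (a sketch; `Buffer` is hypothetical):
+//
+//   class Buffer {
+//    public:
+//     ABSL_ATTRIBUTE_REINITIALIZES void Clear();
+//   };
+//
+//   Buffer b;
+//   Buffer other = std::move(b);
+//   b.Clear();  // OK: `b` is reinitialized, not used after move.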
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes)
+#define ABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]]
+#else
+#define ABSL_ATTRIBUTE_REINITIALIZES
+#endif
+
+// -----------------------------------------------------------------------------
+// Variable Attributes
+// -----------------------------------------------------------------------------
+
+// ABSL_ATTRIBUTE_UNUSED
+//
+// Prevents the compiler from complaining about variables that appear unused.
+//
+// For code or headers that are assured to only build with C++17 and up, prefer
+// just using the standard '[[maybe_unused]]' directly over this macro.
+//
+// Due to differences in positioning requirements between the old, compiler
+// specific __attribute__ syntax and the now standard [[maybe_unused]], this
+// macro does not attempt to take advantage of '[[maybe_unused]]'.
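+//
+// Example (illustrative; the variable is hypothetical):
+//
+//   ABSL_ATTRIBUTE_UNUSED static const int kDebugOnlyCounter = 0;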
+#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__))
+#undef ABSL_ATTRIBUTE_UNUSED
+#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__))
+#else
+#define ABSL_ATTRIBUTE_UNUSED
+#endif
+
+// ABSL_ATTRIBUTE_INITIAL_EXEC
+//
+// Tells the compiler to use "initial-exec" mode for a thread-local variable.
+// See http://people.redhat.com/drepper/tls.pdf for the gory details.
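+//
+// Example (a sketch; the variable is hypothetical):
+//
+//   static thread_local int counter ABSL_ATTRIBUTE_INITIAL_EXEC = 0;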
+#if ABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec")))
+#else
+#define ABSL_ATTRIBUTE_INITIAL_EXEC
+#endif
+
+// ABSL_ATTRIBUTE_PACKED
+//
+// Instructs the compiler not to use natural alignment for a tagged data
+// structure, but instead to reduce its alignment to 1.
+//
+// Therefore, DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing
+// so can cause atomic variables to be mis-aligned and silently violate
+// atomicity on x86.
+//
+// This attribute can either be applied to members of a structure or to a
+// structure in its entirety. Applying this attribute (judiciously) to a
+// structure in its entirety to optimize the memory footprint of very
+// commonly-used structs is fine. Do not apply this attribute to a structure in
+// its entirety if the purpose is to control the offsets of the members in the
+// structure. Instead, apply this attribute only to structure members that need
+// it.
+//
+// When applying ABSL_ATTRIBUTE_PACKED only to specific structure members the
+// natural alignment of structure members not annotated is preserved. Aligned
+// member accesses are faster than non-aligned member accesses even if the
+// targeted microprocessor supports non-aligned accesses.
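+//
+// Example (a sketch; `Record` is hypothetical): pack only the member that
+// needs it, leaving the other members naturally aligned.
+//
+//   struct Record {
+//     int64_t id;
+//     int32_t tag ABSL_ATTRIBUTE_PACKED;  // may be stored misaligned
+//   };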
+#if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__))
+#else
+#define ABSL_ATTRIBUTE_PACKED
+#endif
+
+// ABSL_ATTRIBUTE_FUNC_ALIGN
+//
+// Tells the compiler to align the start of a function to at least the given
+// alignment boundary.
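+//
+// Example (illustrative; `HotLoopKernel` is hypothetical):
+//
+//   ABSL_ATTRIBUTE_FUNC_ALIGN(32) void HotLoopKernel();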
+#if ABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__((aligned(bytes)))
+#else
+#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes)
+#endif
+
+// ABSL_FALLTHROUGH_INTENDED
+//
+// Annotates implicit fall-through between switch labels, allowing a case to
+// indicate intentional fallthrough and turn off warnings about any lack of a
+// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by
+// a semicolon and can be used in most places where `break` can, provided that
+// no statements exist between it and the next switch label.
+//
+// Example:
+//
+// switch (x) {
+// case 40:
+// case 41:
+// if (truth_is_out_there) {
+// ++x;
+// ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations
+// // in comments
+// } else {
+// return x;
+// }
+// case 42:
+// ...
+//
+// Notes: When supported, GCC and Clang can issue a warning on switch labels
+// with unannotated fallthrough using the warning `-Wimplicit-fallthrough`. See
+// clang documentation on language extensions for details:
+// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
+//
+// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro has
+// no effect on diagnostics. In any case, this macro has no effect on the
+// runtime behavior or performance of the code.
+
+#ifdef ABSL_FALLTHROUGH_INTENDED
+#error "ABSL_FALLTHROUGH_INTENDED should not be defined."
+#elif ABSL_HAVE_CPP_ATTRIBUTE(fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[fallthrough]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
+#else
+#define ABSL_FALLTHROUGH_INTENDED \
+ do { \
+ } while (0)
+#endif
+
+// ABSL_DEPRECATED()
+//
+// Marks deprecated class, struct, enum, function, method, and variable
+// declarations. The macro argument is used as a custom diagnostic message (e.g.
+// a suggestion of a better alternative).
+//
+// Examples:
+//
+// class ABSL_DEPRECATED("Use Bar instead") Foo {...};
+//
+// ABSL_DEPRECATED("Use Baz() instead") void Bar() {...}
+//
+// template <typename T>
+// ABSL_DEPRECATED("Use DoThat() instead")
+// void DoThis();
+//
+// Every usage of a deprecated entity will trigger a warning when compiled with
+// clang's `-Wdeprecated-declarations` option. This option is turned off by
+// default, but the warnings will be reported by clang-tidy.
+#if defined(__clang__) && defined(__cplusplus) && __cplusplus >= 201103L
+#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
+#endif
+
+#ifndef ABSL_DEPRECATED
+#define ABSL_DEPRECATED(message)
+#endif
+
+// ABSL_CONST_INIT
+//
+// A variable declaration annotated with the `ABSL_CONST_INIT` attribute will
+// not compile (on supported platforms) unless the variable has a constant
+// initializer. This is useful for variables with static and thread storage
+// duration, because it guarantees that they will not suffer from the so-called
+// "static init order fiasco". Prefer to put this attribute on the most visible
+// declaration of the variable, if there's more than one, because code that
+// accesses the variable can then use the attribute for optimization.
+//
+// Example:
+//
+// class MyClass {
+// public:
+// ABSL_CONST_INIT static MyType my_var;
+// };
+//
+// MyType MyClass::my_var = MakeMyType(...);
+//
+// Note that this attribute is redundant if the variable is declared constexpr.
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+#define ABSL_CONST_INIT [[clang::require_constant_initialization]]
+#else
+#define ABSL_CONST_INIT
+#endif // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+
+// ABSL_ATTRIBUTE_PURE_FUNCTION
+//
+// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure"
+// functions. A function is pure if its return value is only a function of its
+// arguments. The pure attribute prohibits a function from modifying the state
+// of the program that is observable by means other than inspecting the
+// function's return value. Declaring such functions with the pure attribute
+// allows the compiler to avoid emitting some calls in repeated invocations of
+// the function with the same argument values.
+//
+// Example:
+//
+// ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d);
+#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure)
+#define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]]
+#elif ABSL_HAVE_ATTRIBUTE(pure)
+#define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure))
+#else
+#define ABSL_ATTRIBUTE_PURE_FUNCTION
+#endif
+
+// ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function
+// parameter or implicit object parameter is retained by the return value of the
+// annotated function (or, for a parameter of a constructor, in the value of the
+// constructed object). This attribute causes warnings to be produced if a
+// temporary object does not live long enough.
+//
+// When applied to a reference parameter, the referenced object is assumed to be
+// retained by the return value of the function. When applied to a non-reference
+// parameter (for example, a pointer or a class type), all temporaries
+// referenced by the parameter are assumed to be retained by the return value of
+// the function.
+//
+// See also the upstream documentation:
+// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
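+//
+// Example (a sketch; `Min` is hypothetical): binding the result of `Min` to a
+// reference that outlives the temporary arguments produces a warning.
+//
+//   const int& Min(const int& a ABSL_ATTRIBUTE_LIFETIME_BOUND,
+//                  const int& b ABSL_ATTRIBUTE_LIFETIME_BOUND);
+//
+//   const int& r = Min(3, 4);  // warning: dangling reference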
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound)
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]]
+#elif ABSL_HAVE_ATTRIBUTE(lifetimebound)
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound))
+#else
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND
+#endif
+
+#endif // ABSL_BASE_ATTRIBUTES_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/call_once.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/call_once.h
new file mode 100644
index 00000000000..5d80e4e7caa
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/call_once.h
@@ -0,0 +1,219 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: call_once.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides an Abseil version of `std::call_once` for invoking
+// a given function at most once, across all threads. This Abseil version is
+// faster than the C++11 version and incorporates the C++17 argument-passing
+// fix, so that (for example) non-const references may be passed to the invoked
+// function.
+
+#ifndef ABSL_BASE_CALL_ONCE_H_
+#define ABSL_BASE_CALL_ONCE_H_
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/internal/invoke.h"
+#include "y_absl/base/internal/low_level_scheduling.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/scheduling_mode.h"
+#include "y_absl/base/internal/spinlock_wait.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/base/port.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+class once_flag;
+
+namespace base_internal {
+std::atomic<uint32_t>* ControlWord(y_absl::once_flag* flag);
+} // namespace base_internal
+
+// call_once()
+//
+// For all invocations using a given `once_flag`, invokes a given `fn` exactly
+// once across all threads. The first call to `call_once()` with a particular
+// `once_flag` argument (that does not throw an exception) will run the
+// specified function with the provided `args`; other calls with the same
+// `once_flag` argument will not run the function, but will wait
+// for the provided function to finish running (if it is still running).
+//
+// This mechanism provides a safe, simple, and fast mechanism for one-time
+// initialization in a multi-threaded process.
+//
+// Example:
+//
+// class MyInitClass {
+// public:
+// ...
+// mutable y_absl::once_flag once_;
+//
+// MyInitClass* init() const {
+// y_absl::call_once(once_, &MyInitClass::Init, this);
+// return ptr_;
+//   }
+//  };
+//
+template <typename Callable, typename... Args>
+void call_once(y_absl::once_flag& flag, Callable&& fn, Args&&... args);
+
+// once_flag
+//
+// Objects of this type are used to distinguish calls to `call_once()` and
+// ensure the provided function is only invoked once across all threads. This
+// type is not copyable or movable. However, it has a `constexpr`
+// constructor, and is safe to use as a namespace-scoped global variable.
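+//
+// Example (a sketch; `Init` and `g_init_once` are hypothetical):
+//
+//   void Init();
+//   static y_absl::once_flag g_init_once;
+//
+//   void EnsureInit() { y_absl::call_once(g_init_once, Init); }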
+class once_flag {
+ public:
+ constexpr once_flag() : control_(0) {}
+ once_flag(const once_flag&) = delete;
+ once_flag& operator=(const once_flag&) = delete;
+
+ private:
+ friend std::atomic<uint32_t>* base_internal::ControlWord(once_flag* flag);
+ std::atomic<uint32_t> control_;
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+// Implementation details follow.
+//------------------------------------------------------------------------------
+
+namespace base_internal {
+
+// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
+// initialize entities used by the scheduler implementation.
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(y_absl::once_flag* flag, Callable&& fn, Args&&... args);
+
+// Disables scheduling while on stack when scheduling mode is non-cooperative.
+// No effect for cooperative scheduling modes.
+class SchedulingHelper {
+ public:
+ explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) {
+ if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+ guard_result_ = base_internal::SchedulingGuard::DisableRescheduling();
+ }
+ }
+
+ ~SchedulingHelper() {
+ if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+ base_internal::SchedulingGuard::EnableRescheduling(guard_result_);
+ }
+ }
+
+ private:
+ base_internal::SchedulingMode mode_;
+ bool guard_result_;
+};
+
+// Bit patterns for call_once state machine values. Internal implementation
+// detail, not for use by clients.
+//
+// The bit patterns are arbitrarily chosen from unlikely values, to aid in
+// debugging. However, kOnceInit must be 0, so that a zero-initialized
+// once_flag will be valid for immediate use.
+enum {
+ kOnceInit = 0,
+ kOnceRunning = 0x65C2937B,
+ kOnceWaiter = 0x05A308D2,
+  // A very small constant is chosen for kOnceDone so that it fits in a single
+  // compare-with-immediate instruction for the most common ISAs. This is
+  // verified for x86, POWER and ARM.
+ kOnceDone = 221, // Random Number
+};
+
+template <typename Callable, typename... Args>
+ABSL_ATTRIBUTE_NOINLINE
+void CallOnceImpl(std::atomic<uint32_t>* control,
+ base_internal::SchedulingMode scheduling_mode, Callable&& fn,
+ Args&&... args) {
+#ifndef NDEBUG
+ {
+ uint32_t old_control = control->load(std::memory_order_relaxed);
+ if (old_control != kOnceInit &&
+ old_control != kOnceRunning &&
+ old_control != kOnceWaiter &&
+ old_control != kOnceDone) {
+ ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx",
+ static_cast<unsigned long>(old_control)); // NOLINT
+ }
+ }
+#endif // NDEBUG
+ static const base_internal::SpinLockWaitTransition trans[] = {
+ {kOnceInit, kOnceRunning, true},
+ {kOnceRunning, kOnceWaiter, false},
+ {kOnceDone, kOnceDone, true}};
+
+ // Must do this before potentially modifying control word's state.
+ base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
+ // Short circuit the simplest case to avoid procedure call overhead.
+ // The base_internal::SpinLockWait() call returns either kOnceInit or
+ // kOnceDone. If it returns kOnceDone, it must have loaded the control word
+ // with std::memory_order_acquire and seen a value of kOnceDone.
+ uint32_t old_control = kOnceInit;
+ if (control->compare_exchange_strong(old_control, kOnceRunning,
+ std::memory_order_relaxed) ||
+ base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
+ scheduling_mode) == kOnceInit) {
+ base_internal::invoke(std::forward<Callable>(fn),
+ std::forward<Args>(args)...);
+ old_control =
+ control->exchange(base_internal::kOnceDone, std::memory_order_release);
+ if (old_control == base_internal::kOnceWaiter) {
+ base_internal::SpinLockWake(control, true);
+ }
+ } // else *control is already kOnceDone
+}
+
+inline std::atomic<uint32_t>* ControlWord(once_flag* flag) {
+ return &flag->control_;
+}
+
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(y_absl::once_flag* flag, Callable&& fn, Args&&... args) {
+ std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
+ uint32_t s = once->load(std::memory_order_acquire);
+ if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+ base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY,
+ std::forward<Callable>(fn),
+ std::forward<Args>(args)...);
+ }
+}
+
+} // namespace base_internal
+
+template <typename Callable, typename... Args>
+void call_once(y_absl::once_flag& flag, Callable&& fn, Args&&... args) {
+ std::atomic<uint32_t>* once = base_internal::ControlWord(&flag);
+ uint32_t s = once->load(std::memory_order_acquire);
+ if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+ base_internal::CallOnceImpl(
+ once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL,
+ std::forward<Callable>(fn), std::forward<Args>(args)...);
+ }
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_CALL_ONCE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h
new file mode 100644
index 00000000000..6270ede21ed
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h
@@ -0,0 +1,187 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: casts.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines casting templates to fit use cases not covered by
+// the standard casts provided in the C++ standard. As with all cast operations,
+// use these with caution and only if alternatives do not exist.
+
+#ifndef ABSL_BASE_CASTS_H_
+#define ABSL_BASE_CASTS_H_
+
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/internal/identity.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace internal_casts {
+
+template <class Dest, class Source>
+struct is_bitcastable
+ : std::integral_constant<
+ bool,
+ sizeof(Dest) == sizeof(Source) &&
+ type_traits_internal::is_trivially_copyable<Source>::value &&
+ type_traits_internal::is_trivially_copyable<Dest>::value &&
+ std::is_default_constructible<Dest>::value> {};
+
+} // namespace internal_casts
+
+// implicit_cast()
+//
+// Performs an implicit conversion between types following the language
+// rules for implicit conversion; if an implicit conversion is otherwise
+// allowed by the language in the given context, this function performs such an
+// implicit conversion.
+//
+// Example:
+//
+// // If the context allows implicit conversion:
+// From from;
+// To to = from;
+//
+// // Such code can be replaced by:
+// implicit_cast<To>(from);
+//
+// An `implicit_cast()` may also be used to annotate numeric type conversions
+// that, although safe, may produce compiler warnings (such as `long` to `int`).
+// Additionally, an `implicit_cast()` is also useful within return statements to
+// indicate a specific implicit conversion is being undertaken.
+//
+// Example:
+//
+// return implicit_cast<double>(size_in_bytes) / capacity_;
+//
+// Annotating code with `implicit_cast()` allows you to explicitly select
+// particular overloads and template instantiations, while providing a safer
+// cast than `reinterpret_cast()` or `static_cast()`.
+//
+// Additionally, an `implicit_cast()` can be used to allow upcasting within a
+// type hierarchy where incorrect use of `static_cast()` could accidentally
+// allow downcasting.
+//
+// Finally, an `implicit_cast()` can be used to perform implicit conversions
+// from unrelated types that otherwise couldn't be implicitly cast directly;
+// C++ will normally only implicitly cast "one step" in such conversions.
+//
+// That is, if C is a type which can be implicitly converted to B, with B being
+// a type that can be implicitly converted to A, an `implicit_cast()` can be
+// used to convert C to B (which the compiler can then implicitly convert to A
+// using language rules).
+//
+// Example:
+//
+// // Assume an object C is convertible to B, which is implicitly convertible
+// // to A
+// A a = implicit_cast<B>(C);
+//
+// Such implicit cast chaining may be useful within template logic.
+template <typename To>
+constexpr To implicit_cast(typename y_absl::internal::identity_t<To> to) {
+ return to;
+}
+
+// bit_cast()
+//
+// Performs a bitwise cast on a type without changing the underlying bit
+// representation of that type's value. The two types must be of the same size
+// and both types must be trivially copyable. As with most casts, use with
+// caution. A `bit_cast()` might be needed when you need to temporarily treat a
+// type as some other type, such as in the following cases:
+//
+// * Serialization (casting temporarily to `char *` for those purposes is
+// always allowed by the C++ standard)
+// * Managing the individual bits of a type within mathematical operations
+// that are not normally accessible through that type
+// * Casting non-pointer types to pointer types (casting the other way is
+// allowed by `reinterpret_cast()` but round-trips cannot occur the other
+// way).
+//
+// Example:
+//
+// float f = 3.14159265358979;
+// int i = bit_cast<int32_t>(f);
+// // i = 0x40490fdb
+//
+// Casting non-pointer types to pointer types and then dereferencing them
+// traditionally produces undefined behavior.
+//
+// Example:
+//
+// // WRONG
+// float f = 3.14159265358979; // WRONG
+// int i = * reinterpret_cast<int*>(&f); // WRONG
+//
+// The address-casting method produces undefined behavior according to the ISO
+// C++ specification section [basic.lval]. Roughly, this section says: if an
+// object in memory has one type, and a program accesses it with a different
+// type, the result is undefined behavior for most values of "different type".
+//
+// Such casting results in type punning: holding an object in memory of one type
+// and reading its bits back using a different type. A `bit_cast()` avoids this
+// issue by implementing its casts using `memcpy()`, which avoids introducing
+// this undefined behavior.
+//
+// NOTE: The requirements here are more strict than the bit_cast of standard
+// proposal p0476 due to the need for workarounds and lack of intrinsics.
+// Specifically, this implementation also requires `Dest` to be
+// default-constructible.
+template <
+ typename Dest, typename Source,
+ typename std::enable_if<internal_casts::is_bitcastable<Dest, Source>::value,
+ int>::type = 0>
+inline Dest bit_cast(const Source& source) {
+ Dest dest;
+ memcpy(static_cast<void*>(std::addressof(dest)),
+ static_cast<const void*>(std::addressof(source)), sizeof(dest));
+ return dest;
+}
+
+// NOTE: This overload is only picked if the requirements of bit_cast are
+// not met. It is therefore UB, but is provided temporarily as previous
+// versions of this function template were unchecked. Do not use this in
+// new code.
+template <
+ typename Dest, typename Source,
+ typename std::enable_if<
+ !internal_casts::is_bitcastable<Dest, Source>::value,
+ int>::type = 0>
+ABSL_DEPRECATED(
+ "y_absl::bit_cast type requirements were violated. Update the types "
+ "being used such that they are the same size and are both "
+ "TriviallyCopyable.")
+inline Dest bit_cast(const Source& source) {
+ static_assert(sizeof(Dest) == sizeof(Source),
+ "Source and destination types should have equal sizes.");
+
+ Dest dest;
+ memcpy(&dest, &source, sizeof(dest));
+ return dest;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_CASTS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h
new file mode 100644
index 00000000000..162843aa003
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h
@@ -0,0 +1,767 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: config.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of macros for checking the presence of
+// important compiler and platform features. Such macros can be used to
+// produce portable code by parameterizing compilation based on the presence or
+// lack of a given feature.
+//
+// We define a "feature" as some interface we wish to program to: for example,
+// a library function or system call. A value of `1` indicates support for
+// that feature; any other value indicates the feature support is undefined.
+//
+// Example:
+//
+// Suppose a programmer wants to write a program that uses the 'mmap()' system
+// call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to
+// selectively include the `sys/mman.h` header and bracket code using that
+// feature
+// in the macro:
+//
+// #include "y_absl/base/config.h"
+//
+// #ifdef ABSL_HAVE_MMAP
+// #include "sys/mman.h"
+// #endif //ABSL_HAVE_MMAP
+//
+// ...
+// #ifdef ABSL_HAVE_MMAP
+// void *ptr = mmap(...);
+// ...
+// #endif // ABSL_HAVE_MMAP
+
+#ifndef ABSL_BASE_CONFIG_H_
+#define ABSL_BASE_CONFIG_H_
+
+// Included for the __GLIBC__ macro (or similar macros on other systems).
+#include <limits.h>
+
+#ifdef __cplusplus
+// Included for __GLIBCXX__, _LIBCPP_VERSION
+#include <cstddef>
+#endif // __cplusplus
+
+#if defined(__APPLE__)
+// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
+// __IPHONE_8_0.
+#include <Availability.h>
+#include <TargetConditionals.h>
+#endif
+
+#include "y_absl/base/options.h"
+#include "y_absl/base/policy_checks.h"
+
+// Abseil long-term support (LTS) releases will define
+// `ABSL_LTS_RELEASE_VERSION` to the integer representing the date string of the
+// LTS release version, and will define `ABSL_LTS_RELEASE_PATCH_LEVEL` to the
+// integer representing the patch-level for that release.
+//
+// For example, for LTS release version "20300401.2", this would give us
+// ABSL_LTS_RELEASE_VERSION == 20300401 && ABSL_LTS_RELEASE_PATCH_LEVEL == 2
+//
+// These symbols will not be defined in non-LTS code.
+//
+// Abseil recommends that clients live-at-head. Therefore, if you are using
+// these symbols to assert a minimum version requirement, we recommend you do it
+// as
+//
+// #if defined(ABSL_LTS_RELEASE_VERSION) && ABSL_LTS_RELEASE_VERSION < 20300401
+// #error Project foo requires Abseil LTS version >= 20300401
+// #endif
+//
+// The `defined(ABSL_LTS_RELEASE_VERSION)` part of the check excludes
+// live-at-head clients from the minimum version assertion.
+//
+// See https://abseil.io/about/releases for more information on Abseil release
+// management.
+//
+// LTS releases can be obtained from
+// https://github.com/abseil/abseil-cpp/releases.
+#define ABSL_LTS_RELEASE_VERSION 20211102
+#define ABSL_LTS_RELEASE_PATCH_LEVEL 0
+
+// Helper macro to convert a CPP variable to a string literal.
+#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
+#define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x)
+
+// -----------------------------------------------------------------------------
+// Abseil namespace annotations
+// -----------------------------------------------------------------------------
+
+// ABSL_NAMESPACE_BEGIN/ABSL_NAMESPACE_END
+//
+// An annotation placed at the beginning/end of each `namespace y_absl` scope.
+// This is used to inject an inline namespace.
+//
+// The proper way to write Abseil code in the `y_absl` namespace is:
+//
+// namespace y_absl {
+// ABSL_NAMESPACE_BEGIN
+//
+// void Foo(); // y_absl::Foo().
+//
+// ABSL_NAMESPACE_END
+// } // namespace y_absl
+//
+// Users of Abseil should not use these macros, because users of Abseil should
+// not write `namespace y_absl {` in their own code for any reason. (Abseil does
+// not support forward declarations of its own types, nor does it support
+// user-provided specialization of Abseil templates. Code that violates these
+// rules may be broken without warning.)
+#if !defined(ABSL_OPTION_USE_INLINE_NAMESPACE) || \
+ !defined(ABSL_OPTION_INLINE_NAMESPACE_NAME)
+#error options.h is misconfigured.
+#endif
+
+// Check that ABSL_OPTION_INLINE_NAMESPACE_NAME is neither "head" nor ""
+#if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1
+
+#define ABSL_INTERNAL_INLINE_NAMESPACE_STR \
+ ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME)
+
+static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0',
+ "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must "
+ "not be empty.");
+static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
+ ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' ||
+ ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' ||
+ ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' ||
+ ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0',
+ "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must "
+ "be changed to a new, unique identifier name.");
+
+#endif
+
+#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
+#define ABSL_NAMESPACE_BEGIN
+#define ABSL_NAMESPACE_END
+#define ABSL_INTERNAL_C_SYMBOL(x) x
+#elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1
+#define ABSL_NAMESPACE_BEGIN \
+ inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME {
+#define ABSL_NAMESPACE_END }
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \
+ ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v)
+#define ABSL_INTERNAL_C_SYMBOL(x) \
+ ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME)
+#else
+#error options.h is misconfigured.
+#endif
+
+// -----------------------------------------------------------------------------
+// Compiler Feature Checks
+// -----------------------------------------------------------------------------
+
+// ABSL_HAVE_BUILTIN()
+//
+// Checks whether the compiler supports a Clang Feature Checking Macro, and if
+// so, checks whether it supports the provided builtin function "x" where x
+// is one of the functions noted in
+// https://clang.llvm.org/docs/LanguageExtensions.html
+//
+// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check.
+// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html
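+//
+// Example (an illustrative sketch; `MY_LIKELY` is a hypothetical macro):
+//
+//   #if ABSL_HAVE_BUILTIN(__builtin_expect)
+//   #define MY_LIKELY(x) (__builtin_expect(!!(x), 1))
+//   #else
+//   #define MY_LIKELY(x) (x)
+//   #endif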
+#ifdef __has_builtin
+#define ABSL_HAVE_BUILTIN(x) __has_builtin(x)
+#else
+#define ABSL_HAVE_BUILTIN(x) 0
+#endif
+
+#if defined(__is_identifier)
+#define ABSL_INTERNAL_HAS_KEYWORD(x) !(__is_identifier(x))
+#else
+#define ABSL_INTERNAL_HAS_KEYWORD(x) 0
+#endif
+
+#ifdef __has_feature
+#define ABSL_HAVE_FEATURE(f) __has_feature(f)
+#else
+#define ABSL_HAVE_FEATURE(f) 0
+#endif
+
+// Portable check for GCC minimum version:
+// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
+#if defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \
+ (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
+#else
+#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0
+#endif
+
+#if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__)
+#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \
+ (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y))
+#else
+#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0
+#endif
+
+// ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
+// We assume __thread is supported on Linux when compiled with Clang or compiled
+// against libstdc++ with _GLIBCXX_HAVE_TLS defined.
+#ifdef ABSL_HAVE_TLS
+#error ABSL_HAVE_TLS cannot be directly set
+#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
+#define ABSL_HAVE_TLS 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+//
+// Checks whether `std::is_trivially_destructible<T>` is supported.
+//
+// Notes: All supported compilers using libc++ support this feature, as does
+// gcc >= 4.8.1 using libstdc++, and Visual Studio.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
+#elif defined(_LIBCPP_VERSION) || defined(_MSC_VER) || \
+ (!defined(__clang__) && defined(__GLIBCXX__) && \
+ ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(4, 8))
+#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+//
+// Checks whether `std::is_trivially_default_constructible<T>` and
+// `std::is_trivially_copy_constructible<T>` are supported.
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+//
+// Checks whether `std::is_trivially_copy_assignable<T>` is supported.
+
+// Notes: Clang with libc++ supports these features, as does gcc >= 7.4 with
+// libstdc++, or gcc >= 8.2 with libc++, and Visual Studio (but not NVCC).
+#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
+#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
+#elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE)
+#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
+#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \
+ (!defined(__clang__) && \
+ ((ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(7, 4) && defined(__GLIBCXX__)) || \
+ (ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(8, 2) && \
+ defined(_LIBCPP_VERSION)))) || \
+ (defined(_MSC_VER) && !defined(__NVCC__))
+#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
+#endif
+
+// ABSL_HAVE_SOURCE_LOCATION_CURRENT
+//
+// Indicates whether `y_absl::SourceLocation::current()` will return useful
+// information in some contexts.
+#ifndef ABSL_HAVE_SOURCE_LOCATION_CURRENT
+#if ABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \
+ ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE)
+#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
+#elif ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0)
+#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
+#endif
+#endif
+
+// ABSL_HAVE_THREAD_LOCAL
+//
+// Checks whether C++11's `thread_local` storage duration specifier is
+// supported.
+#ifdef ABSL_HAVE_THREAD_LOCAL
+#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
+#elif defined(__APPLE__)
+// Notes:
+// * Xcode's clang did not support `thread_local` until version 8, and
+// even then not for all iOS < 9.0.
+// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
+// targeting iOS 9.x.
+// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
+// making ABSL_HAVE_FEATURE unreliable there.
+//
+#if ABSL_HAVE_FEATURE(cxx_thread_local) && \
+ !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+#else // !defined(__APPLE__)
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+
+// There are platforms for which TLS should not be used even though the compiler
+// makes it seem like it's supported (Android NDK < r12b for example).
+// This is primarily because of linker problems and toolchain misconfiguration:
+// Abseil does not intend to support this indefinitely. Currently, the newest
+// toolchain that we intend to support that requires this behavior is the
+// r11 NDK - allowing for a 5 year support window on that means this option
+// is likely to be removed around June of 2021.
+// TLS isn't supported until NDK r12b per
+// https://developer.android.com/ndk/downloads/revision_history.html
+// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
+// <android/ndk-version.h>. For NDK < r16, users should define these macros,
+// e.g. `-D__NDK_MAJOR__=11 -D__NDK_MINOR__=0` for NDK r11.
+#if defined(__ANDROID__) && defined(__clang__)
+#if __has_include(<android/ndk-version.h>)
+#include <android/ndk-version.h>
+#endif // __has_include(<android/ndk-version.h>)
+#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
+ defined(__NDK_MINOR__) && \
+ ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
+#undef ABSL_HAVE_TLS
+#undef ABSL_HAVE_THREAD_LOCAL
+#endif
+#endif // defined(__ANDROID__) && defined(__clang__)
+
+// ABSL_HAVE_INTRINSIC_INT128
+//
+// Checks whether the __int128 compiler extension for a 128-bit integral type is
+// supported.
+//
+// Note: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is
+// supported, but we avoid using it in certain cases:
+// * On Clang:
+// * Building using Clang for Windows, where the Clang runtime library has
+// 128-bit support only on LP64 architectures, but Windows is LLP64.
+// * On Nvidia's nvcc:
+// * nvcc also defines __GNUC__ and __SIZEOF_INT128__, but not all versions
+// actually support __int128.
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set
+#elif defined(__SIZEOF_INT128__)
+#if (defined(__clang__) && !defined(_WIN32)) || \
+ (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \
+ (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__))
+#define ABSL_HAVE_INTRINSIC_INT128 1
+#elif defined(__CUDACC__)
+// __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a
+// string explaining that it has been removed starting with CUDA 9. We use
+// nested #ifs because there is no short-circuiting in the preprocessor.
+// NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined.
+#if __CUDACC_VER__ >= 70000
+#define ABSL_HAVE_INTRINSIC_INT128 1
+#endif // __CUDACC_VER__ >= 70000
+#endif // defined(__CUDACC__)
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
+// ABSL_HAVE_EXCEPTIONS
+//
+// Checks whether the compiler both supports and enables exceptions. Many
+// compilers support a "no exceptions" mode that disables exceptions.
+//
+// Generally, when ABSL_HAVE_EXCEPTIONS is not defined:
+//
+// * Code using `throw` and `try` may not compile.
+// * The `noexcept` specifier will still compile and behave as normal.
+// * The `noexcept` operator may still return `false`.
+//
+// For further details, consult the compiler's documentation.
+#ifdef ABSL_HAVE_EXCEPTIONS
+#error ABSL_HAVE_EXCEPTIONS cannot be directly set.
+#elif ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6)
+// Clang >= 3.6
+#if ABSL_HAVE_FEATURE(cxx_exceptions)
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif // ABSL_HAVE_FEATURE(cxx_exceptions)
+#elif defined(__clang__)
+// Clang < 3.6
+// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro
+#if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
+// Handle remaining special cases and default to exceptions being supported.
+#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \
+ !(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) && \
+ !defined(__cpp_exceptions)) && \
+ !(defined(_MSC_VER) && !defined(_CPPUNWIND))
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif
+
+// -----------------------------------------------------------------------------
+// Platform Feature Checks
+// -----------------------------------------------------------------------------
+
+// Currently supported operating systems and associated preprocessor
+// symbols:
+//
+// Linux and Linux-derived __linux__
+// Android __ANDROID__ (implies __linux__)
+// Linux (non-Android) __linux__ && !__ANDROID__
+// Darwin (macOS and iOS) __APPLE__
+// Akaros (http://akaros.org) __ros__
+// Windows _WIN32
+// NaCL __native_client__
+// AsmJS __asmjs__
+// WebAssembly __wasm__
+// Fuchsia __Fuchsia__
+//
+// Note that since Android defines both __ANDROID__ and __linux__, one
+// may probe for either Linux or Android by simply testing for __linux__.
+
+// ABSL_HAVE_MMAP
+//
+// Checks whether the platform has an mmap(2) implementation as defined in
+// POSIX.1-2001.
+#ifdef ABSL_HAVE_MMAP
+#error ABSL_HAVE_MMAP cannot be directly set
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+ defined(_AIX) || defined(__ros__) || defined(__native_client__) || \
+ defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \
+ defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \
+ defined(__HAIKU__)
+#define ABSL_HAVE_MMAP 1
+#endif
+
+// ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+//
+// Checks whether the platform implements the pthread_(get|set)schedparam(3)
+// functions as defined in POSIX.1-2001.
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+ defined(_AIX) || defined(__ros__)
+#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
+#endif
+
+// ABSL_HAVE_SCHED_GETCPU
+//
+// Checks whether sched_getcpu is available.
+#ifdef ABSL_HAVE_SCHED_GETCPU
+#error ABSL_HAVE_SCHED_GETCPU cannot be directly set
+#elif defined(__linux__)
+#define ABSL_HAVE_SCHED_GETCPU 1
+#endif
+
+// ABSL_HAVE_SCHED_YIELD
+//
+// Checks whether the platform implements sched_yield(2) as defined in
+// POSIX.1-2001.
+#ifdef ABSL_HAVE_SCHED_YIELD
+#error ABSL_HAVE_SCHED_YIELD cannot be directly set
+#elif defined(__linux__) || defined(__ros__) || defined(__native_client__)
+#define ABSL_HAVE_SCHED_YIELD 1
+#endif
+
+// ABSL_HAVE_SEMAPHORE_H
+//
+// Checks whether the platform supports the <semaphore.h> header and sem_init(3)
+// family of functions as standardized in POSIX.1-2001.
+//
+// Note: While Apple provides <semaphore.h> for both iOS and macOS, it is
+// explicitly deprecated and will cause build failures if enabled for those
+// platforms. We side-step the issue by not defining it here for Apple
+// platforms.
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#error ABSL_HAVE_SEMAPHORE_H cannot be directly set
+#elif defined(__linux__) || defined(__ros__)
+#define ABSL_HAVE_SEMAPHORE_H 1
+#endif
+
+// ABSL_HAVE_ALARM
+//
+// Checks whether the platform supports the <signal.h> header and alarm(2)
+// function as standardized in POSIX.1-2001.
+#ifdef ABSL_HAVE_ALARM
+#error ABSL_HAVE_ALARM cannot be directly set
+#elif defined(__GOOGLE_GRTE_VERSION__)
+// feature tests for Google's GRTE
+#define ABSL_HAVE_ALARM 1
+#elif defined(__GLIBC__)
+// feature test for glibc
+#define ABSL_HAVE_ALARM 1
+#elif defined(_MSC_VER)
+// feature tests for Microsoft's library
+#elif defined(__MINGW32__)
+// mingw32 doesn't provide alarm(2):
+// https://osdn.net/projects/mingw/scm/git/mingw-org-wsl/blobs/5.2-trunk/mingwrt/include/unistd.h
+// mingw-w64 provides a no-op implementation:
+// https://sourceforge.net/p/mingw-w64/mingw-w64/ci/master/tree/mingw-w64-crt/misc/alarm.c
+#elif defined(__EMSCRIPTEN__)
+// emscripten doesn't support signals
+#elif defined(__Fuchsia__)
+// Signals don't exist on fuchsia.
+#elif defined(__native_client__)
+#else
+// other standard libraries
+#define ABSL_HAVE_ALARM 1
+#endif
+
+// ABSL_IS_LITTLE_ENDIAN
+// ABSL_IS_BIG_ENDIAN
+//
+// Checks the endianness of the platform.
+//
+// Notes: uses the built-in endian macros provided by GCC (since 4.6) and
+// Clang (since 3.2); see
+// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html.
+// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error.
+#if defined(ABSL_IS_BIG_ENDIAN)
+#error "ABSL_IS_BIG_ENDIAN cannot be directly set."
+#endif
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set."
+#endif
+
+#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define ABSL_IS_BIG_ENDIAN 1
+#elif defined(_WIN32)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#else
+#error "y_absl endian detection needs to be set up for your compiler"
+#endif
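+
+// For example (a sketch; ReadLE32/ReadBE32 are hypothetical helpers),
+// byte-order-dependent code can branch on these macros:
+//
+//   #if defined(ABSL_IS_LITTLE_ENDIAN)
+//   uint32_t v = ReadLE32(p);
+//   #elif defined(ABSL_IS_BIG_ENDIAN)
+//   uint32_t v = ReadBE32(p);
+//   #endif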
+
+// macOS < 10.14 and iOS < 12 don't let you use <any>, <optional>, or <variant>
+// even though the headers exist and are publicly noted to work. See
+// https://github.com/abseil/abseil-cpp/issues/207 and
+// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
+// libc++ spells out the availability requirements in the file
+// llvm-project/libcxx/include/__config via the #define
+// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS.
+#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
+ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
+#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
+#else
+#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
+#endif
+
+// ABSL_HAVE_STD_ANY
+//
+// Checks whether C++17 std::any is available by checking whether <any> exists.
+#ifdef ABSL_HAVE_STD_ANY
+#error "ABSL_HAVE_STD_ANY cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<any>) && defined(__cplusplus) && __cplusplus >= 201703L && \
+ !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_ANY 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_OPTIONAL
+//
+// Checks whether C++17 std::optional is available.
+#ifdef ABSL_HAVE_STD_OPTIONAL
+#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<optional>) && defined(__cplusplus) && \
+ __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_OPTIONAL 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_VARIANT
+//
+// Checks whether C++17 std::variant is available.
+#ifdef ABSL_HAVE_STD_VARIANT
+#error "ABSL_HAVE_STD_VARIANT cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<variant>) && defined(__cplusplus) && \
+ __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_VARIANT 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_STRING_VIEW
+//
+// Checks whether C++17 std::string_view is available.
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
+#endif
+
+#define ABSL_HAVE_STD_STRING_VIEW 1
+
+// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than
+// the support for <optional>, <any>, <string_view>, <variant>. So we use
+// _MSC_VER to check whether we have VS 2017 RTM (when <optional>, <any>,
+// <string_view>, <variant> are implemented) or higher. Also, `__cplusplus` is
+// not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
+// version.
+// TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
+#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
+ ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \
+ (defined(__cplusplus) && __cplusplus > 201402))
+// #define ABSL_HAVE_STD_ANY 1
+#define ABSL_HAVE_STD_OPTIONAL 1
+#define ABSL_HAVE_STD_VARIANT 1
+#define ABSL_HAVE_STD_STRING_VIEW 1
+#endif
+
+// ABSL_USES_STD_ANY
+//
+// Indicates whether y_absl::any is an alias for std::any.
+#if !defined(ABSL_OPTION_USE_STD_ANY)
+#error options.h is misconfigured.
+#elif ABSL_OPTION_USE_STD_ANY == 0 || \
+ (ABSL_OPTION_USE_STD_ANY == 2 && !defined(ABSL_HAVE_STD_ANY))
+#undef ABSL_USES_STD_ANY
+#elif ABSL_OPTION_USE_STD_ANY == 1 || \
+ (ABSL_OPTION_USE_STD_ANY == 2 && defined(ABSL_HAVE_STD_ANY))
+#define ABSL_USES_STD_ANY 1
+#else
+#error options.h is misconfigured.
+#endif
+
+// ABSL_USES_STD_OPTIONAL
+//
+// Indicates whether y_absl::optional is an alias for std::optional.
+#if !defined(ABSL_OPTION_USE_STD_OPTIONAL)
+#error options.h is misconfigured.
+#elif ABSL_OPTION_USE_STD_OPTIONAL == 0 || \
+ (ABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(ABSL_HAVE_STD_OPTIONAL))
+#undef ABSL_USES_STD_OPTIONAL
+#elif ABSL_OPTION_USE_STD_OPTIONAL == 1 || \
+ (ABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(ABSL_HAVE_STD_OPTIONAL))
+#define ABSL_USES_STD_OPTIONAL 1
+#else
+#error options.h is misconfigured.
+#endif
+
+// ABSL_USES_STD_VARIANT
+//
+// Indicates whether y_absl::variant is an alias for std::variant.
+#if !defined(ABSL_OPTION_USE_STD_VARIANT)
+#error options.h is misconfigured.
+#elif ABSL_OPTION_USE_STD_VARIANT == 0 || \
+ (ABSL_OPTION_USE_STD_VARIANT == 2 && !defined(ABSL_HAVE_STD_VARIANT))
+#undef ABSL_USES_STD_VARIANT
+#elif ABSL_OPTION_USE_STD_VARIANT == 1 || \
+ (ABSL_OPTION_USE_STD_VARIANT == 2 && defined(ABSL_HAVE_STD_VARIANT))
+#define ABSL_USES_STD_VARIANT 1
+#else
+#error options.h is misconfigured.
+#endif
+
+// ABSL_USES_STD_STRING_VIEW
+//
+// Indicates whether y_absl::string_view is an alias for std::string_view.
+#if !defined(ABSL_OPTION_USE_STD_STRING_VIEW)
+#error options.h is misconfigured.
+#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \
+ (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \
+ !defined(ABSL_HAVE_STD_STRING_VIEW))
+#undef ABSL_USES_STD_STRING_VIEW
+#elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \
+ (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \
+ defined(ABSL_HAVE_STD_STRING_VIEW))
+#define ABSL_USES_STD_STRING_VIEW 1
+#else
+#error options.h is misconfigured.
+#endif
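+
+// The net effect of the ABSL_USES_STD_* macros, as an illustrative sketch:
+// when ABSL_USES_STD_OPTIONAL is defined, y_absl::optional names the same
+// type as std::optional, so the two interoperate freely:
+//
+//   y_absl::optional<int> x = 5;
+//   std::optional<int> y = x;  // OK only when the alias is in effect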
+
+// In debug mode, MSVC 2017's std::variant throws an EXCEPTION_ACCESS_VIOLATION
+// SEH exception from emplace for variant<SomeStruct> when constructing the
+// struct can throw. This defeats some of variant_test and
+// variant_exception_safety_test.
+#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG)
+#define ABSL_INTERNAL_MSVC_2017_DBG_MODE
+#endif
+
+// ABSL_INTERNAL_MANGLED_NS
+// ABSL_INTERNAL_MANGLED_BACKREFERENCE
+//
+// Internal macros for building up mangled names in our internal fork of CCTZ.
+// This implementation detail is only needed and provided for the MSVC build.
+//
+// These macros both expand to string literals. ABSL_INTERNAL_MANGLED_NS is
+// the mangled spelling of the `y_absl` namespace, and
+// ABSL_INTERNAL_MANGLED_BACKREFERENCE is a back-reference integer representing
+// the proper count to skip past the CCTZ fork namespace names. (This number
+// is one larger when there is an inline namespace name to skip.)
+#if defined(_MSC_VER)
+#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
+#define ABSL_INTERNAL_MANGLED_NS "y_absl"
+#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5"
+#else
+#define ABSL_INTERNAL_MANGLED_NS \
+ ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) "@y_absl"
+#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6"
+#endif
+#endif
+
+#undef ABSL_INTERNAL_HAS_KEYWORD
+
+// ABSL_DLL
+//
+// When building Abseil as a DLL, this macro expands to `__declspec(dllexport)`
+// so we can annotate symbols appropriately as being exported. When used in
+// headers consuming a DLL, this macro expands to `__declspec(dllimport)` so
+// that consumers know the symbol is defined inside the DLL. In all other cases,
+// the macro expands to nothing.
+#if defined(_MSC_VER)
+#if defined(ABSL_BUILD_DLL)
+#define ABSL_DLL __declspec(dllexport)
+#elif defined(ABSL_CONSUME_DLL)
+#define ABSL_DLL __declspec(dllimport)
+#else
+#define ABSL_DLL
+#endif
+#else
+#define ABSL_DLL
+#endif // defined(_MSC_VER)
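+
+// A hedged usage sketch (MyFunction is an illustrative name, not a real
+// Abseil symbol): annotate each symbol that crosses the DLL boundary.
+//
+//   ABSL_DLL void MyFunction();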
+
+// ABSL_HAVE_MEMORY_SANITIZER
+//
+// MemorySanitizer (MSan) is a detector of uninitialized reads. It consists of
+// a compiler instrumentation module and a run-time library.
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+#error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set."
+#elif defined(__SANITIZE_MEMORY__)
+#define ABSL_HAVE_MEMORY_SANITIZER 1
+#elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer)
+#define ABSL_HAVE_MEMORY_SANITIZER 1
+#endif
+
+// ABSL_HAVE_THREAD_SANITIZER
+//
+// ThreadSanitizer (TSan) is a fast data race detector.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set."
+#elif defined(__SANITIZE_THREAD__)
+#define ABSL_HAVE_THREAD_SANITIZER 1
+#elif ABSL_HAVE_FEATURE(thread_sanitizer)
+#define ABSL_HAVE_THREAD_SANITIZER 1
+#endif
+
+// ABSL_HAVE_ADDRESS_SANITIZER
+//
+// AddressSanitizer (ASan) is a fast memory error detector.
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set."
+#elif defined(__SANITIZE_ADDRESS__)
+#define ABSL_HAVE_ADDRESS_SANITIZER 1
+#elif ABSL_HAVE_FEATURE(address_sanitizer)
+#define ABSL_HAVE_ADDRESS_SANITIZER 1
+#endif
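+
+// For example (sketch only; ExtraPoisonChecks is a hypothetical function),
+// sanitizer-specific logic can be guarded on these macros:
+//
+//   #ifdef ABSL_HAVE_ADDRESS_SANITIZER
+//   ExtraPoisonChecks();
+//   #endif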
+
+// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+//
+// Class template argument deduction is a language feature added in C++17.
+#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set."
+#elif defined(__cpp_deduction_guides)
+#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
+#endif
+
+#endif // ABSL_BASE_CONFIG_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/const_init.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/const_init.h
new file mode 100644
index 00000000000..4bab055ddad
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/const_init.h
@@ -0,0 +1,76 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// kConstInit
+// -----------------------------------------------------------------------------
+//
+// A constructor tag used to mark an object as safe for use as a global
+// variable, avoiding the usual lifetime issues that can affect globals.
+
+#ifndef ABSL_BASE_CONST_INIT_H_
+#define ABSL_BASE_CONST_INIT_H_
+
+#include "y_absl/base/config.h"
+
+// In general, objects with static storage duration (such as global variables)
+// can trigger tricky object lifetime situations. Attempting to access them
+// from the constructors or destructors of other global objects can result in
+// undefined behavior, unless their constructors and destructors are designed
+// with this issue in mind.
+//
+// The normal way to deal with this issue in C++11 is to use constant
+// initialization and trivial destructors.
+//
+// Constant initialization is guaranteed to occur before any other code
+// executes. Constructors that are declared 'constexpr' are eligible for
+// constant initialization. You can annotate a variable declaration with the
+// ABSL_CONST_INIT macro to express this intent. For compilers that support
+// it, this annotation will cause a compilation error for declarations that
+// aren't subject to constant initialization (perhaps because a runtime value
+// was passed as a constructor argument).
+//
+// On program shutdown, lifetime issues can be avoided on global objects by
+// ensuring that they contain trivial destructors. A class has a trivial
+// destructor unless it has a user-defined destructor, a virtual method or base
+// class, or a data member or base class with a non-trivial destructor of its
+// own. Objects with static storage duration and a trivial destructor are not
+// cleaned up on program shutdown, and are thus safe to access from other code
+// running during shutdown.
+//
+// For a few core Abseil classes, we make a best effort to allow for safe global
+// instances, even though these classes have non-trivial destructors. These
+// objects can be created with the y_absl::kConstInit tag. For example:
+// ABSL_CONST_INIT y_absl::Mutex global_mutex(y_absl::kConstInit);
+//
+// The line above declares a global variable of type y_absl::Mutex which can be
+// accessed at any point during startup or shutdown. global_mutex's destructor
+// will still run, but will not invalidate the object. Note that C++ specifies
+// that accessing an object after its destructor has run results in undefined
+// behavior, but this pattern works on the toolchains we support.
+//
+// The y_absl::kConstInit tag should only be used to define objects with static
+// or thread_local storage duration.
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+enum ConstInitType {
+ kConstInit,
+};
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_CONST_INIT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/dynamic_annotations.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/dynamic_annotations.h
new file mode 100644
index 00000000000..50b3122cf76
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/dynamic_annotations.h
@@ -0,0 +1,471 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tools
+// such as valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ABSL_ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+//   In this case, macros expand to functions implemented by Thread Sanitizer
+//   when building with TSan. When no external implementation is provided,
+//   dynamic_annotations.cc provides no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+// When building with a Clang compiler that supports thread-safety warnings,
+// a subset of annotations can be statically-checked at compile-time. We
+// expand these macros to static-inline functions that can be analyzed for
+// thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+// If neither Dynamic Annotations nor Clang thread-safety warnings are
+// enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+
+#include <stddef.h>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#ifdef __cplusplus
+#include "y_absl/base/macros.h"
+#endif
+
+// TODO(rogeeff): Remove after the backward compatibility period.
+#include "y_absl/base/internal/dynamic_annotations.h" // IWYU pragma: export
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled.
+
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro-definitions according to
+// whether Annotalysis support is available. When running in opt-mode, GCC
+// will issue a warning if these attributes are compiled. Only include them
+// when compiling with Clang.
+
+#if defined(__clang__)
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 1
+#if !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+#else
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#endif
+
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
+ ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+
+#endif // ABSL_HAVE_THREAD_SANITIZER
+
+#ifdef __cplusplus
+#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
+#define ABSL_INTERNAL_END_EXTERN_C } // extern "C"
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
+#define ABSL_INTERNAL_STATIC_INLINE inline
+#else
+#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty
+#define ABSL_INTERNAL_END_EXTERN_C // empty
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
+#define ABSL_INTERNAL_STATIC_INLINE static inline
+#endif
+
+// -------------------------------------------------------------------------
+// Define race annotations.
+
+#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
+// defined by the compiler-based sanitizer implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
+// -------------------------------------------------------------
+// Annotations that suppress errors. It is usually better to express the
+// program's synchronization using the other annotations, but these can be used
+// when all else fails.
+
+// Report that we may have a benign race at `pointer`, with size
+// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
+// point where `pointer` has been allocated, preferably close to the point
+// where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC.
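+// A hedged usage sketch (`counter` is an illustrative variable):
+//   static int counter;  // racy by design
+//   ABSL_ANNOTATE_BENIGN_RACE(&counter, "approximate stats; races benign");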
+#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
+
+// Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
+// the memory range [`address`, `address`+`size`).
+#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, address, size, description)
+
+// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
+// This annotation could be useful if you want to skip expensive race analysis
+// during some period of program execution, e.g. during initialization.
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
+ (__FILE__, __LINE__, enable)
+
+// -------------------------------------------------------------
+// Annotations useful for debugging.
+
+// Report the current thread `name` to a race detector.
+#define ABSL_ANNOTATE_THREAD_NAME(name) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+// -------------------------------------------------------------
+// Annotations useful when implementing locks. They are not normally needed by
+// modules that merely use locks. The `lock` argument is a pointer to the lock
+// object.
+
+// Report that a lock has been created at address `lock`.
+#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+// Report that a linker-initialized lock has been created at address `lock`.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
+ (__FILE__, __LINE__, lock)
+#else
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+ ABSL_ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+// Report that the lock at address `lock` is about to be destroyed.
+#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+// Report that the lock at address `lock` has been acquired.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Report that the lock at address `lock` is about to be released.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
+#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
+ namespace { \
+ class static_var##_annotator { \
+ public: \
+ static_var##_annotator() { \
+ ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
+ #static_var ": " description); \
+ } \
+ }; \
+ static static_var##_annotator the##static_var##_annotator; \
+ } // namespace
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateRWLockCreate(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockCreateStatic(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockDestroy(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockAcquired(const char* file, int line,
+ const volatile void* lock, long is_w); // NOLINT
+void AnnotateRWLockReleased(const char* file, int line,
+ const volatile void* lock, long is_w); // NOLINT
+void AnnotateBenignRace(const char* file, int line,
+ const volatile void* address, const char* description);
+void AnnotateBenignRaceSized(const char* file, int line,
+ const volatile void* address, size_t size,
+ const char* description);
+void AnnotateThreadName(const char* file, int line, const char* name);
+void AnnotateEnableRaceDetection(const char* file, int line, int enable);
+ABSL_INTERNAL_END_EXTERN_C
+
+#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) // empty
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty
+#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) // empty
+#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty
+#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty
+#define ABSL_ANNOTATE_BENIGN_RACE(address, description) // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty
+#define ABSL_ANNOTATE_THREAD_NAME(name) // empty
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty
+
+#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+
+#include <sanitizer/msan_interface.h>
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ __msan_unpoison(address, size)
+
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ __msan_allocated_memory(address, size)
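+
+// A hedged usage sketch (buf/len are illustrative): mark memory produced by
+// code MSan cannot see (e.g. hand-written assembly) as initialized:
+//   ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buf, len);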
+
+#else // !defined(ABSL_HAVE_MEMORY_SANITIZER)
+
+// TODO(rogeeff): remove this branch
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#else
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
+
+#endif
+
+#endif // ABSL_HAVE_MEMORY_SANITIZER
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+ __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+ __attribute((unlock_function("*")))
+
+#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty
+
+#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are
+// defined by the compiler-based implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
+// Request the analysis tool to ignore all reads in the current thread until
+// ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racy
+// reads, while still checking other reads and all writes.
+// See also ABSL_ANNOTATE_UNPROTECTED_READ.
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
+ (__FILE__, __LINE__)
+
+// Stop ignoring reads.
+#define ABSL_ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
+ (__FILE__, __LINE__)
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateIgnoreReadsBegin(const char* file, int line)
+ ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE;
+void AnnotateIgnoreReadsEnd(const char* file,
+ int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE;
+ABSL_INTERNAL_END_EXTERN_C
+
+#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
+
+// When Annotalysis is enabled without Dynamic Annotations, the use of
+// static-inline functions allows the annotations to be read at compile-time,
+// while still letting the compiler elide the functions from the final build.
+//
+// TODO(delesley) -- The exclusive lock here ignores writes as well, but
+// allows IGNORE_READS_AND_WRITES to work properly.
+
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED( \
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \
+ ()
+
+#define ABSL_ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED( \
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \
+ ()
+
+ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
+ AbslInternalAnnotateIgnoreReadsBegin)()
+ ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {}
+
+ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
+ AbslInternalAnnotateIgnoreReadsEnd)()
+ ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {}
+
+#else
+
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() // empty
+#define ABSL_ANNOTATE_IGNORE_READS_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define IGNORE_WRITES_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1
+
+// Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
+#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+// Stop ignoring writes.
+#define ABSL_ANNOTATE_IGNORE_WRITES_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateIgnoreWritesBegin(const char* file, int line);
+void AnnotateIgnoreWritesEnd(const char* file, int line);
+ABSL_INTERNAL_END_EXTERN_C
+
+#else
+
+#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() // empty
+#define ABSL_ANNOTATE_IGNORE_WRITES_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define the ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
+// primitive annotations defined above.
+//
+// Instead of doing
+// ABSL_ANNOTATE_IGNORE_READS_BEGIN();
+// ... = x;
+// ABSL_ANNOTATE_IGNORE_READS_END();
+// one can use
+// ... = ABSL_ANNOTATE_UNPROTECTED_READ(x);
+
+#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED)
+
+// Start ignoring all memory accesses (both reads and writes).
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+ do { \
+ ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \
+ ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \
+ } while (0)
+
+// Stop ignoring both reads and writes.
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+ do { \
+ ABSL_ANNOTATE_IGNORE_WRITES_END(); \
+ ABSL_ANNOTATE_IGNORE_READS_END(); \
+ } while (0)
+
+#ifdef __cplusplus
+// ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+#define ABSL_ANNOTATE_UNPROTECTED_READ(x) \
+ y_absl::base_internal::AnnotateUnprotectedRead(x)
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename T>
+inline T AnnotateUnprotectedRead(const volatile T& x) { // NOLINT
+ ABSL_ANNOTATE_IGNORE_READS_BEGIN();
+ T res = x;
+ ABSL_ANNOTATE_IGNORE_READS_END();
+ return res;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+#endif
+
+#else
+
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty
+#define ABSL_ANNOTATE_UNPROTECTED_READ(x) (x)
+
+#endif
+
+// -------------------------------------------------------------------------
+// Address sanitizer annotations
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+// Describe the current state of a contiguous container such as e.g.
+// std::vector or TString. For more details see
+// sanitizer/common_interface_defs.h, which is provided by the compiler.
+#include <sanitizer/common_interface_defs.h>
+
+#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+ __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
+#define ABSL_ADDRESS_SANITIZER_REDZONE(name) \
+ struct { \
+ alignas(8) char x[8]; \
+ } name
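+
+// A hedged usage sketch (buf, capacity, old_size, new_size are illustrative):
+//
+//   ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(buf, buf + capacity,
+//                                      buf + old_size, buf + new_size);
+//
+// tells ASan that the live region of the buffer changed from
+// [buf, buf + old_size) to [buf, buf + new_size).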
+
+#else
+
+#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) // empty
+#define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
+
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
+
+// -------------------------------------------------------------------------
+// Undefine the macros intended only for this file.
+
+#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_BEGIN_EXTERN_C
+#undef ABSL_INTERNAL_END_EXTERN_C
+#undef ABSL_INTERNAL_STATIC_INLINE
+
+#endif // ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/atomic_hook.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/atomic_hook.h
new file mode 100644
index 00000000000..5bdf360a57e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/atomic_hook.h
@@ -0,0 +1,200 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0
+#else
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1
+#endif
+
+#if defined(_MSC_VER)
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0
+#else
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename T>
+class AtomicHook;
+
+// To workaround AtomicHook not being constant-initializable on some platforms,
+// prefer to annotate instances with `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES`
+// instead of `ABSL_CONST_INIT`.
+#if ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_CONST_INIT
+#else
+#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+#endif
+
+// `AtomicHook` is a helper class, templatized on a raw function pointer type,
+// for implementing Abseil customization hooks. It is a callable object that
+// dispatches to the registered hook. Objects of type `AtomicHook` must have
+// static or thread storage duration.
+//
+// A default constructed object performs a no-op (and returns a default
+// constructed object) if no hook has been registered.
+//
+// Hooks can be pre-registered via constant initialization, for example:
+//
+// ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void(*)()>
+// my_hook(DefaultAction);
+//
+// and then changed at runtime via a call to `Store()`.
+//
+// Reads and writes guarantee memory_order_acquire/memory_order_release
+// semantics.
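+//
+// A hedged usage sketch (MyAction is an illustrative function):
+//
+//   void MyAction();
+//   my_hook.Store(MyAction);  // register once at runtime
+//   my_hook();                // now dispatches to MyAction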
+template <typename ReturnType, typename... Args>
+class AtomicHook<ReturnType (*)(Args...)> {
+ public:
+ using FnPtr = ReturnType (*)(Args...);
+
+ // Constructs an object that by default performs a no-op (and
+  // returns a default constructed object) when no hook has been registered.
+ constexpr AtomicHook() : AtomicHook(DummyFunction) {}
+
+ // Constructs an object that by default dispatches to/returns the
+ // pre-registered default_fn when no hook has been registered at runtime.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+ explicit constexpr AtomicHook(FnPtr default_fn)
+ : hook_(default_fn), default_fn_(default_fn) {}
+#elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+ explicit constexpr AtomicHook(FnPtr default_fn)
+ : hook_(kUninitialized), default_fn_(default_fn) {}
+#else
+ // As of January 2020, on all known versions of MSVC this constructor runs in
+ // the global constructor sequence. If `Store()` is called by a dynamic
+ // initializer, we want to preserve the value, even if this constructor runs
+ // after the call to `Store()`. If not, `hook_` will be
+ // zero-initialized by the linker and we have no need to set it.
+ // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
+ explicit constexpr AtomicHook(FnPtr default_fn)
+ : /* hook_(deliberately omitted), */ default_fn_(default_fn) {
+ static_assert(kUninitialized == 0, "here we rely on zero-initialization");
+ }
+#endif
+
+ // Stores the provided function pointer as the value for this hook.
+ //
+ // This is intended to be called once. Multiple calls are legal only if the
+ // same function pointer is provided for each call. The store is implemented
+ // as a memory_order_release operation, and read accesses are implemented as
+ // memory_order_acquire.
+ void Store(FnPtr fn) {
+ bool success = DoStore(fn);
+ static_cast<void>(success);
+ assert(success);
+ }
+
+ // Invokes the registered callback. If no callback has yet been registered, a
+ // default-constructed object of the appropriate type is returned instead.
+ template <typename... CallArgs>
+ ReturnType operator()(CallArgs&&... args) const {
+ return DoLoad()(std::forward<CallArgs>(args)...);
+ }
+
+ // Returns the registered callback, or nullptr if none has been registered.
+ // Useful if client code needs to conditionalize behavior based on whether a
+ // callback was registered.
+ //
+ // Note that atomic_hook.Load()() and atomic_hook() have different semantics:
+ // operator()() will perform a no-op if no callback was registered, while
+ // Load()() will dereference a null function pointer. Prefer operator()() to
+ // Load()() unless you must conditionalize behavior on whether a hook was
+ // registered.
+ FnPtr Load() const {
+ FnPtr ptr = DoLoad();
+ return (ptr == DummyFunction) ? nullptr : ptr;
+ }
+
+ private:
+ static ReturnType DummyFunction(Args...) {
+ return ReturnType();
+ }
+
+ // Current versions of MSVC (as of September 2017) have a broken
+ // implementation of std::atomic<T*>: Its constructor attempts to do the
+ // equivalent of a reinterpret_cast in a constexpr context, which is not
+ // allowed.
+ //
+ // This causes an issue when building with LLVM under Windows. To avoid this,
+ // we use a less-efficient, intptr_t-based implementation on Windows.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER
+ // Return the stored value, or DummyFunction if no value has been stored.
+ FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }
+
+ // Store the given value. Returns false if a different value was already
+ // stored to this object.
+ bool DoStore(FnPtr fn) {
+ assert(fn);
+ FnPtr expected = default_fn_;
+ const bool store_succeeded = hook_.compare_exchange_strong(
+ expected, fn, std::memory_order_acq_rel, std::memory_order_acquire);
+ const bool same_value_already_stored = (expected == fn);
+ return store_succeeded || same_value_already_stored;
+ }
+
+ std::atomic<FnPtr> hook_;
+#else // !ABSL_HAVE_WORKING_ATOMIC_POINTER
+ // Use a sentinel value unlikely to be the address of an actual function.
+ static constexpr intptr_t kUninitialized = 0;
+
+ static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
+ "intptr_t can't contain a function pointer");
+
+ FnPtr DoLoad() const {
+ const intptr_t value = hook_.load(std::memory_order_acquire);
+ if (value == kUninitialized) {
+ return default_fn_;
+ }
+ return reinterpret_cast<FnPtr>(value);
+ }
+
+ bool DoStore(FnPtr fn) {
+ assert(fn);
+ const auto value = reinterpret_cast<intptr_t>(fn);
+ intptr_t expected = kUninitialized;
+ const bool store_succeeded = hook_.compare_exchange_strong(
+ expected, value, std::memory_order_acq_rel, std::memory_order_acquire);
+ const bool same_value_already_stored = (expected == value);
+ return store_succeeded || same_value_already_stored;
+ }
+
+ std::atomic<intptr_t> hook_;
+#endif
+
+ const FnPtr default_fn_;
+};
+
+#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
+#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/atomic_hook_test_helper.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/atomic_hook_test_helper.h
new file mode 100644
index 00000000000..e600d27f4f1
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/atomic_hook_test_helper.h
@@ -0,0 +1,34 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+#define ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+
+#include "y_absl/base/internal/atomic_hook.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace atomic_hook_internal {
+
+using VoidF = void (*)();
+extern y_absl::base_internal::AtomicHook<VoidF> func;
+extern int default_func_calls;
+void DefaultFunc();
+void RegisterFunc(VoidF func);
+
+} // namespace atomic_hook_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.cc
new file mode 100644
index 00000000000..6b43f7c1841
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.cc
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of CycleClock::Frequency.
+//
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+// http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b. See also
+// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+
+#include "y_absl/base/internal/cycleclock.h"
+
+#include <atomic>
+#include <chrono> // NOLINT(build/c++11)
+
+#include "y_absl/base/internal/unscaledcycleclock.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+namespace {
+
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+// Not debug mode and the UnscaledCycleClock frequency is the CPU
+// frequency. Scale the CycleClock to prevent overflow if someone
+// tries to represent the time as cycles since the Unix epoch.
+static constexpr int32_t kShift = 1;
+#else
+// Not debug mode and the UnscaledCycleClock isn't operating at the
+// raw CPU frequency. There is no need to do any scaling, so don't
+// needlessly sacrifice precision.
+static constexpr int32_t kShift = 0;
+#endif
+#else
+// In debug mode use a different shift to discourage depending on a
+// particular shift value.
+static constexpr int32_t kShift = 2;
+#endif
+
+static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
+static std::atomic<CycleClockSourceFunc> cycle_clock_source;
+
+CycleClockSourceFunc LoadCycleClockSource() {
+ // Optimize for the common case (no callback) by first doing a relaxed load;
+ // this is significantly faster on non-x86 platforms.
+ if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) {
+ return nullptr;
+ }
+ // This corresponds to the store(std::memory_order_release) in
+ // CycleClockSource::Register, and makes sure that any updates made prior to
+ // registering the callback are visible to this thread before the callback is
+ // invoked.
+ return cycle_clock_source.load(std::memory_order_acquire);
+}
+
+} // namespace
+
+int64_t CycleClock::Now() {
+ auto fn = LoadCycleClockSource();
+ if (fn == nullptr) {
+ return base_internal::UnscaledCycleClock::Now() >> kShift;
+ }
+ return fn() >> kShift;
+}
+
+double CycleClock::Frequency() {
+ return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+void CycleClockSource::Register(CycleClockSourceFunc source) {
+ // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
+ cycle_clock_source.store(source, std::memory_order_release);
+}
+
+#else
+
+int64_t CycleClock::Now() {
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(
+ std::chrono::steady_clock::now().time_since_epoch())
+ .count();
+}
+
+double CycleClock::Frequency() {
+ return 1e9;
+}
+
+#endif
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.h
new file mode 100644
index 00000000000..389c9093e35
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.h
@@ -0,0 +1,94 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// -----------------------------------------------------------------------------
+// File: cycleclock.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `CycleClock`, which yields the value and frequency
+// of a cycle counter that increments at a rate that is approximately constant.
+//
+// NOTE:
+//
+// The cycle counter frequency is not necessarily related to the core clock
+// frequency and should not be treated as such. That is, `CycleClock` cycles are
+// not necessarily "CPU cycles" and code should not rely on that behavior, even
+// if experimentally observed.
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately. If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// -----------------------------------------------------------------------------
+// CycleClock
+// -----------------------------------------------------------------------------
+class CycleClock {
+ public:
+ // CycleClock::Now()
+ //
+ // Returns the value of a cycle counter that counts at a rate that is
+ // approximately constant.
+ static int64_t Now();
+
+ // CycleClock::Frequency()
+ //
+ // Returns the amount by which `CycleClock::Now()` increases per second. Note
+ // that this value may not necessarily match the core CPU clock frequency.
+ static double Frequency();
+
+ private:
+ CycleClock() = delete; // no instances
+ CycleClock(const CycleClock&) = delete;
+ CycleClock& operator=(const CycleClock&) = delete;
+};
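+
+// A hedged usage sketch: convert a cycle delta to seconds via Frequency().
+//
+//   int64_t t0 = y_absl::base_internal::CycleClock::Now();
+//   // ... timed work ...
+//   int64_t t1 = y_absl::base_internal::CycleClock::Now();
+//   double secs = (t1 - t0) / y_absl::base_internal::CycleClock::Frequency();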
+
+using CycleClockSourceFunc = int64_t (*)();
+
+class CycleClockSource {
+ private:
+ // CycleClockSource::Register()
+ //
+ // Register a function that provides an alternate source for the unscaled CPU
+ // cycle count value. The source function must be async signal safe, must not
+ // call CycleClock::Now(), and must have a frequency that matches that of the
+ // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use
+ // the default source.
+ static void Register(CycleClockSourceFunc source);
+};
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
new file mode 100644
index 00000000000..82be9f94ab6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
@@ -0,0 +1,169 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Functions for directly invoking mmap() via syscall, avoiding the case where
+// mmap() has been locally overridden.
+
+#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+
+#include "y_absl/base/config.h"
+
+#if ABSL_HAVE_MMAP
+
+#include <sys/mman.h>
+
+#ifdef __linux__
+
+#include <sys/types.h>
+#ifdef __BIONIC__
+#include <sys/syscall.h>
+#else
+#include <syscall.h>
+#endif
+
+#include <linux/unistd.h>
+#include <unistd.h>
+#include <cerrno>
+#include <cstdarg>
+#include <cstdint>
+
+#ifdef __mips__
+// Include definitions of the ABI currently in use.
+#ifdef __BIONIC__
+// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
+// definitions we need.
+#include <asm/sgidefs.h>
+#else
+#include <sgidefs.h>
+#endif // __BIONIC__
+#endif // __mips__
+
+// SYS_mmap and SYS_munmap are not defined in Android.
+#ifdef __BIONIC__
+extern "C" void* __mmap2(void*, size_t, int, int, int, size_t);
+#if defined(__NR_mmap) && !defined(SYS_mmap)
+#define SYS_mmap __NR_mmap
+#endif
+#ifndef SYS_munmap
+#define SYS_munmap __NR_munmap
+#endif
+#endif // __BIONIC__
+
+#if defined(__NR_mmap2) && !defined(SYS_mmap2)
+#define SYS_mmap2 __NR_mmap2
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Platform specific logic extracted from
+// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+ off64_t offset) noexcept {
+#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
+ defined(__m68k__) || defined(__sh__) || \
+ (defined(__hppa__) && !defined(__LP64__)) || \
+ (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
+ (defined(__PPC__) && !defined(__PPC64__)) || \
+ (defined(__riscv) && __riscv_xlen == 32) || \
+ (defined(__s390__) && !defined(__s390x__)) || \
+ (defined(__sparc__) && !defined(__arch64__))
+ // On these architectures, implement mmap with mmap2.
+ static int pagesize = 0;
+ if (pagesize == 0) {
+#if defined(__wasm__) || defined(__asmjs__)
+ pagesize = getpagesize();
+#else
+ pagesize = sysconf(_SC_PAGESIZE);
+#endif
+ }
+ if (offset < 0 || offset % pagesize != 0) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+#ifdef __BIONIC__
+ // SYS_mmap2 has problems on Android API level <= 16.
+ // Workaround by invoking __mmap2() instead.
+ return __mmap2(start, length, prot, flags, fd, offset / pagesize);
+#else
+ return reinterpret_cast<void*>(
+ syscall(SYS_mmap2, start, length, prot, flags, fd,
+ static_cast<off_t>(offset / pagesize)));
+#endif
+#elif defined(__s390x__)
+ // On s390x, mmap() arguments are passed in memory.
+ unsigned long buf[6] = {reinterpret_cast<unsigned long>(start), // NOLINT
+ static_cast<unsigned long>(length), // NOLINT
+ static_cast<unsigned long>(prot), // NOLINT
+ static_cast<unsigned long>(flags), // NOLINT
+ static_cast<unsigned long>(fd), // NOLINT
+ static_cast<unsigned long>(offset)}; // NOLINT
+ return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+// sign extension. We can't cast pointers directly because those are
+// 32 bits, and gcc will dump ugly warnings about casting from a pointer
+// to an integer of a different size. We also need to make sure __off64_t
+// isn't truncated to 32-bits under x32.
+#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+ return reinterpret_cast<void*>(
+ syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+ MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+ MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+#undef MMAP_SYSCALL_ARG
+#else // Remaining 64-bit architectures.
+ static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+ return reinterpret_cast<void*>(
+ syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+ return static_cast<int>(syscall(SYS_munmap, start, length));
+}
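+
+// A hedged usage sketch (standard mmap(2) flags; 4096 assumes the page
+// size):
+//
+//   void* p = DirectMmap(nullptr, 4096, PROT_READ | PROT_WRITE,
+//                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+//   if (p != MAP_FAILED) DirectMunmap(p, 4096);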
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else // !__linux__
+
+// For non-Linux platforms where we have mmap, just dispatch directly to the
+// actual mmap()/munmap() functions.
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+ off_t offset) {
+ return mmap(start, length, prot, flags, fd, offset);
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+ return munmap(start, length);
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // __linux__
+
+#endif // ABSL_HAVE_MMAP
+
+#endif // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/dynamic_annotations.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/dynamic_annotations.h
new file mode 100644
index 00000000000..75f00226834
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/dynamic_annotations.h
@@ -0,0 +1,398 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tools
+// such as valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+//   In this case, macros expand to functions implemented by Thread Sanitizer
+//   when building with TSan. When no external implementation is provided,
+//   dynamic_annotations.cc provides no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+// When building with a Clang compiler that supports thread-safety warnings,
+// a subset of annotations can be statically-checked at compile-time. We
+// expand these macros to static-inline functions that can be analyzed for
+// thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+// If neither Dynamic Annotations nor Clang thread-safety warnings are
+// enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+
+#include <stddef.h>
+
+#include "y_absl/base/config.h"
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+#define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if defined(__clang__) && !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro definitions according to
+// whether Annotalysis support is available. When running in opt mode, GCC
+// will issue a warning if these attributes are compiled. Only include them
+// when compiling with Clang.
+
+// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \
+ defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
+ ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#endif
+
+// Memory annotations are also made available to LLVM's Memory Sanitizer.
+#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__)
+#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1
+#endif
+
+#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0
+#endif
+
+#ifdef __cplusplus
+#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
+#define ABSL_INTERNAL_END_EXTERN_C } // extern "C"
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
+#define ABSL_INTERNAL_STATIC_INLINE inline
+#else
+#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty
+#define ABSL_INTERNAL_END_EXTERN_C // empty
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
+#define ABSL_INTERNAL_STATIC_INLINE static inline
+#endif
+
+// -------------------------------------------------------------------------
+// Define race annotations.
+
+#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+
+// -------------------------------------------------------------
+// Annotations that suppress errors. It is usually better to express the
+// program's synchronization using the other annotations, but these can be used
+// when all else fails.
+
+// Report that we may have a benign race at `pointer`, with size
+// `sizeof(*(pointer))`. `pointer` must be a non-void* pointer. Insert at the
+// point where `pointer` has been allocated, preferably close to the point
+// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC.
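+//
+// A hedged usage sketch (`stats_counter` is a hypothetical variable):
+//
+//   static int stats_counter = 0;  // racy but tolerable statistics counter
+//   ANNOTATE_BENIGN_RACE(&stats_counter, "benign race on a stats counter");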
+#define ANNOTATE_BENIGN_RACE(pointer, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
+
+// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
+// the memory range [`address`, `address`+`size`).
+#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, address, size, description)
+
+// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
+// This annotation could be useful if you want to skip expensive race analysis
+// during some period of program execution, e.g. during initialization.
+#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
+ (__FILE__, __LINE__, enable)
+
+// -------------------------------------------------------------
+// Annotations useful for debugging.
+
+// Report the current thread `name` to a race detector.
+#define ANNOTATE_THREAD_NAME(name) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+// -------------------------------------------------------------
+// Annotations useful when implementing locks. They are not normally needed by
+// modules that merely use locks. The `lock` argument is a pointer to the lock
+// object.
+
+// Report that a lock has been created at address `lock`.
+#define ANNOTATE_RWLOCK_CREATE(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+// Report that a linker-initialized lock has been created at address `lock`.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
+ (__FILE__, __LINE__, lock)
+#else
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+// Report that the lock at address `lock` is about to be destroyed.
+#define ANNOTATE_RWLOCK_DESTROY(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+// Report that the lock at address `lock` has been acquired.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Report that the lock at address `lock` is about to be released.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
+ namespace { \
+ class static_var##_annotator { \
+ public: \
+ static_var##_annotator() { \
+ ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
+ #static_var ": " description); \
+ } \
+ }; \
+ static static_var##_annotator the##static_var##_annotator; \
+ } // namespace
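+
+// A minimal sketch of the static form (illustrative; `g_status_flag` is a
+// hypothetical global):
+//
+//   static bool g_status_flag = false;
+//   ANNOTATE_BENIGN_RACE_STATIC(g_status_flag, "benign race on a status flag");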
+
+#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ANNOTATE_RWLOCK_CREATE(lock) // empty
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty
+#define ANNOTATE_RWLOCK_DESTROY(lock) // empty
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty
+#define ANNOTATE_BENIGN_RACE(address, description) // empty
+#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty
+#define ANNOTATE_THREAD_NAME(name) // empty
+#define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty
+
+#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1
+
+#include <sanitizer/msan_interface.h>
+
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ __msan_unpoison(address, size)
+
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ __msan_allocated_memory(address, size)
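+
+// A hedged usage sketch (illustrative only): tell MSan that a buffer filled by
+// means it cannot see (e.g. device I/O) is now initialized. `ReadFromDevice`
+// is a hypothetical helper.
+//
+//   char buf[64];
+//   ReadFromDevice(buf, sizeof(buf));
+//   ANNOTATE_MEMORY_IS_INITIALIZED(buf, sizeof(buf));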
+
+#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0
+
+#if DYNAMIC_ANNOTATIONS_ENABLED == 1
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#else
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
+#endif
+
+#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+ __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+ __attribute((unlock_function("*")))
+
+#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty
+
+#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+
+// Request the analysis tool to ignore all reads in the current thread until
+// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racy
+// reads, while still checking other reads and all writes.
+// See also ANNOTATE_UNPROTECTED_READ.
+#define ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+
+// Stop ignoring reads.
+#define ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+
+#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
+
+// When Annotalysis is enabled without Dynamic Annotations, the use of
+// static-inline functions allows the annotations to be read at compile-time,
+// while still letting the compiler elide the functions from the final build.
+//
+// TODO(delesley) -- The exclusive lock here ignores writes as well, but
+// allows IGNORE_READS_AND_WRITES to work properly.
+
+#define ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)()
+
+#define ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)()
+
+#else
+
+#define ANNOTATE_IGNORE_READS_BEGIN() // empty
+#define ANNOTATE_IGNORE_READS_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define IGNORE_WRITES_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1
+
+// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
+#define ANNOTATE_IGNORE_WRITES_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+// Stop ignoring writes.
+#define ANNOTATE_IGNORE_WRITES_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+#else
+
+#define ANNOTATE_IGNORE_WRITES_BEGIN() // empty
+#define ANNOTATE_IGNORE_WRITES_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
+// primitive annotations defined above.
+//
+// Instead of doing
+// ANNOTATE_IGNORE_READS_BEGIN();
+// ... = x;
+// ANNOTATE_IGNORE_READS_END();
+// one can use
+// ... = ANNOTATE_UNPROTECTED_READ(x);
+
+#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED)
+
+// Start ignoring all memory accesses (both reads and writes).
+#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+ do { \
+ ANNOTATE_IGNORE_READS_BEGIN(); \
+ ANNOTATE_IGNORE_WRITES_BEGIN(); \
+ } while (0)
+
+// Stop ignoring both reads and writes.
+#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+ do { \
+ ANNOTATE_IGNORE_WRITES_END(); \
+ ANNOTATE_IGNORE_READS_END(); \
+ } while (0)
+
+#ifdef __cplusplus
+// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racy reads.
+#define ANNOTATE_UNPROTECTED_READ(x) \
+ y_absl::base_internal::AnnotateUnprotectedRead(x)
+
+#endif
+
+#else
+
+#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty
+#define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty
+#define ANNOTATE_UNPROTECTED_READ(x) (x)
+
+#endif
+
+// -------------------------------------------------------------------------
+// Address sanitizer annotations
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+// Describe the current state of a contiguous container such as e.g.
+// std::vector or TString. For more details see
+// sanitizer/common_interface_defs.h, which is provided by the compiler.
+#include <sanitizer/common_interface_defs.h>
+
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+ __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name) \
+ struct { \
+ char x[8] __attribute__((aligned(8))); \
+ } name
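+
+// A hedged sketch for a vector-like container (illustrative; `v`, `old_size`,
+// and `new_size` are hypothetical):
+//
+//   // After shrinking the container from old_size to new_size elements:
+//   ANNOTATE_CONTIGUOUS_CONTAINER(v.data(), v.data() + v.capacity(),
+//                                 v.data() + old_size, v.data() + new_size);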
+
+#else
+
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
+
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
+
+// -------------------------------------------------------------------------
+// Undefine the macros intended only for this file.
+
+#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_BEGIN_EXTERN_C
+#undef ABSL_INTERNAL_END_EXTERN_C
+#undef ABSL_INTERNAL_STATIC_INLINE
+
+#endif // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h
new file mode 100644
index 00000000000..0f7adb8bf6e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h
@@ -0,0 +1,327 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
+#define ABSL_BASE_INTERNAL_ENDIAN_H_
+
+// The following guarantees declaration of the byte-swap functions.
+#ifdef _MSC_VER
+#include <stdlib.h> // NOLINT(build/include)
+#elif defined(__FreeBSD__)
+#include <sys/endian.h>
+#elif defined(__GLIBC__)
+#include <byteswap.h> // IWYU pragma: export
+#endif
+
+#include <cstdint>
+#include "y_absl/base/casts.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/unaligned_access.h"
+#include "y_absl/base/port.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Use compiler byte-swapping intrinsics if they are available. 32-bit
+// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
+// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
+// For simplicity, we enable them all only for GCC 4.8.0 or later.
+#if defined(__clang__) || \
+ (defined(__GNUC__) && \
+ ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
+inline uint64_t gbswap_64(uint64_t host_int) {
+ return __builtin_bswap64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+ return __builtin_bswap32(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+ return __builtin_bswap16(host_int);
+}
+
+#elif defined(_MSC_VER)
+inline uint64_t gbswap_64(uint64_t host_int) {
+ return _byteswap_uint64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+ return _byteswap_ulong(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+ return _byteswap_ushort(host_int);
+}
+
+#else
+inline uint64_t gbswap_64(uint64_t host_int) {
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
+ // Adapted from /usr/include/byteswap.h. Not available on Mac.
+ if (__builtin_constant_p(host_int)) {
+ return __bswap_constant_64(host_int);
+ } else {
+ uint64_t result;
+ __asm__("bswap %0" : "=r"(result) : "0"(host_int));
+ return result;
+ }
+#elif defined(__GLIBC__)
+ return bswap_64(host_int);
+#else
+ return (((host_int & uint64_t{0xFF}) << 56) |
+ ((host_int & uint64_t{0xFF00}) << 40) |
+ ((host_int & uint64_t{0xFF0000}) << 24) |
+ ((host_int & uint64_t{0xFF000000}) << 8) |
+ ((host_int & uint64_t{0xFF00000000}) >> 8) |
+ ((host_int & uint64_t{0xFF0000000000}) >> 24) |
+ ((host_int & uint64_t{0xFF000000000000}) >> 40) |
+ ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+#endif // bswap_64
+}
+
+inline uint32_t gbswap_32(uint32_t host_int) {
+#if defined(__GLIBC__)
+ return bswap_32(host_int);
+#else
+ return (((host_int & uint32_t{0xFF}) << 24) |
+ ((host_int & uint32_t{0xFF00}) << 8) |
+ ((host_int & uint32_t{0xFF0000}) >> 8) |
+ ((host_int & uint32_t{0xFF000000}) >> 24));
+#endif
+}
+
+inline uint16_t gbswap_16(uint16_t host_int) {
+#if defined(__GLIBC__)
+ return bswap_16(host_int);
+#else
+ return (((host_int & uint16_t{0xFF}) << 8) |
+ ((host_int & uint16_t{0xFF00}) >> 8));
+#endif
+}
+
+#endif // intrinsics available
+
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+// Definitions for ntohl etc. that don't require us to include
+// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
+// than just #defining them because in debug mode, gcc doesn't
+// correctly handle the (rather involved) definitions of bswap_32.
+// gcc guarantees that inline functions are as fast as macros, so
+// this isn't a performance hit.
+inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
+inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
+inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+// These definitions are simpler on big-endian machines.
+// These are functions instead of macros to avoid self-assignment warnings
+// on calls such as "i = ghtonl(i);". This also provides type checking.
+inline uint16_t ghtons(uint16_t x) { return x; }
+inline uint32_t ghtonl(uint32_t x) { return x; }
+inline uint64_t ghtonll(uint64_t x) { return x; }
+
+#else
+#error \
+ "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
+ "ABSL_IS_LITTLE_ENDIAN must be defined"
+#endif // byte order
+
+inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
+inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
+inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
+
+// Utilities to convert numbers between the current host's native byte
+// order and little-endian byte order
+//
+// Load/Store methods are alignment safe
+namespace little_endian {
+// Conversion functions.
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+ return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+ return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+ return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+ return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+ return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+ return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
+// Functions to do unaligned loads and stores in little-endian order.
+inline uint16_t Load16(const void *p) {
+ return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+ return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+ return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
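+
+// A minimal round-trip sketch (illustrative only): store a uint32_t in
+// little-endian order into a byte buffer and load it back, independent of the
+// host's byte order.
+//
+//   char buf[4];
+//   little_endian::Store32(buf, 0x12345678u);
+//   uint32_t v = little_endian::Load32(buf);  // v == 0x12345678 on any host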
+
+} // namespace little_endian
+
+// Utilities to convert numbers between the current host's native byte
+// order and big-endian byte order (same as network byte order)
+//
+// Load/Store methods are alignment safe
+namespace big_endian {
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+ return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+ return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+ return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+ return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+ return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+ return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
+// Functions to do unaligned loads and stores in big-endian order.
+inline uint16_t Load16(const void *p) {
+ return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+ return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+ return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+} // namespace big_endian
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_ENDIAN_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/errno_saver.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/errno_saver.h
new file mode 100644
index 00000000000..94202a4ed64
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/errno_saver.h
@@ -0,0 +1,43 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
+#define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
+
+#include <cerrno>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// `ErrnoSaver` captures the value of `errno` upon construction and restores it
+// upon deletion. It is used in low-level code and must be super fast. Do not
+// add instrumentation, even in debug modes.
+class ErrnoSaver {
+ public:
+ ErrnoSaver() : saved_errno_(errno) {}
+ ~ErrnoSaver() { errno = saved_errno_; }
+ int operator()() const { return saved_errno_; }
+
+ private:
+ const int saved_errno_;
+};
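+
+// A minimal usage sketch (illustrative only): keep a signal handler from
+// clobbering the interrupted code's errno.
+//
+//   void SignalHandler(int) {
+//     y_absl::base_internal::ErrnoSaver errno_saver;
+//     // ... work that may modify errno ...
+//   }  // errno is restored when errno_saver is destroyed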
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/exception_safety_testing.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/exception_safety_testing.h
new file mode 100644
index 00000000000..3009f17875d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/exception_safety_testing.h
@@ -0,0 +1,1109 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Utilities for testing exception-safety
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+
+#include "y_absl/base/config.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <iosfwd>
+#include <util/generic/string.h>
+#include <tuple>
+#include <unordered_map>
+
+#include "gtest/gtest.h"
+#include "y_absl/base/internal/pretty_function.h"
+#include "y_absl/memory/memory.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/strings/substitute.h"
+#include "y_absl/utility/utility.h"
+
+namespace testing {
+
+enum class TypeSpec;
+enum class AllocSpec;
+
+constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) {
+ using T = y_absl::underlying_type_t<TypeSpec>;
+ return static_cast<TypeSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) {
+ using T = y_absl::underlying_type_t<TypeSpec>;
+ return static_cast<TypeSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) {
+ using T = y_absl::underlying_type_t<AllocSpec>;
+ return static_cast<AllocSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) {
+ using T = y_absl::underlying_type_t<AllocSpec>;
+ return static_cast<AllocSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+namespace exceptions_internal {
+
+TString GetSpecString(TypeSpec);
+TString GetSpecString(AllocSpec);
+
+struct NoThrowTag {};
+struct StrongGuaranteeTagType {};
+
+// A simple exception class. We throw this so that test code can catch
+// exceptions specifically thrown by ThrowingValue.
+class TestException {
+ public:
+ explicit TestException(y_absl::string_view msg) : msg_(msg) {}
+ virtual ~TestException() {}
+ virtual const char* what() const noexcept { return msg_.c_str(); }
+
+ private:
+ TString msg_;
+};
+
+// TestBadAllocException exists because allocation functions must throw an
+// exception which can be caught by a handler of std::bad_alloc. We use a child
+// class of std::bad_alloc so we can customise the error message, and also
+// derive from TestException so we don't accidentally end up catching an actual
+// bad_alloc exception in TestExceptionSafety.
+class TestBadAllocException : public std::bad_alloc, public TestException {
+ public:
+ explicit TestBadAllocException(y_absl::string_view msg) : TestException(msg) {}
+ using TestException::what;
+};
+
+extern int countdown;
+
+// Allows the countdown variable to be set manually (defaulting to the initial
+// value of 0)
+inline void SetCountdown(int i = 0) { countdown = i; }
+// Sets the countdown to the terminal value -1
+inline void UnsetCountdown() { SetCountdown(-1); }
+
+void MaybeThrow(y_absl::string_view msg, bool throw_bad_alloc = false);
+
+testing::AssertionResult FailureMessage(const TestException& e,
+ int countdown) noexcept;
+
+struct TrackedAddress {
+ bool is_alive;
+ TString description;
+};
+
+// Inspects the constructions and destructions of anything inheriting from
+// TrackedObject. This allows us to safely "leak" TrackedObjects, as
+// ConstructorTracker will destroy everything left over in its destructor.
+class ConstructorTracker {
+ public:
+ explicit ConstructorTracker(int count) : countdown_(count) {
+ assert(current_tracker_instance_ == nullptr);
+ current_tracker_instance_ = this;
+ }
+
+ ~ConstructorTracker() {
+ assert(current_tracker_instance_ == this);
+ current_tracker_instance_ = nullptr;
+
+ for (auto& it : address_map_) {
+ void* address = it.first;
+ TrackedAddress& tracked_address = it.second;
+ if (tracked_address.is_alive) {
+ ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+ countdown_, "Object was not destroyed.");
+ }
+ }
+ }
+
+ static void ObjectConstructed(void* address, TString description) {
+ if (!CurrentlyTracking()) return;
+
+ TrackedAddress& tracked_address =
+ current_tracker_instance_->address_map_[address];
+ if (tracked_address.is_alive) {
+ ADD_FAILURE() << ErrorMessage(
+ address, tracked_address.description,
+ current_tracker_instance_->countdown_,
+ "Object was re-constructed. Current object was constructed by " +
+ description);
+ }
+ tracked_address = {true, std::move(description)};
+ }
+
+ static void ObjectDestructed(void* address) {
+ if (!CurrentlyTracking()) return;
+
+ auto it = current_tracker_instance_->address_map_.find(address);
+ // Not tracked. Ignore.
+ if (it == current_tracker_instance_->address_map_.end()) return;
+
+ TrackedAddress& tracked_address = it->second;
+ if (!tracked_address.is_alive) {
+ ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+ current_tracker_instance_->countdown_,
+ "Object was re-destroyed.");
+ }
+ tracked_address.is_alive = false;
+ }
+
+ private:
+ static bool CurrentlyTracking() {
+ return current_tracker_instance_ != nullptr;
+ }
+
+ static TString ErrorMessage(void* address,
+ const TString& address_description,
+ int countdown,
+ const TString& error_description) {
+ return y_absl::Substitute(
+ "With coundtown at $0:\n"
+ " $1\n"
+ " Object originally constructed by $2\n"
+ " Object address: $3\n",
+ countdown, error_description, address_description, address);
+ }
+
+ std::unordered_map<void*, TrackedAddress> address_map_;
+ int countdown_;
+
+ static ConstructorTracker* current_tracker_instance_;
+};
+
+class TrackedObject {
+ public:
+ TrackedObject(const TrackedObject&) = delete;
+ TrackedObject(TrackedObject&&) = delete;
+
+ protected:
+ explicit TrackedObject(TString description) {
+ ConstructorTracker::ObjectConstructed(this, std::move(description));
+ }
+
+ ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); }
+};
+} // namespace exceptions_internal
+
+extern exceptions_internal::NoThrowTag nothrow_ctor;
+
+extern exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+// A test class which is convertible to bool. The conversion can be
+// instrumented to throw at a controlled time.
+class ThrowingBool {
+ public:
+ ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit)
+ operator bool() const { // NOLINT
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return b_;
+ }
+
+ private:
+ bool b_;
+};
+
+/*
+ * Configuration enum for the ThrowingValue type that defines behavior for the
+ * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer
+ * constructor from throwing.
+ *
+ * kEverythingThrows: Every operation can throw an exception
+ * kNoThrowCopy: Copy construction and copy assignment will not throw
+ * kNoThrowMove: Move construction and move assignment will not throw
+ * kNoThrowNew: Overloaded operators new and new[] will not throw
+ */
+enum class TypeSpec {
+ kEverythingThrows = 0,
+ kNoThrowCopy = 1,
+ kNoThrowMove = 1 << 1,
+ kNoThrowNew = 1 << 2,
+};
+
+/*
+ * A testing class instrumented to throw an exception at a controlled time.
+ *
+ * ThrowingValue implements a slightly relaxed version of the Regular concept --
+ * that is, it's a value type with the expected semantics. It also implements
+ * arithmetic operations. It doesn't implement member and pointer operators
+ * like operator-> or operator[].
+ *
+ * ThrowingValue can be instrumented to have certain operations be noexcept by
+ * using compile-time bitfield template arguments. That is, to make a
+ * ThrowingValue which has noexcept move construction/assignment and noexcept
+ * copy construction/assignment, use the following:
+ * ThrowingValue<testing::kNoThrowMove | testing::kNoThrowCopy> my_thrwr{val};
+ */
+template <TypeSpec Spec = TypeSpec::kEverythingThrows>
+class ThrowingValue : private exceptions_internal::TrackedObject {
+ static constexpr bool IsSpecified(TypeSpec spec) {
+ return static_cast<bool>(Spec & spec);
+ }
+
+ static constexpr int kDefaultValue = 0;
+ static constexpr int kBadValue = 938550620;
+
+ public:
+ ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = kDefaultValue;
+ }
+
+ ThrowingValue(const ThrowingValue& other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowCopy))
+ : TrackedObject(GetInstanceString(other.dummy_)) {
+ if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ }
+
+ ThrowingValue(ThrowingValue&& other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowMove))
+ : TrackedObject(GetInstanceString(other.dummy_)) {
+ if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ }
+
+ explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = i;
+ }
+
+ ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept
+ : TrackedObject(GetInstanceString(i)), dummy_(i) {}
+
+ // y_absl expects nothrow destructors
+ ~ThrowingValue() noexcept = default;
+
+ ThrowingValue& operator=(const ThrowingValue& other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowCopy)) {
+ dummy_ = kBadValue;
+ if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator=(ThrowingValue&& other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowMove)) {
+ dummy_ = kBadValue;
+ if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ return *this;
+ }
+
+ // Arithmetic Operators
+ ThrowingValue operator+(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator+() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator-(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator-() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(-dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue& operator++() {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ ++dummy_;
+ return *this;
+ }
+
+ ThrowingValue operator++(int) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ auto out = ThrowingValue(dummy_, nothrow_ctor);
+ ++dummy_;
+ return out;
+ }
+
+ ThrowingValue& operator--() {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ --dummy_;
+ return *this;
+ }
+
+ ThrowingValue operator--(int) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ auto out = ThrowingValue(dummy_, nothrow_ctor);
+ --dummy_;
+ return out;
+ }
+
+ ThrowingValue operator*(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator/(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator%(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator<<(int shift) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ << shift, nothrow_ctor);
+ }
+
+ ThrowingValue operator>>(int shift) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ >> shift, nothrow_ctor);
+ }
+
+ // Comparison Operators
+ // NOTE: We use `ThrowingBool` instead of `bool` because most STL
+  // types/containers require T to be convertible to bool.
+ friend ThrowingBool operator==(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ == b.dummy_;
+ }
+ friend ThrowingBool operator!=(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ != b.dummy_;
+ }
+ friend ThrowingBool operator<(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ < b.dummy_;
+ }
+ friend ThrowingBool operator<=(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ <= b.dummy_;
+ }
+ friend ThrowingBool operator>(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ > b.dummy_;
+ }
+ friend ThrowingBool operator>=(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ >= b.dummy_;
+ }
+
+ // Logical Operators
+ ThrowingBool operator!() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return !dummy_;
+ }
+
+ ThrowingBool operator&&(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return dummy_ && other.dummy_;
+ }
+
+ ThrowingBool operator||(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return dummy_ || other.dummy_;
+ }
+
+ // Bitwise Logical Operators
+ ThrowingValue operator~() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(~dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator&(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator|(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator^(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor);
+ }
+
+ // Compound Assignment operators
+ ThrowingValue& operator+=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ += other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator-=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ -= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator*=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ *= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator/=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ /= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator%=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ %= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator&=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ &= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator|=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ |= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator^=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ ^= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator<<=(int shift) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ <<= shift;
+ return *this;
+ }
+
+ ThrowingValue& operator>>=(int shift) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ >>= shift;
+ return *this;
+ }
+
+ // Pointer operators
+ void operator&() const = delete; // NOLINT(runtime/operator)
+
+ // Stream operators
+ friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return os << GetInstanceString(tv.dummy_);
+ }
+
+ friend std::istream& operator>>(std::istream& is, const ThrowingValue&) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return is;
+ }
+
+ // Memory management operators
+ static void* operator new(size_t s) noexcept(
+ IsSpecified(TypeSpec::kNoThrowNew)) {
+ if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+ }
+ return ::operator new(s);
+ }
+
+ static void* operator new[](size_t s) noexcept(
+ IsSpecified(TypeSpec::kNoThrowNew)) {
+ if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+ }
+ return ::operator new[](s);
+ }
+
+ template <typename... Args>
+ static void* operator new(size_t s, Args&&... args) noexcept(
+ IsSpecified(TypeSpec::kNoThrowNew)) {
+ if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+ }
+ return ::operator new(s, std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ static void* operator new[](size_t s, Args&&... args) noexcept(
+ IsSpecified(TypeSpec::kNoThrowNew)) {
+ if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+ }
+ return ::operator new[](s, std::forward<Args>(args)...);
+ }
+
+ // Abseil doesn't support throwing overloaded operator delete. These are
+ // provided so a throwing operator-new can clean up after itself.
+ void operator delete(void* p) noexcept { ::operator delete(p); }
+
+ template <typename... Args>
+ void operator delete(void* p, Args&&... args) noexcept {
+ ::operator delete(p, std::forward<Args>(args)...);
+ }
+
+ void operator delete[](void* p) noexcept { return ::operator delete[](p); }
+
+ template <typename... Args>
+ void operator delete[](void* p, Args&&... args) noexcept {
+ return ::operator delete[](p, std::forward<Args>(args)...);
+ }
+
+ // Non-standard access to the actual contained value. No need for this to
+ // throw.
+ int& Get() noexcept { return dummy_; }
+ const int& Get() const noexcept { return dummy_; }
+
+ private:
+ static TString GetInstanceString(int dummy) {
+ return y_absl::StrCat("ThrowingValue<",
+ exceptions_internal::GetSpecString(Spec), ">(", dummy,
+ ")");
+ }
+
+ int dummy_;
+};
+// Although unrelated to exceptions, explicitly delete the comma operator to
+// make sure we don't use it on user-supplied types.
+template <TypeSpec Spec, typename T>
+void operator,(const ThrowingValue<Spec>&, T&&) = delete;
+template <TypeSpec Spec, typename T>
+void operator,(T&&, const ThrowingValue<Spec>&) = delete;
+
+/*
+ * Configuration enum for the ThrowingAllocator type that defines behavior for
+ * the lifetime of the instance.
+ *
+ * kEverythingThrows: Calls to the member functions may throw
+ * kNoThrowAllocate: Calls to the member functions will not throw
+ */
+enum class AllocSpec {
+ kEverythingThrows = 0,
+ kNoThrowAllocate = 1,
+};
+
+/*
+ * An allocator type which is instrumented to throw at a controlled time, or not
+ * to throw, using AllocSpec. Two settings are supported: the default, in which
+ * every function that a conforming allocator is allowed to let throw may
+ * throw, and one in which nothing throws, in line with the
+ * ABSL_ALLOCATOR_THROWS configuration macro.
+ */
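+//
+// A hedged usage sketch (illustrative only): exercise a container with a
+// throwing allocator.
+//
+//   std::vector<int, testing::ThrowingAllocator<int>> v;
+//   v.push_back(1);  // the allocation here may throw a TestException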
+template <typename T, AllocSpec Spec = AllocSpec::kEverythingThrows>
+class ThrowingAllocator : private exceptions_internal::TrackedObject {
+ static constexpr bool IsSpecified(AllocSpec spec) {
+ return static_cast<bool>(Spec & spec);
+ }
+
+ public:
+ using pointer = T*;
+ using const_pointer = const T*;
+ using reference = T&;
+ using const_reference = const T&;
+ using void_pointer = void*;
+ using const_void_pointer = const void*;
+ using value_type = T;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+
+ using is_nothrow =
+ std::integral_constant<bool, Spec == AllocSpec::kNoThrowAllocate>;
+ using propagate_on_container_copy_assignment = std::true_type;
+ using propagate_on_container_move_assignment = std::true_type;
+ using propagate_on_container_swap = std::true_type;
+ using is_always_equal = std::false_type;
+
+ ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = std::make_shared<const int>(next_id_++);
+ }
+
+ template <typename U>
+ ThrowingAllocator(const ThrowingAllocator<U, Spec>& other) noexcept // NOLINT
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(other.State()) {}
+
+  // According to the C++11 standard [17.6.3.5], Table 28, the move/copy ctors
+  // of an allocator shall not exit via an exception, thus they are marked
+  // noexcept.
+ ThrowingAllocator(const ThrowingAllocator& other) noexcept
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(other.State()) {}
+
+ template <typename U>
+ ThrowingAllocator(ThrowingAllocator<U, Spec>&& other) noexcept // NOLINT
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(std::move(other.State())) {}
+
+ ThrowingAllocator(ThrowingAllocator&& other) noexcept
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(std::move(other.State())) {}
+
+ ~ThrowingAllocator() noexcept = default;
+
+ ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept {
+ dummy_ = other.State();
+ return *this;
+ }
+
+ template <typename U>
+ ThrowingAllocator& operator=(
+ const ThrowingAllocator<U, Spec>& other) noexcept {
+ dummy_ = other.State();
+ return *this;
+ }
+
+ template <typename U>
+ ThrowingAllocator& operator=(ThrowingAllocator<U, Spec>&& other) noexcept {
+ dummy_ = std::move(other.State());
+ return *this;
+ }
+
+ template <typename U>
+ struct rebind {
+ using other = ThrowingAllocator<U, Spec>;
+ };
+
+ pointer allocate(size_type n) noexcept(
+ IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ return static_cast<pointer>(::operator new(n * sizeof(T)));
+ }
+
+ pointer allocate(size_type n, const_void_pointer) noexcept(
+ IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ return allocate(n);
+ }
+
+ void deallocate(pointer ptr, size_type) noexcept {
+ ReadState();
+ ::operator delete(static_cast<void*>(ptr));
+ }
+
+ template <typename U, typename... Args>
+ void construct(U* ptr, Args&&... args) noexcept(
+ IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ ::new (static_cast<void*>(ptr)) U(std::forward<Args>(args)...);
+ }
+
+ template <typename U>
+ void destroy(U* p) noexcept {
+ ReadState();
+ p->~U();
+ }
+
+ size_type max_size() const noexcept {
+ return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
+ }
+
+ ThrowingAllocator select_on_container_copy_construction() noexcept(
+ IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ return *this;
+ }
+
+ template <typename U>
+ bool operator==(const ThrowingAllocator<U, Spec>& other) const noexcept {
+ return dummy_ == other.dummy_;
+ }
+
+ template <typename U>
+ bool operator!=(const ThrowingAllocator<U, Spec>& other) const noexcept {
+ return dummy_ != other.dummy_;
+ }
+
+ template <typename, AllocSpec>
+ friend class ThrowingAllocator;
+
+ private:
+ static TString GetInstanceString(int dummy) {
+ return y_absl::StrCat("ThrowingAllocator<",
+ exceptions_internal::GetSpecString(Spec), ">(", dummy,
+ ")");
+ }
+
+ const std::shared_ptr<const int>& State() const { return dummy_; }
+ std::shared_ptr<const int>& State() { return dummy_; }
+
+ void ReadState() {
+    // We know that this will never be true, but the compiler doesn't, so this
+    // should safely force a read of the value.
+ if (*dummy_ < 0) std::abort();
+ }
+
+ void ReadStateAndMaybeThrow(y_absl::string_view msg) const {
+ if (!IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ exceptions_internal::MaybeThrow(
+ y_absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg));
+ }
+ }
+
+ static int next_id_;
+ std::shared_ptr<const int> dummy_;
+};
+
+template <typename T, AllocSpec Spec>
+int ThrowingAllocator<T, Spec>::next_id_ = 0;
+
+// Tests for resource leaks by repeatedly attempting to construct a T from
+// args, using the countdown method, until construction succeeds. Side effects
+// can then be checked for leaks.
+template <typename T, typename... Args>
+void TestThrowingCtor(Args&&... args) {
+ struct Cleanup {
+ ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+ } c;
+ for (int count = 0;; ++count) {
+ exceptions_internal::ConstructorTracker ct(count);
+ exceptions_internal::SetCountdown(count);
+ try {
+ T temp(std::forward<Args>(args)...);
+ static_cast<void>(temp);
+ break;
+ } catch (const exceptions_internal::TestException&) {
+ }
+ }
+}
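+
+// A hedged usage sketch (illustrative only): verify that a throwing
+// constructor cleans up any resources it acquired before the throw.
+//
+//   testing::TestThrowingCtor<ThrowingValue<>>(42);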
+
+// Tests the nothrow guarantee of the provided nullary operation. If an
+// exception is thrown, the result will be AssertionFailure(). Otherwise, it
+// will be AssertionSuccess().
+template <typename Operation>
+testing::AssertionResult TestNothrowOp(const Operation& operation) {
+ struct Cleanup {
+ Cleanup() { exceptions_internal::SetCountdown(); }
+ ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+ } c;
+ try {
+ operation();
+ return testing::AssertionSuccess();
+ } catch (const exceptions_internal::TestException&) {
+ return testing::AssertionFailure()
+ << "TestException thrown during call to operation() when nothrow "
+ "guarantee was expected.";
+ } catch (...) {
+ return testing::AssertionFailure()
+ << "Unknown exception thrown during call to operation() when "
+ "nothrow guarantee was expected.";
+ }
+}
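+
+// A hedged usage sketch (illustrative only): assert that moving a
+// kNoThrowMove value really is nothrow.
+//
+//   ThrowingValue<TypeSpec::kNoThrowMove> v(1, nothrow_ctor);
+//   EXPECT_TRUE(TestNothrowOp([&]() { auto moved = std::move(v); }));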
+
+namespace exceptions_internal {
+
+// Dummy struct for ExceptionSafetyTestBuilder<> partial state.
+struct UninitializedT {};
+
+template <typename T>
+class DefaultFactory {
+ public:
+ explicit DefaultFactory(const T& t) : t_(t) {}
+ std::unique_ptr<T> operator()() const { return y_absl::make_unique<T>(t_); }
+
+ private:
+ T t_;
+};
+
+template <size_t LazyContractsCount, typename LazyFactory,
+ typename LazyOperation>
+using EnableIfTestable = typename y_absl::enable_if_t<
+ LazyContractsCount != 0 &&
+ !std::is_same<LazyFactory, UninitializedT>::value &&
+ !std::is_same<LazyOperation, UninitializedT>::value>;
+
+template <typename Factory = UninitializedT,
+ typename Operation = UninitializedT, typename... Contracts>
+class ExceptionSafetyTestBuilder;
+
+} // namespace exceptions_internal
+
+/*
+ * Constructs an empty ExceptionSafetyTestBuilder. All
+ * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation
+ * methods return new instances of ExceptionSafetyTestBuilder.
+ *
+ * In order to test a T for exception safety, a factory for that T, a testable
+ * operation, and at least one contract callback returning an assertion
+ * result must be applied using the respective methods.
+ */
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester();
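+
+// A hedged usage sketch (illustrative only; `MyType` and `DoSomething` are
+// hypothetical):
+//
+//   auto tester = testing::MakeExceptionSafetyTester()
+//                     .WithInitialValue(MyType{})
+//                     .WithContracts(testing::strong_guarantee);
+//   EXPECT_TRUE(tester.Test([](MyType* t) { t->DoSomething(); }));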
+
+namespace exceptions_internal {
+template <typename T>
+struct IsUniquePtr : std::false_type {};
+
+template <typename T, typename D>
+struct IsUniquePtr<std::unique_ptr<T, D>> : std::true_type {};
+
+template <typename Factory>
+struct FactoryPtrTypeHelper {
+ using type = decltype(std::declval<const Factory&>()());
+
+ static_assert(IsUniquePtr<type>::value, "Factories must return a unique_ptr");
+};
+
+template <typename Factory>
+using FactoryPtrType = typename FactoryPtrTypeHelper<Factory>::type;
+
+template <typename Factory>
+using FactoryElementType = typename FactoryPtrType<Factory>::element_type;
+
+template <typename T>
+class ExceptionSafetyTest {
+ using Factory = std::function<std::unique_ptr<T>()>;
+ using Operation = std::function<void(T*)>;
+ using Contract = std::function<AssertionResult(T*)>;
+
+ public:
+ template <typename... Contracts>
+ explicit ExceptionSafetyTest(const Factory& f, const Operation& op,
+ const Contracts&... contracts)
+ : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {}
+
+ AssertionResult Test() const {
+ for (int count = 0;; ++count) {
+ exceptions_internal::ConstructorTracker ct(count);
+
+ for (const auto& contract : contracts_) {
+ auto t_ptr = factory_();
+ try {
+ SetCountdown(count);
+ operation_(t_ptr.get());
+ // Unset for the case that the operation throws no exceptions, which
+ // would leave the countdown set and break the *next* exception safety
+ // test after this one.
+ UnsetCountdown();
+ return AssertionSuccess();
+ } catch (const exceptions_internal::TestException& e) {
+ if (!contract(t_ptr.get())) {
+ return AssertionFailure() << e.what() << " failed contract check";
+ }
+ }
+ }
+ }
+ }
+
+ private:
+ template <typename ContractFn>
+ Contract WrapContract(const ContractFn& contract) {
+ return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); };
+ }
+
+ Contract WrapContract(StrongGuaranteeTagType) {
+ return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); };
+ }
+
+ Factory factory_;
+ Operation operation_;
+ std::vector<Contract> contracts_;
+};
+
+/*
+ * Builds a tester object that tests if performing an operation on a T follows
+ * exception safety guarantees. Verification is done via contract assertion
+ * callbacks applied to T instances post-throw.
+ *
+ * Template parameters for ExceptionSafetyTestBuilder:
+ *
+ * - Factory: The factory object (passed in via tester.WithFactory(...) or
+ * tester.WithInitialValue(...)) must be invocable with the signature
+ * `std::unique_ptr<T> operator()() const` where T is the type being tested.
+ * It is used for reliably creating identical T instances to test on.
+ *
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
+ * or tester.Test(...)) must be invocable with the signature
+ * `void operator()(T*) const` where T is the type being tested. It is used
+ * for performing steps on a T instance that may throw and that need to be
+ * checked for exception safety. Each call to the operation will receive a
+ * fresh T instance so it's free to modify and destroy the T instances as it
+ * pleases.
+ *
+ * - Contracts...: The contract assertion callback objects (passed in via
+ * tester.WithContracts(...)) must be invocable with the signature
+ * `testing::AssertionResult operator()(T*) const` where T is the type being
+ * tested. Contract assertion callbacks are provided T instances post-throw.
+ * They must return testing::AssertionSuccess when the type contracts of the
+ * provided T instance hold. If the type contracts of the T instance do not
+ * hold, they must return testing::AssertionFailure. Execution order of
+ * Contracts... is unspecified. They will each individually get a fresh T
+ * instance so they are free to modify and destroy the T instances as they
+ * please.
+ */
+template <typename Factory, typename Operation, typename... Contracts>
+class ExceptionSafetyTestBuilder {
+ public:
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with an included T factory based
+ * on the provided T instance. The existing factory will not be included in
+ * the newly created tester instance. The created factory returns a new T
+ * instance by copy-constructing the provided const T& t.
+ *
+ * Preconditions for tester.WithInitialValue(const T& t):
+ *
+ * - The const T& t object must be copy-constructible where T is the type
+ * being tested. For non-copy-constructible objects, use the method
+ * tester.WithFactory(...).
+ */
+ template <typename T>
+ ExceptionSafetyTestBuilder<DefaultFactory<T>, Operation, Contracts...>
+ WithInitialValue(const T& t) const {
+ return WithFactory(DefaultFactory<T>(t));
+ }
+
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with the provided T factory
+ * included. The existing factory will not be included in the newly-created
+ * tester instance. This method is intended for use with types lacking a copy
+ * constructor. Types that can be copy-constructed should instead use the
+ * method tester.WithInitialValue(...).
+ */
+ template <typename NewFactory>
+ ExceptionSafetyTestBuilder<y_absl::decay_t<NewFactory>, Operation, Contracts...>
+ WithFactory(const NewFactory& new_factory) const {
+ return {new_factory, operation_, contracts_};
+ }
+
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with the provided testable
+ * operation included. The existing operation will not be included in the
+ * newly created tester.
+ */
+ template <typename NewOperation>
+ ExceptionSafetyTestBuilder<Factory, y_absl::decay_t<NewOperation>, Contracts...>
+ WithOperation(const NewOperation& new_operation) const {
+ return {factory_, new_operation, contracts_};
+ }
+
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts...
+ * combined with the Contracts... that were already included in the instance
+ * on which the method was called. Contracts... cannot be removed or replaced
+ * once added to an ExceptionSafetyTestBuilder instance. A fresh object must
+ * be created in order to get an empty Contracts... list.
+ *
+ * In addition to passing in custom contract assertion callbacks, this method
+ * accepts `testing::strong_guarantee` as an argument, which checks T instances
+ * post-throw against freshly created T instances via operator== to verify
+ * that any state changes made during the execution of the operation were
+ * properly rolled back.
+ */
+ template <typename... MoreContracts>
+ ExceptionSafetyTestBuilder<Factory, Operation, Contracts...,
+ y_absl::decay_t<MoreContracts>...>
+ WithContracts(const MoreContracts&... more_contracts) const {
+ return {
+ factory_, operation_,
+ std::tuple_cat(contracts_, std::tuple<y_absl::decay_t<MoreContracts>...>(
+ more_contracts...))};
+ }
+
+ /*
+ * Returns a testing::AssertionResult that is the reduced result of the
+ * exception safety algorithm. The algorithm short circuits and returns
+ * AssertionFailure after the first contract callback returns an
+ * AssertionFailure. Otherwise, if all contract callbacks return an
+ * AssertionSuccess, the reduced result is AssertionSuccess.
+ *
+ * The passed-in testable operation will not be saved in a new tester instance
+ * nor will it modify/replace the existing tester instance. This is useful
+ * when each operation being tested is unique and does not need to be reused.
+ *
+ * Preconditions for tester.Test(const NewOperation& new_operation):
+ *
+ * - May only be called after at least one contract assertion callback and a
+ * factory or initial value have been provided.
+ */
+ template <
+ typename NewOperation,
+ typename = EnableIfTestable<sizeof...(Contracts), Factory, NewOperation>>
+ testing::AssertionResult Test(const NewOperation& new_operation) const {
+ return TestImpl(new_operation, y_absl::index_sequence_for<Contracts...>());
+ }
+
+ /*
+ * Returns a testing::AssertionResult that is the reduced result of the
+ * exception safety algorithm. The algorithm short circuits and returns
+ * AssertionFailure after the first contract callback returns an
+ * AssertionFailure. Otherwise, if all contract callbacks return an
+ * AssertionSuccess, the reduced result is AssertionSuccess.
+ *
+ * Preconditions for tester.Test():
+ *
+ * - May only be called after at least one contract assertion callback, a
+ * factory or initial value and a testable operation have been provided.
+ */
+ template <
+ typename LazyOperation = Operation,
+ typename = EnableIfTestable<sizeof...(Contracts), Factory, LazyOperation>>
+ testing::AssertionResult Test() const {
+ return Test(operation_);
+ }
+
+ private:
+ template <typename, typename, typename...>
+ friend class ExceptionSafetyTestBuilder;
+
+ friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester();
+
+ ExceptionSafetyTestBuilder() {}
+
+ ExceptionSafetyTestBuilder(const Factory& f, const Operation& o,
+ const std::tuple<Contracts...>& i)
+ : factory_(f), operation_(o), contracts_(i) {}
+
+ template <typename SelectedOperation, size_t... Indices>
+ testing::AssertionResult TestImpl(SelectedOperation selected_operation,
+ y_absl::index_sequence<Indices...>) const {
+ return ExceptionSafetyTest<FactoryElementType<Factory>>(
+ factory_, selected_operation, std::get<Indices>(contracts_)...)
+ .Test();
+ }
+
+ Factory factory_;
+ Operation operation_;
+ std::tuple<Contracts...> contracts_;
+};
+
+} // namespace exceptions_internal
+
+} // namespace testing
+
+#endif // ABSL_HAVE_EXCEPTIONS
+
+#endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/exception_testing.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/exception_testing.h
new file mode 100644
index 00000000000..f2dd04b9d62
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/exception_testing.h
@@ -0,0 +1,42 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Testing utilities for ABSL types which throw exceptions.
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+
+#include "gtest/gtest.h"
+#include "y_absl/base/config.h"
+
+// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception
+// if exceptions are enabled, or for death with a specified text in the error
+// message.
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+ EXPECT_THROW(expr, exception_t)
+
+#elif defined(__ANDROID__)
+// Android asserts do not log anywhere that gtest can currently inspect.
+// So we expect exit, but cannot match the message.
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+ EXPECT_DEATH(expr, ".*")
+#else
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+ EXPECT_DEATH_IF_SUPPORTED(expr, text)
+
+#endif
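+
+// Usage sketch (illustrative; `CheckedIndex` is a hypothetical helper that
+// throws std::out_of_range when exceptions are enabled and asserts otherwise):
+//
+//   ABSL_BASE_INTERNAL_EXPECT_FAIL(CheckedIndex(v, 100), std::out_of_range,
+//                                  "out of range");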
+
+#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h
new file mode 100644
index 00000000000..c82cba7b4a6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h
@@ -0,0 +1,48 @@
+//
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename Type>
+struct FastTypeTag {
+ constexpr static char dummy_var = 0;
+};
+
+template <typename Type>
+constexpr char FastTypeTag<Type>::dummy_var;
+
+// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
+// passed-in type. These are meant to be a good match for keys into maps or
+// for straight-up comparisons.
+using FastTypeIdType = const void*;
+
+template <typename Type>
+constexpr inline FastTypeIdType FastTypeId() {
+ return &FastTypeTag<Type>::dummy_var;
+}
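+
+// Example (illustrative):
+//
+//   FastTypeIdType int_id = FastTypeId<int>();
+//   assert(int_id == FastTypeId<int>());   // stable for a given type
+//   assert(int_id != FastTypeId<long>());  // distinct types, distinct ids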
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/hide_ptr.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/hide_ptr.h
new file mode 100644
index 00000000000..ccdad25cb78
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/hide_ptr.h
@@ -0,0 +1,51 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
+#define ABSL_BASE_INTERNAL_HIDE_PTR_H_
+
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Arbitrary value with high bits set. Xor'ing with it is unlikely
+// to map one valid pointer to another valid pointer.
+constexpr uintptr_t HideMask() {
+ return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
+}
+
+// Hide a pointer from the leak checker. For internal use only.
+// Differs from y_absl::IgnoreLeak(ptr) in that y_absl::IgnoreLeak(ptr) causes ptr
+// and all objects reachable from ptr to be ignored by the leak checker.
+template <class T>
+inline uintptr_t HidePtr(T* ptr) {
+ return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
+}
+
+// Return a pointer that has been hidden from the leak checker.
+// For internal use only.
+template <class T>
+inline T* UnhidePtr(uintptr_t hidden) {
+ return reinterpret_cast<T*>(hidden ^ HideMask());
+}
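+
+// Example (illustrative): a pointer round-trips through HidePtr()/UnhidePtr()
+// unchanged, while the hidden representation is opaque to the leak checker:
+//
+//   int* p = new int(42);
+//   uintptr_t hidden = HidePtr(p);
+//   assert(UnhidePtr<int>(hidden) == p);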
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/identity.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/identity.h
new file mode 100644
index 00000000000..2360afeed24
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/identity.h
@@ -0,0 +1,37 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_IDENTITY_H_
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace internal {
+
+template <typename T>
+struct identity {
+ typedef T type;
+};
+
+template <typename T>
+using identity_t = typename identity<T>::type;
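+
+// Example (illustrative): identity_t<T> is a non-deduced context, so in a
+// hypothetical helper
+//
+//   template <typename T>
+//   void Assign(T& dst, identity_t<T> src) { dst = src; }
+//
+// T is deduced from `dst` alone and `src` merely converts to it, e.g.
+// `int i = 0; Assign(i, 2L);` deduces T = int.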
+
+} // namespace internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_IDENTITY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/inline_variable.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/inline_variable.h
new file mode 100644
index 00000000000..38799c5722c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/inline_variable.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+
+#include <type_traits>
+
+#include "y_absl/base/internal/identity.h"
+
+// File:
+// This file defines a macro that creates or emulates C++17 inline variables,
+// depending on whether the feature is supported.
+
+////////////////////////////////////////////////////////////////////////////////
+// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init)
+//
+// Description:
+// Expands to the equivalent of an inline constexpr instance of the specified
+// `type` and `name`, initialized to the value `init`. If the compiler being
+// used is detected as supporting actual inline variables as a language
+// feature, then the macro expands to an actual inline variable definition.
+//
+// Requires:
+// `type` is a type that is usable in an extern variable declaration.
+//
+// Requires: `name` is a valid identifier.
+//
+// Requires:
+// `init` is an expression that can be used in the following definition:
+// constexpr type name = init;
+//
+// Usage:
+//
+// // Equivalent to: `inline constexpr size_t variant_npos = -1;`
+// ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
+//
+// Differences in implementation:
+// For a direct, language-level inline variable, decltype(name) will be the
+// type that was specified along with const qualification, whereas for
+// emulated inline variables, decltype(name) may be different (in practice
+// it will likely be a reference type).
+////////////////////////////////////////////////////////////////////////////////
+
+#ifdef __cpp_inline_variables
+
+// Clang's -Wmissing-variable-declarations option erroneously warned that
+// inline constexpr objects need to be pre-declared. This has now been fixed,
+// but we will need to support this workaround for people building with older
+// versions of clang.
+//
+// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
+//
+// Note:
+// identity_t is used here so that the const and name are in the
+// appropriate place for pointer types, reference types, function pointer
+//   types, etc.
+#if defined(__clang__)
+#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
+ extern const ::y_absl::internal::identity_t<type> name;
+#else // Otherwise, just define the macro to do nothing.
+#define ABSL_INTERNAL_EXTERN_DECL(type, name)
+#endif // defined(__clang__)
+
+// See above comment at top of file for details.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
+ ABSL_INTERNAL_EXTERN_DECL(type, name) \
+ inline constexpr ::y_absl::internal::identity_t<type> name = init
+
+#else
+
+// See above comment at top of file for details.
+//
+// Note:
+// identity_t is used here so that the const and name are in the
+// appropriate place for pointer types, reference types, function pointer
+//   types, etc.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \
+ template <class /*AbslInternalDummy*/ = void> \
+ struct AbslInternalInlineVariableHolder##name { \
+ static constexpr ::y_absl::internal::identity_t<var_type> kInstance = init; \
+ }; \
+ \
+ template <class AbslInternalDummy> \
+ constexpr ::y_absl::internal::identity_t<var_type> \
+ AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \
+ \
+ static constexpr const ::y_absl::internal::identity_t<var_type>& \
+ name = /* NOLINT */ \
+ AbslInternalInlineVariableHolder##name<>::kInstance; \
+ static_assert(sizeof(void (*)(decltype(name))) != 0, \
+ "Silence unused variable warnings.")
+
+#endif // __cpp_inline_variables
+
+#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/inline_variable_testing.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/inline_variable_testing.h
new file mode 100644
index 00000000000..190e74e643a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/inline_variable_testing.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+
+#include "y_absl/base/internal/inline_variable.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace inline_variable_testing_internal {
+
+struct Foo {
+ int value = 5;
+};
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr);
+
+const Foo& get_foo_a();
+const Foo& get_foo_b();
+
+const int& get_int_a();
+const int& get_int_b();
+
+} // namespace inline_variable_testing_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h
new file mode 100644
index 00000000000..928666ff44b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h
@@ -0,0 +1,187 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// y_absl::base_internal::invoke(f, args...) is an implementation of
+// INVOKE(f, args...) from section [func.require] of the C++ standard.
+//
+// [func.require]
+// Define INVOKE (f, t1, t2, ..., tN) as follows:
+// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+// and t1 is an object of type T or a reference to an object of type T or a
+// reference to an object of a type derived from T;
+// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+// class T and t1 is not one of the types described in the previous item;
+// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+// an object of type T or a reference to an object of type T or a reference
+// to an object of a type derived from T;
+// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+// is not one of the types described in the previous item;
+// 5. f(t1, t2, ..., tN) in all other cases.
+//
+// The implementation is SFINAE-friendly: substitution failure within invoke()
+// isn't an error.
+
+#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
+#define ABSL_BASE_INTERNAL_INVOKE_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/meta/type_traits.h"
+
+// The following code is internal implementation detail. See the comment at the
+// top of this file for the API documentation.
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// The five classes below each implement one of the clauses from the definition
+// of INVOKE. The inner class template Accept<F, Args...> checks whether the
+// clause is applicable; static function template Invoke(f, args...) does the
+// invocation.
+//
+// By separating the clause selection logic from invocation we make sure that
+// Invoke() does exactly what the standard says.
+
+template <typename Derived>
+struct StrippedAccept {
+ template <typename... Args>
+ struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
+ typename std::remove_reference<Args>::type>::type...> {};
+};
+
+// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+// and t1 is an object of type T or a reference to an object of type T or a
+// reference to an object of a type derived from T.
+struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
+ template <typename... Args>
+ struct AcceptImpl : std::false_type {};
+
+ template <typename MemFunType, typename C, typename Obj, typename... Args>
+ struct AcceptImpl<MemFunType C::*, Obj, Args...>
+ : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
+ y_absl::is_function<MemFunType>::value> {
+ };
+
+ template <typename MemFun, typename Obj, typename... Args>
+ static decltype((std::declval<Obj>().*
+ std::declval<MemFun>())(std::declval<Args>()...))
+ Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+ return (std::forward<Obj>(obj).*
+ std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+ }
+};
+
+// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+// class T and t1 is not one of the types described in the previous item.
+struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
+ template <typename... Args>
+ struct AcceptImpl : std::false_type {};
+
+ template <typename MemFunType, typename C, typename Ptr, typename... Args>
+ struct AcceptImpl<MemFunType C::*, Ptr, Args...>
+ : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
+ y_absl::is_function<MemFunType>::value> {
+ };
+
+ template <typename MemFun, typename Ptr, typename... Args>
+ static decltype(((*std::declval<Ptr>()).*
+ std::declval<MemFun>())(std::declval<Args>()...))
+ Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
+ return ((*std::forward<Ptr>(ptr)).*
+ std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+ }
+};
+
+// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+// an object of type T or a reference to an object of type T or a reference
+// to an object of a type derived from T.
+struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
+ template <typename... Args>
+ struct AcceptImpl : std::false_type {};
+
+ template <typename R, typename C, typename Obj>
+ struct AcceptImpl<R C::*, Obj>
+ : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
+ !y_absl::is_function<R>::value> {};
+
+ template <typename DataMem, typename Ref>
+ static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
+ DataMem&& data_mem, Ref&& ref) {
+ return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
+ }
+};
+
+// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+// is not one of the types described in the previous item.
+struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
+ template <typename... Args>
+ struct AcceptImpl : std::false_type {};
+
+ template <typename R, typename C, typename Ptr>
+ struct AcceptImpl<R C::*, Ptr>
+ : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
+ !y_absl::is_function<R>::value> {};
+
+ template <typename DataMem, typename Ptr>
+ static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
+ DataMem&& data_mem, Ptr&& ptr) {
+ return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
+ }
+};
+
+// f(t1, t2, ..., tN) in all other cases.
+struct Callable {
+ // Callable doesn't have Accept because it's the last clause that gets picked
+ // when none of the previous clauses are applicable.
+ template <typename F, typename... Args>
+ static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
+ F&& f, Args&&... args) {
+ return std::forward<F>(f)(std::forward<Args>(args)...);
+ }
+};
+
+// Resolves to the first matching clause.
+template <typename... Args>
+struct Invoker {
+ typedef typename std::conditional<
+ MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
+ typename std::conditional<
+ MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
+ typename std::conditional<
+ DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
+ typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
+ DataMemAndPtr, Callable>::type>::type>::
+ type>::type type;
+};
+
+// The result type of Invoke<F, Args...>.
+template <typename F, typename... Args>
+using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
+ std::declval<F>(), std::declval<Args>()...));
+
+// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
+// [func.require] of the C++ standard.
+template <typename F, typename... Args>
+invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
+ return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
+ std::forward<Args>(args)...);
+}
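+
+// Example (illustrative):
+//
+//   struct Point { int x; int Get() const { return x; } };
+//   Point pt{7};
+//   int a = base_internal::invoke(&Point::Get, pt);  // clause 1: (t1.*f)()
+//   int b = base_internal::invoke(&Point::x, &pt);   // clause 4: (*t1).*f
+//   int c = base_internal::invoke([](int v) { return v; }, 3);  // clause 5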
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_INVOKE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc
new file mode 100644
index 00000000000..0c477d1b287
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc
@@ -0,0 +1,620 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A low-level allocator that can be used by other low-level
+// modules without introducing dependency cycles.
+// This allocator is slow and wasteful of memory;
+// it should not be used when performance is key.
+
+#include "y_absl/base/internal/low_level_alloc.h"
+
+#include <type_traits>
+
+#include "y_absl/base/call_once.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/direct_mmap.h"
+#include "y_absl/base/internal/scheduling_mode.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/thread_annotations.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#else
+#include <windows.h>
+#endif
+
+#include <string.h>
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstddef>
+#include <new> // for placement-new
+
+#include "y_absl/base/dynamic_annotations.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/spinlock.h"
+
+// MAP_ANONYMOUS
+#if defined(__APPLE__)
+// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
+// deprecated. On Darwin, MAP_ANON is all there is.
+#if !defined MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif // !MAP_ANONYMOUS
+#endif // __APPLE__
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A first-fit allocator with amortized logarithmic free() time.
+
+// ---------------------------------------------------------------------------
+static const int kMaxLevel = 30;
+
+namespace {
+// This struct describes one allocated block, or one free block.
+struct AllocList {
+ struct Header {
+ // Size of entire region, including this field. Must be
+ // first. Valid in both allocated and unallocated blocks.
+ uintptr_t size;
+
+ // kMagicAllocated or kMagicUnallocated xor this.
+ uintptr_t magic;
+
+ // Pointer to parent arena.
+ LowLevelAlloc::Arena *arena;
+
+ // Aligns regions to 0 mod 2*sizeof(void*).
+ void *dummy_for_alignment;
+ } header;
+
+ // Next two fields: in unallocated blocks: freelist skiplist data
+ // in allocated blocks: overlaps with client data
+
+ // Levels in skiplist used.
+ int levels;
+
+ // Actually has levels elements. The AllocList node may not have room
+ // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
+ AllocList *next[kMaxLevel];
+};
+} // namespace
+
+// ---------------------------------------------------------------------------
+// A trivial skiplist implementation. This is used to keep the freelist
+// in address order while taking only logarithmic time per insert and delete.
+
+// An integer approximation of log2(size/base)
+// Requires size >= base.
+static int IntLog2(size_t size, size_t base) {
+ int result = 0;
+ for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
+ result++;
+ }
+ // floor(size / 2**result) <= base < floor(size / 2**(result-1))
+ // => log2(size/(base+1)) <= result < 1+log2(size/base)
+ // => result ~= log2(size/base)
+ return result;
+}
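+// For example, IntLog2(32, 4) == 3, matching log2(32 / 4).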
+
+// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
+static int Random(uint32_t *state) {
+ uint32_t r = *state;
+ int result = 1;
+ while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+ result++;
+ }
+ *state = r;
+ return result;
+}
+
+// Return a number of skiplist levels for a node of size bytes, where
+// base is the minimum node size. Compute level=log2(size / base)+n
+// where n is 1 if random is null and otherwise a random number generated with
+// the standard skiplist distribution; see Random() above.
+// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
+// term, so first-fit searches touch fewer nodes. "level" is clipped so
+// level<kMaxLevel and next[level-1] will fit in the node.
+// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
+static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
+ // max_fit is the maximum number of levels that will fit in a node for the
+ // given size. We can't return more than max_fit, no matter what the
+ // random number generator says.
+ size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
+ int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
+ if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
+ if (level > kMaxLevel-1) level = kMaxLevel - 1;
+ ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
+ return level;
+}
+
+// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
+// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
+// points to the last element at level i in the AllocList less than *e, or is
+// head if no such element exists.
+static AllocList *LLA_SkiplistSearch(AllocList *head,
+ AllocList *e, AllocList **prev) {
+ AllocList *p = head;
+ for (int level = head->levels - 1; level >= 0; level--) {
+ for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
+ }
+ prev[level] = p;
+ }
+ return (head->levels == 0) ? nullptr : prev[0]->next[0];
+}
+
+// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
+ AllocList **prev) {
+ LLA_SkiplistSearch(head, e, prev);
+ for (; head->levels < e->levels; head->levels++) { // extend prev pointers
+ prev[head->levels] = head; // to all *e's levels
+ }
+ for (int i = 0; i != e->levels; i++) { // add element to list
+ e->next[i] = prev[i]->next[i];
+ prev[i]->next[i] = e;
+ }
+}
+
+// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
+ AllocList **prev) {
+ AllocList *found = LLA_SkiplistSearch(head, e, prev);
+ ABSL_RAW_CHECK(e == found, "element not in freelist");
+ for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
+ prev[i]->next[i] = e->next[i];
+ }
+ while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
+ head->levels--; // reduce head->levels if level unused
+ }
+}
+
+// ---------------------------------------------------------------------------
+// Arena implementation
+
+// Metadata for an LowLevelAlloc arena instance.
+struct LowLevelAlloc::Arena {
+ // Constructs an arena with the given LowLevelAlloc flags.
+ explicit Arena(uint32_t flags_value);
+
+ base_internal::SpinLock mu;
+ // Head of free list, sorted by address
+ AllocList freelist ABSL_GUARDED_BY(mu);
+ // Count of allocated blocks
+ int32_t allocation_count ABSL_GUARDED_BY(mu);
+ // flags passed to NewArena
+ const uint32_t flags;
+ // Result of sysconf(_SC_PAGESIZE)
+ const size_t pagesize;
+ // Lowest power of two >= max(16, sizeof(AllocList))
+ const size_t round_up;
+ // Smallest allocation block size
+ const size_t min_size;
+ // PRNG state
+ uint32_t random ABSL_GUARDED_BY(mu);
+};
+
+namespace {
+// Static storage space for the lazily-constructed, default global arena
+// instances. We require this space because the whole point of LowLevelAlloc
+// is to avoid relying on malloc/new.
+alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof(
+ LowLevelAlloc::Arena)];
+alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof(
+ LowLevelAlloc::Arena)];
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+alignas(
+ LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage
+ [sizeof(LowLevelAlloc::Arena)];
+#endif
+
+// We must use LowLevelCallOnce here to construct the global arenas, rather than
+// using function-level statics, to avoid recursively invoking the scheduler.
+y_absl::once_flag create_globals_once;
+
+void CreateGlobalArenas() {
+ new (&default_arena_storage)
+ LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
+ new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ new (&unhooked_async_sig_safe_arena_storage)
+ LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
+#endif
+}
+
+// Returns a global arena that does not call into hooks. Used by NewArena()
+// when kCallMallocHook is not set.
+LowLevelAlloc::Arena* UnhookedArena() {
+ base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+ return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
+}
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+// Returns a global arena that is async-signal safe. Used by NewArena() when
+// kAsyncSignalSafe is set.
+LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
+ base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+ return reinterpret_cast<LowLevelAlloc::Arena *>(
+ &unhooked_async_sig_safe_arena_storage);
+}
+#endif
+
+} // namespace
+
+// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
+ base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+ return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
+}
+
+// magic numbers to identify allocated and unallocated blocks
+static const uintptr_t kMagicAllocated = 0x4c833e95U;
+static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
+
+namespace {
+class ABSL_SCOPED_LOCKABLE ArenaLock {
+ public:
+ explicit ArenaLock(LowLevelAlloc::Arena *arena)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
+ : arena_(arena) {
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+ sigset_t all;
+ sigfillset(&all);
+ mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
+ }
+#endif
+ arena_->mu.Lock();
+ }
+ ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
+ void Leave() ABSL_UNLOCK_FUNCTION() {
+ arena_->mu.Unlock();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if (mask_valid_) {
+ const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
+ }
+ }
+#endif
+ left_ = true;
+ }
+
+ private:
+ bool left_ = false; // whether left region
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ bool mask_valid_ = false;
+ sigset_t mask_; // old mask of blocked signals
+#endif
+ LowLevelAlloc::Arena *arena_;
+ ArenaLock(const ArenaLock &) = delete;
+ ArenaLock &operator=(const ArenaLock &) = delete;
+};
+} // namespace
+
+// create an appropriate magic number for an object at "ptr"
+// "magic" should be kMagicAllocated or kMagicUnallocated
+inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
+ return magic ^ reinterpret_cast<uintptr_t>(ptr);
+}
+
+namespace {
+size_t GetPageSize() {
+#ifdef _WIN32
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
+#elif defined(__wasm__) || defined(__asmjs__)
+ return getpagesize();
+#else
+ return sysconf(_SC_PAGESIZE);
+#endif
+}
+
+size_t RoundedUpBlockSize() {
+ // Round up block sizes to a power of two close to the header size.
+ size_t round_up = 16;
+ while (round_up < sizeof(AllocList::Header)) {
+ round_up += round_up;
+ }
+ return round_up;
+}
+
+} // namespace
+
+LowLevelAlloc::Arena::Arena(uint32_t flags_value)
+ : mu(base_internal::SCHEDULE_KERNEL_ONLY),
+ allocation_count(0),
+ flags(flags_value),
+ pagesize(GetPageSize()),
+ round_up(RoundedUpBlockSize()),
+ min_size(2 * round_up),
+ random(0) {
+ freelist.header.size = 0;
+ freelist.header.magic =
+ Magic(kMagicUnallocated, &freelist.header);
+ freelist.header.arena = this;
+ freelist.levels = 0;
+ memset(freelist.next, 0, sizeof(freelist.next));
+}
+
+// L < meta_data_arena->mu
+LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
+ Arena *meta_data_arena = DefaultArena();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+ meta_data_arena = UnhookedAsyncSigSafeArena();
+ } else // NOLINT(readability/braces)
+#endif
+ if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
+ meta_data_arena = UnhookedArena();
+ }
+ Arena *result =
+ new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
+ return result;
+}
+
+// L < arena->mu, L < arena->arena->mu
+bool LowLevelAlloc::DeleteArena(Arena *arena) {
+ ABSL_RAW_CHECK(
+ arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
+ "may not delete default arena");
+ ArenaLock section(arena);
+ if (arena->allocation_count != 0) {
+ section.Leave();
+ return false;
+ }
+ while (arena->freelist.next[0] != nullptr) {
+ AllocList *region = arena->freelist.next[0];
+ size_t size = region->header.size;
+ arena->freelist.next[0] = region->next[0];
+ ABSL_RAW_CHECK(
+ region->header.magic == Magic(kMagicUnallocated, &region->header),
+ "bad magic number in DeleteArena()");
+ ABSL_RAW_CHECK(region->header.arena == arena,
+ "bad arena pointer in DeleteArena()");
+ ABSL_RAW_CHECK(size % arena->pagesize == 0,
+ "empty arena has non-page-aligned block size");
+ ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
+ "empty arena has non-page-aligned block");
+ int munmap_result;
+#ifdef _WIN32
+ munmap_result = VirtualFree(region, 0, MEM_RELEASE);
+ ABSL_RAW_CHECK(munmap_result != 0,
+                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
+ munmap_result = munmap(region, size);
+ } else {
+ munmap_result = base_internal::DirectMunmap(region, size);
+ }
+#else
+ munmap_result = munmap(region, size);
+#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if (munmap_result != 0) {
+ ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
+ errno);
+ }
+#endif // _WIN32
+ }
+ section.Leave();
+ arena->~Arena();
+ Free(arena);
+ return true;
+}
+
+// ---------------------------------------------------------------------------
+
+// Addition, checking for overflow. The intent is to die if an external client
+// manages to push through a request that would cause arithmetic to fail.
+static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
+ uintptr_t sum = a + b;
+ ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
+ return sum;
+}
+
+// Return value rounded up to next multiple of align.
+// align must be a power of two.
+static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
+ return CheckedAdd(addr, align - 1) & ~(align - 1);
+}
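+// For example, RoundUp(42, 16) == 48, and RoundUp(48, 16) == 48.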
+
+// Equivalent to "return prev->next[i]" but with sanity checking
+// that the freelist is in the correct order, that it
+// consists of regions marked "unallocated", and that no two regions
+// are adjacent in memory (they should have been coalesced).
+// L >= arena->mu
+static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
+ ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
+ AllocList *next = prev->next[i];
+ if (next != nullptr) {
+ ABSL_RAW_CHECK(
+ next->header.magic == Magic(kMagicUnallocated, &next->header),
+ "bad magic number in Next()");
+ ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
+ if (prev != &arena->freelist) {
+ ABSL_RAW_CHECK(prev < next, "unordered freelist");
+ ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
+ reinterpret_cast<char *>(next),
+ "malformed freelist");
+ }
+ }
+ return next;
+}
+
+// Coalesce list item "a" with its successor if they are adjacent.
+static void Coalesce(AllocList *a) {
+ AllocList *n = a->next[0];
+ if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
+ reinterpret_cast<char *>(n)) {
+ LowLevelAlloc::Arena *arena = a->header.arena;
+ a->header.size += n->header.size;
+ n->header.magic = 0;
+ n->header.arena = nullptr;
+ AllocList *prev[kMaxLevel];
+ LLA_SkiplistDelete(&arena->freelist, n, prev);
+ LLA_SkiplistDelete(&arena->freelist, a, prev);
+ a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
+ &arena->random);
+ LLA_SkiplistInsert(&arena->freelist, a, prev);
+ }
+}
+
+// Adds block at location "v" to the free list
+// L >= arena->mu
+static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
+ AllocList *f = reinterpret_cast<AllocList *>(
+ reinterpret_cast<char *>(v) - sizeof (f->header));
+ ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+ "bad magic number in AddToFreelist()");
+ ABSL_RAW_CHECK(f->header.arena == arena,
+ "bad arena pointer in AddToFreelist()");
+ f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
+ &arena->random);
+ AllocList *prev[kMaxLevel];
+ LLA_SkiplistInsert(&arena->freelist, f, prev);
+ f->header.magic = Magic(kMagicUnallocated, &f->header);
+ Coalesce(f); // maybe coalesce with successor
+ Coalesce(prev[0]); // maybe coalesce with predecessor
+}
+
+// Frees storage allocated by LowLevelAlloc::Alloc().
+// L < arena->mu
+void LowLevelAlloc::Free(void *v) {
+ if (v != nullptr) {
+ AllocList *f = reinterpret_cast<AllocList *>(
+ reinterpret_cast<char *>(v) - sizeof (f->header));
+ LowLevelAlloc::Arena *arena = f->header.arena;
+ ArenaLock section(arena);
+ AddToFreelist(v, arena);
+ ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
+ arena->allocation_count--;
+ section.Leave();
+ }
+}
+
+// allocates and returns a block of size bytes, to be freed with Free()
+// L < arena->mu
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+ void *result = nullptr;
+ if (request != 0) {
+ AllocList *s; // will point to region that satisfies request
+ ArenaLock section(arena);
+ // round up with header
+ size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
+ arena->round_up);
+ for (;;) { // loop until we find a suitable region
+ // find the minimum levels that a block of this size must have
+ int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
+ if (i < arena->freelist.levels) { // potential blocks exist
+ AllocList *before = &arena->freelist; // predecessor of s
+ while ((s = Next(i, before, arena)) != nullptr &&
+ s->header.size < req_rnd) {
+ before = s;
+ }
+ if (s != nullptr) { // we found a region
+ break;
+ }
+ }
+ // we unlock before mmap() both because mmap() may call a callback hook,
+ // and because it may be slow.
+ arena->mu.Unlock();
+ // mmap generous 64K chunks to decrease
+ // the chances/impact of fragmentation:
+ size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
+ void *new_pages;
+#ifdef _WIN32
+ new_pages = VirtualAlloc(0, new_pages_size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+ new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
+ PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ } else {
+ new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ }
+#else
+ new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if (new_pages == MAP_FAILED) {
+ ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
+ }
+
+#endif // _WIN32
+ arena->mu.Lock();
+ s = reinterpret_cast<AllocList *>(new_pages);
+ s->header.size = new_pages_size;
+ // Pretend the block is allocated; call AddToFreelist() to free it.
+ s->header.magic = Magic(kMagicAllocated, &s->header);
+ s->header.arena = arena;
+ AddToFreelist(&s->levels, arena); // insert new region into free list
+ }
+ AllocList *prev[kMaxLevel];
+ LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
+ // s points to the first free region that's big enough
+ if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
+ // big enough to split
+ AllocList *n = reinterpret_cast<AllocList *>
+ (req_rnd + reinterpret_cast<char *>(s));
+ n->header.size = s->header.size - req_rnd;
+ n->header.magic = Magic(kMagicAllocated, &n->header);
+ n->header.arena = arena;
+ s->header.size = req_rnd;
+ AddToFreelist(&n->levels, arena);
+ }
+ s->header.magic = Magic(kMagicAllocated, &s->header);
+ ABSL_RAW_CHECK(s->header.arena == arena, "");
+ arena->allocation_count++;
+ section.Leave();
+ result = &s->levels;
+ }
+ ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+ return result;
+}
+
+void *LowLevelAlloc::Alloc(size_t request) {
+ void *result = DoAllocWithArena(request, DefaultArena());
+ return result;
+}
+
+void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
+ ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
+ void *result = DoAllocWithArena(request, arena);
+ return result;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.h
new file mode 100644
index 00000000000..fa109d53ec9
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.h
@@ -0,0 +1,126 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+
+// A simple thread-safe memory allocator that does not depend on
+// mutexes or thread-specific data. It is intended to be used
+// sparingly, and only when malloc() would introduce an unwanted
+// dependency, such as inside the heap-checker or the Mutex
+// implementation.
+
+// IWYU pragma: private, include "base/low_level_alloc.h"
+
+#include <sys/types.h>
+
+#include <cstdint>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
+#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
+
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or
+// asm.js / WebAssembly.
+// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+// for more information.
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <cstddef>
+
+#include "y_absl/base/port.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class LowLevelAlloc {
+ public:
+ struct Arena; // an arena from which memory may be allocated
+
+ // Returns a pointer to a block of at least "request" bytes
+  // that has been newly allocated from the specified arena;
+  // for Alloc(), the DefaultArena() is used.
+ // Returns 0 if passed request==0.
+ // Does not return 0 under other circumstances; it crashes if memory
+ // is not available.
+ static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+ static void *AllocWithArena(size_t request, Arena *arena)
+ ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+ // Deallocates a region of memory that was previously allocated with
+ // Alloc(). Does nothing if passed 0. "s" must be either 0,
+ // or must have been returned from a call to Alloc() and not yet passed to
+ // Free() since that call to Alloc(). The space is returned to the arena
+ // from which it was allocated.
+ static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // The ABSL_ATTRIBUTE_SECTION(malloc_hook) attribute on Alloc* and Free
+  // puts all callers of MallocHook::Invoke* in this module into a special
+  // section, so that MallocHook::GetCallerStackTrace can function accurately.
+
+ // Create a new arena.
+  // The root metadata for the new arena is allocated in a metadata arena
+  // selected based on the flags; by default, the DefaultArena() is used.
+  // These values may be OR'ed into flags:
+ enum {
+ // Report calls to Alloc() and Free() via the MallocHook interface.
+ // Set in the DefaultArena.
+ kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ // Make calls to Alloc(), Free() be async-signal-safe. Not set in
+ // DefaultArena(). Not supported on all platforms.
+ kAsyncSignalSafe = 0x0002,
+#endif
+ };
+ // Construct a new arena. The allocation of the underlying metadata honors
+ // the provided flags. For example, the call NewArena(kAsyncSignalSafe)
+  // is itself async-signal-safe and also generates an arena that provides
+ // async-signal-safe Alloc/Free.
+ static Arena *NewArena(int32_t flags);
+
+ // Destroys an arena allocated by NewArena and returns true,
+ // provided no allocated blocks remain in the arena.
+ // If allocated blocks remain in the arena, does nothing and
+ // returns false.
+ // It is illegal to attempt to destroy the DefaultArena().
+ static bool DeleteArena(Arena *arena);
+
+ // The default arena that always exists.
+ static Arena *DefaultArena();
+
+ private:
+ LowLevelAlloc(); // no instances
+};
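+
+// Example lifecycle sketch (illustrative):
+//
+//   LowLevelAlloc::Arena *arena =
+//       LowLevelAlloc::NewArena(LowLevelAlloc::kCallMallocHook);
+//   void *block = LowLevelAlloc::AllocWithArena(128, arena);
+//   LowLevelAlloc::Free(block);
+//   bool deleted = LowLevelAlloc::DeleteArena(arena);  // true: no blocks left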
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc/ya.make
new file mode 100644
index 00000000000..fabba746775
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc/ya.make
@@ -0,0 +1,33 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/base/internal)
+
+SRCS(
+ low_level_alloc.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_scheduling.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_scheduling.h
new file mode 100644
index 00000000000..61eb4ac6434
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_scheduling.h
@@ -0,0 +1,134 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/scheduling_mode.h"
+#include "y_absl/base/macros.h"
+
+// The following two declarations exist so SchedulingGuard may friend them with
+// the appropriate language linkage. These callbacks allow libc internals, such
+// as function level statics, to schedule cooperatively when locking.
+extern "C" bool __google_disable_rescheduling(void);
+extern "C" void __google_enable_rescheduling(bool disable_result);
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+class CondVar;
+class Mutex;
+
+namespace synchronization_internal {
+int MutexDelay(int32_t c, int mode);
+} // namespace synchronization_internal
+
+namespace base_internal {
+
+class SchedulingHelper; // To allow use of SchedulingGuard.
+class SpinLock; // To allow use of SchedulingGuard.
+
+// SchedulingGuard
+// Provides guard semantics that may be used to disable cooperative rescheduling
+// of the calling thread within specific program blocks. This is used to
+// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
+// scheduling depends on.
+//
+// Domain implementations capable of rescheduling in reaction to involuntary
+// kernel thread actions (e.g. blocking due to a page fault or syscall) must
+// guarantee that an annotated thread is not allowed to (cooperatively)
+// reschedule until the annotated region is complete.
+//
+// It is an error to attempt to use a cooperatively scheduled resource (e.g.
+// Mutex) within a rescheduling-disabled region.
+//
+// All methods are async-signal safe.
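+//
+// A minimal sketch of the intended (internal) usage, via the scoped helper
+// defined below:
+//
+//   {
+//     SchedulingGuard::ScopedDisable disable;  // rescheduling off
+//     // ... touch resources that must not cooperatively reschedule ...
+//   }  // rescheduling restored on scope exit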
+class SchedulingGuard {
+ public:
+ // Returns true iff the calling thread may be cooperatively rescheduled.
+ static bool ReschedulingIsAllowed();
+ SchedulingGuard(const SchedulingGuard&) = delete;
+ SchedulingGuard& operator=(const SchedulingGuard&) = delete;
+
+ private:
+  // Disables cooperative rescheduling of the calling thread. The thread may
+  // still initiate scheduling operations (e.g. wake-ups), but it may not
+  // itself reschedule. Nestable. The returned result is opaque; clients
+  // should not attempt to interpret it.
+  // REQUIRES: Result must be passed to a pairing EnableRescheduling().
+ static bool DisableRescheduling();
+
+ // Marks the end of a rescheduling disabled region, previously started by
+ // DisableRescheduling().
+ // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
+ static void EnableRescheduling(bool disable_result);
+
+ // A scoped helper for {Disable, Enable}Rescheduling().
+ // REQUIRES: destructor must run in same thread as constructor.
+ struct ScopedDisable {
+ ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
+ ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
+
+ bool disabled;
+ };
+
+ // A scoped helper to enable rescheduling temporarily.
+ // REQUIRES: destructor must run in same thread as constructor.
+ class ScopedEnable {
+ public:
+ ScopedEnable();
+ ~ScopedEnable();
+
+ private:
+ int scheduling_disabled_depth_;
+ };
+
+ // Access to SchedulingGuard is explicitly permitted.
+ friend class y_absl::CondVar;
+ friend class y_absl::Mutex;
+ friend class SchedulingHelper;
+ friend class SpinLock;
+ friend int y_absl::synchronization_internal::MutexDelay(int32_t c, int mode);
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+//------------------------------------------------------------------------------
+
+inline bool SchedulingGuard::ReschedulingIsAllowed() {
+ return false;
+}
+
+inline bool SchedulingGuard::DisableRescheduling() {
+ return false;
+}
+
+inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
+ return;
+}
+
+inline SchedulingGuard::ScopedEnable::ScopedEnable()
+ : scheduling_disabled_depth_(0) {}
+inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
+ ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning");
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/per_thread_tls.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/per_thread_tls.h
new file mode 100644
index 00000000000..943f72a433d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/per_thread_tls.h
@@ -0,0 +1,52 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+
+// This header defines two macros:
+//
+// If the platform supports thread-local storage:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
+// thread-local variable
+// * ABSL_PER_THREAD_TLS is 1
+//
+// Otherwise:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is empty
+// * ABSL_PER_THREAD_TLS is 0
+//
+// Microsoft C supports thread-local storage.
+// GCC supports it if the appropriate version of glibc is available,
+// which the programmer can indicate by defining ABSL_HAVE_TLS.
+
+#include "y_absl/base/port.h" // For ABSL_HAVE_TLS
+
+#if defined(ABSL_PER_THREAD_TLS)
+#error ABSL_PER_THREAD_TLS cannot be directly set
+#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
+#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
+#elif defined(ABSL_HAVE_TLS)
+#define ABSL_PER_THREAD_TLS_KEYWORD __thread
+#define ABSL_PER_THREAD_TLS 1
+#elif defined(_MSC_VER)
+#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
+#define ABSL_PER_THREAD_TLS 1
+#else
+#define ABSL_PER_THREAD_TLS_KEYWORD
+#define ABSL_PER_THREAD_TLS 0
+#endif
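+
+// Usage sketch (illustrative): a counter that is per-thread when TLS is
+// available and an ordinary global otherwise:
+//
+//   ABSL_PER_THREAD_TLS_KEYWORD int counter = 0;
+//   #if !ABSL_PER_THREAD_TLS
+//   // `counter` is shared across threads here; guard it with a lock.
+//   #endif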
+
+#endif // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/pretty_function.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/pretty_function.h
new file mode 100644
index 00000000000..35d51676dc2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/pretty_function.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function. That
+// is, "main", not "int main()". Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets. ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
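+//
+// For example, inside `int main()`, __func__ yields "main" while
+// ABSL_PRETTY_FUNCTION yields a decorated spelling such as "int main()"
+// (the exact text is compiler-specific).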
+#if defined(_MSC_VER)
+#define ABSL_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__)
+#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#error "Unsupported compiler"
+#endif
+
+#endif // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
new file mode 100644
index 00000000000..ea9a48c2c06
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
@@ -0,0 +1,242 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/internal/raw_logging.h"
+
+#include <stddef.h>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/atomic_hook.h"
+#include "y_absl/base/log_severity.h"
+
+// We know how to perform low-level writes to stderr in POSIX and Windows. For
+// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
+// Much of raw_logging.cc becomes a no-op when we can't output messages,
+// although a FATAL ABSL_RAW_LOG message will still abort the process.
+
+// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
+// (as from unistd.h)
+//
+// This preprocessor token is also defined in raw_io.cc. If you need to copy
+// this, consider moving both to config.h instead.
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+ defined(__Fuchsia__) || defined(__native_client__) || \
+ defined(__EMSCRIPTEN__) || defined(__ASYLO__)
+
+#include <unistd.h>
+
+#define ABSL_HAVE_POSIX_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_POSIX_WRITE
+#endif
+
+// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
+// syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
+// for low level operations that want to avoid libc.
+#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
+#include <sys/syscall.h>
+#define ABSL_HAVE_SYSCALL_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_SYSCALL_WRITE
+#endif
+
+#ifdef _WIN32
+#include <io.h>
+
+#define ABSL_HAVE_RAW_IO 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_RAW_IO
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_logging_internal {
+namespace {
+
+// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
+// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
+// a selected set of platforms for which we expect not to be able to raw log.
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ y_absl::base_internal::AtomicHook<LogPrefixHook>
+ log_prefix_hook;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ y_absl::base_internal::AtomicHook<AbortHook>
+ abort_hook;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+constexpr char kTruncated[] = " ... (message truncated)\n";
+
+// Formats the message into the buffer with vsnprintf, adjusting *buf and
+// *size to reflect the consumed bytes, and returns whether the message fit
+// without truncation. If truncation occurred, if possible leaves room in the
+// buffer for the message kTruncated[].
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
+ ABSL_PRINTF_ATTRIBUTE(3, 0);
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
+ int n = vsnprintf(*buf, *size, format, ap);
+ bool result = true;
+ if (n < 0 || n > *size) {
+ result = false;
+ if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
+ n = *size - sizeof(kTruncated); // room for truncation message
+ } else {
+ n = 0; // no room for truncation message
+ }
+ }
+ *size -= n;
+ *buf += n;
+ return result;
+}
+#endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED
+
+constexpr int kLogBufSize = 3000;
+
+// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
+// that invoke malloc() and getenv() that might acquire some locks.
+
+// Helper for RawLog below.
+// *DoRawLog writes to *buf (which has *size bytes of space) and moves both
+// past the written portion.
+// It returns true iff there was no overflow or error.
+bool DoRawLog(char** buf, int* size, const char* format, ...)
+ ABSL_PRINTF_ATTRIBUTE(3, 4);
+bool DoRawLog(char** buf, int* size, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ int n = vsnprintf(*buf, *size, format, ap);
+ va_end(ap);
+ if (n < 0 || n > *size) return false;
+ *size -= n;
+ *buf += n;
+ return true;
+}
+
+void RawLogVA(y_absl::LogSeverity severity, const char* file, int line,
+ const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
+void RawLogVA(y_absl::LogSeverity severity, const char* file, int line,
+ const char* format, va_list ap) {
+ char buffer[kLogBufSize];
+ char* buf = buffer;
+ int size = sizeof(buffer);
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+ bool enabled = true;
+#else
+ bool enabled = false;
+#endif
+
+#ifdef ABSL_MIN_LOG_LEVEL
+ if (severity < static_cast<y_absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
+ severity < y_absl::LogSeverity::kFatal) {
+ enabled = false;
+ }
+#endif
+
+ auto log_prefix_hook_ptr = log_prefix_hook.Load();
+ if (log_prefix_hook_ptr) {
+ enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
+ } else {
+ if (enabled) {
+ DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
+ }
+ }
+ const char* const prefix_end = buf;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+ if (enabled) {
+ bool no_chop = VADoRawLog(&buf, &size, format, ap);
+ if (no_chop) {
+ DoRawLog(&buf, &size, "\n");
+ } else {
+ DoRawLog(&buf, &size, "%s", kTruncated);
+ }
+ SafeWriteToStderr(buffer, strlen(buffer));
+ }
+#else
+ static_cast<void>(format);
+ static_cast<void>(ap);
+#endif
+
+ // Abort the process after logging a FATAL message, even if the output itself
+ // was suppressed.
+ if (severity == y_absl::LogSeverity::kFatal) {
+ abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
+ abort();
+ }
+}
+
+// Non-formatting version of RawLog().
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+void DefaultInternalLog(y_absl::LogSeverity severity, const char* file, int line,
+ const TString& message) {
+ RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
+ message.data());
+}
+
+} // namespace
+
+void SafeWriteToStderr(const char *s, size_t len) {
+#if defined(ABSL_HAVE_SYSCALL_WRITE)
+ syscall(SYS_write, STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_POSIX_WRITE)
+ write(STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_RAW_IO)
+ _write(/* stderr */ 2, s, len);
+#else
+ // stderr logging unsupported on this platform
+ (void) s;
+ (void) len;
+#endif
+}
+
+void RawLog(y_absl::LogSeverity severity, const char* file, int line,
+ const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ RawLogVA(severity, file, line, format, ap);
+ va_end(ap);
+}
+
+bool RawLoggingFullySupported() {
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+ return true;
+#else // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+ return false;
+#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+}
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
+ y_absl::base_internal::AtomicHook<InternalLogFunction>
+ internal_log_function(DefaultInternalLog);
+
+void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); }
+
+void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
+
+void RegisterInternalLogFunction(InternalLogFunction func) {
+ internal_log_function.Store(func);
+}
+
+} // namespace raw_logging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
new file mode 100644
index 00000000000..4d5c77003f8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
@@ -0,0 +1,195 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation, synchronization, and signal-handling code.
+
+#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/atomic_hook.h"
+#include "y_absl/base/log_severity.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/base/port.h"
+
+// This is similar to LOG(severity) << format..., but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+// memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit printf-format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+// ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// This will print an almost standard log line like this to stderr only:
+// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+
+#define ABSL_RAW_LOG(severity, ...) \
+ do { \
+ constexpr const char* absl_raw_logging_internal_basename = \
+ ::y_absl::raw_logging_internal::Basename(__FILE__, \
+ sizeof(__FILE__) - 1); \
+ ::y_absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
+ absl_raw_logging_internal_basename, \
+ __LINE__, __VA_ARGS__); \
+ } while (0)
+
+// Similar to CHECK(condition) << message, but for low-level modules:
+// we use only ABSL_RAW_LOG that does not allocate memory.
+// We do not want to provide args list here to encourage this usage:
+// if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+// so that the args are not computed when not needed.
+#define ABSL_RAW_CHECK(condition, message) \
+ do { \
+ if (ABSL_PREDICT_FALSE(!(condition))) { \
+ ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+ } \
+ } while (0)
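+
+// Example:
+//   ABSL_RAW_CHECK(fd >= 0, "open() failed");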
+
+// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above,
+// except that if the richer log library is linked into the binary, we dispatch
+// to that instead. This is potentially useful for internal logging and
+// assertions, where we are using RAW_LOG neither for its async-signal-safety
+// nor for its non-allocating nature, but rather because raw logging has very
+// few other dependencies.
+//
+// The API is a subset of the above: each macro only takes two arguments. Use
+// StrCat if you need to build a richer message.
+#define ABSL_INTERNAL_LOG(severity, message) \
+ do { \
+ constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
+ ::y_absl::raw_logging_internal::internal_log_function( \
+ ABSL_RAW_LOGGING_INTERNAL_##severity, \
+ absl_raw_logging_internal_filename, __LINE__, message); \
+ if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::y_absl::LogSeverity::kFatal) \
+ ABSL_INTERNAL_UNREACHABLE; \
+ } while (0)
+
+#define ABSL_INTERNAL_CHECK(condition, message) \
+ do { \
+ if (ABSL_PREDICT_FALSE(!(condition))) { \
+ TString death_message = "Check " #condition " failed: "; \
+ death_message += TString(message); \
+ ABSL_INTERNAL_LOG(FATAL, death_message); \
+ } \
+ } while (0)
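+
+// Examples (illustrative):
+//   ABSL_INTERNAL_LOG(WARNING, "Table resized during iteration");
+//   ABSL_INTERNAL_CHECK(ptr != nullptr, "ptr must not be null");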
+
+#define ABSL_RAW_LOGGING_INTERNAL_INFO ::y_absl::LogSeverity::kInfo
+#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::y_absl::LogSeverity::kWarning
+#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::y_absl::LogSeverity::kError
+#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::y_absl::LogSeverity::kFatal
+#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \
+ ::y_absl::NormalizeLogSeverity(severity)
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_logging_internal {
+
+// Helper function to implement ABSL_RAW_LOG
+// Logs format... at "severity" level, reporting it
+// as called from file:line.
+// This does not allocate memory or acquire locks.
+void RawLog(y_absl::LogSeverity severity, const char* file, int line,
+ const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
+
+// Writes the provided buffer directly to stderr, in a safe, low-level manner.
+//
+// In POSIX this means calling write(), which is async-signal safe and does
+// not malloc. If the platform supports the SYS_write syscall, we invoke that
+// directly to side-step any libc interception.
+void SafeWriteToStderr(const char *s, size_t len);
+
+// compile-time function to get the "base" filename, that is, the part of
+// a filename after the last "/" or "\" path separator. The search starts at
+// the end of the string; the second parameter is the length of the string.
+constexpr const char* Basename(const char* fname, int offset) {
+ return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\'
+ ? fname + offset
+ : Basename(fname, offset - 1);
+}
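+
+// For example, Basename("a/b/c.cc", 8) yields "c.cc", and Basename("c.cc", 4)
+// yields "c.cc" unchanged.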
+
+// For testing only.
+// Returns true if raw logging is fully supported. When it is not
+// fully supported, no messages will be emitted, but a log at FATAL
+// severity will cause an abort.
+//
+// TODO(gfalcon): Come up with a better name for this method.
+bool RawLoggingFullySupported();
+
+// Function type for a raw_logging customization hook for suppressing messages
+// by severity, and for writing custom prefixes on non-suppressed messages.
+//
+// The installed hook is called for every raw log invocation. The message will
+// be logged to stderr only if the hook returns true. FATAL errors will cause
+// the process to abort, even if writing to stderr is suppressed. The hook is
+// also provided with an output buffer, where it can write a custom log message
+// prefix.
+//
+// The raw_logging system does not allocate memory or grab locks. User-provided
+// hooks must avoid these operations, and must not throw exceptions.
+//
+// 'severity' is the severity level of the message being written.
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the
+// hook writes a prefix, it must increment *buffer and decrement *buf_size
+// accordingly.
+using LogPrefixHook = bool (*)(y_absl::LogSeverity severity, const char* file,
+ int line, char** buffer, int* buf_size);
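+
+// A hook that drops messages below WARNING might look like this (sketch;
+// `MyPrefixHook` is a hypothetical name):
+//
+//   bool MyPrefixHook(y_absl::LogSeverity severity, const char* /*file*/,
+//                     int /*line*/, char** /*buf*/, int* /*buf_size*/) {
+//     return severity >= y_absl::LogSeverity::kWarning;
+//   }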
+
+// Function type for a raw_logging customization hook called to abort a process
+// when a FATAL message is logged. If the provided AbortHook() returns, the
+// logging system will call abort().
+//
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// The NUL-terminated logged message lives in the buffer between 'buf_start'
+// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the
+// buffer (as written by the LogPrefixHook).
+using AbortHook = void (*)(const char* file, int line, const char* buf_start,
+ const char* prefix_end, const char* buf_end);
+
+// Internal logging function for ABSL_INTERNAL_LOG to dispatch to.
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+using InternalLogFunction = void (*)(y_absl::LogSeverity severity,
+ const char* file, int line,
+ const TString& message);
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
+ InternalLogFunction>
+ internal_log_function;
+
+// Registers hooks of the above types. Only a single hook of each type may be
+// registered. It is an error to call these functions multiple times with
+// different input arguments.
+//
+// These functions are safe to call at any point during initialization; they do
+// not block or malloc, and are async-signal safe.
+void RegisterLogPrefixHook(LogPrefixHook func);
+void RegisterAbortHook(AbortHook func);
+void RegisterInternalLogFunction(InternalLogFunction func);
+
+} // namespace raw_logging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging/ya.make
new file mode 100644
index 00000000000..e7cfe7d2169
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging/ya.make
@@ -0,0 +1,30 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/base/internal)
+
+SRCS(
+ raw_logging.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scheduling_mode.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scheduling_mode.h
new file mode 100644
index 00000000000..0165e7233b8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scheduling_mode.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled. Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics); allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled. This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type. Host operating system
+// semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
+// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which
+// base::scheduling (e.g. the implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+ SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS.
+ SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling.
+};
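+
+// For example, a SpinLock that the scheduler implementation itself depends on
+// would be declared non-cooperative (sketch):
+//
+//   SpinLock lock(base_internal::SCHEDULE_KERNEL_ONLY);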
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scoped_set_env.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scoped_set_env.cc
new file mode 100644
index 00000000000..e489272b45b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scoped_set_env.cc
@@ -0,0 +1,81 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/internal/scoped_set_env.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include <cstdlib>
+
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+
+#ifdef _WIN32
+const int kMaxEnvVarValueSize = 1024;
+#endif
+
+void SetEnvVar(const char* name, const char* value) {
+#ifdef _WIN32
+ SetEnvironmentVariableA(name, value);
+#else
+ if (value == nullptr) {
+ ::unsetenv(name);
+ } else {
+ ::setenv(name, value, 1);
+ }
+#endif
+}
+
+} // namespace
+
+ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value)
+ : var_name_(var_name), was_unset_(false) {
+#ifdef _WIN32
+ char buf[kMaxEnvVarValueSize];
+ auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf));
+ ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size");
+
+ if (get_res == 0) {
+ was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND);
+ } else {
+ old_value_.assign(buf, get_res);
+ }
+
+ SetEnvironmentVariableA(var_name_.c_str(), new_value);
+#else
+ const char* val = ::getenv(var_name_.c_str());
+ if (val == nullptr) {
+ was_unset_ = true;
+ } else {
+ old_value_ = val;
+ }
+#endif
+
+ SetEnvVar(var_name_.c_str(), new_value);
+}
+
+ScopedSetEnv::~ScopedSetEnv() {
+ SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str());
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scoped_set_env.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scoped_set_env.h
new file mode 100644
index 00000000000..5641562f1ba
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scoped_set_env.h
@@ -0,0 +1,45 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ScopedSetEnv {
+ public:
+ ScopedSetEnv(const char* var_name, const char* new_value);
+ ~ScopedSetEnv();
+
+ private:
+ TString var_name_;
+ TString old_value_;
+
+ // True if the environment variable was initially not set.
+ bool was_unset_;
+};
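+
+// Usage sketch (typically in tests):
+//
+//   {
+//     ScopedSetEnv env("MY_FLAG", "1");  // MY_FLAG=1 for this scope
+//     // ... code that reads MY_FLAG ...
+//   }  // MY_FLAG restored to its prior value, or unset, on scope exit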
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc
new file mode 100644
index 00000000000..2ee7cde432f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc
@@ -0,0 +1,229 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/internal/spinlock.h"
+
+#include <algorithm>
+#include <atomic>
+#include <limits>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/atomic_hook.h"
+#include "y_absl/base/internal/cycleclock.h"
+#include "y_absl/base/internal/spinlock_wait.h"
+#include "y_absl/base/internal/sysinfo.h" /* For NumCPUs() */
+#include "y_absl/base/call_once.h"
+
+// Description of lock-word:
+// 31..00: [............................3][2][1][0]
+//
+// [0]: kSpinLockHeld
+// [1]: kSpinLockCooperative
+// [2]: kSpinLockDisabledScheduling
+// [31..3]: ONLY kSpinLockSleeper OR
+// Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
+//
+// Detailed descriptions:
+//
+// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
+//
+// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
+// contended iff kSpinLockCooperative is set.
+//
+// Bit [2]: This bit is mutually exclusive with bit [1]. It is used only by a
+// non-cooperative lock. When set, indicates that scheduling was
+// successfully disabled when the lock was acquired. May be unset,
+// even if non-cooperative, if a ThreadIdentity did not yet exist at
+// time of acquisition.
+//
+// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
+// acquired without contention, however, at least one waiter exists.
+//
+// Otherwise, bits [31..3] represent the time spent by the current lock
+// holder to acquire the lock. There may be outstanding waiter(s).
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<void (*)(
+ const void *lock, int64_t wait_cycles)>
+ submit_profile_data;
+
+void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
+ int64_t wait_cycles)) {
+ submit_profile_data.Store(fn);
+}
+
+// Static member variable definitions.
+constexpr uint32_t SpinLock::kSpinLockHeld;
+constexpr uint32_t SpinLock::kSpinLockCooperative;
+constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
+constexpr uint32_t SpinLock::kSpinLockSleeper;
+constexpr uint32_t SpinLock::kWaitTimeMask;
+
+// Uncommon constructors.
+SpinLock::SpinLock(base_internal::SchedulingMode mode)
+ : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
+ ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+}
+
+// Monitor the lock to see if its value changes within some time period
+// (adaptive_spin_count loop iterations). The last value read from the lock
+// is returned from the method.
+uint32_t SpinLock::SpinLoop() {
+ // We are already in the slow path of SpinLock, initialize the
+ // adaptive_spin_count here.
+ ABSL_CONST_INIT static y_absl::once_flag init_adaptive_spin_count;
+ ABSL_CONST_INIT static int adaptive_spin_count = 0;
+ base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
+ adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
+ });
+
+ int c = adaptive_spin_count;
+ uint32_t lock_value;
+ do {
+ lock_value = lockword_.load(std::memory_order_relaxed);
+ } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
+ return lock_value;
+}
+
+void SpinLock::SlowLock() {
+ uint32_t lock_value = SpinLoop();
+ lock_value = TryLockInternal(lock_value, 0);
+ if ((lock_value & kSpinLockHeld) == 0) {
+ return;
+ }
+
+ base_internal::SchedulingMode scheduling_mode;
+ if ((lock_value & kSpinLockCooperative) != 0) {
+ scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+ } else {
+ scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+ }
+
+ // The lock was not obtained initially, so this thread needs to wait for
+ // it. Record the current timestamp in the local variable wait_start_time
+ // so the total wait time can be stored in the lockword once this thread
+ // obtains the lock.
+ int64_t wait_start_time = CycleClock::Now();
+ uint32_t wait_cycles = 0;
+ int lock_wait_call_count = 0;
+ while ((lock_value & kSpinLockHeld) != 0) {
+ // If the lock is currently held, but not marked as having a sleeper, mark
+ // it as having a sleeper.
+ if ((lock_value & kWaitTimeMask) == 0) {
+ // Here, just "mark" that the thread is going to sleep. Don't store the
+ // lock wait time in the lock -- the lock word stores the amount of time
+ // that the current holder waited before acquiring the lock, not the wait
+ // time of any thread currently waiting to acquire it.
+ if (lockword_.compare_exchange_strong(
+ lock_value, lock_value | kSpinLockSleeper,
+ std::memory_order_relaxed, std::memory_order_relaxed)) {
+ // Successfully transitioned to kSpinLockSleeper. Pass
+ // kSpinLockSleeper to the SpinLockWait routine to properly indicate
+ // the last lock_value observed.
+ lock_value |= kSpinLockSleeper;
+ } else if ((lock_value & kSpinLockHeld) == 0) {
+ // Lock is free again, so try and acquire it before sleeping. The
+ // new lock state will be the number of cycles this thread waited if
+ // this thread obtains the lock.
+ lock_value = TryLockInternal(lock_value, wait_cycles);
+ continue; // Skip the delay at the end of the loop.
+ } else if ((lock_value & kWaitTimeMask) == 0) {
+ // The lock is still held, without a waiter being marked, but something
+ // else about the lock word changed, causing our CAS to fail. For
+ // example, a new lock holder may have acquired the lock with
+ // kSpinLockDisabledScheduling set, whereas the previous holder had not
+ // set that flag. In this case, attempt again to mark ourselves as a
+ // waiter.
+ continue;
+ }
+ }
+
+ // SpinLockDelay() calls into fiber scheduler, we need to see
+ // synchronization there to avoid false positives.
+ ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+ // Wait for an OS specific delay.
+ base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
+ scheduling_mode);
+ ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+ // Spin again after returning from the wait routine to give this thread
+ // some chance of obtaining the lock.
+ lock_value = SpinLoop();
+ wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
+ lock_value = TryLockInternal(lock_value, wait_cycles);
+ }
+}
+
+void SpinLock::SlowUnlock(uint32_t lock_value) {
+ base_internal::SpinLockWake(&lockword_,
+ false); // wake waiter if necessary
+
+ // If our acquisition was contended, collect contentionz profile info. We
+ // reserve a unitary wait time to represent that a waiter exists without our
+ // own acquisition having been contended.
+ if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
+ const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
+ ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+ submit_profile_data(this, wait_cycles);
+ ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+ }
+}
+
+// We use the upper 29 bits of the lock word to store the time spent waiting to
+// acquire this lock. This is reported by contentionz profiling. Since the
+// lower bits of the cycle counter wrap very quickly on high-frequency
+// processors we divide to reduce the granularity to 2^kProfileTimestampShift
+// sized units. On a 4 GHz machine this will lose track of wait times greater
+// than (2^29/4 GHz)*128 =~ 17.2 seconds. Such waits should be extremely rare.
+static constexpr int kProfileTimestampShift = 7;
+
+// We currently reserve the lower 3 bits.
+static constexpr int kLockwordReservedShift = 3;
+
+uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
+ int64_t wait_end_time) {
+ static const int64_t kMaxWaitTime =
+ std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
+ int64_t scaled_wait_time =
+ (wait_end_time - wait_start_time) >> kProfileTimestampShift;
+
+ // Return a representation of the time spent waiting that can be stored in
+ // the lock word's upper bits.
+ uint32_t clamped = static_cast<uint32_t>(
+ std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
+
+ if (clamped == 0) {
+ return kSpinLockSleeper; // Just wake waiters, but don't record contention.
+ }
+ // Bump up value if necessary to avoid returning kSpinLockSleeper.
+ const uint32_t kMinWaitTime =
+ kSpinLockSleeper + (1 << kLockwordReservedShift);
+ if (clamped == kSpinLockSleeper) {
+ return kMinWaitTime;
+ }
+ return clamped;
+}
+
+uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+ // Cast to uint32_t first to ensure bits [63:32] are cleared.
+ const uint64_t scaled_wait_time =
+ static_cast<uint32_t>(lock_value & kWaitTimeMask);
+ return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
+}
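+
+// Worked example (illustrative): a wait of 1000000 cycles scales to
+// 1000000 >> 7 = 7812; EncodeWaitCycles() stores 7812 << 3 = 62496 in the
+// lock word, and DecodeWaitCycles() recovers 62496 << (7 - 3) = 999936, the
+// original wait rounded down to a multiple of 2^7 cycles.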
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h
new file mode 100644
index 00000000000..ef88cb52c03
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h
@@ -0,0 +1,248 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Most users requiring mutual exclusion should use Mutex.
+// SpinLock is provided for use in two situations:
+// - for use by Abseil internal code that Mutex itself depends on
+// - for async signal safety (see below)
+
+// SpinLock is async signal safe. If a spinlock is used within a signal
+// handler, all code that acquires the lock must ensure that the signal cannot
+// arrive while they are holding the lock. Typically, this is done by blocking
+// the signal.
+//
+// Threads waiting on a SpinLock may be woken in an arbitrary order.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <atomic>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/const_init.h"
+#include "y_absl/base/dynamic_annotations.h"
+#include "y_absl/base/internal/low_level_scheduling.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/scheduling_mode.h"
+#include "y_absl/base/internal/tsan_mutex_interface.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+#include "y_absl/base/thread_annotations.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ABSL_LOCKABLE SpinLock {
+ public:
+ SpinLock() : lockword_(kSpinLockCooperative) {
+ ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+ }
+
+  // Constructor that allows non-cooperative spinlocks to be created for use
+  // inside thread schedulers. Normal clients should not use it.
+ explicit SpinLock(base_internal::SchedulingMode mode);
+
+ // Constructor for global SpinLock instances. See y_absl/base/const_init.h.
+ constexpr SpinLock(y_absl::ConstInitType, base_internal::SchedulingMode mode)
+ : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
+
+  // For global SpinLock instances, prefer a trivial destructor when possible.
+  // A default but non-trivial destructor in some build configurations causes
+  // an extra static initializer.
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+ ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
+#else
+ ~SpinLock() = default;
+#endif
+
+ // Acquire this SpinLock.
+ inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ if (!TryLockImpl()) {
+ SlowLock();
+ }
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ }
+
+ // Try to acquire this SpinLock without blocking and return true if the
+ // acquisition was successful. If the lock was not acquired, false is
+ // returned. If this SpinLock is free at the time of the call, TryLock
+ // will return true with high probability.
+ inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+ bool res = TryLockImpl();
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
+ 0);
+ return res;
+ }
+
+ // Release this SpinLock, which must be held by the calling thread.
+ inline void Unlock() ABSL_UNLOCK_FUNCTION() {
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+ uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+ lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
+ std::memory_order_release);
+
+ if ((lock_value & kSpinLockDisabledScheduling) != 0) {
+ base_internal::SchedulingGuard::EnableRescheduling(true);
+ }
+ if ((lock_value & kWaitTimeMask) != 0) {
+ // Collect contentionz profile info, and speed the wakeup of any waiter.
+ // The wait_cycles value indicates how long this thread spent waiting
+ // for the lock.
+ SlowUnlock(lock_value);
+ }
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+ }
+
+ // Determine if the lock is held. When the lock is held by the invoking
+ // thread, true will always be returned. Intended to be used as
+ // CHECK(lock.IsHeld()).
+ inline bool IsHeld() const {
+ return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
+ }
+
+ protected:
+ // These should not be exported except for testing.
+
+ // Store number of cycles between wait_start_time and wait_end_time in a
+ // lock value.
+ static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+ int64_t wait_end_time);
+
+ // Extract number of wait cycles in a lock value.
+ static uint64_t DecodeWaitCycles(uint32_t lock_value);
+
+ // Provide access to protected method above. Use for testing only.
+ friend struct SpinLockTest;
+
+ private:
+ // lockword_ is used to store the following:
+ //
+ // bit[0] encodes whether a lock is being held.
+ // bit[1] encodes whether a lock uses cooperative scheduling.
+ // bit[2] encodes whether the current lock holder disabled scheduling when
+ // acquiring the lock. Only set when kSpinLockHeld is also set.
+  // bit[3:31] encodes the time spent waiting for the lock as a 29-bit value.
+ // This is set by the lock holder to indicate how long it waited on
+ // the lock before eventually acquiring it. The number of cycles is
+ // encoded as a 29-bit unsigned int, or in the case that the current
+ // holder did not wait but another waiter is queued, the LSB
+ // (kSpinLockSleeper) is set. The implementation does not explicitly
+ // track the number of queued waiters beyond this. It must always be
+ // assumed that waiters may exist if the current holder was required to
+ // queue.
+ //
+ // Invariant: if the lock is not held, the value is either 0 or
+ // kSpinLockCooperative.
+ static constexpr uint32_t kSpinLockHeld = 1;
+ static constexpr uint32_t kSpinLockCooperative = 2;
+ static constexpr uint32_t kSpinLockDisabledScheduling = 4;
+ static constexpr uint32_t kSpinLockSleeper = 8;
+ // Includes kSpinLockSleeper.
+ static constexpr uint32_t kWaitTimeMask =
+ ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
+
+ // Returns true if the provided scheduling mode is cooperative.
+ static constexpr bool IsCooperative(
+ base_internal::SchedulingMode scheduling_mode) {
+ return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+ }
+
+ uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
+ void SlowLock() ABSL_ATTRIBUTE_COLD;
+ void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
+ uint32_t SpinLoop();
+
+ inline bool TryLockImpl() {
+ uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+ return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
+ }
+
+ std::atomic<uint32_t> lockword_;
+
+ SpinLock(const SpinLock&) = delete;
+ SpinLock& operator=(const SpinLock&) = delete;
+};
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
+class ABSL_SCOPED_LOCKABLE SpinLockHolder {
+ public:
+ inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
+ : lock_(l) {
+ l->Lock();
+ }
+ inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
+
+ SpinLockHolder(const SpinLockHolder&) = delete;
+ SpinLockHolder& operator=(const SpinLockHolder&) = delete;
+
+ private:
+ SpinLock* lock_;
+};
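+
+// Usage sketch:
+//
+//   ABSL_CONST_INIT static y_absl::base_internal::SpinLock lock(
+//       y_absl::kConstInit, y_absl::base_internal::SCHEDULE_KERNEL_ONLY);
+//
+//   void Increment(int* counter) {
+//     y_absl::base_internal::SpinLockHolder h(&lock);  // unlocked at scope exit
+//     ++*counter;
+//   }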
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a spinlock is
+// contended. The callback is given an opaque handle to the contended spinlock
+// and the number of wait cycles. This is thread-safe, but only a single
+// profiler can be registered. It is an error to call this function multiple
+// times with different arguments.
+void RegisterSpinLockProfiler(void (*fn)(const void* lock,
+ int64_t wait_cycles));
+
+//------------------------------------------------------------------------------
+// Public interface ends here.
+//------------------------------------------------------------------------------
+
+// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
+// Otherwise, returns last observed value for lockword_.
+inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
+ uint32_t wait_cycles) {
+ if ((lock_value & kSpinLockHeld) != 0) {
+ return lock_value;
+ }
+
+ uint32_t sched_disabled_bit = 0;
+ if ((lock_value & kSpinLockCooperative) == 0) {
+ // For non-cooperative locks we must make sure we mark ourselves as
+ // non-reschedulable before we attempt to CompareAndSwap.
+ if (base_internal::SchedulingGuard::DisableRescheduling()) {
+ sched_disabled_bit = kSpinLockDisabledScheduling;
+ }
+ }
+
+ if (!lockword_.compare_exchange_strong(
+ lock_value,
+ kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
+ }
+
+ return lock_value;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_SPINLOCK_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_akaros.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_akaros.inc
new file mode 100644
index 00000000000..69955dc7651
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_akaros.inc
@@ -0,0 +1,35 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "y_absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+ int /* loop */, y_absl::base_internal::SchedulingMode /* mode */) {
+ // In Akaros, one must take care not to call anything that could cause a
+ // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock. Our callers assume that we will not call into libraries or
+  // other arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+} // extern "C"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc
new file mode 100644
index 00000000000..5b4480d133b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc
@@ -0,0 +1,74 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Linux-specific part of spinlock_wait.cc
+
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <climits>
+#include <cstdint>
+#include <ctime>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/errno_saver.h"
+
+// The SpinLock lockword is `std::atomic<uint32_t>`. Here we assert that
+// `std::atomic<uint32_t>` is bitwise-equivalent to the `int` expected
+// by SYS_futex. We also assume that reads/writes done to the lockword
+// by SYS_futex have rational semantics with regard to the
+// std::atomic<> API. C++ provides no guarantees of these assumptions,
+// but they are believed to hold in practice.
+static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
+ "SpinLock lockword has the wrong size for a futex");
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ std::atomic<uint32_t> *w, uint32_t value, int loop,
+ y_absl::base_internal::SchedulingMode) {
+ y_absl::base_internal::ErrnoSaver errno_saver;
+ struct timespec tm;
+ tm.tv_sec = 0;
+ tm.tv_nsec = y_absl::base_internal::SpinLockSuggestedDelayNS(loop);
+ syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ std::atomic<uint32_t> *w, bool all) {
+ syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
+}
+
+} // extern "C"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_posix.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_posix.inc
new file mode 100644
index 00000000000..12a9b86599b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_posix.inc
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Posix-specific part of spinlock_wait.cc
+
+#include <sched.h>
+
+#include <atomic>
+#include <ctime>
+
+#include "y_absl/base/internal/errno_saver.h"
+#include "y_absl/base/internal/scheduling_mode.h"
+#include "y_absl/base/port.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+ y_absl::base_internal::SchedulingMode /* mode */) {
+ y_absl::base_internal::ErrnoSaver errno_saver;
+ if (loop == 0) {
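+    // First iteration: return immediately and let the caller keep spinning.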
+ } else if (loop == 1) {
+ sched_yield();
+ } else {
+ struct timespec tm;
+ tm.tv_sec = 0;
+ tm.tv_nsec = y_absl::base_internal::SpinLockSuggestedDelayNS(loop);
+ nanosleep(&tm, nullptr);
+ }
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+} // extern "C"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.cc
new file mode 100644
index 00000000000..138cb3c5f07
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.cc
@@ -0,0 +1,81 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The OS-specific header included below must provide two calls:
+// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake().
+// See spinlock_wait.h for the specs.
+
+#include <atomic>
+#include <cstdint>
+
+#include "y_absl/base/internal/spinlock_wait.h"
+
+#if defined(_WIN32)
+#include "y_absl/base/internal/spinlock_win32.inc"
+#elif defined(__linux__)
+#include "y_absl/base/internal/spinlock_linux.inc"
+#elif defined(__akaros__)
+#include "y_absl/base/internal/spinlock_akaros.inc"
+#else
+#include "y_absl/base/internal/spinlock_posix.inc"
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// See spinlock_wait.h for spec.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+ const SpinLockWaitTransition trans[],
+ base_internal::SchedulingMode scheduling_mode) {
+ int loop = 0;
+ for (;;) {
+ uint32_t v = w->load(std::memory_order_acquire);
+ int i;
+ for (i = 0; i != n && v != trans[i].from; i++) {
+ }
+ if (i == n) {
+ SpinLockDelay(w, v, ++loop, scheduling_mode); // no matching transition
+ } else if (trans[i].to == v || // null transition
+ w->compare_exchange_strong(v, trans[i].to,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ if (trans[i].done) return v;
+ }
+ }
+}
+
+static std::atomic<uint64_t> delay_rand;
+
+// Return a suggested delay in nanoseconds for iteration number "loop"
+int SpinLockSuggestedDelayNS(int loop) {
+ // Weak pseudo-random number generator to get some spread between threads
+ // when many are spinning.
+ uint64_t r = delay_rand.load(std::memory_order_relaxed);
+ r = 0x5deece66dLL * r + 0xb; // numbers from nrand48()
+ delay_rand.store(r, std::memory_order_relaxed);
+
+ if (loop < 0 || loop > 32) { // limit loop to 0..32
+ loop = 32;
+ }
+ const int kMinDelay = 128 << 10; // 128us
+ // Double delay every 8 iterations, up to 16x (2ms).
+ int delay = kMinDelay << (loop / 8);
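+  // For example, loop == 16 gives delay = kMinDelay << 2, roughly 512us.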
+ // Randomize in delay..2*delay range, for resulting 128us..4ms range.
+ return delay | ((delay - 1) & static_cast<int>(r));
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.h
new file mode 100644
index 00000000000..2e34d7026be
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.h
@@ -0,0 +1,95 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+
+// Operations to make atomic transitions on a word, and to allow
+// waiting for those transitions to become possible.
+
+#include <stdint.h>
+#include <atomic>
+
+#include "y_absl/base/internal/scheduling_mode.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// SpinLockWait() waits until it can perform one of several transitions from
+// "from" to "to". It returns when it performs a transition where done==true.
+struct SpinLockWaitTransition {
+ uint32_t from;
+ uint32_t to;
+ bool done;
+};
+
+// Wait until *w can transition from trans[i].from to trans[i].to for some i
+// satisfying 0<=i<n && trans[i].done, atomically make the transition,
+// then return the old value of *w. Make any other atomic transitions
+// where !trans[i].done, but continue waiting.
+//
+// Wakeups for threads blocked on SpinLockWait do not respect priorities.
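+//
+// A sketch of a typical use (kUnlocked, kLocked and kLockedWithWaiter are
+// illustrative lock-word values, not part of this API):
+//
+//   static const SpinLockWaitTransition trans[] = {
+//       {kUnlocked, kLocked, true},           // acquired; stop waiting
+//       {kLocked, kLockedWithWaiter, false},  // note contention; keep waiting
+//   };
+//   uint32_t old = SpinLockWait(w, 2, trans, SCHEDULE_KERNEL_ONLY);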
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+ const SpinLockWaitTransition trans[],
+ SchedulingMode scheduling_mode);
+
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
+void SpinLockWake(std::atomic<uint32_t> *w, bool all);
+
+// Wait for an appropriate spin delay on iteration "loop" of a
+// spin loop on location *w, whose previously observed value was "value".
+// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
+// or may wait for a call to SpinLockWake(w).
+void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
+ base_internal::SchedulingMode scheduling_mode);
+
+// Helper used by AbslInternalSpinLockDelay.
+// Returns a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop);
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
+ bool all);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ std::atomic<uint32_t> *w, uint32_t value, int loop,
+ y_absl::base_internal::SchedulingMode scheduling_mode);
+}
+
+inline void y_absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
+ bool all) {
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
+}
+
+inline void y_absl::base_internal::SpinLockDelay(
+ std::atomic<uint32_t> *w, uint32_t value, int loop,
+ y_absl::base_internal::SchedulingMode scheduling_mode) {
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
+ (w, value, loop, scheduling_mode);
+}
+
+#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait/ya.make
new file mode 100644
index 00000000000..902ffe394f7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait/ya.make
@@ -0,0 +1,26 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/base/internal)
+
+SRCS(
+ spinlock_wait.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_win32.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_win32.inc
new file mode 100644
index 00000000000..648f74134f0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_win32.inc
@@ -0,0 +1,37 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Win32-specific part of spinlock_wait.cc
+
+#include <windows.h>
+#include <atomic>
+#include "y_absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+ y_absl::base_internal::SchedulingMode /* mode */) {
+ if (loop == 0) {
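+    // Do nothing on the first iteration; the caller simply spins again.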
+ } else if (loop == 1) {
+ Sleep(0);
+ } else {
+ Sleep(y_absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
+ }
+}
+
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+} // extern "C"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.cc
new file mode 100644
index 00000000000..fe50d84a032
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.cc
@@ -0,0 +1,88 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/internal/strerror.h"
+
+#include <array>
+#include <cerrno>
+#include <cstddef>
+#include <cstdio>
+#include <cstring>
+#include <util/generic/string.h>
+#include <type_traits>
+
+#include "y_absl/base/internal/errno_saver.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
+#if defined(_WIN32)
+ int rc = strerror_s(buf, buflen, errnum);
+ buf[buflen - 1] = '\0'; // guarantee NUL termination
+ if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0';
+ return buf;
+#else
+ // The type of `ret` is platform-specific; both of these branches must compile
+ // either way but only one will execute on any given platform:
+ auto ret = strerror_r(errnum, buf, buflen);
+ if (std::is_same<decltype(ret), int>::value) {
+ // XSI `strerror_r`; `ret` is `int`:
+ if (ret) *buf = '\0';
+ return buf;
+ } else {
+ // GNU `strerror_r`; `ret` is `char *`:
+ return reinterpret_cast<const char*>(ret);
+ }
+#endif
+}
+
+TString StrErrorInternal(int errnum) {
+ char buf[100];
+ const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
+ if (*str == '\0') {
+ snprintf(buf, sizeof buf, "Unknown error %d", errnum);
+ str = buf;
+ }
+ return str;
+}
+
+// kSysNerr is the number of errors from a recent glibc. `StrError()` falls back
+// to `StrErrorAdaptor()` if the value is larger than this.
+constexpr int kSysNerr = 135;
+
+std::array<TString, kSysNerr>* NewStrErrorTable() {
+ auto* table = new std::array<TString, kSysNerr>;
+ for (int i = 0; i < static_cast<int>(table->size()); ++i) {
+ (*table)[i] = StrErrorInternal(i);
+ }
+ return table;
+}
+
+} // namespace
+
+TString StrError(int errnum) {
+ y_absl::base_internal::ErrnoSaver errno_saver;
+ static const auto* table = NewStrErrorTable();
+ if (errnum >= 0 && errnum < static_cast<int>(table->size())) {
+ return (*table)[errnum];
+ }
+ return StrErrorInternal(errnum);
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.h
new file mode 100644
index 00000000000..a80a7b9c35f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_STRERROR_H_
+#define ABSL_BASE_INTERNAL_STRERROR_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A portable and thread-safe alternative to C89's `strerror`.
+//
+// The C89 specification of `strerror` is not suitable for use in a
+// multi-threaded application as the returned string may be changed by calls to
+// `strerror` from another thread. The many non-stdlib alternatives differ
+// enough in their names, availability, and semantics to justify this wrapper
+// around them. `errno` will not be modified by a call to `y_absl::StrError`.
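+//
+// Example:
+//
+//   TString message = y_absl::base_internal::StrError(ENOENT);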
+TString StrError(int errnum);
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_STRERROR_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
new file mode 100644
index 00000000000..9eb0cf3f8cd
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
@@ -0,0 +1,508 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/internal/sysinfo.h"
+
+#include "y_absl/base/attributes.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <sys/syscall.h>
+#endif
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#endif
+
+#if defined(__myriad2__)
+#error #include <rtems.h>
+#endif
+
+#include <string.h>
+
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <limits>
+#include <thread> // NOLINT(build/c++11)
+#include <utility>
+#include <vector>
+
+#include "y_absl/base/call_once.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/spinlock.h"
+#include "y_absl/base/internal/unscaledcycleclock.h"
+#include "y_absl/base/thread_annotations.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+
+#if defined(_WIN32)
+
+// Returns number of bits set in `bitMask`
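+// (counted by repeatedly clearing the lowest set bit: Kernighan's method)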
+DWORD Win32CountSetBits(ULONG_PTR bitMask) {
+ for (DWORD bitSetCount = 0; ; ++bitSetCount) {
+ if (bitMask == 0) return bitSetCount;
+ bitMask &= bitMask - 1;
+ }
+}
+
+// Returns the number of logical CPUs using GetLogicalProcessorInformation(), or
+// 0 if the number of processors is not available or cannot be computed.
+// https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation
+int Win32NumCPUs() {
+#pragma comment(lib, "kernel32.lib")
+ using Info = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
+
+ DWORD info_size = sizeof(Info);
+ Info* info(static_cast<Info*>(malloc(info_size)));
+ if (info == nullptr) return 0;
+
+ bool success = GetLogicalProcessorInformation(info, &info_size);
+ if (!success && GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
+ free(info);
+ info = static_cast<Info*>(malloc(info_size));
+ if (info == nullptr) return 0;
+ success = GetLogicalProcessorInformation(info, &info_size);
+ }
+
+ DWORD logicalProcessorCount = 0;
+ if (success) {
+ Info* ptr = info;
+ DWORD byteOffset = 0;
+ while (byteOffset + sizeof(Info) <= info_size) {
+ switch (ptr->Relationship) {
+ case RelationProcessorCore:
+ logicalProcessorCount += Win32CountSetBits(ptr->ProcessorMask);
+ break;
+
+ case RelationNumaNode:
+ case RelationCache:
+ case RelationProcessorPackage:
+ // Ignore other entries
+ break;
+
+ default:
+ // Ignore unknown entries
+ break;
+ }
+ byteOffset += sizeof(Info);
+ ptr++;
+ }
+ }
+ free(info);
+ return logicalProcessorCount;
+}
+
+#endif
+
+} // namespace
+
+
+static int GetNumCPUs() {
+#if defined(__myriad2__)
+ return 1;
+#elif defined(_WIN32)
+ const unsigned hardware_concurrency = Win32NumCPUs();
+ return hardware_concurrency ? hardware_concurrency : 1;
+#elif defined(_AIX)
+ return sysconf(_SC_NPROCESSORS_ONLN);
+#else
+ // Other possibilities:
+ // - Read /sys/devices/system/cpu/online and use cpumask_parse()
+ // - sysconf(_SC_NPROCESSORS_ONLN)
+ return std::thread::hardware_concurrency();
+#endif
+}
+
+#if defined(_WIN32)
+
+static double GetNominalCPUFrequency() {
+#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
+ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+ // UWP apps don't have access to the registry and currently don't provide an
+ // API informing about CPU nominal frequency.
+ return 1.0;
+#else
+#pragma comment(lib, "advapi32.lib") // For Reg* functions.
+ HKEY key;
+ // Use the Reg* functions rather than the SH functions because shlwapi.dll
+ // pulls in gdi32.dll which makes process destruction much more costly.
+ if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
+ "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0,
+ KEY_READ, &key) == ERROR_SUCCESS) {
+ DWORD type = 0;
+ DWORD data = 0;
+ DWORD data_size = sizeof(data);
+ auto result = RegQueryValueExA(key, "~MHz", 0, &type,
+ reinterpret_cast<LPBYTE>(&data), &data_size);
+ RegCloseKey(key);
+ if (result == ERROR_SUCCESS && type == REG_DWORD &&
+ data_size == sizeof(data)) {
+ return data * 1e6; // Value is MHz.
+ }
+ }
+ return 1.0;
+#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
+}
+
+#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
+
+static double GetNominalCPUFrequency() {
+ unsigned freq;
+ size_t size = sizeof(freq);
+ int mib[2] = {CTL_HW, HW_CPU_FREQ};
+ if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
+ return static_cast<double>(freq);
+ }
+ return 1.0;
+}
+
+#else
+
+// Helper function for reading a long from a file. Returns true if successful
+// and the memory location pointed to by value is set to the value read.
+static bool ReadLongFromFile(const char *file, long *value) {
+ bool ret = false;
+ int fd = open(file, O_RDONLY);
+ if (fd != -1) {
+ char line[1024];
+ char *err;
+ memset(line, '\0', sizeof(line));
+ int len = read(fd, line, sizeof(line) - 1);
+ if (len <= 0) {
+ ret = false;
+ } else {
+ const long temp_value = strtol(line, &err, 10);
+ if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
+ *value = temp_value;
+ ret = true;
+ }
+ }
+ close(fd);
+ }
+ return ret;
+}
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+
+// Reads a monotonic time source and returns a value in
+// nanoseconds. The returned value uses an arbitrary epoch, not the
+// Unix epoch.
+static int64_t ReadMonotonicClockNanos() {
+ struct timespec t;
+#ifdef CLOCK_MONOTONIC_RAW
+ int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
+#else
+ int rc = clock_gettime(CLOCK_MONOTONIC, &t);
+#endif
+ if (rc != 0) {
+ perror("clock_gettime() failed");
+ abort();
+ }
+ return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
+}
+
+class UnscaledCycleClockWrapperForInitializeFrequency {
+ public:
+ static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+};
+
+struct TimeTscPair {
+ int64_t time; // From ReadMonotonicClockNanos().
+ int64_t tsc; // From UnscaledCycleClock::Now().
+};
+
+// Returns a pair of values (monotonic kernel time, TSC ticks) that
+// approximately correspond to each other. This is accomplished by
+// doing several reads and picking the reading with the lowest
+// latency. This approach is used to minimize the probability that
+// our thread was preempted between clock reads.
+static TimeTscPair GetTimeTscPair() {
+ int64_t best_latency = std::numeric_limits<int64_t>::max();
+ TimeTscPair best;
+ for (int i = 0; i < 10; ++i) {
+ int64_t t0 = ReadMonotonicClockNanos();
+ int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
+ int64_t t1 = ReadMonotonicClockNanos();
+ int64_t latency = t1 - t0;
+ if (latency < best_latency) {
+ best_latency = latency;
+ best.time = t0;
+ best.tsc = tsc;
+ }
+ }
+ return best;
+}
+
+// Measures and returns the TSC frequency by taking a pair of
+// measurements approximately `sleep_nanoseconds` apart.
+static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
+ auto t0 = GetTimeTscPair();
+ struct timespec ts;
+ ts.tv_sec = 0;
+ ts.tv_nsec = sleep_nanoseconds;
+ while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
+ auto t1 = GetTimeTscPair();
+ double elapsed_ticks = t1.tsc - t0.tsc;
+ double elapsed_time = (t1.time - t0.time) * 1e-9;
+ return elapsed_ticks / elapsed_time;
+}
+
+// Measures and returns the TSC frequency by calling
+// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the
+// frequency measurement stabilizes.
+static double MeasureTscFrequency() {
+ double last_measurement = -1.0;
+ int sleep_nanoseconds = 1000000; // 1 millisecond.
+ for (int i = 0; i < 8; ++i) {
+ double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
+ if (measurement * 0.99 < last_measurement &&
+ last_measurement < measurement * 1.01) {
+ // Use the current measurement if it is within 1% of the
+ // previous measurement.
+ return measurement;
+ }
+ last_measurement = measurement;
+ sleep_nanoseconds *= 2;
+ }
+ return last_measurement;
+}
+
+#endif // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+
+static double GetNominalCPUFrequency() {
+ long freq = 0;
+
+ // Google's production kernel has a patch to export the TSC
+ // frequency through sysfs. If the kernel is exporting the TSC
+ // frequency use that. There are issues where cpuinfo_max_freq
+ // cannot be relied on because the BIOS may be exporting an invalid
+ // p-state (on x86) or p-states may be used to put the processor in
+ // a new mode (turbo mode). Essentially, those frequencies cannot
+ // always be relied upon. The same reasons apply to /proc/cpuinfo as
+ // well.
+ if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
+ return freq * 1e3; // Value is kHz.
+ }
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+ // On these platforms, the TSC frequency is the nominal CPU
+ // frequency. But without having the kernel export it directly
+ // though /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no
+ // other way to reliably get the TSC frequency, so we have to
+ // measure it ourselves. Some CPUs abuse cpuinfo_max_freq by
+ // exporting "fake" frequencies for implementing new features. For
+ // example, Intel's turbo mode is enabled by exposing a p-state
+ // value with a higher frequency than that of the real TSC
+ // rate. Because of this, we prefer to measure the TSC rate
+ // ourselves on i386 and x86-64.
+ return MeasureTscFrequency();
+#else
+
+ // If CPU scaling is in effect, we want to use the *maximum*
+ // frequency, not whatever CPU speed some random processor happens
+ // to be using now.
+ if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+ &freq)) {
+ return freq * 1e3; // Value is kHz.
+ }
+
+ return 1.0;
+#endif // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+}
+
+#endif
+
+ABSL_CONST_INIT static once_flag init_num_cpus_once;
+ABSL_CONST_INIT static int num_cpus = 0;
+
+// NumCPUs() may be called before main() and before malloc is properly
+// initialized; therefore, it must not allocate memory.
+int NumCPUs() {
+ base_internal::LowLevelCallOnce(
+ &init_num_cpus_once, []() { num_cpus = GetNumCPUs(); });
+ return num_cpus;
+}
+
+// A default frequency of 0.0 might be dangerous if it is used in division.
+ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once;
+ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0;
+
+// NominalCPUFrequency() may be called before main() and before malloc is
+// properly initialized; therefore, it must not allocate memory.
+double NominalCPUFrequency() {
+ base_internal::LowLevelCallOnce(
+ &init_nominal_cpu_frequency_once,
+ []() { nominal_cpu_frequency = GetNominalCPUFrequency(); });
+ return nominal_cpu_frequency;
+}
+
+#if defined(_WIN32)
+
+pid_t GetTID() {
+ return pid_t{GetCurrentThreadId()};
+}
+
+#elif defined(__linux__)
+
+#ifndef SYS_gettid
+#define SYS_gettid __NR_gettid
+#endif
+
+pid_t GetTID() {
+ return syscall(SYS_gettid);
+}
+
+#elif defined(__akaros__)
+
+pid_t GetTID() {
+ // Akaros has a concept of "vcore context", which is the state the program
+ // is forced into when we need to make a user-level scheduling decision, or
+ // run a signal handler. This is analogous to the interrupt context that a
+ // CPU might enter if it encounters some kind of exception.
+ //
+ // There is no current thread context in vcore context, but we need to give
+ // a reasonable answer if asked for a thread ID (e.g., in a signal handler).
+ // Thread 0 always exists, so if we are in vcore context, we return that.
+ //
+ // Otherwise, we know (since we are using pthreads) that the uthread struct
+ // current_uthread is pointing to is the first element of a
+ // struct pthread_tcb, so we extract and return the thread ID from that.
+ //
+ // TODO(dcross): Akaros anticipates moving the thread ID to the uthread
+ // structure at some point. We should modify this code to remove the cast
+ // when that happens.
+ if (in_vcore_context())
+ return 0;
+ return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
+}
+
+#elif defined(__myriad2__)
+
+pid_t GetTID() {
+ uint32_t tid;
+ rtems_task_ident(RTEMS_SELF, 0, &tid);
+ return tid;
+}
+
+#else
+
+// Fallback implementation of GetTID using pthread_getspecific.
+ABSL_CONST_INIT static once_flag tid_once;
+ABSL_CONST_INIT static pthread_key_t tid_key;
+ABSL_CONST_INIT static y_absl::base_internal::SpinLock tid_lock(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+
+// We set a bit per thread in this array to indicate that an ID is in
+// use. ID 0 is unused because it is the default value returned by
+// pthread_getspecific().
+ABSL_CONST_INIT static std::vector<uint32_t> *tid_array
+ ABSL_GUARDED_BY(tid_lock) = nullptr;
+static constexpr int kBitsPerWord = 32; // tid_array is uint32_t.
+
+// Returns the TID to tid_array.
+static void FreeTID(void *v) {
+ intptr_t tid = reinterpret_cast<intptr_t>(v);
+ int word = tid / kBitsPerWord;
+ uint32_t mask = ~(1u << (tid % kBitsPerWord));
+ y_absl::base_internal::SpinLockHolder lock(&tid_lock);
+ assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
+ (*tid_array)[word] &= mask;
+}
+
+static void InitGetTID() {
+ if (pthread_key_create(&tid_key, FreeTID) != 0) {
+ // The logging system calls GetTID() so it can't be used here.
+ perror("pthread_key_create failed");
+ abort();
+ }
+
+ // Initialize tid_array.
+ y_absl::base_internal::SpinLockHolder lock(&tid_lock);
+ tid_array = new std::vector<uint32_t>(1);
+  (*tid_array)[0] = 1;  // ID 0 is never allocated.
+}
+
+// Return a per-thread small integer ID from pthread's thread-specific data.
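+// (A sketch of the allocation below: if tid_array held {0xffffffffu, 0x7u},
+// the first free slot would be word 1, bit 3, so the new tid would be
+// 1 * kBitsPerWord + 3 == 35.)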
+pid_t GetTID() {
+ y_absl::call_once(tid_once, InitGetTID);
+
+ intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+ if (tid != 0) {
+ return tid;
+ }
+
+ int bit; // tid_array[word] = 1u << bit;
+ size_t word;
+ {
+ // Search for the first unused ID.
+ y_absl::base_internal::SpinLockHolder lock(&tid_lock);
+ // First search for a word in the array that is not all ones.
+ word = 0;
+ while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
+ ++word;
+ }
+ if (word == tid_array->size()) {
+ tid_array->push_back(0); // No space left, add kBitsPerWord more IDs.
+ }
+ // Search for a zero bit in the word.
+ bit = 0;
+ while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
+ ++bit;
+ }
+ tid = (word * kBitsPerWord) + bit;
+ (*tid_array)[word] |= 1u << bit; // Mark the TID as allocated.
+ }
+
+ if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
+ perror("pthread_setspecific failed");
+ abort();
+ }
+
+ return static_cast<pid_t>(tid);
+}
+
+#endif
+
+// GetCachedTID() caches the thread ID in thread-local storage (which is a
+// userspace construct) to avoid unnecessary system calls. Without the cache,
+// a call takes roughly 98ns; with it, roughly 1ns.
+pid_t GetCachedTID() {
+#ifdef ABSL_HAVE_THREAD_LOCAL
+ static thread_local pid_t thread_id = GetTID();
+ return thread_id;
+#else
+ return GetTID();
+#endif // ABSL_HAVE_THREAD_LOCAL
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.h
new file mode 100644
index 00000000000..0fd7207a389
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.h
@@ -0,0 +1,74 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file includes routines to find out characteristics
+// of the machine a program is running on. It is undoubtedly
+// system-dependent.
+
+// Functions listed here that accept a pid_t as an argument act on the
+// current process if the pid_t argument is 0.
+// All functions here are thread-hostile due to file caching unless
+// commented otherwise.
+
+#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
+#define ABSL_BASE_INTERNAL_SYSINFO_H_
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/port.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Nominal core processor cycles per second of each processor. This is _not_
+// necessarily the frequency of the CycleClock counter (see cycleclock.h)
+// Thread-safe.
+double NominalCPUFrequency();
+
+// Number of logical processors (hyperthreads) in system. Thread-safe.
+int NumCPUs();
+
+// Return the thread id of the current thread, as told by the system.
+// No two currently-live threads implemented by the OS shall have the same ID.
+// Thread ids of exited threads may be reused. Multiple user-level threads
+// may have the same thread ID if multiplexed on the same OS thread.
+//
+// On Linux, you may send a signal to the resulting ID with kill(). However,
+// it is recommended for portability that you use pthread_kill() instead.
+#ifdef _WIN32
+// On Windows, process ids and thread ids share a representation: the return
+// types of GetProcessId() and GetThreadId() are both DWORD, an unsigned
+// 32-bit type.
+using pid_t = uint32_t;
+#endif
+pid_t GetTID();
+
+// Like GetTID(), but caches the result in thread-local storage in order
+// to avoid unnecessary system calls. Note that there are some cases where
+// one must call through to GetTID directly, which is why this exists as a
+// separate function. For example, GetCachedTID() is not safe to call in
+// an asynchronous signal-handling context nor right after a call to fork().
+pid_t GetCachedTID();
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_SYSINFO_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_annotations.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_annotations.h
new file mode 100644
index 00000000000..b4b01a8fb4d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_annotations.h
@@ -0,0 +1,271 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: thread_annotations.h
+// -----------------------------------------------------------------------------
+//
+// WARNING: This is a backwards compatible header and it will be removed after
+// the migration to prefixed thread annotations is finished; please include
+// "y_absl/base/thread_annotations.h".
+//
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// These annotations are implemented using compiler attributes. Using the macros
+// defined here instead of raw attributes allows for portability and future
+// compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
+
+// GUARDED_BY()
+//
+// Documents if a shared field or global variable needs to be protected by a
+// mutex. GUARDED_BY() allows the user to specify a particular mutex that
+// should be held when accessing the annotated variable.
+//
+// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to
+// local variables, a local variable and its associated mutex can often be
+// combined into a small class or struct, thereby allowing the annotation.
+//
+// Example:
+//
+// class Foo {
+// Mutex mu_;
+// int p1_ GUARDED_BY(mu_);
+// ...
+// };
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+// PT_GUARDED_BY()
+//
+// Documents if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer.
+//
+// Example:
+// class Foo {
+// Mutex mu_;
+// int *p1_ PT_GUARDED_BY(mu_);
+// ...
+// };
+//
+// Note that a pointer variable to a shared memory location could itself be a
+// shared variable.
+//
+// Example:
+//
+// // `q_`, guarded by `mu1_`, points to a shared memory location that is
+// // guarded by `mu2_`:
+// int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_);
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
+//
+// Documents the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+//
+// As with GUARDED_BY, this is only applicable to mutexes that are shared
+// fields or global variables.
+//
+// Example:
+//
+// Mutex m1_;
+// Mutex m2_ ACQUIRED_AFTER(m1_);
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
+//
+// Documents a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to, and exit from, the
+// function.
+//
+// An exclusive lock allows read-write access to the guarded data member(s), and
+// only one thread can acquire a lock exclusively at any one time. A shared lock
+// allows read-only access, and any number of threads can acquire a shared lock
+// concurrently.
+//
+// Generally, non-const methods should be annotated with
+// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with
+// SHARED_LOCKS_REQUIRED.
+//
+// Example:
+//
+// Mutex mu1, mu2;
+// int a GUARDED_BY(mu1);
+// int b GUARDED_BY(mu2);
+//
+// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }
+// void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define SHARED_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// LOCKS_EXCLUDED()
+//
+// Documents the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// non-reentrant).
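+//
+// Example (illustrative):
+//
+//   Mutex mu;
+//
+//   void Incr() LOCKS_EXCLUDED(mu) {
+//     MutexLock lock(&mu);  // Acquires and releases `mu` within the body.
+//     ...
+//   }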
+#define LOCKS_EXCLUDED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it. For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with LOCK_RETURNED.
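+//
+// Example (illustrative):
+//
+//   class MyClass {
+//    public:
+//     Mutex* GetMutex() LOCK_RETURNED(mu_) { return &mu_; }
+//
+//    private:
+//     Mutex mu_;
+//   };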
+#define LOCK_RETURNED(x) \
+ THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#define LOCKABLE \
+ THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is
+// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
+// arguments; the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
+#define SCOPED_LOCKABLE \
+ THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+// SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#define SHARED_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#define UNLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
+// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
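+//
+// Example (illustrative):
+//
+//   class LOCKABLE MyLock {
+//    public:
+//     bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true);  // true on success.
+//     void Unlock() UNLOCK_FUNCTION();
+//   };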
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define SHARED_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define ASSERT_SHARED_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
+
+// NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct, but
+// the locking behavior is more complicated than the analyzer can handle.
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes. These
+// annotations will be ignored by the analysis.
+#define TS_UNCHECKED(x) ""
+
+// TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to TS_UNCHECKED.
+#define TS_FIXME(x) ""
+
+// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
+// a particular function. However, this attribute is used to mark functions
+// that are incorrect and need to be fixed. It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
+// annotation that needs to be fixed because it is producing thread safety
+// warnings. It disables the GUARDED_BY.
+#define GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation. This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
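+//
+// Example (illustrative; `counter_` stands for some GUARDED_BY member):
+//
+//   int snapshot = TS_UNCHECKED_READ(counter_);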
+#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
+
+
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+} // namespace thread_safety_analysis
+
+#endif // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
new file mode 100644
index 00000000000..b5e88ae302c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
@@ -0,0 +1,155 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/internal/thread_identity.h"
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/call_once.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/spinlock.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+namespace {
+// Used to co-ordinate one-time creation of our pthread_key
+y_absl::once_flag init_thread_identity_key_once;
+pthread_key_t thread_identity_pthread_key;
+std::atomic<bool> pthread_key_initialized(false);
+
+void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
+ pthread_key_create(&thread_identity_pthread_key, reclaimer);
+ pthread_key_initialized.store(true, std::memory_order_release);
+}
+} // namespace
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+// The actual TLS storage for a thread's currently associated ThreadIdentity.
+// This is referenced by inline accessors in the header.
+// "protected" visibility ensures that if multiple instances of Abseil code
+// exist within a process (via dlopen() or similar), references to
+// thread_identity_ptr from each instance of the code will refer to
+// *different* instances of this ptr.
+// Apple platforms have the visibility attribute, but issue a compile warning
+// that protected visibility is unsupported.
+#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+__attribute__((visibility("protected")))
+#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+#if ABSL_PER_THREAD_TLS
+// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
+ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
+#elif defined(ABSL_HAVE_THREAD_LOCAL)
+thread_local ThreadIdentity* thread_identity_ptr = nullptr;
+#endif // ABSL_PER_THREAD_TLS
+#endif // TLS or CPP11
+
+void SetCurrentThreadIdentity(
+ ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
+ assert(CurrentThreadIdentityIfPresent() == nullptr);
+ // Associate our destructor.
+ // NOTE: This call to pthread_setspecific is currently the only immovable
+ // barrier to CurrentThreadIdentity() always being async signal safe.
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ // NOTE: Not async-safe. But can be open-coded.
+ y_absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+ reclaimer);
+
+#if defined(__EMSCRIPTEN__) || defined(__MINGW32__)
+  // Emscripten and MinGW pthread implementations do not support signals.
+ // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+ // for more information.
+ pthread_setspecific(thread_identity_pthread_key,
+ reinterpret_cast<void*>(identity));
+#else
+ // We must mask signals around the call to setspecific as with current glibc,
+ // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent())
+ // may zero our value.
+ //
+ // While not officially async-signal safe, getspecific within a signal handler
+ // is otherwise OK.
+ sigset_t all_signals;
+ sigset_t curr_signals;
+ sigfillset(&all_signals);
+ pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals);
+ pthread_setspecific(thread_identity_pthread_key,
+ reinterpret_cast<void*>(identity));
+ pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr);
+#endif // !__EMSCRIPTEN__ && !__MINGW32__
+
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS
+ // NOTE: Not async-safe. But can be open-coded.
+ y_absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+ reclaimer);
+ pthread_setspecific(thread_identity_pthread_key,
+ reinterpret_cast<void*>(identity));
+ thread_identity_ptr = identity;
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+ thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction>
+ holder(identity, reclaimer);
+ thread_identity_ptr = identity;
+#else
+#error Unimplemented ABSL_THREAD_IDENTITY_MODE
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+// Please see the comment on `CurrentThreadIdentityIfPresent` in
+// thread_identity.h. When we cannot expose thread_local variables in
+// headers, we opt for the correct-but-slower option of not inlining this
+// function.
+#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
+ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; }
+#endif
+#endif
+
+void ClearCurrentThreadIdentity() {
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+ thread_identity_ptr = nullptr;
+#elif ABSL_THREAD_IDENTITY_MODE == \
+ ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // pthread_setspecific is expected to clear the value on destruction.
+ assert(CurrentThreadIdentityIfPresent() == nullptr);
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ThreadIdentity* CurrentThreadIdentityIfPresent() {
+ bool initialized = pthread_key_initialized.load(std::memory_order_acquire);
+ if (!initialized) {
+ return nullptr;
+ }
+ return reinterpret_cast<ThreadIdentity*>(
+ pthread_getspecific(thread_identity_pthread_key));
+}
+#endif
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h
new file mode 100644
index 00000000000..09a6c0bce11
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h
@@ -0,0 +1,265 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread in
+// interfaces at various levels. ThreadIdentity objects are never deallocated.
+// When a thread terminates, its ThreadIdentity object may be reused for a
+// thread created later.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/per_thread_tls.h"
+#include "y_absl/base/optimization.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of y_absl::Mutex and y_absl::CondVar.
+struct PerThreadSynch {
+  // The internal representations of y_absl::Mutex and y_absl::CondVar rely
+ // on the alignment of PerThreadSynch. Both store the address of the
+ // PerThreadSynch in the high-order bits of their internal state,
+ // which means the low kLowZeroBits of the address of PerThreadSynch
+ // must be zero.
+ static constexpr int kLowZeroBits = 8;
+ static constexpr int kAlignment = 1 << kLowZeroBits;
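+  // (With kLowZeroBits == 8, kAlignment == 256, so every PerThreadSynch must
+  // be allocated on a 256-byte boundary.)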
+
+ // Returns the associated ThreadIdentity.
+ // This can be implemented as a cast because we guarantee
+ // PerThreadSynch is the first element of ThreadIdentity.
+ ThreadIdentity* thread_identity() {
+ return reinterpret_cast<ThreadIdentity*>(this);
+ }
+
+ PerThreadSynch *next; // Circular waiter queue; initialized to 0.
+ PerThreadSynch *skip; // If non-zero, all entries in Mutex queue
+                         // up to and including "skip" have the same
+ // condition as this, and will be woken later
+ bool may_skip; // if false while on mutex queue, a mutex unlocker
+ // is using this PerThreadSynch as a terminator. Its
+ // skip field must not be filled in because the loop
+ // might then skip over the terminator.
+ bool wake; // This thread is to be woken from a Mutex.
+ // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+ // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+ //
+ // The value of "x->cond_waiter" is meaningless if "x" is not on a
+ // Mutex waiter list.
+ bool cond_waiter;
+ bool maybe_unlocking; // Valid at head of Mutex waiter queue;
+ // true if UnlockSlow could be searching
+ // for a waiter to wake. Used for an optimization
+ // in Enqueue(). true is always a valid value.
+ // Can be reset to false when the unlocker or any
+ // writer releases the lock, or a reader fully
+ // releases the lock. It may not be set to false
+ // by a reader that decrements the count to
+ // non-zero. protected by mutex spinlock
+ bool suppress_fatal_errors; // If true, try to proceed even in the face
+ // of broken invariants. This is used within
+ // fatal signal handlers to improve the
+ // chances of debug logging information being
+ // output successfully.
+ int priority; // Priority of thread (updated every so often).
+
+ // State values:
+ // kAvailable: This PerThreadSynch is available.
+ // kQueued: This PerThreadSynch is unavailable, it's currently queued on a
+  //            Mutex or CondVar waitlist.
+ //
+ // Transitions from kQueued to kAvailable require a release
+ // barrier. This is needed as a waiter may use "state" to
+ // independently observe that it's no longer queued.
+ //
+ // Transitions from kAvailable to kQueued require no barrier, they
+ // are externally ordered by the Mutex.
+ enum State {
+ kAvailable,
+ kQueued
+ };
+ std::atomic<State> state;
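+  // (Illustrative: the side that dequeues a waiter publishes the transition
+  // with `state.store(kAvailable, std::memory_order_release);`, matching the
+  // release requirement described above.)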
+
+ // The wait parameters of the current wait. waitp is null if the
+ // thread is not waiting. Transitions from null to non-null must
+ // occur before the enqueue commit point (state = kQueued in
+ // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+ // null must occur after the wait is finished (state = kAvailable in
+ // Mutex::Block() and CondVar::WaitCommon()). This field may be
+ // changed only by the thread that describes this PerThreadSynch. A
+ // special case is Fer(), which calls Enqueue() on another thread,
+ // but with an identical SynchWaitParams pointer, thus leaving the
+ // pointer unchanged.
+ SynchWaitParams* waitp;
+
+ intptr_t readers; // Number of readers in mutex.
+
+ // When priority will next be read (cycles).
+ int64_t next_priority_read_cycles;
+
+ // Locks held; used during deadlock detection.
+ // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
+ SynchLocksHeld *all_locks;
+};
+
+// The instances of this class are allocated in NewThreadIdentity() with an
+// alignment of PerThreadSynch::kAlignment.
+struct ThreadIdentity {
+ // Must be the first member. The Mutex implementation requires that
+ // the PerThreadSynch object associated with each thread is
+ // PerThreadSynch::kAlignment aligned. We provide this alignment on
+ // ThreadIdentity itself.
+ PerThreadSynch per_thread_synch;
+
+ // Private: Reserved for y_absl::synchronization_internal::Waiter.
+ struct WaiterState {
+ alignas(void*) char data[128];
+ } waiter_state;
+
+ // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
+ std::atomic<int>* blocked_count_ptr;
+
+ // The following variables are mostly read/written just by the
+ // thread itself. The only exception is that these are read by
+ // a ticker thread as a hint.
+ std::atomic<int> ticker; // Tick counter, incremented once per second.
+ std::atomic<int> wait_start; // Ticker value when thread started waiting.
+ std::atomic<bool> is_idle; // Has thread become idle yet?
+
+ ThreadIdentity* next;
+};
+
+// Returns the ThreadIdentity object representing the calling thread, which is
+// guaranteed to be unique for the thread's lifetime. The returned object
+// remains valid for the program's lifetime, although it may be re-assigned to
+// a subsequent thread. If no ThreadIdentity exists for the calling thread,
+// returns nullptr.
+//
+// Does not malloc(*), and is async-signal safe.
+// [*] Technically pthread_setspecific() does malloc on first use; however this
+// is handled internally within tcmalloc's initialization already.
+//
+// New ThreadIdentity objects can be constructed and associated with a thread
+// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
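+//
+// Example (illustrative only):
+//
+//   if (ThreadIdentity* identity = CurrentThreadIdentityIfPresent()) {
+//     // ... identity remains valid for the program's lifetime ...
+//   }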
+ThreadIdentity* CurrentThreadIdentityIfPresent();
+
+using ThreadIdentityReclaimerFunction = void (*)(void*);
+
+// Sets the current thread identity to the given value. 'reclaimer' is a
+// pointer to the global function for cleaning up instances on thread
+// destruction.
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+ ThreadIdentityReclaimerFunction reclaimer);
+
+// Removes the currently associated ThreadIdentity from the running thread.
+// This must be called from inside the ThreadIdentityReclaimerFunction, and only
+// from that function.
+void ClearCurrentThreadIdentity();
+
+// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
+// index>
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE
+#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
+#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
+#elif defined(_WIN32) && !defined(__MINGW32__)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+ (__GOOGLE_GRTE_VERSION__ >= 20140228L)
+// Support for async-safe TLS was specifically added in GRTEv4. It's not
+// present in the upstream eglibc.
+// Note: Current default for production systems.
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#else
+#define ABSL_THREAD_IDENTITY_MODE \
+ ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+#if ABSL_PER_THREAD_TLS
+ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
+ thread_identity_ptr;
+#elif defined(ABSL_HAVE_THREAD_LOCAL)
+ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
+#else
+#error Thread-local storage not detected on this platform
+#endif
+
+// thread_local variables cannot be in headers exposed by DLLs or in certain
+// build configurations on Apple platforms. However, it is important for
+// performance reasons in general that `CurrentThreadIdentityIfPresent` be
+// inlined. In the other cases we opt not to inline the function. Note
+// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
+// this entire inline definition.
+#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
+ !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
+#endif
+
+#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
+inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
+ return thread_identity_ptr;
+}
+#endif
+
+#elif ABSL_THREAD_IDENTITY_MODE != \
+ ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error Unknown ABSL_THREAD_IDENTITY_MODE
+#endif
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc
new file mode 100644
index 00000000000..dcce5aedc38
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc
@@ -0,0 +1,212 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/internal/throw_delegate.h"
+
+#include <cstdlib>
+#include <functional>
+#include <new>
+#include <stdexcept>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// NOTE: The various STL exception throwing functions are placed within the
+// #ifdef blocks so the symbols aren't exposed on platforms that don't support
+// them, such as the Android NDK. For example, ANGLE fails to link when building
+// within AOSP without them, since the STL functions don't exist.
+namespace {
+#ifdef ABSL_HAVE_EXCEPTIONS
+template <typename T>
+[[noreturn]] void Throw(const T& error) {
+ throw error;
+}
+#endif
+} // namespace
+
+void ThrowStdLogicError(const TString& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::logic_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
+}
+void ThrowStdLogicError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::logic_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+void ThrowStdInvalidArgument(const TString& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::invalid_argument(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
+}
+void ThrowStdInvalidArgument(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::invalid_argument(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+
+void ThrowStdDomainError(const TString& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::domain_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
+}
+void ThrowStdDomainError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::domain_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+
+void ThrowStdLengthError(const TString& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::length_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
+}
+void ThrowStdLengthError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::length_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+
+void ThrowStdOutOfRange(const TString& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::out_of_range(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
+}
+void ThrowStdOutOfRange(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::out_of_range(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+
+void ThrowStdRuntimeError(const TString& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::runtime_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
+}
+void ThrowStdRuntimeError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::runtime_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+
+void ThrowStdRangeError(const TString& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::range_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
+}
+void ThrowStdRangeError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::range_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+
+void ThrowStdOverflowError(const TString& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::overflow_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
+}
+void ThrowStdOverflowError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::overflow_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+
+void ThrowStdUnderflowError(const TString& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::underflow_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
+}
+void ThrowStdUnderflowError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::underflow_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+
+void ThrowStdBadFunctionCall() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::bad_function_call());
+#else
+ std::abort();
+#endif
+}
+
+void ThrowStdBadAlloc() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::bad_alloc());
+#else
+ std::abort();
+#endif
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.h
new file mode 100644
index 00000000000..fda03a5db3a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.h
@@ -0,0 +1,75 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Helper functions that allow throwing exceptions consistently from anywhere.
+// The main use case is for header-based libraries (e.g. templates), as they will
+// be built by many different targets with their own compiler options.
+// In particular, this will allow a safe way to throw exceptions even if the
+// caller is compiled with -fno-exceptions. This is intended for implementing
+// things like map<>::at(), which the standard documents as throwing an
+// exception on error.
+//
+// Using other techniques like #if tricks could lead to ODR violations.
+//
+// You shouldn't use these helpers unless you're writing code that you know
+// will be built both with and without exceptions and that needs to conform to
+// an interface that uses exceptions.
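+//
+// Example (a hypothetical bounds-checked accessor; `At` is not part of this
+// library):
+//
+//   int& At(std::vector<int>& v, size_t i) {
+//     if (i >= v.size()) base_internal::ThrowStdOutOfRange("At: bad index");
+//     return v[i];
+//   }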
+
+[[noreturn]] void ThrowStdLogicError(const TString& what_arg);
+[[noreturn]] void ThrowStdLogicError(const char* what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const TString& what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
+[[noreturn]] void ThrowStdDomainError(const TString& what_arg);
+[[noreturn]] void ThrowStdDomainError(const char* what_arg);
+[[noreturn]] void ThrowStdLengthError(const TString& what_arg);
+[[noreturn]] void ThrowStdLengthError(const char* what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const TString& what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const TString& what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
+[[noreturn]] void ThrowStdRangeError(const TString& what_arg);
+[[noreturn]] void ThrowStdRangeError(const char* what_arg);
+[[noreturn]] void ThrowStdOverflowError(const TString& what_arg);
+[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const TString& what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
+
+[[noreturn]] void ThrowStdBadFunctionCall();
+[[noreturn]] void ThrowStdBadAlloc();
+
+// ThrowStdBadArrayNewLength() cannot be consistently supported because
+// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
+// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
+// libcxx (as of 3.2) and msvc (as of 2015) both have it.
+// [[noreturn]] void ThrowStdBadArrayNewLength();
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate/ya.make
new file mode 100644
index 00000000000..1b956ad4940
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate/ya.make
@@ -0,0 +1,31 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/base/internal)
+
+SRCS(
+ throw_delegate.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/tsan_mutex_interface.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/tsan_mutex_interface.h
new file mode 100644
index 00000000000..69a61d08144
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/tsan_mutex_interface.h
@@ -0,0 +1,68 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is intended solely for spinlock.h.
+// It provides ThreadSanitizer annotations for custom mutexes.
+// See <sanitizer/tsan_interface.h> for meaning of these annotations.
+
+#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+
+#include "y_absl/base/config.h"
+
+// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+// Macro intended only for internal use.
+//
+// Checks whether LLVM Thread Sanitizer interfaces are available.
+// First made available in LLVM 5.0 (Sep 2017).
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
+#endif
+
+#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include)
+#if __has_include(<sanitizer/tsan_interface.h>)
+#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
+#endif
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#include <sanitizer/tsan_interface.h>
+
+#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
+#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
+#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
+#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
+#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
+#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
+#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
+#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
+
+#else
+
+#define ABSL_TSAN_MUTEX_CREATE(...)
+#define ABSL_TSAN_MUTEX_DESTROY(...)
+#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
+#define ABSL_TSAN_MUTEX_POST_LOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
+#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
+
+#endif
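+
+// Example (a sketch of how a custom lock might use these annotations; see
+// <sanitizer/tsan_interface.h> for the exact signatures):
+//
+//   class MyLock {
+//    public:
+//     void Lock() {
+//       ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+//       // ... actually acquire the lock ...
+//       ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+//     }
+//   };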
+
+#endif // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unaligned_access.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unaligned_access.h
new file mode 100644
index 00000000000..7dbb23b1dee
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unaligned_access.h
@@ -0,0 +1,82 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+
+#include <string.h>
+
+#include <cstdint>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+
+// unaligned APIs
+
+// Portable handling of unaligned loads, stores, and copies.
+
+// The unaligned API is C++ only. The declarations use C++ features
+// (namespaces, inline) which are absent or incompatible in C.
+#if defined(__cplusplus)
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline uint16_t UnalignedLoad16(const void *p) {
+ uint16_t t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline uint32_t UnalignedLoad32(const void *p) {
+ uint32_t t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline uint64_t UnalignedLoad64(const void *p) {
+ uint64_t t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+ (y_absl::base_internal::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+ (y_absl::base_internal::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+ (y_absl::base_internal::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+ (y_absl::base_internal::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+ (y_absl::base_internal::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+ (y_absl::base_internal::UnalignedStore64(_p, _val))
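+
+// Example (illustrative only):
+//
+//   char buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+//   // Loads 4 bytes starting at a misaligned address without undefined
+//   // behavior:
+//   uint32_t v = ABSL_INTERNAL_UNALIGNED_LOAD32(buf + 1);
+//   ABSL_INTERNAL_UNALIGNED_STORE32(buf + 1, v);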
+
+#endif // defined(__cplusplus), end of unaligned API
+
+#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
new file mode 100644
index 00000000000..072a9852fa3
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
@@ -0,0 +1,154 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/internal/unscaledcycleclock.h"
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+#if defined(_WIN32)
+#include <intrin.h>
+#endif
+
+#if defined(__powerpc__) || defined(__ppc__)
+#ifdef __GLIBC__
+#include <sys/platform/ppc.h>
+#elif defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#endif
+#endif
+
+#include "y_absl/base/internal/sysinfo.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if defined(__i386__)
+
+int64_t UnscaledCycleClock::Now() {
+ int64_t ret;
+ __asm__ volatile("rdtsc" : "=A"(ret));
+ return ret;
+}
+
+double UnscaledCycleClock::Frequency() {
+ return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__x86_64__)
+
+int64_t UnscaledCycleClock::Now() {
+ uint64_t low, high;
+ __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+ return (high << 32) | low;
+}
+
+double UnscaledCycleClock::Frequency() {
+ return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__powerpc__) || defined(__ppc__)
+
+int64_t UnscaledCycleClock::Now() {
+#ifdef __GLIBC__
+ return __ppc_get_timebase();
+#else
+#ifdef __powerpc64__
+ int64_t tbr;
+ asm volatile("mfspr %0, 268" : "=r"(tbr));
+ return tbr;
+#else
+ int32_t tbu, tbl, tmp;
+ asm volatile(
+ "0:\n"
+ "mftbu %[hi32]\n"
+ "mftb %[lo32]\n"
+ "mftbu %[tmp]\n"
+ "cmpw %[tmp],%[hi32]\n"
+ "bne 0b\n"
+ : [ hi32 ] "=r"(tbu), [ lo32 ] "=r"(tbl), [ tmp ] "=r"(tmp));
+ return (static_cast<int64_t>(tbu) << 32) | tbl;
+#endif
+#endif
+}
+
+double UnscaledCycleClock::Frequency() {
+#ifdef __GLIBC__
+ return __ppc_get_timebase_freq();
+#elif defined(_AIX)
+ // This is the same constant value as returned by
+ // __ppc_get_timebase_freq().
+ return static_cast<double>(512000000);
+#elif defined(__FreeBSD__)
+ static once_flag init_timebase_frequency_once;
+ static double timebase_frequency = 0.0;
+ base_internal::LowLevelCallOnce(&init_timebase_frequency_once, [&]() {
+ size_t length = sizeof(timebase_frequency);
+ sysctlbyname("kern.timecounter.tc.timebase.frequency", &timebase_frequency,
+ &length, nullptr, 0);
+ });
+ return timebase_frequency;
+#else
+#error Must implement UnscaledCycleClock::Frequency()
+#endif
+}
+
+#elif defined(__aarch64__)
+
+// The ARMv8 system timer runs at a frequency different from the CPU's.
+// The frequency is fixed, typically in the range 1-50 MHz. It can be
+// read from the CNTFRQ special register. We assume the OS has set up
+// the virtual timer properly.
+int64_t UnscaledCycleClock::Now() {
+ int64_t virtual_timer_value;
+ asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
+ return virtual_timer_value;
+}
+
+double UnscaledCycleClock::Frequency() {
+ uint64_t aarch64_timer_frequency;
+ asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency));
+ return aarch64_timer_frequency;
+}
+
+#elif defined(__riscv)
+
+int64_t UnscaledCycleClock::Now() {
+ int64_t virtual_timer_value;
+ asm volatile("rdcycle %0" : "=r"(virtual_timer_value));
+ return virtual_timer_value;
+}
+
+double UnscaledCycleClock::Frequency() {
+ return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(_M_IX86) || defined(_M_X64)
+
+#pragma intrinsic(__rdtsc)
+
+int64_t UnscaledCycleClock::Now() { return __rdtsc(); }
+
+double UnscaledCycleClock::Frequency() {
+ return base_internal::NominalCPUFrequency();
+}
+
+#endif
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h
new file mode 100644
index 00000000000..618c5c7e737
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h
@@ -0,0 +1,124 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// UnscaledCycleClock
+// An UnscaledCycleClock yields the value and frequency of a cycle counter
+// that increments at a rate that is approximately constant.
+// This class is for internal use only, you should consider using CycleClock
+// instead.
+//
+// Notes:
+// The cycle counter frequency is not necessarily the core clock frequency.
+// That is, CycleCounter cycles are not necessarily "CPU cycles".
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately. If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+
+#include <cstdint>
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+#include "y_absl/base/port.h"
+
+// The following platforms have an implementation of a hardware counter.
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+ defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \
+ defined(_M_IX86) || defined(_M_X64)
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
+#else
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
+#endif
+
+// The following platforms often disable access to the hardware
+// counter (through a sandbox) even if the underlying hardware has a
+// usable counter. The CycleTimer interface also requires a *scaled*
+// CycleClock that runs at 1 MHz or faster. We've found some Android
+// ARM64 devices where this is not the case, so we disable it by
+// default on Android ARM64.
+#if defined(__native_client__) || \
+ (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \
+ (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK \
+ (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+ ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
+#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \
+ defined(_M_IX86) || defined(_M_X64))
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+class UnscaledCycleClockWrapperForGetCurrentTime;
+} // namespace time_internal
+
+namespace base_internal {
+class CycleClock;
+class UnscaledCycleClockWrapperForInitializeFrequency;
+
+class UnscaledCycleClock {
+ private:
+ UnscaledCycleClock() = delete;
+
+ // Return the value of a cycle counter that counts at a rate that is
+ // approximately constant.
+ static int64_t Now();
+
+  // Returns how much UnscaledCycleClock::Now() increases per second.
+  // This is not necessarily the core CPU clock frequency.
+  // It may be the nominal value reported by the kernel, rather than a
+  // measured value.
+ static double Frequency();
+
+ // Allowed users
+ friend class base_internal::CycleClock;
+ friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
+ friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
+};
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
+
+#endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.cc
new file mode 100644
index 00000000000..2ae8c75aec2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.cc
@@ -0,0 +1,27 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/log_severity.h"
+
+#include <ostream>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+std::ostream& operator<<(std::ostream& os, y_absl::LogSeverity s) {
+ if (s == y_absl::NormalizeLogSeverity(s)) return os << y_absl::LogSeverityName(s);
+ return os << "y_absl::LogSeverity(" << static_cast<int>(s) << ")";
+}
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.h
new file mode 100644
index 00000000000..26f20ad8e12
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.h
@@ -0,0 +1,121 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_LOG_SEVERITY_H_
+#define ABSL_BASE_LOG_SEVERITY_H_
+
+#include <array>
+#include <ostream>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// y_absl::LogSeverity
+//
+// Four severity levels are defined. Logging APIs should terminate the program
+// when a message is logged at severity `kFatal`; the other levels have no
+// special semantics.
+//
+// Values other than the four defined levels (e.g. produced by `static_cast`)
+// are valid, but their semantics when passed to a function, macro, or flag
+// depend on the function, macro, or flag. The usual behavior is to normalize
+// such values to a defined severity level, however in some cases values other
+// than the defined levels are useful for comparison.
+//
+// Example:
+//
+// // Effectively disables all logging:
+// SetMinLogLevel(static_cast<y_absl::LogSeverity>(100));
+//
+// Abseil flags may be defined with type `LogSeverity`. Dependency layering
+// constraints require that the `AbslParseFlag()` overload be declared and
+// defined in the flags library itself rather than here. The `AbslUnparseFlag()`
+// overload is defined there as well for consistency.
+//
+// y_absl::LogSeverity Flag String Representation
+//
+// An `y_absl::LogSeverity` has a string representation used for parsing
+// command-line flags based on the enumerator name (e.g. `kFatal`) or
+// its unprefixed name (without the `k`) in any case-insensitive form. (E.g.
+// "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an
+// unprefixed string representation in all caps (e.g. "FATAL") or an integer.
+//
+// Additionally, the parser accepts arbitrary integers (as if the type were
+// `int`).
+//
+// Examples:
+//
+// --my_log_level=kInfo
+// --my_log_level=INFO
+// --my_log_level=info
+// --my_log_level=0
+//
+// Unparsing a flag produces the same result as `y_absl::LogSeverityName()` for
+// the standard levels and a base-ten integer otherwise.
+enum class LogSeverity : int {
+ kInfo = 0,
+ kWarning = 1,
+ kError = 2,
+ kFatal = 3,
+};
+
+// LogSeverities()
+//
+// Returns an iterable of all standard `y_absl::LogSeverity` values, ordered from
+// least to most severe.
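+//
+// Example:
+//
+//   for (y_absl::LogSeverity s : y_absl::LogSeverities()) {
+//     // Visits kInfo, kWarning, kError, and kFatal, in that order.
+//   }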
+constexpr std::array<y_absl::LogSeverity, 4> LogSeverities() {
+ return {{y_absl::LogSeverity::kInfo, y_absl::LogSeverity::kWarning,
+ y_absl::LogSeverity::kError, y_absl::LogSeverity::kFatal}};
+}
+
+// LogSeverityName()
+//
+// Returns the all-caps string representation (e.g. "INFO") of the specified
+// severity level if it is one of the standard levels and "UNKNOWN" otherwise.
+constexpr const char* LogSeverityName(y_absl::LogSeverity s) {
+ return s == y_absl::LogSeverity::kInfo
+ ? "INFO"
+ : s == y_absl::LogSeverity::kWarning
+ ? "WARNING"
+ : s == y_absl::LogSeverity::kError
+ ? "ERROR"
+ : s == y_absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN";
+}
+
+// NormalizeLogSeverity()
+//
+// Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal`
+// normalize to `kError` (**NOT** `kFatal`).
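+//
+// For example, values outside the standard range normalize as stated:
+//
+//   static_assert(y_absl::NormalizeLogSeverity(-1) == y_absl::LogSeverity::kInfo, "");
+//   static_assert(y_absl::NormalizeLogSeverity(100) == y_absl::LogSeverity::kError, "");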
+constexpr y_absl::LogSeverity NormalizeLogSeverity(y_absl::LogSeverity s) {
+ return s < y_absl::LogSeverity::kInfo
+ ? y_absl::LogSeverity::kInfo
+ : s > y_absl::LogSeverity::kFatal ? y_absl::LogSeverity::kError : s;
+}
+constexpr y_absl::LogSeverity NormalizeLogSeverity(int s) {
+ return y_absl::NormalizeLogSeverity(static_cast<y_absl::LogSeverity>(s));
+}
+
+// operator<<
+//
+// The exact representation of a streamed `y_absl::LogSeverity` is deliberately
+// unspecified; do not rely on it.
+std::ostream& operator<<(std::ostream& os, y_absl::LogSeverity s);
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_LOG_SEVERITY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity/ya.make
new file mode 100644
index 00000000000..3e4b5634c2f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity/ya.make
@@ -0,0 +1,26 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/base)
+
+SRCS(
+ log_severity.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/macros.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/macros.h
new file mode 100644
index 00000000000..b09a81877d5
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/macros.h
@@ -0,0 +1,158 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: macros.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the set of language macros used within Abseil code.
+// For the set of macros used to determine supported compilers and platforms,
+// see y_absl/base/config.h instead.
+//
+// This code is compiled directly on many platforms, including client
+// platforms like Windows, Mac, and embedded systems. Before making
+// any changes here, make sure that you're not breaking any platforms.
+
+#ifndef ABSL_BASE_MACROS_H_
+#define ABSL_BASE_MACROS_H_
+
+#include <cassert>
+#include <cstddef>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/base/port.h"
+
+// ABSL_ARRAYSIZE()
+//
+// Returns the number of elements in an array as a compile-time constant, which
+// can be used in defining new arrays. If you use this macro on a pointer by
+// mistake, you will get a compile-time error.
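+//
+// Example:
+//
+//   char buf[16];
+//   static_assert(ABSL_ARRAYSIZE(buf) == 16, "");
+//   // ABSL_ARRAYSIZE(&buf[0]);  // Compile-time error: pointer, not array.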
+#define ABSL_ARRAYSIZE(array) \
+ (sizeof(::y_absl::macros_internal::ArraySizeHelper(array)))
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace macros_internal {
+// Note: this internal template function declaration is used by ABSL_ARRAYSIZE.
+// The function doesn't need a definition, as we only use its type.
+template <typename T, size_t N>
+auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N];
+} // namespace macros_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+// ABSL_BAD_CALL_IF()
+//
+// Used on a function overload to trap bad calls: any call that matches the
+// overload will cause a compile-time error. This macro uses a clang-specific
+// "enable_if" attribute, as described at
+// https://clang.llvm.org/docs/AttributeReference.html#enable-if
+//
+// Overloads which use this macro should be bracketed by
+// `#ifdef ABSL_BAD_CALL_IF`.
+//
+// Example:
+//
+// int isdigit(int c);
+// #ifdef ABSL_BAD_CALL_IF
+// int isdigit(int c)
+// ABSL_BAD_CALL_IF(c <= -1 || c > 255,
+// "'c' must have the value of an unsigned char or EOF");
+// #endif // ABSL_BAD_CALL_IF
+#if ABSL_HAVE_ATTRIBUTE(enable_if)
+#define ABSL_BAD_CALL_IF(expr, msg) \
+ __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg)))
+#endif
+
+// ABSL_ASSERT()
+//
+// In C++11, `assert` can't be used portably within constexpr functions.
+// ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr
+// functions. Example:
+//
+// constexpr double Divide(double a, double b) {
+// return ABSL_ASSERT(b != 0), a / b;
+// }
+//
+// This macro is inspired by
+// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/
+#if defined(NDEBUG)
+#define ABSL_ASSERT(expr) \
+ (false ? static_cast<void>(expr) : static_cast<void>(0))
+#else
+#define ABSL_ASSERT(expr) \
+ (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
+ : [] { assert(false && #expr); }()) // NOLINT
+#endif
+
+// `ABSL_INTERNAL_HARDENING_ABORT()` controls how `ABSL_HARDENING_ASSERT()`
+// aborts the program in release mode (when NDEBUG is defined). The
+// implementation should abort the program as quickly as possible and ideally it
+// should not be possible to ignore the abort request.
+#if (ABSL_HAVE_BUILTIN(__builtin_trap) && \
+ ABSL_HAVE_BUILTIN(__builtin_unreachable)) || \
+ (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_INTERNAL_HARDENING_ABORT() \
+ do { \
+ __builtin_trap(); \
+ __builtin_unreachable(); \
+ } while (false)
+#else
+#define ABSL_INTERNAL_HARDENING_ABORT() abort()
+#endif
+
+// ABSL_HARDENING_ASSERT()
+//
+// `ABSL_HARDENING_ASSERT()` is like `ABSL_ASSERT()`, but used to implement
+// runtime assertions that should be enabled in hardened builds even when
+// `NDEBUG` is defined.
+//
+// When `NDEBUG` is not defined, `ABSL_HARDENING_ASSERT()` is identical to
+// `ABSL_ASSERT()`.
+//
+// See `ABSL_OPTION_HARDENED` in `y_absl/base/options.h` for more information on
+// hardened mode.
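+//
+// Example (a sketch; `size()` and `data()` stand in for container members):
+//
+//   T& operator[](size_t i) {
+//     ABSL_HARDENING_ASSERT(i < size());  // Checked even with NDEBUG when
+//     return data()[i];                   // hardened mode is enabled.
+//   }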
+#if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG)
+#define ABSL_HARDENING_ASSERT(expr) \
+ (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
+ : [] { ABSL_INTERNAL_HARDENING_ABORT(); }())
+#else
+#define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr)
+#endif
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+#define ABSL_INTERNAL_TRY try
+#define ABSL_INTERNAL_CATCH_ANY catch (...)
+#define ABSL_INTERNAL_RETHROW do { throw; } while (false)
+#else // ABSL_HAVE_EXCEPTIONS
+#define ABSL_INTERNAL_TRY if (true)
+#define ABSL_INTERNAL_CATCH_ANY else if (false)
+#define ABSL_INTERNAL_RETHROW do {} while (false)
+#endif // ABSL_HAVE_EXCEPTIONS
+
+// `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which
+// reaches one has undefined behavior, and the compiler may optimize
+// accordingly.
+#if defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
+#define ABSL_INTERNAL_UNREACHABLE __builtin_unreachable()
+#elif defined(_MSC_VER)
+#define ABSL_INTERNAL_UNREACHABLE __assume(0)
+#else
+#define ABSL_INTERNAL_UNREACHABLE
+#endif
+
+#endif // ABSL_BASE_MACROS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/optimization.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/optimization.h
new file mode 100644
index 00000000000..501530ae309
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/optimization.h
@@ -0,0 +1,244 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: optimization.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines portable macros for performance optimization.
+
+#ifndef ABSL_BASE_OPTIMIZATION_H_
+#define ABSL_BASE_OPTIMIZATION_H_
+
+#include <assert.h>
+
+#include "y_absl/base/config.h"
+
+// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
+//
+// Instructs the compiler to avoid optimizing tail-call recursion. This macro is
+// useful when you wish to preserve the existing function order within a stack
+// trace for logging, debugging, or profiling purposes.
+//
+// Example:
+//
+// int f() {
+// int result = g();
+// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+// return result;
+// }
+#if defined(__pnacl__)
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
+#elif defined(__clang__)
+// Clang will not tail call given inline volatile assembly.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
+#elif defined(__GNUC__)
+// GCC will not tail call given inline volatile assembly.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
+#elif defined(_MSC_VER)
+#include <intrin.h>
+// The __nop() intrinsic blocks the optimization.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop()
+#else
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
+#endif
+
+// ABSL_CACHELINE_SIZE
+//
+// Explicitly defines the size of the L1 cache for purposes of alignment.
+// Setting the cacheline size allows you to specify that certain objects be
+// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations.
+// (See below.)
+//
+// NOTE: this macro should be replaced with the following C++17 features, when
+// those are generally available:
+//
+// * `std::hardware_constructive_interference_size`
+// * `std::hardware_destructive_interference_size`
+//
+// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
+// for more information.
+#if defined(__GNUC__)
+// Cache line alignment
+#if defined(__i386__) || defined(__x86_64__)
+#define ABSL_CACHELINE_SIZE 64
+#elif defined(__powerpc64__)
+#define ABSL_CACHELINE_SIZE 128
+#elif defined(__aarch64__)
+// We would need to read special register ctr_el0 to find out L1 dcache size.
+// This value is a good estimate based on a real aarch64 machine.
+#define ABSL_CACHELINE_SIZE 64
+#elif defined(__arm__)
+// Cache line sizes for ARM: These values are not strictly correct since
+// cache line sizes depend on implementations, not architectures. There
+// are even implementations with cache line sizes configurable at boot
+// time.
+#if defined(__ARM_ARCH_5T__)
+#define ABSL_CACHELINE_SIZE 32
+#elif defined(__ARM_ARCH_7A__)
+#define ABSL_CACHELINE_SIZE 64
+#endif
+#endif
+
+#ifndef ABSL_CACHELINE_SIZE
+// A reasonable default guess. Note that overestimates tend to waste more
+// space, while underestimates tend to waste more time.
+#define ABSL_CACHELINE_SIZE 64
+#endif
+
+// ABSL_CACHELINE_ALIGNED
+//
+// Indicates that the declared object should be cache-aligned using
+// `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to
+// load a set of related objects in the L1 cache for performance improvements.
+// Cacheline aligning objects properly allows constructive memory sharing and
+// prevents destructive (or "false") memory sharing.
+//
+// NOTE: callers should replace uses of this macro with `alignas()` using
+// `std::hardware_constructive_interference_size` and/or
+// `std::hardware_destructive_interference_size` when C++17 becomes available to
+// them.
+//
+// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
+// for more information.
+//
+// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__`
+// or `__declspec` attribute. For compilers where this is not known to work,
+// the macro expands to nothing.
+//
+// No further guarantees are made here. The result of applying the macro
+// to variables and types is always implementation-defined.
+//
+// WARNING: It is easy to use this attribute incorrectly, even to the point
+// of causing bugs that are difficult to diagnose, crashes, etc. It does not
+// by itself guarantee that objects are aligned to a cache line.
+//
+// NOTE: Some compilers are picky about the locations of annotations such as
+// this attribute, so prefer to put it at the beginning of your declaration.
+// For example,
+//
+// ABSL_CACHELINE_ALIGNED static Foo* foo = ...
+//
+// class ABSL_CACHELINE_ALIGNED Bar { ...
+//
+// Recommendations:
+//
+// 1) Consult compiler documentation; this comment is not kept in sync as
+// toolchains evolve.
+// 2) Verify your use has the intended effect. This often requires inspecting
+// the generated machine code.
+// 3) Prefer applying this attribute to individual variables. Avoid
+// applying it to types. This tends to localize the effect.
+#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE)))
+#elif defined(_MSC_VER)
+#define ABSL_CACHELINE_SIZE 64
+#define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE))
+#else
+#define ABSL_CACHELINE_SIZE 64
+#define ABSL_CACHELINE_ALIGNED
+#endif
+
+// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE
+//
+// Enables the compiler to prioritize compilation using static analysis for
+// likely paths within a boolean branch.
+//
+// Example:
+//
+// if (ABSL_PREDICT_TRUE(expression)) {
+// return result; // Faster if more likely
+// } else {
+// return 0;
+// }
+//
+// Compilers can use the information that a certain branch is not likely to be
+// taken (for instance, a CHECK failure) to optimize for the common case in
+// the absence of better information (i.e. compiling gcc with `-fprofile-arcs`).
+//
+// Recommendation: Modern CPUs dynamically predict branch execution paths,
+// typically with accuracy greater than 97%. As a result, annotating every
+// branch in a codebase is likely counterproductive; however, annotating
+// specific branches that are both hot and consistently mispredicted is likely
+// to yield performance improvements.
+#if ABSL_HAVE_BUILTIN(__builtin_expect) || \
+ (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_PREDICT_FALSE(x) (__builtin_expect(false || (x), false))
+#define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true))
+#else
+#define ABSL_PREDICT_FALSE(x) (x)
+#define ABSL_PREDICT_TRUE(x) (x)
+#endif
+
+// ABSL_INTERNAL_ASSUME(cond)
+// Informs the compiler that a condition is always true and that it can assume
+// it to be true for optimization purposes. The call has undefined behavior if
+// the condition is false.
+// In !NDEBUG mode, the condition is checked with an assert().
+// NOTE: The expression must not have side effects, as it will only be evaluated
+// in some compilation modes and not others.
+//
+// Example:
+//
+// int x = ...;
+// ABSL_INTERNAL_ASSUME(x >= 0);
+// // The compiler can optimize the division to a simple right shift using the
+// // assumption specified above.
+// int y = x / 16;
+//
+#if !defined(NDEBUG)
+#define ABSL_INTERNAL_ASSUME(cond) assert(cond)
+#elif ABSL_HAVE_BUILTIN(__builtin_assume)
+#define ABSL_INTERNAL_ASSUME(cond) __builtin_assume(cond)
+#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
+#define ABSL_INTERNAL_ASSUME(cond) \
+ do { \
+ if (!(cond)) __builtin_unreachable(); \
+ } while (0)
+#elif defined(_MSC_VER)
+#define ABSL_INTERNAL_ASSUME(cond) __assume(cond)
+#else
+#define ABSL_INTERNAL_ASSUME(cond) \
+ do { \
+ static_cast<void>(false && (cond)); \
+ } while (0)
+#endif
+
+// ABSL_INTERNAL_UNIQUE_SMALL_NAME()
+// This macro forces a small unique name on file-level static symbols such as
+// static local variables or static functions. It is intended to be used in
+// macro definitions to reduce the size of generated code. Do NOT use it on
+// symbols exported from a translation unit, since it may cause a link-time
+// conflict.
+//
+// Example:
+//
+// #define MY_MACRO(txt)
+// namespace {
+// char VeryVeryLongVarName[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = txt;
+// const char* VeryVeryLongFuncName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+// const char* VeryVeryLongFuncName() { return txt; }
+// }
+//
+
+#if defined(__GNUC__)
+#define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x
+#define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x)
+#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \
+ asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.y_absl.__COUNTER__))
+#else
+#define ABSL_INTERNAL_UNIQUE_SMALL_NAME()
+#endif
+
+#endif // ABSL_BASE_OPTIMIZATION_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h
new file mode 100644
index 00000000000..906310cde07
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h
@@ -0,0 +1,238 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: options.h
+// -----------------------------------------------------------------------------
+//
+// This file contains Abseil configuration options for setting specific
+// implementations instead of letting Abseil determine which implementation to
+// use at compile-time. Setting these options may be useful for package or build
+// managers who wish to guarantee ABI stability within binary builds (which are
+// otherwise difficult to enforce).
+//
+// *** IMPORTANT NOTICE FOR PACKAGE MANAGERS: It is important that
+// maintainers of package managers who wish to package Abseil read and
+// understand this file! ***
+//
+// Abseil contains a number of possible configuration endpoints, based on
+// parameters such as the detected platform, language version, or command-line
+// flags used to invoke the underlying binary. As is the case with all
+// libraries, binaries which contain Abseil code must ensure that separate
+// packages use the same compiled copy of Abseil to avoid a diamond dependency
+// problem, which can occur if two packages built with different Abseil
+// configuration settings are linked together. Diamond dependency problems in
+// C++ may manifest as violations to the One Definition Rule (ODR) (resulting in
+// linker errors), or undefined behavior (resulting in crashes).
+//
+// Diamond dependency problems can be avoided if all packages utilize the same
+// exact version of Abseil. Building from source code with the same compilation
+// parameters is the easiest way to avoid such dependency problems. However, for
+// package managers who cannot control such compilation parameters, we are
+// providing the file to allow you to inject ABI (Application Binary Interface)
+// stability across builds. Setting options in this file will neither change
+// API nor ABI, providing a stable copy of Abseil between packages.
+//
+// Care must be taken to keep options within these configurations isolated
+// from any other dynamic settings, such as command-line flags which could alter
+// these options. This file is provided specifically to help build and package
+// managers provide a stable copy of Abseil within their libraries and binaries;
+// other developers should not need to alter the contents of this file.
+//
+// -----------------------------------------------------------------------------
+// Usage
+// -----------------------------------------------------------------------------
+//
+// For any particular package release, set the appropriate definitions within
+// this file to whatever value makes the most sense for your package(s). Note
+// that most of these options currently affect the implementation of types;
+// future options may affect other implementation details.
+//
+// NOTE: the defaults within this file all assume that Abseil can select the
+// proper Abseil implementation at compile-time, which will not be sufficient
+// to guarantee ABI stability to package managers.
+
+#ifndef ABSL_BASE_OPTIONS_H_
+#define ABSL_BASE_OPTIONS_H_
+
+// Include a standard library header to allow configuration based on the
+// standard library in use.
+#ifdef __cplusplus
+#include <ciso646>
+#endif
+
+// -----------------------------------------------------------------------------
+// Type Compatibility Options
+// -----------------------------------------------------------------------------
+//
+// ABSL_OPTION_USE_STD_ANY
+//
+// This option controls whether y_absl::any is implemented as an alias to
+// std::any, or as an independent implementation.
+//
+// A value of 0 means to use Abseil's implementation. This requires only C++11
+// support, and is expected to work on every toolchain we support.
+//
+// A value of 1 means to use an alias to std::any. This requires that all code
+// using Abseil is built in C++17 mode or later.
+//
+// A value of 2 means to detect the C++ version being used to compile Abseil,
+// and use an alias only if a working std::any is available. This option is
+// useful when you are building your entire program, including all of its
+// dependencies, from source. It should not be used otherwise -- for example,
+// if you are distributing Abseil in a binary package manager -- since in
+// mode 2, y_absl::any will name a different type, with a different mangled name
+// and binary layout, depending on the compiler flags passed by the end user.
+// For more info, see https://abseil.io/about/design/dropin-types.
+//
+// User code should not inspect this macro. To check in the preprocessor if
+// y_absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY.
+
+#define ABSL_OPTION_USE_STD_ANY 2
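+
+// For illustration (a user-side sketch, not part of this file): code that
+// needs to know which type y_absl::any names can test the feature macro
+// exported by y_absl/base/config.h:
+//
+//   #include "y_absl/base/config.h"
+//   #ifdef ABSL_USES_STD_ANY
+//   // y_absl::any is an alias for std::any.
+//   #else
+//   // y_absl::any is Abseil's own implementation.
+//   #endif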
+
+
+// ABSL_OPTION_USE_STD_OPTIONAL
+//
+// This option controls whether y_absl::optional is implemented as an alias to
+// std::optional, or as an independent implementation.
+//
+// A value of 0 means to use Abseil's implementation. This requires only C++11
+// support, and is expected to work on every toolchain we support.
+//
+// A value of 1 means to use an alias to std::optional. This requires that all
+// code using Abseil is built in C++17 mode or later.
+//
+// A value of 2 means to detect the C++ version being used to compile Abseil,
+// and use an alias only if a working std::optional is available. This option
+// is useful when you are building your program from source. It should not be
+// used otherwise -- for example, if you are distributing Abseil in a binary
+// package manager -- since in mode 2, y_absl::optional will name a different
+// type, with a different mangled name and binary layout, depending on the
+// compiler flags passed by the end user. For more info, see
+// https://abseil.io/about/design/dropin-types.
+//
+// User code should not inspect this macro. To check in the preprocessor if
+// y_absl::optional is a typedef of std::optional, use the feature macro
+// ABSL_USES_STD_OPTIONAL.
+
+#define ABSL_OPTION_USE_STD_OPTIONAL 2
+
+
+// ABSL_OPTION_USE_STD_STRING_VIEW
+//
+// This option controls whether y_absl::string_view is implemented as an alias to
+// std::string_view, or as an independent implementation.
+//
+// A value of 0 means to use Abseil's implementation. This requires only C++11
+// support, and is expected to work on every toolchain we support.
+//
+// A value of 1 means to use an alias to std::string_view. This requires that
+// all code using Abseil is built in C++17 mode or later.
+//
+// A value of 2 means to detect the C++ version being used to compile Abseil,
+// and use an alias only if a working std::string_view is available. This
+// option is useful when you are building your program from source. It should
+// not be used otherwise -- for example, if you are distributing Abseil in a
+// binary package manager -- since in mode 2, y_absl::string_view will name a
+// different type, with a different mangled name and binary layout, depending on
+// the compiler flags passed by the end user. For more info, see
+// https://abseil.io/about/design/dropin-types.
+//
+// User code should not inspect this macro. To check in the preprocessor if
+// y_absl::string_view is a typedef of std::string_view, use the feature macro
+// ABSL_USES_STD_STRING_VIEW.
+
+#define ABSL_OPTION_USE_STD_STRING_VIEW 2
+
+// ABSL_OPTION_USE_STD_VARIANT
+//
+// This option controls whether y_absl::variant is implemented as an alias to
+// std::variant, or as an independent implementation.
+//
+// A value of 0 means to use Abseil's implementation. This requires only C++11
+// support, and is expected to work on every toolchain we support.
+//
+// A value of 1 means to use an alias to std::variant. This requires that all
+// code using Abseil is built in C++17 mode or later.
+//
+// A value of 2 means to detect the C++ version being used to compile Abseil,
+// and use an alias only if a working std::variant is available. This option
+// is useful when you are building your program from source. It should not be
+// used otherwise -- for example, if you are distributing Abseil in a binary
+// package manager -- since in mode 2, y_absl::variant will name a different
+// type, with a different mangled name and binary layout, depending on the
+// compiler flags passed by the end user. For more info, see
+// https://abseil.io/about/design/dropin-types.
+//
+// User code should not inspect this macro. To check in the preprocessor if
+// y_absl::variant is a typedef of std::variant, use the feature macro
+// ABSL_USES_STD_VARIANT.
+
+#define ABSL_OPTION_USE_STD_VARIANT 2
+
+
+// ABSL_OPTION_USE_INLINE_NAMESPACE
+// ABSL_OPTION_INLINE_NAMESPACE_NAME
+//
+// These options control whether all entities in the y_absl namespace are
+// contained within an inner inline namespace. This does not affect the
+// user-visible API of Abseil, but it changes the mangled names of all symbols.
+//
+// This can be useful as a version tag if you are distributing Abseil in
+// precompiled form. This will prevent a binary library build of Abseil with
+// one inline namespace being used with headers configured with a different
+// inline namespace name. Binary packagers are reminded that Abseil does not
+// guarantee any ABI stability, so any update of Abseil or
+// configuration change in such a binary package should be combined with a
+// new, unique value for the inline namespace name.
+//
+// A value of 0 means not to use inline namespaces.
+//
+// A value of 1 means to use an inline namespace with the given name inside
+// namespace y_absl. If this is set, ABSL_OPTION_INLINE_NAMESPACE_NAME must also
+// be changed to a new, unique identifier name. In particular "head" is not
+// allowed.
+
+#define ABSL_OPTION_USE_INLINE_NAMESPACE 1
+#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_y_20211102
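+
+// For illustration (a sketch of the effect, assuming the settings above):
+// with these values, ABSL_NAMESPACE_BEGIN and ABSL_NAMESPACE_END (defined in
+// y_absl/base/config.h) expand so that declarations behave as if written
+//
+//   namespace y_absl {
+//   inline namespace lts_y_20211102 {
+//   // ... Abseil entities ...
+//   }  // inline namespace lts_y_20211102
+//   }  // namespace y_absl
+//
+// which changes every mangled symbol name while leaving `y_absl::` spellings
+// in user code unchanged.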
+
+// ABSL_OPTION_HARDENED
+//
+// This option enables a "hardened" build in release mode (in this context,
+// release mode is defined as a build where the `NDEBUG` macro is defined).
+//
+// A value of 0 means that "hardened" mode is not enabled.
+//
+// A value of 1 means that "hardened" mode is enabled.
+//
+// Hardened builds have additional security checks enabled when `NDEBUG` is
+// defined. Defining `NDEBUG` is normally used to turn the `assert()` macro
+// into a no-op, as well as to disable other bespoke program consistency
+// checks. By
+// defining ABSL_OPTION_HARDENED to 1, a select set of checks remain enabled in
+// release mode. These checks guard against programming errors that may lead to
+// security vulnerabilities. In release mode, when one of these programming
+// errors is encountered, the program will immediately abort, possibly without
+// any attempt at logging.
+//
+// The checks enabled by this option are not free; they do incur runtime cost.
+//
+// The checks enabled by this option are always active when `NDEBUG` is not
+// defined, even in the case when ABSL_OPTION_HARDENED is defined to 0. The
+// checks enabled by this option may abort the program in a different way and
+// log additional information when `NDEBUG` is not defined.
+
+#define ABSL_OPTION_HARDENED 0
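+
+// For illustration (a behavior sketch, not code in this file): with
+// ABSL_OPTION_HARDENED set to 1, an out-of-bounds access such as
+//
+//   y_absl::string_view sv = "abc";
+//   char c = sv[42];  // aborts in a hardened release build
+//
+// terminates the program instead of silently invoking undefined behavior.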
+
+#endif // ABSL_BASE_OPTIONS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/policy_checks.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/policy_checks.h
new file mode 100644
index 00000000000..06b32439168
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/policy_checks.h
@@ -0,0 +1,111 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: policy_checks.h
+// -----------------------------------------------------------------------------
+//
+// This header enforces a minimum set of policies at build time, such as the
+// supported compiler and library versions. Unsupported configurations are
+// reported with `#error`. This enforcement is best effort, so successfully
+// compiling this header does not guarantee a supported configuration.
+
+#ifndef ABSL_BASE_POLICY_CHECKS_H_
+#define ABSL_BASE_POLICY_CHECKS_H_
+
+// Included for the __GLIBC_PREREQ macro used below.
+#include <limits.h>
+
+// Included for the _STLPORT_VERSION macro used below.
+#if defined(__cplusplus)
+#include <cstddef>
+#endif
+
+// -----------------------------------------------------------------------------
+// Operating System Check
+// -----------------------------------------------------------------------------
+
+#if defined(__CYGWIN__)
+#error "Cygwin is not supported."
+#endif
+
+// -----------------------------------------------------------------------------
+// Toolchain Check
+// -----------------------------------------------------------------------------
+
+// We support MSVC++ 14.0 update 2 and later.
+// This minimum will go up.
+#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918 && !defined(__clang__)
+#error "This package requires Visual Studio 2015 Update 2 or higher."
+#endif
+
+// We support gcc 4.7 and later.
+// This minimum will go up.
+#if defined(__GNUC__) && !defined(__clang__)
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)
+#error "This package requires gcc 4.7 or higher."
+#endif
+#endif
+
+// We support Apple Xcode clang 4.2.1 (version 421.11.65) and later.
+// This corresponds to Apple Xcode version 4.5.
+// This minimum will go up.
+#if defined(__apple_build_version__) && __apple_build_version__ < 4211165
+#error "This package requires __apple_build_version__ of 4211165 or higher."
+#endif
+
+// -----------------------------------------------------------------------------
+// C++ Version Check
+// -----------------------------------------------------------------------------
+
+// Enforce C++11 as the minimum. Note that Visual Studio has not
+// advanced __cplusplus despite being good enough for our purposes, so
+// we exempt it from the check.
+#if defined(__cplusplus) && !defined(_MSC_VER)
+#if __cplusplus < 201103L
+#error "C++ versions less than C++11 are not supported."
+#endif
+#endif
+
+// -----------------------------------------------------------------------------
+// Standard Library Check
+// -----------------------------------------------------------------------------
+
+#if defined(_STLPORT_VERSION)
+#error "STLPort is not supported."
+#endif
+
+// -----------------------------------------------------------------------------
+// `char` Size Check
+// -----------------------------------------------------------------------------
+
+// Abseil currently assumes CHAR_BIT == 8. If you would like to use Abseil on a
+// platform where this is not the case, please provide us with the details about
+// your platform so we can consider relaxing this requirement.
+#if CHAR_BIT != 8
+#error "Abseil assumes CHAR_BIT == 8."
+#endif
+
+// -----------------------------------------------------------------------------
+// `int` Size Check
+// -----------------------------------------------------------------------------
+
+// Abseil currently assumes that an int is 4 bytes. If you would like to use
+// Abseil on a platform where this is not the case, please provide us with the
+// details about your platform so we can consider relaxing this requirement.
+#if INT_MAX < 2147483647
+#error "Abseil assumes that int is at least 4 bytes. "
+#endif
+
+#endif // ABSL_BASE_POLICY_CHECKS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/port.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/port.h
new file mode 100644
index 00000000000..69ec795d467
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/port.h
@@ -0,0 +1,25 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a forwarding header for other headers containing various
+// portability macros and functions.
+
+#ifndef ABSL_BASE_PORT_H_
+#define ABSL_BASE_PORT_H_
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/optimization.h"
+
+#endif // ABSL_BASE_PORT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/thread_annotations.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/thread_annotations.h
new file mode 100644
index 00000000000..aba5d58dc34
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/thread_annotations.h
@@ -0,0 +1,335 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: thread_annotations.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// These annotations are implemented using compiler attributes. Using the macros
+// defined here instead of raw attributes allows for portability and future
+// compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+
+#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_
+#define ABSL_BASE_THREAD_ANNOTATIONS_H_
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+// TODO(mbonadei): Remove after the backward compatibility period.
+#include "y_absl/base/internal/thread_annotations.h" // IWYU pragma: export
+
+// ABSL_GUARDED_BY()
+//
+// Documents if a shared field or global variable needs to be protected by a
+// mutex. ABSL_GUARDED_BY() allows the user to specify a particular mutex that
+// should be held when accessing the annotated variable.
+//
+// Although this annotation (and ABSL_PT_GUARDED_BY, below) cannot be applied to
+// local variables, a local variable and its associated mutex can often be
+// combined into a small class or struct, thereby allowing the annotation.
+//
+// Example:
+//
+// class Foo {
+// Mutex mu_;
+// int p1_ ABSL_GUARDED_BY(mu_);
+// ...
+// };
+#if ABSL_HAVE_ATTRIBUTE(guarded_by)
+#define ABSL_GUARDED_BY(x) __attribute__((guarded_by(x)))
+#else
+#define ABSL_GUARDED_BY(x)
+#endif
+
+// ABSL_PT_GUARDED_BY()
+//
+// Documents if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer.
+//
+// Example:
+// class Foo {
+// Mutex mu_;
+// int *p1_ ABSL_PT_GUARDED_BY(mu_);
+// ...
+// };
+//
+// Note that a pointer variable to a shared memory location could itself be a
+// shared variable.
+//
+// Example:
+//
+// // `q_`, guarded by `mu1_`, points to a shared memory location that is
+// // guarded by `mu2_`:
+// int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_);
+#if ABSL_HAVE_ATTRIBUTE(pt_guarded_by)
+#define ABSL_PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x)))
+#else
+#define ABSL_PT_GUARDED_BY(x)
+#endif
+
+// ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE()
+//
+// Documents the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e., you don't have to annotate both locks with both ABSL_ACQUIRED_AFTER
+// and ABSL_ACQUIRED_BEFORE.)
+//
+// As with ABSL_GUARDED_BY, this is only applicable to mutexes that are shared
+// fields or global variables.
+//
+// Example:
+//
+// Mutex m1_;
+// Mutex m2_ ABSL_ACQUIRED_AFTER(m1_);
+#if ABSL_HAVE_ATTRIBUTE(acquired_after)
+#define ABSL_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))
+#else
+#define ABSL_ACQUIRED_AFTER(...)
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(acquired_before)
+#define ABSL_ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__)))
+#else
+#define ABSL_ACQUIRED_BEFORE(...)
+#endif
+
+// ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED()
+//
+// Documents a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to, and exit from, the
+// function.
+//
+// An exclusive lock allows read-write access to the guarded data member(s), and
+// only one thread can acquire a lock exclusively at any one time. A shared lock
+// allows read-only access, and any number of threads can acquire a shared lock
+// concurrently.
+//
+// Generally, non-const methods should be annotated with
+// ABSL_EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with
+// ABSL_SHARED_LOCKS_REQUIRED.
+//
+// Example:
+//
+// Mutex mu1, mu2;
+// int a ABSL_GUARDED_BY(mu1);
+// int b ABSL_GUARDED_BY(mu2);
+//
+// void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }
+// void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
+#if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required)
+#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \
+ __attribute__((exclusive_locks_required(__VA_ARGS__)))
+#else
+#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...)
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(shared_locks_required)
+#define ABSL_SHARED_LOCKS_REQUIRED(...) \
+ __attribute__((shared_locks_required(__VA_ARGS__)))
+#else
+#define ABSL_SHARED_LOCKS_REQUIRED(...)
+#endif
+
+// ABSL_LOCKS_EXCLUDED()
+//
+// Documents the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// non-reentrant).
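+//
+// Example (an illustrative sketch using y_absl::Mutex):
+//
+//   Mutex mu;
+//
+//   void Commit() ABSL_LOCKS_EXCLUDED(mu) {
+//     MutexLock lock(&mu);  // would deadlock if the caller already held mu
+//     ...
+//   }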
+#if ABSL_HAVE_ATTRIBUTE(locks_excluded)
+#define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
+#else
+#define ABSL_LOCKS_EXCLUDED(...)
+#endif
+
+// ABSL_LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it. For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with ABSL_LOCK_RETURNED.
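+//
+// Example (illustrative):
+//
+//   class Foo {
+//    public:
+//     Mutex* GetMutex() ABSL_LOCK_RETURNED(mu_) { return &mu_; }
+//    private:
+//     Mutex mu_;
+//   };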
+#if ABSL_HAVE_ATTRIBUTE(lock_returned)
+#define ABSL_LOCK_RETURNED(x) __attribute__((lock_returned(x)))
+#else
+#define ABSL_LOCK_RETURNED(x)
+#endif
+
+// ABSL_LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#if ABSL_HAVE_ATTRIBUTE(lockable)
+#define ABSL_LOCKABLE __attribute__((lockable))
+#else
+#define ABSL_LOCKABLE
+#endif
+
+// ABSL_SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `ABSL_EXCLUSIVE_LOCK_FUNCTION()` to specify the
+// mutex that is acquired, and the destructor should use
+// `ABSL_UNLOCK_FUNCTION()` with no arguments; the analysis will assume that
+// the destructor unlocks whatever the constructor locked.
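+//
+// Example (illustrative):
+//
+//   class ABSL_SCOPED_LOCKABLE MyScopedLock {
+//    public:
+//     explicit MyScopedLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu);
+//     ~MyScopedLock() ABSL_UNLOCK_FUNCTION();
+//   };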
+#if ABSL_HAVE_ATTRIBUTE(scoped_lockable)
+#define ABSL_SCOPED_LOCKABLE __attribute__((scoped_lockable))
+#else
+#define ABSL_SCOPED_LOCKABLE
+#endif
+
+// ABSL_EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function)
+#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \
+ __attribute__((exclusive_lock_function(__VA_ARGS__)))
+#else
+#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...)
+#endif
+
+// ABSL_SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#if ABSL_HAVE_ATTRIBUTE(shared_lock_function)
+#define ABSL_SHARED_LOCK_FUNCTION(...) \
+ __attribute__((shared_lock_function(__VA_ARGS__)))
+#else
+#define ABSL_SHARED_LOCK_FUNCTION(...)
+#endif
+
+// ABSL_UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#if ABSL_HAVE_ATTRIBUTE(unlock_function)
+#define ABSL_UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))
+#else
+#define ABSL_UNLOCK_FUNCTION(...)
+#endif
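+
+// Example for the three lock-function annotations above (an illustrative
+// sketch of a hand-written lock wrapper):
+//
+//   class ABSL_LOCKABLE SimpleLock {
+//    public:
+//     void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
+//     void ReaderLock() ABSL_SHARED_LOCK_FUNCTION();
+//     void Unlock() ABSL_UNLOCK_FUNCTION();
+//     void ReaderUnlock() ABSL_UNLOCK_FUNCTION();
+//   };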
+
+// ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
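+//
+// Example (illustrative):
+//
+//   class Foo {
+//    public:
+//     bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true, mu_);
+//    private:
+//     Mutex mu_;
+//   };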
+#if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function)
+#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+ __attribute__((exclusive_trylock_function(__VA_ARGS__)))
+#else
+#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...)
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(shared_trylock_function)
+#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \
+ __attribute__((shared_trylock_function(__VA_ARGS__)))
+#else
+#define ABSL_SHARED_TRYLOCK_FUNCTION(...)
+#endif
+
+// ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
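+//
+// Example (illustrative):
+//
+//   class ABSL_LOCKABLE MyMutex {
+//    public:
+//     void AssertHeld() ABSL_ASSERT_EXCLUSIVE_LOCK();
+//   };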
+#if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock)
+#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \
+ __attribute__((assert_exclusive_lock(__VA_ARGS__)))
+#else
+#define ABSL_ASSERT_EXCLUSIVE_LOCK(...)
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(assert_shared_lock)
+#define ABSL_ASSERT_SHARED_LOCK(...) \
+ __attribute__((assert_shared_lock(__VA_ARGS__)))
+#else
+#define ABSL_ASSERT_SHARED_LOCK(...)
+#endif
+
+// ABSL_NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct but
+// whose locking behavior is more complicated than the analyzer can handle.
+#if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis)
+#define ABSL_NO_THREAD_SAFETY_ANALYSIS \
+ __attribute__((no_thread_safety_analysis))
+#else
+#define ABSL_NO_THREAD_SAFETY_ANALYSIS
+#endif
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// ABSL_TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes. These
+// annotations will be ignored by the analysis.
+#define ABSL_TS_UNCHECKED(x) ""
+
+// ABSL_TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to ABSL_TS_UNCHECKED.
+#define ABSL_TS_FIXME(x) ""
+
+// Like ABSL_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body
+// of a particular function. However, this attribute is used to mark functions
+// that are incorrect and need to be fixed. It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME ABSL_NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks an
+// ABSL_GUARDED_BY annotation that needs to be fixed because it is producing a
+// thread safety warning. It disables the ABSL_GUARDED_BY.
+#define ABSL_GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation. This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
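+//
+// Example (illustrative):
+//
+//   int count_ ABSL_GUARDED_BY(mu_);
+//
+//   int ApproxCount() const {
+//     return ABSL_TS_UNCHECKED_READ(count_);  // read known not to race
+//   }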
+#define ABSL_TS_UNCHECKED_READ(x) y_absl::base_internal::ts_unchecked_read(x)
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_THREAD_ANNOTATIONS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/base/ya.make
new file mode 100644
index 00000000000..9e0d53060e8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/ya.make
@@ -0,0 +1,34 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ internal/cycleclock.cc
+ internal/spinlock.cc
+ internal/sysinfo.cc
+ internal/thread_identity.cc
+ internal/unscaledcycleclock.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/city/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/city/ya.make
new file mode 100644
index 00000000000..7fc856d7964
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/city/ya.make
@@ -0,0 +1,33 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal)
+
+SRCS(
+ city.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/container/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..f39e6835968
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/.yandex_meta/licenses.list.txt
@@ -0,0 +1,24 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2018 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2019 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_map.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_map.h
new file mode 100644
index 00000000000..3f46c541b81
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_map.h
@@ -0,0 +1,815 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: btree_map.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines B-tree maps: sorted associative containers mapping
+// keys to values.
+//
+// * `y_absl::btree_map<>`
+// * `y_absl::btree_multimap<>`
+//
+// These B-tree types are similar to the corresponding types in the STL
+// (`std::map` and `std::multimap`) and generally conform to the STL interfaces
+// of those types. However, because they are implemented using B-trees, they
+// are more efficient in most situations.
+//
+// Unlike `std::map` and `std::multimap`, which are commonly implemented using
+// red-black tree nodes, B-tree maps use more generic B-tree nodes able to hold
+// multiple values per node. Holding multiple values per node often makes
+// B-tree maps perform better than their `std::map` counterparts, because
+// multiple entries can be checked within the same cache hit.
+//
+// However, these types should not be considered drop-in replacements for
+// `std::map` and `std::multimap` as there are some API differences, which are
+// noted in this header file.
+//
+// Importantly, insertions and deletions may invalidate outstanding iterators,
+// pointers, and references to elements. Such invalidations are typically only
+// an issue if insertion and deletion operations are interleaved with the use of
+// more than one iterator, pointer, or reference simultaneously. For this
+// reason, `insert()` and `erase()` return a valid iterator at the current
+// position.
+
+#ifndef ABSL_CONTAINER_BTREE_MAP_H_
+#define ABSL_CONTAINER_BTREE_MAP_H_
+
+#include "y_absl/container/internal/btree.h" // IWYU pragma: export
+#include "y_absl/container/internal/btree_container.h" // IWYU pragma: export
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// y_absl::btree_map<>
+//
+// An `y_absl::btree_map<K, V>` is an ordered associative container of
+// unique keys and associated values designed to be a more efficient replacement
+// for `std::map` (in most cases).
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `y_absl::btree_map<K, V>` uses a default allocator of
+// `std::allocator<std::pair<const K, V>>` to allocate (and deallocate)
+// nodes, and construct and destruct values within those nodes. You may
+// instead specify a custom allocator `A` (which in turn requires specifying a
+// custom comparator `C`) as in `y_absl::btree_map<K, V, C, A>`.
+//
+template <typename Key, typename Value, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<std::pair<const Key, Value>>>
+class btree_map
+ : public container_internal::btree_map_container<
+ container_internal::btree<container_internal::map_params<
+ Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*Multi=*/false>>> {
+ using Base = typename btree_map::btree_map_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A `btree_map` supports the same overload set as `std::map`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // y_absl::btree_map<int, TString> map1;
+ //
+ // * Initializer List constructor
+ //
+ // y_absl::btree_map<int, TString> map2 =
+ // {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+ //
+ // * Copy constructor
+ //
+ // y_absl::btree_map<int, TString> map3(map2);
+ //
+ // * Copy assignment operator
+ //
+ // y_absl::btree_map<int, TString> map4;
+ // map4 = map3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // y_absl::btree_map<int, TString> map5(std::move(map4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // y_absl::btree_map<int, TString> map6;
+ // map6 = std::move(map5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::pair<int, TString>> v = {{1, "a"}, {2, "b"}};
+ // y_absl::btree_map<int, TString> map7(v.begin(), v.end());
+ btree_map() {}
+ using Base::Base;
+
+ // btree_map::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_map`.
+ using Base::begin;
+
+ // btree_map::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_map`.
+ using Base::cbegin;
+
+ // btree_map::end()
+ //
+ // Returns an iterator to the end of the `btree_map`.
+ using Base::end;
+
+ // btree_map::cend()
+ //
+ // Returns a const iterator to the end of the `btree_map`.
+ using Base::cend;
+
+ // btree_map::empty()
+ //
+ // Returns whether or not the `btree_map` is empty.
+ using Base::empty;
+
+ // btree_map::max_size()
+ //
+  // Returns the largest theoretically possible number of elements within a
+ // `btree_map` under current memory constraints. This value can be thought
+ // of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_map<Key, T>`.
+ using Base::max_size;
+
+ // btree_map::size()
+ //
+ // Returns the number of elements currently within the `btree_map`.
+ using Base::size;
+
+ // btree_map::clear()
+ //
+ // Removes all elements from the `btree_map`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_map::erase()
+ //
+ // Erases elements within the `btree_map`. If an erase occurs, any references,
+ // pointers, or iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_map`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
+ using Base::erase;
+
+ // btree_map::insert()
+ //
+ // Inserts an element of the specified value into the `btree_map`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If an insertion
+ // occurs, any references, pointers, or iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // std::pair<iterator,bool> insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_map`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a bool denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_map`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ using Base::insert;
+
+ // btree_map::insert_or_assign()
+ //
+ // Inserts an element of the specified value into the `btree_map` provided
+ // that a value with the given key does not already exist, or replaces the
+ // corresponding mapped type with the forwarded `obj` argument if a key for
+ // that value already exists, returning an iterator pointing to the newly
+ // inserted element. Overloads are listed below.
+ //
+ // pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj):
+ // pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `btree_map`. If the returned bool is true, insertion took place, and if
+ // it's false, assignment took place.
+ //
+ // iterator insert_or_assign(const_iterator hint,
+ // const key_type& k, M&& obj):
+ // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `btree_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
+ using Base::insert_or_assign;
+
+ // btree_map::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_map`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace;
+
+ // btree_map::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_map`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace_hint;
+
+ // btree_map::try_emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_map`, provided that no element with the given key
+ // already exists. Unlike `emplace()`, if an element with the given key
+ // already exists, we guarantee that no element is constructed.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ //
+ // Overloads are listed below.
+ //
+ // std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args):
+ // std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `btree_map`.
+ //
+ // iterator try_emplace(const_iterator hint,
+ // const key_type& k, Args&&... args):
+ // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `btree_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
+ using Base::try_emplace;
+
+ // btree_map::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& k):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_map`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: when compiled in an earlier version of C++ than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
+ using Base::extract;
+
+ // btree_map::merge()
+ //
+ // Extracts elements from a given `source` btree_map into this
+ // `btree_map`. If the destination `btree_map` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // btree_map::swap(btree_map& other)
+ //
+ // Exchanges the contents of this `btree_map` with those of the `other`
+ // btree_map, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+  // All iterators and references on the `btree_map` remain valid, except
+ // for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_map::at()
+ //
+ // Returns a reference to the mapped value of the element with key equivalent
+ // to the passed key.
+ using Base::at;
+
+ // btree_map::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_map`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
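+  //
+  // Example (illustrative, assumes a transparent comparator such as
+  // `std::less<>`):
+  //
+  //   y_absl::btree_map<TString, int, std::less<>> m;
+  //   bool present = m.contains("key");  // no temporary TString is built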
+ using Base::contains;
+
+ // btree_map::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_map`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::count;
+
+ // btree_map::equal_range()
+ //
+ // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the `btree_map`.
+ using Base::equal_range;
+
+ // btree_map::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::find;
+
+ // btree_map::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is not less than `key` within the
+ // `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_map::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is greater than `key` within the
+ // `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
+ // btree_map::operator[]()
+ //
+ // Returns a reference to the value mapped to the passed key within the
+ // `btree_map`, performing an `insert()` if the key does not already
+ // exist.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated. Otherwise iterators are not affected and references are not
+ // invalidated. Overloads are listed below.
+ //
+ // T& operator[](key_type&& key):
+ // T& operator[](const key_type& key):
+ //
+ // Inserts a value_type object constructed in-place if the element with the
+ // given key does not exist.
+ using Base::operator[];
+
+ // btree_map::get_allocator()
+ //
+ // Returns the allocator function associated with this `btree_map`.
+ using Base::get_allocator;
+
+ // btree_map::key_comp();
+ //
+ // Returns the key comparator associated with this `btree_map`.
+ using Base::key_comp;
+
+ // btree_map::value_comp();
+ //
+ // Returns the value comparator associated with this `btree_map`.
+ using Base::value_comp;
+};
+
+// y_absl::swap(y_absl::btree_map<>, y_absl::btree_map<>)
+//
+// Swaps the contents of two `y_absl::btree_map` containers.
+template <typename K, typename V, typename C, typename A>
+void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) {
+ return x.swap(y);
+}
+
+// y_absl::erase_if(y_absl::btree_map<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
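+//
+// Example (illustrative):
+//
+//   y_absl::btree_map<int, TString> m = {{1, "a"}, {2, "b"}, {3, "c"}};
+//   y_absl::erase_if(m, [](const auto& kv) { return kv.first > 1; });
+//   // m now holds only {1, "a"}.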
+template <typename K, typename V, typename C, typename A, typename Pred>
+void erase_if(btree_map<K, V, C, A> &map, Pred pred) {
+ for (auto it = map.begin(); it != map.end();) {
+ if (pred(*it)) {
+ it = map.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+// y_absl::btree_multimap
+//
+// An `y_absl::btree_multimap<K, V>` is an ordered associative container of
+// keys and associated values designed to be a more efficient replacement for
+// `std::multimap` (in most cases). Unlike `y_absl::btree_map`, a B-tree multimap
+// allows multiple elements with equivalent keys.
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `y_absl::btree_multimap<K, V>` uses a default allocator of
+// `std::allocator<std::pair<const K, V>>` to allocate (and deallocate)
+// nodes, and construct and destruct values within those nodes. You may
+// instead specify a custom allocator `A` (which in turn requires specifying a
+// custom comparator `C`) as in `y_absl::btree_multimap<K, V, C, A>`.
+//
+template <typename Key, typename Value, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<std::pair<const Key, Value>>>
+class btree_multimap
+ : public container_internal::btree_multimap_container<
+ container_internal::btree<container_internal::map_params<
+ Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*Multi=*/true>>> {
+ using Base = typename btree_multimap::btree_multimap_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A `btree_multimap` supports the same overload set as `std::multimap`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // y_absl::btree_multimap<int, TString> map1;
+ //
+ // * Initializer List constructor
+ //
+ // y_absl::btree_multimap<int, TString> map2 =
+ // {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+ //
+ // * Copy constructor
+ //
+ // y_absl::btree_multimap<int, TString> map3(map2);
+ //
+ // * Copy assignment operator
+ //
+ // y_absl::btree_multimap<int, TString> map4;
+ // map4 = map3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // y_absl::btree_multimap<int, TString> map5(std::move(map4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // y_absl::btree_multimap<int, TString> map6;
+ // map6 = std::move(map5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::pair<int, TString>> v = {{1, "a"}, {2, "b"}};
+ // y_absl::btree_multimap<int, TString> map7(v.begin(), v.end());
+ btree_multimap() {}
+ using Base::Base;
+
+ // btree_multimap::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_multimap`.
+ using Base::begin;
+
+ // btree_multimap::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_multimap`.
+ using Base::cbegin;
+
+ // btree_multimap::end()
+ //
+ // Returns an iterator to the end of the `btree_multimap`.
+ using Base::end;
+
+ // btree_multimap::cend()
+ //
+ // Returns a const iterator to the end of the `btree_multimap`.
+ using Base::cend;
+
+ // btree_multimap::empty()
+ //
+ // Returns whether or not the `btree_multimap` is empty.
+ using Base::empty;
+
+ // btree_multimap::max_size()
+ //
+  // Returns the largest theoretically possible number of elements within a
+ // `btree_multimap` under current memory constraints. This value can be
+ // thought of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_multimap<Key, T>`.
+ using Base::max_size;
+
+ // btree_multimap::size()
+ //
+ // Returns the number of elements currently within the `btree_multimap`.
+ using Base::size;
+
+ // btree_multimap::clear()
+ //
+ // Removes all elements from the `btree_multimap`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_multimap::erase()
+ //
+ // Erases elements within the `btree_multimap`. If an erase occurs, any
+ // references, pointers, or iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_multimap`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the elements matching the key, if any exist, returning the
+ // number of elements erased.
+ using Base::erase;
+
+ // btree_multimap::insert()
+ //
+ // Inserts an element of the specified value into the `btree_multimap`,
+ // returning an iterator pointing to the newly inserted element.
+ // Any references, pointers, or iterators are invalidated. Overloads are
+ // listed below.
+ //
+ // iterator insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_multimap`, returning an iterator to the
+ // inserted element.
+ //
+ // iterator insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_multimap`, returning an iterator
+ // to the inserted element.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ using Base::insert;
+
+ // btree_multimap::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multimap`. Any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace;
+
+ // btree_multimap::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multimap`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search.
+ //
+ // Any references, pointers, or iterators are invalidated.
+ using Base::emplace_hint;
+
+ // btree_multimap::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& k):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_multimap`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: when compiled in an earlier version of C++ than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
+ using Base::extract;
+
+ // btree_multimap::merge()
+ //
+ // Extracts all elements from a given `source` btree_multimap into this
+ // `btree_multimap`.
+ using Base::merge;
+
+ // btree_multimap::swap(btree_multimap& other)
+ //
+ // Exchanges the contents of this `btree_multimap` with those of the `other`
+ // btree_multimap, avoiding invocation of any move, copy, or swap operations
+ // on individual elements.
+ //
+ // All iterators and references on the `btree_multimap` remain valid,
+  // except for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_multimap::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_multimap`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::contains;
+
+ // btree_multimap::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::count;
+
+ // btree_multimap::equal_range()
+ //
+ // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `btree_multimap`.
+ using Base::equal_range;
+
+ // btree_multimap::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::find;
+
+ // btree_multimap::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is not less than `key` within the
+ // `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_multimap::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is greater than `key` within the
+ // `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
+ // btree_multimap::get_allocator()
+ //
+  // Returns the allocator associated with this `btree_multimap`.
+ using Base::get_allocator;
+
+  // btree_multimap::key_comp()
+ //
+ // Returns the key comparator associated with this `btree_multimap`.
+ using Base::key_comp;
+
+  // btree_multimap::value_comp()
+ //
+ // Returns the value comparator associated with this `btree_multimap`.
+ using Base::value_comp;
+};
+
+// y_absl::swap(y_absl::btree_multimap<>, y_absl::btree_multimap<>)
+//
+// Swaps the contents of two `y_absl::btree_multimap` containers.
+template <typename K, typename V, typename C, typename A>
+void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
+ return x.swap(y);
+}
+
+// y_absl::erase_if(y_absl::btree_multimap<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
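+//
+// A minimal usage sketch (the contents of `m` are illustrative):
+//
+//   y_absl::btree_multimap<int, TString> m = {{1, "a"}, {2, "b"}, {3, "c"}};
+//   y_absl::erase_if(m, [](const std::pair<const int, TString>& kv) {
+//     return kv.first % 2 != 0;  // drop elements with odd keys
+//   });
+//   // `m` now contains only {2, "b"}.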
+template <typename K, typename V, typename C, typename A, typename Pred>
+void erase_if(btree_multimap<K, V, C, A> &map, Pred pred) {
+ for (auto it = map.begin(); it != map.end();) {
+ if (pred(*it)) {
+ it = map.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_BTREE_MAP_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_set.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_set.h
new file mode 100644
index 00000000000..905fb8e964c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_set.h
@@ -0,0 +1,728 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: btree_set.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines B-tree sets: sorted associative containers of
+// values.
+//
+// * `y_absl::btree_set<>`
+// * `y_absl::btree_multiset<>`
+//
+// These B-tree types are similar to the corresponding types in the STL
+// (`std::set` and `std::multiset`) and generally conform to the STL interfaces
+// of those types. However, because they are implemented using B-trees, they
+// are more efficient in most situations.
+//
+// Unlike `std::set` and `std::multiset`, which are commonly implemented using
+// red-black tree nodes, B-tree sets use more generic B-tree nodes able to hold
+// multiple values per node. Holding multiple values per node often makes
+// B-tree sets perform better than their `std::set` counterparts, because
+// multiple entries can be checked within the same cache hit.
+//
+// However, these types should not be considered drop-in replacements for
+// `std::set` and `std::multiset` as there are some API differences, which are
+// noted in this header file.
+//
+// Importantly, insertions and deletions may invalidate outstanding iterators,
+// pointers, and references to elements. Such invalidations are typically only
+// an issue if insertion and deletion operations are interleaved with the use of
+// more than one iterator, pointer, or reference simultaneously. For this
+// reason, `insert()` and `erase()` return a valid iterator at the current
+// position.
+
+#ifndef ABSL_CONTAINER_BTREE_SET_H_
+#define ABSL_CONTAINER_BTREE_SET_H_
+
+#include "y_absl/container/internal/btree.h" // IWYU pragma: export
+#include "y_absl/container/internal/btree_container.h" // IWYU pragma: export
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// y_absl::btree_set<>
+//
+// An `y_absl::btree_set<K>` is an ordered associative container of unique key
+// values designed to be a more efficient replacement for `std::set` (in most
+// cases).
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `y_absl::btree_set<K>` uses a default allocator of `std::allocator<K>` to
+// allocate (and deallocate) nodes, and construct and destruct values within
+// those nodes. You may instead specify a custom allocator `A` (which in turn
+// requires specifying a custom comparator `C`) as in
+// `y_absl::btree_set<K, C, A>`.
+//
+template <typename Key, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<Key>>
+class btree_set
+ : public container_internal::btree_set_container<
+ container_internal::btree<container_internal::set_params<
+ Key, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*Multi=*/false>>> {
+ using Base = typename btree_set::btree_set_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A `btree_set` supports the same overload set as `std::set`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // y_absl::btree_set<TString> set1;
+ //
+ // * Initializer List constructor
+ //
+ // y_absl::btree_set<TString> set2 =
+ // {{"huey"}, {"dewey"}, {"louie"},};
+ //
+ // * Copy constructor
+ //
+ // y_absl::btree_set<TString> set3(set2);
+ //
+ // * Copy assignment operator
+ //
+ // y_absl::btree_set<TString> set4;
+ // set4 = set3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // y_absl::btree_set<TString> set5(std::move(set4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // y_absl::btree_set<TString> set6;
+ // set6 = std::move(set5);
+ //
+ // * Range constructor
+ //
+ // std::vector<TString> v = {"a", "b"};
+ // y_absl::btree_set<TString> set7(v.begin(), v.end());
+ btree_set() {}
+ using Base::Base;
+
+ // btree_set::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_set`.
+ using Base::begin;
+
+ // btree_set::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_set`.
+ using Base::cbegin;
+
+ // btree_set::end()
+ //
+ // Returns an iterator to the end of the `btree_set`.
+ using Base::end;
+
+ // btree_set::cend()
+ //
+ // Returns a const iterator to the end of the `btree_set`.
+ using Base::cend;
+
+ // btree_set::empty()
+ //
+ // Returns whether or not the `btree_set` is empty.
+ using Base::empty;
+
+ // btree_set::max_size()
+ //
+  // Returns the largest theoretically possible number of elements within a
+ // `btree_set` under current memory constraints. This value can be thought
+ // of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_set<Key>`.
+ using Base::max_size;
+
+ // btree_set::size()
+ //
+ // Returns the number of elements currently within the `btree_set`.
+ using Base::size;
+
+ // btree_set::clear()
+ //
+ // Removes all elements from the `btree_set`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_set::erase()
+ //
+ // Erases elements within the `btree_set`. Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_set`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
+ using Base::erase;
+
+ // btree_set::insert()
+ //
+ // Inserts an element of the specified value into the `btree_set`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If an insertion
+ // occurs, any references, pointers, or iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // std::pair<iterator,bool> insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_set`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+  //   insertion) and a bool denoting whether the insertion took place (see
+  //   the sketch below).
+ //
+ // std::pair<iterator,bool> insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_set`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
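+  //
+  // A minimal sketch of checking the insertion result (values illustrative):
+  //
+  //   y_absl::btree_set<int> s;
+  //   auto result = s.insert(42);
+  //   // result.second == true: 42 was inserted.
+  //   result = s.insert(42);
+  //   // result.second == false: 42 was already present; *result.first == 42.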
+ using Base::insert;
+
+ // btree_set::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_set`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace;
+
+ // btree_set::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_set`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace_hint;
+
+ // btree_set::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& k):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_set`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
+ using Base::extract;
+
+ // btree_set::merge()
+ //
+ // Extracts elements from a given `source` btree_set into this
+ // `btree_set`. If the destination `btree_set` already contains an
+ // element with an equivalent key, that element is not extracted.
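+  //
+  // A minimal usage sketch (the contents are illustrative):
+  //
+  //   y_absl::btree_set<int> dst = {1, 2};
+  //   y_absl::btree_set<int> src = {2, 3};
+  //   dst.merge(src);
+  //   // dst == {1, 2, 3}; src == {2}, because an element with key 2 already
+  //   // existed in `dst` and was therefore not extracted.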
+ using Base::merge;
+
+ // btree_set::swap(btree_set& other)
+ //
+ // Exchanges the contents of this `btree_set` with those of the `other`
+ // btree_set, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+  // All iterators and references on the `btree_set` remain valid, except
+  // for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_set::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_set`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
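+  //
+  // A minimal sketch of heterogeneous lookup, assuming a transparent
+  // comparator such as `std::less<>` and that `TString` is comparable with
+  // string literals:
+  //
+  //   y_absl::btree_set<TString, std::less<>> s = {"huey", "dewey"};
+  //   bool has = s.contains("huey");  // no temporary TString is constructed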
+ using Base::contains;
+
+ // btree_set::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_set`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::count;
+
+ // btree_set::equal_range()
+ //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `btree_set`. Since a `btree_set` contains unique keys, this range holds
+  // at most one element.
+ using Base::equal_range;
+
+ // btree_set::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::find;
+
+ // btree_set::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element that is not less than `key` within the `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_set::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element that is greater than `key` within the `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
+ // btree_set::get_allocator()
+ //
+  // Returns the allocator associated with this `btree_set`.
+ using Base::get_allocator;
+
+  // btree_set::key_comp()
+ //
+ // Returns the key comparator associated with this `btree_set`.
+ using Base::key_comp;
+
+  // btree_set::value_comp()
+ //
+ // Returns the value comparator associated with this `btree_set`. The keys to
+ // sort the elements are the values themselves, therefore `value_comp` and its
+ // sibling member function `key_comp` are equivalent.
+ using Base::value_comp;
+};
+
+// y_absl::swap(y_absl::btree_set<>, y_absl::btree_set<>)
+//
+// Swaps the contents of two `y_absl::btree_set` containers.
+template <typename K, typename C, typename A>
+void swap(btree_set<K, C, A> &x, btree_set<K, C, A> &y) {
+ return x.swap(y);
+}
+
+// y_absl::erase_if(y_absl::btree_set<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+template <typename K, typename C, typename A, typename Pred>
+void erase_if(btree_set<K, C, A> &set, Pred pred) {
+ for (auto it = set.begin(); it != set.end();) {
+ if (pred(*it)) {
+ it = set.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+// y_absl::btree_multiset<>
+//
+// An `y_absl::btree_multiset<K>` is an ordered associative container of
+// values designed to be a more efficient replacement for `std::multiset`
+// (in most cases). Unlike `y_absl::btree_set`, a B-tree multiset allows
+// equivalent elements.
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `y_absl::btree_multiset<K>` uses a default allocator of `std::allocator<K>`
+// to allocate (and deallocate) nodes, and construct and destruct values within
+// those nodes. You may instead specify a custom allocator `A` (which in turn
+// requires specifying a custom comparator `C`) as in
+// `y_absl::btree_multiset<K, C, A>`.
+//
+template <typename Key, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<Key>>
+class btree_multiset
+ : public container_internal::btree_multiset_container<
+ container_internal::btree<container_internal::set_params<
+ Key, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*Multi=*/true>>> {
+ using Base = typename btree_multiset::btree_multiset_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+  // A `btree_multiset` supports the same overload set as `std::multiset`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // y_absl::btree_multiset<TString> set1;
+ //
+ // * Initializer List constructor
+ //
+ // y_absl::btree_multiset<TString> set2 =
+ // {{"huey"}, {"dewey"}, {"louie"},};
+ //
+ // * Copy constructor
+ //
+ // y_absl::btree_multiset<TString> set3(set2);
+ //
+ // * Copy assignment operator
+ //
+ // y_absl::btree_multiset<TString> set4;
+ // set4 = set3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // y_absl::btree_multiset<TString> set5(std::move(set4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // y_absl::btree_multiset<TString> set6;
+ // set6 = std::move(set5);
+ //
+ // * Range constructor
+ //
+ // std::vector<TString> v = {"a", "b"};
+ // y_absl::btree_multiset<TString> set7(v.begin(), v.end());
+ btree_multiset() {}
+ using Base::Base;
+
+ // btree_multiset::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_multiset`.
+ using Base::begin;
+
+ // btree_multiset::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_multiset`.
+ using Base::cbegin;
+
+ // btree_multiset::end()
+ //
+ // Returns an iterator to the end of the `btree_multiset`.
+ using Base::end;
+
+ // btree_multiset::cend()
+ //
+ // Returns a const iterator to the end of the `btree_multiset`.
+ using Base::cend;
+
+ // btree_multiset::empty()
+ //
+ // Returns whether or not the `btree_multiset` is empty.
+ using Base::empty;
+
+ // btree_multiset::max_size()
+ //
+  // Returns the largest theoretically possible number of elements within a
+ // `btree_multiset` under current memory constraints. This value can be
+ // thought of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_multiset<Key>`.
+ using Base::max_size;
+
+ // btree_multiset::size()
+ //
+ // Returns the number of elements currently within the `btree_multiset`.
+ using Base::size;
+
+ // btree_multiset::clear()
+ //
+ // Removes all elements from the `btree_multiset`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_multiset::erase()
+ //
+ // Erases elements within the `btree_multiset`. Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_multiset`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the elements matching the key, if any exist, returning the
+ // number of elements erased.
+ using Base::erase;
+
+ // btree_multiset::insert()
+ //
+ // Inserts an element of the specified value into the `btree_multiset`,
+ // returning an iterator pointing to the newly inserted element.
+ // Any references, pointers, or iterators are invalidated. Overloads are
+ // listed below.
+ //
+ // iterator insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_multiset`, returning an iterator to the
+ // inserted element.
+ //
+ // iterator insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_multiset`, returning an iterator
+ // to the inserted element.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ using Base::insert;
+
+ // btree_multiset::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multiset`. Any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace;
+
+ // btree_multiset::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multiset`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search.
+ //
+ // Any references, pointers, or iterators are invalidated.
+ using Base::emplace_hint;
+
+ // btree_multiset::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& k):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_multiset`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
+ using Base::extract;
+
+ // btree_multiset::merge()
+ //
+ // Extracts all elements from a given `source` btree_multiset into this
+ // `btree_multiset`.
+ using Base::merge;
+
+ // btree_multiset::swap(btree_multiset& other)
+ //
+ // Exchanges the contents of this `btree_multiset` with those of the `other`
+ // btree_multiset, avoiding invocation of any move, copy, or swap operations
+ // on individual elements.
+ //
+ // All iterators and references on the `btree_multiset` remain valid,
+  // except for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_multiset::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_multiset`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::contains;
+
+ // btree_multiset::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::count;
+
+ // btree_multiset::equal_range()
+ //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `btree_multiset`.
+ using Base::equal_range;
+
+ // btree_multiset::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::find;
+
+ // btree_multiset::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element that is not less than `key` within the
+ // `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_multiset::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element that is greater than `key` within the
+ // `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
+ // btree_multiset::get_allocator()
+ //
+  // Returns the allocator associated with this `btree_multiset`.
+ using Base::get_allocator;
+
+  // btree_multiset::key_comp()
+ //
+ // Returns the key comparator associated with this `btree_multiset`.
+ using Base::key_comp;
+
+  // btree_multiset::value_comp()
+ //
+ // Returns the value comparator associated with this `btree_multiset`. The
+ // keys to sort the elements are the values themselves, therefore `value_comp`
+ // and its sibling member function `key_comp` are equivalent.
+ using Base::value_comp;
+};
+
+// y_absl::swap(y_absl::btree_multiset<>, y_absl::btree_multiset<>)
+//
+// Swaps the contents of two `y_absl::btree_multiset` containers.
+template <typename K, typename C, typename A>
+void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
+ return x.swap(y);
+}
+
+// y_absl::erase_if(y_absl::btree_multiset<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+template <typename K, typename C, typename A, typename Pred>
+void erase_if(btree_multiset<K, C, A> &set, Pred pred) {
+ for (auto it = set.begin(); it != set.end();) {
+ if (pred(*it)) {
+ it = set.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_BTREE_SET_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_test.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_test.h
new file mode 100644
index 00000000000..97ed054ce8c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/btree_test.h
@@ -0,0 +1,166 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_BTREE_TEST_H_
+#define ABSL_CONTAINER_BTREE_TEST_H_
+
+#include <algorithm>
+#include <cassert>
+#include <random>
+#include <util/generic/string.h>
+#include <utility>
+#include <vector>
+
+#include "y_absl/container/btree_map.h"
+#include "y_absl/container/btree_set.h"
+#include "y_absl/container/flat_hash_set.h"
+#include "y_absl/strings/cord.h"
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Like remove_const but propagates the removal through std::pair.
+template <typename T>
+struct remove_pair_const {
+ using type = typename std::remove_const<T>::type;
+};
+template <typename T, typename U>
+struct remove_pair_const<std::pair<T, U> > {
+ using type = std::pair<typename remove_pair_const<T>::type,
+ typename remove_pair_const<U>::type>;
+};
+
+// Utility class to provide an accessor for a key given a value. The default
+// behavior is to treat the value as a pair and return the first element.
+template <typename K, typename V>
+struct KeyOfValue {
+ struct type {
+ const K& operator()(const V& p) const { return p.first; }
+ };
+};
+
+// Partial specialization of KeyOfValue class for when the key and value are
+// the same type such as in set<> and btree_set<>.
+template <typename K>
+struct KeyOfValue<K, K> {
+ struct type {
+ const K& operator()(const K& k) const { return k; }
+ };
+};
+
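+// Writes `val` into `buf` as a fixed-width, NUL-terminated base-64 string and
+// returns a pointer to its first character. The width is determined by
+// `maxval`, so all strings generated against the same `maxval` compare
+// lexicographically in the same order as their inputs compare numerically.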
+inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) {
+ assert(val <= maxval);
+  constexpr unsigned kBase = 64;  // a power of two: `%` and `/` compile to shifts.
+ unsigned p = 15;
+ buf[p--] = 0;
+ while (maxval > 0) {
+ buf[p--] = ' ' + (val % kBase);
+ val /= kBase;
+ maxval /= kBase;
+ }
+ return buf + p + 1;
+}
+
+template <typename K>
+struct Generator {
+ int maxval;
+ explicit Generator(int m) : maxval(m) {}
+ K operator()(int i) const {
+ assert(i <= maxval);
+ return K(i);
+ }
+};
+
+template <>
+struct Generator<y_absl::Time> {
+ int maxval;
+ explicit Generator(int m) : maxval(m) {}
+ y_absl::Time operator()(int i) const { return y_absl::FromUnixMillis(i); }
+};
+
+template <>
+struct Generator<TString> {
+ int maxval;
+ explicit Generator(int m) : maxval(m) {}
+ TString operator()(int i) const {
+ char buf[16];
+ return GenerateDigits(buf, i, maxval);
+ }
+};
+
+template <>
+struct Generator<Cord> {
+ int maxval;
+ explicit Generator(int m) : maxval(m) {}
+ Cord operator()(int i) const {
+ char buf[16];
+ return Cord(GenerateDigits(buf, i, maxval));
+ }
+};
+
+template <typename T, typename U>
+struct Generator<std::pair<T, U> > {
+ Generator<typename remove_pair_const<T>::type> tgen;
+ Generator<typename remove_pair_const<U>::type> ugen;
+
+ explicit Generator(int m) : tgen(m), ugen(m) {}
+ std::pair<T, U> operator()(int i) const {
+ return std::make_pair(tgen(i), ugen(i));
+ }
+};
+
+// Generates n distinct values for our tests and benchmarks. Value range is [0, maxval].
+inline std::vector<int> GenerateNumbersWithSeed(int n, int maxval, int seed) {
+ // NOTE: Some tests rely on generated numbers not changing between test runs.
+ // We use std::minstd_rand0 because it is well-defined, but don't use
+ // std::uniform_int_distribution because platforms use different algorithms.
+ std::minstd_rand0 rng(seed);
+
+ std::vector<int> values;
+ y_absl::flat_hash_set<int> unique_values;
+ if (values.size() < n) {
+ for (int i = values.size(); i < n; i++) {
+ int value;
+ do {
+ value = static_cast<int>(rng()) % (maxval + 1);
+ } while (!unique_values.insert(value).second);
+
+ values.push_back(value);
+ }
+ }
+ return values;
+}
+
+// Generates n values in the range [0, maxval].
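+//
+// A minimal usage sketch (the counts and seed are illustrative):
+//
+//   // 100 distinct TString values drawn from [0, 1000]; reproducible across
+//   // runs because both the seed and std::minstd_rand0 are fixed.
+//   std::vector<TString> vals =
+//       GenerateValuesWithSeed<TString>(100, 1000, /*seed=*/42);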
+template <typename V>
+std::vector<V> GenerateValuesWithSeed(int n, int maxval, int seed) {
+ const std::vector<int> nums = GenerateNumbersWithSeed(n, maxval, seed);
+ Generator<V> gen(maxval);
+ std::vector<V> vec;
+
+ vec.reserve(n);
+ for (int i = 0; i < n; i++) {
+ vec.push_back(gen(nums[i]));
+ }
+
+ return vec;
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_BTREE_TEST_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h
new file mode 100644
index 00000000000..33b6caf00d8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h
@@ -0,0 +1,527 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: fixed_array.h
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray<T>` represents a non-resizable array of `T` where the length of
+// the array can be determined at run-time. It is a good replacement for
+// non-standard and deprecated uses of `alloca()` and variable length arrays
+// within the GCC extension. (See
+// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html).
+//
+// `FixedArray` allocates small arrays inline, keeping performance fast by
+// avoiding heap operations. It also helps reduce the chances of
+// accidentally overflowing your stack if large input is passed to
+// your function.
+
+#ifndef ABSL_CONTAINER_FIXED_ARRAY_H_
+#define ABSL_CONTAINER_FIXED_ARRAY_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+
+#include "y_absl/algorithm/algorithm.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/dynamic_annotations.h"
+#include "y_absl/base/internal/throw_delegate.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/base/port.h"
+#include "y_absl/container/internal/compressed_tuple.h"
+#include "y_absl/memory/memory.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
+
+// -----------------------------------------------------------------------------
+// FixedArray
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray` provides a run-time fixed-size array, allocating a small array
+// inline for efficiency.
+//
+// Most users should not specify an `inline_elements` argument and let
+// `FixedArray` automatically determine the number of elements
+// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the
+// `FixedArray` implementation will use inline storage for arrays with a
+// length <= `inline_elements`.
+//
+// Note that a `FixedArray` constructed with a `size_type` argument will
+// default-initialize its values by leaving trivially constructible types
+// uninitialized (e.g. int, int[4], double), and others default-constructed.
+// This matches the behavior of C-style arrays and `std::array`, but not
+// `std::vector`.
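+//
+// A minimal usage sketch (the function and size are illustrative):
+//
+//   void Demo(size_t n) {
+//     y_absl::FixedArray<int> a(n);  // elements are uninitialized ints...
+//     a.fill(0);                     // ...so initialize them explicitly
+//     a[0] = 1;  // stored inline unless `n` exceeds the inline capacity
+//   }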
+template <typename T, size_t N = kFixedArrayUseDefault,
+ typename A = std::allocator<T>>
+class FixedArray {
+ static_assert(!std::is_array<T>::value || std::extent<T>::value > 0,
+ "Arrays with unknown bounds cannot be used with FixedArray.");
+
+ static constexpr size_t kInlineBytesDefault = 256;
+
+ using AllocatorTraits = std::allocator_traits<A>;
+ // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
+ // but this seems to be mostly pedantic.
+ template <typename Iterator>
+ using EnableIfForwardIterator = y_absl::enable_if_t<std::is_convertible<
+ typename std::iterator_traits<Iterator>::iterator_category,
+ std::forward_iterator_tag>::value>;
+ static constexpr bool NoexceptCopyable() {
+ return std::is_nothrow_copy_constructible<StorageElement>::value &&
+ y_absl::allocator_is_nothrow<allocator_type>::value;
+ }
+ static constexpr bool NoexceptMovable() {
+ return std::is_nothrow_move_constructible<StorageElement>::value &&
+ y_absl::allocator_is_nothrow<allocator_type>::value;
+ }
+ static constexpr bool DefaultConstructorIsNonTrivial() {
+ return !y_absl::is_trivially_default_constructible<StorageElement>::value;
+ }
+
+ public:
+ using allocator_type = typename AllocatorTraits::allocator_type;
+ using value_type = typename AllocatorTraits::value_type;
+ using pointer = typename AllocatorTraits::pointer;
+ using const_pointer = typename AllocatorTraits::const_pointer;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
+ using iterator = pointer;
+ using const_iterator = const_pointer;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ static constexpr size_type inline_elements =
+ (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
+ : static_cast<size_type>(N));
+
+ FixedArray(
+ const FixedArray& other,
+ const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable())
+ : FixedArray(other.begin(), other.end(), a) {}
+
+ FixedArray(
+ FixedArray&& other,
+ const allocator_type& a = allocator_type()) noexcept(NoexceptMovable())
+ : FixedArray(std::make_move_iterator(other.begin()),
+ std::make_move_iterator(other.end()), a) {}
+
+ // Creates an array object that can store `n` elements.
+ // Note that trivially constructible elements will be uninitialized.
+ explicit FixedArray(size_type n, const allocator_type& a = allocator_type())
+ : storage_(n, a) {
+ if (DefaultConstructorIsNonTrivial()) {
+ memory_internal::ConstructRange(storage_.alloc(), storage_.begin(),
+ storage_.end());
+ }
+ }
+
+ // Creates an array initialized with `n` copies of `val`.
+ FixedArray(size_type n, const value_type& val,
+ const allocator_type& a = allocator_type())
+ : storage_(n, a) {
+ memory_internal::ConstructRange(storage_.alloc(), storage_.begin(),
+ storage_.end(), val);
+ }
+
+ // Creates an array initialized with the size and contents of `init_list`.
+ FixedArray(std::initializer_list<value_type> init_list,
+ const allocator_type& a = allocator_type())
+ : FixedArray(init_list.begin(), init_list.end(), a) {}
+
+ // Creates an array initialized with the elements from the input
+ // range. The array's size will always be `std::distance(first, last)`.
+ // REQUIRES: Iterator must be a forward_iterator or better.
+ template <typename Iterator, EnableIfForwardIterator<Iterator>* = nullptr>
+ FixedArray(Iterator first, Iterator last,
+ const allocator_type& a = allocator_type())
+ : storage_(std::distance(first, last), a) {
+ memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last);
+ }
+
+ ~FixedArray() noexcept {
+ for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) {
+ AllocatorTraits::destroy(storage_.alloc(), cur);
+ }
+ }
+
+ // Assignments are deleted because they break the invariant that the size of a
+ // `FixedArray` never changes.
+ void operator=(FixedArray&&) = delete;
+ void operator=(const FixedArray&) = delete;
+
+ // FixedArray::size()
+ //
+ // Returns the length of the fixed array.
+ size_type size() const { return storage_.size(); }
+
+ // FixedArray::max_size()
+ //
+  // Returns the largest possible value of `std::distance(begin(), end())` for
+  // a `FixedArray<T>`: the maximum number of addressable bytes divided by the
+  // number of bytes taken by `T`.
+ constexpr size_type max_size() const {
+ return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
+ }
+
+ // FixedArray::empty()
+ //
+ // Returns whether or not the fixed array is empty.
+ bool empty() const { return size() == 0; }
+
+ // FixedArray::memsize()
+ //
+ // Returns the memory size of the fixed array in bytes.
+ size_t memsize() const { return size() * sizeof(value_type); }
+
+ // FixedArray::data()
+ //
+ // Returns a const T* pointer to elements of the `FixedArray`. This pointer
+ // can be used to access (but not modify) the contained elements.
+ const_pointer data() const { return AsValueType(storage_.begin()); }
+
+ // Overload of FixedArray::data() to return a T* pointer to elements of the
+ // fixed array. This pointer can be used to access and modify the contained
+ // elements.
+ pointer data() { return AsValueType(storage_.begin()); }
+
+ // FixedArray::operator[]
+ //
+  // Returns a reference to the ith element of the fixed array.
+ // REQUIRES: 0 <= i < size()
+ reference operator[](size_type i) {
+ ABSL_HARDENING_ASSERT(i < size());
+ return data()[i];
+ }
+
+  // Overload of FixedArray::operator[]() to return a const reference to the
+ // ith element of the fixed array.
+ // REQUIRES: 0 <= i < size()
+ const_reference operator[](size_type i) const {
+ ABSL_HARDENING_ASSERT(i < size());
+ return data()[i];
+ }
+
+ // FixedArray::at
+ //
+ // Bounds-checked access. Returns a reference to the ith element of the fixed
+  // array, or throws `std::out_of_range` if `i >= size()`.
+ reference at(size_type i) {
+ if (ABSL_PREDICT_FALSE(i >= size())) {
+ base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+ }
+ return data()[i];
+ }
+
+ // Overload of FixedArray::at() to return a const reference to the ith element
+ // of the fixed array.
+ const_reference at(size_type i) const {
+ if (ABSL_PREDICT_FALSE(i >= size())) {
+ base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+ }
+ return data()[i];
+ }
+
+ // FixedArray::front()
+ //
+ // Returns a reference to the first element of the fixed array.
+ reference front() {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
+ }
+
+ // Overload of FixedArray::front() to return a reference to the first element
+ // of a fixed array of const values.
+ const_reference front() const {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
+ }
+
+ // FixedArray::back()
+ //
+ // Returns a reference to the last element of the fixed array.
+ reference back() {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
+ }
+
+ // Overload of FixedArray::back() to return a reference to the last element
+ // of a fixed array of const values.
+ const_reference back() const {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
+ }
+
+ // FixedArray::begin()
+ //
+ // Returns an iterator to the beginning of the fixed array.
+ iterator begin() { return data(); }
+
+ // Overload of FixedArray::begin() to return a const iterator to the
+ // beginning of the fixed array.
+ const_iterator begin() const { return data(); }
+
+ // FixedArray::cbegin()
+ //
+ // Returns a const iterator to the beginning of the fixed array.
+ const_iterator cbegin() const { return begin(); }
+
+ // FixedArray::end()
+ //
+ // Returns an iterator to the end of the fixed array.
+ iterator end() { return data() + size(); }
+
+ // Overload of FixedArray::end() to return a const iterator to the end of the
+ // fixed array.
+ const_iterator end() const { return data() + size(); }
+
+ // FixedArray::cend()
+ //
+ // Returns a const iterator to the end of the fixed array.
+ const_iterator cend() const { return end(); }
+
+ // FixedArray::rbegin()
+ //
+ // Returns a reverse iterator from the end of the fixed array.
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+
+ // Overload of FixedArray::rbegin() to return a const reverse iterator from
+ // the end of the fixed array.
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
+
+ // FixedArray::crbegin()
+ //
+ // Returns a const reverse iterator from the end of the fixed array.
+ const_reverse_iterator crbegin() const { return rbegin(); }
+
+ // FixedArray::rend()
+ //
+ // Returns a reverse iterator from the beginning of the fixed array.
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+
+ // Overload of FixedArray::rend() for returning a const reverse iterator
+ // from the beginning of the fixed array.
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+
+ // FixedArray::crend()
+ //
+  // Returns a const reverse iterator from the beginning of the fixed array.
+ const_reverse_iterator crend() const { return rend(); }
+
+ // FixedArray::fill()
+ //
+ // Assigns the given `value` to all elements in the fixed array.
+ void fill(const value_type& val) { std::fill(begin(), end(), val); }
+
+ // Relational operators. Equality operators are elementwise using
+ // `operator==`, while order operators order FixedArrays lexicographically.
+ friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
+ return y_absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+ }
+
+ friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(lhs == rhs);
+ }
+
+ friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) {
+ return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(),
+ rhs.end());
+ }
+
+ friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) {
+ return rhs < lhs;
+ }
+
+ friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(rhs < lhs);
+ }
+
+ friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(lhs < rhs);
+ }
+
+ template <typename H>
+ friend H AbslHashValue(H h, const FixedArray& v) {
+ return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()),
+ v.size());
+ }
+
+ private:
+ // StorageElement
+ //
+ // For FixedArrays with a C-style-array value_type, StorageElement is a POD
+ // wrapper struct called StorageElementWrapper that holds the value_type
+ // instance inside. This is needed for construction and destruction of the
+ // entire array regardless of how many dimensions it has. For all other cases,
+ // StorageElement is just an alias of value_type.
+ //
+ // Maintainer's Note: The simpler solution would be to simply wrap value_type
+ // in a struct whether it's an array or not. That causes some paranoid
+ // diagnostics to misfire, believing that 'data()' returns a pointer to a
+ // single element, rather than the packed array that it really is.
+ // e.g.:
+ //
+ // FixedArray<char> buf(1);
+ // sprintf(buf.data(), "foo");
+ //
+ // error: call to int __builtin___sprintf_chk(etc...)
+ // will always overflow destination buffer [-Werror]
+ //
+ template <typename OuterT, typename InnerT = y_absl::remove_extent_t<OuterT>,
+ size_t InnerN = std::extent<OuterT>::value>
+ struct StorageElementWrapper {
+ InnerT array[InnerN];
+ };
+
+ using StorageElement =
+ y_absl::conditional_t<std::is_array<value_type>::value,
+ StorageElementWrapper<value_type>, value_type>;
+
+ static pointer AsValueType(pointer ptr) { return ptr; }
+ static pointer AsValueType(StorageElementWrapper<value_type>* ptr) {
+ return std::addressof(ptr->array);
+ }
+
+ static_assert(sizeof(StorageElement) == sizeof(value_type), "");
+ static_assert(alignof(StorageElement) == alignof(value_type), "");
+
+ class NonEmptyInlinedStorage {
+ public:
+ StorageElement* data() { return reinterpret_cast<StorageElement*>(buff_); }
+ void AnnotateConstruct(size_type n);
+ void AnnotateDestruct(size_type n);
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ void* RedzoneBegin() { return &redzone_begin_; }
+ void* RedzoneEnd() { return &redzone_end_ + 1; }
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
+
+ private:
+ ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_);
+ alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
+ ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_);
+ };
+
+ class EmptyInlinedStorage {
+ public:
+ StorageElement* data() { return nullptr; }
+ void AnnotateConstruct(size_type) {}
+ void AnnotateDestruct(size_type) {}
+ };
+
+ using InlinedStorage =
+ y_absl::conditional_t<inline_elements == 0, EmptyInlinedStorage,
+ NonEmptyInlinedStorage>;
+
+ // Storage
+ //
+ // An instance of Storage manages the inline and out-of-line memory for
+ // instances of FixedArray. This guarantees that even when construction of
+ // individual elements fails in the FixedArray constructor body, the
+ // destructor for Storage will still be called and out-of-line memory will be
+ // properly deallocated.
+ //
+ class Storage : public InlinedStorage {
+ public:
+ Storage(size_type n, const allocator_type& a)
+ : size_alloc_(n, a), data_(InitializeData()) {}
+
+ ~Storage() noexcept {
+ if (UsingInlinedStorage(size())) {
+ InlinedStorage::AnnotateDestruct(size());
+ } else {
+ AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size());
+ }
+ }
+
+ size_type size() const { return size_alloc_.template get<0>(); }
+ StorageElement* begin() const { return data_; }
+ StorageElement* end() const { return begin() + size(); }
+ allocator_type& alloc() { return size_alloc_.template get<1>(); }
+
+ private:
+ static bool UsingInlinedStorage(size_type n) {
+ return n <= inline_elements;
+ }
+
+ StorageElement* InitializeData() {
+ if (UsingInlinedStorage(size())) {
+ InlinedStorage::AnnotateConstruct(size());
+ return InlinedStorage::data();
+ } else {
+ return reinterpret_cast<StorageElement*>(
+ AllocatorTraits::allocate(alloc(), size()));
+ }
+ }
+
+ // `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s
+ container_internal::CompressedTuple<size_type, allocator_type> size_alloc_;
+ StorageElement* data_;
+ };
+
+ Storage storage_;
+};
+
+template <typename T, size_t N, typename A>
+constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
+
+template <typename T, size_t N, typename A>
+constexpr typename FixedArray<T, N, A>::size_type
+ FixedArray<T, N, A>::inline_elements;
+
+template <typename T, size_t N, typename A>
+void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
+ typename FixedArray<T, N, A>::size_type n) {
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ if (!n) return;
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(),
+ data() + n);
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(),
+ RedzoneBegin());
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
+ static_cast<void>(n); // Mark used when not in asan mode
+}
+
+template <typename T, size_t N, typename A>
+void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
+ typename FixedArray<T, N, A>::size_type n) {
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ if (!n) return;
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n,
+ RedzoneEnd());
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(),
+ data());
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
+ static_cast<void>(n); // Mark used when not in asan mode
+}
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_FIXED_ARRAY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h
new file mode 100644
index 00000000000..eb3f09f06d9
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h
@@ -0,0 +1,606 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: flat_hash_map.h
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::flat_hash_map<K, V>` is an unordered associative container of
+// unique keys and associated values designed to be a more efficient replacement
+// for `std::unordered_map`. Like `unordered_map`, search, insertion, and
+// deletion of map elements can be done, on average, as an `O(1)` operation. However,
+// `flat_hash_map` (and other unordered associative containers known as the
+// collection of Abseil "Swiss tables") contain other optimizations that result
+// in both memory and computation advantages.
+//
+// In most cases, your default choice for a hash map should be a map of type
+// `flat_hash_map`.
+
+#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_
+#define ABSL_CONTAINER_FLAT_HASH_MAP_H_
+
+#include <cstddef>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/algorithm/container.h"
+#include "y_absl/container/internal/container_memory.h"
+#include "y_absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
+#include "y_absl/container/internal/raw_hash_map.h" // IWYU pragma: export
+#include "y_absl/memory/memory.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <class K, class V>
+struct FlatHashMapPolicy;
+} // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// y_absl::flat_hash_map
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::flat_hash_map<K, V>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
+// the following notable differences:
+//
+// * Requires keys that are CopyConstructible
+// * Requires values that are MoveConstructible
+// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
+// `insert()`, provided that the map is provided a compatible heterogeneous
+// hashing function and equality operator.
+// * Invalidates any references and pointers to elements within the table after
+// `rehash()`.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (assigned, deleted, and empty) within the hash map.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `flat_hash_map` uses the `y_absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `y_absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `flat_hash_map`.
+// If your type is not yet supported by the `y_absl::Hash` framework, see
+// y_absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// NOTE: A `flat_hash_map` stores its value types directly inside its
+// implementation array to avoid memory indirection. Because a `flat_hash_map`
+// is designed to move data when rehashed, map values will not retain pointer
+// stability. If you require pointer stability, or if your values are large,
+// consider using `y_absl::flat_hash_map<Key, std::unique_ptr<Value>>` instead.
+// If your types are not moveable or you require pointer stability for keys,
+// consider `y_absl::node_hash_map`.
+//
+// Example:
+//
+// // Create a flat hash map of three strings (that map to strings)
+// y_absl::flat_hash_map<TString, TString> ducks =
+// {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+// // Insert a new element into the flat hash map
+// ducks.insert({"d", "donald"});
+//
+// // Force a rehash of the flat hash map
+// ducks.rehash(0);
+//
+// // Find the element with the key "b"
+// TString search_key = "b";
+// auto result = ducks.find(search_key);
+// if (result != ducks.end()) {
+// std::cout << "Result: " << result->second << std::endl;
+// }
+template <class K, class V,
+ class Hash = y_absl::container_internal::hash_default_hash<K>,
+ class Eq = y_absl::container_internal::hash_default_eq<K>,
+ class Allocator = std::allocator<std::pair<const K, V>>>
+class flat_hash_map : public y_absl::container_internal::raw_hash_map<
+ y_absl::container_internal::FlatHashMapPolicy<K, V>,
+ Hash, Eq, Allocator> {
+ using Base = typename flat_hash_map::raw_hash_map;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A flat_hash_map supports the same overload set as `std::unordered_map`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // // No allocation for the table's elements is made.
+ // y_absl::flat_hash_map<int, TString> map1;
+ //
+ // * Initializer List constructor
+ //
+ // y_absl::flat_hash_map<int, TString> map2 =
+ // {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+ //
+ // * Copy constructor
+ //
+ // y_absl::flat_hash_map<int, TString> map3(map2);
+ //
+ // * Copy assignment operator
+ //
+ // // Hash functor and Comparator are copied as well
+ // y_absl::flat_hash_map<int, TString> map4;
+ // map4 = map3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // y_absl::flat_hash_map<int, TString> map5(std::move(map4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // y_absl::flat_hash_map<int, TString> map6;
+ // map6 = std::move(map5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::pair<int, TString>> v = {{1, "a"}, {2, "b"}};
+ // y_absl::flat_hash_map<int, TString> map7(v.begin(), v.end());
+ flat_hash_map() {}
+ using Base::Base;
+
+ // flat_hash_map::begin()
+ //
+ // Returns an iterator to the beginning of the `flat_hash_map`.
+ using Base::begin;
+
+ // flat_hash_map::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `flat_hash_map`.
+ using Base::cbegin;
+
+ // flat_hash_map::cend()
+ //
+ // Returns a const iterator to the end of the `flat_hash_map`.
+ using Base::cend;
+
+ // flat_hash_map::end()
+ //
+ // Returns an iterator to the end of the `flat_hash_map`.
+ using Base::end;
+
+ // flat_hash_map::capacity()
+ //
+ // Returns the number of element slots (assigned, deleted, and empty)
+ // available within the `flat_hash_map`.
+ //
+ // NOTE: this member function is particular to `y_absl::flat_hash_map` and is
+ // not provided in the `std::unordered_map` API.
+ using Base::capacity;
+
+ // flat_hash_map::empty()
+ //
+ // Returns whether or not the `flat_hash_map` is empty.
+ using Base::empty;
+
+ // flat_hash_map::max_size()
+ //
+  // Returns the largest theoretically possible number of elements within a
+  // `flat_hash_map` under current memory constraints. This value can be
+  // thought of as the largest value of `std::distance(begin(), end())` for a
+  // `flat_hash_map<K, V>`.
+ using Base::max_size;
+
+ // flat_hash_map::size()
+ //
+ // Returns the number of elements currently within the `flat_hash_map`.
+ using Base::size;
+
+ // flat_hash_map::clear()
+ //
+ // Removes all elements from the `flat_hash_map`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ //
+ // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+ // the underlying buffer call `erase(begin(), end())`.
+ using Base::clear;
+
+ // flat_hash_map::erase()
+ //
+ // Erases elements within the `flat_hash_map`. Erasing does not trigger a
+ // rehash. Overloads are listed below.
+ //
+ // void erase(const_iterator pos):
+ //
+  //   Erases the element at `pos` of the `flat_hash_map`, returning
+ // `void`.
+ //
+ // NOTE: returning `void` in this case is different than that of STL
+ // containers in general and `std::unordered_map` in particular (which
+ // return an iterator to the element following the erased element). If that
+ // iterator is needed, simply post increment the iterator:
+ //
+ // map.erase(it++);
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning an iterator pointing to `last`.
+ //
+ // size_type erase(const key_type& key):
+ //
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
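+  //
+  // For illustration, a minimal sketch of the overloads above (assuming a
+  // map `m` of type `y_absl::flat_hash_map<int, TString>`):
+  //
+  //   auto it = m.find(1);
+  //   if (it != m.end()) m.erase(it);  // void erase(const_iterator)
+  //   m.erase(m.begin(), m.end());     // iterator erase(first, last)
+  //   size_t n = m.erase(2);           // size_type erase(key); n is 0 or 1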
+ using Base::erase;
+
+ // flat_hash_map::insert()
+ //
+ // Inserts an element of the specified value into the `flat_hash_map`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If rehashing occurs
+ // due to the insertion, all iterators are invalidated. Overloads are listed
+ // below.
+ //
+ // std::pair<iterator,bool> insert(const init_type& value):
+ //
+ // Inserts a value into the `flat_hash_map`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a bool denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(T&& value):
+ // std::pair<iterator,bool> insert(init_type&& value):
+ //
+ // Inserts a moveable value into the `flat_hash_map`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const init_type& value):
+ // iterator insert(const_iterator hint, T&& value):
+ // iterator insert(const_iterator hint, init_type&& value);
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently, for `flat_hash_map` we guarantee the
+ // first match is inserted.
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently within the initializer list, for
+ // `flat_hash_map` we guarantee the first match is inserted.
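+  //
+  // A brief sketch of the pair-returning overloads (the map `m` is
+  // illustrative, not part of this API):
+  //
+  //   y_absl::flat_hash_map<int, TString> m;
+  //   auto p = m.insert({1, "huey"});
+  //   assert(p.second);                 // inserted
+  //   auto q = m.insert({1, "dewey"});
+  //   assert(!q.second);                // key 1 already present; not replaced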
+ using Base::insert;
+
+ // flat_hash_map::insert_or_assign()
+ //
+ // Inserts an element of the specified value into the `flat_hash_map` provided
+ // that a value with the given key does not already exist, or replaces it with
+ // the element value if a key for that value already exists, returning an
+ // iterator pointing to the newly inserted element. If rehashing occurs due
+ // to the insertion, all existing iterators are invalidated. Overloads are
+ // listed below.
+ //
+  // pair<iterator, bool> insert_or_assign(const key_type& k, T&& obj):
+  // pair<iterator, bool> insert_or_assign(key_type&& k, T&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `flat_hash_map`.
+ //
+  // iterator insert_or_assign(const_iterator hint,
+  //                           const key_type& k, T&& obj):
+  // iterator insert_or_assign(const_iterator hint, key_type&& k, T&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `flat_hash_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
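+  //
+  // For example (continuing the illustrative map `m` above):
+  //
+  //   m.insert_or_assign(1, "huey");   // inserts {1, "huey"}
+  //   m.insert_or_assign(1, "dewey");  // key exists; value becomes "dewey"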
+ using Base::insert_or_assign;
+
+ // flat_hash_map::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `flat_hash_map`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace;
+
+ // flat_hash_map::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `flat_hash_map`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace_hint;
+
+ // flat_hash_map::try_emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `flat_hash_map`, provided that no element with the given key
+ // already exists. Unlike `emplace()`, if an element with the given key
+ // already exists, we guarantee that no element is constructed.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // pair<iterator, bool> try_emplace(const key_type& k, Args&&... args):
+ // pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `flat_hash_map`.
+ //
+  // iterator try_emplace(const_iterator hint,
+  //                      const key_type& k, Args&&... args):
+  // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `flat_hash_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
+ //
+ // All `try_emplace()` overloads make the same guarantees regarding rvalue
+ // arguments as `std::unordered_map::try_emplace()`, namely that these
+ // functions will not move from rvalue arguments if insertions do not happen.
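+  //
+  // A sketch of that rvalue guarantee (illustrative):
+  //
+  //   TString v = "dewey";
+  //   m.try_emplace(1, std::move(v));
+  //   // If key 1 already existed, the insertion did not happen and `v` is
+  //   // guaranteed to be left unmoved.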
+ using Base::try_emplace;
+
+ // flat_hash_map::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the key,value pair of the element at the indicated position and
+ // returns a node handle owning that extracted data.
+ //
+ // node_type extract(const key_type& x):
+ //
+ // Extracts the key,value pair of the element with a key matching the passed
+ // key value and returns a node handle owning that extracted data. If the
+ // `flat_hash_map` does not contain an element with a matching key, this
+ // function returns an empty node handle.
+ //
+ // NOTE: when compiled in an earlier version of C++ than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
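+  //
+  // For example (illustrative; `other` is a second, hypothetical map of the
+  // same type):
+  //
+  //   auto node = m.extract(1);
+  //   if (!node.empty()) {
+  //     other.insert(std::move(node));  // re-insert without copying the pair
+  //   }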
+ using Base::extract;
+
+ // flat_hash_map::merge()
+ //
+ // Extracts elements from a given `source` flat hash map into this
+ // `flat_hash_map`. If the destination `flat_hash_map` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // flat_hash_map::swap(flat_hash_map& other)
+ //
+ // Exchanges the contents of this `flat_hash_map` with those of the `other`
+ // flat hash map, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+ // All iterators and references on the `flat_hash_map` remain valid, excepting
+ // for the past-the-end iterator, which is invalidated.
+ //
+ // `swap()` requires that the flat hash map's hashing and key equivalence
+ // functions be Swappable, and are exchanged using unqualified calls to
+ // non-member `swap()`. If the map's allocator has
+ // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+ // set to `true`, the allocators are also exchanged using an unqualified call
+ // to non-member `swap()`; otherwise, the allocators are not swapped.
+ using Base::swap;
+
+ // flat_hash_map::rehash(count)
+ //
+ // Rehashes the `flat_hash_map`, setting the number of slots to be at least
+ // the passed value. If the new number of slots increases the load factor more
+ // than the current maximum load factor
+ // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+ // will be at least `size()` / `max_load_factor()`.
+ //
+ // To force a rehash, pass rehash(0).
+ //
+ // NOTE: unlike behavior in `std::unordered_map`, references are also
+ // invalidated upon a `rehash()`.
+ using Base::rehash;
+
+ // flat_hash_map::reserve(count)
+ //
+ // Sets the number of slots in the `flat_hash_map` to the number needed to
+ // accommodate at least `count` total elements without exceeding the current
+ // maximum load factor, and may rehash the container if needed.
+ using Base::reserve;
+
+ // flat_hash_map::at()
+ //
+ // Returns a reference to the mapped value of the element with key equivalent
+ // to the passed key.
+ using Base::at;
+
+ // flat_hash_map::contains()
+ //
+ // Determines whether an element with a key comparing equal to the given `key`
+ // exists within the `flat_hash_map`, returning `true` if so or `false`
+ // otherwise.
+ using Base::contains;
+
+ // flat_hash_map::count(const Key& key) const
+ //
+ // Returns the number of elements with a key comparing equal to the given
+  // `key` within the `flat_hash_map`. Note that this function will return
+ // either `1` or `0` since duplicate keys are not allowed within a
+ // `flat_hash_map`.
+ using Base::count;
+
+ // flat_hash_map::equal_range()
+ //
+ // Returns a closed range [first, last], defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `flat_hash_map`.
+ using Base::equal_range;
+
+ // flat_hash_map::find()
+ //
+ // Finds an element with the passed `key` within the `flat_hash_map`.
+ using Base::find;
+
+ // flat_hash_map::operator[]()
+ //
+ // Returns a reference to the value mapped to the passed key within the
+ // `flat_hash_map`, performing an `insert()` if the key does not already
+ // exist.
+ //
+ // If an insertion occurs and results in a rehashing of the container, all
+ // iterators are invalidated. Otherwise iterators are not affected and
+ // references are not invalidated. Overloads are listed below.
+ //
+ // T& operator[](const Key& key):
+ //
+ // Inserts an init_type object constructed in-place if the element with the
+ // given key does not exist.
+ //
+ // T& operator[](Key&& key):
+ //
+ // Inserts an init_type object constructed in-place provided that an element
+ // with the given key does not exist.
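+  //
+  // A common illustrative use is counting:
+  //
+  //   y_absl::flat_hash_map<TString, int> counts;
+  //   ++counts["huey"];  // inserts {"huey", 0} if absent, then increments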
+ using Base::operator[];
+
+ // flat_hash_map::bucket_count()
+ //
+ // Returns the number of "buckets" within the `flat_hash_map`. Note that
+ // because a flat hash map contains all elements within its internal storage,
+ // this value simply equals the current capacity of the `flat_hash_map`.
+ using Base::bucket_count;
+
+ // flat_hash_map::load_factor()
+ //
+ // Returns the current load factor of the `flat_hash_map` (the average number
+ // of slots occupied with a value within the hash map).
+ using Base::load_factor;
+
+ // flat_hash_map::max_load_factor()
+ //
+ // Manages the maximum load factor of the `flat_hash_map`. Overloads are
+ // listed below.
+ //
+ // float flat_hash_map::max_load_factor()
+ //
+ // Returns the current maximum load factor of the `flat_hash_map`.
+ //
+ // void flat_hash_map::max_load_factor(float ml)
+ //
+ // Sets the maximum load factor of the `flat_hash_map` to the passed value.
+ //
+ // NOTE: This overload is provided only for API compatibility with the STL;
+ // `flat_hash_map` will ignore any set load factor and manage its rehashing
+ // internally as an implementation detail.
+ using Base::max_load_factor;
+
+ // flat_hash_map::get_allocator()
+ //
+ // Returns the allocator function associated with this `flat_hash_map`.
+ using Base::get_allocator;
+
+ // flat_hash_map::hash_function()
+ //
+ // Returns the hashing function used to hash the keys within this
+ // `flat_hash_map`.
+ using Base::hash_function;
+
+ // flat_hash_map::key_eq()
+ //
+  // Returns the function used for comparing keys for equality.
+ using Base::key_eq;
+};
+
+// erase_if(flat_hash_map<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
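+//
+// For example, a sketch that removes all even keys (the map `m` is
+// illustrative):
+//
+//   y_absl::erase_if(m, [](const auto& kv) { return kv.first % 2 == 0; });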
+template <typename K, typename V, typename H, typename E, typename A,
+ typename Predicate>
+void erase_if(flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
+ container_internal::EraseIf(pred, &c);
+}
+
+namespace container_internal {
+
+template <class K, class V>
+struct FlatHashMapPolicy {
+ using slot_policy = container_internal::map_slot_policy<K, V>;
+ using slot_type = typename slot_policy::slot_type;
+ using key_type = K;
+ using mapped_type = V;
+ using init_type = std::pair</*non const*/ key_type, mapped_type>;
+
+ template <class Allocator, class... Args>
+ static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+ slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
+ }
+
+ template <class Allocator>
+ static void destroy(Allocator* alloc, slot_type* slot) {
+ slot_policy::destroy(alloc, slot);
+ }
+
+ template <class Allocator>
+ static void transfer(Allocator* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ slot_policy::transfer(alloc, new_slot, old_slot);
+ }
+
+ template <class F, class... Args>
+ static decltype(y_absl::container_internal::DecomposePair(
+ std::declval<F>(), std::declval<Args>()...))
+ apply(F&& f, Args&&... args) {
+ return y_absl::container_internal::DecomposePair(std::forward<F>(f),
+ std::forward<Args>(args)...);
+ }
+
+ static size_t space_used(const slot_type*) { return 0; }
+
+ static std::pair<const K, V>& element(slot_type* slot) { return slot->value; }
+
+ static V& value(std::pair<const K, V>* kv) { return kv->second; }
+ static const V& value(const std::pair<const K, V>* kv) { return kv->second; }
+};
+
+} // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in y_absl/algorithm/container.h
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+ y_absl::flat_hash_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+} // namespace container_algorithm_internal
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_set.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_set.h
new file mode 100644
index 00000000000..23fe02a9500
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_set.h
@@ -0,0 +1,504 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: flat_hash_set.h
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::flat_hash_set<T>` is an unordered associative container designed to
+// be a more efficient replacement for `std::unordered_set`. Like
+// `unordered_set`, search, insertion, and deletion of set elements can be done
+// as an `O(1)` operation. However, `flat_hash_set` (and other unordered
+// associative containers known as the collection of Abseil "Swiss tables")
+// contain other optimizations that result in both memory and computation
+// advantages.
+//
+// In most cases, your default choice for a hash set should be a set of type
+// `flat_hash_set`.
+#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
+#define ABSL_CONTAINER_FLAT_HASH_SET_H_
+
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/algorithm/container.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/container/internal/container_memory.h"
+#include "y_absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
+#include "y_absl/container/internal/raw_hash_set.h" // IWYU pragma: export
+#include "y_absl/memory/memory.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <typename T>
+struct FlatHashSetPolicy;
+} // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// y_absl::flat_hash_set
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::flat_hash_set<T>` is an unordered associative container which has
+// been optimized for both speed and memory footprint in most common use cases.
+// Its interface is similar to that of `std::unordered_set<T>` with the
+// following notable differences:
+//
+// * Requires keys that are CopyConstructible
+// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
+//   that the set is given a compatible heterogeneous hashing function and
+// equality operator.
+// * Invalidates any references and pointers to elements within the table after
+// `rehash()`.
+// * Contains a `capacity()` member function indicating the number of element
+// slots (open, deleted, and empty) within the hash set.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `flat_hash_set` uses the `y_absl::Hash` hashing framework. All
+// fundamental and Abseil types that support the `y_absl::Hash` framework have a
+// compatible equality operator for comparing insertions into `flat_hash_set`.
+// If your type is not yet supported by the `y_absl::Hash` framework, see
+// y_absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
+// array to avoid memory indirection. Because a `flat_hash_set` is designed to
+// move data when rehashed, set keys will not retain pointer stability. If you
+// require pointer stability, consider using
+// `y_absl::flat_hash_set<std::unique_ptr<T>>`. If your type is not moveable and
+// you require pointer stability, consider `y_absl::node_hash_set` instead.
+//
+// Example:
+//
+// // Create a flat hash set of three strings
+// y_absl::flat_hash_set<TString> ducks =
+// {"huey", "dewey", "louie"};
+//
+// // Insert a new element into the flat hash set
+// ducks.insert("donald");
+//
+// // Force a rehash of the flat hash set
+// ducks.rehash(0);
+//
+// // See if "dewey" is present
+// if (ducks.contains("dewey")) {
+// std::cout << "We found dewey!" << std::endl;
+// }
+template <class T, class Hash = y_absl::container_internal::hash_default_hash<T>,
+ class Eq = y_absl::container_internal::hash_default_eq<T>,
+ class Allocator = std::allocator<T>>
+class flat_hash_set
+ : public y_absl::container_internal::raw_hash_set<
+ y_absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator> {
+ using Base = typename flat_hash_set::raw_hash_set;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+  // A flat_hash_set supports the same overload set as `std::unordered_set`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // // No allocation for the table's elements is made.
+ // y_absl::flat_hash_set<TString> set1;
+ //
+ // * Initializer List constructor
+ //
+ // y_absl::flat_hash_set<TString> set2 =
+ // {{"huey"}, {"dewey"}, {"louie"},};
+ //
+ // * Copy constructor
+ //
+ // y_absl::flat_hash_set<TString> set3(set2);
+ //
+ // * Copy assignment operator
+ //
+ // // Hash functor and Comparator are copied as well
+ // y_absl::flat_hash_set<TString> set4;
+ // set4 = set3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // y_absl::flat_hash_set<TString> set5(std::move(set4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // y_absl::flat_hash_set<TString> set6;
+ // set6 = std::move(set5);
+ //
+ // * Range constructor
+ //
+ // std::vector<TString> v = {"a", "b"};
+ // y_absl::flat_hash_set<TString> set7(v.begin(), v.end());
+ flat_hash_set() {}
+ using Base::Base;
+
+ // flat_hash_set::begin()
+ //
+ // Returns an iterator to the beginning of the `flat_hash_set`.
+ using Base::begin;
+
+ // flat_hash_set::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `flat_hash_set`.
+ using Base::cbegin;
+
+ // flat_hash_set::cend()
+ //
+ // Returns a const iterator to the end of the `flat_hash_set`.
+ using Base::cend;
+
+ // flat_hash_set::end()
+ //
+ // Returns an iterator to the end of the `flat_hash_set`.
+ using Base::end;
+
+ // flat_hash_set::capacity()
+ //
+ // Returns the number of element slots (assigned, deleted, and empty)
+ // available within the `flat_hash_set`.
+ //
+ // NOTE: this member function is particular to `y_absl::flat_hash_set` and is
+  // not provided in the `std::unordered_set` API.
+ using Base::capacity;
+
+ // flat_hash_set::empty()
+ //
+ // Returns whether or not the `flat_hash_set` is empty.
+ using Base::empty;
+
+ // flat_hash_set::max_size()
+ //
+  // Returns the largest theoretically possible number of elements within a
+  // `flat_hash_set` under current memory constraints. This value can be
+  // thought of as the largest value of `std::distance(begin(), end())` for a
+  // `flat_hash_set<T>`.
+ using Base::max_size;
+
+ // flat_hash_set::size()
+ //
+ // Returns the number of elements currently within the `flat_hash_set`.
+ using Base::size;
+
+ // flat_hash_set::clear()
+ //
+ // Removes all elements from the `flat_hash_set`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ //
+ // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+ // the underlying buffer call `erase(begin(), end())`.
+ using Base::clear;
+
+ // flat_hash_set::erase()
+ //
+ // Erases elements within the `flat_hash_set`. Erasing does not trigger a
+ // rehash. Overloads are listed below.
+ //
+ // void erase(const_iterator pos):
+ //
+  //   Erases the element at `pos` of the `flat_hash_set`, returning
+ // `void`.
+ //
+ // NOTE: returning `void` in this case is different than that of STL
+ // containers in general and `std::unordered_set` in particular (which
+ // return an iterator to the element following the erased element). If that
+ // iterator is needed, simply post increment the iterator:
+ //
+ // set.erase(it++);
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning an iterator pointing to `last`.
+ //
+ // size_type erase(const key_type& key):
+ //
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
+ using Base::erase;
+
+ // flat_hash_set::insert()
+ //
+ // Inserts an element of the specified value into the `flat_hash_set`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If rehashing occurs
+ // due to the insertion, all iterators are invalidated. Overloads are listed
+ // below.
+ //
+ // std::pair<iterator,bool> insert(const T& value):
+ //
+ // Inserts a value into the `flat_hash_set`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a bool denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(T&& value):
+ //
+ // Inserts a moveable value into the `flat_hash_set`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const T& value):
+ // iterator insert(const_iterator hint, T&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently, for `flat_hash_set` we guarantee the
+ // first match is inserted.
+ //
+ // void insert(std::initializer_list<T> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently within the initializer list, for
+ // `flat_hash_set` we guarantee the first match is inserted.
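+  //
+  // A brief sketch (the set `ducks` from the example above):
+  //
+  //   auto p = ducks.insert("donald");
+  //   // p.second is false if "donald" was already present; p.first points to
+  //   // the element either way.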
+ using Base::insert;
+
+ // flat_hash_set::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `flat_hash_set`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace;
+
+ // flat_hash_set::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `flat_hash_set`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace_hint;
+
+ // flat_hash_set::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // node_type extract(const key_type& x):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `flat_hash_set`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ using Base::extract;
+
+ // flat_hash_set::merge()
+ //
+ // Extracts elements from a given `source` flat hash set into this
+ // `flat_hash_set`. If the destination `flat_hash_set` already contains an
+ // element with an equivalent key, that element is not extracted.
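+  //
+  // A minimal sketch (both sets illustrative):
+  //
+  //   y_absl::flat_hash_set<TString> src = {"huey", "dewey"};
+  //   y_absl::flat_hash_set<TString> dst = {"dewey"};
+  //   dst.merge(src);
+  //   // dst now holds "huey" and "dewey"; "dewey" stays behind in src.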
+ using Base::merge;
+
+ // flat_hash_set::swap(flat_hash_set& other)
+ //
+ // Exchanges the contents of this `flat_hash_set` with those of the `other`
+  // flat hash set, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+ // All iterators and references on the `flat_hash_set` remain valid, excepting
+ // for the past-the-end iterator, which is invalidated.
+ //
+ // `swap()` requires that the flat hash set's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+  // non-member `swap()`. If the set's allocator has
+ // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+ // set to `true`, the allocators are also exchanged using an unqualified call
+ // to non-member `swap()`; otherwise, the allocators are not swapped.
+ using Base::swap;
+
+ // flat_hash_set::rehash(count)
+ //
+ // Rehashes the `flat_hash_set`, setting the number of slots to be at least
+ // the passed value. If the new number of slots increases the load factor more
+ // than the current maximum load factor
+ // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+ // will be at least `size()` / `max_load_factor()`.
+ //
+ // To force a rehash, pass rehash(0).
+ //
+ // NOTE: unlike behavior in `std::unordered_set`, references are also
+ // invalidated upon a `rehash()`.
+ using Base::rehash;
+
+ // flat_hash_set::reserve(count)
+ //
+ // Sets the number of slots in the `flat_hash_set` to the number needed to
+ // accommodate at least `count` total elements without exceeding the current
+ // maximum load factor, and may rehash the container if needed.
+ using Base::reserve;
+
+ // flat_hash_set::contains()
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `flat_hash_set`, returning `true` if so or `false` otherwise.
+ using Base::contains;
+
+ // flat_hash_set::count(const Key& key) const
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+  // the `flat_hash_set`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `flat_hash_set`.
+ using Base::count;
+
+ // flat_hash_set::equal_range()
+ //
+ // Returns a closed range [first, last], defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `flat_hash_set`.
+ using Base::equal_range;
+
+ // flat_hash_set::find()
+ //
+ // Finds an element with the passed `key` within the `flat_hash_set`.
+ using Base::find;
+
+ // flat_hash_set::bucket_count()
+ //
+ // Returns the number of "buckets" within the `flat_hash_set`. Note that
+  // because a flat hash set contains all elements within its internal storage,
+ // this value simply equals the current capacity of the `flat_hash_set`.
+ using Base::bucket_count;
+
+ // flat_hash_set::load_factor()
+ //
+ // Returns the current load factor of the `flat_hash_set` (the average number
+  // of slots occupied with a value within the hash set).
+ using Base::load_factor;
+
+ // flat_hash_set::max_load_factor()
+ //
+ // Manages the maximum load factor of the `flat_hash_set`. Overloads are
+ // listed below.
+ //
+ // float flat_hash_set::max_load_factor()
+ //
+ // Returns the current maximum load factor of the `flat_hash_set`.
+ //
+ // void flat_hash_set::max_load_factor(float ml)
+ //
+ // Sets the maximum load factor of the `flat_hash_set` to the passed value.
+ //
+ // NOTE: This overload is provided only for API compatibility with the STL;
+ // `flat_hash_set` will ignore any set load factor and manage its rehashing
+ // internally as an implementation detail.
+ using Base::max_load_factor;
+
+ // flat_hash_set::get_allocator()
+ //
+ // Returns the allocator function associated with this `flat_hash_set`.
+ using Base::get_allocator;
+
+ // flat_hash_set::hash_function()
+ //
+ // Returns the hashing function used to hash the keys within this
+ // `flat_hash_set`.
+ using Base::hash_function;
+
+ // flat_hash_set::key_eq()
+ //
+  // Returns the function used for comparing keys for equality.
+ using Base::key_eq;
+};
+
+// erase_if(flat_hash_set<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+template <typename T, typename H, typename E, typename A, typename Predicate>
+void erase_if(flat_hash_set<T, H, E, A>& c, Predicate pred) {
+ container_internal::EraseIf(pred, &c);
+}
+
+namespace container_internal {
+
+template <class T>
+struct FlatHashSetPolicy {
+ using slot_type = T;
+ using key_type = T;
+ using init_type = T;
+ using constant_iterators = std::true_type;
+
+ template <class Allocator, class... Args>
+ static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+ y_absl::allocator_traits<Allocator>::construct(*alloc, slot,
+ std::forward<Args>(args)...);
+ }
+
+ template <class Allocator>
+ static void destroy(Allocator* alloc, slot_type* slot) {
+ y_absl::allocator_traits<Allocator>::destroy(*alloc, slot);
+ }
+
+ template <class Allocator>
+ static void transfer(Allocator* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ construct(alloc, new_slot, std::move(*old_slot));
+ destroy(alloc, old_slot);
+ }
+
+ static T& element(slot_type* slot) { return *slot; }
+
+ template <class F, class... Args>
+ static decltype(y_absl::container_internal::DecomposeValue(
+ std::declval<F>(), std::declval<Args>()...))
+ apply(F&& f, Args&&... args) {
+ return y_absl::container_internal::DecomposeValue(
+ std::forward<F>(f), std::forward<Args>(args)...);
+ }
+
+ static size_t space_used(const T*) { return 0; }
+};
+} // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in y_absl/algorithm/container.h
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<y_absl::flat_hash_set<Key, Hash, KeyEqual, Allocator>>
+ : std::true_type {};
+
+} // namespace container_algorithm_internal
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_FLAT_HASH_SET_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h
new file mode 100644
index 00000000000..34af121cf7c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h
@@ -0,0 +1,855 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: inlined_vector.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains the declaration and definition of an "inlined
+// vector" which behaves in an equivalent fashion to a `std::vector`, except
+// that storage for small sequences of the vector is provided inline without
+// requiring any heap allocation.
+//
+// An `y_absl::InlinedVector<T, N>` specifies the default capacity `N` as one of
+// its template parameters. Instances where `size() <= N` hold contained
+// elements in inline space. Typically `N` is very small so that sequences that
+// are expected to be short do not require allocations.
+//
+// An `y_absl::InlinedVector` does not usually require a specific allocator. If
+// the inlined vector grows beyond its initial constraints, it will need to
+// allocate (as any normal `std::vector` would). This is usually performed with
+// the default allocator (defined as `std::allocator<T>`). Optionally, a custom
+// allocator type may be specified as `A` in `y_absl::InlinedVector<T, N, A>`.
+
+#ifndef ABSL_CONTAINER_INLINED_VECTOR_H_
+#define ABSL_CONTAINER_INLINED_VECTOR_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/algorithm/algorithm.h"
+#include "y_absl/base/internal/throw_delegate.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/base/port.h"
+#include "y_absl/container/internal/inlined_vector.h"
+#include "y_absl/memory/memory.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+// -----------------------------------------------------------------------------
+// InlinedVector
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::InlinedVector` is designed to be a drop-in replacement for
+// `std::vector` for use cases where the vector's size is sufficiently small
+// that it can be inlined. If the inlined vector does grow beyond its estimated
+// capacity, it will trigger an initial allocation on the heap, and will behave
+// as a `std::vector`. The API of the `y_absl::InlinedVector` within this file is
+// designed to cover the same API footprint as covered by `std::vector`.
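+//
+// For example, a minimal usage sketch (illustrative; the API mirrors
+// `std::vector`):
+//
+//   y_absl::InlinedVector<int, 4> v = {1, 2, 3};
+//   v.push_back(4);  // still within the inline capacity N == 4
+//   v.push_back(5);  // exceeds N; storage is moved to the heap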
+template <typename T, size_t N, typename A = std::allocator<T>>
+class InlinedVector {
+ static_assert(N > 0, "`y_absl::InlinedVector` requires an inlined capacity.");
+
+ using Storage = inlined_vector_internal::Storage<T, N, A>;
+
+ template <typename TheA>
+ using AllocatorTraits = inlined_vector_internal::AllocatorTraits<TheA>;
+ template <typename TheA>
+ using MoveIterator = inlined_vector_internal::MoveIterator<TheA>;
+ template <typename TheA>
+ using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<TheA>;
+
+ template <typename TheA, typename Iterator>
+ using IteratorValueAdapter =
+ inlined_vector_internal::IteratorValueAdapter<TheA, Iterator>;
+ template <typename TheA>
+ using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter<TheA>;
+ template <typename TheA>
+ using DefaultValueAdapter =
+ inlined_vector_internal::DefaultValueAdapter<TheA>;
+
+ template <typename Iterator>
+ using EnableIfAtLeastForwardIterator = y_absl::enable_if_t<
+ inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
+ template <typename Iterator>
+ using DisableIfAtLeastForwardIterator = y_absl::enable_if_t<
+ !inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
+
+ public:
+ using allocator_type = A;
+ using value_type = inlined_vector_internal::ValueType<A>;
+ using pointer = inlined_vector_internal::Pointer<A>;
+ using const_pointer = inlined_vector_internal::ConstPointer<A>;
+ using size_type = inlined_vector_internal::SizeType<A>;
+ using difference_type = inlined_vector_internal::DifferenceType<A>;
+ using reference = inlined_vector_internal::Reference<A>;
+ using const_reference = inlined_vector_internal::ConstReference<A>;
+ using iterator = inlined_vector_internal::Iterator<A>;
+ using const_iterator = inlined_vector_internal::ConstIterator<A>;
+ using reverse_iterator = inlined_vector_internal::ReverseIterator<A>;
+ using const_reverse_iterator =
+ inlined_vector_internal::ConstReverseIterator<A>;
+
+ // ---------------------------------------------------------------------------
+ // InlinedVector Constructors and Destructor
+ // ---------------------------------------------------------------------------
+
+ // Creates an empty inlined vector with a value-initialized allocator.
+ InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {}
+
+ // Creates an empty inlined vector with a copy of `allocator`.
+ explicit InlinedVector(const allocator_type& allocator) noexcept
+ : storage_(allocator) {}
+
+ // Creates an inlined vector with `n` copies of `value_type()`.
+ explicit InlinedVector(size_type n,
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
+ storage_.Initialize(DefaultValueAdapter<A>(), n);
+ }
+
+ // Creates an inlined vector with `n` copies of `v`.
+ InlinedVector(size_type n, const_reference v,
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
+ storage_.Initialize(CopyValueAdapter<A>(std::addressof(v)), n);
+ }
+
+ // Creates an inlined vector with copies of the elements of `list`.
+ InlinedVector(std::initializer_list<value_type> list,
+ const allocator_type& allocator = allocator_type())
+ : InlinedVector(list.begin(), list.end(), allocator) {}
+
+ // Creates an inlined vector with elements constructed from the provided
+ // forward iterator range [`first`, `last`).
+ //
+ // NOTE: the `enable_if` prevents ambiguous interpretation between a call to
+ // this constructor with two integral arguments and a call to the above
+ // `InlinedVector(size_type, const_reference)` constructor.
+ template <typename ForwardIterator,
+ EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
+ InlinedVector(ForwardIterator first, ForwardIterator last,
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
+ storage_.Initialize(IteratorValueAdapter<A, ForwardIterator>(first),
+ std::distance(first, last));
+ }
+
+ // Creates an inlined vector with elements constructed from the provided input
+ // iterator range [`first`, `last`).
+ template <typename InputIterator,
+ DisableIfAtLeastForwardIterator<InputIterator> = 0>
+ InlinedVector(InputIterator first, InputIterator last,
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
+ std::copy(first, last, std::back_inserter(*this));
+ }
+
+ // Creates an inlined vector by copying the contents of `other` using
+ // `other`'s allocator.
+ InlinedVector(const InlinedVector& other)
+ : InlinedVector(other, other.storage_.GetAllocator()) {}
+
+ // Creates an inlined vector by copying the contents of `other` using the
+ // provided `allocator`.
+ InlinedVector(const InlinedVector& other, const allocator_type& allocator)
+ : storage_(allocator) {
+ if (other.empty()) {
+ // Empty; nothing to do.
+ } else if (IsMemcpyOk<A>::value && !other.storage_.GetIsAllocated()) {
+      // The contents are memcpy-able, and no allocation is needed.
+ storage_.MemcpyFrom(other.storage_);
+ } else {
+ storage_.InitFrom(other.storage_);
+ }
+ }
+
+ // Creates an inlined vector by moving in the contents of `other` without
+ // allocating. If `other` contains allocated memory, the newly-created inlined
+ // vector will take ownership of that memory. However, if `other` does not
+ // contain allocated memory, the newly-created inlined vector will perform
+ // element-wise move construction of the contents of `other`.
+ //
+ // NOTE: since no allocation is performed for the inlined vector in either
+ // case, the `noexcept(...)` specification depends on whether moving the
+  // underlying objects can throw. It is assumed that...
+ // a) move constructors should only throw due to allocation failure.
+ // b) if `value_type`'s move constructor allocates, it uses the same
+ // allocation function as the inlined vector's allocator.
+ // Thus, the move constructor is non-throwing if the allocator is non-throwing
+ // or `value_type`'s move constructor is specified as `noexcept`.
+ InlinedVector(InlinedVector&& other) noexcept(
+ y_absl::allocator_is_nothrow<allocator_type>::value ||
+ std::is_nothrow_move_constructible<value_type>::value)
+ : storage_(other.storage_.GetAllocator()) {
+ if (IsMemcpyOk<A>::value) {
+ storage_.MemcpyFrom(other.storage_);
+
+ other.storage_.SetInlinedSize(0);
+ } else if (other.storage_.GetIsAllocated()) {
+ storage_.SetAllocation({other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity()});
+ storage_.SetAllocatedSize(other.storage_.GetSize());
+
+ other.storage_.SetInlinedSize(0);
+ } else {
+ IteratorValueAdapter<A, MoveIterator<A>> other_values(
+ MoveIterator<A>(other.storage_.GetInlinedData()));
+
+ inlined_vector_internal::ConstructElements<A>(
+ storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
+ other.storage_.GetSize());
+
+ storage_.SetInlinedSize(other.storage_.GetSize());
+ }
+ }
+
+ // Creates an inlined vector by moving in the contents of `other` with a copy
+ // of `allocator`.
+ //
+ // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other`
+ // contains allocated memory, this move constructor will still allocate. Since
+ // allocation is performed, this constructor can only be `noexcept` if the
+ // specified allocator is also `noexcept`.
+ InlinedVector(
+ InlinedVector&& other,
+ const allocator_type& allocator)
+ noexcept(y_absl::allocator_is_nothrow<allocator_type>::value)
+ : storage_(allocator) {
+ if (IsMemcpyOk<A>::value) {
+ storage_.MemcpyFrom(other.storage_);
+
+ other.storage_.SetInlinedSize(0);
+ } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
+ other.storage_.GetIsAllocated()) {
+ storage_.SetAllocation({other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity()});
+ storage_.SetAllocatedSize(other.storage_.GetSize());
+
+ other.storage_.SetInlinedSize(0);
+ } else {
+ storage_.Initialize(IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(other.data())),
+ other.size());
+ }
+ }
+
+ ~InlinedVector() {}
+
+ // ---------------------------------------------------------------------------
+ // InlinedVector Member Accessors
+ // ---------------------------------------------------------------------------
+
+ // `InlinedVector::empty()`
+ //
+ // Returns whether the inlined vector contains no elements.
+ bool empty() const noexcept { return !size(); }
+
+ // `InlinedVector::size()`
+ //
+ // Returns the number of elements in the inlined vector.
+ size_type size() const noexcept { return storage_.GetSize(); }
+
+ // `InlinedVector::max_size()`
+ //
+ // Returns the maximum number of elements the inlined vector can hold.
+ size_type max_size() const noexcept {
+ // One bit of the size storage is used to indicate whether the inlined
+ // vector contains allocated memory. As a result, the maximum size that the
+ // inlined vector can express is half of the max for `size_type`.
+ return (std::numeric_limits<size_type>::max)() / 2;
+ }
+
+ // `InlinedVector::capacity()`
+ //
+ // Returns the number of elements that could be stored in the inlined vector
+ // without requiring a reallocation.
+ //
+ // NOTE: for most inlined vectors, `capacity()` should be equal to the
+  // template parameter `N`. Inlined vectors which exceed this capacity are no
+  // longer inlined, and `capacity()` will equal the capacity of the allocated
+  // memory.
+ size_type capacity() const noexcept {
+ return storage_.GetIsAllocated() ? storage_.GetAllocatedCapacity()
+ : storage_.GetInlinedCapacity();
+ }
+
+ // `InlinedVector::data()`
+ //
+ // Returns a `pointer` to the elements of the inlined vector. This pointer
+ // can be used to access and modify the contained elements.
+ //
+ // NOTE: only elements within [`data()`, `data() + size()`) are valid.
+ pointer data() noexcept {
+ return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
+ : storage_.GetInlinedData();
+ }
+
+ // Overload of `InlinedVector::data()` that returns a `const_pointer` to the
+ // elements of the inlined vector. This pointer can be used to access but not
+ // modify the contained elements.
+ //
+ // NOTE: only elements within [`data()`, `data() + size()`) are valid.
+ const_pointer data() const noexcept {
+ return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
+ : storage_.GetInlinedData();
+ }
+
+ // `InlinedVector::operator[](...)`
+ //
+ // Returns a `reference` to the `i`th element of the inlined vector.
+ reference operator[](size_type i) {
+ ABSL_HARDENING_ASSERT(i < size());
+ return data()[i];
+ }
+
+ // Overload of `InlinedVector::operator[](...)` that returns a
+ // `const_reference` to the `i`th element of the inlined vector.
+ const_reference operator[](size_type i) const {
+ ABSL_HARDENING_ASSERT(i < size());
+ return data()[i];
+ }
+
+ // `InlinedVector::at(...)`
+ //
+ // Returns a `reference` to the `i`th element of the inlined vector.
+ //
+ // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
+ // in both debug and non-debug builds, `std::out_of_range` will be thrown.
+ reference at(size_type i) {
+ if (ABSL_PREDICT_FALSE(i >= size())) {
+ base_internal::ThrowStdOutOfRange(
+ "`InlinedVector::at(size_type)` failed bounds check");
+ }
+ return data()[i];
+ }
+
+ // Overload of `InlinedVector::at(...)` that returns a `const_reference` to
+ // the `i`th element of the inlined vector.
+ //
+ // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
+ // in both debug and non-debug builds, `std::out_of_range` will be thrown.
+ const_reference at(size_type i) const {
+ if (ABSL_PREDICT_FALSE(i >= size())) {
+ base_internal::ThrowStdOutOfRange(
+ "`InlinedVector::at(size_type) const` failed bounds check");
+ }
+ return data()[i];
+ }
+
+ // `InlinedVector::front()`
+ //
+ // Returns a `reference` to the first element of the inlined vector.
+ reference front() {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
+ }
+
+ // Overload of `InlinedVector::front()` that returns a `const_reference` to
+ // the first element of the inlined vector.
+ const_reference front() const {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
+ }
+
+ // `InlinedVector::back()`
+ //
+ // Returns a `reference` to the last element of the inlined vector.
+ reference back() {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
+ }
+
+ // Overload of `InlinedVector::back()` that returns a `const_reference` to the
+ // last element of the inlined vector.
+ const_reference back() const {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
+ }
+
+ // `InlinedVector::begin()`
+ //
+ // Returns an `iterator` to the beginning of the inlined vector.
+ iterator begin() noexcept { return data(); }
+
+ // Overload of `InlinedVector::begin()` that returns a `const_iterator` to
+ // the beginning of the inlined vector.
+ const_iterator begin() const noexcept { return data(); }
+
+ // `InlinedVector::end()`
+ //
+ // Returns an `iterator` to the end of the inlined vector.
+ iterator end() noexcept { return data() + size(); }
+
+ // Overload of `InlinedVector::end()` that returns a `const_iterator` to the
+ // end of the inlined vector.
+ const_iterator end() const noexcept { return data() + size(); }
+
+ // `InlinedVector::cbegin()`
+ //
+ // Returns a `const_iterator` to the beginning of the inlined vector.
+ const_iterator cbegin() const noexcept { return begin(); }
+
+ // `InlinedVector::cend()`
+ //
+ // Returns a `const_iterator` to the end of the inlined vector.
+ const_iterator cend() const noexcept { return end(); }
+
+ // `InlinedVector::rbegin()`
+ //
+ // Returns a `reverse_iterator` from the end of the inlined vector.
+ reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }
+
+ // Overload of `InlinedVector::rbegin()` that returns a
+ // `const_reverse_iterator` from the end of the inlined vector.
+ const_reverse_iterator rbegin() const noexcept {
+ return const_reverse_iterator(end());
+ }
+
+ // `InlinedVector::rend()`
+ //
+ // Returns a `reverse_iterator` from the beginning of the inlined vector.
+ reverse_iterator rend() noexcept { return reverse_iterator(begin()); }
+
+ // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator`
+ // from the beginning of the inlined vector.
+ const_reverse_iterator rend() const noexcept {
+ return const_reverse_iterator(begin());
+ }
+
+ // `InlinedVector::crbegin()`
+ //
+ // Returns a `const_reverse_iterator` from the end of the inlined vector.
+ const_reverse_iterator crbegin() const noexcept { return rbegin(); }
+
+ // `InlinedVector::crend()`
+ //
+ // Returns a `const_reverse_iterator` from the beginning of the inlined
+ // vector.
+ const_reverse_iterator crend() const noexcept { return rend(); }
+
+ // `InlinedVector::get_allocator()`
+ //
+ // Returns a copy of the inlined vector's allocator.
+ allocator_type get_allocator() const { return storage_.GetAllocator(); }
+
+ // ---------------------------------------------------------------------------
+ // InlinedVector Member Mutators
+ // ---------------------------------------------------------------------------
+
+ // `InlinedVector::operator=(...)`
+ //
+ // Replaces the elements of the inlined vector with copies of the elements of
+ // `list`.
+ InlinedVector& operator=(std::initializer_list<value_type> list) {
+ assign(list.begin(), list.end());
+
+ return *this;
+ }
+
+ // Overload of `InlinedVector::operator=(...)` that replaces the elements of
+ // the inlined vector with copies of the elements of `other`.
+ InlinedVector& operator=(const InlinedVector& other) {
+ if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+ const_pointer other_data = other.data();
+ assign(other_data, other_data + other.size());
+ }
+
+ return *this;
+ }
+
+ // Overload of `InlinedVector::operator=(...)` that moves the elements of
+ // `other` into the inlined vector.
+ //
+ // NOTE: as a result of calling this overload, `other` is left in a valid but
+ // unspecified state.
+ InlinedVector& operator=(InlinedVector&& other) {
+ if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+ if (IsMemcpyOk<A>::value || other.storage_.GetIsAllocated()) {
+ inlined_vector_internal::DestroyElements<A>(storage_.GetAllocator(),
+ data(), size());
+ storage_.DeallocateIfAllocated();
+ storage_.MemcpyFrom(other.storage_);
+
+ other.storage_.SetInlinedSize(0);
+ } else {
+ storage_.Assign(IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(other.storage_.GetInlinedData())),
+ other.size());
+ }
+ }
+
+ return *this;
+ }
+
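+ // Example (illustrative; the element type is arbitrary):
+ //
+ //   y_absl::InlinedVector<TString, 2> a = {"x", "y"};
+ //   y_absl::InlinedVector<TString, 2> b;
+ //   b = std::move(a);
+ //   // `b` now holds {"x", "y"}; `a` is valid, but its contents are
+ //   // unspecified until it is assigned a new value.
+ //
+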
+ // `InlinedVector::assign(...)`
+ //
+ // Replaces the contents of the inlined vector with `n` copies of `v`.
+ void assign(size_type n, const_reference v) {
+ storage_.Assign(CopyValueAdapter<A>(std::addressof(v)), n);
+ }
+
+ // Overload of `InlinedVector::assign(...)` that replaces the contents of the
+ // inlined vector with copies of the elements of `list`.
+ void assign(std::initializer_list<value_type> list) {
+ assign(list.begin(), list.end());
+ }
+
+ // Overload of `InlinedVector::assign(...)` to replace the contents of the
+ // inlined vector with the range [`first`, `last`).
+ //
+ // NOTE: this overload is for iterators that are "forward" category or better.
+ template <typename ForwardIterator,
+ EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
+ void assign(ForwardIterator first, ForwardIterator last) {
+ storage_.Assign(IteratorValueAdapter<A, ForwardIterator>(first),
+ std::distance(first, last));
+ }
+
+ // Overload of `InlinedVector::assign(...)` to replace the contents of the
+ // inlined vector with the range [`first`, `last`).
+ //
+ // NOTE: this overload is for iterators that are "input" category.
+ template <typename InputIterator,
+ DisableIfAtLeastForwardIterator<InputIterator> = 0>
+ void assign(InputIterator first, InputIterator last) {
+ size_type i = 0;
+ for (; i < size() && first != last; ++i, static_cast<void>(++first)) {
+ data()[i] = *first;
+ }
+
+ erase(data() + i, data() + size());
+ std::copy(first, last, std::back_inserter(*this));
+ }
+
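+ // Example (illustrative; exercises all three `assign` overloads):
+ //
+ //   y_absl::InlinedVector<int, 4> v;
+ //   v.assign(3, 7);          // v == {7, 7, 7}
+ //   v.assign({1, 2, 3, 4});  // v == {1, 2, 3, 4}
+ //   int arr[2] = {5, 6};
+ //   v.assign(arr, arr + 2);  // v == {5, 6} (forward-iterator overload)
+ //
+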
+ // `InlinedVector::resize(...)`
+ //
+ // Resizes the inlined vector to contain `n` elements.
+ //
+ // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n`
+ // is larger than `size()`, new elements are value-initialized.
+ void resize(size_type n) {
+ ABSL_HARDENING_ASSERT(n <= max_size());
+ storage_.Resize(DefaultValueAdapter<A>(), n);
+ }
+
+ // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to
+ // contain `n` elements.
+ //
+ // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n`
+ // is larger than `size()`, new elements are copy-constructed from `v`.
+ void resize(size_type n, const_reference v) {
+ ABSL_HARDENING_ASSERT(n <= max_size());
+ storage_.Resize(CopyValueAdapter<A>(std::addressof(v)), n);
+ }
+
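+ // Example (illustrative):
+ //
+ //   y_absl::InlinedVector<int, 4> v = {1, 2, 3};
+ //   v.resize(5);     // v == {1, 2, 3, 0, 0}; growth value-initializes
+ //   v.resize(2);     // v == {1, 2}; shrinking destroys trailing elements
+ //   v.resize(4, 9);  // v == {1, 2, 9, 9}; growth copies from `9`
+ //
+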
+ // `InlinedVector::insert(...)`
+ //
+ // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly
+ // inserted element.
+ iterator insert(const_iterator pos, const_reference v) {
+ return emplace(pos, v);
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using
+ // move semantics, returning an `iterator` to the newly inserted element.
+ iterator insert(const_iterator pos, value_type&& v) {
+ return emplace(pos, std::move(v));
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies
+ // of `v` starting at `pos`, returning an `iterator` pointing to the first of
+ // the newly inserted elements.
+ iterator insert(const_iterator pos, size_type n, const_reference v) {
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
+
+ if (ABSL_PREDICT_TRUE(n != 0)) {
+ value_type dealias = v;
+ return storage_.Insert(pos, CopyValueAdapter<A>(std::addressof(dealias)),
+ n);
+ } else {
+ return const_cast<iterator>(pos);
+ }
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts copies of the
+ // elements of `list` starting at `pos`, returning an `iterator` pointing to
+ // the first of the newly inserted elements.
+ iterator insert(const_iterator pos, std::initializer_list<value_type> list) {
+ return insert(pos, list.begin(), list.end());
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts the range [`first`,
+ // `last`) starting at `pos`, returning an `iterator` pointing to the first
+ // of the newly inserted elements.
+ //
+ // NOTE: this overload is for iterators that are "forward" category or better.
+ template <typename ForwardIterator,
+ EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
+ iterator insert(const_iterator pos, ForwardIterator first,
+ ForwardIterator last) {
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
+
+ if (ABSL_PREDICT_TRUE(first != last)) {
+ return storage_.Insert(pos,
+ IteratorValueAdapter<A, ForwardIterator>(first),
+ std::distance(first, last));
+ } else {
+ return const_cast<iterator>(pos);
+ }
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts the range [`first`,
+ // `last`) starting at `pos`, returning an `iterator` pointing to the first
+ // of the newly inserted elements.
+ //
+ // NOTE: this overload is for iterators that are "input" category.
+ template <typename InputIterator,
+ DisableIfAtLeastForwardIterator<InputIterator> = 0>
+ iterator insert(const_iterator pos, InputIterator first, InputIterator last) {
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
+
+ size_type index = std::distance(cbegin(), pos);
+ for (size_type i = index; first != last; ++i, static_cast<void>(++first)) {
+ insert(data() + i, *first);
+ }
+
+ return iterator(data() + index);
+ }
+
+ // `InlinedVector::emplace(...)`
+ //
+ // Constructs and inserts an element using `args...` in the inlined vector at
+ // `pos`, returning an `iterator` pointing to the newly emplaced element.
+ template <typename... Args>
+ iterator emplace(const_iterator pos, Args&&... args) {
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
+
+ value_type dealias(std::forward<Args>(args)...);
+ return storage_.Insert(pos,
+ IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(std::addressof(dealias))),
+ 1);
+ }
+
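+ // Example (illustrative; the `dealias` temporary above makes it safe for
+ // `args` to reference an element of the vector itself):
+ //
+ //   y_absl::InlinedVector<int, 4> v = {1, 2, 3};
+ //   v.emplace(v.begin(), v.back());  // v == {3, 1, 2, 3}
+ //
+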
+ // `InlinedVector::emplace_back(...)`
+ //
+ // Constructs and inserts an element using `args...` in the inlined vector at
+ // `end()`, returning a `reference` to the newly emplaced element.
+ template <typename... Args>
+ reference emplace_back(Args&&... args) {
+ return storage_.EmplaceBack(std::forward<Args>(args)...);
+ }
+
+ // `InlinedVector::push_back(...)`
+ //
+ // Inserts a copy of `v` in the inlined vector at `end()`.
+ void push_back(const_reference v) { static_cast<void>(emplace_back(v)); }
+
+ // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()`
+ // using move semantics.
+ void push_back(value_type&& v) {
+ static_cast<void>(emplace_back(std::move(v)));
+ }
+
+ // `InlinedVector::pop_back()`
+ //
+ // Destroys the element at `back()`, reducing the size by `1`.
+ void pop_back() noexcept {
+ ABSL_HARDENING_ASSERT(!empty());
+
+ AllocatorTraits<A>::destroy(storage_.GetAllocator(), data() + (size() - 1));
+ storage_.SubtractSize(1);
+ }
+
+ // `InlinedVector::erase(...)`
+ //
+ // Erases the element at `pos`, returning an `iterator` pointing to where the
+ // erased element was located.
+ //
+ // NOTE: may return `end()`, which is not dereferenceable.
+ iterator erase(const_iterator pos) {
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos < end());
+
+ return storage_.Erase(pos, pos + 1);
+ }
+
+ // Overload of `InlinedVector::erase(...)` that erases every element in the
+ // range [`from`, `to`), returning an `iterator` pointing to where the first
+ // erased element was located.
+ //
+ // NOTE: may return `end()`, which is not dereferenceable.
+ iterator erase(const_iterator from, const_iterator to) {
+ ABSL_HARDENING_ASSERT(from >= begin());
+ ABSL_HARDENING_ASSERT(from <= to);
+ ABSL_HARDENING_ASSERT(to <= end());
+
+ if (ABSL_PREDICT_TRUE(from != to)) {
+ return storage_.Erase(from, to);
+ } else {
+ return const_cast<iterator>(from);
+ }
+ }
+
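+ // Example (illustrative erase-loop pattern using the returned iterator):
+ //
+ //   y_absl::InlinedVector<int, 4> v = {1, 2, 3, 4};
+ //   for (auto it = v.begin(); it != v.end();) {
+ //     if (*it % 2 == 0) {
+ //       it = v.erase(it);
+ //     } else {
+ //       ++it;
+ //     }
+ //   }
+ //   // v == {1, 3}
+ //
+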
+ // `InlinedVector::clear()`
+ //
+ // Destroys all elements in the inlined vector, setting the size to `0` and
+ // deallocating any held memory.
+ void clear() noexcept {
+ inlined_vector_internal::DestroyElements<A>(storage_.GetAllocator(), data(),
+ size());
+ storage_.DeallocateIfAllocated();
+
+ storage_.SetInlinedSize(0);
+ }
+
+ // `InlinedVector::reserve(...)`
+ //
+ // Ensures that there is enough room for at least `n` elements.
+ void reserve(size_type n) { storage_.Reserve(n); }
+
+ // `InlinedVector::shrink_to_fit()`
+ //
+ // Attempts to reduce memory usage by moving elements to (or keeping elements
+ // in) the smallest available buffer sufficient for containing `size()`
+ // elements.
+ //
+ // If `size()` is sufficiently small, the elements will be moved into (or kept
+ // in) the inlined space.
+ void shrink_to_fit() {
+ if (storage_.GetIsAllocated()) {
+ storage_.ShrinkToFit();
+ }
+ }
+
+ // `InlinedVector::swap(...)`
+ //
+ // Swaps the contents of the inlined vector with `other`.
+ void swap(InlinedVector& other) {
+ if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+ storage_.Swap(std::addressof(other.storage_));
+ }
+ }
+
+ private:
+ template <typename H, typename TheT, size_t TheN, typename TheA>
+ friend H AbslHashValue(H h, const y_absl::InlinedVector<TheT, TheN, TheA>& a);
+
+ Storage storage_;
+};
+
+// -----------------------------------------------------------------------------
+// InlinedVector Non-Member Functions
+// -----------------------------------------------------------------------------
+
+// `swap(...)`
+//
+// Swaps the contents of two inlined vectors.
+template <typename T, size_t N, typename A>
+void swap(y_absl::InlinedVector<T, N, A>& a,
+ y_absl::InlinedVector<T, N, A>& b) noexcept(noexcept(a.swap(b))) {
+ a.swap(b);
+}
+
+// `operator==(...)`
+//
+// Tests for value-equality of two inlined vectors.
+template <typename T, size_t N, typename A>
+bool operator==(const y_absl::InlinedVector<T, N, A>& a,
+ const y_absl::InlinedVector<T, N, A>& b) {
+ auto a_data = a.data();
+ auto b_data = b.data();
+ return y_absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
+}
+
+// `operator!=(...)`
+//
+// Tests for value-inequality of two inlined vectors.
+template <typename T, size_t N, typename A>
+bool operator!=(const y_absl::InlinedVector<T, N, A>& a,
+ const y_absl::InlinedVector<T, N, A>& b) {
+ return !(a == b);
+}
+
+// `operator<(...)`
+//
+// Tests whether the value of an inlined vector is less than the value of
+// another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator<(const y_absl::InlinedVector<T, N, A>& a,
+ const y_absl::InlinedVector<T, N, A>& b) {
+ auto a_data = a.data();
+ auto b_data = b.data();
+ return std::lexicographical_compare(a_data, a_data + a.size(), b_data,
+ b_data + b.size());
+}
+
+// `operator>(...)`
+//
+// Tests whether the value of an inlined vector is greater than the value of
+// another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator>(const y_absl::InlinedVector<T, N, A>& a,
+ const y_absl::InlinedVector<T, N, A>& b) {
+ return b < a;
+}
+
+// `operator<=(...)`
+//
+// Tests whether the value of an inlined vector is less than or equal to the
+// value of another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator<=(const y_absl::InlinedVector<T, N, A>& a,
+ const y_absl::InlinedVector<T, N, A>& b) {
+ return !(b < a);
+}
+
+// `operator>=(...)`
+//
+// Tests whether the value of an inlined vector is greater than or equal to the
+// value of another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator>=(const y_absl::InlinedVector<T, N, A>& a,
+ const y_absl::InlinedVector<T, N, A>& b) {
+ return !(a < b);
+}
+
+// `AbslHashValue(...)`
+//
+// Provides `y_absl::Hash` support for `y_absl::InlinedVector`. It is uncommon to
+// call this directly.
+template <typename H, typename T, size_t N, typename A>
+H AbslHashValue(H h, const y_absl::InlinedVector<T, N, A>& a) {
+ auto size = a.size();
+ return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size);
+}
+
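+// Example (illustrative; `y_absl::Hash` is the hashing framework shipped
+// alongside this container):
+//
+//   y_absl::InlinedVector<int, 4> v = {1, 2, 3};
+//   size_t h = y_absl::Hash<y_absl::InlinedVector<int, 4>>{}(v);
+//
+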
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INLINED_VECTOR_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/absl_hashtablez_sampler/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/absl_hashtablez_sampler/ya.make
new file mode 100644
index 00000000000..54874c04660
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/absl_hashtablez_sampler/ya.make
@@ -0,0 +1,49 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/container/internal)
+
+SRCS(
+ hashtablez_sampler.cc
+ hashtablez_sampler_force_weak_definition.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/btree.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/btree.h
new file mode 100644
index 00000000000..a2492608112
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/btree.h
@@ -0,0 +1,2641 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A btree implementation of the STL set and map interfaces. A btree is smaller
+// and generally also faster than STL set/map (refer to the benchmarks below).
+// The red-black tree implementation of STL set/map has an overhead of 3
+// pointers (left, right and parent) plus the node color information for each
+// stored value. So a set<int32_t> consumes 40 bytes for each value stored in
+// 64-bit mode. This btree implementation stores multiple values on fixed
+// size nodes (usually 256 bytes) and doesn't store child pointers for leaf
+// nodes. The result is that a btree_set<int32_t> may use much less memory per
+// stored value. For the random insertion benchmark in btree_bench.cc, a
+// btree_set<int32_t> with node-size of 256 uses 5.1 bytes per stored value.
+//
+// The packing of multiple values on to each node of a btree has another effect
+// besides better space utilization: better cache locality due to fewer cache
+// lines being accessed. Better cache locality translates into faster
+// operations.
+//
+// CAVEATS
+//
+// Insertions and deletions on a btree can cause splitting, merging or
+// rebalancing of btree nodes. And even without these operations, insertions
+// and deletions on a btree will move values around within a node. In both
+// cases, the result is that insertions and deletions can invalidate iterators
+// pointing to values other than the one being inserted/deleted. Therefore, this
+// container does not provide pointer stability. This is notably different from
+// STL set/map which takes care to not invalidate iterators on insert/erase
+// except, of course, for iterators pointing to the value being erased. A
+// partial workaround when erasing is available: erase() returns an iterator
+// pointing to the item just after the one that was erased (or end() if none
+// exists).
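+//
+// For example (illustrative; `y_absl::btree_set` is one of the public
+// containers built on this header):
+//
+//   y_absl::btree_set<int> s = {1, 2, 3, 4};
+//   for (auto it = s.begin(); it != s.end();) {
+//     if (*it % 2 == 0) {
+//       it = s.erase(it);  // safe: continue from the returned iterator
+//     } else {
+//       ++it;
+//     }
+//   }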
+
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <new>
+#include <util/generic/string.h>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/container/internal/common.h"
+#include "y_absl/container/internal/compressed_tuple.h"
+#include "y_absl/container/internal/container_memory.h"
+#include "y_absl/container/internal/layout.h"
+#include "y_absl/memory/memory.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/cord.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/types/compare.h"
+#include "y_absl/utility/utility.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A helper class that indicates if the Compare parameter is a key-compare-to
+// comparator.
+template <typename Compare, typename T>
+using btree_is_key_compare_to =
+ std::is_convertible<y_absl::result_of_t<Compare(const T &, const T &)>,
+ y_absl::weak_ordering>;
+
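+// For example (illustrative; `IntCompareTo` is a hypothetical user functor):
+//
+//   struct IntCompareTo {
+//     y_absl::weak_ordering operator()(int a, int b) const {
+//       return a < b   ? y_absl::weak_ordering::less
+//              : a > b ? y_absl::weak_ordering::greater
+//                      : y_absl::weak_ordering::equivalent;
+//     }
+//   };
+//   static_assert(btree_is_key_compare_to<IntCompareTo, int>::value, "");
+//
+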
+struct StringBtreeDefaultLess {
+ using is_transparent = void;
+
+ StringBtreeDefaultLess() = default;
+
+ // Compatibility constructors.
+ StringBtreeDefaultLess(std::less<TString>) {} // NOLINT
+ StringBtreeDefaultLess(std::less<y_absl::string_view>) {} // NOLINT
+ StringBtreeDefaultLess(std::less<y_absl::Cord>) {} // NOLINT
+
+ // Allow converting to std::less for use in key_comp()/value_comp().
+ explicit operator std::less<TString>() const { return {}; }
+ explicit operator std::less<y_absl::string_view>() const { return {}; }
+ explicit operator std::less<y_absl::Cord>() const { return {}; }
+
+ y_absl::weak_ordering operator()(y_absl::string_view lhs,
+ y_absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
+ }
+ y_absl::weak_ordering operator()(const y_absl::Cord &lhs,
+ const y_absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+ }
+ y_absl::weak_ordering operator()(const y_absl::Cord &lhs,
+ y_absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+ }
+ y_absl::weak_ordering operator()(y_absl::string_view lhs,
+ const y_absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs));
+ }
+};
+
+struct StringBtreeDefaultGreater {
+ using is_transparent = void;
+
+ StringBtreeDefaultGreater() = default;
+
+ // Compatibility constructors.
+ StringBtreeDefaultGreater(std::greater<TString>) {} // NOLINT
+ StringBtreeDefaultGreater(std::greater<y_absl::string_view>) {} // NOLINT
+ StringBtreeDefaultGreater(std::greater<y_absl::Cord>) {} // NOLINT
+
+ // Allow converting to std::greater for use in key_comp()/value_comp().
+ explicit operator std::greater<TString>() const { return {}; }
+ explicit operator std::greater<y_absl::string_view>() const { return {}; }
+ explicit operator std::greater<y_absl::Cord>() const { return {}; }
+
+ y_absl::weak_ordering operator()(y_absl::string_view lhs,
+ y_absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
+ }
+ y_absl::weak_ordering operator()(const y_absl::Cord &lhs,
+ const y_absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+ }
+ y_absl::weak_ordering operator()(const y_absl::Cord &lhs,
+ y_absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs));
+ }
+ y_absl::weak_ordering operator()(y_absl::string_view lhs,
+ const y_absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+ }
+};
+
+// A helper class to convert a boolean comparison into a three-way "compare-to"
+// comparison that returns an `y_absl::weak_ordering`. This helper
+// class is specialized for less<TString>, greater<TString>,
+// less<string_view>, greater<string_view>, less<y_absl::Cord>, and
+// greater<y_absl::Cord>.
+//
+// key_compare_to_adapter is provided so that btree users
+// automatically get the more efficient compare-to code when using common
+// Abseil string types with common comparison functors.
+// These string-like specializations also turn on heterogeneous lookup by
+// default.
+template <typename Compare>
+struct key_compare_to_adapter {
+ using type = Compare;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<TString>> {
+ using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<TString>> {
+ using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<y_absl::string_view>> {
+ using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<y_absl::string_view>> {
+ using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<y_absl::Cord>> {
+ using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<y_absl::Cord>> {
+ using type = StringBtreeDefaultGreater;
+};
+
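+// For example (illustrative), the specializations above mean that both of the
+// following hold:
+//
+//   static_assert(
+//       std::is_same<key_compare_to_adapter<std::less<TString>>::type,
+//                    StringBtreeDefaultLess>::value, "");
+//   static_assert(
+//       std::is_same<key_compare_to_adapter<std::greater<y_absl::Cord>>::type,
+//                    StringBtreeDefaultGreater>::value, "");
+//
+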
+// Detects an 'absl_btree_prefer_linear_node_search' member. This is
+// a protocol used as an opt-in or opt-out of linear search.
+//
+// For example, this would be useful for key types that wrap an integer
+// and define their own cheap operator<(). For example:
+//
+// class K {
+// public:
+// using absl_btree_prefer_linear_node_search = std::true_type;
+// ...
+// private:
+// friend bool operator<(K a, K b) { return a.k_ < b.k_; }
+// int k_;
+// };
+//
+// btree_map<K, V> m; // Uses linear search
+//
+// If T defines the preference tag, then it has a preference, and btree uses
+// the tag's truth value.
+template <typename T, typename = void>
+struct has_linear_node_search_preference : std::false_type {};
+template <typename T, typename = void>
+struct prefers_linear_node_search : std::false_type {};
+template <typename T>
+struct has_linear_node_search_preference<
+ T, y_absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+ : std::true_type {};
+template <typename T>
+struct prefers_linear_node_search<
+ T, y_absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+ : T::absl_btree_prefer_linear_node_search {};
+
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool Multi, typename SlotPolicy>
+struct common_params {
+ using original_key_compare = Compare;
+
+ // If Compare is a common comparator for a string-like type, then we adapt it
+ // to use heterogeneous lookup and to be a key-compare-to comparator.
+ using key_compare = typename key_compare_to_adapter<Compare>::type;
+ // A type which indicates if we have a key-compare-to functor or a plain old
+ // key-compare functor.
+ using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
+
+ using allocator_type = Alloc;
+ using key_type = Key;
+ using size_type = std::make_signed<size_t>::type;
+ using difference_type = ptrdiff_t;
+
+ using slot_policy = SlotPolicy;
+ using slot_type = typename slot_policy::slot_type;
+ using value_type = typename slot_policy::value_type;
+ using init_type = typename slot_policy::mutable_value_type;
+ using pointer = value_type *;
+ using const_pointer = const value_type *;
+ using reference = value_type &;
+ using const_reference = const value_type &;
+
+ // For the given lookup key type, returns whether we can have multiple
+ // equivalent keys in the btree. If this is a multi-container, then we can.
+ // Otherwise, we can have multiple equivalent keys only if all of the
+ // following conditions are met:
+ // - The comparator is transparent.
+ // - The lookup key type is not the same as key_type.
+ // - The comparator is not a StringBtreeDefault{Less,Greater} comparator
+ // that we know has the same equivalence classes for all lookup types.
+ template <typename LookupKey>
+ constexpr static bool can_have_multiple_equivalent_keys() {
+ return Multi ||
+ (IsTransparent<key_compare>::value &&
+ !std::is_same<LookupKey, Key>::value &&
+ !std::is_same<key_compare, StringBtreeDefaultLess>::value &&
+ !std::is_same<key_compare, StringBtreeDefaultGreater>::value);
+ }
+
+ enum {
+ kTargetNodeSize = TargetNodeSize,
+
+ // Upper bound for the available space for values. This is largest for leaf
+ // nodes, which have overhead of at least a pointer + 4 bytes (for storing
+ // 3 field_types and an enum).
+ kNodeValueSpace =
+ TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
+ };
+
+ // An integral type large enough to count as many values as will fit in a
+ // node of TargetNodeSize bytes.
+ using node_count_type =
+ y_absl::conditional_t<(kNodeValueSpace / sizeof(value_type) >
+ (std::numeric_limits<uint8_t>::max)()),
+ uint16_t, uint8_t>; // NOLINT
+
+ // The following methods are necessary for passing this struct as PolicyTraits
+ // for node_handle and/or are used within btree.
+ static value_type &element(slot_type *slot) {
+ return slot_policy::element(slot);
+ }
+ static const value_type &element(const slot_type *slot) {
+ return slot_policy::element(slot);
+ }
+ template <class... Args>
+ static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
+ slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
+ }
+ static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+ slot_policy::construct(alloc, slot, other);
+ }
+ static void destroy(Alloc *alloc, slot_type *slot) {
+ slot_policy::destroy(alloc, slot);
+ }
+ static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
+ construct(alloc, new_slot, old_slot);
+ destroy(alloc, old_slot);
+ }
+ static void swap(Alloc *alloc, slot_type *a, slot_type *b) {
+ slot_policy::swap(alloc, a, b);
+ }
+ static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
+ slot_policy::move(alloc, src, dest);
+ }
+};
+
+// A parameters structure for holding the type parameters for a btree_map.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Data, typename Compare, typename Alloc,
+ int TargetNodeSize, bool Multi>
+struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
+ map_slot_policy<Key, Data>> {
+ using super_type = typename map_params::common_params;
+ using mapped_type = Data;
+ // This type allows us to move keys when it is safe to do so. It is safe
+ // for maps in which value_type and mutable_value_type are layout compatible.
+ using slot_policy = typename super_type::slot_policy;
+ using slot_type = typename super_type::slot_type;
+ using value_type = typename super_type::value_type;
+ using init_type = typename super_type::init_type;
+
+ using original_key_compare = typename super_type::original_key_compare;
+ // Reference: https://en.cppreference.com/w/cpp/container/map/value_compare
+ class value_compare {
+ template <typename Params>
+ friend class btree;
+
+ protected:
+ explicit value_compare(original_key_compare c) : comp(std::move(c)) {}
+
+ original_key_compare comp; // NOLINT
+
+ public:
+ auto operator()(const value_type &lhs, const value_type &rhs) const
+ -> decltype(comp(lhs.first, rhs.first)) {
+ return comp(lhs.first, rhs.first);
+ }
+ };
+ using is_map_container = std::true_type;
+
+ template <typename V>
+ static auto key(const V &value) -> decltype(value.first) {
+ return value.first;
+ }
+ static const Key &key(const slot_type *s) { return slot_policy::key(s); }
+ static const Key &key(slot_type *s) { return slot_policy::key(s); }
+ // For use in node handle.
+ static auto mutable_key(slot_type *s)
+ -> decltype(slot_policy::mutable_key(s)) {
+ return slot_policy::mutable_key(s);
+ }
+ static mapped_type &value(value_type *value) { return value->second; }
+};
+
+// This type implements the necessary functions from the
+// y_absl::container_internal::slot_type interface.
+template <typename Key>
+struct set_slot_policy {
+ using slot_type = Key;
+ using value_type = Key;
+ using mutable_value_type = Key;
+
+ static value_type &element(slot_type *slot) { return *slot; }
+ static const value_type &element(const slot_type *slot) { return *slot; }
+
+ template <typename Alloc, class... Args>
+ static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
+ y_absl::allocator_traits<Alloc>::construct(*alloc, slot,
+ std::forward<Args>(args)...);
+ }
+
+ template <typename Alloc>
+ static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+ y_absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
+ }
+
+ template <typename Alloc>
+ static void destroy(Alloc *alloc, slot_type *slot) {
+ y_absl::allocator_traits<Alloc>::destroy(*alloc, slot);
+ }
+
+ template <typename Alloc>
+ static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) {
+ using std::swap;
+ swap(*a, *b);
+ }
+
+ template <typename Alloc>
+ static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
+ *dest = std::move(*src);
+ }
+};
+
+// A parameters structure for holding the type parameters for a btree_set.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool Multi>
+struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
+ set_slot_policy<Key>> {
+ using value_type = Key;
+ using slot_type = typename set_params::common_params::slot_type;
+ using value_compare =
+ typename set_params::common_params::original_key_compare;
+ using is_map_container = std::false_type;
+
+ template <typename V>
+ static const V &key(const V &value) { return value; }
+ static const Key &key(const slot_type *slot) { return *slot; }
+ static const Key &key(slot_type *slot) { return *slot; }
+};
+
+// An adapter class that converts a lower-bound compare into an upper-bound
+// compare. Note: there is no need to make a version of this adapter specialized
+// for key-compare-to functors because the upper-bound (the first value greater
+// than the input) is never an exact match.
+template <typename Compare>
+struct upper_bound_adapter {
+ explicit upper_bound_adapter(const Compare &c) : comp(c) {}
+ template <typename K1, typename K2>
+ bool operator()(const K1 &a, const K2 &b) const {
+ // Returns true when a is not greater than b.
+ return !compare_internal::compare_result_as_less_than(comp(b, a));
+ }
+
+ private:
+ Compare comp;
+};
+
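+// For example (illustrative): adapting `std::less<int>` yields
+// `operator()(a, b) == !(b < a)`, i.e. `a <= b`, so lower-bound style search
+// code run with the adapted comparator finds the first element strictly
+// greater than the key, which is exactly the upper bound.
+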
+enum class MatchKind : uint8_t { kEq, kNe };
+
+template <typename V, bool IsCompareTo>
+struct SearchResult {
+ V value;
+ MatchKind match;
+
+ static constexpr bool HasMatch() { return true; }
+ bool IsEq() const { return match == MatchKind::kEq; }
+};
+
+// When we don't use CompareTo, `match` is not present.
+// This ensures that callers can't use it accidentally when it provides no
+// useful information.
+template <typename V>
+struct SearchResult<V, false> {
+ SearchResult() {}
+ explicit SearchResult(V value) : value(value) {}
+ SearchResult(V value, MatchKind /*match*/) : value(value) {}
+
+ V value;
+
+ static constexpr bool HasMatch() { return false; }
+ static constexpr bool IsEq() { return false; }
+};
+
+// A node in the btree holding the stored values. The same node type is used
+// for both internal and leaf nodes, though the nodes are allocated in such a
+// way that the children array is only valid in internal nodes.
+template <typename Params>
+class btree_node {
+ using is_key_compare_to = typename Params::is_key_compare_to;
+ using field_type = typename Params::node_count_type;
+ using allocator_type = typename Params::allocator_type;
+ using slot_type = typename Params::slot_type;
+
+ public:
+ using params_type = Params;
+ using key_type = typename Params::key_type;
+ using value_type = typename Params::value_type;
+ using pointer = typename Params::pointer;
+ using const_pointer = typename Params::const_pointer;
+ using reference = typename Params::reference;
+ using const_reference = typename Params::const_reference;
+ using key_compare = typename Params::key_compare;
+ using size_type = typename Params::size_type;
+ using difference_type = typename Params::difference_type;
+
+ // Btree decides whether to use linear node search as follows:
+ // - If the comparator expresses a preference, use that.
+ // - If the key expresses a preference, use that.
+ // - If the key is arithmetic and the comparator is std::less or
+ // std::greater, choose linear.
+ // - Otherwise, choose binary.
+ // TODO(ezb): Might make sense to add condition(s) based on node-size.
+ using use_linear_search = std::integral_constant<
+ bool,
+ has_linear_node_search_preference<key_compare>::value
+ ? prefers_linear_node_search<key_compare>::value
+ : has_linear_node_search_preference<key_type>::value
+ ? prefers_linear_node_search<key_type>::value
+ : std::is_arithmetic<key_type>::value &&
+ (std::is_same<std::less<key_type>, key_compare>::value ||
+ std::is_same<std::greater<key_type>,
+ key_compare>::value)>;
+
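+ // For example (illustrative): under these rules, `std::less<int>` yields
+ // linear node search, while string keys compared with the
+ // StringBtreeDefault* functors (no stated preference, non-arithmetic key)
+ // use binary search.
+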
+ // This class is organized by y_absl::container_internal::Layout as if it had
+ // the following structure:
+ // // A pointer to the node's parent.
+ // btree_node *parent;
+ //
+ // // The position of the node in the node's parent.
+ // field_type position;
+ // // The index of the first populated value in `values`.
+ // // TODO(ezb): right now, `start` is always 0. Update insertion/merge
+ // // logic to allow for floating storage within nodes.
+ // field_type start;
+ // // The index after the last populated value in `values`. Currently, this
+ // // is the same as the count of values.
+ // field_type finish;
+ // // The maximum number of values the node can hold. This is an integer in
+ // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
+ // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
+ // // nodes (even though there are still kNodeSlots values in the node).
+ // // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
+ // // to free extra bits for is_root, etc.
+ // field_type max_count;
+ //
+ // // The array of values. The capacity is `max_count` for leaf nodes and
+ // // kNodeSlots for internal nodes. Only the values in
+ // // [start, finish) have been initialized and are valid.
+ // slot_type values[max_count];
+ //
+ // // The array of child pointers. The keys in children[i] are all less
+ // // than key(i). The keys in children[i + 1] are all greater than key(i).
+ // // There are 0 children for leaf nodes and kNodeSlots + 1 children for
+ // // internal nodes.
+ // btree_node *children[kNodeSlots + 1];
+ //
+ // This class is only constructed by EmptyNodeType. Normally, pointers to the
+ // layout above are allocated, cast to btree_node*, and de-allocated within
+ // the btree implementation.
+ ~btree_node() = default;
+ btree_node(btree_node const &) = delete;
+ btree_node &operator=(btree_node const &) = delete;
+
+ // Public for EmptyNodeType.
+ constexpr static size_type Alignment() {
+ static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(),
+ "Alignment of all nodes must be equal.");
+ return InternalLayout().Alignment();
+ }
+
+ protected:
+ btree_node() = default;
+
+ private:
+ using layout_type = y_absl::container_internal::Layout<btree_node *, field_type,
+ slot_type, btree_node *>;
+ constexpr static size_type SizeWithNSlots(size_type n) {
+ return layout_type(/*parent*/ 1,
+ /*position, start, finish, max_count*/ 4,
+ /*slots*/ n,
+ /*children*/ 0)
+ .AllocSize();
+ }
+ // A lower bound for the overhead of fields other than values in a leaf node.
+ constexpr static size_type MinimumOverhead() {
+ return SizeWithNSlots(1) - sizeof(value_type);
+ }
+
+ // Compute how many values we can fit onto a leaf node taking into account
+ // padding.
+ constexpr static size_type NodeTargetSlots(const int begin, const int end) {
+ return begin == end ? begin
+ : SizeWithNSlots((begin + end) / 2 + 1) >
+ params_type::kTargetNodeSize
+ ? NodeTargetSlots(begin, (begin + end) / 2)
+ : NodeTargetSlots((begin + end) / 2 + 1, end);
+ }
+
+ enum {
+ kTargetNodeSize = params_type::kTargetNodeSize,
+ kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
+
+ // We need a minimum of 3 slots per internal node in order to perform
+ // splitting (1 value for the two nodes involved in the split and 1 value
+ // propagated to the parent as the delimiter for the split). For performance
+ // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
+ // of 1/3 (for a node, not a b-tree).
+ kMinNodeSlots = 4,
+
+ kNodeSlots =
+ kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
+
+ // The node is internal (i.e. is not a leaf node) if and only if `max_count`
+ // has this value.
+ kInternalNodeMaxCount = 0,
+ };
+
+ // Leaves can have fewer than kNodeSlots values.
+ constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
+ return layout_type(/*parent*/ 1,
+ /*position, start, finish, max_count*/ 4,
+ /*slots*/ slot_count,
+ /*children*/ 0);
+ }
+ constexpr static layout_type InternalLayout() {
+ return layout_type(/*parent*/ 1,
+ /*position, start, finish, max_count*/ 4,
+ /*slots*/ kNodeSlots,
+ /*children*/ kNodeSlots + 1);
+ }
+ constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
+ return LeafLayout(slot_count).AllocSize();
+ }
+ constexpr static size_type InternalSize() {
+ return InternalLayout().AllocSize();
+ }
+
+ // N is the index of the type in the Layout definition.
+ // ElementType<N> is the Nth type in the Layout definition.
+ template <size_type N>
+ inline typename layout_type::template ElementType<N> *GetField() {
+ // We assert that we don't read from values that aren't there.
+ assert(N < 3 || !leaf());
+ return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
+ }
+ template <size_type N>
+ inline const typename layout_type::template ElementType<N> *GetField() const {
+ assert(N < 3 || !leaf());
+ return InternalLayout().template Pointer<N>(
+ reinterpret_cast<const char *>(this));
+ }
+ void set_parent(btree_node *p) { *GetField<0>() = p; }
+ field_type &mutable_finish() { return GetField<1>()[2]; }
+ slot_type *slot(int i) { return &GetField<2>()[i]; }
+ slot_type *start_slot() { return slot(start()); }
+ slot_type *finish_slot() { return slot(finish()); }
+ const slot_type *slot(int i) const { return &GetField<2>()[i]; }
+ void set_position(field_type v) { GetField<1>()[0] = v; }
+ void set_start(field_type v) { GetField<1>()[1] = v; }
+ void set_finish(field_type v) { GetField<1>()[2] = v; }
+ // This method is only called by the node init methods.
+ void set_max_count(field_type v) { GetField<1>()[3] = v; }
+
+ public:
+ // Whether this is a leaf node or not. This value doesn't change after the
+ // node is created.
+ bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; }
+
+ // Getter for the position of this node in its parent.
+ field_type position() const { return GetField<1>()[0]; }
+
+ // Getter for the offset of the first value in the `values` array.
+ field_type start() const {
+ // TODO(ezb): when floating storage is implemented, return GetField<1>()[1];
+ assert(GetField<1>()[1] == 0);
+ return 0;
+ }
+
+ // Getter for the offset after the last value in the `values` array.
+ field_type finish() const { return GetField<1>()[2]; }
+
+ // Getters for the number of values stored in this node.
+ field_type count() const {
+ assert(finish() >= start());
+ return finish() - start();
+ }
+ field_type max_count() const {
+ // Internal nodes have max_count==kInternalNodeMaxCount.
+ // Leaf nodes have max_count in [1, kNodeSlots].
+ const field_type max_count = GetField<1>()[3];
+ return max_count == field_type{kInternalNodeMaxCount}
+ ? field_type{kNodeSlots}
+ : max_count;
+ }
+
+ // Getter for the parent of this node.
+ btree_node *parent() const { return *GetField<0>(); }
+ // Getter for whether the node is the root of the tree. The parent of the
+ // root of the tree is the leftmost node in the tree, which is guaranteed to
+ // be a leaf.
+ bool is_root() const { return parent()->leaf(); }
+ void make_root() {
+ assert(parent()->is_root());
+ set_parent(parent()->parent());
+ }
+
+ // Getters for the key/value at position i in the node.
+ const key_type &key(int i) const { return params_type::key(slot(i)); }
+ reference value(int i) { return params_type::element(slot(i)); }
+ const_reference value(int i) const { return params_type::element(slot(i)); }
+
+ // Getters/setter for the child at position i in the node.
+ btree_node *child(int i) const { return GetField<3>()[i]; }
+ btree_node *start_child() const { return child(start()); }
+ btree_node *&mutable_child(int i) { return GetField<3>()[i]; }
+ void clear_child(int i) {
+ y_absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
+ }
+ void set_child(int i, btree_node *c) {
+ y_absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
+ mutable_child(i) = c;
+ c->set_position(i);
+ }
+ void init_child(int i, btree_node *c) {
+ set_child(i, c);
+ c->set_parent(this);
+ }
+
+ // Returns the position of the first value whose key is not less than k.
+ template <typename K>
+ SearchResult<int, is_key_compare_to::value> lower_bound(
+ const K &k, const key_compare &comp) const {
+ return use_linear_search::value ? linear_search(k, comp)
+ : binary_search(k, comp);
+ }
+ // Returns the position of the first value whose key is greater than k.
+ template <typename K>
+ int upper_bound(const K &k, const key_compare &comp) const {
+ auto upper_compare = upper_bound_adapter<key_compare>(comp);
+ return use_linear_search::value ? linear_search(k, upper_compare).value
+ : binary_search(k, upper_compare).value;
+ }
+
+ template <typename K, typename Compare>
+ SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+ linear_search(const K &k, const Compare &comp) const {
+ return linear_search_impl(k, start(), finish(), comp,
+ btree_is_key_compare_to<Compare, key_type>());
+ }
+
+ template <typename K, typename Compare>
+ SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+ binary_search(const K &k, const Compare &comp) const {
+ return binary_search_impl(k, start(), finish(), comp,
+ btree_is_key_compare_to<Compare, key_type>());
+ }
+
+ // Returns the position of the first value whose key is not less than k using
+ // linear search performed using plain compare.
+ template <typename K, typename Compare>
+ SearchResult<int, false> linear_search_impl(
+ const K &k, int s, const int e, const Compare &comp,
+ std::false_type /* IsCompareTo */) const {
+ while (s < e) {
+ if (!comp(key(s), k)) {
+ break;
+ }
+ ++s;
+ }
+ return SearchResult<int, false>{s};
+ }
+
+ // Returns the position of the first value whose key is not less than k using
+ // linear search performed using compare-to.
+ template <typename K, typename Compare>
+ SearchResult<int, true> linear_search_impl(
+ const K &k, int s, const int e, const Compare &comp,
+ std::true_type /* IsCompareTo */) const {
+ while (s < e) {
+ const y_absl::weak_ordering c = comp(key(s), k);
+ if (c == 0) {
+ return {s, MatchKind::kEq};
+ } else if (c > 0) {
+ break;
+ }
+ ++s;
+ }
+ return {s, MatchKind::kNe};
+ }
+
+ // Returns the position of the first value whose key is not less than k using
+ // binary search performed using plain compare.
+ template <typename K, typename Compare>
+ SearchResult<int, false> binary_search_impl(
+ const K &k, int s, int e, const Compare &comp,
+ std::false_type /* IsCompareTo */) const {
+ while (s != e) {
+ const int mid = (s + e) >> 1;
+ if (comp(key(mid), k)) {
+ s = mid + 1;
+ } else {
+ e = mid;
+ }
+ }
+ return SearchResult<int, false>{s};
+ }
+
+ // Returns the position of the first value whose key is not less than k using
+ // binary search performed using compare-to.
+ template <typename K, typename CompareTo>
+ SearchResult<int, true> binary_search_impl(
+ const K &k, int s, int e, const CompareTo &comp,
+ std::true_type /* IsCompareTo */) const {
+ if (params_type::template can_have_multiple_equivalent_keys<K>()) {
+ MatchKind exact_match = MatchKind::kNe;
+ while (s != e) {
+ const int mid = (s + e) >> 1;
+ const y_absl::weak_ordering c = comp(key(mid), k);
+ if (c < 0) {
+ s = mid + 1;
+ } else {
+ e = mid;
+ if (c == 0) {
+ // Need to return the first value whose key is not less than k,
+ // which requires continuing the binary search if there could be
+ // multiple equivalent keys.
+ exact_match = MatchKind::kEq;
+ }
+ }
+ }
+ return {s, exact_match};
+ } else { // Can't have multiple equivalent keys.
+ while (s != e) {
+ const int mid = (s + e) >> 1;
+ const y_absl::weak_ordering c = comp(key(mid), k);
+ if (c < 0) {
+ s = mid + 1;
+ } else if (c > 0) {
+ e = mid;
+ } else {
+ return {mid, MatchKind::kEq};
+ }
+ }
+ return {s, MatchKind::kNe};
+ }
+ }
+
+ // Emplaces a value at position i, shifting all existing values and
+ // children at positions >= i to the right by 1.
+ template <typename... Args>
+ void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
+
+ // Removes the values at positions [i, i + to_erase), shifting all existing
+ // values and children after that range to the left by to_erase. Clears all
+ // children between [i, i + to_erase).
+ void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
+
+ // Rebalances a node with its right sibling.
+ void rebalance_right_to_left(int to_move, btree_node *right,
+ allocator_type *alloc);
+ void rebalance_left_to_right(int to_move, btree_node *right,
+ allocator_type *alloc);
+
+ // Splits a node, moving a portion of the node's values to its right sibling.
+ void split(int insert_position, btree_node *dest, allocator_type *alloc);
+
+ // Merges a node with its right sibling, moving all of the values and the
+ // delimiting key in the parent node onto itself, and deleting the src node.
+ void merge(btree_node *src, allocator_type *alloc);
+
+ // Node allocation/deletion routines.
+ void init_leaf(btree_node *parent, int max_count) {
+ set_parent(parent);
+ set_position(0);
+ set_start(0);
+ set_finish(0);
+ set_max_count(max_count);
+ y_absl::container_internal::SanitizerPoisonMemoryRegion(
+ start_slot(), max_count * sizeof(slot_type));
+ }
+ void init_internal(btree_node *parent) {
+ init_leaf(parent, kNodeSlots);
+ // Set `max_count` to a sentinel value to indicate that this node is
+ // internal.
+ set_max_count(kInternalNodeMaxCount);
+ y_absl::container_internal::SanitizerPoisonMemoryRegion(
+ &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
+ }
+
+ static void deallocate(const size_type size, btree_node *node,
+ allocator_type *alloc) {
+ y_absl::container_internal::Deallocate<Alignment()>(alloc, node, size);
+ }
+
+ // Deletes a node and all of its children.
+ static void clear_and_delete(btree_node *node, allocator_type *alloc);
+
+ private:
+ template <typename... Args>
+ void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
+ y_absl::container_internal::SanitizerUnpoisonObject(slot(i));
+ params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
+ }
+ void value_destroy(const field_type i, allocator_type *alloc) {
+ params_type::destroy(alloc, slot(i));
+ y_absl::container_internal::SanitizerPoisonObject(slot(i));
+ }
+ void value_destroy_n(const field_type i, const field_type n,
+ allocator_type *alloc) {
+ for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
+ params_type::destroy(alloc, s);
+ y_absl::container_internal::SanitizerPoisonObject(s);
+ }
+ }
+
+ static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) {
+ y_absl::container_internal::SanitizerUnpoisonObject(dest);
+ params_type::transfer(alloc, dest, src);
+ y_absl::container_internal::SanitizerPoisonObject(src);
+ }
+
+ // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`.
+ void transfer(const size_type dest_i, const size_type src_i,
+ btree_node *src_node, allocator_type *alloc) {
+ transfer(slot(dest_i), src_node->slot(src_i), alloc);
+ }
+
+ // Transfers `n` values starting at value `src_i` in `src_node` into the
+ // values starting at value `dest_i` in `this`.
+ void transfer_n(const size_type n, const size_type dest_i,
+ const size_type src_i, btree_node *src_node,
+ allocator_type *alloc) {
+ for (slot_type *src = src_node->slot(src_i), *end = src + n,
+ *dest = slot(dest_i);
+ src != end; ++src, ++dest) {
+ transfer(dest, src, alloc);
+ }
+ }
+
+ // Same as above, except that we start at the end and work our way to the
+ // beginning.
+ void transfer_n_backward(const size_type n, const size_type dest_i,
+ const size_type src_i, btree_node *src_node,
+ allocator_type *alloc) {
+ for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
+ *dest = slot(dest_i + n - 1);
+ src != end; --src, --dest) {
+ transfer(dest, src, alloc);
+ }
+ }
+
+ template <typename P>
+ friend class btree;
+ template <typename N, typename R, typename P>
+ friend struct btree_iterator;
+ friend class BtreeNodePeer;
+};
+
+template <typename Node, typename Reference, typename Pointer>
+struct btree_iterator {
+ private:
+ using key_type = typename Node::key_type;
+ using size_type = typename Node::size_type;
+ using params_type = typename Node::params_type;
+ using is_map_container = typename params_type::is_map_container;
+
+ using node_type = Node;
+ using normal_node = typename std::remove_const<Node>::type;
+ using const_node = const Node;
+ using normal_pointer = typename params_type::pointer;
+ using normal_reference = typename params_type::reference;
+ using const_pointer = typename params_type::const_pointer;
+ using const_reference = typename params_type::const_reference;
+ using slot_type = typename params_type::slot_type;
+
+ using iterator =
+ btree_iterator<normal_node, normal_reference, normal_pointer>;
+ using const_iterator =
+ btree_iterator<const_node, const_reference, const_pointer>;
+
+ public:
+ // These aliases are public for std::iterator_traits.
+ using difference_type = typename Node::difference_type;
+ using value_type = typename params_type::value_type;
+ using pointer = Pointer;
+ using reference = Reference;
+ using iterator_category = std::bidirectional_iterator_tag;
+
+ btree_iterator() : node(nullptr), position(-1) {}
+ explicit btree_iterator(Node *n) : node(n), position(n->start()) {}
+ btree_iterator(Node *n, int p) : node(n), position(p) {}
+
+ // NOTE: this SFINAE allows for implicit conversions from iterator to
+ // const_iterator, but it specifically avoids hiding the copy constructor so
+ // that the trivial one will be used when possible.
+ template <typename N, typename R, typename P,
+ y_absl::enable_if_t<
+ std::is_same<btree_iterator<N, R, P>, iterator>::value &&
+ std::is_same<btree_iterator, const_iterator>::value,
+ int> = 0>
+ btree_iterator(const btree_iterator<N, R, P> other) // NOLINT
+ : node(other.node), position(other.position) {}
+
+ private:
+ // This SFINAE allows explicit conversions from const_iterator to
+ // iterator, but also avoids hiding the copy constructor.
+ // NOTE: the const_cast is safe because this constructor is only called by
+ // non-const methods and the container owns the nodes.
+ template <typename N, typename R, typename P,
+ y_absl::enable_if_t<
+ std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
+ std::is_same<btree_iterator, iterator>::value,
+ int> = 0>
+ explicit btree_iterator(const btree_iterator<N, R, P> other)
+ : node(const_cast<node_type *>(other.node)), position(other.position) {}
+
+ // Increment/decrement the iterator.
+ void increment() {
+ if (node->leaf() && ++position < node->finish()) {
+ return;
+ }
+ increment_slow();
+ }
+ void increment_slow();
+
+ void decrement() {
+ if (node->leaf() && --position >= node->start()) {
+ return;
+ }
+ decrement_slow();
+ }
+ void decrement_slow();
+
+ public:
+ bool operator==(const iterator &other) const {
+ return node == other.node && position == other.position;
+ }
+ bool operator==(const const_iterator &other) const {
+ return node == other.node && position == other.position;
+ }
+ bool operator!=(const iterator &other) const {
+ return node != other.node || position != other.position;
+ }
+ bool operator!=(const const_iterator &other) const {
+ return node != other.node || position != other.position;
+ }
+
+ // Accessors for the key/value the iterator is pointing at.
+ reference operator*() const {
+ ABSL_HARDENING_ASSERT(node != nullptr);
+ ABSL_HARDENING_ASSERT(node->start() <= position);
+ ABSL_HARDENING_ASSERT(node->finish() > position);
+ return node->value(position);
+ }
+ pointer operator->() const { return &operator*(); }
+
+ btree_iterator &operator++() {
+ increment();
+ return *this;
+ }
+ btree_iterator &operator--() {
+ decrement();
+ return *this;
+ }
+ btree_iterator operator++(int) {
+ btree_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+ btree_iterator operator--(int) {
+ btree_iterator tmp = *this;
+ --*this;
+ return tmp;
+ }
+
+ private:
+ friend iterator;
+ friend const_iterator;
+ template <typename Params>
+ friend class btree;
+ template <typename Tree>
+ friend class btree_container;
+ template <typename Tree>
+ friend class btree_set_container;
+ template <typename Tree>
+ friend class btree_map_container;
+ template <typename Tree>
+ friend class btree_multiset_container;
+ template <typename TreeType, typename CheckerType>
+ friend class base_checker;
+
+ const key_type &key() const { return node->key(position); }
+ slot_type *slot() { return node->slot(position); }
+
+ // The node in the tree the iterator is pointing at.
+ Node *node;
+ // The position within the node of the tree the iterator is pointing at.
+ // NOTE: this is an int rather than a field_type because iterators can point
+ // to invalid positions (such as -1) in certain circumstances.
+ int position;
+};
+
+template <typename Params>
+class btree {
+ using node_type = btree_node<Params>;
+ using is_key_compare_to = typename Params::is_key_compare_to;
+ using init_type = typename Params::init_type;
+ using field_type = typename node_type::field_type;
+
+ // We use a static empty node for the root/leftmost/rightmost of empty btrees
+ // in order to avoid branching in begin()/end().
+ struct alignas(node_type::Alignment()) EmptyNodeType : node_type {
+ using field_type = typename node_type::field_type;
+ node_type *parent;
+ field_type position = 0;
+ field_type start = 0;
+ field_type finish = 0;
+ // max_count must be != kInternalNodeMaxCount (so that this node is regarded
+ // as a leaf node). max_count() is never called when the tree is empty.
+ field_type max_count = node_type::kInternalNodeMaxCount + 1;
+
+#ifdef _MSC_VER
+ // MSVC has constexpr code generation bugs here.
+ EmptyNodeType() : parent(this) {}
+#else
+ constexpr EmptyNodeType(node_type *p) : parent(p) {}
+#endif
+ };
+
+ static node_type *EmptyNode() {
+#ifdef _MSC_VER
+ static EmptyNodeType *empty_node = new EmptyNodeType;
+ // This assert fails on some other construction methods.
+ assert(empty_node->parent == empty_node);
+ return empty_node;
+#else
+ static constexpr EmptyNodeType empty_node(
+ const_cast<EmptyNodeType *>(&empty_node));
+ return const_cast<EmptyNodeType *>(&empty_node);
+#endif
+ }
+
+ enum : uint32_t {
+ kNodeSlots = node_type::kNodeSlots,
+ kMinNodeValues = kNodeSlots / 2,
+ };
+
+ struct node_stats {
+ using size_type = typename Params::size_type;
+
+ node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {}
+
+ node_stats &operator+=(const node_stats &other) {
+ leaf_nodes += other.leaf_nodes;
+ internal_nodes += other.internal_nodes;
+ return *this;
+ }
+
+ size_type leaf_nodes;
+ size_type internal_nodes;
+ };
+
+ public:
+ using key_type = typename Params::key_type;
+ using value_type = typename Params::value_type;
+ using size_type = typename Params::size_type;
+ using difference_type = typename Params::difference_type;
+ using key_compare = typename Params::key_compare;
+ using original_key_compare = typename Params::original_key_compare;
+ using value_compare = typename Params::value_compare;
+ using allocator_type = typename Params::allocator_type;
+ using reference = typename Params::reference;
+ using const_reference = typename Params::const_reference;
+ using pointer = typename Params::pointer;
+ using const_pointer = typename Params::const_pointer;
+ using iterator =
+ typename btree_iterator<node_type, reference, pointer>::iterator;
+ using const_iterator = typename iterator::const_iterator;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using node_handle_type = node_handle<Params, Params, allocator_type>;
+
+ // Internal types made public for use by btree_container types.
+ using params_type = Params;
+ using slot_type = typename Params::slot_type;
+
+ private:
+ // For use in copy_or_move_values_in_order.
+ const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
+ value_type &&maybe_move_from_iterator(iterator it) {
+ // This is a destructive operation on the other container so it's safe for
+ // us to const_cast and move from the keys here even if it's a set.
+ return std::move(const_cast<value_type &>(*it));
+ }
+
+ // Copies or moves (depending on the template parameter) the values in
+ // other into this btree in their order in other. This btree must be empty
+ // before this method is called. This method is used in copy construction,
+ // copy assignment, and move assignment.
+ template <typename Btree>
+ void copy_or_move_values_in_order(Btree &other);
+
+ // Validates that various assumptions/requirements are true at compile time.
+ constexpr static bool static_assert_validation();
+
+ public:
+ btree(const key_compare &comp, const allocator_type &alloc)
+ : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
+
+ btree(const btree &other) : btree(other, other.allocator()) {}
+ btree(const btree &other, const allocator_type &alloc)
+ : btree(other.key_comp(), alloc) {
+ copy_or_move_values_in_order(other);
+ }
+ btree(btree &&other) noexcept
+ : root_(std::move(other.root_)),
+ rightmost_(y_absl::exchange(other.rightmost_, EmptyNode())),
+ size_(y_absl::exchange(other.size_, 0)) {
+ other.mutable_root() = EmptyNode();
+ }
+ btree(btree &&other, const allocator_type &alloc)
+ : btree(other.key_comp(), alloc) {
+ if (alloc == other.allocator()) {
+ swap(other);
+ } else {
+ // Move values from `other` one at a time when allocators are different.
+ copy_or_move_values_in_order(other);
+ }
+ }
+
+ ~btree() {
+ // Put static_asserts in destructor to avoid triggering them before the type
+ // is complete.
+ static_assert(static_assert_validation(), "This call must be elided.");
+ clear();
+ }
+
+ // Assign the contents of other to *this.
+ btree &operator=(const btree &other);
+ btree &operator=(btree &&other) noexcept;
+
+ iterator begin() { return iterator(leftmost()); }
+ const_iterator begin() const { return const_iterator(leftmost()); }
+ iterator end() { return iterator(rightmost_, rightmost_->finish()); }
+ const_iterator end() const {
+ return const_iterator(rightmost_, rightmost_->finish());
+ }
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+
+ // Finds the first element whose key is not less than `key`.
+ template <typename K>
+ iterator lower_bound(const K &key) {
+ return internal_end(internal_lower_bound(key).value);
+ }
+ template <typename K>
+ const_iterator lower_bound(const K &key) const {
+ return internal_end(internal_lower_bound(key).value);
+ }
+
+ // Finds the first element whose key is not less than `key` and also returns
+ // whether that element is equal to `key`.
+ template <typename K>
+ std::pair<iterator, bool> lower_bound_equal(const K &key) const;
+
+ // Finds the first element whose key is greater than `key`.
+ template <typename K>
+ iterator upper_bound(const K &key) {
+ return internal_end(internal_upper_bound(key));
+ }
+ template <typename K>
+ const_iterator upper_bound(const K &key) const {
+ return internal_end(internal_upper_bound(key));
+ }
+
+ // Finds the range of values which compare equal to key. The first member of
+ // the returned pair is equal to lower_bound(key). The second member of the
+ // pair is equal to upper_bound(key).
+ template <typename K>
+ std::pair<iterator, iterator> equal_range(const K &key);
+ template <typename K>
+ std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
+ return const_cast<btree *>(this)->equal_range(key);
+ }
+
+ // Inserts a value into the btree only if it does not already exist. The
+ // boolean return value indicates whether insertion succeeded or failed.
+ // Requirement: if `key` already exists in the btree, does not consume `args`.
+ // Requirement: `key` is never referenced after consuming `args`.
+ template <typename K, typename... Args>
+ std::pair<iterator, bool> insert_unique(const K &key, Args &&... args);
+
+ // Inserts with hint. Checks to see if the value should be placed immediately
+ // before `position` in the tree. If so, then the insertion will take
+ // amortized constant time. If not, the insertion will take amortized
+ // logarithmic time as if a call to insert_unique() were made.
+ // Requirement: if `key` already exists in the btree, does not consume `args`.
+ // Requirement: `key` is never referenced after consuming `args`.
+ template <typename K, typename... Args>
+ std::pair<iterator, bool> insert_hint_unique(iterator position,
+ const K &key,
+ Args &&... args);
+
+ // Insert a range of values into the btree.
+ // Note: the first overload avoids constructing a value_type if the key
+ // already exists in the btree.
+ template <typename InputIterator,
+ typename = decltype(std::declval<const key_compare &>()(
+ params_type::key(*std::declval<InputIterator>()),
+ std::declval<const key_type &>()))>
+ void insert_iterator_unique(InputIterator b, InputIterator e, int);
+ // We need the second overload for cases in which we need to construct a
+ // value_type in order to compare it with the keys already in the btree.
+ template <typename InputIterator>
+ void insert_iterator_unique(InputIterator b, InputIterator e, char);
+
+ // Inserts a value into the btree.
+ template <typename ValueType>
+ iterator insert_multi(const key_type &key, ValueType &&v);
+
+ // Inserts a value into the btree.
+ template <typename ValueType>
+ iterator insert_multi(ValueType &&v) {
+ return insert_multi(params_type::key(v), std::forward<ValueType>(v));
+ }
+
+ // Inserts with hint. Checks to see if the value should be placed
+ // immediately before `position` in the tree. If so, the insertion will take
+ // amortized constant time. If not, the insertion will take amortized
+ // logarithmic time as if a call to insert_multi(v) were made.
+ template <typename ValueType>
+ iterator insert_hint_multi(iterator position, ValueType &&v);
+
+ // Insert a range of values into the btree.
+ template <typename InputIterator>
+ void insert_iterator_multi(InputIterator b, InputIterator e);
+
+ // Erase the specified iterator from the btree. The iterator must be valid
+ // (i.e. not equal to end()). Return an iterator pointing to the node after
+ // the one that was erased (or end() if none exists).
+ // Requirement: does not read the value at `*iter`.
+ iterator erase(iterator iter);
+
+ // Erases range. Returns the number of keys erased and an iterator pointing
+ // to the element after the last erased element.
+ std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
+
+ // Finds an element with key equivalent to `key` or returns `end()` if `key`
+ // is not present.
+ template <typename K>
+ iterator find(const K &key) {
+ return internal_end(internal_find(key));
+ }
+ template <typename K>
+ const_iterator find(const K &key) const {
+ return internal_end(internal_find(key));
+ }
+
+ // Clear the btree, deleting all of the values it contains.
+ void clear();
+
+ // Swaps the contents of `this` and `other`.
+ void swap(btree &other);
+
+ const key_compare &key_comp() const noexcept {
+ return root_.template get<0>();
+ }
+ template <typename K1, typename K2>
+ bool compare_keys(const K1 &a, const K2 &b) const {
+ return compare_internal::compare_result_as_less_than(key_comp()(a, b));
+ }
+
+ value_compare value_comp() const {
+ return value_compare(original_key_compare(key_comp()));
+ }
+
+ // Verifies the structure of the btree.
+ void verify() const;
+
+ // Size routines.
+ size_type size() const { return size_; }
+ size_type max_size() const { return (std::numeric_limits<size_type>::max)(); }
+ bool empty() const { return size_ == 0; }
+
+ // The height of the btree. An empty tree will have height 0.
+ size_type height() const {
+ size_type h = 0;
+ if (!empty()) {
+ // Count the length of the chain from the leftmost node up to the
+ // root. We actually count from the root back around to the level below
+ // the root, but the calculation is the same because of the circularity
+ // of that traversal.
+ const node_type *n = root();
+ do {
+ ++h;
+ n = n->parent();
+ } while (n != root());
+ }
+ return h;
+ }
+
+ // The number of internal, leaf and total nodes used by the btree.
+ size_type leaf_nodes() const { return internal_stats(root()).leaf_nodes; }
+ size_type internal_nodes() const {
+ return internal_stats(root()).internal_nodes;
+ }
+ size_type nodes() const {
+ node_stats stats = internal_stats(root());
+ return stats.leaf_nodes + stats.internal_nodes;
+ }
+
+ // The total number of bytes used by the btree.
+ size_type bytes_used() const {
+ node_stats stats = internal_stats(root());
+ if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
+ return sizeof(*this) + node_type::LeafSize(root()->max_count());
+ } else {
+ return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() +
+ stats.internal_nodes * node_type::InternalSize();
+ }
+ }
+
+ // The average number of bytes used per value stored in the btree assuming
+ // random insertion order.
+ static double average_bytes_per_value() {
+ // The expected number of values per node with random insertion order is the
+ // average of the maximum and minimum numbers of values per node.
+ const double expected_values_per_node =
+ (kNodeSlots + kMinNodeValues) / 2.0;
+ return node_type::LeafSize() / expected_values_per_node;
+ }
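+
+ // For example (hypothetical sizes): with kNodeSlots == 62 and
+ // kMinNodeValues == 31, expected_values_per_node == 46.5, so a 256-byte
+ // leaf averages roughly 256 / 46.5 ~= 5.5 bytes per value.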
+
+ // The fullness of the btree. Computed as the number of elements in the btree
+ // divided by the maximum number of elements a tree with the current number
+ // of nodes could hold. A value of 1 indicates perfect space
+ // utilization. Smaller values indicate space wastage.
+ // Returns 0 for empty trees.
+ double fullness() const {
+ if (empty()) return 0.0;
+ return static_cast<double>(size()) / (nodes() * kNodeSlots);
+ }
+ // The overhead of the btree structure, in bytes per value. Computed as the
+ // total number of bytes used by the btree minus the number of bytes used
+ // for storing elements, divided by the number of elements.
+ // Returns 0 for empty trees.
+ double overhead() const {
+ if (empty()) return 0.0;
+ return (bytes_used() - size() * sizeof(value_type)) /
+ static_cast<double>(size());
+ }
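+
+ // Worked example with hypothetical numbers (real values depend on
+ // Params::kTargetNodeSize): if kNodeSlots == 62, size() == 100, and
+ // nodes() == 3, then fullness() == 100 / (3 * 62) ~= 0.54; if
+ // bytes_used() == 800 with 4-byte values, overhead() ==
+ // (800 - 100 * 4) / 100 == 4 bytes per value.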
+
+ // The allocator used by the btree.
+ allocator_type get_allocator() const { return allocator(); }
+
+ private:
+ // Internal accessor routines.
+ node_type *root() { return root_.template get<2>(); }
+ const node_type *root() const { return root_.template get<2>(); }
+ node_type *&mutable_root() noexcept { return root_.template get<2>(); }
+ key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); }
+
+ // The leftmost node is stored as the parent of the root node.
+ node_type *leftmost() { return root()->parent(); }
+ const node_type *leftmost() const { return root()->parent(); }
+
+ // Allocator routines.
+ allocator_type *mutable_allocator() noexcept {
+ return &root_.template get<1>();
+ }
+ const allocator_type &allocator() const noexcept {
+ return root_.template get<1>();
+ }
+
+ // Allocates a correctly aligned node of at least size bytes using the
+ // allocator.
+ node_type *allocate(const size_type size) {
+ return reinterpret_cast<node_type *>(
+ y_absl::container_internal::Allocate<node_type::Alignment()>(
+ mutable_allocator(), size));
+ }
+
+ // Node creation/deletion routines.
+ node_type *new_internal_node(node_type *parent) {
+ node_type *n = allocate(node_type::InternalSize());
+ n->init_internal(parent);
+ return n;
+ }
+ node_type *new_leaf_node(node_type *parent) {
+ node_type *n = allocate(node_type::LeafSize());
+ n->init_leaf(parent, kNodeSlots);
+ return n;
+ }
+ node_type *new_leaf_root_node(const int max_count) {
+ node_type *n = allocate(node_type::LeafSize(max_count));
+ n->init_leaf(/*parent=*/n, max_count);
+ return n;
+ }
+
+ // Deletion helper routines.
+ iterator rebalance_after_delete(iterator iter);
+
+ // Rebalances or splits the node iter points to.
+ void rebalance_or_split(iterator *iter);
+
+ // Merges the values of left, right and the delimiting key on their parent
+ // onto left, removing the delimiting key and deleting right.
+ void merge_nodes(node_type *left, node_type *right);
+
+ // Tries to merge node with its left or right sibling, and failing that,
+ // tries to rebalance with its left or right sibling. Returns true if a
+ // merge occurred, at which point it is no longer valid to access node.
+ // Returns false if no merging took place.
+ bool try_merge_or_rebalance(iterator *iter);
+
+ // Tries to shrink the height of the tree by 1.
+ void try_shrink();
+
+ iterator internal_end(iterator iter) {
+ return iter.node != nullptr ? iter : end();
+ }
+ const_iterator internal_end(const_iterator iter) const {
+ return iter.node != nullptr ? iter : end();
+ }
+
+ // Emplaces a value into the btree immediately before iter. Requires that
+ // key(v) <= iter.key() and (--iter).key() <= key(v).
+ template <typename... Args>
+ iterator internal_emplace(iterator iter, Args &&... args);
+
+ // Returns an iterator pointing to the first value >= the value "iter" is
+ // pointing at. Note that "iter" might be pointing to an invalid location such
+ // as iter.position == iter.node->finish(). This routine simply moves iter up
+ // in the tree to a valid location.
+ // Requires: iter.node is non-null.
+ template <typename IterType>
+ static IterType internal_last(IterType iter);
+
+ // Returns an iterator pointing to the leaf position at which key would
+ // reside in the tree, unless there is an exact match - in which case, the
+ // result may not be on a leaf. When there's a three-way comparator, we can
+ // return whether there was an exact match. This allows the caller to avoid a
+ // subsequent comparison to determine if an exact match was made, which is
+ // important for keys with expensive comparison, such as strings.
+ template <typename K>
+ SearchResult<iterator, is_key_compare_to::value> internal_locate(
+ const K &key) const;
+
+ // Internal routine which implements lower_bound().
+ template <typename K>
+ SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
+ const K &key) const;
+
+ // Internal routine which implements upper_bound().
+ template <typename K>
+ iterator internal_upper_bound(const K &key) const;
+
+ // Internal routine which implements find().
+ template <typename K>
+ iterator internal_find(const K &key) const;
+
+ // Verifies the tree structure of node.
+ int internal_verify(const node_type *node, const key_type *lo,
+ const key_type *hi) const;
+
+ node_stats internal_stats(const node_type *node) const {
+ // The root can be a static empty node.
+ if (node == nullptr || (node == root() && empty())) {
+ return node_stats(0, 0);
+ }
+ if (node->leaf()) {
+ return node_stats(1, 0);
+ }
+ node_stats res(0, 1);
+ for (int i = node->start(); i <= node->finish(); ++i) {
+ res += internal_stats(node->child(i));
+ }
+ return res;
+ }
+
+ // We use compressed tuple in order to save space because key_compare and
+ // allocator_type are usually empty.
+ y_absl::container_internal::CompressedTuple<key_compare, allocator_type,
+ node_type *>
+ root_;
+
+ // A pointer to the rightmost node. Note that the leftmost node is stored as
+ // the root's parent.
+ node_type *rightmost_;
+
+ // Number of values.
+ size_type size_;
+};
+
+////
+// btree_node methods
+template <typename P>
+template <typename... Args>
+inline void btree_node<P>::emplace_value(const size_type i,
+ allocator_type *alloc,
+ Args &&... args) {
+ assert(i >= start());
+ assert(i <= finish());
+ // Shift old values to create space for new value and then construct it in
+ // place.
+ if (i < finish()) {
+ transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this,
+ alloc);
+ }
+ value_init(i, alloc, std::forward<Args>(args)...);
+ set_finish(finish() + 1);
+
+ if (!leaf() && finish() > i + 1) {
+ for (int j = finish(); j > i + 1; --j) {
+ set_child(j, child(j - 1));
+ }
+ clear_child(i + 1);
+ }
+}
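+
+// For illustration: emplacing at i == 1 in a leaf holding [a, b, c] shifts
+// b and c one slot right via transfer_n_backward, constructs the new value
+// x in slot 1, and bumps finish() from 3 to 4, yielding [a, x, b, c]. On an
+// internal node the child pointers to the right of i are shifted the same
+// way, and the new value's right-child slot (i + 1) is cleared.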
+
+template <typename P>
+inline void btree_node<P>::remove_values(const field_type i,
+ const field_type to_erase,
+ allocator_type *alloc) {
+ // Transfer values after the removed range into their new places.
+ value_destroy_n(i, to_erase, alloc);
+ const field_type orig_finish = finish();
+ const field_type src_i = i + to_erase;
+ transfer_n(orig_finish - src_i, i, src_i, this, alloc);
+
+ if (!leaf()) {
+ // Delete all children between begin and end.
+ for (int j = 0; j < to_erase; ++j) {
+ clear_and_delete(child(i + j + 1), alloc);
+ }
+ // Rotate children after end into new positions.
+ for (int j = i + to_erase + 1; j <= orig_finish; ++j) {
+ set_child(j - to_erase, child(j));
+ clear_child(j);
+ }
+ }
+ set_finish(orig_finish - to_erase);
+}
+
+template <typename P>
+void btree_node<P>::rebalance_right_to_left(const int to_move,
+ btree_node *right,
+ allocator_type *alloc) {
+ assert(parent() == right->parent());
+ assert(position() + 1 == right->position());
+ assert(right->count() >= count());
+ assert(to_move >= 1);
+ assert(to_move <= right->count());
+
+ // 1) Move the delimiting value in the parent to the left node.
+ transfer(finish(), position(), parent(), alloc);
+
+ // 2) Move the (to_move - 1) values from the right node to the left node.
+ transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc);
+
+ // 3) Move the new delimiting value to the parent from the right node.
+ parent()->transfer(position(), right->start() + to_move - 1, right, alloc);
+
+ // 4) Shift the values in the right node to their correct positions.
+ right->transfer_n(right->count() - to_move, right->start(),
+ right->start() + to_move, right, alloc);
+
+ if (!leaf()) {
+ // Move the child pointers from the right to the left node.
+ for (int i = 0; i < to_move; ++i) {
+ init_child(finish() + i + 1, right->child(i));
+ }
+ for (int i = right->start(); i <= right->finish() - to_move; ++i) {
+ assert(i + to_move <= right->max_count());
+ right->init_child(i, right->child(i + to_move));
+ right->clear_child(i + to_move);
+ }
+ }
+
+ // Fixup `finish` on the left and right nodes.
+ set_finish(finish() + to_move);
+ right->set_finish(right->finish() - to_move);
+}
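+
+// Worked example (illustrative): with left == [a, b], parent delimiter c,
+// right == [d, e, f, g], and to_move == 2: step 1 appends c to the left
+// node, step 2 moves d across as well, step 3 promotes e into the parent,
+// and step 4 compacts the right node, leaving left == [a, b, c, d],
+// delimiter e, right == [f, g].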
+
+template <typename P>
+void btree_node<P>::rebalance_left_to_right(const int to_move,
+ btree_node *right,
+ allocator_type *alloc) {
+ assert(parent() == right->parent());
+ assert(position() + 1 == right->position());
+ assert(count() >= right->count());
+ assert(to_move >= 1);
+ assert(to_move <= count());
+
+ // Values in the right node are shifted to the right to make room for the
+ // new to_move values. Then, the delimiting value in the parent and the
+ // other (to_move - 1) values in the left node are moved into the right node.
+ // Lastly, a new delimiting value is moved from the left node into the
+ // parent, and the remaining empty left node entries are destroyed.
+
+ // 1) Shift existing values in the right node to their correct positions.
+ right->transfer_n_backward(right->count(), right->start() + to_move,
+ right->start(), right, alloc);
+
+ // 2) Move the delimiting value in the parent to the right node.
+ right->transfer(right->start() + to_move - 1, position(), parent(), alloc);
+
+ // 3) Move the (to_move - 1) values from the left node to the right node.
+ right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this,
+ alloc);
+
+ // 4) Move the new delimiting value to the parent from the left node.
+ parent()->transfer(position(), finish() - to_move, this, alloc);
+
+ if (!leaf()) {
+ // Move the child pointers from the left to the right node.
+ for (int i = right->finish(); i >= right->start(); --i) {
+ right->init_child(i + to_move, right->child(i));
+ right->clear_child(i);
+ }
+ for (int i = 1; i <= to_move; ++i) {
+ right->init_child(i - 1, child(finish() - to_move + i));
+ clear_child(finish() - to_move + i);
+ }
+ }
+
+ // Fixup the counts on the left and right nodes.
+ set_finish(finish() - to_move);
+ right->set_finish(right->finish() + to_move);
+}
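+
+// Mirror of the example above: with left == [a, b, c, d], parent delimiter
+// e, right == [f, g], and to_move == 2, the steps run in reverse and leave
+// left == [a, b], delimiter c, right == [d, e, f, g].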
+
+template <typename P>
+void btree_node<P>::split(const int insert_position, btree_node *dest,
+ allocator_type *alloc) {
+ assert(dest->count() == 0);
+ assert(max_count() == kNodeSlots);
+
+ // We bias the split based on the position being inserted. If we're
+ // inserting at the beginning of the left node then bias the split to put
+ // more values on the right node. If we're inserting at the end of the
+ // right node then bias the split to put more values on the left node.
+ if (insert_position == start()) {
+ dest->set_finish(dest->start() + finish() - 1);
+ } else if (insert_position == kNodeSlots) {
+ dest->set_finish(dest->start());
+ } else {
+ dest->set_finish(dest->start() + count() / 2);
+ }
+ set_finish(finish() - dest->count());
+ assert(count() >= 1);
+
+ // Move values from the left sibling to the right sibling.
+ dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc);
+
+ // The split key is the largest value in the left sibling.
+ --mutable_finish();
+ parent()->emplace_value(position(), alloc, finish_slot());
+ value_destroy(finish(), alloc);
+ parent()->init_child(position() + 1, dest);
+
+ if (!leaf()) {
+ for (int i = dest->start(), j = finish() + 1; i <= dest->finish();
+ ++i, ++j) {
+ assert(child(j) != nullptr);
+ dest->init_child(i, child(j));
+ clear_child(j);
+ }
+ }
+}
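+
+// For illustration: when insert_position == start(), nearly all values move
+// to the new right sibling (dest); when insert_position == kNodeSlots,
+// nearly all stay on the left; otherwise the node splits roughly in half.
+// In every case the promoted key is the last value left on the left sibling.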
+
+template <typename P>
+void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
+ assert(parent() == src->parent());
+ assert(position() + 1 == src->position());
+
+ // Move the delimiting value to the left node.
+ value_init(finish(), alloc, parent()->slot(position()));
+
+ // Move the values from the right to the left node.
+ transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
+
+ if (!leaf()) {
+ // Move the child pointers from the right to the left node.
+ for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) {
+ init_child(j, src->child(i));
+ src->clear_child(i);
+ }
+ }
+
+ // Fixup `finish` on the src and dest nodes.
+ set_finish(start() + 1 + count() + src->count());
+ src->set_finish(src->start());
+
+ // Remove the value on the parent node and delete the src node.
+ parent()->remove_values(position(), /*to_erase=*/1, alloc);
+}
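+
+// Worked example (illustrative): merging left == [a, b] and right == [d, e]
+// across parent delimiter c first pulls c down into the left node, then
+// appends d and e, producing left == [a, b, c, d, e]; the delimiter slot is
+// then removed from the parent and the right node is deleted.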
+
+template <typename P>
+void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
+ if (node->leaf()) {
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(LeafSize(node->max_count()), node, alloc);
+ return;
+ }
+ if (node->count() == 0) {
+ deallocate(InternalSize(), node, alloc);
+ return;
+ }
+
+ // The parent of the root of the subtree we are deleting.
+ btree_node *delete_root_parent = node->parent();
+
+ // Navigate to the leftmost leaf under node, and then delete upwards.
+ while (!node->leaf()) node = node->start_child();
+ // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
+ // isn't guaranteed to be a valid `field_type`.
+ int pos = node->position();
+ btree_node *parent = node->parent();
+ for (;;) {
+ // In each iteration of the next loop, we delete one leaf node and go right.
+ assert(pos <= parent->finish());
+ do {
+ node = parent->child(pos);
+ if (!node->leaf()) {
+ // Navigate to the leftmost leaf under node.
+ while (!node->leaf()) node = node->start_child();
+ pos = node->position();
+ parent = node->parent();
+ }
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(LeafSize(node->max_count()), node, alloc);
+ ++pos;
+ } while (pos <= parent->finish());
+
+ // Once we've deleted all children of parent, delete parent and go up/right.
+ assert(pos > parent->finish());
+ do {
+ node = parent;
+ pos = node->position();
+ parent = node->parent();
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(InternalSize(), node, alloc);
+ if (parent == delete_root_parent) return;
+ ++pos;
+ } while (pos > parent->finish());
+ }
+}
+
+////
+// btree_iterator methods
+template <typename N, typename R, typename P>
+void btree_iterator<N, R, P>::increment_slow() {
+ if (node->leaf()) {
+ assert(position >= node->finish());
+ btree_iterator save(*this);
+ while (position == node->finish() && !node->is_root()) {
+ assert(node->parent()->child(node->position()) == node);
+ position = node->position();
+ node = node->parent();
+ }
+ // TODO(ezb): assert we aren't incrementing end() instead of handling.
+ if (position == node->finish()) {
+ *this = save;
+ }
+ } else {
+ assert(position < node->finish());
+ node = node->child(position + 1);
+ while (!node->leaf()) {
+ node = node->start_child();
+ }
+ position = node->start();
+ }
+}
+
+template <typename N, typename R, typename P>
+void btree_iterator<N, R, P>::decrement_slow() {
+ if (node->leaf()) {
+ assert(position <= -1);
+ btree_iterator save(*this);
+ while (position < node->start() && !node->is_root()) {
+ assert(node->parent()->child(node->position()) == node);
+ position = node->position() - 1;
+ node = node->parent();
+ }
+ // TODO(ezb): assert we aren't decrementing begin() instead of handling.
+ if (position < node->start()) {
+ *this = save;
+ }
+ } else {
+ assert(position >= node->start());
+ node = node->child(position);
+ while (!node->leaf()) {
+ node = node->child(node->finish());
+ }
+ position = node->finish() - 1;
+ }
+}
+
+////
+// btree methods
+template <typename P>
+template <typename Btree>
+void btree<P>::copy_or_move_values_in_order(Btree &other) {
+ static_assert(std::is_same<btree, Btree>::value ||
+ std::is_same<const btree, Btree>::value,
+ "Btree type must be same or const.");
+ assert(empty());
+
+ // We can avoid key comparisons because we know the order of the
+ // values is the same order we'll store them in.
+ auto iter = other.begin();
+ if (iter == other.end()) return;
+ insert_multi(maybe_move_from_iterator(iter));
+ ++iter;
+ for (; iter != other.end(); ++iter) {
+ // If the btree is not empty, we can just insert the new value at the end
+ // of the tree.
+ internal_emplace(end(), maybe_move_from_iterator(iter));
+ }
+}
+
+template <typename P>
+constexpr bool btree<P>::static_assert_validation() {
+ static_assert(std::is_nothrow_copy_constructible<key_compare>::value,
+ "Key comparison must be nothrow copy constructible");
+ static_assert(std::is_nothrow_copy_constructible<allocator_type>::value,
+ "Allocator must be nothrow copy constructible");
+ static_assert(type_traits_internal::is_trivially_copyable<iterator>::value,
+ "iterator not trivially copyable.");
+
+ // Note: We assert that kNodeSlots, which is computed from
+ // Params::kTargetNodeSize, must fit in node_type::field_type.
+ static_assert(
+ kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
+ "target node size too large");
+
+ // Verify that key_compare returns an y_absl::{weak,strong}_ordering or bool.
+ using compare_result_type =
+ y_absl::result_of_t<key_compare(key_type, key_type)>;
+ static_assert(
+ std::is_same<compare_result_type, bool>::value ||
+ std::is_convertible<compare_result_type, y_absl::weak_ordering>::value,
+ "key comparison function must return y_absl::{weak,strong}_ordering or "
+ "bool.");
+
+ // Test the assumption made in setting kNodeValueSpace.
+ static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4,
+ "node space assumption incorrect");
+
+ return true;
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::lower_bound_equal(const K &key) const
+ -> std::pair<iterator, bool> {
+ const SearchResult<iterator, is_key_compare_to::value> res =
+ internal_lower_bound(key);
+ const iterator lower = iterator(internal_end(res.value));
+ const bool equal = res.HasMatch()
+ ? res.IsEq()
+ : lower != end() && !compare_keys(key, lower.key());
+ return {lower, equal};
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
+ const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
+ const iterator lower = lower_and_equal.first;
+ if (!lower_and_equal.second) {
+ return {lower, lower};
+ }
+
+ const iterator next = std::next(lower);
+ if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+ // The next iterator after lower must point to a key greater than `key`.
+ // Note: if this assert fails, then it may indicate that the comparator does
+ // not meet the equivalence requirements for Compare
+ // (see https://en.cppreference.com/w/cpp/named_req/Compare).
+ assert(next == end() || compare_keys(key, next.key()));
+ return {lower, next};
+ }
+ // Try once more to avoid the call to upper_bound() if there's only one
+ // equivalent key. This should prevent all calls to upper_bound() in cases of
+ // unique-containers with heterogeneous comparators in which all comparison
+ // operators have the same equivalence classes.
+ if (next == end() || compare_keys(key, next.key())) return {lower, next};
+
+ // In this case, we need to call upper_bound() to avoid worst case O(N)
+ // behavior if we were to iterate over equal keys.
+ return {lower, upper_bound(key)};
+}
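+
+// Example (illustrative): in a multiset holding keys [1, 2, 2, 2, 3],
+// equal_range(2) finds `lower` at the first 2; because the following key is
+// also 2, the final fallthrough calls upper_bound(2) and the range covers
+// all three 2s. In a unique container the `next` check suffices and
+// upper_bound() is never called.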
+
+template <typename P>
+template <typename K, typename... Args>
+auto btree<P>::insert_unique(const K &key, Args &&... args)
+ -> std::pair<iterator, bool> {
+ if (empty()) {
+ mutable_root() = rightmost_ = new_leaf_root_node(1);
+ }
+
+ SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+ iterator iter = res.value;
+
+ if (res.HasMatch()) {
+ if (res.IsEq()) {
+ // The key already exists in the tree, do nothing.
+ return {iter, false};
+ }
+ } else {
+ iterator last = internal_last(iter);
+ if (last.node && !compare_keys(key, last.key())) {
+ // The key already exists in the tree, do nothing.
+ return {last, false};
+ }
+ }
+ return {internal_emplace(iter, std::forward<Args>(args)...), true};
+}
+
+template <typename P>
+template <typename K, typename... Args>
+inline auto btree<P>::insert_hint_unique(iterator position, const K &key,
+ Args &&... args)
+ -> std::pair<iterator, bool> {
+ if (!empty()) {
+ if (position == end() || compare_keys(key, position.key())) {
+ if (position == begin() || compare_keys(std::prev(position).key(), key)) {
+ // prev.key() < key < position.key()
+ return {internal_emplace(position, std::forward<Args>(args)...), true};
+ }
+ } else if (compare_keys(position.key(), key)) {
+ ++position;
+ if (position == end() || compare_keys(key, position.key())) {
+ // {original `position`}.key() < key < {current `position`}.key()
+ return {internal_emplace(position, std::forward<Args>(args)...), true};
+ }
+ } else {
+ // position.key() == key
+ return {position, false};
+ }
+ }
+ return insert_unique(key, std::forward<Args>(args)...);
+}
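+
+// Hint sketch (illustrative): when inserting keys in ascending order with
+// end() as the hint, the `position == end()` branch matches on every
+// insertion, so each one is an amortized-constant-time emplace at the back
+// instead of a full root-to-leaf search. insert_iterator_unique() below
+// relies on exactly this.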
+
+template <typename P>
+template <typename InputIterator, typename>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, int) {
+ for (; b != e; ++b) {
+ insert_hint_unique(end(), params_type::key(*b), *b);
+ }
+}
+
+template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
+ for (; b != e; ++b) {
+ init_type value(*b);
+ insert_hint_unique(end(), params_type::key(value), std::move(value));
+ }
+}
+
+template <typename P>
+template <typename ValueType>
+auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
+ if (empty()) {
+ mutable_root() = rightmost_ = new_leaf_root_node(1);
+ }
+
+ iterator iter = internal_upper_bound(key);
+ if (iter.node == nullptr) {
+ iter = end();
+ }
+ return internal_emplace(iter, std::forward<ValueType>(v));
+}
+
+template <typename P>
+template <typename ValueType>
+auto btree<P>::insert_hint_multi(iterator position, ValueType &&v) -> iterator {
+ if (!empty()) {
+ const key_type &key = params_type::key(v);
+ if (position == end() || !compare_keys(position.key(), key)) {
+ if (position == begin() ||
+ !compare_keys(key, std::prev(position).key())) {
+ // prev.key() <= key <= position.key()
+ return internal_emplace(position, std::forward<ValueType>(v));
+ }
+ } else {
+ ++position;
+ if (position == end() || !compare_keys(position.key(), key)) {
+ // {original `position`}.key() < key < {current `position`}.key()
+ return internal_emplace(position, std::forward<ValueType>(v));
+ }
+ }
+ }
+ return insert_multi(std::forward<ValueType>(v));
+}
+
+template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
+ for (; b != e; ++b) {
+ insert_hint_multi(end(), *b);
+ }
+}
+
+template <typename P>
+auto btree<P>::operator=(const btree &other) -> btree & {
+ if (this != &other) {
+ clear();
+
+ *mutable_key_comp() = other.key_comp();
+ if (y_absl::allocator_traits<
+ allocator_type>::propagate_on_container_copy_assignment::value) {
+ *mutable_allocator() = other.allocator();
+ }
+
+ copy_or_move_values_in_order(other);
+ }
+ return *this;
+}
+
+template <typename P>
+auto btree<P>::operator=(btree &&other) noexcept -> btree & {
+ if (this != &other) {
+ clear();
+
+ using std::swap;
+ if (y_absl::allocator_traits<
+ allocator_type>::propagate_on_container_copy_assignment::value) {
+ // Note: `root_` also contains the allocator and the key comparator.
+ swap(root_, other.root_);
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
+ } else {
+ if (allocator() == other.allocator()) {
+ swap(mutable_root(), other.mutable_root());
+ swap(*mutable_key_comp(), *other.mutable_key_comp());
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
+ } else {
+ // We aren't allowed to propagate the allocator and the allocator is
+ // different so we can't take over its memory. We must move each element
+ // individually. We need both `other` and `this` to have `other`'s key
+ // comparator while moving the values so we can't swap the key
+ // comparators.
+ *mutable_key_comp() = other.key_comp();
+ copy_or_move_values_in_order(other);
+ }
+ }
+ }
+ return *this;
+}
+
+template <typename P>
+auto btree<P>::erase(iterator iter) -> iterator {
+ bool internal_delete = false;
+ if (!iter.node->leaf()) {
+ // Deletion of a value on an internal node. First, move the largest value
+ // from our left child here, then delete that position (in remove_values()
+ // below). We can get to the largest value from our left child by
+ // decrementing iter.
+ iterator internal_iter(iter);
+ --iter;
+ assert(iter.node->leaf());
+ params_type::move(mutable_allocator(), iter.node->slot(iter.position),
+ internal_iter.node->slot(internal_iter.position));
+ internal_delete = true;
+ }
+
+ // Delete the key from the leaf.
+ iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator());
+ --size_;
+
+ // We want to return the next value after the one we just erased. If we
+ // erased from an internal node (internal_delete == true), then the next
+ // value is ++(++iter). If we erased from a leaf node (internal_delete ==
+ // false) then the next value is ++iter. Note that ++iter may point to an
+ // internal node and the value in the internal node may move to a leaf node
+ // (iter.node) when rebalancing is performed at the leaf level.
+
+ iterator res = rebalance_after_delete(iter);
+
+ // If we erased from an internal node, advance the iterator.
+ if (internal_delete) {
+ ++res;
+ }
+ return res;
+}
+
+template <typename P>
+auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
+ // Merge/rebalance as we walk back up the tree.
+ iterator res(iter);
+ bool first_iteration = true;
+ for (;;) {
+ if (iter.node == root()) {
+ try_shrink();
+ if (empty()) {
+ return end();
+ }
+ break;
+ }
+ if (iter.node->count() >= kMinNodeValues) {
+ break;
+ }
+ bool merged = try_merge_or_rebalance(&iter);
+ // On the first iteration, we should update `res` with `iter` because `res`
+ // may have been invalidated.
+ if (first_iteration) {
+ res = iter;
+ first_iteration = false;
+ }
+ if (!merged) {
+ break;
+ }
+ iter.position = iter.node->position();
+ iter.node = iter.node->parent();
+ }
+
+ // Adjust our return value. If we're pointing at the end of a node, advance
+ // the iterator.
+ if (res.position == res.node->finish()) {
+ res.position = res.node->finish() - 1;
+ ++res;
+ }
+
+ return res;
+}
+
+template <typename P>
+auto btree<P>::erase_range(iterator begin, iterator end)
+ -> std::pair<size_type, iterator> {
+ difference_type count = std::distance(begin, end);
+ assert(count >= 0);
+
+ if (count == 0) {
+ return {0, begin};
+ }
+
+ if (count == size_) {
+ clear();
+ return {count, this->end()};
+ }
+
+ if (begin.node == end.node) {
+ assert(end.position > begin.position);
+ begin.node->remove_values(begin.position, end.position - begin.position,
+ mutable_allocator());
+ size_ -= count;
+ return {count, rebalance_after_delete(begin)};
+ }
+
+ const size_type target_size = size_ - count;
+ while (size_ > target_size) {
+ if (begin.node->leaf()) {
+ const size_type remaining_to_erase = size_ - target_size;
+ const size_type remaining_in_node = begin.node->finish() - begin.position;
+ const size_type to_erase =
+ (std::min)(remaining_to_erase, remaining_in_node);
+ begin.node->remove_values(begin.position, to_erase, mutable_allocator());
+ size_ -= to_erase;
+ begin = rebalance_after_delete(begin);
+ } else {
+ begin = erase(begin);
+ }
+ }
+ return {count, begin};
+}
+
+template <typename P>
+void btree<P>::clear() {
+ if (!empty()) {
+ node_type::clear_and_delete(root(), mutable_allocator());
+ }
+ mutable_root() = EmptyNode();
+ rightmost_ = EmptyNode();
+ size_ = 0;
+}
+
+template <typename P>
+void btree<P>::swap(btree &other) {
+ using std::swap;
+ if (y_absl::allocator_traits<
+ allocator_type>::propagate_on_container_swap::value) {
+ // Note: `root_` also contains the allocator and the key comparator.
+ swap(root_, other.root_);
+ } else {
+ // It's undefined behavior if the allocators are unequal here.
+ assert(allocator() == other.allocator());
+ swap(mutable_root(), other.mutable_root());
+ swap(*mutable_key_comp(), *other.mutable_key_comp());
+ }
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
+}
+
+template <typename P>
+void btree<P>::verify() const {
+ assert(root() != nullptr);
+ assert(leftmost() != nullptr);
+ assert(rightmost_ != nullptr);
+ assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
+ assert(leftmost() == (++const_iterator(root(), -1)).node);
+ assert(rightmost_ == (--const_iterator(root(), root()->finish())).node);
+ assert(leftmost()->leaf());
+ assert(rightmost_->leaf());
+}
+
+template <typename P>
+void btree<P>::rebalance_or_split(iterator *iter) {
+ node_type *&node = iter->node;
+ int &insert_position = iter->position;
+ assert(node->count() == node->max_count());
+ assert(kNodeSlots == node->max_count());
+
+ // First try to make room on the node by rebalancing.
+ node_type *parent = node->parent();
+ if (node != root()) {
+ if (node->position() > parent->start()) {
+ // Try rebalancing with our left sibling.
+ node_type *left = parent->child(node->position() - 1);
+ assert(left->max_count() == kNodeSlots);
+ if (left->count() < kNodeSlots) {
+ // We bias rebalancing based on the position being inserted. If we're
+ // inserting at the end of the right node then we bias rebalancing to
+ // fill up the left node.
+ int to_move = (kNodeSlots - left->count()) /
+ (1 + (insert_position < static_cast<int>(kNodeSlots)));
+ to_move = (std::max)(1, to_move);
+
+ if (insert_position - to_move >= node->start() ||
+ left->count() + to_move < static_cast<int>(kNodeSlots)) {
+ left->rebalance_right_to_left(to_move, node, mutable_allocator());
+
+ assert(node->max_count() - node->count() == to_move);
+ insert_position = insert_position - to_move;
+ if (insert_position < node->start()) {
+ insert_position = insert_position + left->count() + 1;
+ node = left;
+ }
+
+ assert(node->count() < node->max_count());
+ return;
+ }
+ }
+ }
+
+ if (node->position() < parent->finish()) {
+ // Try rebalancing with our right sibling.
+ node_type *right = parent->child(node->position() + 1);
+ assert(right->max_count() == kNodeSlots);
+ if (right->count() < kNodeSlots) {
+ // We bias rebalancing based on the position being inserted. If we're
+ // inserting at the beginning of the left node then we bias rebalancing
+ // to fill up the right node.
+ int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
+ (1 + (insert_position > node->start()));
+ to_move = (std::max)(1, to_move);
+
+ if (insert_position <= node->finish() - to_move ||
+ right->count() + to_move < static_cast<int>(kNodeSlots)) {
+ node->rebalance_left_to_right(to_move, right, mutable_allocator());
+
+ if (insert_position > node->finish()) {
+ insert_position = insert_position - node->count() - 1;
+ node = right;
+ }
+
+ assert(node->count() < node->max_count());
+ return;
+ }
+ }
+ }
+
+ // Rebalancing failed, make sure there is room on the parent node for a new
+ // value.
+ assert(parent->max_count() == kNodeSlots);
+ if (parent->count() == kNodeSlots) {
+ iterator parent_iter(node->parent(), node->position());
+ rebalance_or_split(&parent_iter);
+ }
+ } else {
+ // Rebalancing not possible because this is the root node.
+ // Create a new root node and set the current root node as the child of the
+ // new root.
+ parent = new_internal_node(parent);
+ parent->init_child(parent->start(), root());
+ mutable_root() = parent;
+ // If the former root was a leaf node, then it's now the rightmost node.
+ assert(!parent->start_child()->leaf() ||
+ parent->start_child() == rightmost_);
+ }
+
+ // Split the node.
+ node_type *split_node;
+ if (node->leaf()) {
+ split_node = new_leaf_node(parent);
+ node->split(insert_position, split_node, mutable_allocator());
+ if (rightmost_ == node) rightmost_ = split_node;
+ } else {
+ split_node = new_internal_node(parent);
+ node->split(insert_position, split_node, mutable_allocator());
+ }
+
+ if (insert_position > node->finish()) {
+ insert_position = insert_position - node->count() - 1;
+ node = split_node;
+ }
+}
+
+template <typename P>
+void btree<P>::merge_nodes(node_type *left, node_type *right) {
+ left->merge(right, mutable_allocator());
+ if (rightmost_ == right) rightmost_ = left;
+}
+
+template <typename P>
+bool btree<P>::try_merge_or_rebalance(iterator *iter) {
+ node_type *parent = iter->node->parent();
+ if (iter->node->position() > parent->start()) {
+ // Try merging with our left sibling.
+ node_type *left = parent->child(iter->node->position() - 1);
+ assert(left->max_count() == kNodeSlots);
+ if (1U + left->count() + iter->node->count() <= kNodeSlots) {
+ iter->position += 1 + left->count();
+ merge_nodes(left, iter->node);
+ iter->node = left;
+ return true;
+ }
+ }
+ if (iter->node->position() < parent->finish()) {
+ // Try merging with our right sibling.
+ node_type *right = parent->child(iter->node->position() + 1);
+ assert(right->max_count() == kNodeSlots);
+ if (1U + iter->node->count() + right->count() <= kNodeSlots) {
+ merge_nodes(iter->node, right);
+ return true;
+ }
+ // Try rebalancing with our right sibling. We don't perform rebalancing if
+ // we deleted the first element from iter->node and the node is not
+ // empty. This is a small optimization for the common pattern of deleting
+ // from the front of the tree.
+ if (right->count() > kMinNodeValues &&
+ (iter->node->count() == 0 || iter->position > iter->node->start())) {
+ int to_move = (right->count() - iter->node->count()) / 2;
+ to_move = (std::min)(to_move, right->count() - 1);
+ iter->node->rebalance_right_to_left(to_move, right, mutable_allocator());
+ return false;
+ }
+ }
+ if (iter->node->position() > parent->start()) {
+ // Try rebalancing with our left sibling. We don't perform rebalancing if
+ // we deleted the last element from iter->node and the node is not
+ // empty. This is a small optimization for the common pattern of deleting
+ // from the back of the tree.
+ node_type *left = parent->child(iter->node->position() - 1);
+ if (left->count() > kMinNodeValues &&
+ (iter->node->count() == 0 || iter->position < iter->node->finish())) {
+ int to_move = (left->count() - iter->node->count()) / 2;
+ to_move = (std::min)(to_move, left->count() - 1);
+ left->rebalance_left_to_right(to_move, iter->node, mutable_allocator());
+ iter->position += to_move;
+ return false;
+ }
+ }
+ return false;
+}
+
+template <typename P>
+void btree<P>::try_shrink() {
+ node_type *orig_root = root();
+ if (orig_root->count() > 0) {
+ return;
+ }
+ // We deleted the last item on the root node; shrink the height of the tree.
+ if (orig_root->leaf()) {
+ assert(size() == 0);
+ mutable_root() = rightmost_ = EmptyNode();
+ } else {
+ node_type *child = orig_root->start_child();
+ child->make_root();
+ mutable_root() = child;
+ }
+ node_type::clear_and_delete(orig_root, mutable_allocator());
+}
+
+template <typename P>
+template <typename IterType>
+inline IterType btree<P>::internal_last(IterType iter) {
+ assert(iter.node != nullptr);
+ while (iter.position == iter.node->finish()) {
+ iter.position = iter.node->position();
+ iter.node = iter.node->parent();
+ if (iter.node->leaf()) {
+ iter.node = nullptr;
+ break;
+ }
+ }
+ return iter;
+}
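+
+// For illustration: if `iter` sits at position node->finish() of a leaf
+// (one past its last value), internal_last() climbs parent links until the
+// position refers to an actual value; climbing off the root instead leaves
+// iter.node == nullptr, which callers such as internal_end() map to end().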
+
+template <typename P>
+template <typename... Args>
+inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
+ -> iterator {
+ if (!iter.node->leaf()) {
+ // We can't insert on an internal node. Instead, we'll insert after the
+ // previous value which is guaranteed to be on a leaf node.
+ --iter;
+ ++iter.position;
+ }
+ const field_type max_count = iter.node->max_count();
+ allocator_type *alloc = mutable_allocator();
+ if (iter.node->count() == max_count) {
+ // Make room in the leaf for the new item.
+ if (max_count < kNodeSlots) {
+ // Insertion into the root where the root is smaller than the full node
+ // size. Simply grow the size of the root node.
+ assert(iter.node == root());
+ iter.node =
+ new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
+ // Transfer the values from the old root to the new root.
+ node_type *old_root = root();
+ node_type *new_root = iter.node;
+ new_root->transfer_n(old_root->count(), new_root->start(),
+ old_root->start(), old_root, alloc);
+ new_root->set_finish(old_root->finish());
+ old_root->set_finish(old_root->start());
+ node_type::clear_and_delete(old_root, alloc);
+ mutable_root() = rightmost_ = new_root;
+ } else {
+ rebalance_or_split(&iter);
+ }
+ }
+ iter.node->emplace_value(iter.position, alloc, std::forward<Args>(args)...);
+ ++size_;
+ return iter;
+}
+
+template <typename P>
+template <typename K>
+inline auto btree<P>::internal_locate(const K &key) const
+ -> SearchResult<iterator, is_key_compare_to::value> {
+ iterator iter(const_cast<node_type *>(root()));
+ for (;;) {
+ SearchResult<int, is_key_compare_to::value> res =
+ iter.node->lower_bound(key, key_comp());
+ iter.position = res.value;
+ if (res.IsEq()) {
+ return {iter, MatchKind::kEq};
+ }
+ // Note: in the non-key-compare-to case, we don't need to walk all the way
+ // down the tree if the keys are equal, but determining equality would
+ // require doing an extra comparison on each node on the way down, and we
+ // will need to go all the way to the leaf node in the expected case.
+ if (iter.node->leaf()) {
+ break;
+ }
+ iter.node = iter.node->child(iter.position);
+ }
+ // Note: in the non-key-compare-to case, the key may actually be equivalent
+ // here (and the MatchKind::kNe is ignored).
+ return {iter, MatchKind::kNe};
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::internal_lower_bound(const K &key) const
+ -> SearchResult<iterator, is_key_compare_to::value> {
+ if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+ SearchResult<iterator, is_key_compare_to::value> ret = internal_locate(key);
+ ret.value = internal_last(ret.value);
+ return ret;
+ }
+ iterator iter(const_cast<node_type *>(root()));
+ SearchResult<int, is_key_compare_to::value> res;
+ bool seen_eq = false;
+ for (;;) {
+ res = iter.node->lower_bound(key, key_comp());
+ iter.position = res.value;
+ if (iter.node->leaf()) {
+ break;
+ }
+ seen_eq = seen_eq || res.IsEq();
+ iter.node = iter.node->child(iter.position);
+ }
+ if (res.IsEq()) return {iter, MatchKind::kEq};
+ return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
+ iterator iter(const_cast<node_type *>(root()));
+ for (;;) {
+ iter.position = iter.node->upper_bound(key, key_comp());
+ if (iter.node->leaf()) {
+ break;
+ }
+ iter.node = iter.node->child(iter.position);
+ }
+ return internal_last(iter);
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::internal_find(const K &key) const -> iterator {
+ SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+ if (res.HasMatch()) {
+ if (res.IsEq()) {
+ return res.value;
+ }
+ } else {
+ const iterator iter = internal_last(res.value);
+ if (iter.node != nullptr && !compare_keys(key, iter.key())) {
+ return iter;
+ }
+ }
+ return {nullptr, 0};
+}
+
+template <typename P>
+int btree<P>::internal_verify(const node_type *node, const key_type *lo,
+ const key_type *hi) const {
+ assert(node->count() > 0);
+ assert(node->count() <= node->max_count());
+ if (lo) {
+ assert(!compare_keys(node->key(node->start()), *lo));
+ }
+ if (hi) {
+ assert(!compare_keys(*hi, node->key(node->finish() - 1)));
+ }
+ for (int i = node->start() + 1; i < node->finish(); ++i) {
+ assert(!compare_keys(node->key(i), node->key(i - 1)));
+ }
+ int count = node->count();
+ if (!node->leaf()) {
+ for (int i = node->start(); i <= node->finish(); ++i) {
+ assert(node->child(i) != nullptr);
+ assert(node->child(i)->parent() == node);
+ assert(node->child(i)->position() == i);
+ count += internal_verify(node->child(i),
+ i == node->start() ? lo : &node->key(i - 1),
+ i == node->finish() ? hi : &node->key(i));
+ }
+ }
+ return count;
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_BTREE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/btree_container.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/btree_container.h
new file mode 100644
index 00000000000..d23feff31ba
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/btree_container.h
@@ -0,0 +1,683 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
+
+#include <algorithm>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/throw_delegate.h"
+#include "y_absl/container/internal/btree.h" // IWYU pragma: export
+#include "y_absl/container/internal/common.h"
+#include "y_absl/memory/memory.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A common base class for btree_set, btree_map, btree_multiset, and
+// btree_multimap.
+template <typename Tree>
+class btree_container {
+ using params_type = typename Tree::params_type;
+
+ protected:
+ // Alias used for heterogeneous lookup functions.
+ // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+ // `key_type` otherwise. It permits template argument deduction on `K` for the
+ // transparent case.
+ template <class K>
+ using key_arg =
+ typename KeyArg<IsTransparent<typename Tree::key_compare>::value>::
+ template type<K, typename Tree::key_type>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using value_type = typename Tree::value_type;
+ using size_type = typename Tree::size_type;
+ using difference_type = typename Tree::difference_type;
+ using key_compare = typename Tree::original_key_compare;
+ using value_compare = typename Tree::value_compare;
+ using allocator_type = typename Tree::allocator_type;
+ using reference = typename Tree::reference;
+ using const_reference = typename Tree::const_reference;
+ using pointer = typename Tree::pointer;
+ using const_pointer = typename Tree::const_pointer;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+ using reverse_iterator = typename Tree::reverse_iterator;
+ using const_reverse_iterator = typename Tree::const_reverse_iterator;
+ using node_type = typename Tree::node_handle_type;
+
+ // Constructors/assignments.
+ btree_container() : tree_(key_compare(), allocator_type()) {}
+ explicit btree_container(const key_compare &comp,
+ const allocator_type &alloc = allocator_type())
+ : tree_(comp, alloc) {}
+ explicit btree_container(const allocator_type &alloc)
+ : tree_(key_compare(), alloc) {}
+
+ btree_container(const btree_container &other)
+ : btree_container(other, y_absl::allocator_traits<allocator_type>::
+ select_on_container_copy_construction(
+ other.get_allocator())) {}
+ btree_container(const btree_container &other, const allocator_type &alloc)
+ : tree_(other.tree_, alloc) {}
+
+ btree_container(btree_container &&other) noexcept(
+ std::is_nothrow_move_constructible<Tree>::value) = default;
+ btree_container(btree_container &&other, const allocator_type &alloc)
+ : tree_(std::move(other.tree_), alloc) {}
+
+ btree_container &operator=(const btree_container &other) = default;
+ btree_container &operator=(btree_container &&other) noexcept(
+ std::is_nothrow_move_assignable<Tree>::value) = default;
+
+ // Iterator routines.
+ iterator begin() { return tree_.begin(); }
+ const_iterator begin() const { return tree_.begin(); }
+ const_iterator cbegin() const { return tree_.begin(); }
+ iterator end() { return tree_.end(); }
+ const_iterator end() const { return tree_.end(); }
+ const_iterator cend() const { return tree_.end(); }
+ reverse_iterator rbegin() { return tree_.rbegin(); }
+ const_reverse_iterator rbegin() const { return tree_.rbegin(); }
+ const_reverse_iterator crbegin() const { return tree_.rbegin(); }
+ reverse_iterator rend() { return tree_.rend(); }
+ const_reverse_iterator rend() const { return tree_.rend(); }
+ const_reverse_iterator crend() const { return tree_.rend(); }
+
+ // Lookup routines.
+ template <typename K = key_type>
+ size_type count(const key_arg<K> &key) const {
+ auto equal_range = this->equal_range(key);
+ return std::distance(equal_range.first, equal_range.second);
+ }
+ template <typename K = key_type>
+ iterator find(const key_arg<K> &key) {
+ return tree_.find(key);
+ }
+ template <typename K = key_type>
+ const_iterator find(const key_arg<K> &key) const {
+ return tree_.find(key);
+ }
+ template <typename K = key_type>
+ bool contains(const key_arg<K> &key) const {
+ return find(key) != end();
+ }
+ template <typename K = key_type>
+ iterator lower_bound(const key_arg<K> &key) {
+ return tree_.lower_bound(key);
+ }
+ template <typename K = key_type>
+ const_iterator lower_bound(const key_arg<K> &key) const {
+ return tree_.lower_bound(key);
+ }
+ template <typename K = key_type>
+ iterator upper_bound(const key_arg<K> &key) {
+ return tree_.upper_bound(key);
+ }
+ template <typename K = key_type>
+ const_iterator upper_bound(const key_arg<K> &key) const {
+ return tree_.upper_bound(key);
+ }
+ template <typename K = key_type>
+ std::pair<iterator, iterator> equal_range(const key_arg<K> &key) {
+ return tree_.equal_range(key);
+ }
+ template <typename K = key_type>
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_arg<K> &key) const {
+ return tree_.equal_range(key);
+ }
+
+ // Deletion routines. Note that there is also a deletion routine that is
+ // specific to btree_set_container/btree_multiset_container.
+
+ // Erase the specified iterator from the btree. The iterator must be valid
+ // (i.e. not equal to end()). Return an iterator pointing to the node after
+ // the one that was erased (or end() if none exists).
+ iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); }
+ iterator erase(iterator iter) { return tree_.erase(iter); }
+ iterator erase(const_iterator first, const_iterator last) {
+ return tree_.erase_range(iterator(first), iterator(last)).second;
+ }
+ template <typename K = key_type>
+ size_type erase(const key_arg<K> &key) {
+ auto equal_range = this->equal_range(key);
+ return tree_.erase_range(equal_range.first, equal_range.second).first;
+ }
+
+ // Extract routines.
+ node_type extract(iterator position) {
+ // Use Move instead of Transfer, because the rebalancing code expects to
+ // have a valid object to scribble metadata bits on top of.
+ auto node = CommonAccess::Move<node_type>(get_allocator(), position.slot());
+ erase(position);
+ return node;
+ }
+ node_type extract(const_iterator position) {
+ return extract(iterator(position));
+ }
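+
+  // A minimal sketch of the node-handle round trip (the containers below are
+  // illustrative assumptions): extract() removes an element without
+  // destroying it, and the handle can be re-inserted elsewhere.
+  //
+  //   y_absl::btree_set<int> a = {1, 2, 3}, b;
+  //   auto node = a.extract(a.find(2));  // a == {1, 3}
+  //   b.insert(std::move(node));         // b == {2}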
+
+ // Utility routines.
+ ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); }
+ void swap(btree_container &other) { tree_.swap(other.tree_); }
+ void verify() const { tree_.verify(); }
+
+ // Size routines.
+ size_type size() const { return tree_.size(); }
+ size_type max_size() const { return tree_.max_size(); }
+ bool empty() const { return tree_.empty(); }
+
+ friend bool operator==(const btree_container &x, const btree_container &y) {
+ if (x.size() != y.size()) return false;
+ return std::equal(x.begin(), x.end(), y.begin());
+ }
+
+ friend bool operator!=(const btree_container &x, const btree_container &y) {
+ return !(x == y);
+ }
+
+ friend bool operator<(const btree_container &x, const btree_container &y) {
+ return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end());
+ }
+
+ friend bool operator>(const btree_container &x, const btree_container &y) {
+ return y < x;
+ }
+
+ friend bool operator<=(const btree_container &x, const btree_container &y) {
+ return !(y < x);
+ }
+
+ friend bool operator>=(const btree_container &x, const btree_container &y) {
+ return !(x < y);
+ }
+
+ // The allocator used by the btree.
+ allocator_type get_allocator() const { return tree_.get_allocator(); }
+
+ // The key comparator used by the btree.
+ key_compare key_comp() const { return key_compare(tree_.key_comp()); }
+ value_compare value_comp() const { return tree_.value_comp(); }
+
+ // Support y_absl::Hash.
+ template <typename State>
+ friend State AbslHashValue(State h, const btree_container &b) {
+ for (const auto &v : b) {
+ h = State::combine(std::move(h), v);
+ }
+ return State::combine(std::move(h), b.size());
+ }
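+
+  // A minimal sketch (the set below is an illustrative assumption): since
+  // the hash combines the elements in iteration order plus the size, equal
+  // containers produce equal hashes.
+  //
+  //   y_absl::Hash<y_absl::btree_set<int>>{}(y_absl::btree_set<int>{1, 2});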
+
+ protected:
+ Tree tree_;
+};
+
+// A common base class for btree_set and btree_map.
+template <typename Tree>
+class btree_set_container : public btree_container<Tree> {
+ using super_type = btree_container<Tree>;
+ using params_type = typename Tree::params_type;
+ using init_type = typename params_type::init_type;
+ using is_key_compare_to = typename params_type::is_key_compare_to;
+ friend class BtreeNodePeer;
+
+ protected:
+ template <class K>
+ using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using value_type = typename Tree::value_type;
+ using size_type = typename Tree::size_type;
+ using key_compare = typename Tree::original_key_compare;
+ using allocator_type = typename Tree::allocator_type;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+ using node_type = typename super_type::node_type;
+ using insert_return_type = InsertReturnType<iterator, node_type>;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_set_container() {}
+
+ // Range constructors.
+ template <class InputIterator>
+ btree_set_container(InputIterator b, InputIterator e,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : super_type(comp, alloc) {
+ insert(b, e);
+ }
+ template <class InputIterator>
+ btree_set_container(InputIterator b, InputIterator e,
+ const allocator_type &alloc)
+ : btree_set_container(b, e, key_compare(), alloc) {}
+
+ // Initializer list constructors.
+ btree_set_container(std::initializer_list<init_type> init,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : btree_set_container(init.begin(), init.end(), comp, alloc) {}
+ btree_set_container(std::initializer_list<init_type> init,
+ const allocator_type &alloc)
+ : btree_set_container(init.begin(), init.end(), alloc) {}
+
+ // Insertion routines.
+ std::pair<iterator, bool> insert(const value_type &v) {
+ return this->tree_.insert_unique(params_type::key(v), v);
+ }
+ std::pair<iterator, bool> insert(value_type &&v) {
+ return this->tree_.insert_unique(params_type::key(v), std::move(v));
+ }
+ template <typename... Args>
+ std::pair<iterator, bool> emplace(Args &&... args) {
+ init_type v(std::forward<Args>(args)...);
+ return this->tree_.insert_unique(params_type::key(v), std::move(v));
+ }
+ iterator insert(const_iterator hint, const value_type &v) {
+ return this->tree_
+ .insert_hint_unique(iterator(hint), params_type::key(v), v)
+ .first;
+ }
+ iterator insert(const_iterator hint, value_type &&v) {
+ return this->tree_
+ .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
+ .first;
+ }
+ template <typename... Args>
+ iterator emplace_hint(const_iterator hint, Args &&... args) {
+ init_type v(std::forward<Args>(args)...);
+ return this->tree_
+ .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
+ .first;
+ }
+ template <typename InputIterator>
+ void insert(InputIterator b, InputIterator e) {
+ this->tree_.insert_iterator_unique(b, e, 0);
+ }
+ void insert(std::initializer_list<init_type> init) {
+ this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
+ }
+ insert_return_type insert(node_type &&node) {
+ if (!node) return {this->end(), false, node_type()};
+ std::pair<iterator, bool> res =
+ this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)),
+ CommonAccess::GetSlot(node));
+ if (res.second) {
+ CommonAccess::Destroy(&node);
+ return {res.first, true, node_type()};
+ } else {
+ return {res.first, false, std::move(node)};
+ }
+ }
+ iterator insert(const_iterator hint, node_type &&node) {
+ if (!node) return this->end();
+ std::pair<iterator, bool> res = this->tree_.insert_hint_unique(
+ iterator(hint), params_type::key(CommonAccess::GetSlot(node)),
+ CommonAccess::GetSlot(node));
+ if (res.second) CommonAccess::Destroy(&node);
+ return res.first;
+ }
+
+ // Node extraction routines.
+ template <typename K = key_type>
+ node_type extract(const key_arg<K> &key) {
+ const std::pair<iterator, bool> lower_and_equal =
+ this->tree_.lower_bound_equal(key);
+ return lower_and_equal.second ? extract(lower_and_equal.first)
+ : node_type();
+ }
+ using super_type::extract;
+
+ // Merge routines.
+  // Moves elements from `src` into `this`. If an element already exists in
+  // `this`, it is left unmodified in `src`.
+ template <
+ typename T,
+ typename y_absl::enable_if_t<
+ y_absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &src) { // NOLINT
+ for (auto src_it = src.begin(); src_it != src.end();) {
+ if (insert(std::move(params_type::element(src_it.slot()))).second) {
+ src_it = src.erase(src_it);
+ } else {
+ ++src_it;
+ }
+ }
+ }
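+
+  // A minimal sketch of these semantics (the sets below are illustrative
+  // assumptions): duplicates stay behind in `src`.
+  //
+  //   y_absl::btree_set<int> dst = {1, 2}, src = {2, 3};
+  //   dst.merge(src);  // dst == {1, 2, 3}, src == {2}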
+
+ template <
+ typename T,
+ typename y_absl::enable_if_t<
+ y_absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &&src) {
+ merge(src);
+ }
+};
+
+// Base class for btree_map.
+template <typename Tree>
+class btree_map_container : public btree_set_container<Tree> {
+ using super_type = btree_set_container<Tree>;
+ using params_type = typename Tree::params_type;
+ friend class BtreeNodePeer;
+
+ private:
+ template <class K>
+ using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using mapped_type = typename params_type::mapped_type;
+ using value_type = typename Tree::value_type;
+ using key_compare = typename Tree::original_key_compare;
+ using allocator_type = typename Tree::allocator_type;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_map_container() {}
+
+ // Insertion routines.
+ // Note: the nullptr template arguments and extra `const M&` overloads allow
+ // for supporting bitfield arguments.
+ template <typename K = key_type, class M>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
+ const M &obj) {
+ return insert_or_assign_impl(k, obj);
+ }
+ template <typename K = key_type, class M, K * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
+ return insert_or_assign_impl(std::forward<K>(k), obj);
+ }
+ template <typename K = key_type, class M, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
+ return insert_or_assign_impl(k, std::forward<M>(obj));
+ }
+ template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj) {
+ return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
+ }
+ template <typename K = key_type, class M>
+ iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
+ const M &obj) {
+ return insert_or_assign_hint_impl(hint, k, obj);
+ }
+ template <typename K = key_type, class M, K * = nullptr>
+ iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, const M &obj) {
+ return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
+ }
+ template <typename K = key_type, class M, M * = nullptr>
+ iterator insert_or_assign(const_iterator hint, const key_arg<K> &k, M &&obj) {
+ return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
+ }
+ template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+ iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, M &&obj) {
+ return insert_or_assign_hint_impl(hint, std::forward<K>(k),
+ std::forward<M>(obj));
+ }
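+
+  // A minimal sketch of insert_or_assign (the map below is an illustrative
+  // assumption): the mapped value is assigned when the key already exists.
+  //
+  //   y_absl::btree_map<int, TString> m;
+  //   m.insert_or_assign(1, "a");  // inserts {1, "a"}
+  //   m.insert_or_assign(1, "b");  // key present; assigns m[1] = "b"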
+
+ template <typename K = key_type, typename... Args,
+ typename y_absl::enable_if_t<
+ !std::is_convertible<K, const_iterator>::value, int> = 0>
+ std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&... args) {
+ return try_emplace_impl(k, std::forward<Args>(args)...);
+ }
+ template <typename K = key_type, typename... Args,
+ typename y_absl::enable_if_t<
+ !std::is_convertible<K, const_iterator>::value, int> = 0>
+ std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&... args) {
+ return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
+ }
+ template <typename K = key_type, typename... Args>
+ iterator try_emplace(const_iterator hint, const key_arg<K> &k,
+ Args &&... args) {
+ return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
+ }
+ template <typename K = key_type, typename... Args>
+ iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
+ return try_emplace_hint_impl(hint, std::forward<K>(k),
+ std::forward<Args>(args)...);
+ }
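+
+  // A minimal sketch contrasting try_emplace with insert (the map below is
+  // an illustrative assumption): when the key already exists, try_emplace
+  // leaves its arguments untouched, which matters for move-only mapped types.
+  //
+  //   y_absl::btree_map<int, std::unique_ptr<int>> m;
+  //   auto p = y_absl::make_unique<int>(7);
+  //   m.try_emplace(1, std::move(p));  // inserted; p is moved from
+  //   auto q = y_absl::make_unique<int>(8);
+  //   m.try_emplace(1, std::move(q));  // key present; q is left intact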
+
+ template <typename K = key_type>
+ mapped_type &operator[](const key_arg<K> &k) {
+ return try_emplace(k).first->second;
+ }
+ template <typename K = key_type>
+ mapped_type &operator[](key_arg<K> &&k) {
+ return try_emplace(std::forward<K>(k)).first->second;
+ }
+
+ template <typename K = key_type>
+ mapped_type &at(const key_arg<K> &key) {
+ auto it = this->find(key);
+ if (it == this->end())
+ base_internal::ThrowStdOutOfRange("y_absl::btree_map::at");
+ return it->second;
+ }
+ template <typename K = key_type>
+ const mapped_type &at(const key_arg<K> &key) const {
+ auto it = this->find(key);
+ if (it == this->end())
+ base_internal::ThrowStdOutOfRange("y_absl::btree_map::at");
+ return it->second;
+ }
+
+ private:
+ // Note: when we call `std::forward<M>(obj)` twice, it's safe because
+ // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
+ // `ret.second` is false.
+ template <class K, class M>
+ std::pair<iterator, bool> insert_or_assign_impl(K &&k, M &&obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_unique(k, std::forward<K>(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret;
+ }
+ template <class K, class M>
+ iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+ iterator(hint), k, std::forward<K>(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret.first;
+ }
+
+ template <class K, class... Args>
+ std::pair<iterator, bool> try_emplace_impl(K &&k, Args &&... args) {
+ return this->tree_.insert_unique(
+ k, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ }
+ template <class K, class... Args>
+ iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) {
+ return this->tree_
+ .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...))
+ .first;
+ }
+};
+
+// A common base class for btree_multiset and btree_multimap.
+template <typename Tree>
+class btree_multiset_container : public btree_container<Tree> {
+ using super_type = btree_container<Tree>;
+ using params_type = typename Tree::params_type;
+ using init_type = typename params_type::init_type;
+ using is_key_compare_to = typename params_type::is_key_compare_to;
+
+ template <class K>
+ using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using value_type = typename Tree::value_type;
+ using size_type = typename Tree::size_type;
+ using key_compare = typename Tree::original_key_compare;
+ using allocator_type = typename Tree::allocator_type;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+ using node_type = typename super_type::node_type;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_multiset_container() {}
+
+ // Range constructors.
+ template <class InputIterator>
+ btree_multiset_container(InputIterator b, InputIterator e,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : super_type(comp, alloc) {
+ insert(b, e);
+ }
+ template <class InputIterator>
+ btree_multiset_container(InputIterator b, InputIterator e,
+ const allocator_type &alloc)
+ : btree_multiset_container(b, e, key_compare(), alloc) {}
+
+ // Initializer list constructors.
+ btree_multiset_container(std::initializer_list<init_type> init,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
+ btree_multiset_container(std::initializer_list<init_type> init,
+ const allocator_type &alloc)
+ : btree_multiset_container(init.begin(), init.end(), alloc) {}
+
+ // Insertion routines.
+ iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
+ iterator insert(value_type &&v) {
+ return this->tree_.insert_multi(std::move(v));
+ }
+ iterator insert(const_iterator hint, const value_type &v) {
+ return this->tree_.insert_hint_multi(iterator(hint), v);
+ }
+ iterator insert(const_iterator hint, value_type &&v) {
+ return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
+ }
+ template <typename InputIterator>
+ void insert(InputIterator b, InputIterator e) {
+ this->tree_.insert_iterator_multi(b, e);
+ }
+ void insert(std::initializer_list<init_type> init) {
+ this->tree_.insert_iterator_multi(init.begin(), init.end());
+ }
+ template <typename... Args>
+ iterator emplace(Args &&... args) {
+ return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
+ }
+ template <typename... Args>
+ iterator emplace_hint(const_iterator hint, Args &&... args) {
+ return this->tree_.insert_hint_multi(
+ iterator(hint), init_type(std::forward<Args>(args)...));
+ }
+ iterator insert(node_type &&node) {
+ if (!node) return this->end();
+ iterator res =
+ this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)),
+ CommonAccess::GetSlot(node));
+ CommonAccess::Destroy(&node);
+ return res;
+ }
+ iterator insert(const_iterator hint, node_type &&node) {
+ if (!node) return this->end();
+ iterator res = this->tree_.insert_hint_multi(
+ iterator(hint),
+ std::move(params_type::element(CommonAccess::GetSlot(node))));
+ CommonAccess::Destroy(&node);
+ return res;
+ }
+
+ // Node extraction routines.
+ template <typename K = key_type>
+ node_type extract(const key_arg<K> &key) {
+ const std::pair<iterator, bool> lower_and_equal =
+ this->tree_.lower_bound_equal(key);
+ return lower_and_equal.second ? extract(lower_and_equal.first)
+ : node_type();
+ }
+ using super_type::extract;
+
+ // Merge routines.
+ // Moves all elements from `src` into `this`.
+ template <
+ typename T,
+ typename y_absl::enable_if_t<
+ y_absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &src) { // NOLINT
+ for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) {
+ insert(std::move(params_type::element(src_it.slot())));
+ }
+ src.clear();
+ }
+
+ template <
+ typename T,
+ typename y_absl::enable_if_t<
+ y_absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &&src) {
+ merge(src);
+ }
+};
+
+// A base class for btree_multimap.
+template <typename Tree>
+class btree_multimap_container : public btree_multiset_container<Tree> {
+ using super_type = btree_multiset_container<Tree>;
+ using params_type = typename Tree::params_type;
+
+ public:
+ using mapped_type = typename params_type::mapped_type;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_multimap_container() {}
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common.h
new file mode 100644
index 00000000000..ec84f975e52
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common.h
@@ -0,0 +1,206 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+
+#include <cassert>
+#include <type_traits>
+
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/types/optional.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class, class = void>
+struct IsTransparent : std::false_type {};
+template <class T>
+struct IsTransparent<T, y_absl::void_t<typename T::is_transparent>>
+ : std::true_type {};
+
+template <bool is_transparent>
+struct KeyArg {
+ // Transparent. Forward `K`.
+ template <typename K, typename key_type>
+ using type = K;
+};
+
+template <>
+struct KeyArg<false> {
+ // Not transparent. Always use `key_type`.
+ template <typename K, typename key_type>
+ using type = key_type;
+};
+
+// The node_handle concept from C++17.
+// We specialize node_handle for sets and maps. node_handle_base holds the
+// common API of both.
+template <typename PolicyTraits, typename Alloc>
+class node_handle_base {
+ protected:
+ using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+ using allocator_type = Alloc;
+
+ constexpr node_handle_base() = default;
+ node_handle_base(node_handle_base&& other) noexcept {
+ *this = std::move(other);
+ }
+ ~node_handle_base() { destroy(); }
+ node_handle_base& operator=(node_handle_base&& other) noexcept {
+ destroy();
+ if (!other.empty()) {
+ alloc_ = other.alloc_;
+ PolicyTraits::transfer(alloc(), slot(), other.slot());
+ other.reset();
+ }
+ return *this;
+ }
+
+ bool empty() const noexcept { return !alloc_; }
+ explicit operator bool() const noexcept { return !empty(); }
+ allocator_type get_allocator() const { return *alloc_; }
+
+ protected:
+ friend struct CommonAccess;
+
+ struct transfer_tag_t {};
+ node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s)
+ : alloc_(a) {
+ PolicyTraits::transfer(alloc(), slot(), s);
+ }
+
+ struct move_tag_t {};
+ node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
+ : alloc_(a) {
+ PolicyTraits::construct(alloc(), slot(), s);
+ }
+
+ void destroy() {
+ if (!empty()) {
+ PolicyTraits::destroy(alloc(), slot());
+ reset();
+ }
+ }
+
+ void reset() {
+ assert(alloc_.has_value());
+ alloc_ = y_absl::nullopt;
+ }
+
+ slot_type* slot() const {
+ assert(!empty());
+ return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
+ }
+ allocator_type* alloc() { return std::addressof(*alloc_); }
+
+ private:
+ y_absl::optional<allocator_type> alloc_ = {};
+ alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {};
+};
+
+// For sets.
+template <typename Policy, typename PolicyTraits, typename Alloc,
+ typename = void>
+class node_handle : public node_handle_base<PolicyTraits, Alloc> {
+ using Base = node_handle_base<PolicyTraits, Alloc>;
+
+ public:
+ using value_type = typename PolicyTraits::value_type;
+
+ constexpr node_handle() {}
+
+ value_type& value() const { return PolicyTraits::element(this->slot()); }
+
+ private:
+ friend struct CommonAccess;
+
+ using Base::Base;
+};
+
+// For maps.
+template <typename Policy, typename PolicyTraits, typename Alloc>
+class node_handle<Policy, PolicyTraits, Alloc,
+ y_absl::void_t<typename Policy::mapped_type>>
+ : public node_handle_base<PolicyTraits, Alloc> {
+ using Base = node_handle_base<PolicyTraits, Alloc>;
+ using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+ using key_type = typename Policy::key_type;
+ using mapped_type = typename Policy::mapped_type;
+
+ constexpr node_handle() {}
+
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key. Otherwise, we provide const access.
+ auto key() const
+ -> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>())) {
+ return PolicyTraits::mutable_key(this->slot());
+ }
+
+ mapped_type& mapped() const {
+ return PolicyTraits::value(&PolicyTraits::element(this->slot()));
+ }
+
+ private:
+ friend struct CommonAccess;
+
+ using Base::Base;
+};
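+
+// A minimal sketch (the map `m` is an illustrative assumption): a map's node
+// handle exposes key() and mapped(), so a key can be updated between
+// extraction and re-insertion without copying the element.
+//
+//   auto node = m.extract(1);
+//   node.key() = 2;  // mutable when std::launder is available (see above)
+//   m.insert(std::move(node));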
+
+// Provide access to non-public node-handle functions.
+struct CommonAccess {
+ template <typename Node>
+ static auto GetSlot(const Node& node) -> decltype(node.slot()) {
+ return node.slot();
+ }
+
+ template <typename Node>
+ static void Destroy(Node* node) {
+ node->destroy();
+ }
+
+ template <typename Node>
+ static void Reset(Node* node) {
+ node->reset();
+ }
+
+ template <typename T, typename... Args>
+ static T Transfer(Args&&... args) {
+ return T(typename T::transfer_tag_t{}, std::forward<Args>(args)...);
+ }
+
+ template <typename T, typename... Args>
+ static T Move(Args&&... args) {
+ return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
+ }
+};
+
+// Implement the insert_return_type<> concept of C++17.
+template <class Iterator, class NodeType>
+struct InsertReturnType {
+ Iterator position;
+ bool inserted;
+ NodeType node;
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/compressed_tuple.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/compressed_tuple.h
new file mode 100644
index 00000000000..735b738a9b7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/compressed_tuple.h
@@ -0,0 +1,290 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Helper class to perform the Empty Base Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the optimization. If all types in Ts are empty
+// classes, then CompressedTuple<Ts...> is itself an empty class.
+//
+// To access the members, use the member get<N>() function.
+//
+// Eg:
+// y_absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
+// t3);
+// assert(value.get<0>() == 7);
+// T1& t1 = value.get<1>();
+// const T2& t2 = value.get<2>();
+// ...
+//
+// https://en.cppreference.com/w/cpp/language/ebo
+
+#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+
+#include <initializer_list>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/utility/utility.h"
+
+#if defined(_MSC_VER) && !defined(__NVCC__)
+// We need to mark these classes with this declspec to ensure that
+// the empty base optimization performed by CompressedTuple happens.
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
+#else
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <typename... Ts>
+class CompressedTuple;
+
+namespace internal_compressed_tuple {
+
+template <typename D, size_t I>
+struct Elem;
+template <typename... B, size_t I>
+struct Elem<CompressedTuple<B...>, I>
+ : std::tuple_element<I, std::tuple<B...>> {};
+template <typename D, size_t I>
+using ElemT = typename Elem<D, I>::type;
+
+// Use the __is_final intrinsic if available. Where it's not available, classes
+// declared with the 'final' specifier cannot be used as CompressedTuple
+// elements.
+// TODO(sbenza): Replace this with std::is_final in C++14.
+template <typename T>
+constexpr bool IsFinal() {
+#if defined(__clang__) || defined(__GNUC__)
+ return __is_final(T);
+#else
+ return false;
+#endif
+}
+
+// We can't use EBCO on other CompressedTuples because that would mean that we
+// derive from multiple Storage<> instantiations with the same I parameter,
+// and potentially from multiple identical Storage<> instantiations. So anytime
+// we use type inheritance rather than encapsulation, we mark
+// CompressedTupleImpl, to make this easy to detect.
+struct uses_inheritance {};
+
+template <typename T>
+constexpr bool ShouldUseBase() {
+ return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+ !std::is_base_of<uses_inheritance, T>::value;
+}
+
+// The storage class provides two specializations:
+// - For empty classes, it stores T as a base class.
+// - For everything else, it stores T as a member.
+template <typename T, size_t I,
+#if defined(_MSC_VER)
+ bool UseBase =
+ ShouldUseBase<typename std::enable_if<true, T>::type>()>
+#else
+ bool UseBase = ShouldUseBase<T>()>
+#endif
+struct Storage {
+ T value;
+ constexpr Storage() = default;
+ template <typename V>
+ explicit constexpr Storage(y_absl::in_place_t, V&& v)
+ : value(y_absl::forward<V>(v)) {}
+ constexpr const T& get() const& { return value; }
+ T& get() & { return value; }
+ constexpr const T&& get() const&& { return y_absl::move(*this).value; }
+ T&& get() && { return std::move(*this).value; }
+};
+
+template <typename T, size_t I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
+ constexpr Storage() = default;
+
+ template <typename V>
+ explicit constexpr Storage(y_absl::in_place_t, V&& v)
+ : T(y_absl::forward<V>(v)) {}
+
+ constexpr const T& get() const& { return *this; }
+ T& get() & { return *this; }
+ constexpr const T&& get() const&& { return y_absl::move(*this); }
+ T&& get() && { return std::move(*this); }
+};
+
+template <typename D, typename I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
+
+template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, y_absl::index_sequence<I...>, ShouldAnyUseBase>
+  // We use the dummy identity function through std::integral_constant to
+  // convince MSVC to accept and expand I in that context. Without it
+ // you would get:
+ // error C3548: 'I': parameter pack cannot be used in this context
+ : uses_inheritance,
+ Storage<Ts, std::integral_constant<size_t, I>::value>... {
+ constexpr CompressedTupleImpl() = default;
+ template <typename... Vs>
+ explicit constexpr CompressedTupleImpl(y_absl::in_place_t, Vs&&... args)
+ : Storage<Ts, I>(y_absl::in_place, y_absl::forward<Vs>(args))... {}
+ friend CompressedTuple<Ts...>;
+};
+
+template <typename... Ts, size_t... I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, y_absl::index_sequence<I...>, false>
+ // We use the dummy identity function as above...
+ : Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
+ constexpr CompressedTupleImpl() = default;
+ template <typename... Vs>
+ explicit constexpr CompressedTupleImpl(y_absl::in_place_t, Vs&&... args)
+ : Storage<Ts, I, false>(y_absl::in_place, y_absl::forward<Vs>(args))... {}
+ friend CompressedTuple<Ts...>;
+};
+
+std::false_type Or(std::initializer_list<std::false_type>);
+std::true_type Or(std::initializer_list<bool>);
+
+// MSVC requires this to be done separately rather than within the declaration
+// of CompressedTuple below.
+template <typename... Ts>
+constexpr bool ShouldAnyUseBase() {
+ return decltype(
+ Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
+}
+
+template <typename T, typename V>
+using TupleElementMoveConstructible =
+ typename std::conditional<std::is_reference<T>::value,
+ std::is_convertible<V, T>,
+ std::is_constructible<T, V&&>>::type;
+
+template <bool SizeMatches, class T, class... Vs>
+struct TupleMoveConstructible : std::false_type {};
+
+template <class... Ts, class... Vs>
+struct TupleMoveConstructible<true, CompressedTuple<Ts...>, Vs...>
+ : std::integral_constant<
+ bool, y_absl::conjunction<
+ TupleElementMoveConstructible<Ts, Vs&&>...>::value> {};
+
+template <typename T>
+struct compressed_tuple_size;
+
+template <typename... Es>
+struct compressed_tuple_size<CompressedTuple<Es...>>
+ : public std::integral_constant<std::size_t, sizeof...(Es)> {};
+
+template <class T, class... Vs>
+struct TupleItemsMoveConstructible
+ : std::integral_constant<
+ bool, TupleMoveConstructible<compressed_tuple_size<T>::value ==
+ sizeof...(Vs),
+ T, Vs...>::value> {};
+
+} // namespace internal_compressed_tuple
+
+// Helper class to perform the Empty Base Class Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the CompressedTuple. If all types in Ts are
+// empty classes, then CompressedTuple<Ts...> is itself an empty class. (This
+// does not apply when one or more of those empty classes is itself an empty
+// CompressedTuple.)
+//
+// To access the members, use the member .get<N>() function.
+//
+// Eg:
+// y_absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
+// t3);
+// assert(value.get<0>() == 7);
+// T1& t1 = value.get<1>();
+// const T2& t2 = value.get<2>();
+// ...
+//
+// https://en.cppreference.com/w/cpp/language/ebo
+template <typename... Ts>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
+ : private internal_compressed_tuple::CompressedTupleImpl<
+ CompressedTuple<Ts...>, y_absl::index_sequence_for<Ts...>,
+ internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
+ private:
+ template <int I>
+ using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
+
+ template <int I>
+ using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;
+
+ public:
+  // There seems to be a bug in MSVC in which using '=default' here will
+  // cause the compiler to ignore the body of other constructors. The
+  // workaround is to explicitly implement the default constructor.
+#if defined(_MSC_VER)
+ constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {}
+#else
+ constexpr CompressedTuple() = default;
+#endif
+ explicit constexpr CompressedTuple(const Ts&... base)
+ : CompressedTuple::CompressedTupleImpl(y_absl::in_place, base...) {}
+
+ template <typename First, typename... Vs,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ // Ensure we are not hiding default copy/move constructors.
+ y_absl::negation<std::is_same<void(CompressedTuple),
+ void(y_absl::decay_t<First>)>>,
+ internal_compressed_tuple::TupleItemsMoveConstructible<
+ CompressedTuple<Ts...>, First, Vs...>>::value,
+ bool> = true>
+ explicit constexpr CompressedTuple(First&& first, Vs&&... base)
+ : CompressedTuple::CompressedTupleImpl(y_absl::in_place,
+ y_absl::forward<First>(first),
+ y_absl::forward<Vs>(base)...) {}
+
+ template <int I>
+ ElemT<I>& get() & {
+ return StorageT<I>::get();
+ }
+
+ template <int I>
+ constexpr const ElemT<I>& get() const& {
+ return StorageT<I>::get();
+ }
+
+ template <int I>
+ ElemT<I>&& get() && {
+ return std::move(*this).StorageT<I>::get();
+ }
+
+ template <int I>
+ constexpr const ElemT<I>&& get() const&& {
+ return y_absl::move(*this).StorageT<I>::get();
+ }
+};
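+
+// A minimal sketch of the space savings (the Empty struct is an illustrative
+// assumption): empty-class elements contribute no storage.
+//
+//   struct Empty {};
+//   static_assert(sizeof(CompressedTuple<int, Empty>) == sizeof(int), "");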
+
+// Explicit specialization for a zero-element tuple
+// (needed to avoid ambiguous overloads for the default constructor).
+template <>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+
+#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h
new file mode 100644
index 00000000000..348bc8c0d00
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h
@@ -0,0 +1,460 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <new>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/config.h"
+#include "y_absl/memory/memory.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/utility/utility.h"
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <size_t Alignment>
+struct alignas(Alignment) AlignedType {};
+
+// Allocates at least n bytes aligned to the specified alignment.
+// Alignment must be a power of 2. It must be positive.
+//
+// Note that many allocators don't honor alignment requirements above a
+// certain threshold (usually either alignof(std::max_align_t) or
+// alignof(void*)). Allocate() doesn't apply alignment corrections. If the
+// underlying allocator returns an insufficiently aligned pointer, that's what
+// you are going to get.
+template <size_t Alignment, class Alloc>
+void* Allocate(Alloc* alloc, size_t n) {
+ static_assert(Alignment > 0, "");
+ assert(n && "n must be positive");
+ using M = AlignedType<Alignment>;
+ using A = typename y_absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+ using AT = typename y_absl::allocator_traits<Alloc>::template rebind_traits<M>;
+ // On macOS, "mem_alloc" is a #define with one argument defined in
+ // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+ // with the "foo(bar)" syntax.
+ A my_mem_alloc(*alloc);
+ void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
+ assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
+ "allocator does not respect alignment");
+ return p;
+}
+
+// The pointer must have been previously obtained by calling
+// Allocate<Alignment>(alloc, n).
+template <size_t Alignment, class Alloc>
+void Deallocate(Alloc* alloc, void* p, size_t n) {
+ static_assert(Alignment > 0, "");
+ assert(n && "n must be positive");
+ using M = AlignedType<Alignment>;
+ using A = typename y_absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+ using AT = typename y_absl::allocator_traits<Alloc>::template rebind_traits<M>;
+ // On macOS, "mem_alloc" is a #define with one argument defined in
+ // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+ // with the "foo(bar)" syntax.
+ A my_mem_alloc(*alloc);
+ AT::deallocate(my_mem_alloc, static_cast<M*>(p),
+ (n + sizeof(M) - 1) / sizeof(M));
+}
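+
+// A minimal usage sketch (the allocator below is an illustrative assumption):
+// Allocate and Deallocate must agree on both the alignment and the size.
+//
+//   std::allocator<char> a;
+//   void* p = Allocate<16>(&a, 64);  // at least 64 bytes, 16-byte aligned
+//   Deallocate<16>(&a, p, 64);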
+
+namespace memory_internal {
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the args
+// specified in the tuple.
+template <class Alloc, class T, class Tuple, size_t... I>
+void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
+ y_absl::index_sequence<I...>) {
+ y_absl::allocator_traits<Alloc>::construct(
+ *alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
+}
+
+template <class T, class F>
+struct WithConstructedImplF {
+ template <class... Args>
+ decltype(std::declval<F>()(std::declval<T>())) operator()(
+ Args&&... args) const {
+ return std::forward<F>(f)(T(std::forward<Args>(args)...));
+ }
+ F&& f;
+};
+
+template <class T, class Tuple, size_t... Is, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
+ Tuple&& t, y_absl::index_sequence<Is...>, F&& f) {
+ return WithConstructedImplF<T, F>{std::forward<F>(f)}(
+ std::get<Is>(std::forward<Tuple>(t))...);
+}
+
+template <class T, size_t... Is>
+auto TupleRefImpl(T&& t, y_absl::index_sequence<Is...>)
+ -> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
+ return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
+}
+
+// Returns a tuple of references to the elements of the input tuple. T must be a
+// tuple.
+template <class T>
+auto TupleRef(T&& t) -> decltype(
+ TupleRefImpl(std::forward<T>(t),
+ y_absl::make_index_sequence<
+ std::tuple_size<typename std::decay<T>::type>::value>())) {
+ return TupleRefImpl(
+ std::forward<T>(t),
+ y_absl::make_index_sequence<
+ std::tuple_size<typename std::decay<T>::type>::value>());
+}
+
+template <class F, class K, class V>
+decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
+ std::declval<std::tuple<K>>(), std::declval<V>()))
+DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
+ const auto& key = std::get<0>(p.first);
+ return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+ std::move(p.second));
+}
+
+} // namespace memory_internal
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the args
+// specified in the tuple.
+template <class Alloc, class T, class Tuple>
+void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
+ memory_internal::ConstructFromTupleImpl(
+ alloc, ptr, std::forward<Tuple>(t),
+ y_absl::make_index_sequence<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>());
+}
+
+// Constructs T using the args specified in the tuple and calls F with the
+// constructed value.
+template <class T, class Tuple, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
+ Tuple&& t, F&& f) {
+ return memory_internal::WithConstructedImpl<T>(
+ std::forward<Tuple>(t),
+ y_absl::make_index_sequence<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>(),
+ std::forward<F>(f));
+}
+
+// Given the arguments of a std::pair's constructor, PairArgs() returns a pair
+// of tuples with references to the passed arguments. The tuples contain
+// constructor arguments for the first and the second elements of the pair.
+//
+// The following two snippets are equivalent.
+//
+// 1. std::pair<F, S> p(args...);
+//
+// 2. auto a = PairArgs(args...);
+//    std::pair<F, S> p(std::piecewise_construct,
+//                      std::move(a.first), std::move(a.second));
+inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
+ return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
+ std::forward_as_tuple(std::forward<S>(s))};
+}
+template <class F, class S>
+std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
+ const std::pair<F, S>& p) {
+ return PairArgs(p.first, p.second);
+}
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
+ return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
+}
+template <class F, class S>
+auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
+ -> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+ memory_internal::TupleRef(std::forward<S>(s)))) {
+ return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+ memory_internal::TupleRef(std::forward<S>(s)));
+}
+
+// A helper function for implementing apply() in map policies.
+template <class F, class... Args>
+auto DecomposePair(F&& f, Args&&... args)
+ -> decltype(memory_internal::DecomposePairImpl(
+ std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
+ return memory_internal::DecomposePairImpl(
+ std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
+}
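+
+// A minimal sketch of how a map policy might wire this up (the apply() shown
+// is an illustrative assumption, not a policy defined here):
+//
+//   template <class F, class... Args>
+//   static auto apply(F&& f, Args&&... args)
+//       -> decltype(DecomposePair(std::forward<F>(f),
+//                                 std::forward<Args>(args)...)) {
+//     return DecomposePair(std::forward<F>(f), std::forward<Args>(args)...);
+//   }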
+
+// A helper function for implementing apply() in set policies.
+template <class F, class Arg>
+decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
+DecomposeValue(F&& f, Arg&& arg) {
+ const auto& key = arg;
+ return std::forward<F>(f)(key, std::forward<Arg>(arg));
+}
+
+// Helper functions for asan and msan.
+inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ ASAN_POISON_MEMORY_REGION(m, s);
+#endif
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+ __msan_poison(m, s);
+#endif
+ (void)m;
+ (void)s;
+}
+
+inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ ASAN_UNPOISON_MEMORY_REGION(m, s);
+#endif
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+ __msan_unpoison(m, s);
+#endif
+ (void)m;
+ (void)s;
+}
+
+template <typename T>
+inline void SanitizerPoisonObject(const T* object) {
+ SanitizerPoisonMemoryRegion(object, sizeof(T));
+}
+
+template <typename T>
+inline void SanitizerUnpoisonObject(const T* object) {
+ SanitizerUnpoisonMemoryRegion(object, sizeof(T));
+}
+
+namespace memory_internal {
+
+// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
+// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
+// offsetof(Pair, second) respectively. Otherwise they are -1.
+//
+// The purpose of OffsetOf is to avoid calling offsetof() on a
+// non-standard-layout type, which is non-portable.
+template <class Pair, class = std::true_type>
+struct OffsetOf {
+ static constexpr size_t kFirst = static_cast<size_t>(-1);
+ static constexpr size_t kSecond = static_cast<size_t>(-1);
+};
+
+template <class Pair>
+struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
+ static constexpr size_t kFirst = offsetof(Pair, first);
+ static constexpr size_t kSecond = offsetof(Pair, second);
+};
+
+template <class K, class V>
+struct IsLayoutCompatible {
+ private:
+ struct Pair {
+ K first;
+ V second;
+ };
+
+ // Is P layout-compatible with Pair?
+ template <class P>
+ static constexpr bool LayoutCompatible() {
+ return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
+ alignof(P) == alignof(Pair) &&
+ memory_internal::OffsetOf<P>::kFirst ==
+ memory_internal::OffsetOf<Pair>::kFirst &&
+ memory_internal::OffsetOf<P>::kSecond ==
+ memory_internal::OffsetOf<Pair>::kSecond;
+ }
+
+ public:
+ // Whether pair<const K, V> and pair<K, V> are layout-compatible. If they are,
+ // then it is safe to store them in a union and read from either.
+ static constexpr bool value = std::is_standard_layout<K>() &&
+ std::is_standard_layout<Pair>() &&
+ memory_internal::OffsetOf<Pair>::kFirst == 0 &&
+ LayoutCompatible<std::pair<K, V>>() &&
+ LayoutCompatible<std::pair<const K, V>>();
+};
+
+} // namespace memory_internal
+
+// The internal storage type for key-value containers like flat_hash_map.
+//
+// It is convenient for the value_type of a flat_hash_map<K, V> to be
+// pair<const K, V>; the "const K" prevents accidental modification of the key
+// when dealing with the reference returned from find() and similar methods.
+// However, this creates other problems; we want to be able to emplace(K, V)
+// efficiently with move operations, and similarly be able to move a
+// pair<K, V> in insert().
+//
+// The solution is this union, which aliases the const and non-const versions
+// of the pair. This also allows flat_hash_map<const K, V> to work, even though
+// that has the same efficiency issues with move in emplace() and insert() -
+// but people do it anyway.
+//
+// If kMutableKeys is false, only the value member can be accessed.
+//
+// If kMutableKeys is true, key can be accessed through all slots while value
+// and mutable_value must be accessed only via INITIALIZED slots. Slots are
+// created and destroyed via mutable_value so that the key can be moved later.
+//
+// Accessing one of the union fields while the other is active is safe as
+// long as they are layout-compatible, which is guaranteed by the definition of
+// kMutableKeys. For C++11, the relevant section of the standard is
+// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
+template <class K, class V>
+union map_slot_type {
+ map_slot_type() {}
+ ~map_slot_type() = delete;
+ using value_type = std::pair<const K, V>;
+ using mutable_value_type =
+ std::pair<y_absl::remove_const_t<K>, y_absl::remove_const_t<V>>;
+
+ value_type value;
+ mutable_value_type mutable_value;
+ y_absl::remove_const_t<K> key;
+};
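+
+// A minimal sketch (illustrative only; valid when pair<const K, V> and
+// pair<K, V> are layout-compatible, per IsLayoutCompatible above):
+//
+//   map_slot_type<int, int> slot;                         // no member built
+//   new (&slot.mutable_value) std::pair<int, int>(1, 2);  // mutable view
+//   int k = slot.value.first;                             // const-K view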
+
+template <class K, class V>
+struct map_slot_policy {
+ using slot_type = map_slot_type<K, V>;
+ using value_type = std::pair<const K, V>;
+ using mutable_value_type = std::pair<K, V>;
+
+ private:
+ static void emplace(slot_type* slot) {
+    // Constructing the union doesn't do anything at runtime, but it allows us
+    // to access its members without violating aliasing rules.
+ new (slot) slot_type;
+ }
+ // If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
+ // or the other via slot_type. We are also free to access the key via
+ // slot_type::key in this case.
+ using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;
+
+ public:
+ static value_type& element(slot_type* slot) { return slot->value; }
+ static const value_type& element(const slot_type* slot) {
+ return slot->value;
+ }
+
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key for use in node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+ static K& mutable_key(slot_type* slot) {
+ // Still check for kMutableKeys so that we can avoid calling std::launder
+ // unless necessary because it can interfere with optimizations.
+ return kMutableKeys::value ? slot->key
+ : *std::launder(const_cast<K*>(
+ std::addressof(slot->value.first)));
+ }
+#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
+ static const K& mutable_key(slot_type* slot) { return key(slot); }
+#endif
+
+ static const K& key(const slot_type* slot) {
+ return kMutableKeys::value ? slot->key : slot->value.first;
+ }
+
+ template <class Allocator, class... Args>
+ static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+ emplace(slot);
+ if (kMutableKeys::value) {
+ y_absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value,
+ std::forward<Args>(args)...);
+ } else {
+ y_absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ std::forward<Args>(args)...);
+ }
+ }
+
+ // Construct this slot by moving from another slot.
+ template <class Allocator>
+ static void construct(Allocator* alloc, slot_type* slot, slot_type* other) {
+ emplace(slot);
+ if (kMutableKeys::value) {
+ y_absl::allocator_traits<Allocator>::construct(
+ *alloc, &slot->mutable_value, std::move(other->mutable_value));
+ } else {
+ y_absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ std::move(other->value));
+ }
+ }
+
+ template <class Allocator>
+ static void destroy(Allocator* alloc, slot_type* slot) {
+ if (kMutableKeys::value) {
+ y_absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
+ } else {
+ y_absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
+ }
+ }
+
+ template <class Allocator>
+ static void transfer(Allocator* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ emplace(new_slot);
+ if (kMutableKeys::value) {
+ y_absl::allocator_traits<Allocator>::construct(
+ *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
+ } else {
+ y_absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value,
+ std::move(old_slot->value));
+ }
+ destroy(alloc, old_slot);
+ }
+
+ template <class Allocator>
+ static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
+ if (kMutableKeys::value) {
+ using std::swap;
+ swap(a->mutable_value, b->mutable_value);
+ } else {
+ value_type tmp = std::move(a->value);
+ y_absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
+ y_absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
+ std::move(b->value));
+ y_absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
+ y_absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
+ std::move(tmp));
+ }
+ }
+
+ template <class Allocator>
+ static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
+ if (kMutableKeys::value) {
+ dest->mutable_value = std::move(src->mutable_value);
+ } else {
+ y_absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
+ y_absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
+ std::move(src->value));
+ }
+ }
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/counting_allocator.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/counting_allocator.h
new file mode 100644
index 00000000000..c8975dbd90c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/counting_allocator.h
@@ -0,0 +1,114 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+
+#include <cstdint>
+#include <memory>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator {
+ public:
+ using Allocator = std::allocator<T>;
+ using AllocatorTraits = std::allocator_traits<Allocator>;
+ using value_type = typename AllocatorTraits::value_type;
+ using pointer = typename AllocatorTraits::pointer;
+ using const_pointer = typename AllocatorTraits::const_pointer;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
+
+ CountingAllocator() = default;
+ explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {}
+ CountingAllocator(int64_t* bytes_used, int64_t* instance_count)
+ : bytes_used_(bytes_used), instance_count_(instance_count) {}
+
+ template <typename U>
+ CountingAllocator(const CountingAllocator<U>& x)
+ : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {}
+
+ pointer allocate(
+ size_type n,
+ typename AllocatorTraits::const_void_pointer hint = nullptr) {
+ Allocator allocator;
+ pointer ptr = AllocatorTraits::allocate(allocator, n, hint);
+ if (bytes_used_ != nullptr) {
+ *bytes_used_ += n * sizeof(T);
+ }
+ return ptr;
+ }
+
+ void deallocate(pointer p, size_type n) {
+ Allocator allocator;
+ AllocatorTraits::deallocate(allocator, p, n);
+ if (bytes_used_ != nullptr) {
+ *bytes_used_ -= n * sizeof(T);
+ }
+ }
+
+ template <typename U, typename... Args>
+ void construct(U* p, Args&&... args) {
+ Allocator allocator;
+ AllocatorTraits::construct(allocator, p, std::forward<Args>(args)...);
+ if (instance_count_ != nullptr) {
+ *instance_count_ += 1;
+ }
+ }
+
+ template <typename U>
+ void destroy(U* p) {
+ Allocator allocator;
+ AllocatorTraits::destroy(allocator, p);
+ if (instance_count_ != nullptr) {
+ *instance_count_ -= 1;
+ }
+ }
+
+ template <typename U>
+ class rebind {
+ public:
+ using other = CountingAllocator<U>;
+ };
+
+ friend bool operator==(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return a.bytes_used_ == b.bytes_used_ &&
+ a.instance_count_ == b.instance_count_;
+ }
+
+ friend bool operator!=(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return !(a == b);
+ }
+
+ int64_t* bytes_used_ = nullptr;
+ int64_t* instance_count_ = nullptr;
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_function_defaults.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_function_defaults.h
new file mode 100644
index 00000000000..68a18058d1f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_function_defaults.h
@@ -0,0 +1,163 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Define the default Hash and Eq functions for SwissTable containers.
+//
+// std::hash<T> and std::equal_to<T> are not appropriate hash and equal
+// functions for SwissTable containers. There are two reasons for this.
+//
+// SwissTable containers are power-of-2-sized containers:
+//
+// This means they use the lower bits of the hash value to find the slot for
+// each entry. The typical hash function for integral types is the identity.
+// This is a very weak hash function for SwissTable, or any power-of-2-sized
+// hashtable implementation, and will lead to excessive collisions. For
+// SwissTable we use murmur3-style mixing to reduce collisions to a minimum.
+//
+// SwissTable containers support heterogeneous lookup:
+//
+// In order to make heterogeneous lookup work, hash and equal functions must be
+// polymorphic. At the same time they have to satisfy the same requirements the
+// C++ standard imposes on hash functions and equality operators. That is:
+//
+// if hash_default_eq<T>(a, b) returns true for any a and b of type T, then
+// hash_default_hash<T>(a) must equal hash_default_hash<T>(b)
+//
+// For SwissTable containers this requirement is relaxed to allow a and b of
+// any, possibly different, types. Note that, as in the standard, the hash and
+// equal functions are still bound to T. This is important because some type U
+// can be hashed or tested for equality differently depending on T. A notable
+// example is `const char*`. `const char*` is treated as a C-style string when
+// the hash function is hash<TString> but as a pointer when the hash
+// function is hash<void*>.
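+//
+// For example (an illustrative sketch):
+//
+//   y_absl::flat_hash_set<TString> set = {"abc"};
+//   set.count(y_absl::string_view("abc"));  // heterogeneous lookup: no
+//                                           // temporary TString is built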
+//
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+
+#include <stdint.h>
+#include <cstddef>
+#include <memory>
+#include <util/generic/string.h>
+#include <type_traits>
+
+#include "y_absl/base/config.h"
+#include "y_absl/hash/hash.h"
+#include "y_absl/strings/cord.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// The hash of an object of type T is computed by using y_absl::Hash.
+template <class T, class E = void>
+struct HashEq {
+ using Hash = y_absl::Hash<T>;
+ using Eq = std::equal_to<T>;
+};
+
+struct StringHash {
+ using is_transparent = void;
+
+ size_t operator()(y_absl::string_view v) const {
+ return y_absl::Hash<y_absl::string_view>{}(v);
+ }
+ size_t operator()(const y_absl::Cord& v) const {
+ return y_absl::Hash<y_absl::Cord>{}(v);
+ }
+};
+
+struct StringEq {
+ using is_transparent = void;
+ bool operator()(y_absl::string_view lhs, y_absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(const y_absl::Cord& lhs, const y_absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(const y_absl::Cord& lhs, y_absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(y_absl::string_view lhs, const y_absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
+};
+
+// Supports heterogeneous lookup for string-like elements.
+struct StringHashEq {
+ using Hash = StringHash;
+ using Eq = StringEq;
+};
+
+template <>
+struct HashEq<TString> : StringHashEq {};
+template <>
+struct HashEq<y_absl::string_view> : StringHashEq {};
+template <>
+struct HashEq<y_absl::Cord> : StringHashEq {};
+
+// Supports heterogeneous lookup for pointers and smart pointers.
+template <class T>
+struct HashEq<T*> {
+ struct Hash {
+ using is_transparent = void;
+ template <class U>
+ size_t operator()(const U& ptr) const {
+ return y_absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
+ }
+ };
+ struct Eq {
+ using is_transparent = void;
+ template <class A, class B>
+ bool operator()(const A& a, const B& b) const {
+ return HashEq::ToPtr(a) == HashEq::ToPtr(b);
+ }
+ };
+
+ private:
+ static const T* ToPtr(const T* ptr) { return ptr; }
+ template <class U, class D>
+ static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
+ return ptr.get();
+ }
+ template <class U>
+ static const T* ToPtr(const std::shared_ptr<U>& ptr) {
+ return ptr.get();
+ }
+};
+
+template <class T, class D>
+struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
+template <class T>
+struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
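+
+// For example, the specializations above allow lookup by raw pointer in a
+// container of smart pointers (an illustrative sketch):
+//
+//   y_absl::flat_hash_set<std::unique_ptr<int>> set;
+//   int* raw = nullptr;
+//   set.count(raw);  // hashed and compared via the underlying raw pointer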
+
+// This header's visibility is restricted. If you need to access the default
+// hasher, please use the container's ::hasher alias instead.
+//
+// Example: typename Hash = typename y_absl::flat_hash_map<K, V>::hasher
+template <class T>
+using hash_default_hash = typename container_internal::HashEq<T>::Hash;
+
+// This header's visibility is restricted. If you need to access the default
+// key equal, please use the container's ::key_equal alias instead.
+//
+// Example: typename Eq = typename y_absl::flat_hash_map<K, V, Hash>::key_equal
+template <class T>
+using hash_default_eq = typename container_internal::HashEq<T>::Eq;
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_generator_testing.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_generator_testing.h
new file mode 100644
index 00000000000..79b215465e4
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_generator_testing.h
@@ -0,0 +1,182 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Generates random values for testing. Specialized only for the few types we
+// care about.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <cassert>
+#include <iosfwd>
+#include <random>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "y_absl/container/internal/hash_policy_testing.h"
+#include "y_absl/memory/memory.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hash_internal {
+namespace generator_internal {
+
+template <class Container, class = void>
+struct IsMap : std::false_type {};
+
+template <class Map>
+struct IsMap<Map, y_absl::void_t<typename Map::mapped_type>> : std::true_type {};
+
+} // namespace generator_internal
+
+std::mt19937_64* GetSharedRng();
+
+enum Enum {
+ kEnumEmpty,
+ kEnumDeleted,
+};
+
+enum class EnumClass : uint64_t {
+ kEmpty,
+ kDeleted,
+};
+
+inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) {
+ return o << static_cast<uint64_t>(ec);
+}
+
+template <class T, class E = void>
+struct Generator;
+
+template <class T>
+struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type> {
+ T operator()() const {
+ std::uniform_int_distribution<T> dist;
+ return dist(*GetSharedRng());
+ }
+};
+
+template <>
+struct Generator<Enum> {
+ Enum operator()() const {
+ std::uniform_int_distribution<typename std::underlying_type<Enum>::type>
+ dist;
+ while (true) {
+ auto variate = dist(*GetSharedRng());
+ if (variate != kEnumEmpty && variate != kEnumDeleted)
+ return static_cast<Enum>(variate);
+ }
+ }
+};
+
+template <>
+struct Generator<EnumClass> {
+ EnumClass operator()() const {
+ std::uniform_int_distribution<
+ typename std::underlying_type<EnumClass>::type>
+ dist;
+ while (true) {
+ EnumClass variate = static_cast<EnumClass>(dist(*GetSharedRng()));
+ if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted)
+ return static_cast<EnumClass>(variate);
+ }
+ }
+};
+
+template <>
+struct Generator<TString> {
+ TString operator()() const;
+};
+
+template <>
+struct Generator<y_absl::string_view> {
+ y_absl::string_view operator()() const;
+};
+
+template <>
+struct Generator<NonStandardLayout> {
+ NonStandardLayout operator()() const {
+ return NonStandardLayout(Generator<TString>()());
+ }
+};
+
+template <class K, class V>
+struct Generator<std::pair<K, V>> {
+ std::pair<K, V> operator()() const {
+ return std::pair<K, V>(Generator<typename std::decay<K>::type>()(),
+ Generator<typename std::decay<V>::type>()());
+ }
+};
+
+template <class... Ts>
+struct Generator<std::tuple<Ts...>> {
+ std::tuple<Ts...> operator()() const {
+ return std::tuple<Ts...>(Generator<typename std::decay<Ts>::type>()()...);
+ }
+};
+
+template <class T>
+struct Generator<std::unique_ptr<T>> {
+ std::unique_ptr<T> operator()() const {
+ return y_absl::make_unique<T>(Generator<T>()());
+ }
+};
+
+template <class U>
+struct Generator<U, y_absl::void_t<decltype(std::declval<U&>().key()),
+ decltype(std::declval<U&>().value())>>
+ : Generator<std::pair<
+ typename std::decay<decltype(std::declval<U&>().key())>::type,
+ typename std::decay<decltype(std::declval<U&>().value())>::type>> {};
+
+template <class Container>
+using GeneratedType = decltype(
+ std::declval<const Generator<
+ typename std::conditional<generator_internal::IsMap<Container>::value,
+ typename Container::value_type,
+ typename Container::key_type>::type>&>()());
+
+// Naive wrapper that performs a linear search over previously generated
+// values. Beware that this is O(N^2), which is reasonable only for smaller
+// kMaxValues.
+template <class T, size_t kMaxValues = 64, class E = void>
+struct UniqueGenerator {
+ Generator<T, E> gen;
+ std::vector<T> values;
+
+ T operator()() {
+ assert(values.size() < kMaxValues);
+ for (;;) {
+ T value = gen();
+ if (std::find(values.begin(), values.end(), value) == values.end()) {
+ values.push_back(value);
+ return value;
+ }
+ }
+ }
+};
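+
+// Example (an illustrative sketch):
+//
+//   UniqueGenerator<int> gen;
+//   int a = gen();
+//   int b = gen();  // `a != b`, guaranteed by the linear search above.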
+
+} // namespace hash_internal
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_policy_testing.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_policy_testing.h
new file mode 100644
index 00000000000..baa367eee68
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_policy_testing.h
@@ -0,0 +1,184 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Utilities to help tests verify that hash tables properly handle stateful
+// allocators and hash functions.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+
+#include <cstdlib>
+#include <limits>
+#include <memory>
+#include <ostream>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "y_absl/hash/hash.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hash_testing_internal {
+
+template <class Derived>
+struct WithId {
+ WithId() : id_(next_id<Derived>()) {}
+ WithId(const WithId& that) : id_(that.id_) {}
+ WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; }
+ WithId& operator=(const WithId& that) {
+ id_ = that.id_;
+ return *this;
+ }
+ WithId& operator=(WithId&& that) {
+ id_ = that.id_;
+ that.id_ = 0;
+ return *this;
+ }
+
+ size_t id() const { return id_; }
+
+ friend bool operator==(const WithId& a, const WithId& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); }
+
+ protected:
+ explicit WithId(size_t id) : id_(id) {}
+
+ private:
+ size_t id_;
+
+ template <class T>
+ static size_t next_id() {
+ // 0 is reserved for the moved-from state.
+ static size_t gId = 1;
+ return gId++;
+ }
+};
+
+} // namespace hash_testing_internal
+
+struct NonStandardLayout {
+ NonStandardLayout() {}
+ explicit NonStandardLayout(TString s) : value(std::move(s)) {}
+ virtual ~NonStandardLayout() {}
+
+ friend bool operator==(const NonStandardLayout& a,
+ const NonStandardLayout& b) {
+ return a.value == b.value;
+ }
+ friend bool operator!=(const NonStandardLayout& a,
+ const NonStandardLayout& b) {
+ return a.value != b.value;
+ }
+
+ template <typename H>
+ friend H AbslHashValue(H h, const NonStandardLayout& v) {
+ return H::combine(std::move(h), v.value);
+ }
+
+ TString value;
+};
+
+struct StatefulTestingHash
+ : y_absl::container_internal::hash_testing_internal::WithId<
+ StatefulTestingHash> {
+ template <class T>
+ size_t operator()(const T& t) const {
+ return y_absl::Hash<T>{}(t);
+ }
+};
+
+struct StatefulTestingEqual
+ : y_absl::container_internal::hash_testing_internal::WithId<
+ StatefulTestingEqual> {
+ template <class T, class U>
+ bool operator()(const T& t, const U& u) const {
+ return t == u;
+ }
+};
+
+// It is expected that Alloc() == Alloc() for all allocators, so we cannot use
+// the WithId base. We need to assign ids explicitly.
+template <class T = int>
+struct Alloc : std::allocator<T> {
+ using propagate_on_container_swap = std::true_type;
+
+ // Using old paradigm for this to ensure compatibility.
+ explicit Alloc(size_t id = 0) : id_(id) {}
+
+ Alloc(const Alloc&) = default;
+ Alloc& operator=(const Alloc&) = default;
+
+ template <class U>
+ Alloc(const Alloc<U>& that) : std::allocator<T>(that), id_(that.id()) {}
+
+ template <class U>
+ struct rebind {
+ using other = Alloc<U>;
+ };
+
+ size_t id() const { return id_; }
+
+ friend bool operator==(const Alloc& a, const Alloc& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); }
+
+ private:
+ size_t id_ = (std::numeric_limits<size_t>::max)();
+};
+
+template <class Map>
+auto items(const Map& m) -> std::vector<
+ std::pair<typename Map::key_type, typename Map::mapped_type>> {
+ using std::get;
+ std::vector<std::pair<typename Map::key_type, typename Map::mapped_type>> res;
+ res.reserve(m.size());
+ for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v));
+ return res;
+}
+
+template <class Set>
+auto keys(const Set& s)
+ -> std::vector<typename std::decay<typename Set::key_type>::type> {
+ std::vector<typename std::decay<typename Set::key_type>::type> res;
+ res.reserve(s.size());
+ for (const auto& v : s) res.emplace_back(v);
+ return res;
+}
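+
+// Example (an illustrative sketch):
+//
+//   y_absl::flat_hash_map<int, int> m = {{1, 2}};
+//   auto pairs = items(m);  // {{1, 2}} as a std::vector of pairs
+//   y_absl::flat_hash_set<int> s = {3};
+//   auto ks = keys(s);      // {3} as a std::vector<int>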
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions
+// where the unordered containers are missing certain constructors that
+// take allocator arguments. This test is defined ad-hoc for the platforms
+// we care about (notably Crosstool 17) because libstdcxx's useless
+// versioning scheme precludes a more principled solution.
+// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html)
+// "the unordered associative containers in <unordered_map> and <unordered_set>
+// meet the allocator-aware container requirements;"
+#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425) || \
+    (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9))
+#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0
+#else
+#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1
+#endif
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_policy_traits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_policy_traits.h
new file mode 100644
index 00000000000..33f704fc4bd
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_policy_traits.h
@@ -0,0 +1,208 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+
+#include <cstddef>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Defines how slots are initialized/destroyed/moved.
+template <class Policy, class = void>
+struct hash_policy_traits {
+ // The type of the keys stored in the hashtable.
+ using key_type = typename Policy::key_type;
+
+ private:
+ struct ReturnKey {
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key for use in node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+ template <class Key,
+ y_absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
+ static key_type& Impl(Key&& k, int) {
+ return *std::launder(
+ const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
+ }
+#endif
+
+ template <class Key>
+ static Key Impl(Key&& k, char) {
+ return std::forward<Key>(k);
+ }
+
+ // When Key=T&, we forward the lvalue reference.
+ // When Key=T, we return by value to avoid a dangling reference.
+ // e.g., for string_hash_map.
+ template <class Key, class... Args>
+ auto operator()(Key&& k, const Args&...) const
+ -> decltype(Impl(std::forward<Key>(k), 0)) {
+ return Impl(std::forward<Key>(k), 0);
+ }
+ };
+
+ template <class P = Policy, class = void>
+ struct ConstantIteratorsImpl : std::false_type {};
+
+ template <class P>
+ struct ConstantIteratorsImpl<P, y_absl::void_t<typename P::constant_iterators>>
+ : P::constant_iterators {};
+
+ public:
+ // The actual object stored in the hash table.
+ using slot_type = typename Policy::slot_type;
+
+ // The argument type for insertions into the hashtable. This is different
+ // from value_type for increased performance. See initializer_list constructor
+ // and insert() member functions for more details.
+ using init_type = typename Policy::init_type;
+
+ using reference = decltype(Policy::element(std::declval<slot_type*>()));
+ using pointer = typename std::remove_reference<reference>::type*;
+ using value_type = typename std::remove_reference<reference>::type;
+
+ // Policies can set this variable to tell raw_hash_set that all iterators
+ // should be constant, even `iterator`. This is useful for set-like
+ // containers.
+ // Defaults to false if not provided by the policy.
+ using constant_iterators = ConstantIteratorsImpl<>;
+
+ // PRECONDITION: `slot` is UNINITIALIZED
+ // POSTCONDITION: `slot` is INITIALIZED
+ template <class Alloc, class... Args>
+ static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+ Policy::construct(alloc, slot, std::forward<Args>(args)...);
+ }
+
+ // PRECONDITION: `slot` is INITIALIZED
+ // POSTCONDITION: `slot` is UNINITIALIZED
+ template <class Alloc>
+ static void destroy(Alloc* alloc, slot_type* slot) {
+ Policy::destroy(alloc, slot);
+ }
+
+ // Transfers `old_slot` to `new_slot`. Any memory allocated by the
+ // allocator inside `old_slot` may be transferred to `new_slot`.
+ //
+ // OPTIONAL: defaults to:
+ //
+ // clone(new_slot, std::move(*old_slot));
+ // destroy(old_slot);
+ //
+ // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
+ // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
+ // UNINITIALIZED
+ template <class Alloc>
+ static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
+ transfer_impl(alloc, new_slot, old_slot, 0);
+ }
+
+ // PRECONDITION: `slot` is INITIALIZED
+ // POSTCONDITION: `slot` is INITIALIZED
+ template <class P = Policy>
+ static auto element(slot_type* slot) -> decltype(P::element(slot)) {
+ return P::element(slot);
+ }
+
+ // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
+ //
+ // If `slot` is nullptr, returns the constant amount of memory owned by any
+ // full slot or -1 if slots own variable amounts of memory.
+ //
+ // PRECONDITION: `slot` is INITIALIZED or nullptr
+ template <class P = Policy>
+ static size_t space_used(const slot_type* slot) {
+ return P::space_used(slot);
+ }
+
+ // Provides generalized access to the key for elements, both for elements in
+ // the table and for elements that have not yet been inserted (or even
+ // constructed). We would like an API that allows us to say: `key(args...)`
+ // but we cannot do that for all cases, so we use this more general API that
+ // can be used for many things, including the following:
+ //
+ // - Given an element in a table, get its key.
+ // - Given an element initializer, get its key.
+ // - Given `emplace()` arguments, get the element key.
+ //
+ // Implementations of this must adhere to a very strict technical
+ // specification around aliasing and consuming arguments:
+ //
+ // Let `value_type` be the result type of `element()` without ref- and
+ // cv-qualifiers. The first argument is a functor, the rest are constructor
+ // arguments for `value_type`. Returns `std::forward<F>(f)(k, xs...)`, where
+ // `k` is the element key, and `xs...` are the new constructor arguments for
+ // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias
+ // `ts...`. The key won't be touched once `xs...` are used to construct an
+ // element; `ts...` won't be touched at all, which allows `apply()` to consume
+ // any rvalues among them.
+ //
+ // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not
+ // trigger a hard compile error unless it originates from `f`. In other words,
+ // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not
+ // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK.
+ //
+ // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`,
+ // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not.
+ template <class F, class... Ts, class P = Policy>
+ static auto apply(F&& f, Ts&&... ts)
+ -> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...)) {
+ return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...);
+ }
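+
+ // For a typical map policy whose element is a `std::pair<const K, V>` named
+ // `p`, `apply(f, p)` forwards to `f(p.first, p)` (an illustrative sketch of
+ // one common policy, not a requirement of this interface).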
+
+ // Returns the "key" portion of the slot.
+ // Used for node handle manipulation.
+ template <class P = Policy>
+ static auto mutable_key(slot_type* slot)
+ -> decltype(P::apply(ReturnKey(), element(slot))) {
+ return P::apply(ReturnKey(), element(slot));
+ }
+
+ // Returns the "value" (as opposed to the "key") portion of the element. Used
+ // by maps to implement `operator[]`, `at()` and `insert_or_assign()`.
+ template <class T, class P = Policy>
+ static auto value(T* elem) -> decltype(P::value(elem)) {
+ return P::value(elem);
+ }
+
+ private:
+ // Use auto -> decltype as an enabler.
+ template <class Alloc, class P = Policy>
+ static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
+ slot_type* old_slot, int)
+ -> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
+ P::transfer(alloc, new_slot, old_slot);
+ }
+ template <class Alloc>
+ static void transfer_impl(Alloc* alloc, slot_type* new_slot,
+ slot_type* old_slot, char) {
+ construct(alloc, new_slot, std::move(element(old_slot)));
+ destroy(alloc, old_slot);
+ }
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtable_debug.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtable_debug.h
new file mode 100644
index 00000000000..2be9db22973
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtable_debug.h
@@ -0,0 +1,110 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This library provides APIs to debug the probing behavior of hash tables.
+//
+// In general, the probing behavior is a black box for users and only the
+// side effects can be measured in the form of performance differences.
+// These APIs give a glimpse into the actual behavior of the probing algorithms
+// in these hashtables given a specified hash function and a set of elements.
+//
+// The probe count distribution can be used to assess the quality of the hash
+// function for that particular hash table. Note that a hash function that
+// performs well in one hash table implementation does not necessarily perform
+// well in a different one.
+//
+// This library supports std::unordered_{set,map}, dense_hash_{set,map} and
+// y_absl::{flat,node,string}_hash_{set,map}.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+
+#include <cstddef>
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "y_absl/container/internal/hashtable_debug_hooks.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Returns the number of probes required to look up `key`. Returns 0 for a
+// search with no collisions. Higher values mean more hash collisions occurred;
+// however, the exact meaning of this number varies according to the container
+// type.
+template <typename C>
+size_t GetHashtableDebugNumProbes(
+ const C& c, const typename C::key_type& key) {
+ return y_absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::GetNumProbes(c, key);
+}
+
+// Gets a histogram of the number of probes for each element in the container.
+// The sum of all the values in the vector is equal to container.size().
+template <typename C>
+std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
+ std::vector<size_t> v;
+ for (auto it = container.begin(); it != container.end(); ++it) {
+ size_t num_probes = GetHashtableDebugNumProbes(
+ container,
+ y_absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
+ v.resize((std::max)(v.size(), num_probes + 1));
+ v[num_probes]++;
+ }
+ return v;
+}
+
+struct HashtableDebugProbeSummary {
+ size_t total_elements;
+ size_t total_num_probes;
+ double mean;
+};
+
+// Gets a summary of the probe count distribution for the elements in the
+// container.
+template <typename C>
+HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
+ auto probes = GetHashtableDebugNumProbesHistogram(container);
+ HashtableDebugProbeSummary summary = {};
+ for (size_t i = 0; i < probes.size(); ++i) {
+ summary.total_elements += probes[i];
+ summary.total_num_probes += probes[i] * i;
+ }
+ summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
+ return summary;
+}
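+
+// Example (an illustrative sketch):
+//
+//   y_absl::flat_hash_set<int> set = {1, 2, 3};
+//   HashtableDebugProbeSummary s = GetHashtableDebugProbeSummary(set);
+//   // s.mean is the average number of probes per element in `set`.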
+
+// Returns the number of bytes requested from the allocator by the container
+// and not freed.
+template <typename C>
+size_t AllocatedByteSize(const C& c) {
+ return y_absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::AllocatedByteSize(c);
+}
+
+// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
+// and `c.size()` is equal to `num_elements`.
+template <typename C>
+size_t LowerBoundAllocatedByteSize(size_t num_elements) {
+ return y_absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtable_debug_hooks.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtable_debug_hooks.h
new file mode 100644
index 00000000000..ec8c0fd29d2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtable_debug_hooks.h
@@ -0,0 +1,85 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Provides the internal API for hashtable_debug.h.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+
+#include <cstddef>
+
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hashtable_debug_internal {
+
+// If it is a map, call get<0>().
+using std::get;
+template <typename T, typename = typename T::mapped_type>
+auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) {
+ return get<0>(pair);
+}
+
+// If it is not a map, return the value directly.
+template <typename T>
+const typename T::key_type& GetKey(const typename T::key_type& key, char) {
+ return key;
+}
+
+// Containers should specialize this to provide debug information for that
+// container.
+template <class Container, typename Enabler = void>
+struct HashtableDebugAccess {
+ // Returns the number of probes required to find `key` in `c`. The "number of
+ // probes" is a concept that can vary by container. Implementations should
+ // return 0 when `key` was found in the minimum number of operations and
+ // should increment the result for each non-trivial operation required to find
+ // `key`.
+ //
+ // The default implementation uses the bucket API from the standard library
+ // and thus works for `std::unordered_*` containers.
+ static size_t GetNumProbes(const Container& c,
+ const typename Container::key_type& key) {
+ if (!c.bucket_count()) return {};
+ size_t num_probes = 0;
+ size_t bucket = c.bucket(key);
+ for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) {
+ if (it == e) return num_probes;
+ if (c.key_eq()(key, GetKey<Container>(*it, 0))) return num_probes;
+ }
+ }
+
+ // Returns the number of bytes requested from the allocator by the container
+ // and not freed.
+ //
+ // static size_t AllocatedByteSize(const Container& c);
+
+ // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type
+ // `Container` and `c.size()` is equal to `num_elements`.
+ //
+ // static size_t LowerBoundAllocatedByteSize(size_t num_elements);
+};
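+
+// A container opts in by providing a specialization along these lines (an
+// illustrative sketch; `MyTable` is hypothetical):
+//
+//   template <class... Args>
+//   struct HashtableDebugAccess<MyTable<Args...>> {
+//     static size_t GetNumProbes(const MyTable<Args...>& c,
+//                                const typename MyTable<Args...>::key_type&);
+//     static size_t AllocatedByteSize(const MyTable<Args...>& c);
+//     static size_t LowerBoundAllocatedByteSize(size_t num_elements);
+//   };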
+
+} // namespace hashtable_debug_internal
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc
new file mode 100644
index 00000000000..6017ac9bb03
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc
@@ -0,0 +1,190 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/container/internal/hashtablez_sampler.h"
+
+#include <atomic>
+#include <cassert>
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/container/internal/have_sse.h"
+#include "y_absl/debugging/stacktrace.h"
+#include "y_absl/memory/memory.h"
+#include "y_absl/profiling/internal/exponential_biased.h"
+#include "y_absl/profiling/internal/sample_recorder.h"
+#include "y_absl/synchronization/mutex.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+constexpr int HashtablezInfo::kMaxStackDepth;
+
+namespace {
+ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
+ false
+};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ABSL_PER_THREAD_TLS_KEYWORD y_absl::profiling_internal::ExponentialBiased
+ g_exponential_biased_generator;
+#endif
+
+} // namespace
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+HashtablezSampler& GlobalHashtablezSampler() {
+ static auto* sampler = new HashtablezSampler();
+ return *sampler;
+}
+
+// TODO(bradleybear): The comments at this constructor's declaration say that
+// the fields are not initialized, but this definition does initialize the
+// fields. Something needs to be cleaned up.
+HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::~HashtablezInfo() = default;
+
+void HashtablezInfo::PrepareForSampling() {
+ capacity.store(0, std::memory_order_relaxed);
+ size.store(0, std::memory_order_relaxed);
+ num_erases.store(0, std::memory_order_relaxed);
+ num_rehashes.store(0, std::memory_order_relaxed);
+ max_probe_length.store(0, std::memory_order_relaxed);
+ total_probe_length.store(0, std::memory_order_relaxed);
+ hashes_bitwise_or.store(0, std::memory_order_relaxed);
+ hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
+ hashes_bitwise_xor.store(0, std::memory_order_relaxed);
+ max_reserve.store(0, std::memory_order_relaxed);
+
+ create_time = y_absl::Now();
+ // The inliner makes a hardcoded skip_count difficult (especially when
+ // combined with LTO). We rely instead on the ability to exclude stacks by
+ // regex when encoding.
+ depth = y_absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
+ /* skip_count= */ 0);
+}
+
+static bool ShouldForceSampling() {
+ enum ForceState {
+ kDontForce,
+ kForce,
+ kUninitialized
+ };
+ ABSL_CONST_INIT static std::atomic<ForceState> global_state{
+ kUninitialized};
+ ForceState state = global_state.load(std::memory_order_relaxed);
+ if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
+
+ if (state == kUninitialized) {
+ state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
+ ? kForce
+ : kDontForce;
+ global_state.store(state, std::memory_order_relaxed);
+ }
+ return state == kForce;
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
+ if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
+ *next_sample = 1;
+ HashtablezInfo* result = GlobalHashtablezSampler().Register();
+ result->inline_element_size = inline_element_size;
+ return result;
+ }
+
+#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ *next_sample = std::numeric_limits<int64_t>::max();
+ return nullptr;
+#else
+ bool first = *next_sample < 0;
+ *next_sample = g_exponential_biased_generator.GetStride(
+ g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+ // Small values of interval are equivalent to just sampling next time.
+ ABSL_ASSERT(*next_sample >= 1);
+
+ // g_hashtablez_enabled can be dynamically flipped, so we need a threshold
+ // low enough that we will start sampling in a reasonable time; we just use
+ // the default sampling rate.
+ if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr;
+
+ // We will only be negative on our first count, so we should just retry in
+ // that case.
+ if (first) {
+ if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+ return SampleSlow(next_sample, inline_element_size);
+ }
+
+ HashtablezInfo* result = GlobalHashtablezSampler().Register();
+ result->inline_element_size = inline_element_size;
+ return result;
+#endif
+}
+
+void UnsampleSlow(HashtablezInfo* info) {
+ GlobalHashtablezSampler().Unregister(info);
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+ size_t distance_from_desired) {
+ // SwissTables probe in groups (16 slots with SSE2, 8 otherwise), so scale
+ // this to count group probes rather than the offset from the desired slot.
+ size_t probe_length = distance_from_desired;
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+ probe_length /= 16;
+#else
+ probe_length /= 8;
+#endif
+
+ info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
+ info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+ info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
+ info->max_probe_length.store(
+ std::max(info->max_probe_length.load(std::memory_order_relaxed),
+ probe_length),
+ std::memory_order_relaxed);
+ info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed);
+ info->size.fetch_add(1, std::memory_order_relaxed);
+}
+
+void SetHashtablezEnabled(bool enabled) {
+ g_hashtablez_enabled.store(enabled, std::memory_order_release);
+}
+
+void SetHashtablezSampleParameter(int32_t rate) {
+ if (rate > 0) {
+ g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
+ } else {
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld",
+ static_cast<long long>(rate)); // NOLINT(runtime/int)
+ }
+}
+
+void SetHashtablezMaxSamples(int32_t max) {
+ if (max > 0) {
+ GlobalHashtablezSampler().SetMaxSamples(max);
+ } else {
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
+ static_cast<long long>(max)); // NOLINT(runtime/int)
+ }
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.h
new file mode 100644
index 00000000000..8521ee754a9
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.h
@@ -0,0 +1,281 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: hashtablez_sampler.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the API for a low level library to sample hashtables
+// and collect runtime statistics about them.
+//
+// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which
+// store information about a single sample.
+//
+// `Record*` methods store information into samples.
+// `Sample()` and `Unsample()` make use of a single global sampler with
+// properties controlled by the flags hashtablez_enabled,
+// hashtablez_sample_rate, and hashtablez_max_samples.
+//
+// WARNING
+//
+// Using this sampling API may cause sampled Swiss tables to use the global
+// allocator (operator `new`) in addition to any custom allocator. If you
+// are using a table in an unusual circumstance where allocation or calling a
+// Linux syscall is unacceptable, this could interfere.
+//
+// This utility is internal-only. Use at your own risk.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "y_absl/base/internal/per_thread_tls.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/container/internal/have_sse.h"
+#include "y_absl/profiling/internal/sample_recorder.h"
+#include "y_absl/synchronization/mutex.h"
+#include "y_absl/utility/utility.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Stores information about a sampled hashtable. All mutations to this *must*
+// be made through `Record*` functions below. All reads from this *must* only
+// occur in the callback to `HashtablezSampler::Iterate`.
+struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
+ // Constructs the object but does not fill in any fields.
+ HashtablezInfo();
+ ~HashtablezInfo();
+ HashtablezInfo(const HashtablezInfo&) = delete;
+ HashtablezInfo& operator=(const HashtablezInfo&) = delete;
+
+ // Puts the object into a clean state, fills in the logically `const` members,
+ // blocking for any readers that are currently sampling the object.
+ void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+
+ // These fields are mutated by the various Record* APIs and need to be
+ // thread-safe.
+ std::atomic<size_t> capacity;
+ std::atomic<size_t> size;
+ std::atomic<size_t> num_erases;
+ std::atomic<size_t> num_rehashes;
+ std::atomic<size_t> max_probe_length;
+ std::atomic<size_t> total_probe_length;
+ std::atomic<size_t> hashes_bitwise_or;
+ std::atomic<size_t> hashes_bitwise_and;
+ std::atomic<size_t> hashes_bitwise_xor;
+ std::atomic<size_t> max_reserve;
+
+ // All of the fields below are set by `PrepareForSampling`; they must not be
+ // mutated in `Record*` functions. They are logically `const` in that sense.
+ // These are guarded by init_mu, but that is not externalized to clients, who
+ // can only read them during `HashtablezSampler::Iterate` which will hold the
+ // lock.
+ static constexpr int kMaxStackDepth = 64;
+ y_absl::Time create_time;
+ int32_t depth;
+ void* stack[kMaxStackDepth];
+ size_t inline_element_size;
+};
+
+inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+ total_probe_length /= 16;
+#else
+ total_probe_length /= 8;
+#endif
+ info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_rehashes.store(
+ 1 + info->num_rehashes.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+}
+
+inline void RecordReservationSlow(HashtablezInfo* info,
+ size_t target_capacity) {
+ info->max_reserve.store(
+ (std::max)(info->max_reserve.load(std::memory_order_relaxed),
+ target_capacity),
+ std::memory_order_relaxed);
+}
+
+inline void RecordClearedReservationSlow(HashtablezInfo* info) {
+ info->max_reserve.store(0, std::memory_order_relaxed);
+}
+
+inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+ size_t capacity) {
+ info->size.store(size, std::memory_order_relaxed);
+ info->capacity.store(capacity, std::memory_order_relaxed);
+ if (size == 0) {
+ // This is a clear, reset the total/num_erases too.
+ info->total_probe_length.store(0, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
+ }
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+ size_t distance_from_desired);
+
+inline void RecordEraseSlow(HashtablezInfo* info) {
+ info->size.fetch_sub(1, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_erases.store(
+ 1 + info->num_erases.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
+void UnsampleSlow(HashtablezInfo* info);
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+class HashtablezInfoHandle {
+ public:
+ explicit HashtablezInfoHandle() : info_(nullptr) {}
+ explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
+ ~HashtablezInfoHandle() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ UnsampleSlow(info_);
+ }
+
+ HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
+ HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;
+
+ HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
+ : info_(y_absl::exchange(o.info_, nullptr)) {}
+ HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
+ if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
+ UnsampleSlow(info_);
+ }
+ info_ = y_absl::exchange(o.info_, nullptr);
+ return *this;
+ }
+
+ inline void RecordStorageChanged(size_t size, size_t capacity) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordStorageChangedSlow(info_, size, capacity);
+ }
+
+ inline void RecordRehash(size_t total_probe_length) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordRehashSlow(info_, total_probe_length);
+ }
+
+ inline void RecordReservation(size_t target_capacity) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordReservationSlow(info_, target_capacity);
+ }
+
+ inline void RecordClearedReservation() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordClearedReservationSlow(info_);
+ }
+
+ inline void RecordInsert(size_t hash, size_t distance_from_desired) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordInsertSlow(info_, hash, distance_from_desired);
+ }
+
+ inline void RecordErase() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordEraseSlow(info_);
+ }
+
+ friend inline void swap(HashtablezInfoHandle& lhs,
+ HashtablezInfoHandle& rhs) {
+ std::swap(lhs.info_, rhs.info_);
+ }
+
+ private:
+ friend class HashtablezInfoHandlePeer;
+ HashtablezInfo* info_;
+};
+#else
+// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
+// be removed by the linker, in order to reduce the binary size.
+class HashtablezInfoHandle {
+ public:
+ explicit HashtablezInfoHandle() = default;
+ explicit HashtablezInfoHandle(std::nullptr_t) {}
+
+ inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
+ inline void RecordRehash(size_t /*total_probe_length*/) {}
+ inline void RecordReservation(size_t /*target_capacity*/) {}
+ inline void RecordClearedReservation() {}
+ inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
+ inline void RecordErase() {}
+
+ friend inline void swap(HashtablezInfoHandle& /*lhs*/,
+ HashtablezInfoHandle& /*rhs*/) {}
+};
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+// Returns an RAII sampling handle that manages registration and
+// unregistration with the global sampler.
+// with the global sampler.
+inline HashtablezInfoHandle Sample(
+ size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+ return HashtablezInfoHandle(nullptr);
+ }
+ return HashtablezInfoHandle(
+ SampleSlow(&global_next_sample, inline_element_size));
+#else
+ return HashtablezInfoHandle(nullptr);
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+}
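+
+// Typical use inside a hashtable implementation (an illustrative sketch;
+// `slot_type`, `hash`, and `distance` are hypothetical locals):
+//
+//   HashtablezInfoHandle infoz = Sample(sizeof(slot_type));
+//   infoz.RecordInsert(hash, distance);  // no-op unless this table is sampled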
+
+using HashtablezSampler =
+ ::y_absl::profiling_internal::SampleRecorder<HashtablezInfo>;
+
+// Returns a global Sampler.
+HashtablezSampler& GlobalHashtablezSampler();
+
+// Enables or disables sampling for Swiss tables.
+void SetHashtablezEnabled(bool enabled);
+
+// Sets the rate at which Swiss tables will be sampled.
+void SetHashtablezSampleParameter(int32_t rate);
+
+// Sets a soft max for the number of samples that will be kept.
+void SetHashtablezMaxSamples(int32_t max);
+
+// Configuration override.
+// This allows process-wide sampling without depending on order of
+// initialization of static storage duration objects.
+// The definition of this function is weak, which allows us to inject a
+// different implementation for it at link time.
+extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler_force_weak_definition.cc
new file mode 100644
index 00000000000..79a31229813
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -0,0 +1,31 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/container/internal/hashtablez_sampler.h"
+
+#include "y_absl/base/attributes.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// See hashtablez_sampler.h for details.
+extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(
+ AbslContainerInternalSampleEverything)() {
+ return false;
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/have_sse.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/have_sse.h
new file mode 100644
index 00000000000..e75e1a16d32
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/have_sse.h
@@ -0,0 +1,50 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Shared config probing for SSE instructions used in Swiss tables.
+#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+
+#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#if defined(__SSE2__) || \
+ (defined(_MSC_VER) && \
+ (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1
+#else
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0
+#endif
+#endif
+
+#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+#ifdef __SSSE3__
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1
+#else
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0
+#endif
+#endif
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \
+ !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#error "Bad configuration!"
+#endif
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#endif // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h
new file mode 100644
index 00000000000..e6488e843e1
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h
@@ -0,0 +1,932 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/container/internal/compressed_tuple.h"
+#include "y_absl/memory/memory.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/types/span.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace inlined_vector_internal {
+
+// GCC does not deal very well with the code below.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
+template <typename A>
+using AllocatorTraits = std::allocator_traits<A>;
+template <typename A>
+using ValueType = typename AllocatorTraits<A>::value_type;
+template <typename A>
+using SizeType = typename AllocatorTraits<A>::size_type;
+template <typename A>
+using Pointer = typename AllocatorTraits<A>::pointer;
+template <typename A>
+using ConstPointer = typename AllocatorTraits<A>::const_pointer;
+template <typename A>
+using DifferenceType = typename AllocatorTraits<A>::difference_type;
+template <typename A>
+using Reference = ValueType<A>&;
+template <typename A>
+using ConstReference = const ValueType<A>&;
+template <typename A>
+using Iterator = Pointer<A>;
+template <typename A>
+using ConstIterator = ConstPointer<A>;
+template <typename A>
+using ReverseIterator = typename std::reverse_iterator<Iterator<A>>;
+template <typename A>
+using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
+template <typename A>
+using MoveIterator = typename std::move_iterator<Iterator<A>>;
+
+template <typename Iterator>
+using IsAtLeastForwardIterator = std::is_convertible<
+ typename std::iterator_traits<Iterator>::iterator_category,
+ std::forward_iterator_tag>;
+
+template <typename A>
+using IsMemcpyOk =
+ y_absl::conjunction<std::is_same<A, std::allocator<ValueType<A>>>,
+ y_absl::is_trivially_copy_constructible<ValueType<A>>,
+ y_absl::is_trivially_copy_assignable<ValueType<A>>,
+ y_absl::is_trivially_destructible<ValueType<A>>>;
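+
+// For instance (illustrative note, not upstream): with `A =
+// std::allocator<int>` all four conjuncts hold, so the storage may be
+// relocated with `std::memcpy`; a type with a user-provided copy constructor
+// fails `is_trivially_copy_constructible` and takes the element-wise path.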
+
+template <typename T>
+struct TypeIdentity {
+ using type = T;
+};
+
+// Used for function arguments in template functions to prevent ADL by forcing
+// callers to explicitly specify the template parameter.
+template <typename T>
+using NoTypeDeduction = typename TypeIdentity<T>::type;
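+
+// For example (added note, not upstream): in `DestroyElements` below, `A`
+// cannot be deduced through `NoTypeDeduction<A>`, so call sites must spell
+// `DestroyElements<A>(alloc, p, n)` explicitly.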
+
+template <typename A>
+void DestroyElements(NoTypeDeduction<A>& allocator, Pointer<A> destroy_first,
+ SizeType<A> destroy_size) {
+ if (destroy_first != nullptr) {
+ for (SizeType<A> i = destroy_size; i != 0;) {
+ --i;
+ AllocatorTraits<A>::destroy(allocator, destroy_first + i);
+ }
+ }
+}
+
+template <typename A>
+struct Allocation {
+ Pointer<A> data;
+ SizeType<A> capacity;
+};
+
+template <typename A,
+ bool IsOverAligned =
+ (alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
+struct MallocAdapter {
+ static Allocation<A> Allocate(A& allocator, SizeType<A> requested_capacity) {
+ return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
+ requested_capacity};
+ }
+
+ static void Deallocate(A& allocator, Pointer<A> pointer,
+ SizeType<A> capacity) {
+ AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
+ }
+};
+
+template <typename A, typename ValueAdapter>
+void ConstructElements(NoTypeDeduction<A>& allocator,
+ Pointer<A> construct_first, ValueAdapter& values,
+ SizeType<A> construct_size) {
+ for (SizeType<A> i = 0; i < construct_size; ++i) {
+ ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
+ ABSL_INTERNAL_CATCH_ANY {
+ DestroyElements<A>(allocator, construct_first, i);
+ ABSL_INTERNAL_RETHROW;
+ }
+ }
+}
+
+template <typename A, typename ValueAdapter>
+void AssignElements(Pointer<A> assign_first, ValueAdapter& values,
+ SizeType<A> assign_size) {
+ for (SizeType<A> i = 0; i < assign_size; ++i) {
+ values.AssignNext(assign_first + i);
+ }
+}
+
+template <typename A>
+struct StorageView {
+ Pointer<A> data;
+ SizeType<A> size;
+ SizeType<A> capacity;
+};
+
+template <typename A, typename Iterator>
+class IteratorValueAdapter {
+ public:
+ explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
+
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at, *it_);
+ ++it_;
+ }
+
+ void AssignNext(Pointer<A> assign_at) {
+ *assign_at = *it_;
+ ++it_;
+ }
+
+ private:
+ Iterator it_;
+};
+
+template <typename A>
+class CopyValueAdapter {
+ public:
+ explicit CopyValueAdapter(ConstPointer<A> p) : ptr_(p) {}
+
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at, *ptr_);
+ }
+
+ void AssignNext(Pointer<A> assign_at) { *assign_at = *ptr_; }
+
+ private:
+ ConstPointer<A> ptr_;
+};
+
+template <typename A>
+class DefaultValueAdapter {
+ public:
+ explicit DefaultValueAdapter() {}
+
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at);
+ }
+
+ void AssignNext(Pointer<A> assign_at) { *assign_at = ValueType<A>(); }
+};
+
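+// (Added explanatory comment; not in the upstream header.) RAII transaction
+// that owns a pending allocation until `Release()` commits it; if destroyed
+// unreleased -- e.g. because element construction threw -- the memory is
+// returned to the allocator. A hedged usage sketch, mirroring the `Storage`
+// methods below:
+//
+//   AllocationTransaction<A> tx(alloc);
+//   Pointer<A> p = tx.Allocate(n);
+//   /* construct elements at p; may throw */
+//   SetAllocation(std::move(tx).Release());  // commit; tx no longer owns p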
+template <typename A>
+class AllocationTransaction {
+ public:
+ explicit AllocationTransaction(A& allocator)
+ : allocator_data_(allocator, nullptr), capacity_(0) {}
+
+ ~AllocationTransaction() {
+ if (DidAllocate()) {
+ MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
+ }
+ }
+
+ AllocationTransaction(const AllocationTransaction&) = delete;
+ void operator=(const AllocationTransaction&) = delete;
+
+ A& GetAllocator() { return allocator_data_.template get<0>(); }
+ Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
+ SizeType<A>& GetCapacity() { return capacity_; }
+
+ bool DidAllocate() { return GetData() != nullptr; }
+
+ Pointer<A> Allocate(SizeType<A> requested_capacity) {
+ Allocation<A> result =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ GetData() = result.data;
+ GetCapacity() = result.capacity;
+ return result.data;
+ }
+
+ ABSL_MUST_USE_RESULT Allocation<A> Release() && {
+ Allocation<A> result = {GetData(), GetCapacity()};
+ Reset();
+ return result;
+ }
+
+ private:
+ void Reset() {
+ GetData() = nullptr;
+ GetCapacity() = 0;
+ }
+
+ container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
+ SizeType<A> capacity_;
+};
+
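+// (Added explanatory comment; not upstream.) Companion RAII transaction for
+// element construction: it records the range of elements constructed so far
+// and destroys them on unwind unless `Commit()` is called first.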
+template <typename A>
+class ConstructionTransaction {
+ public:
+ explicit ConstructionTransaction(A& allocator)
+ : allocator_data_(allocator, nullptr), size_(0) {}
+
+ ~ConstructionTransaction() {
+ if (DidConstruct()) {
+ DestroyElements<A>(GetAllocator(), GetData(), GetSize());
+ }
+ }
+
+ ConstructionTransaction(const ConstructionTransaction&) = delete;
+ void operator=(const ConstructionTransaction&) = delete;
+
+ A& GetAllocator() { return allocator_data_.template get<0>(); }
+ Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
+ SizeType<A>& GetSize() { return size_; }
+
+ bool DidConstruct() { return GetData() != nullptr; }
+ template <typename ValueAdapter>
+ void Construct(Pointer<A> data, ValueAdapter& values, SizeType<A> size) {
+ ConstructElements<A>(GetAllocator(), data, values, size);
+ GetData() = data;
+ GetSize() = size;
+ }
+ void Commit() && {
+ GetData() = nullptr;
+ GetSize() = 0;
+ }
+
+ private:
+ container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
+ SizeType<A> size_;
+};
+
+template <typename T, size_t N, typename A>
+class Storage {
+ public:
+ static SizeType<A> NextCapacity(SizeType<A> current_capacity) {
+ return current_capacity * 2;
+ }
+
+ static SizeType<A> ComputeCapacity(SizeType<A> current_capacity,
+ SizeType<A> requested_capacity) {
+ return (std::max)(NextCapacity(current_capacity), requested_capacity);
+ }
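+
+  // Growth note (added, not upstream): capacity grows geometrically, e.g.
+  // ComputeCapacity(4, 5) == 8 since the doubled capacity wins, while
+  // ComputeCapacity(4, 100) == 100 since the request dominates.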
+
+ // ---------------------------------------------------------------------------
+ // Storage Constructors and Destructor
+ // ---------------------------------------------------------------------------
+
+ Storage() : metadata_(A(), /* size and is_allocated */ 0) {}
+
+ explicit Storage(const A& allocator)
+ : metadata_(allocator, /* size and is_allocated */ 0) {}
+
+ ~Storage() {
+ if (GetSizeAndIsAllocated() == 0) {
+ // Empty and not allocated; nothing to do.
+ } else if (IsMemcpyOk<A>::value) {
+ // No destructors need to be run; just deallocate if necessary.
+ DeallocateIfAllocated();
+ } else {
+ DestroyContents();
+ }
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Member Accessors
+ // ---------------------------------------------------------------------------
+
+ SizeType<A>& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
+
+ const SizeType<A>& GetSizeAndIsAllocated() const {
+ return metadata_.template get<1>();
+ }
+
+ SizeType<A> GetSize() const { return GetSizeAndIsAllocated() >> 1; }
+
+ bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
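+
+  // Packing note (added, not upstream): bit 0 of the word above is the
+  // is_allocated flag and the remaining bits hold the size, so a
+  // heap-allocated vector of size 3 stores (3 << 1) | 1 == 7.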
+
+ Pointer<A> GetAllocatedData() { return data_.allocated.allocated_data; }
+
+ ConstPointer<A> GetAllocatedData() const {
+ return data_.allocated.allocated_data;
+ }
+
+ Pointer<A> GetInlinedData() {
+ return reinterpret_cast<Pointer<A>>(
+ std::addressof(data_.inlined.inlined_data[0]));
+ }
+
+ ConstPointer<A> GetInlinedData() const {
+ return reinterpret_cast<ConstPointer<A>>(
+ std::addressof(data_.inlined.inlined_data[0]));
+ }
+
+ SizeType<A> GetAllocatedCapacity() const {
+ return data_.allocated.allocated_capacity;
+ }
+
+ SizeType<A> GetInlinedCapacity() const { return static_cast<SizeType<A>>(N); }
+
+ StorageView<A> MakeStorageView() {
+ return GetIsAllocated() ? StorageView<A>{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()}
+ : StorageView<A>{GetInlinedData(), GetSize(),
+ GetInlinedCapacity()};
+ }
+
+ A& GetAllocator() { return metadata_.template get<0>(); }
+
+ const A& GetAllocator() const { return metadata_.template get<0>(); }
+
+ // ---------------------------------------------------------------------------
+ // Storage Member Mutators
+ // ---------------------------------------------------------------------------
+
+ ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
+
+ template <typename ValueAdapter>
+ void Initialize(ValueAdapter values, SizeType<A> new_size);
+
+ template <typename ValueAdapter>
+ void Assign(ValueAdapter values, SizeType<A> new_size);
+
+ template <typename ValueAdapter>
+ void Resize(ValueAdapter values, SizeType<A> new_size);
+
+ template <typename ValueAdapter>
+ Iterator<A> Insert(ConstIterator<A> pos, ValueAdapter values,
+ SizeType<A> insert_count);
+
+ template <typename... Args>
+ Reference<A> EmplaceBack(Args&&... args);
+
+ Iterator<A> Erase(ConstIterator<A> from, ConstIterator<A> to);
+
+ void Reserve(SizeType<A> requested_capacity);
+
+ void ShrinkToFit();
+
+ void Swap(Storage* other_storage_ptr);
+
+ void SetIsAllocated() {
+ GetSizeAndIsAllocated() |= static_cast<SizeType<A>>(1);
+ }
+
+ void UnsetIsAllocated() {
+ GetSizeAndIsAllocated() &= ((std::numeric_limits<SizeType<A>>::max)() - 1);
+ }
+
+ void SetSize(SizeType<A> size) {
+ GetSizeAndIsAllocated() =
+ (size << 1) | static_cast<SizeType<A>>(GetIsAllocated());
+ }
+
+ void SetAllocatedSize(SizeType<A> size) {
+ GetSizeAndIsAllocated() = (size << 1) | static_cast<SizeType<A>>(1);
+ }
+
+ void SetInlinedSize(SizeType<A> size) {
+ GetSizeAndIsAllocated() = size << static_cast<SizeType<A>>(1);
+ }
+
+ void AddSize(SizeType<A> count) {
+ GetSizeAndIsAllocated() += count << static_cast<SizeType<A>>(1);
+ }
+
+ void SubtractSize(SizeType<A> count) {
+ assert(count <= GetSize());
+
+ GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
+ }
+
+ void SetAllocation(Allocation<A> allocation) {
+ data_.allocated.allocated_data = allocation.data;
+ data_.allocated.allocated_capacity = allocation.capacity;
+ }
+
+ void MemcpyFrom(const Storage& other_storage) {
+ assert(IsMemcpyOk<A>::value || other_storage.GetIsAllocated());
+
+ GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
+ data_ = other_storage.data_;
+ }
+
+ void DeallocateIfAllocated() {
+ if (GetIsAllocated()) {
+ MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
+ GetAllocatedCapacity());
+ }
+ }
+
+ private:
+ ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
+
+ using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;
+
+ struct Allocated {
+ Pointer<A> allocated_data;
+ SizeType<A> allocated_capacity;
+ };
+
+ struct Inlined {
+ alignas(ValueType<A>) char inlined_data[sizeof(ValueType<A>[N])];
+ };
+
+ union Data {
+ Allocated allocated;
+ Inlined inlined;
+ };
+
+ template <typename... Args>
+ ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
+
+ Metadata metadata_;
+ Data data_;
+};
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::DestroyContents() {
+ Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
+ DestroyElements<A>(GetAllocator(), data, GetSize());
+ DeallocateIfAllocated();
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::InitFrom(const Storage& other) {
+ const SizeType<A> n = other.GetSize();
+  assert(n > 0); // Empty sources handled in caller.
+ ConstPointer<A> src;
+ Pointer<A> dst;
+ if (!other.GetIsAllocated()) {
+ dst = GetInlinedData();
+ src = other.GetInlinedData();
+ } else {
+ // Because this is only called from the `InlinedVector` constructors, it's
+ // safe to take on the allocation with size `0`. If `ConstructElements(...)`
+ // throws, deallocation will be automatically handled by `~Storage()`.
+ SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
+ Allocation<A> allocation =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ SetAllocation(allocation);
+ dst = allocation.data;
+ src = other.GetAllocatedData();
+ }
+ if (IsMemcpyOk<A>::value) {
+ std::memcpy(reinterpret_cast<char*>(dst),
+ reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
+ } else {
+ auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
+ ConstructElements<A>(GetAllocator(), dst, values, n);
+ }
+ GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
+ -> void {
+ // Only callable from constructors!
+ assert(!GetIsAllocated());
+ assert(GetSize() == 0);
+
+ Pointer<A> construct_data;
+ if (new_size > GetInlinedCapacity()) {
+ // Because this is only called from the `InlinedVector` constructors, it's
+ // safe to take on the allocation with size `0`. If `ConstructElements(...)`
+ // throws, deallocation will be automatically handled by `~Storage()`.
+ SizeType<A> requested_capacity =
+ ComputeCapacity(GetInlinedCapacity(), new_size);
+ Allocation<A> allocation =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ construct_data = allocation.data;
+ SetAllocation(allocation);
+ SetIsAllocated();
+ } else {
+ construct_data = GetInlinedData();
+ }
+
+ ConstructElements<A>(GetAllocator(), construct_data, values, new_size);
+
+ // Since the initial size was guaranteed to be `0` and the allocated bit is
+ // already correct for either case, *adding* `new_size` gives us the correct
+ // result faster than setting it directly.
+ AddSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
+ -> void {
+ StorageView<A> storage_view = MakeStorageView();
+
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+
+ y_absl::Span<ValueType<A>> assign_loop;
+ y_absl::Span<ValueType<A>> construct_loop;
+ y_absl::Span<ValueType<A>> destroy_loop;
+
+ if (new_size > storage_view.capacity) {
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
+ destroy_loop = {storage_view.data, storage_view.size};
+ } else if (new_size > storage_view.size) {
+ assign_loop = {storage_view.data, storage_view.size};
+ construct_loop = {storage_view.data + storage_view.size,
+ new_size - storage_view.size};
+ } else {
+ assign_loop = {storage_view.data, new_size};
+ destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+ }
+
+ AssignElements<A>(assign_loop.data(), values, assign_loop.size());
+
+ ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
+ construct_loop.size());
+
+ DestroyElements<A>(GetAllocator(), destroy_loop.data(), destroy_loop.size());
+
+ if (allocation_tx.DidAllocate()) {
+ DeallocateIfAllocated();
+ SetAllocation(std::move(allocation_tx).Release());
+ SetIsAllocated();
+ }
+
+ SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
+ -> void {
+ StorageView<A> storage_view = MakeStorageView();
+ Pointer<A> const base = storage_view.data;
+ const SizeType<A> size = storage_view.size;
+ A& alloc = GetAllocator();
+ if (new_size <= size) {
+ // Destroy extra old elements.
+ DestroyElements<A>(alloc, base + new_size, size - new_size);
+ } else if (new_size <= storage_view.capacity) {
+ // Construct new elements in place.
+ ConstructElements<A>(alloc, base + size, values, new_size - size);
+ } else {
+ // Steps:
+ // a. Allocate new backing store.
+ // b. Construct new elements in new backing store.
+    //  c. Move existing elements from old backing store to new backing store.
+ // d. Destroy all elements in old backing store.
+ // Use transactional wrappers for the first two steps so we can roll
+ // back if necessary due to exceptions.
+ AllocationTransaction<A> allocation_tx(alloc);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
+
+ ConstructionTransaction<A> construction_tx(alloc);
+ construction_tx.Construct(new_data + size, values, new_size - size);
+
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ (MoveIterator<A>(base)));
+ ConstructElements<A>(alloc, new_data, move_values, size);
+
+ DestroyElements<A>(alloc, base, size);
+ std::move(construction_tx).Commit();
+ DeallocateIfAllocated();
+ SetAllocation(std::move(allocation_tx).Release());
+ SetIsAllocated();
+ }
+ SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
+ SizeType<A> insert_count) -> Iterator<A> {
+ StorageView<A> storage_view = MakeStorageView();
+
+ SizeType<A> insert_index =
+ std::distance(ConstIterator<A>(storage_view.data), pos);
+ SizeType<A> insert_end_index = insert_index + insert_count;
+ SizeType<A> new_size = storage_view.size + insert_count;
+
+ if (new_size > storage_view.capacity) {
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+ ConstructionTransaction<A> construction_tx(GetAllocator());
+ ConstructionTransaction<A> move_construction_tx(GetAllocator());
+
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
+
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
+
+ construction_tx.Construct(new_data + insert_index, values, insert_count);
+
+ move_construction_tx.Construct(new_data, move_values, insert_index);
+
+ ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
+ move_values, storage_view.size - insert_index);
+
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+
+ std::move(construction_tx).Commit();
+ std::move(move_construction_tx).Commit();
+ DeallocateIfAllocated();
+ SetAllocation(std::move(allocation_tx).Release());
+
+ SetAllocatedSize(new_size);
+ return Iterator<A>(new_data + insert_index);
+ } else {
+ SizeType<A> move_construction_destination_index =
+ (std::max)(insert_end_index, storage_view.size);
+
+ ConstructionTransaction<A> move_construction_tx(GetAllocator());
+
+ IteratorValueAdapter<A, MoveIterator<A>> move_construction_values(
+ MoveIterator<A>(storage_view.data +
+ (move_construction_destination_index - insert_count)));
+ y_absl::Span<ValueType<A>> move_construction = {
+ storage_view.data + move_construction_destination_index,
+ new_size - move_construction_destination_index};
+
+ Pointer<A> move_assignment_values = storage_view.data + insert_index;
+ y_absl::Span<ValueType<A>> move_assignment = {
+ storage_view.data + insert_end_index,
+ move_construction_destination_index - insert_end_index};
+
+ y_absl::Span<ValueType<A>> insert_assignment = {move_assignment_values,
+ move_construction.size()};
+
+ y_absl::Span<ValueType<A>> insert_construction = {
+ insert_assignment.data() + insert_assignment.size(),
+ insert_count - insert_assignment.size()};
+
+ move_construction_tx.Construct(move_construction.data(),
+ move_construction_values,
+ move_construction.size());
+
+ for (Pointer<A>
+ destination = move_assignment.data() + move_assignment.size(),
+ last_destination = move_assignment.data(),
+ source = move_assignment_values + move_assignment.size();
+ ;) {
+ --destination;
+ --source;
+ if (destination < last_destination) break;
+ *destination = std::move(*source);
+ }
+
+ AssignElements<A>(insert_assignment.data(), values,
+ insert_assignment.size());
+
+ ConstructElements<A>(GetAllocator(), insert_construction.data(), values,
+ insert_construction.size());
+
+ std::move(move_construction_tx).Commit();
+
+ AddSize(insert_count);
+ return Iterator<A>(storage_view.data + insert_index);
+ }
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
+ StorageView<A> storage_view = MakeStorageView();
+ const SizeType<A> n = storage_view.size;
+ if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
+ // Fast path; new element fits.
+ Pointer<A> last_ptr = storage_view.data + n;
+ AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
+ std::forward<Args>(args)...);
+ AddSize(1);
+ return *last_ptr;
+ }
+ // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
+ return EmplaceBackSlow(std::forward<Args>(args)...);
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
+ StorageView<A> storage_view = MakeStorageView();
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
+ SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
+ Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
+ Pointer<A> last_ptr = construct_data + storage_view.size;
+
+ // Construct new element.
+ AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
+ std::forward<Args>(args)...);
+ // Move elements from old backing store to new backing store.
+ ABSL_INTERNAL_TRY {
+ ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
+ storage_view.size);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
+ ABSL_INTERNAL_RETHROW;
+ }
+ // Destroy elements in old backing store.
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+
+ DeallocateIfAllocated();
+ SetAllocation(std::move(allocation_tx).Release());
+ SetIsAllocated();
+ AddSize(1);
+ return *last_ptr;
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
+ -> Iterator<A> {
+ StorageView<A> storage_view = MakeStorageView();
+
+ SizeType<A> erase_size = std::distance(from, to);
+ SizeType<A> erase_index =
+ std::distance(ConstIterator<A>(storage_view.data), from);
+ SizeType<A> erase_end_index = erase_index + erase_size;
+
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data + erase_end_index));
+
+ AssignElements<A>(storage_view.data + erase_index, move_values,
+ storage_view.size - erase_end_index);
+
+ DestroyElements<A>(GetAllocator(),
+ storage_view.data + (storage_view.size - erase_size),
+ erase_size);
+
+ SubtractSize(erase_size);
+ return Iterator<A>(storage_view.data + erase_index);
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
+ StorageView<A> storage_view = MakeStorageView();
+
+ if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
+
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
+
+ SizeType<A> new_requested_capacity =
+ ComputeCapacity(storage_view.capacity, requested_capacity);
+ Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);
+
+ ConstructElements<A>(GetAllocator(), new_data, move_values,
+ storage_view.size);
+
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+
+ DeallocateIfAllocated();
+ SetAllocation(std::move(allocation_tx).Release());
+ SetIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::ShrinkToFit() -> void {
+ // May only be called on allocated instances!
+ assert(GetIsAllocated());
+
+ StorageView<A> storage_view{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()};
+
+ if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
+
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
+
+ Pointer<A> construct_data;
+ if (storage_view.size > GetInlinedCapacity()) {
+ SizeType<A> requested_capacity = storage_view.size;
+ construct_data = allocation_tx.Allocate(requested_capacity);
+ if (allocation_tx.GetCapacity() >= storage_view.capacity) {
+ // Already using the smallest available heap allocation.
+ return;
+ }
+ } else {
+ construct_data = GetInlinedData();
+ }
+
+ ABSL_INTERNAL_TRY {
+ ConstructElements<A>(GetAllocator(), construct_data, move_values,
+ storage_view.size);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ SetAllocation({storage_view.data, storage_view.capacity});
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+
+ MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
+ storage_view.capacity);
+
+ if (allocation_tx.DidAllocate()) {
+ SetAllocation(std::move(allocation_tx).Release());
+ } else {
+ UnsetIsAllocated();
+ }
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
+ using std::swap;
+ assert(this != other_storage_ptr);
+
+ if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
+ swap(data_.allocated, other_storage_ptr->data_.allocated);
+ } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
+ Storage* small_ptr = this;
+ Storage* large_ptr = other_storage_ptr;
+ if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
+
+ for (SizeType<A> i = 0; i < small_ptr->GetSize(); ++i) {
+ swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
+ }
+
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(large_ptr->GetInlinedData() + small_ptr->GetSize()));
+
+ ConstructElements<A>(large_ptr->GetAllocator(),
+ small_ptr->GetInlinedData() + small_ptr->GetSize(),
+ move_values,
+ large_ptr->GetSize() - small_ptr->GetSize());
+
+ DestroyElements<A>(large_ptr->GetAllocator(),
+ large_ptr->GetInlinedData() + small_ptr->GetSize(),
+ large_ptr->GetSize() - small_ptr->GetSize());
+ } else {
+ Storage* allocated_ptr = this;
+ Storage* inlined_ptr = other_storage_ptr;
+ if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
+
+ StorageView<A> allocated_storage_view{
+ allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(),
+ allocated_ptr->GetAllocatedCapacity()};
+
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(inlined_ptr->GetInlinedData()));
+
+ ABSL_INTERNAL_TRY {
+ ConstructElements<A>(inlined_ptr->GetAllocator(),
+ allocated_ptr->GetInlinedData(), move_values,
+ inlined_ptr->GetSize());
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+      allocated_ptr->SetAllocation(Allocation<A>{
+          allocated_storage_view.data, allocated_storage_view.capacity});
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ DestroyElements<A>(inlined_ptr->GetAllocator(),
+ inlined_ptr->GetInlinedData(), inlined_ptr->GetSize());
+
+    inlined_ptr->SetAllocation(Allocation<A>{
+        allocated_storage_view.data, allocated_storage_view.capacity});
+ }
+
+ swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
+ swap(GetAllocator(), other_storage_ptr->GetAllocator());
+}
+
+// End ignore "array-bounds" and "maybe-uninitialized"
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+} // namespace inlined_vector_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/layout.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/layout.h
new file mode 100644
index 00000000000..4c3f6689430
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/layout.h
@@ -0,0 +1,743 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// MOTIVATION AND TUTORIAL
+//
+// If you want to put N doubles followed by M ints into a single heap
+// allocation, it's easy if N and M are known at compile time.
+//
+// struct S {
+// double a[N];
+// int b[M];
+// };
+//
+// S* p = new S;
+//
+// But what if N and M are known only at run time? Class template Layout to
+// the rescue! It's a portable generalization of the technique known as the
+// struct hack.
+//
+// // This object will tell us everything we need to know about the memory
+// // layout of double[N] followed by int[M]. It's structurally identical to
+// // size_t[2] that stores N and M. It's very cheap to create.
+// const Layout<double, int> layout(N, M);
+//
+// // Allocate enough memory for both arrays. `AllocSize()` tells us how much
+// // memory is needed. We are free to use any allocation function we want as
+// // long as it returns aligned memory.
+// std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
+//
+// // Obtain the pointer to the array of doubles.
+// // Equivalent to `reinterpret_cast<double*>(p.get())`.
+// //
+// // We could have written layout.Pointer<0>(p.get()) instead. If all the
+// // types are unique you can use either form, but if some types are repeated
+// // you must use the index form.
+// double* a = layout.Pointer<double>(p.get());
+//
+// // Obtain the pointer to the array of ints.
+// // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
+// int* b = layout.Pointer<int>(p.get());
+//
+// If we are unable to specify sizes of all fields, we can pass as many sizes as
+// we can to `Partial()`. In return, it'll allow us to access the fields whose
+// locations and sizes can be computed from the provided information.
+// `Partial()` comes in handy when the array sizes are embedded into the
+// allocation.
+//
+// // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
+// using L = Layout<size_t, size_t, double, int>;
+//
+// unsigned char* Allocate(size_t n, size_t m) {
+// const L layout(1, 1, n, m);
+// unsigned char* p = new unsigned char[layout.AllocSize()];
+// *layout.Pointer<0>(p) = n;
+// *layout.Pointer<1>(p) = m;
+// return p;
+// }
+//
+// void Use(unsigned char* p) {
+// // First, extract N and M.
+// // Specify that the first array has only one element. Using `prefix` we
+// // can access the first two arrays but not more.
+// constexpr auto prefix = L::Partial(1);
+// size_t n = *prefix.Pointer<0>(p);
+// size_t m = *prefix.Pointer<1>(p);
+//
+// // Now we can get pointers to the payload.
+// const L layout(1, 1, n, m);
+// double* a = layout.Pointer<double>(p);
+// int* b = layout.Pointer<int>(p);
+// }
+//
+// The layout we used above combines fixed-size with dynamically-sized fields.
+// This is quite common. Layout is optimized for this use case and generates
+// optimal code. All computations that can be performed at compile time are
+// indeed performed at compile time.
+//
+// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
+// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
+// padding in between arrays.
+//
+// You can manually override the alignment of an array by wrapping the type in
+// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding). `N` cannot be less than `alignof(T)`.
+//
+// `AllocSize()` and `Pointer()` are the most basic methods for dealing with
+// memory layouts. Check out the reference or code below to discover more.
+//
+// EXAMPLE
+//
+// // Immutable move-only string with sizeof equal to sizeof(void*). The
+// // string size and the characters are kept in the same heap allocation.
+// class CompactString {
+// public:
+// CompactString(const char* s = "") {
+// const size_t size = strlen(s);
+// // size_t[1] followed by char[size + 1].
+// const L layout(1, size + 1);
+// p_.reset(new unsigned char[layout.AllocSize()]);
+// // If running under ASAN, mark the padding bytes, if any, to catch
+// // memory errors.
+// layout.PoisonPadding(p_.get());
+// // Store the size in the allocation.
+// *layout.Pointer<size_t>(p_.get()) = size;
+// // Store the characters in the allocation.
+// memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+// }
+//
+// size_t size() const {
+// // Equivalent to reinterpret_cast<size_t&>(*p_).
+// return *L::Partial().Pointer<size_t>(p_.get());
+// }
+//
+// const char* c_str() const {
+// // Equivalent to reinterpret_cast<char*>(p_.get() + sizeof(size_t)).
+// // The argument in Partial(1) specifies that we have size_t[1] in front
+// // of the characters.
+// return L::Partial(1).Pointer<char>(p_.get());
+// }
+//
+// private:
+// // Our heap allocation contains a size_t followed by an array of chars.
+// using L = Layout<size_t, char>;
+// std::unique_ptr<unsigned char[]> p_;
+// };
+//
+// int main() {
+// CompactString s = "hello";
+// assert(s.size() == 5);
+// assert(strcmp(s.c_str(), "hello") == 0);
+// }
+//
+// DOCUMENTATION
+//
+// The interface exported by this file consists of:
+// - class `Layout<>` and its public members.
+// - The public members of class `internal_layout::LayoutImpl<>`. That class
+// isn't intended to be used directly, and its name and template parameter
+// list are internal implementation details, but the class itself provides
+// most of the functionality in this file. See comments on its members for
+// detailed documentation.
+//
+// `Layout<T1, ..., Tn>::Partial(count1, ..., countm)` (where `m <= n`) returns
+// a `LayoutImpl<>` object. `Layout<T1, ..., Tn> layout(count1, ..., countn)`
+// creates a `Layout` object, which exposes the same functionality by inheriting
+// from `LayoutImpl<>`.
+
+#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <ostream>
+#include <util/generic/string.h>
+#include <tuple>
+#include <type_traits>
+#include <typeinfo>
+#include <utility>
+
+#include "y_absl/base/config.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/types/span.h"
+#include "y_absl/utility/utility.h"
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#if defined(__GXX_RTTI)
+#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#endif
+
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#include <cxxabi.h>
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A type wrapper that instructs `Layout` to use the specific alignment for the
+// array. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding).
+//
+// Requires: `N >= alignof(T)` and `N` is a power of 2.
+template <class T, size_t N>
+struct Aligned;
+
+namespace internal_layout {
+
+template <class T>
+struct NotAligned {};
+
+template <class T, size_t N>
+struct NotAligned<const Aligned<T, N>> {
+ static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified");
+};
+
+template <size_t>
+using IntToSize = size_t;
+
+template <class>
+using TypeToSize = size_t;
+
+template <class T>
+struct Type : NotAligned<T> {
+ using type = T;
+};
+
+template <class T, size_t N>
+struct Type<Aligned<T, N>> {
+ using type = T;
+};
+
+template <class T>
+struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {};
+
+template <class T, size_t N>
+struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {};
+
+// Note: workaround for https://gcc.gnu.org/PR88115
+template <class T>
+struct AlignOf : NotAligned<T> {
+ static constexpr size_t value = alignof(T);
+};
+
+template <class T, size_t N>
+struct AlignOf<Aligned<T, N>> {
+ static_assert(N % alignof(T) == 0,
+ "Custom alignment can't be lower than the type's alignment");
+ static constexpr size_t value = N;
+};
+
+// Does `Ts...` contain `T`?
+template <class T, class... Ts>
+using Contains = y_absl::disjunction<std::is_same<T, Ts>...>;
+
+template <class From, class To>
+using CopyConst =
+ typename std::conditional<std::is_const<From>::value, const To, To>::type;
+
+// Note: We're not qualifying this with y_absl:: because it doesn't compile under
+// MSVC.
+template <class T>
+using SliceType = Span<T>;
+
+// This namespace contains no types. It prevents functions defined in it from
+// being found by ADL.
+namespace adl_barrier {
+
+template <class Needle, class... Ts>
+constexpr size_t Find(Needle, Needle, Ts...) {
+ static_assert(!Contains<Needle, Ts...>(), "Duplicate element type");
+ return 0;
+}
+
+template <class Needle, class T, class... Ts>
+constexpr size_t Find(Needle, T, Ts...) {
+ return adl_barrier::Find(Needle(), Ts()...) + 1;
+}
+
+constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); }
+
+// Returns `q * m` for the smallest `q` such that `q * m >= n`.
+// Requires: `m` is a power of two. It's enforced by IsLegalElementType below.
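+// For example, Align(13, 8) == 16 and Align(16, 8) == 16.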
+constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
+
+constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; }
+
+constexpr size_t Max(size_t a) { return a; }
+
+template <class... Ts>
+constexpr size_t Max(size_t a, size_t b, Ts... rest) {
+ return adl_barrier::Max(b < a ? a : b, rest...);
+}
+
+template <class T>
+TString TypeName() {
+ TString out;
+ int status = 0;
+ char* demangled = nullptr;
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+ demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
+#endif
+ if (status == 0 && demangled != nullptr) { // Demangling succeeded.
+ y_absl::StrAppend(&out, "<", demangled, ">");
+ free(demangled);
+ } else {
+#if defined(__GXX_RTTI) || defined(_CPPRTTI)
+ y_absl::StrAppend(&out, "<", typeid(T).name(), ">");
+#endif
+ }
+ return out;
+}
+
+} // namespace adl_barrier
+
+template <bool C>
+using EnableIf = typename std::enable_if<C, int>::type;
+
+// Can `T` be a template argument of `Layout`?
+template <class T>
+using IsLegalElementType = std::integral_constant<
+ bool, !std::is_reference<T>::value && !std::is_volatile<T>::value &&
+ !std::is_reference<typename Type<T>::type>::value &&
+ !std::is_volatile<typename Type<T>::type>::value &&
+ adl_barrier::IsPow2(AlignOf<T>::value)>;
+
+template <class Elements, class SizeSeq, class OffsetSeq>
+class LayoutImpl;
+
+// Public base class of `Layout` and the result type of `Layout::Partial()`.
+//
+// `Elements...` contains all template arguments of `Layout` that created this
+// instance.
+//
+// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
+// passed to `Layout::Partial()` or `Layout::Layout()`.
+//
+// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
+// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
+// can compute offsets).
+template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
+class LayoutImpl<std::tuple<Elements...>, y_absl::index_sequence<SizeSeq...>,
+ y_absl::index_sequence<OffsetSeq...>> {
+ private:
+ static_assert(sizeof...(Elements) > 0, "At least one field is required");
+ static_assert(y_absl::conjunction<IsLegalElementType<Elements>...>::value,
+ "Invalid element type (see IsLegalElementType)");
+
+ enum {
+ NumTypes = sizeof...(Elements),
+ NumSizes = sizeof...(SizeSeq),
+ NumOffsets = sizeof...(OffsetSeq),
+ };
+
+ // These are guaranteed by `Layout`.
+ static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
+ "Internal error");
+ static_assert(NumTypes > 0, "Internal error");
+
+ // Returns the index of `T` in `Elements...`. Results in a compilation error
+ // if `Elements...` doesn't contain exactly one instance of `T`.
+ template <class T>
+ static constexpr size_t ElementIndex() {
+ static_assert(Contains<Type<T>, Type<typename Type<Elements>::type>...>(),
+ "Type not found");
+ return adl_barrier::Find(Type<T>(),
+ Type<typename Type<Elements>::type>()...);
+ }
+
+ template <size_t N>
+ using ElementAlignment =
+ AlignOf<typename std::tuple_element<N, std::tuple<Elements...>>::type>;
+
+ public:
+ // Element types of all arrays packed in a tuple.
+ using ElementTypes = std::tuple<typename Type<Elements>::type...>;
+
+ // Element type of the Nth array.
+ template <size_t N>
+ using ElementType = typename std::tuple_element<N, ElementTypes>::type;
+
+ constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
+ : size_{sizes...} {}
+
+ // Alignment of the layout, equal to the strictest alignment of all elements.
+ // All pointers passed to the methods of layout must be aligned to this value.
+ static constexpr size_t Alignment() {
+ return adl_barrier::Max(AlignOf<Elements>::value...);
+ }
+
+ // Offset in bytes of the Nth array.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+  //   assert(x.Offset<0>() == 0);   // The ints start from 0.
+  //   assert(x.Offset<1>() == 16);  // The doubles start from 16.
+ //
+ // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+ template <size_t N, EnableIf<N == 0> = 0>
+ constexpr size_t Offset() const {
+ return 0;
+ }
+
+ template <size_t N, EnableIf<N != 0> = 0>
+ constexpr size_t Offset() const {
+ static_assert(N < NumOffsets, "Index out of bounds");
+ return adl_barrier::Align(
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
+ ElementAlignment<N>::value);
+ }
+
+ // Offset in bytes of the array with the specified element type. There must
+ // be exactly one such array and its zero-based index must be at most
+ // `NumSizes`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+  //   assert(x.Offset<int>() == 0);      // The ints start from 0.
+  //   assert(x.Offset<double>() == 16);  // The doubles start from 16.
+ template <class T>
+ constexpr size_t Offset() const {
+ return Offset<ElementIndex<T>()>();
+ }
+
+ // Offsets in bytes of all arrays for which the offsets are known.
+ constexpr std::array<size_t, NumOffsets> Offsets() const {
+ return {{Offset<OffsetSeq>()...}};
+ }
+
+ // The number of elements in the Nth array. This is the Nth argument of
+ // `Layout::Partial()` or `Layout::Layout()` (zero-based).
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // assert(x.Size<0>() == 3);
+ // assert(x.Size<1>() == 4);
+ //
+ // Requires: `N < NumSizes`.
+ template <size_t N>
+ constexpr size_t Size() const {
+ static_assert(N < NumSizes, "Index out of bounds");
+ return size_[N];
+ }
+
+ // The number of elements in the array with the specified element type.
+ // There must be exactly one such array and its zero-based index must be
+ // at most `NumSizes`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // assert(x.Size<int>() == 3);
+ // assert(x.Size<double>() == 4);
+ template <class T>
+ constexpr size_t Size() const {
+ return Size<ElementIndex<T>()>();
+ }
+
+ // The number of elements of all arrays for which they are known.
+ constexpr std::array<size_t, NumSizes> Sizes() const {
+ return {{Size<SizeSeq>()...}};
+ }
+
+ // Pointer to the beginning of the Nth array.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // int* ints = x.Pointer<0>(p);
+ // double* doubles = x.Pointer<1>(p);
+ //
+ // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+ // Requires: `p` is aligned to `Alignment()`.
+ template <size_t N, class Char>
+ CopyConst<Char, ElementType<N>>* Pointer(Char* p) const {
+ using C = typename std::remove_const<Char>::type;
+ static_assert(
+ std::is_same<C, char>() || std::is_same<C, unsigned char>() ||
+ std::is_same<C, signed char>(),
+ "The argument must be a pointer to [const] [signed|unsigned] char");
+ constexpr size_t alignment = Alignment();
+ (void)alignment;
+ assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
+ return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>());
+ }
+
+ // Pointer to the beginning of the array with the specified element type.
+ // There must be exactly one such array and its zero-based index must be at
+ // most `NumSizes`.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // int* ints = x.Pointer<int>(p);
+ // double* doubles = x.Pointer<double>(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class T, class Char>
+ CopyConst<Char, T>* Pointer(Char* p) const {
+ return Pointer<ElementIndex<T>()>(p);
+ }
+
+ // Pointers to all arrays for which pointers are known.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ //
+ // int* ints;
+ // double* doubles;
+ // std::tie(ints, doubles) = x.Pointers(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ //
+ // Note: We're not using ElementType alias here because it does not compile
+ // under MSVC.
+ template <class Char>
+ std::tuple<CopyConst<
+ Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
+ Pointers(Char* p) const {
+ return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
+ Pointer<OffsetSeq>(p)...);
+ }
+
+ // The Nth array.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // Span<int> ints = x.Slice<0>(p);
+ // Span<double> doubles = x.Slice<1>(p);
+ //
+ // Requires: `N < NumSizes`.
+ // Requires: `p` is aligned to `Alignment()`.
+ template <size_t N, class Char>
+ SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const {
+ return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p), Size<N>());
+ }
+
+ // The array with the specified element type. There must be exactly one
+ // such array and its zero-based index must be less than `NumSizes`.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // Span<int> ints = x.Slice<int>(p);
+ // Span<double> doubles = x.Slice<double>(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class T, class Char>
+ SliceType<CopyConst<Char, T>> Slice(Char* p) const {
+ return Slice<ElementIndex<T>()>(p);
+ }
+
+ // All arrays with known sizes.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ //
+ // Span<int> ints;
+ // Span<double> doubles;
+ // std::tie(ints, doubles) = x.Slices(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ //
+ // Note: We're not using ElementType alias here because it does not compile
+ // under MSVC.
+ template <class Char>
+ std::tuple<SliceType<CopyConst<
+ Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
+ Slices(Char* p) const {
+ // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed
+ // in 6.1).
+ (void)p;
+ return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
+ Slice<SizeSeq>(p)...);
+ }
+
+ // The size of the allocation that fits all arrays.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes
+ //
+ // Requires: `NumSizes == sizeof...(Ts)`.
+ constexpr size_t AllocSize() const {
+ static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
+ return Offset<NumTypes - 1>() +
+ SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
+ }
+
+ // If built with --config=asan, poisons padding bytes (if any) in the
+ // allocation. The pointer must point to a memory block at least
+ // `AllocSize()` bytes in length.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
+ void PoisonPadding(const Char* p) const {
+ Pointer<0>(p); // verify the requirements on `Char` and `p`
+ }
+
+ template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
+ void PoisonPadding(const Char* p) const {
+ static_assert(N < NumOffsets, "Index out of bounds");
+ (void)p;
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ PoisonPadding<Char, N - 1>(p);
+    // The `if` is an optimization. It doesn't affect the observable behavior.
+ if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
+ size_t start =
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
+ ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
+ }
+#endif
+ }
+
+ // Human-readable description of the memory layout. Useful for debugging.
+ // Slow.
+ //
+ // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed
+ // // by an unknown number of doubles.
+ // auto x = Layout<char, int, double>::Partial(5, 3);
+ // assert(x.DebugString() ==
+ // "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)");
+ //
+ // Each field is in the following format: @offset<type>(sizeof)[size] (<type>
+ // may be missing depending on the target platform). For example,
+ // @8<int>(4)[3] means that at offset 8 we have an array of ints, where each
+ // int is 4 bytes, and we have 3 of those ints. The size of the last field may
+ // be missing (as in the example above). Only fields with known offsets are
+ // described. Type names may differ across platforms: one compiler might
+ // produce "unsigned*" where another produces "unsigned int *".
+ TString DebugString() const {
+ const auto offsets = Offsets();
+ const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
+ const TString types[] = {
+ adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
+ TString res = y_absl::StrCat("@0", types[0], "(", sizes[0], ")");
+ for (size_t i = 0; i != NumOffsets - 1; ++i) {
+ y_absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
+ "(", sizes[i + 1], ")");
+ }
+ // NumSizes is a constant that may be zero. Some compilers cannot see that
+ // inside the if statement "size_[NumSizes - 1]" must be valid.
+ int last = static_cast<int>(NumSizes) - 1;
+ if (NumTypes == NumSizes && last >= 0) {
+ y_absl::StrAppend(&res, "[", size_[last], "]");
+ }
+ return res;
+ }
+
+ private:
+ // Arguments of `Layout::Partial()` or `Layout::Layout()`.
+ size_t size_[NumSizes > 0 ? NumSizes : 1];
+};
+
+template <size_t NumSizes, class... Ts>
+using LayoutType = LayoutImpl<
+ std::tuple<Ts...>, y_absl::make_index_sequence<NumSizes>,
+ y_absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
+
+} // namespace internal_layout
+
+// Descriptor of arrays of various types and sizes laid out in memory one after
+// another. See the top of the file for documentation.
+//
+// Check out the public API of internal_layout::LayoutImpl above. The type is
+// internal to the library but its methods are public, and they are inherited
+// by `Layout`.
+template <class... Ts>
+class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
+ public:
+ static_assert(sizeof...(Ts) > 0, "At least one field is required");
+ static_assert(
+ y_absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
+ "Invalid element type (see IsLegalElementType)");
+
+ // The result type of `Partial()` with `NumSizes` arguments.
+ template <size_t NumSizes>
+ using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
+
+ // `Layout` knows the element types of the arrays we want to lay out in
+ // memory but not the number of elements in each array.
+ // `Partial(size1, ..., sizeN)` allows us to specify the latter. The
+ // resulting immutable object can be used to obtain pointers to the
+ // individual arrays.
+ //
+ // It's allowed to pass fewer array sizes than the number of arrays. E.g.,
+  // if all you need is the offset of the second array, you only need to
+ // pass one argument -- the number of elements in the first array.
+ //
+ // // int[3] followed by 4 bytes of padding and an unknown number of
+ // // doubles.
+ // auto x = Layout<int, double>::Partial(3);
+ // // doubles start at byte 16.
+ // assert(x.Offset<1>() == 16);
+ //
+ // If you know the number of elements in all arrays, you can still call
+ // `Partial()` but it's more convenient to use the constructor of `Layout`.
+ //
+ // Layout<int, double> x(3, 5);
+ //
+ // Note: The sizes of the arrays must be specified in number of elements,
+ // not in bytes.
+ //
+ // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
+ // Requires: all arguments are convertible to `size_t`.
+ template <class... Sizes>
+ static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
+ static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
+ return PartialType<sizeof...(Sizes)>(y_absl::forward<Sizes>(sizes)...);
+ }
+
+ // Creates a layout with the sizes of all arrays specified. If you know
+ // only the sizes of the first N arrays (where N can be zero), you can use
+ // `Partial()` defined above. The constructor is essentially equivalent to
+ // calling `Partial()` and passing in all array sizes; the constructor is
+ // provided as a convenient abbreviation.
+ //
+ // Note: The sizes of the arrays must be specified in number of elements,
+ // not in bytes.
+ constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
+ : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/node_hash_policy.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/node_hash_policy.h
new file mode 100644
index 00000000000..24d0eaf5ee8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/node_hash_policy.h
@@ -0,0 +1,92 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Adapts a policy for nodes.
+//
+// The node policy should model:
+//
+// struct Policy {
+// // Returns a new node allocated and constructed using the allocator, using
+// // the specified arguments.
+// template <class Alloc, class... Args>
+// value_type* new_element(Alloc* alloc, Args&&... args) const;
+//
+// // Destroys and deallocates node using the allocator.
+// template <class Alloc>
+// void delete_element(Alloc* alloc, value_type* node) const;
+// };
+//
+// It may also optionally define `value()` and `apply()`. For documentation on
+// these, see hash_policy_traits.h.
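+//
+// A minimal conforming policy might look like this (an illustrative sketch;
+// `IntPolicy` is hypothetical, and a real policy would typically allocate
+// through `alloc` rather than plain new/delete):
+//
+//   struct IntPolicy : node_hash_policy<int&, IntPolicy> {
+//     using value_type = int;
+//
+//     template <class Alloc, class... Args>
+//     static int* new_element(Alloc*, Args&&... args) {
+//       return new int(std::forward<Args>(args)...);
+//     }
+//
+//     template <class Alloc>
+//     static void delete_element(Alloc*, int* node) { delete node; }
+//   };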
+
+#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class Reference, class Policy>
+struct node_hash_policy {
+ static_assert(std::is_lvalue_reference<Reference>::value, "");
+
+ using slot_type = typename std::remove_cv<
+ typename std::remove_reference<Reference>::type>::type*;
+
+ template <class Alloc, class... Args>
+ static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+ *slot = Policy::new_element(alloc, std::forward<Args>(args)...);
+ }
+
+ template <class Alloc>
+ static void destroy(Alloc* alloc, slot_type* slot) {
+ Policy::delete_element(alloc, *slot);
+ }
+
+ template <class Alloc>
+ static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
+ *new_slot = *old_slot;
+ }
+
+ static size_t space_used(const slot_type* slot) {
+ if (slot == nullptr) return Policy::element_space_used(nullptr);
+ return Policy::element_space_used(*slot);
+ }
+
+ static Reference element(slot_type* slot) { return **slot; }
+
+ template <class T, class P = Policy>
+ static auto value(T* elem) -> decltype(P::value(elem)) {
+ return P::value(elem);
+ }
+
+ template <class... Ts, class P = Policy>
+ static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) {
+ return P::apply(std::forward<Ts>(ts)...);
+ }
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_map.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_map.h
new file mode 100644
index 00000000000..8a0591f581a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_map.h
@@ -0,0 +1,198 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/internal/throw_delegate.h"
+#include "y_absl/container/internal/container_memory.h"
+#include "y_absl/container/internal/raw_hash_set.h" // IWYU pragma: export
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
+ // P is Policy. It's passed as a template argument to support maps that have
+ // incomplete types as values, as in unordered_map<K, IncompleteType>.
+ // MappedReference<> may be a non-reference type.
+ template <class P>
+ using MappedReference = decltype(P::value(
+ std::addressof(std::declval<typename raw_hash_map::reference>())));
+
+ // MappedConstReference<> may be a non-reference type.
+ template <class P>
+ using MappedConstReference = decltype(P::value(
+ std::addressof(std::declval<typename raw_hash_map::const_reference>())));
+
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
+
+ public:
+ using key_type = typename Policy::key_type;
+ using mapped_type = typename Policy::mapped_type;
+ template <class K>
+ using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ static_assert(!std::is_reference<key_type>::value, "");
+
+ // TODO(b/187807849): Evaluate whether to support reference mapped_type and
+ // remove this assertion if/when it is supported.
+ static_assert(!std::is_reference<mapped_type>::value, "");
+
+ using iterator = typename raw_hash_map::raw_hash_set::iterator;
+ using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator;
+
+ raw_hash_map() {}
+ using raw_hash_map::raw_hash_set::raw_hash_set;
+
+ // The last two template parameters ensure that both arguments are rvalues
+ // (lvalue arguments are handled by the overloads below). This is necessary
+ // for supporting bitfield arguments.
+ //
+ // union { int n : 1; };
+ // flat_hash_map<int, int> m;
+ // m.insert_or_assign(n, n);
+ template <class K = key_type, class V = mapped_type, K* = nullptr,
+ V* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
+ return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
+ return insert_or_assign_impl(std::forward<K>(k), v);
+ }
+
+ template <class K = key_type, class V = mapped_type, V* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
+ return insert_or_assign_impl(k, std::forward<V>(v));
+ }
+
+ template <class K = key_type, class V = mapped_type>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
+ return insert_or_assign_impl(k, v);
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr,
+ V* = nullptr>
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
+ return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr>
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
+ return insert_or_assign(std::forward<K>(k), v).first;
+ }
+
+ template <class K = key_type, class V = mapped_type, V* = nullptr>
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
+ return insert_or_assign(k, std::forward<V>(v)).first;
+ }
+
+ template <class K = key_type, class V = mapped_type>
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
+ return insert_or_assign(k, v).first;
+ }
+
+ // All `try_emplace()` overloads make the same guarantees regarding rvalue
+ // arguments as `std::unordered_map::try_emplace()`, namely that these
+ // functions will not move from rvalue arguments if insertions do not happen.
+ template <class K = key_type, class... Args,
+ typename std::enable_if<
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0,
+ K* = nullptr>
+ std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
+ return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
+ }
+
+ template <class K = key_type, class... Args,
+ typename std::enable_if<
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0>
+ std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
+ return try_emplace_impl(k, std::forward<Args>(args)...);
+ }
+
+ template <class K = key_type, class... Args, K* = nullptr>
+ iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
+ return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
+ }
+
+ template <class K = key_type, class... Args>
+ iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
+ return try_emplace(k, std::forward<Args>(args)...).first;
+ }
+
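+  // For example (an illustrative sketch, not part of this header; it assumes
+  // a map type built on raw_hash_map, e.g. flat_hash_map):
+  //
+  //   flat_hash_map<TString, int> m;
+  //   TString k = "abc";
+  //   m.try_emplace(std::move(k), 1);   // inserts; k may be moved from
+  //   TString k2 = "abc";
+  //   m.try_emplace(std::move(k2), 2);  // key exists; k2 is NOT moved from
+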
+ template <class K = key_type, class P = Policy>
+ MappedReference<P> at(const key_arg<K>& key) {
+ auto it = this->find(key);
+ if (it == this->end()) {
+ base_internal::ThrowStdOutOfRange(
+ "y_absl::container_internal::raw_hash_map<>::at");
+ }
+ return Policy::value(&*it);
+ }
+
+ template <class K = key_type, class P = Policy>
+ MappedConstReference<P> at(const key_arg<K>& key) const {
+ auto it = this->find(key);
+ if (it == this->end()) {
+ base_internal::ThrowStdOutOfRange(
+ "y_absl::container_internal::raw_hash_map<>::at");
+ }
+ return Policy::value(&*it);
+ }
+
+ template <class K = key_type, class P = Policy, K* = nullptr>
+ MappedReference<P> operator[](key_arg<K>&& key) {
+ return Policy::value(&*try_emplace(std::forward<K>(key)).first);
+ }
+
+ template <class K = key_type, class P = Policy>
+ MappedReference<P> operator[](const key_arg<K>& key) {
+ return Policy::value(&*try_emplace(key).first);
+ }
+
+ private:
+ template <class K, class V>
+ std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
+ auto res = this->find_or_prepare_insert(k);
+ if (res.second)
+ this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
+ else
+ Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
+ return {this->iterator_at(res.first), res.second};
+ }
+
+ template <class K = key_type, class... Args>
+ std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
+ auto res = this->find_or_prepare_insert(k);
+ if (res.second)
+ this->emplace_at(res.first, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ return {this->iterator_at(res.first), res.second};
+ }
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc
new file mode 100644
index 00000000000..bf2a15a6781
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc
@@ -0,0 +1,67 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/container/internal/raw_hash_set.h"
+
+#include <atomic>
+#include <cstddef>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = {
+ ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
+
+constexpr size_t Group::kWidth;
+
+// Returns "random" seed.
+inline size_t RandomSeed() {
+#ifdef ABSL_HAVE_THREAD_LOCAL
+ static thread_local size_t counter = 0;
+ size_t value = ++counter;
+#else // ABSL_HAVE_THREAD_LOCAL
+ static std::atomic<size_t> counter(0);
+ size_t value = counter.fetch_add(1, std::memory_order_relaxed);
+#endif // ABSL_HAVE_THREAD_LOCAL
+ return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
+}
+
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
+ // To avoid problems with weak hashes and single bit tests, we use % 13.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+}
+
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
+ assert(ctrl[capacity] == ctrl_t::kSentinel);
+ assert(IsValidCapacity(capacity));
+ for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
+ Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+ }
+ // Copy the cloned ctrl bytes.
+ std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
+ ctrl[capacity] = ctrl_t::kSentinel;
+}
+// Extern template instantiation for the inline function.
+template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h
new file mode 100644
index 00000000000..8f45559b31d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h
@@ -0,0 +1,2034 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// An open-addressing hashtable with quadratic probing.
+//
+// This is a low level hashtable on top of which different interfaces can be
+// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
+//
+// The table interface is similar to that of std::unordered_set. Notable
+// differences are that most member functions support heterogeneous keys when
+// BOTH the hash and eq functions are marked as transparent. They do so by
+// providing a typedef called `is_transparent`.
+//
+// When heterogeneous lookup is enabled, functions that take key_type act as if
+// they have an overload set like:
+//
+// iterator find(const key_type& key);
+// template <class K>
+// iterator find(const K& key);
+//
+// size_type erase(const key_type& key);
+// template <class K>
+// size_type erase(const K& key);
+//
+// std::pair<iterator, iterator> equal_range(const key_type& key);
+// template <class K>
+// std::pair<iterator, iterator> equal_range(const K& key);
+//
+// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
+// exist.
+//
+// find() also supports passing the hash explicitly:
+//
+// iterator find(const key_type& key, size_t hash);
+// template <class U>
+// iterator find(const U& key, size_t hash);
+//
+// In addition the pointer to element and iterator stability guarantees are
+// weaker: all iterators and pointers are invalidated after a new element is
+// inserted.
+//
+// IMPLEMENTATION DETAILS
+//
+// The table stores elements inline in a slot array. In addition to the slot
+// array the table maintains some control state per slot. The extra state is one
+// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
+// the hash of an occupied slot. The table is split into logical groups of
+// slots, like so:
+//
+// Group 1 Group 2 Group 3
+// +---------------+---------------+---------------+
+// | | | | | | | | | | | | | | | | | | | | | | | | |
+// +---------------+---------------+---------------+
+//
+// On lookup the hash is split into two parts:
+// - H2: 7 bits (those stored in the control bytes)
+// - H1: the rest of the bits
+// The groups are probed using H1. For each group the slots are matched to H2 in
+// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
+// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit.
+//
+// On insert, once the right group is found (as in lookup), its slots are
+// filled in order.
+//
+// On erase a slot is cleared. In case the group did not have any empty slots
+// before the erase, the erased slot is marked as deleted.
+//
+// Groups without empty slots (but maybe with deleted slots) extend the probe
+// sequence. The probing algorithm is quadratic. Given N the number of groups,
+// the probing function for the i'th probe is:
+//
+// P(0) = H1 % N
+//
+// P(i) = (P(i - 1) + i) % N
+//
+// This probing function guarantees that after N probes, all the groups of the
+// table will be probed exactly once.
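+//
+// For example (a worked check): with N = 4 groups and H1 % N = 0, the probes
+// visit P(0)=0, P(1)=1, P(2)=3, P(3)=(3+3)%4=2 -- each of the 4 groups
+// exactly once. The i'th offset is the triangular number i*(i+1)/2 mod N,
+// and for power-of-two N the first N of these are all distinct.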
+//
+// The control state and slot array are stored contiguously in a shared heap
+// allocation. The layout of this allocation is: `capacity()` control bytes,
+// one sentinel control byte, `Group::kWidth - 1` cloned control bytes,
+// <possible padding>, `capacity()` slots. The sentinel control byte is used in
+// iteration so we know when we reach the end of the table. The cloned control
+// bytes at the end of the table are cloned from the beginning of the table so
+// groups that begin near the end of the table can see a full group. In cases in
+// which there are more than `capacity()` cloned control bytes, the extra bytes
+// are `kEmpty`, and these ensure that we always see at least one empty slot and
+// can stop an unsuccessful search.
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/internal/endian.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/base/port.h"
+#include "y_absl/container/internal/common.h"
+#include "y_absl/container/internal/compressed_tuple.h"
+#include "y_absl/container/internal/container_memory.h"
+#include "y_absl/container/internal/hash_policy_traits.h"
+#include "y_absl/container/internal/hashtable_debug_hooks.h"
+#include "y_absl/container/internal/hashtablez_sampler.h"
+#include "y_absl/container/internal/have_sse.h"
+#include "y_absl/memory/memory.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/numeric/bits.h"
+#include "y_absl/utility/utility.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <typename AllocType>
+void SwapAlloc(AllocType& lhs, AllocType& rhs,
+ std::true_type /* propagate_on_container_swap */) {
+ using std::swap;
+ swap(lhs, rhs);
+}
+template <typename AllocType>
+void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
+ std::false_type /* propagate_on_container_swap */) {}
+
+template <size_t Width>
+class probe_seq {
+ public:
+ probe_seq(size_t hash, size_t mask) {
+ assert(((mask + 1) & mask) == 0 && "not a mask");
+ mask_ = mask;
+ offset_ = hash & mask_;
+ }
+ size_t offset() const { return offset_; }
+ size_t offset(size_t i) const { return (offset_ + i) & mask_; }
+
+ void next() {
+ index_ += Width;
+ offset_ += index_;
+ offset_ &= mask_;
+ }
+ // 0-based probe index. The i-th probe in the probe sequence.
+ size_t index() const { return index_; }
+
+ private:
+ size_t mask_;
+ size_t offset_;
+ size_t index_ = 0;
+};
+
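+// For example (a worked trace, assuming Width == 16 and mask == 63): starting
+// from offset `h & 63`, successive next() calls advance by 16, 32, 48, ...,
+// so the visited offsets are h, h+16, h+48, h+96 (mod 64) -- group starts
+// separated by triangular multiples of the group width, matching the
+// quadratic probing described at the top of this file.
+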
+template <class ContainerKey, class Hash, class Eq>
+struct RequireUsableKey {
+ template <class PassedKey, class... Args>
+ std::pair<
+ decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
+ decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
+ std::declval<const PassedKey&>()))>*
+ operator()(const PassedKey&, const Args&...) const;
+};
+
+template <class E, class Policy, class Hash, class Eq, class... Ts>
+struct IsDecomposable : std::false_type {};
+
+template <class Policy, class Hash, class Eq, class... Ts>
+struct IsDecomposable<
+ y_absl::void_t<decltype(
+ Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+ std::declval<Ts>()...))>,
+ Policy, Hash, Eq, Ts...> : std::true_type {};
+
+// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
+template <class T>
+constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
+ using std::swap;
+ return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
+}
+template <class T>
+constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
+ return false;
+}
+
+template <typename T>
+uint32_t TrailingZeros(T x) {
+ ABSL_INTERNAL_ASSUME(x != 0);
+ return countr_zero(x);
+}
+
+// An abstraction over a bitmask. It provides an easy way to iterate through the
+// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
+// this is a true bitmask. On non-SSE platforms, the arithmetic used to
+// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
+// either 0x00 or 0x80.
+//
+// For example:
+// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
+// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0>
+class BitMask {
+ static_assert(std::is_unsigned<T>::value, "");
+ static_assert(Shift == 0 || Shift == 3, "");
+
+ public:
+ // These are useful for unit tests (gunit).
+ using value_type = int;
+ using iterator = BitMask;
+ using const_iterator = BitMask;
+
+ explicit BitMask(T mask) : mask_(mask) {}
+ BitMask& operator++() {
+ mask_ &= (mask_ - 1);
+ return *this;
+ }
+ explicit operator bool() const { return mask_ != 0; }
+ int operator*() const { return LowestBitSet(); }
+ uint32_t LowestBitSet() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+ uint32_t HighestBitSet() const {
+ return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
+ }
+
+ BitMask begin() const { return *this; }
+ BitMask end() const { return BitMask(0); }
+
+ uint32_t TrailingZeros() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+
+ uint32_t LeadingZeros() const {
+ constexpr int total_significant_bits = SignificantBits << Shift;
+ constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
+ return countl_zero(mask_ << extra_bits) >> Shift;
+ }
+
+ private:
+ friend bool operator==(const BitMask& a, const BitMask& b) {
+ return a.mask_ == b.mask_;
+ }
+ friend bool operator!=(const BitMask& a, const BitMask& b) {
+ return a.mask_ != b.mask_;
+ }
+
+ T mask_;
+};
+
+using h2_t = uint8_t;
+
+// The values here are selected for maximum performance. See the static asserts
+// below for details. We use an enum class so that when strict aliasing is
+// enabled, the compiler knows ctrl_t doesn't alias other types.
+enum class ctrl_t : int8_t {
+ kEmpty = -128, // 0b10000000
+ kDeleted = -2, // 0b11111110
+ kSentinel = -1, // 0b11111111
+};
+static_assert(
+ (static_cast<int8_t>(ctrl_t::kEmpty) &
+ static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
+ "Special markers need to have the MSB to make checking for them efficient");
+static_assert(
+ ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
+ "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(
+ ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
+ "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
+ "registers (pcmpeqd xmm, xmm)");
+static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
+ "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
+ "existence efficient (psignb xmm, xmm)");
+static_assert(
+ (~static_cast<int8_t>(ctrl_t::kEmpty) &
+ ~static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
+ "shared by ctrl_t::kSentinel to make the scalar test for "
+ "MatchEmptyOrDeleted() efficient");
+static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
+ "ctrl_t::kDeleted must be -2 to make the implementation of "
+ "ConvertSpecialToEmptyAndFullToDeleted efficient");
+
+// A single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find().
+ABSL_DLL extern const ctrl_t kEmptyGroup[16];
+inline ctrl_t* EmptyGroup() {
+ return const_cast<ctrl_t*>(kEmptyGroup);
+}
+
+// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
+// randomize insertion order within groups.
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
+
+// Returns a hash seed.
+//
+// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
+// non-determinism of iteration order in most cases.
+inline size_t HashSeed(const ctrl_t* ctrl) {
+ // The low bits of the pointer have little or no entropy because of
+ // alignment. We shift the pointer to try to use higher entropy bits. A
+ // good number seems to be 12 bits, because that aligns with page size.
+ return reinterpret_cast<uintptr_t>(ctrl) >> 12;
+}
+
+inline size_t H1(size_t hash, const ctrl_t* ctrl) {
+ return (hash >> 7) ^ HashSeed(ctrl);
+}
+inline h2_t H2(size_t hash) { return hash & 0x7F; }
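+
+// For example (illustrative): for hash == 0x12345, H2(hash) == 0x45 (the low
+// 7 bits) and H1(hash, ctrl) == 0x246 ^ HashSeed(ctrl) (the remaining bits,
+// mixed with the per-table seed).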
+
+inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
+inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
+inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+
+// https://github.com/abseil/abseil-cpp/issues/209
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
+// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
+// Work around this by using the portable implementation of Group
+// when using -funsigned-char under GCC.
+inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
+#if defined(__GNUC__) && !defined(__clang__)
+ if (std::is_unsigned<char>::value) {
+ const __m128i mask = _mm_set1_epi8(0x80);
+ const __m128i diff = _mm_subs_epi8(b, a);
+ return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
+ }
+#endif
+ return _mm_cmpgt_epi8(a, b);
+}
+
+struct GroupSse2Impl {
+ static constexpr size_t kWidth = 16; // the number of slots per group
+
+ explicit GroupSse2Impl(const ctrl_t* pos) {
+ ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
+ }
+
+ // Returns a bitmask representing the positions of slots that match hash.
+ BitMask<uint32_t, kWidth> Match(h2_t hash) const {
+ auto match = _mm_set1_epi8(hash);
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+ }
+
+ // Returns a bitmask representing the positions of empty slots.
+ BitMask<uint32_t, kWidth> MatchEmpty() const {
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+ // This only works because ctrl_t::kEmpty is -128.
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+#else
+ return Match(static_cast<h2_t>(ctrl_t::kEmpty));
+#endif
+ }
+
+ // Returns a bitmask representing the positions of empty or deleted slots.
+ BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+ }
+
+  // Returns the number of leading empty or deleted elements in the group.
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
+ return TrailingZeros(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ auto msbs = _mm_set1_epi8(static_cast<char>(-128));
+ auto x126 = _mm_set1_epi8(126);
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+ auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
+#else
+ auto zero = _mm_setzero_si128();
+ auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
+ auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
+#endif
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
+ }
+
+ __m128i ctrl;
+};
+#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+
+struct GroupPortableImpl {
+ static constexpr size_t kWidth = 8;
+
+ explicit GroupPortableImpl(const ctrl_t* pos)
+ : ctrl(little_endian::Load64(pos)) {}
+
+ BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+ // For the technique, see:
+ // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
+ // (Determine if a word has a byte equal to n).
+ //
+ // Caveat: there are false positives but:
+ // - they only occur if there is a real match
+ // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
+ // - they will be handled gracefully by subsequent checks in code
+ //
+ // Example:
+ // v = 0x1716151413121110
+ // hash = 0x12
+ // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl ^ (lsbs * hash);
+ return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
+ }
+
+ BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+ }
+
+ BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+ }
+
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
+ return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl & msbs;
+ auto res = (~x + (x >> 7)) & ~lsbs;
+ little_endian::Store64(dst, res);
+ }
+
+ uint64_t ctrl;
+};
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+using Group = GroupSse2Impl;
+#else
+using Group = GroupPortableImpl;
+#endif
+
+// The number of cloned control bytes that we copy from the beginning to the
+// end of the control bytes array.
+constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set;
+
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
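+
+// For example (illustrative): valid capacities have the form 2^k - 1
+// (1, 3, 7, 15, ...); IsValidCapacity(8) is false, IsValidCapacity(15) is
+// true.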
+
+// PRECONDITION:
+// IsValidCapacity(capacity)
+// ctrl[capacity] == ctrl_t::kSentinel
+// ctrl[i] != ctrl_t::kSentinel for all i < capacity
+// Applies mapping for every byte in ctrl:
+// DELETED -> EMPTY
+// EMPTY -> EMPTY
+// FULL -> DELETED
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
+
+// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
+inline size_t NormalizeCapacity(size_t n) {
+ return n ? ~size_t{} >> countl_zero(n) : 1;
+}
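+
+// For example (illustrative): NormalizeCapacity(5) == 7 and
+// NormalizeCapacity(8) == 15; values that are already of the form 2^k - 1 are
+// returned unchanged.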
+
+// General notes on capacity/growth methods below:
+// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
+// average of two empty slots per group.
+// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
+// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
+// never need to probe (the whole table fits in one group) so we don't need a
+// load factor less than 1.
+
+// Given `capacity` of the table, returns the size (i.e. number of full slots)
+// at which we should grow the capacity.
+inline size_t CapacityToGrowth(size_t capacity) {
+ assert(IsValidCapacity(capacity));
+ // `capacity*7/8`
+ if (Group::kWidth == 8 && capacity == 7) {
+ // x-x/8 does not work when x==7.
+ return 6;
+ }
+ return capacity - capacity / 8;
+}
+// From the desired "growth" to a lower bound of the necessary capacity.
+// The result might not be a valid capacity and may require NormalizeCapacity().
+inline size_t GrowthToLowerboundCapacity(size_t growth) {
+ // `growth*8/7`
+ if (Group::kWidth == 8 && growth == 7) {
+ // x+(x-1)/7 does not work when x==7.
+ return 8;
+ }
+ return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
+}
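+
+// For example (a quick check, assuming Group::kWidth == 16):
+// CapacityToGrowth(15) == 14 (15 - 15/8), and GrowthToLowerboundCapacity(14)
+// == 15 (14 + 13/7), which NormalizeCapacity() leaves at 15.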
+
+template <class InputIter>
+size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
+ size_t bucket_count) {
+ if (bucket_count != 0) {
+ return bucket_count;
+ }
+ using InputIterCategory =
+ typename std::iterator_traits<InputIter>::iterator_category;
+ if (std::is_base_of<std::random_access_iterator_tag,
+ InputIterCategory>::value) {
+ return GrowthToLowerboundCapacity(
+ static_cast<size_t>(std::distance(first, last)));
+ }
+ return 0;
+}
+
+inline void AssertIsFull(ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, or the table might have rehashed.");
+}
+
+inline void AssertIsValid(ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, or the table might have rehashed.");
+}
+
+struct FindInfo {
+ size_t offset;
+ size_t probe_length;
+};
+
+// The representation of the object has two modes:
+// - small: For capacities < kWidth-1
+// - large: For the rest.
+//
+// Differences:
+// - In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
+//
+// - In small mode only the first `capacity()` control bytes after the
+// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// represent a real slot. This is important to take into account on
+// find_first_non_full(), where we never try ShouldInsertBackwards() for
+// small tables.
+inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+
+inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
+ size_t capacity) {
+ return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+}
+
+// Probes the raw_hash_set with the probe sequence for hash and returns the
+// pointer to the first empty or deleted slot.
+// NOTE: this function must work with tables having both ctrl_t::kEmpty and
+// ctrl_t::kDeleted in one group. Such tables appear during
+// drop_deletes_without_resize.
+//
+// This function is very useful when insertions happen and:
+// - the input is already a set
+// - there are enough slots
+// - the element with the hash is not in the table
+template <typename = void>
+inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
+ size_t capacity) {
+ auto seq = probe(ctrl, hash, capacity);
+ while (true) {
+ Group g{ctrl + seq.offset()};
+ auto mask = g.MatchEmptyOrDeleted();
+ if (mask) {
+#if !defined(NDEBUG)
+ // We want to add entropy even when ASLR is not enabled.
+ // In debug build we will randomly insert in either the front or back of
+ // the group.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
+ return {seq.offset(mask.HighestBitSet()), seq.index()};
+ }
+#endif
+ return {seq.offset(mask.LowestBitSet()), seq.index()};
+ }
+ seq.next();
+ assert(seq.index() <= capacity && "full table!");
+ }
+}
+
+// An extern template declaration for an inline function preserves the
+// possibility of inlining. When the compiler decides not to inline it, no
+// symbols will be added to the corresponding translation unit.
+extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
+
+// Reset all ctrl bytes back to ctrl_t::kEmpty, except the sentinel.
+inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
+ size_t slot_size) {
+ std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
+ capacity + 1 + NumClonedBytes());
+ ctrl[capacity] = ctrl_t::kSentinel;
+ SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
+}
+
+// Sets the control byte, and if `i < NumClonedBytes()`, sets the cloned byte
+// at the end too.
+inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
+ const void* slot, size_t slot_size) {
+ assert(i < capacity);
+
+ auto* slot_i = static_cast<const char*>(slot) + i * slot_size;
+ if (IsFull(h)) {
+ SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
+ } else {
+ SanitizerPoisonMemoryRegion(slot_i, slot_size);
+ }
+
+ ctrl[i] = h;
+ ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
+}
+
+inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
+ const void* slot, size_t slot_size) {
+ SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
+}
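+
+// For example (a worked check of the index arithmetic above): with capacity
+// 15 and NumClonedBytes() == 15, setting i == 3 also writes index
+// ((3 - 15) & 15) + (15 & 15) == 4 + 15 == 19 -- the clone of slot 3
+// (capacity + 1 + 3). When i >= NumClonedBytes(), the expression collapses to
+// i and the second store simply rewrites ctrl[i].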
+
+// The allocated block consists of `capacity + 1 + NumClonedBytes()` control
+// bytes followed by `capacity` slots, which must be aligned to `slot_align`.
+// SlotOffset returns the offset of the slots into the allocated block.
+inline size_t SlotOffset(size_t capacity, size_t slot_align) {
+ assert(IsValidCapacity(capacity));
+ const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
+ return (num_control_bytes + slot_align - 1) & (~slot_align + 1);
+}
+
+// Returns the size of the allocated block. See also above comment.
+inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
+ return SlotOffset(capacity, slot_align) + capacity * slot_size;
+}
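+
+// For example (illustrative, with Group::kWidth == 16): for capacity 15,
+// slot_size 8 and slot_align 8 there are 15 + 1 + 15 == 31 control bytes;
+// SlotOffset() rounds this up to 32, and AllocSize() == 32 + 15 * 8 == 152.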
+
+// Policy: a policy defines how to perform different operations on
+// the slots of the hashtable (see hash_policy_traits.h for the full interface
+// of policy).
+//
+// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
+// functor should accept a key and return size_t as hash. For best performance
+// it is important that the hash function provides high entropy across all bits
+// of the hash.
+//
+// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
+// should accept two (of possibly different type) keys and return a bool: true
+// if they are equal, false if they are not. If two keys compare equal, then
+// their hash values as defined by Hash MUST be equal.
+//
+// Allocator: an Allocator
+// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
+// the storage of the hashtable will be allocated and the elements will be
+// constructed and destroyed.
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set {
+ using PolicyTraits = hash_policy_traits<Policy>;
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
+
+ public:
+ using init_type = typename PolicyTraits::init_type;
+ using key_type = typename PolicyTraits::key_type;
+ // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
+ // code fixes!
+ using slot_type = typename PolicyTraits::slot_type;
+ using allocator_type = Alloc;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ using hasher = Hash;
+ using key_equal = Eq;
+ using policy_type = Policy;
+ using value_type = typename PolicyTraits::value_type;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = typename y_absl::allocator_traits<
+ allocator_type>::template rebind_traits<value_type>::pointer;
+ using const_pointer = typename y_absl::allocator_traits<
+ allocator_type>::template rebind_traits<value_type>::const_pointer;
+
+ // Alias used for heterogeneous lookup functions.
+ // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+ // `key_type` otherwise. It permits template argument deduction on `K` for the
+ // transparent case.
+ template <class K>
+ using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ private:
+ // Give an early error when key_type is not hashable/eq.
+ auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
+ auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
+
+ using AllocTraits = y_absl::allocator_traits<allocator_type>;
+ using SlotAlloc = typename y_absl::allocator_traits<
+ allocator_type>::template rebind_alloc<slot_type>;
+ using SlotAllocTraits = typename y_absl::allocator_traits<
+ allocator_type>::template rebind_traits<slot_type>;
+
+ static_assert(std::is_lvalue_reference<reference>::value,
+ "Policy::element() must return a reference");
+
+ template <typename T>
+ struct SameAsElementReference
+ : std::is_same<typename std::remove_cv<
+ typename std::remove_reference<reference>::type>::type,
+ typename std::remove_cv<
+ typename std::remove_reference<T>::type>::type> {};
+
+ // An enabler for insert(T&&): T must be convertible to init_type or be the
+ // same as [cv] value_type [ref].
+ // Note: we separate SameAsElementReference into its own type to avoid using
+ // reference unless we need to. MSVC doesn't seem to like it in some
+ // cases.
+ template <class T>
+ using RequiresInsertable = typename std::enable_if<
+ y_absl::disjunction<std::is_convertible<T, init_type>,
+ SameAsElementReference<T>>::value,
+ int>::type;
+
+ // RequiresNotInit is a workaround for gcc prior to 7.1.
+ // See https://godbolt.org/g/Y4xsUh.
+ template <class T>
+ using RequiresNotInit =
+ typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
+
+ template <class... Ts>
+ using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
+
+ public:
+ static_assert(std::is_same<pointer, value_type*>::value,
+ "Allocators with custom pointer types are not supported");
+ static_assert(std::is_same<const_pointer, const value_type*>::value,
+ "Allocators with custom pointer types are not supported");
+
+ class iterator {
+ friend class raw_hash_set;
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = typename raw_hash_set::value_type;
+ using reference =
+ y_absl::conditional_t<PolicyTraits::constant_iterators::value,
+ const value_type&, value_type&>;
+ using pointer = y_absl::remove_reference_t<reference>*;
+ using difference_type = typename raw_hash_set::difference_type;
+
+ iterator() {}
+
+ // PRECONDITION: not an end() iterator.
+ reference operator*() const {
+ AssertIsFull(ctrl_);
+ return PolicyTraits::element(slot_);
+ }
+
+ // PRECONDITION: not an end() iterator.
+ pointer operator->() const { return &operator*(); }
+
+ // PRECONDITION: not an end() iterator.
+ iterator& operator++() {
+ AssertIsFull(ctrl_);
+ ++ctrl_;
+ ++slot_;
+ skip_empty_or_deleted();
+ return *this;
+ }
+ // PRECONDITION: not an end() iterator.
+ iterator operator++(int) {
+ auto tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ friend bool operator==(const iterator& a, const iterator& b) {
+ AssertIsValid(a.ctrl_);
+ AssertIsValid(b.ctrl_);
+ return a.ctrl_ == b.ctrl_;
+ }
+ friend bool operator!=(const iterator& a, const iterator& b) {
+ return !(a == b);
+ }
+
+ private:
+ iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
+ // This assumption helps the compiler know that any non-end iterator is
+ // not equal to any end iterator.
+ ABSL_INTERNAL_ASSUME(ctrl != nullptr);
+ }
+
+ void skip_empty_or_deleted() {
+ while (IsEmptyOrDeleted(*ctrl_)) {
+ uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
+ ctrl_ += shift;
+ slot_ += shift;
+ }
+ if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
+ }
+
+ ctrl_t* ctrl_ = nullptr;
+ // To avoid uninitialized member warnings, put slot_ in an anonymous union.
+ // The member is not initialized on singleton and end iterators.
+ union {
+ slot_type* slot_;
+ };
+ };
+
+ class const_iterator {
+ friend class raw_hash_set;
+
+ public:
+ using iterator_category = typename iterator::iterator_category;
+ using value_type = typename raw_hash_set::value_type;
+ using reference = typename raw_hash_set::const_reference;
+ using pointer = typename raw_hash_set::const_pointer;
+ using difference_type = typename raw_hash_set::difference_type;
+
+ const_iterator() {}
+ // Implicit construction from iterator.
+ const_iterator(iterator i) : inner_(std::move(i)) {}
+
+ reference operator*() const { return *inner_; }
+ pointer operator->() const { return inner_.operator->(); }
+
+ const_iterator& operator++() {
+ ++inner_;
+ return *this;
+ }
+ const_iterator operator++(int) { return inner_++; }
+
+ friend bool operator==(const const_iterator& a, const const_iterator& b) {
+ return a.inner_ == b.inner_;
+ }
+ friend bool operator!=(const const_iterator& a, const const_iterator& b) {
+ return !(a == b);
+ }
+
+ private:
+ const_iterator(const ctrl_t* ctrl, const slot_type* slot)
+ : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
+
+ iterator inner_;
+ };
+
+ using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
+ using insert_return_type = InsertReturnType<iterator, node_type>;
+
+ raw_hash_set() noexcept(
+ std::is_nothrow_default_constructible<hasher>::value&&
+ std::is_nothrow_default_constructible<key_equal>::value&&
+ std::is_nothrow_default_constructible<allocator_type>::value) {}
+
+ explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
+ const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : ctrl_(EmptyGroup()),
+ settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
+ if (bucket_count) {
+ capacity_ = NormalizeCapacity(bucket_count);
+ initialize_slots();
+ }
+ }
+
+ raw_hash_set(size_t bucket_count, const hasher& hash,
+ const allocator_type& alloc)
+ : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
+
+ raw_hash_set(size_t bucket_count, const allocator_type& alloc)
+ : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
+
+ explicit raw_hash_set(const allocator_type& alloc)
+ : raw_hash_set(0, hasher(), key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
+ hash, eq, alloc) {
+ insert(first, last);
+ }
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
+ : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
+
+ // Instead of accepting std::initializer_list<value_type> as the first
+ // argument like std::unordered_set<value_type> does, we have two overloads
+ // that accept std::initializer_list<T> and std::initializer_list<init_type>.
+ // This is advantageous for performance.
+ //
+ // // Turns {"abc", "def"} into std::initializer_list<TString>, then
+ // // copies the strings into the set.
+ // std::unordered_set<TString> s = {"abc", "def"};
+ //
+ // // Turns {"abc", "def"} into std::initializer_list<const char*>, then
+ // // copies the strings into the set.
+ // y_absl::flat_hash_set<TString> s = {"abc", "def"};
+ //
+ // The same trick is used in insert().
+ //
+ // The enabler is necessary to prevent this constructor from triggering where
+ // the copy constructor is meant to be called.
+ //
+ // y_absl::flat_hash_set<int> a, b{a};
+ //
+ // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
+ : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init,
+ const allocator_type& alloc)
+ : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(const raw_hash_set& that)
+ : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
+ that.alloc_ref())) {}
+
+ raw_hash_set(const raw_hash_set& that, const allocator_type& a)
+ : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
+ reserve(that.size());
+ // Because the table is guaranteed to be empty, we can do something faster
+ // than a full `insert`.
+ for (const auto& v : that) {
+ const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
+ SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
+ sizeof(slot_type));
+ emplace_at(target.offset, v);
+ infoz().RecordInsert(hash, target.probe_length);
+ }
+ size_ = that.size();
+ growth_left() -= that.size();
+ }
+
+ raw_hash_set(raw_hash_set&& that) noexcept(
+ std::is_nothrow_copy_constructible<hasher>::value&&
+ std::is_nothrow_copy_constructible<key_equal>::value&&
+ std::is_nothrow_copy_constructible<allocator_type>::value)
+ : ctrl_(y_absl::exchange(that.ctrl_, EmptyGroup())),
+ slots_(y_absl::exchange(that.slots_, nullptr)),
+ size_(y_absl::exchange(that.size_, 0)),
+ capacity_(y_absl::exchange(that.capacity_, 0)),
+ // Hash, equality and allocator are copied instead of moved because
+ // `that` must be left valid. If Hash is std::function<Key>, moving it
+ // would create a nullptr functor that cannot be called.
+ settings_(y_absl::exchange(that.growth_left(), 0),
+ y_absl::exchange(that.infoz(), HashtablezInfoHandle()),
+ that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
+
+ raw_hash_set(raw_hash_set&& that, const allocator_type& a)
+ : ctrl_(EmptyGroup()),
+ slots_(nullptr),
+ size_(0),
+ capacity_(0),
+ settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
+ a) {
+ if (a == that.alloc_ref()) {
+ std::swap(ctrl_, that.ctrl_);
+ std::swap(slots_, that.slots_);
+ std::swap(size_, that.size_);
+ std::swap(capacity_, that.capacity_);
+ std::swap(growth_left(), that.growth_left());
+ std::swap(infoz(), that.infoz());
+ } else {
+ reserve(that.size());
+ // Note: this will copy elements of dense_set and unordered_set instead of
+ // moving them. This can be fixed if it ever becomes an issue.
+ for (auto& elem : that) insert(std::move(elem));
+ }
+ }
+
+ raw_hash_set& operator=(const raw_hash_set& that) {
+ raw_hash_set tmp(that,
+ AllocTraits::propagate_on_container_copy_assignment::value
+ ? that.alloc_ref()
+ : alloc_ref());
+ swap(tmp);
+ return *this;
+ }
+
+ raw_hash_set& operator=(raw_hash_set&& that) noexcept(
+ y_absl::allocator_traits<allocator_type>::is_always_equal::value&&
+ std::is_nothrow_move_assignable<hasher>::value&&
+ std::is_nothrow_move_assignable<key_equal>::value) {
+ // TODO(sbenza): We should only use the operations from the noexcept clause
+ // to make sure we actually adhere to that contract.
+ return move_assign(
+ std::move(that),
+ typename AllocTraits::propagate_on_container_move_assignment());
+ }
+
+ ~raw_hash_set() { destroy_slots(); }
+
+ iterator begin() {
+ auto it = iterator_at(0);
+ it.skip_empty_or_deleted();
+ return it;
+ }
+ iterator end() { return {}; }
+
+ const_iterator begin() const {
+ return const_cast<raw_hash_set*>(this)->begin();
+ }
+ const_iterator end() const { return {}; }
+ const_iterator cbegin() const { return begin(); }
+ const_iterator cend() const { return end(); }
+
+ bool empty() const { return !size(); }
+ size_t size() const { return size_; }
+ size_t capacity() const { return capacity_; }
+ size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
+
+ ABSL_ATTRIBUTE_REINITIALIZES void clear() {
+ // Iterating over this container is O(bucket_count()). When bucket_count()
+ // is much greater than size(), iteration becomes prohibitively expensive.
+ // For clear() it is more important to reuse the allocated array when the
+    // container is small, because allocation takes a comparatively long time
+    // compared to destroying the elements of the container. So we pick the
+ // largest bucket_count() threshold for which iteration is still fast and
+ // past that we simply deallocate the array.
+ if (capacity_ > 127) {
+ destroy_slots();
+
+ infoz().RecordClearedReservation();
+ } else if (capacity_) {
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (IsFull(ctrl_[i])) {
+ PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+ }
+ }
+ size_ = 0;
+ ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
+ reset_growth_left();
+ }
+ assert(empty());
+ infoz().RecordStorageChanged(0, capacity_);
+ }
+
+ // This overload kicks in when the argument is an rvalue of insertable and
+ // decomposable type other than init_type.
+ //
+ // flat_hash_map<TString, int> m;
+ // m.insert(std::make_pair("abc", 42));
+ // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+ // bug.
+ template <class T, RequiresInsertable<T> = 0,
+ class T2 = T,
+ typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
+ T* = nullptr>
+ std::pair<iterator, bool> insert(T&& value) {
+ return emplace(std::forward<T>(value));
+ }
+
+ // This overload kicks in when the argument is a bitfield or an lvalue of
+ // insertable and decomposable type.
+ //
+ // union { int n : 1; };
+ // flat_hash_set<int> s;
+ // s.insert(n);
+ //
+ // flat_hash_set<TString> s;
+ // const char* p = "hello";
+ // s.insert(p);
+ //
+ // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+ // RequiresInsertable<T> with RequiresInsertable<const T&>.
+ // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+ template <
+ class T, RequiresInsertable<T> = 0,
+ typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+ std::pair<iterator, bool> insert(const T& value) {
+ return emplace(value);
+ }
+
+ // This overload kicks in when the argument is an rvalue of init_type. Its
+ // purpose is to handle brace-init-list arguments.
+ //
+ // flat_hash_map<TString, int> s;
+ // s.insert({"abc", 42});
+ std::pair<iterator, bool> insert(init_type&& value) {
+ return emplace(std::move(value));
+ }
+
+ // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+ // bug.
+ template <class T, RequiresInsertable<T> = 0, class T2 = T,
+ typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
+ T* = nullptr>
+ iterator insert(const_iterator, T&& value) {
+ return insert(std::forward<T>(value)).first;
+ }
+
+ // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+ // RequiresInsertable<T> with RequiresInsertable<const T&>.
+ // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+ template <
+ class T, RequiresInsertable<T> = 0,
+ typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+ iterator insert(const_iterator, const T& value) {
+ return insert(value).first;
+ }
+
+ iterator insert(const_iterator, init_type&& value) {
+ return insert(std::move(value)).first;
+ }
+
+ template <class InputIt>
+ void insert(InputIt first, InputIt last) {
+ for (; first != last; ++first) emplace(*first);
+ }
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
+ void insert(std::initializer_list<T> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ void insert(std::initializer_list<init_type> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ insert_return_type insert(node_type&& node) {
+ if (!node) return {end(), false, node_type()};
+ const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
+ auto res = PolicyTraits::apply(
+ InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
+ elem);
+ if (res.second) {
+ CommonAccess::Reset(&node);
+ return {res.first, true, node_type()};
+ } else {
+ return {res.first, false, std::move(node)};
+ }
+ }
+
+ iterator insert(const_iterator, node_type&& node) {
+ auto res = insert(std::move(node));
+ node = std::move(res.node);
+ return res.position;
+ }
+
+ // This overload kicks in if we can deduce the key from args. This enables us
+ // to avoid constructing value_type if an entry with the same key already
+ // exists.
+ //
+ // For example:
+ //
+ // flat_hash_map<TString, TString> m = {{"abc", "def"}};
+ // // Creates no TString copies and makes no heap allocations.
+ // m.emplace("abc", "xyz");
+ template <class... Args, typename std::enable_if<
+ IsDecomposable<Args...>::value, int>::type = 0>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return PolicyTraits::apply(EmplaceDecomposable{*this},
+ std::forward<Args>(args)...);
+ }
+
+  // This overload kicks in if we cannot deduce the key from args. It
+  // constructs value_type unconditionally and then either moves it into the
+  // table or destroys it.
+ template <class... Args, typename std::enable_if<
+ !IsDecomposable<Args...>::value, int>::type = 0>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ alignas(slot_type) unsigned char raw[sizeof(slot_type)];
+ slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+
+ PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+ const auto& elem = PolicyTraits::element(slot);
+ return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
+ }
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator, Args&&... args) {
+ return emplace(std::forward<Args>(args)...).first;
+ }
+
+ // Extension API: support for lazy emplace.
+ //
+ // Looks up key in the table. If found, returns the iterator to the element.
+ // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
+ //
+ // `f` must abide by several restrictions:
+ // - it MUST call `raw_hash_set::constructor` with arguments as if a
+ // `raw_hash_set::value_type` is constructed,
+ // - it MUST NOT access the container before the call to
+ // `raw_hash_set::constructor`, and
+ // - it MUST NOT erase the lazily emplaced element.
+ // Doing any of these is undefined behavior.
+ //
+ // For example:
+ //
+  //   std::unordered_set<ArenaString> s;
+  //   // Constructs ArenaString even if "abc" is already in the set.
+  //   s.insert(ArenaString(&arena, "abc"));
+  //
+  //   flat_hash_set<ArenaString> s;
+  //   // Constructs ArenaString only if "abc" is not yet in the set.
+ // s.lazy_emplace("abc", [&](const constructor& ctor) {
+ // ctor(&arena, "abc");
+ // });
+ //
+ // WARNING: This API is currently experimental. If there is a way to implement
+ // the same thing with the rest of the API, prefer that.
+ class constructor {
+ friend class raw_hash_set;
+
+ public:
+ template <class... Args>
+ void operator()(Args&&... args) const {
+ assert(*slot_);
+ PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
+ *slot_ = nullptr;
+ }
+
+ private:
+ constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
+
+ allocator_type* alloc_;
+ slot_type** slot_;
+ };
+
+ template <class K = key_type, class F>
+ iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+ auto res = find_or_prepare_insert(key);
+ if (res.second) {
+ slot_type* slot = slots_ + res.first;
+ std::forward<F>(f)(constructor(&alloc_ref(), &slot));
+ assert(!slot);
+ }
+ return iterator_at(res.first);
+ }
+
+ // Extension API: support for heterogeneous keys.
+ //
+ // std::unordered_set<TString> s;
+ // // Turns "abc" into TString.
+ // s.erase("abc");
+ //
+ // flat_hash_set<TString> s;
+ // // Uses "abc" directly without copying it into TString.
+ // s.erase("abc");
+ template <class K = key_type>
+ size_type erase(const key_arg<K>& key) {
+ auto it = find(key);
+ if (it == end()) return 0;
+ erase(it);
+ return 1;
+ }
+
+ // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
+ // this method returns void to reduce algorithmic complexity to O(1). The
+ // iterator is invalidated, so any increment should be done before calling
+ // erase. In order to erase while iterating across a map, use the following
+ // idiom (which also works for standard containers):
+ //
+ // for (auto it = m.begin(), end = m.end(); it != end;) {
+ // // `erase()` will invalidate `it`, so advance `it` first.
+ // auto copy_it = it++;
+ // if (<pred>) {
+ // m.erase(copy_it);
+ // }
+ // }
+ void erase(const_iterator cit) { erase(cit.inner_); }
+
+  // This overload is necessary because otherwise erase<K>(const K&) would be
+  // a better match if a non-const iterator is passed as an argument.
+ void erase(iterator it) {
+ AssertIsFull(it.ctrl_);
+ PolicyTraits::destroy(&alloc_ref(), it.slot_);
+ erase_meta_only(it);
+ }
+
+ iterator erase(const_iterator first, const_iterator last) {
+ while (first != last) {
+ erase(first++);
+ }
+ return last.inner_;
+ }
+
+ // Moves elements from `src` into `this`.
+ // If the element already exists in `this`, it is left unmodified in `src`.
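+  //
+  // A sketch of the intended use (illustrative only):
+  //
+  //   flat_hash_set<TString> dst = {"x", "y"};
+  //   flat_hash_set<TString> src = {"y", "z"};
+  //   dst.merge(src);
+  //   // dst now holds {"x", "y", "z"}; the duplicate "y" stays in src.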
+ template <typename H, typename E>
+ void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
+ assert(this != &src);
+ for (auto it = src.begin(), e = src.end(); it != e;) {
+ auto next = std::next(it);
+ if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
+ PolicyTraits::element(it.slot_))
+ .second) {
+ src.erase_meta_only(it);
+ }
+ it = next;
+ }
+ }
+
+ template <typename H, typename E>
+ void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
+ merge(src);
+ }
+
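+  // Extracts the element at `position` as a node handle; a sketch of usage
+  // (illustrative only):
+  //
+  //   flat_hash_set<TString> s = {"abc"};
+  //   auto node = s.extract(s.begin());
+  //   // s is now empty; the element lives on in `node` and can be
+  //   // re-inserted into another set via insert(std::move(node)).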
+ node_type extract(const_iterator position) {
+ AssertIsFull(position.inner_.ctrl_);
+ auto node =
+ CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
+ erase_meta_only(position);
+ return node;
+ }
+
+ template <
+ class K = key_type,
+ typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
+ node_type extract(const key_arg<K>& key) {
+ auto it = find(key);
+ return it == end() ? node_type() : extract(const_iterator{it});
+ }
+
+ void swap(raw_hash_set& that) noexcept(
+ IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
+ IsNoThrowSwappable<allocator_type>(
+ typename AllocTraits::propagate_on_container_swap{})) {
+ using std::swap;
+ swap(ctrl_, that.ctrl_);
+ swap(slots_, that.slots_);
+ swap(size_, that.size_);
+ swap(capacity_, that.capacity_);
+ swap(growth_left(), that.growth_left());
+ swap(hash_ref(), that.hash_ref());
+ swap(eq_ref(), that.eq_ref());
+ swap(infoz(), that.infoz());
+ SwapAlloc(alloc_ref(), that.alloc_ref(),
+ typename AllocTraits::propagate_on_container_swap{});
+ }
+
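+  // Rehashes the table to hold at least `n` slots; `rehash(0)` shrinks the
+  // table to fit its current size. A sketch of usage (illustrative only):
+  //
+  //   flat_hash_set<int> s = {1, 2, 3};
+  //   s.rehash(0);  // reclaim capacity left over from earlier growth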
+ void rehash(size_t n) {
+ if (n == 0 && capacity_ == 0) return;
+ if (n == 0 && size_ == 0) {
+ destroy_slots();
+ infoz().RecordStorageChanged(0, 0);
+ infoz().RecordClearedReservation();
+ return;
+ }
+
+ // bitor is a faster way of doing `max` here. We will round up to the next
+ // power-of-2-minus-1, so bitor is good enough.
+ auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
+ // n == 0 unconditionally rehashes as per the standard.
+ if (n == 0 || m > capacity_) {
+ resize(m);
+
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ infoz().RecordReservation(n);
+ }
+ }
+
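+  // Reserves room for at least `n` elements so that subsequent insertions up
+  // to that size do not rehash. A sketch of usage (illustrative only):
+  //
+  //   flat_hash_map<TString, int> m;
+  //   m.reserve(1000);  // one allocation up front instead of repeated growth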
+ void reserve(size_t n) {
+ if (n > size() + growth_left()) {
+ size_t m = GrowthToLowerboundCapacity(n);
+ resize(NormalizeCapacity(m));
+
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ infoz().RecordReservation(n);
+ }
+ }
+
+ // Extension API: support for heterogeneous keys.
+ //
+ // std::unordered_set<TString> s;
+ // // Turns "abc" into TString.
+ // s.count("abc");
+ //
+ // ch_set<TString> s;
+ // // Uses "abc" directly without copying it into TString.
+ // s.count("abc");
+ template <class K = key_type>
+ size_t count(const key_arg<K>& key) const {
+ return find(key) == end() ? 0 : 1;
+ }
+
+  // Issues CPU prefetch instructions for the memory needed to find or insert
+  // a key. Like all lookup functions, this supports heterogeneous keys.
+ //
+ // NOTE: This is a very low level operation and should not be used without
+ // specific benchmarks indicating its importance.
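+  //
+  // A sketch of the intended pattern (illustrative only):
+  //
+  //   m.prefetch(key);
+  //   /* ... unrelated work that hides the memory latency ... */
+  //   m.find(key);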
+ template <class K = key_type>
+ void prefetch(const key_arg<K>& key) const {
+ (void)key;
+#if defined(__GNUC__)
+ prefetch_heap_block();
+ auto seq = probe(ctrl_, hash_ref()(key), capacity_);
+ __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
+ __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
+#endif // __GNUC__
+ }
+
+ // The API of find() has two extensions.
+ //
+ // 1. The hash can be passed by the user. It must be equal to the hash of the
+ // key.
+ //
+  // 2. The type of the key argument doesn't have to be key_type. This is
+  //    so-called heterogeneous key support.
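+  //
+  // For example (a sketch; assumes the hash was computed up front):
+  //
+  //   flat_hash_set<TString> s = {"abc"};
+  //   const size_t h = s.hash_function()(TString("abc"));
+  //   auto it = s.find("abc", h);  // heterogeneous lookup, no extra hashing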
+ template <class K = key_type>
+ iterator find(const key_arg<K>& key, size_t hash) {
+ auto seq = probe(ctrl_, hash, capacity_);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+ EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(slots_ + seq.offset(i)))))
+ return iterator_at(seq.offset(i));
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+ seq.next();
+ assert(seq.index() <= capacity_ && "full table!");
+ }
+ }
+ template <class K = key_type>
+ iterator find(const key_arg<K>& key) {
+ prefetch_heap_block();
+ return find(key, hash_ref()(key));
+ }
+
+ template <class K = key_type>
+ const_iterator find(const key_arg<K>& key, size_t hash) const {
+ return const_cast<raw_hash_set*>(this)->find(key, hash);
+ }
+ template <class K = key_type>
+ const_iterator find(const key_arg<K>& key) const {
+ prefetch_heap_block();
+ return find(key, hash_ref()(key));
+ }
+
+ template <class K = key_type>
+ bool contains(const key_arg<K>& key) const {
+ return find(key) != end();
+ }
+
+ template <class K = key_type>
+ std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
+ auto it = find(key);
+ if (it != end()) return {it, std::next(it)};
+ return {it, it};
+ }
+ template <class K = key_type>
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_arg<K>& key) const {
+ auto it = find(key);
+ if (it != end()) return {it, std::next(it)};
+ return {it, it};
+ }
+
+ size_t bucket_count() const { return capacity_; }
+ float load_factor() const {
+ return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
+ }
+ float max_load_factor() const { return 1.0f; }
+ void max_load_factor(float) {
+ // Does nothing.
+ }
+
+ hasher hash_function() const { return hash_ref(); }
+ key_equal key_eq() const { return eq_ref(); }
+ allocator_type get_allocator() const { return alloc_ref(); }
+
+ friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
+ if (a.size() != b.size()) return false;
+ const raw_hash_set* outer = &a;
+ const raw_hash_set* inner = &b;
+ if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
+ for (const value_type& elem : *outer)
+ if (!inner->has_element(elem)) return false;
+ return true;
+ }
+
+ friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
+ return !(a == b);
+ }
+
+ friend void swap(raw_hash_set& a,
+ raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
+ a.swap(b);
+ }
+
+ private:
+ template <class Container, typename Enabler>
+ friend struct y_absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess;
+
+ struct FindElement {
+ template <class K, class... Args>
+ const_iterator operator()(const K& key, Args&&...) const {
+ return s.find(key);
+ }
+ const raw_hash_set& s;
+ };
+
+ struct HashElement {
+ template <class K, class... Args>
+ size_t operator()(const K& key, Args&&...) const {
+ return h(key);
+ }
+ const hasher& h;
+ };
+
+ template <class K1>
+ struct EqualElement {
+ template <class K2, class... Args>
+ bool operator()(const K2& lhs, Args&&...) const {
+ return eq(lhs, rhs);
+ }
+ const K1& rhs;
+ const key_equal& eq;
+ };
+
+ struct EmplaceDecomposable {
+ template <class K, class... Args>
+ std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
+ auto res = s.find_or_prepare_insert(key);
+ if (res.second) {
+ s.emplace_at(res.first, std::forward<Args>(args)...);
+ }
+ return {s.iterator_at(res.first), res.second};
+ }
+ raw_hash_set& s;
+ };
+
+ template <bool do_destroy>
+ struct InsertSlot {
+ template <class K, class... Args>
+ std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
+ auto res = s.find_or_prepare_insert(key);
+ if (res.second) {
+ PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
+ } else if (do_destroy) {
+ PolicyTraits::destroy(&s.alloc_ref(), &slot);
+ }
+ return {s.iterator_at(res.first), res.second};
+ }
+ raw_hash_set& s;
+ // Constructed slot. Either moved into place or destroyed.
+ slot_type&& slot;
+ };
+
+ // "erases" the object from the container, except that it doesn't actually
+ // destroy the object. It only updates all the metadata of the class.
+ // This can be used in conjunction with Policy::transfer to move the object to
+ // another place.
+ void erase_meta_only(const_iterator it) {
+ assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
+ --size_;
+ const size_t index = it.inner_.ctrl_ - ctrl_;
+ const size_t index_before = (index - Group::kWidth) & capacity_;
+ const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
+ const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
+
+    // We count how many consecutive non-empty slots we have to the right and
+    // to the left of `it`. If the sum is >= kWidth then there is at least one
+    // probe window that might have seen a full group.
+ bool was_never_full =
+ empty_before && empty_after &&
+ static_cast<size_t>(empty_after.TrailingZeros() +
+ empty_before.LeadingZeros()) < Group::kWidth;
+
+ SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
+ capacity_, ctrl_, slots_, sizeof(slot_type));
+ growth_left() += was_never_full;
+ infoz().RecordErase();
+ }
+
+ void initialize_slots() {
+ assert(capacity_);
+    // Folks with custom allocators often make unwarranted assumptions about
+    // the behavior of their classes vis-a-vis trivial destructibility and what
+    // calls they will or won't make. Avoid sampling for people with custom
+    // allocators to get us out of this mess. This is not a hard guarantee but
+    // a workaround while we plan the exact guarantee we want to provide.
+ //
+ // People are often sloppy with the exact type of their allocator (sometimes
+ // it has an extra const or is missing the pair, but rebinds made it work
+ // anyway). To avoid the ambiguity, we work off SlotAlloc which we have
+ // bound more carefully.
+ if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
+ slots_ == nullptr) {
+ infoz() = Sample(sizeof(slot_type));
+ }
+
+ char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
+ &alloc_ref(),
+ AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))));
+ ctrl_ = reinterpret_cast<ctrl_t*>(mem);
+ slots_ = reinterpret_cast<slot_type*>(
+ mem + SlotOffset(capacity_, alignof(slot_type)));
+ ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
+ reset_growth_left();
+ infoz().RecordStorageChanged(size_, capacity_);
+ }
+
+ void destroy_slots() {
+ if (!capacity_) return;
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (IsFull(ctrl_[i])) {
+ PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+ }
+ }
+
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+ Deallocate<alignof(slot_type)>(
+ &alloc_ref(), ctrl_,
+ AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)));
+ ctrl_ = EmptyGroup();
+ slots_ = nullptr;
+ size_ = 0;
+ capacity_ = 0;
+ growth_left() = 0;
+ }
+
+ void resize(size_t new_capacity) {
+ assert(IsValidCapacity(new_capacity));
+ auto* old_ctrl = ctrl_;
+ auto* old_slots = slots_;
+ const size_t old_capacity = capacity_;
+ capacity_ = new_capacity;
+ initialize_slots();
+
+ size_t total_probe_length = 0;
+ for (size_t i = 0; i != old_capacity; ++i) {
+ if (IsFull(old_ctrl[i])) {
+ size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+ PolicyTraits::element(old_slots + i));
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+ }
+ }
+ if (old_capacity) {
+ SanitizerUnpoisonMemoryRegion(old_slots,
+ sizeof(slot_type) * old_capacity);
+ Deallocate<alignof(slot_type)>(
+ &alloc_ref(), old_ctrl,
+ AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
+ }
+ infoz().RecordRehash(total_probe_length);
+ }
+
+ void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
+ assert(IsValidCapacity(capacity_));
+ assert(!is_small(capacity_));
+ // Algorithm:
+ // - mark all DELETED slots as EMPTY
+ // - mark all FULL slots as DELETED
+ // - for each slot marked as DELETED
+ // hash = Hash(element)
+ // target = find_first_non_full(hash)
+ // if target is in the same group
+ // mark slot as FULL
+ // else if target is EMPTY
+ // transfer element to target
+ // mark slot as EMPTY
+ // mark target as FULL
+ // else if target is DELETED
+ // swap current element with target element
+ // mark target as FULL
+ // repeat procedure for current slot with moved from element (target)
+ ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
+ alignas(slot_type) unsigned char raw[sizeof(slot_type)];
+ size_t total_probe_length = 0;
+ slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (!IsDeleted(ctrl_[i])) continue;
+ const size_t hash = PolicyTraits::apply(
+ HashElement{hash_ref()}, PolicyTraits::element(slots_ + i));
+ const FindInfo target = find_first_non_full(ctrl_, hash, capacity_);
+ const size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
+
+      // Verify whether the old and new i fall within the same group with
+      // respect to the hash. If they do, we don't need to move the object, as
+      // it already sits in the best probe position we can achieve.
+ const size_t probe_offset = probe(ctrl_, hash, capacity_).offset();
+ const auto probe_index = [probe_offset, this](size_t pos) {
+ return ((pos - probe_offset) & capacity_) / Group::kWidth;
+ };
+
+ // Element doesn't move.
+ if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
+ SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
+ continue;
+ }
+ if (IsEmpty(ctrl_[new_i])) {
+ // Transfer element to the empty spot.
+ // SetCtrl poisons/unpoisons the slots so we have to call it at the
+ // right time.
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
+ SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type));
+ } else {
+ assert(IsDeleted(ctrl_[new_i]));
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
+ // Until we are done rehashing, DELETED marks previously FULL slots.
+ // Swap i and new_i elements.
+ PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
+ PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
+ --i; // repeat
+ }
+ }
+ reset_growth_left();
+ infoz().RecordRehash(total_probe_length);
+ }
+
+ void rehash_and_grow_if_necessary() {
+ if (capacity_ == 0) {
+ resize(1);
+ } else if (capacity_ > Group::kWidth &&
+               // Do these calculations in 64-bit to avoid overflow.
+ size() * uint64_t{32} <= capacity_ * uint64_t{25}) {
+ // Squash DELETED without growing if there is enough capacity.
+ //
+ // Rehash in place if the current size is <= 25/32 of capacity_.
+ // Rationale for such a high factor: 1) drop_deletes_without_resize() is
+ // faster than resize, and 2) it takes quite a bit of work to add
+      // tombstones. In the worst case, it seems to take approximately 4
+      // insert/erase pairs to create a single tombstone, and so if we are
+      // rehashing because of tombstones, we can afford to rehash-in-place as
+      // long as we are reclaiming at least 1/8 the capacity without doing more
+      // than 2X the work. (Where "work" is defined to be size() for rehashing
+      // or rehashing in place, and 1 for an insert or erase.) But rehashing in
+      // place is faster per operation than inserting or even doubling the size
+      // of the table, so we can actually afford to reclaim even less space
+      // from a resize-in-place. The decision is to rehash in place if we can
+      // reclaim about 1/8th of the usable capacity (specifically 3/28 of the
+      // capacity), which means that the total cost of rehashing will be a
+      // small fraction of the total work.
+ //
+ // Here is output of an experiment using the BM_CacheInSteadyState
+ // benchmark running the old case (where we rehash-in-place only if we can
+ // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place
+ // if we can recover 3/32*capacity_).
+ //
+      // Note that although the worst-case number of rehashes jumped up from
+      // 15 to 190, the number of operations per second is almost the same.
+ //
+ // Abridged output of running BM_CacheInSteadyState benchmark from
+ // raw_hash_set_benchmark. N is the number of insert/erase operations.
+ //
+      //       | OLD (recover >= 7/16) | NEW (recover >= 3/32)
+ // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes
+ // 448 | 145284 0.44 18 | 140118 0.44 19
+ // 493 | 152546 0.24 11 | 151417 0.48 28
+ // 538 | 151439 0.26 11 | 151152 0.53 38
+ // 583 | 151765 0.28 11 | 150572 0.57 50
+ // 628 | 150241 0.31 11 | 150853 0.61 66
+ // 672 | 149602 0.33 12 | 150110 0.66 90
+ // 717 | 149998 0.35 12 | 149531 0.70 129
+ // 762 | 149836 0.37 13 | 148559 0.74 190
+ // 807 | 149736 0.39 14 | 151107 0.39 14
+ // 852 | 150204 0.42 15 | 151019 0.42 15
+ drop_deletes_without_resize();
+ } else {
+ // Otherwise grow the container.
+ resize(capacity_ * 2 + 1);
+ }
+ }
+
+ bool has_element(const value_type& elem) const {
+ size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
+ auto seq = probe(ctrl_, hash, capacity_);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
+ elem))
+ return true;
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+ seq.next();
+ assert(seq.index() <= capacity_ && "full table!");
+ }
+ return false;
+ }
+
+ // TODO(alkis): Optimize this assuming *this and that don't overlap.
+ raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
+ raw_hash_set tmp(std::move(that));
+ swap(tmp);
+ return *this;
+ }
+ raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
+ raw_hash_set tmp(std::move(that), alloc_ref());
+ swap(tmp);
+ return *this;
+ }
+
+ protected:
+ template <class K>
+ std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+ prefetch_heap_block();
+ auto hash = hash_ref()(key);
+ auto seq = probe(ctrl_, hash, capacity_);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+ EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(slots_ + seq.offset(i)))))
+ return {seq.offset(i), false};
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+ seq.next();
+ assert(seq.index() <= capacity_ && "full table!");
+ }
+ return {prepare_insert(hash), true};
+ }
+
+ size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
+ if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
+ !IsDeleted(ctrl_[target.offset]))) {
+ rehash_and_grow_if_necessary();
+ target = find_first_non_full(ctrl_, hash, capacity_);
+ }
+ ++size_;
+ growth_left() -= IsEmpty(ctrl_[target.offset]);
+ SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
+ sizeof(slot_type));
+ infoz().RecordInsert(hash, target.probe_length);
+ return target.offset;
+ }
+
+  // Constructs the value in the space pointed to by the iterator. This only
+  // works after an unsuccessful find_or_prepare_insert() and before any other
+  // modifications happen in the raw_hash_set.
+ //
+ // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
+ // k is the key decomposed from `forward<Args>(args)...`, and the bool
+ // returned by find_or_prepare_insert(k) was true.
+ // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
+ template <class... Args>
+ void emplace_at(size_t i, Args&&... args) {
+ PolicyTraits::construct(&alloc_ref(), slots_ + i,
+ std::forward<Args>(args)...);
+
+ assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
+ iterator_at(i) &&
+ "constructed value does not match the lookup key");
+ }
+
+ iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
+ const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+
+ private:
+ friend struct RawHashSetTestOnlyAccess;
+
+ void reset_growth_left() {
+ growth_left() = CapacityToGrowth(capacity()) - size_;
+ }
+
+ size_t& growth_left() { return settings_.template get<0>(); }
+
+ void prefetch_heap_block() const {
+ // Prefetch the heap-allocated memory region to resolve potential TLB
+ // misses. This is intended to overlap with execution of calculating the
+ // hash for a key.
+#if defined(__GNUC__)
+ __builtin_prefetch(static_cast<const void*>(ctrl_), 0, 1);
+#endif // __GNUC__
+ }
+
+ HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
+
+ hasher& hash_ref() { return settings_.template get<2>(); }
+ const hasher& hash_ref() const { return settings_.template get<2>(); }
+ key_equal& eq_ref() { return settings_.template get<3>(); }
+ const key_equal& eq_ref() const { return settings_.template get<3>(); }
+ allocator_type& alloc_ref() { return settings_.template get<4>(); }
+ const allocator_type& alloc_ref() const {
+ return settings_.template get<4>();
+ }
+
+ // TODO(alkis): Investigate removing some of these fields:
+ // - ctrl/slots can be derived from each other
+ // - size can be moved into the slot array
+ ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1 + NumClonedBytes()) * ctrl_t]
+ slot_type* slots_ = nullptr; // [capacity * slot_type]
+ size_t size_ = 0; // number of full slots
+ size_t capacity_ = 0; // total number of slots
+ y_absl::container_internal::CompressedTuple<size_t /* growth_left */,
+ HashtablezInfoHandle, hasher,
+ key_equal, allocator_type>
+ settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
+ allocator_type{}};
+};
+
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
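+//
+// A sketch of usage (illustrative; in user code this is normally reached via
+// the container-level erase_if helpers):
+//
+//   flat_hash_set<int> s = {1, 2, 3, 4};
+//   auto is_odd = [](int v) { return v % 2 != 0; };
+//   EraseIf(is_odd, &s);  // s now holds {2, 4}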
+template <typename P, typename H, typename E, typename A, typename Predicate>
+void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
+ for (auto it = c->begin(), last = c->end(); it != last;) {
+ if (pred(*it)) {
+ c->erase(it++);
+ } else {
+ ++it;
+ }
+ }
+}
+
+namespace hashtable_debug_internal {
+template <typename Set>
+struct HashtableDebugAccess<Set, y_absl::void_t<typename Set::raw_hash_set>> {
+ using Traits = typename Set::PolicyTraits;
+ using Slot = typename Traits::slot_type;
+
+ static size_t GetNumProbes(const Set& set,
+ const typename Set::key_type& key) {
+ size_t num_probes = 0;
+ size_t hash = set.hash_ref()(key);
+ auto seq = probe(set.ctrl_, hash, set.capacity_);
+ while (true) {
+ container_internal::Group g{set.ctrl_ + seq.offset()};
+ for (int i : g.Match(container_internal::H2(hash))) {
+ if (Traits::apply(
+ typename Set::template EqualElement<typename Set::key_type>{
+ key, set.eq_ref()},
+ Traits::element(set.slots_ + seq.offset(i))))
+ return num_probes;
+ ++num_probes;
+ }
+ if (g.MatchEmpty()) return num_probes;
+ seq.next();
+ ++num_probes;
+ }
+ }
+
+ static size_t AllocatedByteSize(const Set& c) {
+ size_t capacity = c.capacity_;
+ if (capacity == 0) return 0;
+ size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
+
+ size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+ if (per_slot != ~size_t{}) {
+ m += per_slot * c.size();
+ } else {
+ for (size_t i = 0; i != capacity; ++i) {
+ if (container_internal::IsFull(c.ctrl_[i])) {
+ m += Traits::space_used(c.slots_ + i);
+ }
+ }
+ }
+ return m;
+ }
+
+ static size_t LowerBoundAllocatedByteSize(size_t size) {
+ size_t capacity = GrowthToLowerboundCapacity(size);
+ if (capacity == 0) return 0;
+ size_t m =
+ AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
+ size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+ if (per_slot != ~size_t{}) {
+ m += per_slot * size;
+ }
+ return m;
+ }
+};
+
+} // namespace hashtable_debug_internal
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set/ya.make
new file mode 100644
index 00000000000..d636929b873
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set/ya.make
@@ -0,0 +1,52 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/absl_hashtablez_sampler
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/hash
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp-tstring/y_absl/types
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/container/internal)
+
+SRCS(
+ raw_hash_set.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/test_instance_tracker.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/test_instance_tracker.h
new file mode 100644
index 00000000000..49dcaf5d21b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/test_instance_tracker.h
@@ -0,0 +1,274 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+#define ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+
+#include <cstdlib>
+#include <ostream>
+
+#include "y_absl/types/compare.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace test_internal {
+
+// A type that counts the number of occurrences of the type, the number of
+// live occurrences of the type, as well as the number of copies, moves,
+// swaps, and comparisons that have occurred on the type. This is used as a
+// base class for the copyable, copyable+movable, and movable types below that
+// are used in actual tests. Use InstanceTracker in tests to track the number
+// of instances.
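+//
+// For example (a sketch using the movable type defined below):
+//
+//   InstanceTracker tracker;
+//   {
+//     MovableOnlyInstance a(1);
+//     MovableOnlyInstance b(std::move(a));  // a is now a moved-from husk
+//     EXPECT_EQ(tracker.moves(), 1);
+//     EXPECT_EQ(tracker.live_instances(), 1);
+//   }
+//   EXPECT_EQ(tracker.instances(), 0);  // everything destroyed again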
+class BaseCountedInstance {
+ public:
+ explicit BaseCountedInstance(int x) : value_(x) {
+ ++num_instances_;
+ ++num_live_instances_;
+ }
+ BaseCountedInstance(const BaseCountedInstance& x)
+ : value_(x.value_), is_live_(x.is_live_) {
+ ++num_instances_;
+ if (is_live_) ++num_live_instances_;
+ ++num_copies_;
+ }
+ BaseCountedInstance(BaseCountedInstance&& x)
+ : value_(x.value_), is_live_(x.is_live_) {
+ x.is_live_ = false;
+ ++num_instances_;
+ ++num_moves_;
+ }
+ ~BaseCountedInstance() {
+ --num_instances_;
+ if (is_live_) --num_live_instances_;
+ }
+
+ BaseCountedInstance& operator=(const BaseCountedInstance& x) {
+ value_ = x.value_;
+ if (is_live_) --num_live_instances_;
+ is_live_ = x.is_live_;
+ if (is_live_) ++num_live_instances_;
+ ++num_copies_;
+ return *this;
+ }
+ BaseCountedInstance& operator=(BaseCountedInstance&& x) {
+ value_ = x.value_;
+ if (is_live_) --num_live_instances_;
+ is_live_ = x.is_live_;
+ x.is_live_ = false;
+ ++num_moves_;
+ return *this;
+ }
+
+ bool operator==(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ == x.value_;
+ }
+
+ bool operator!=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ != x.value_;
+ }
+
+ bool operator<(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ < x.value_;
+ }
+
+ bool operator>(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ > x.value_;
+ }
+
+ bool operator<=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ <= x.value_;
+ }
+
+ bool operator>=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ >= x.value_;
+ }
+
+ y_absl::weak_ordering compare(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ < x.value_
+ ? y_absl::weak_ordering::less
+ : value_ == x.value_ ? y_absl::weak_ordering::equivalent
+ : y_absl::weak_ordering::greater;
+ }
+
+ int value() const {
+ if (!is_live_) std::abort();
+ return value_;
+ }
+
+ friend std::ostream& operator<<(std::ostream& o,
+ const BaseCountedInstance& v) {
+ return o << "[value:" << v.value() << "]";
+ }
+
+ // Implementation of efficient swap() that counts swaps.
+ static void SwapImpl(
+ BaseCountedInstance& lhs, // NOLINT(runtime/references)
+ BaseCountedInstance& rhs) { // NOLINT(runtime/references)
+ using std::swap;
+ swap(lhs.value_, rhs.value_);
+ swap(lhs.is_live_, rhs.is_live_);
+ ++BaseCountedInstance::num_swaps_;
+ }
+
+ private:
+ friend class InstanceTracker;
+
+ int value_;
+
+  // Indicates if the value is live, i.e., it hasn't been moved away from.
+ bool is_live_ = true;
+
+ // Number of instances.
+ static int num_instances_;
+
+  // Number of live instances (those that have not been moved away from).
+ static int num_live_instances_;
+
+ // Number of times that BaseCountedInstance objects were moved.
+ static int num_moves_;
+
+ // Number of times that BaseCountedInstance objects were copied.
+ static int num_copies_;
+
+ // Number of times that BaseCountedInstance objects were swapped.
+ static int num_swaps_;
+
+ // Number of times that BaseCountedInstance objects were compared.
+ static int num_comparisons_;
+};
+
+// Helper to track the BaseCountedInstance instance counters. Expects the
+// number of instances and live instances to be the same when it is destructed
+// as when it was constructed.
+class InstanceTracker {
+ public:
+ InstanceTracker()
+ : start_instances_(BaseCountedInstance::num_instances_),
+ start_live_instances_(BaseCountedInstance::num_live_instances_) {
+ ResetCopiesMovesSwaps();
+ }
+ ~InstanceTracker() {
+ if (instances() != 0) std::abort();
+ if (live_instances() != 0) std::abort();
+ }
+
+  // Returns the number of BaseCountedInstance instances, both those holding
+  // valid values and those moved away from, relative to when the
+  // InstanceTracker was constructed.
+ int instances() const {
+ return BaseCountedInstance::num_instances_ - start_instances_;
+ }
+
+  // Returns the number of live BaseCountedInstance instances relative to when
+  // the InstanceTracker was constructed.
+ int live_instances() const {
+ return BaseCountedInstance::num_live_instances_ - start_live_instances_;
+ }
+
+ // Returns the number of moves on BaseCountedInstance objects since
+ // construction or since the last call to ResetCopiesMovesSwaps().
+ int moves() const { return BaseCountedInstance::num_moves_ - start_moves_; }
+
+ // Returns the number of copies on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int copies() const {
+ return BaseCountedInstance::num_copies_ - start_copies_;
+ }
+
+ // Returns the number of swaps on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; }
+
+ // Returns the number of comparisons on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int comparisons() const {
+ return BaseCountedInstance::num_comparisons_ - start_comparisons_;
+ }
+
+ // Resets the base values for moves, copies, comparisons, and swaps to the
+ // current values, so that subsequent Get*() calls for moves, copies,
+ // comparisons, and swaps will compare to the situation at the point of this
+ // call.
+ void ResetCopiesMovesSwaps() {
+ start_moves_ = BaseCountedInstance::num_moves_;
+ start_copies_ = BaseCountedInstance::num_copies_;
+ start_swaps_ = BaseCountedInstance::num_swaps_;
+ start_comparisons_ = BaseCountedInstance::num_comparisons_;
+ }
+
+ private:
+ int start_instances_;
+ int start_live_instances_;
+ int start_moves_;
+ int start_copies_;
+ int start_swaps_;
+ int start_comparisons_;
+};
+
+// Copyable, not movable.
+class CopyableOnlyInstance : public BaseCountedInstance {
+ public:
+ explicit CopyableOnlyInstance(int x) : BaseCountedInstance(x) {}
+ CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default;
+ CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default;
+
+ friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return false; }
+};
+
+// Copyable and movable.
+class CopyableMovableInstance : public BaseCountedInstance {
+ public:
+ explicit CopyableMovableInstance(int x) : BaseCountedInstance(x) {}
+ CopyableMovableInstance(const CopyableMovableInstance& rhs) = default;
+ CopyableMovableInstance(CopyableMovableInstance&& rhs) = default;
+ CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) =
+ default;
+ CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default;
+
+ friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return true; }
+};
+
+// Only movable, not default-constructible.
+class MovableOnlyInstance : public BaseCountedInstance {
+ public:
+ explicit MovableOnlyInstance(int x) : BaseCountedInstance(x) {}
+ MovableOnlyInstance(MovableOnlyInstance&& other) = default;
+ MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default;
+
+ friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return true; }
+};
+
+} // namespace test_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/tracked.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/tracked.h
new file mode 100644
index 00000000000..8765ee5ea61
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/tracked.h
@@ -0,0 +1,83 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TRACKED_H_
+#define ABSL_CONTAINER_INTERNAL_TRACKED_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A class that tracks its copies and moves so that it can be queried in tests.
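+//
+// A sketch of usage (illustrative only):
+//
+//   Tracked<int> t(5);
+//   Tracked<int> u = t;             // shares t's counters; counts one copy
+//   Tracked<int> v = std::move(u);  // counts one move on the same counters
+//   assert(t.num_copies() == 1);
+//   assert(t.num_moves() == 1);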
+template <class T>
+class Tracked {
+ public:
+ Tracked() {}
+ // NOLINTNEXTLINE(runtime/explicit)
+ Tracked(const T& val) : val_(val) {}
+ Tracked(const Tracked& that)
+ : val_(that.val_),
+ num_moves_(that.num_moves_),
+ num_copies_(that.num_copies_) {
+ ++(*num_copies_);
+ }
+ Tracked(Tracked&& that)
+ : val_(std::move(that.val_)),
+ num_moves_(std::move(that.num_moves_)),
+ num_copies_(std::move(that.num_copies_)) {
+ ++(*num_moves_);
+ }
+  Tracked& operator=(const Tracked& that) {
+    val_ = that.val_;
+    num_moves_ = that.num_moves_;
+    num_copies_ = that.num_copies_;
+    ++(*num_copies_);
+    return *this;
+  }
+  Tracked& operator=(Tracked&& that) {
+    val_ = std::move(that.val_);
+    num_moves_ = std::move(that.num_moves_);
+    num_copies_ = std::move(that.num_copies_);
+    ++(*num_moves_);
+    return *this;
+  }
+
+ const T& val() const { return val_; }
+
+ friend bool operator==(const Tracked& a, const Tracked& b) {
+ return a.val_ == b.val_;
+ }
+ friend bool operator!=(const Tracked& a, const Tracked& b) {
+ return !(a == b);
+ }
+
+ size_t num_copies() { return *num_copies_; }
+ size_t num_moves() { return *num_moves_; }
+
+ private:
+ T val_;
+ std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
+ std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_constructor_test.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_constructor_test.h
new file mode 100644
index 00000000000..f55d6293ae8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_constructor_test.h
@@ -0,0 +1,494 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
+#include <unordered_map>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "y_absl/container/internal/hash_generator_testing.h"
+#include "y_absl/container/internal/hash_policy_testing.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ConstructorTest);
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+ TypeParam m;
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+ TypeParam m(123);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+ using H = typename TypeParam::hasher;
+ H hasher;
+ TypeParam m(123, hasher);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ H hasher;
+ E equal;
+ TypeParam m(123, hasher, equal);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename T>
+struct is_std_unordered_map : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_map<std::unordered_map<T...>> : std::true_type {};
+
+#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
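+// expect_cxx14_apis<T>::value is true unless T is a std::unordered_map built
+// against a standard library that lacks the C++14 allocator-taking
+// constructor overloads; the helpers below use it to skip those overloads
+// where they are unavailable.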
+template <typename T>
+using expect_cxx14_apis =
+ y_absl::disjunction<y_absl::negation<is_std_unordered_map<T>>,
+ has_cxx14_std_apis>;
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+ BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ TypeParam m(123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+ BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+ y_absl::disjunction<y_absl::negation<is_std_unordered_map<T>>,
+ has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+ AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::UniqueGenerator<T>());
+ TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::UniqueGenerator<T>());
+ TypeParam m(values.begin(), values.end(), 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+ InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::UniqueGenerator<T>());
+ TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+ InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
+ TypeParam n(m);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
+ TypeParam n(m, A(11));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+ CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on copy constructors.
+
+TYPED_TEST_P(ConstructorTest, MoveConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
+ TypeParam t(m);
+ TypeParam n(std::move(t));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
+ TypeParam t(m);
+ TypeParam n(std::move(t), A(1));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+ MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on move constructors.
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::UniqueGenerator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(values, 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ hash_internal::UniqueGenerator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ A alloc(0);
+ TypeParam m(values, 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+ InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values, 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+ InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, Assignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam n;
+ n = m;
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam t(m);
+ TypeParam n;
+ n = std::move(t);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::UniqueGenerator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::UniqueGenerator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam n({gen()});
+ n = m;
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::UniqueGenerator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam t(m);
+ TypeParam n({gen()});
+ n = std::move(t);
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::UniqueGenerator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m({gen()});
+ m = values;
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::UniqueGenerator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values);
+ m = *&m; // Avoid -Wself-assign
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+// We cannot test self-move, as the standard states that it leaves standard
+// containers in an unspecified state (and in practice it causes a memory leak
+// according to the heap-checker!).
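+// (Illustration only: a hypothetical `m = std::move(m);` would leave `m` in a
+// valid but unspecified state, leaving nothing portable to assert on.)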
+
+REGISTER_TYPED_TEST_CASE_P(
+ ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+ InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+ MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+ InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
+ MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+ MoveAssignmentOverwritesExisting,
+ AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_lookup_test.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_lookup_test.h
new file mode 100644
index 00000000000..251f49aea29
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_lookup_test.h
@@ -0,0 +1,117 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "y_absl/container/internal/hash_generator_testing.h"
+#include "y_absl/container/internal/hash_policy_testing.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, At) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ for (const auto& p : values) {
+ const auto& val = m.at(p.first);
+ EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first);
+ }
+}
+
+TYPED_TEST_P(LookupTest, OperatorBracket) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values) {
+ auto& val = m[p.first];
+ EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first);
+ val = p.second;
+ }
+ for (const auto& p : values)
+ EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Count) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values)
+ EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first);
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values)
+ EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+ using std::get;
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values)
+ EXPECT_TRUE(m.end() == m.find(p.first))
+ << ::testing::PrintToString(p.first);
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values) {
+ auto it = m.find(p.first);
+ EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first);
+ EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first);
+ }
+}
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+ using std::get;
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values) {
+ auto r = m.equal_range(p.first);
+ ASSERT_EQ(0, std::distance(r.first, r.second));
+ }
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values) {
+ auto r = m.equal_range(p.first);
+ ASSERT_EQ(1, std::distance(r.first, r.second));
+ EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first);
+ }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
+ EqualRange);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_members_test.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_members_test.h
new file mode 100644
index 00000000000..1c9dc8c6be8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_members_test.h
@@ -0,0 +1,87 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+ EXPECT_TRUE((std::is_same<std::pair<const typename TypeParam::key_type,
+ typename TypeParam::mapped_type>,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((y_absl::conjunction<
+ y_absl::negation<std::is_signed<typename TypeParam::size_type>>,
+ std::is_integral<typename TypeParam::size_type>>()));
+ EXPECT_TRUE((y_absl::conjunction<
+ std::is_signed<typename TypeParam::difference_type>,
+ std::is_integral<typename TypeParam::difference_type>>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::hasher&>()(
+ std::declval<const typename TypeParam::key_type&>())),
+ size_t>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::key_equal&>()(
+ std::declval<const typename TypeParam::key_type&>(),
+ std::declval<const typename TypeParam::key_type&>())),
+ bool>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+ typename TypeParam::reference>()));
+ EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+ typename TypeParam::const_reference>()));
+ EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::pointer,
+ typename TypeParam::pointer>()));
+ EXPECT_TRUE(
+ (std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::const_pointer,
+ typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+ EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+ TypeParam t = {typename TypeParam::value_type{}};
+ EXPECT_EQ(t.begin(), t.cbegin());
+ EXPECT_EQ(t.end(), t.cend());
+ EXPECT_NE(t.begin(), t.end());
+ EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_modifiers_test.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_modifiers_test.h
new file mode 100644
index 00000000000..725cfdc345a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_map_modifiers_test.h
@@ -0,0 +1,351 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+
+#include <memory>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "y_absl/container/internal/hash_generator_testing.h"
+#include "y_absl/container/internal/hash_policy_testing.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ m.clear();
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.insert(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.insert(val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto it = m.insert(m.end(), val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.insert(it, val2);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ m.insert(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ m.insert(val2);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
+#if !defined(__GLIBCXX__)
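+ // Skipped under libstdc++ (__GLIBCXX__): presumably its range insert may
+ // rehash even when the final size fits within the reserved capacity.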
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> base_values;
+ std::generate_n(std::back_inserter(base_values), 10,
+ hash_internal::Generator<T>());
+ std::vector<T> values;
+ while (values.size() != 100) {
+ std::copy_n(base_values.begin(), 10, std::back_inserter(values));
+ }
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(values.begin(), values.end());
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
+#ifdef UNORDERED_MAP_CXX17
+ using std::get;
+ using K = typename TypeParam::key_type;
+ using V = typename TypeParam::mapped_type;
+ K k = hash_internal::Generator<K>()();
+ V val = hash_internal::Generator<V>()();
+ TypeParam m;
+ auto p = m.insert_or_assign(k, val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(k, get<0>(*p.first));
+ EXPECT_EQ(val, get<1>(*p.first));
+ V val2 = hash_internal::Generator<V>()();
+ p = m.insert_or_assign(k, val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(k, get<0>(*p.first));
+ EXPECT_EQ(val2, get<1>(*p.first));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) {
+#ifdef UNORDERED_MAP_CXX17
+ using std::get;
+ using K = typename TypeParam::key_type;
+ using V = typename TypeParam::mapped_type;
+ K k = hash_internal::Generator<K>()();
+ V val = hash_internal::Generator<V>()();
+ TypeParam m;
+ auto it = m.insert_or_assign(m.end(), k, val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(k, get<0>(*it));
+ EXPECT_EQ(val, get<1>(*it));
+ V val2 = hash_internal::Generator<V>()();
+ it = m.insert_or_assign(it, k, val2);
+ EXPECT_EQ(k, get<0>(*it));
+ EXPECT_EQ(val2, get<1>(*it));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.emplace(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.emplace(val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.emplace_hint(m.end(), val);
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.emplace_hint(it, val2);
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, TryEmplace) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.try_emplace(val.first, val.second);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.try_emplace(val2.first, val2.second);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, TryEmplaceHint) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.try_emplace(m.end(), val.first, val.second);
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.try_emplace(it, val2.first, val2.second);
+ EXPECT_EQ(val, *it);
+#endif
+}
+
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In openmap we chose not to return the iterator from erase because that's
+// more expensive. As such we adapt erase to return an iterator here.
+struct EraseFirst {
+ template <class Map>
+ auto operator()(Map* m, int) const
+ -> IfNotVoid<decltype(m->erase(m->begin()))> {
+ return m->erase(m->begin());
+ }
+ template <class Map>
+ typename Map::iterator operator()(Map* m, ...) const {
+ auto it = m->begin();
+ m->erase(it++);
+ return it;
+ }
+};
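+// In `EraseFirst` above, the literal `0` argument makes overload resolution
+// prefer the `int` overload whenever its SFINAE'd return type is well-formed
+// (i.e. `erase` returns an iterator); the `...` overload is the fallback.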
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using std::get;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ auto& first = *m.begin();
+ std::vector<T> values2;
+ for (const auto& val : values)
+ if (get<0>(val) != get<0>(first)) values2.push_back(val);
+ auto it = EraseFirst()(&m, 0);
+ ASSERT_TRUE(it != m.end());
+ EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+ values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ auto it = m.erase(m.begin(), m.end());
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_EQ(1, m.erase(values[0].first));
+ EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+ values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> v1;
+ std::vector<T> v2;
+ std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
+ std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
+ TypeParam m1(v1.begin(), v1.end());
+ TypeParam m2(v2.begin(), v2.end());
+ EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1));
+ EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2));
+ m1.swap(m2);
+ EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2));
+ EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
+ InsertRange, InsertWithinCapacity,
+ InsertRangeWithinCapacity, InsertOrAssign,
+ InsertOrAssignHint, Emplace, EmplaceHint, TryEmplace,
+ TryEmplaceHint, Erase, EraseRange, EraseKey, Swap);
+
+template <typename Type>
+struct is_unique_ptr : std::false_type {};
+
+template <typename Type>
+struct is_unique_ptr<std::unique_ptr<Type>> : std::true_type {};
+
+template <class UnordMap>
+class UniquePtrModifiersTest : public ::testing::Test {
+ protected:
+ UniquePtrModifiersTest() {
+ static_assert(is_unique_ptr<typename UnordMap::mapped_type>::value,
+ "UniquePtrModifiersTyest may only be called with a "
+ "std::unique_ptr value type.");
+ }
+};
+
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest);
+
+TYPED_TEST_SUITE_P(UniquePtrModifiersTest);
+
+// Test that we do not move from rvalue arguments if an insertion does not
+// happen.
+TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.try_emplace(val.first, std::move(val.second));
+ EXPECT_TRUE(p.second);
+ // A moved-from std::unique_ptr is guaranteed to be nullptr.
+ EXPECT_EQ(val.second, nullptr);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.try_emplace(val2.first, std::move(val2.second));
+ EXPECT_FALSE(p.second);
+ EXPECT_NE(val2.second, nullptr);
+#endif
+}
+
+REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_constructor_test.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_constructor_test.h
new file mode 100644
index 00000000000..1404f449161
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_constructor_test.h
@@ -0,0 +1,496 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
+#include <unordered_set>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "y_absl/container/internal/hash_generator_testing.h"
+#include "y_absl/container/internal/hash_policy_testing.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ConstructorTest);
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+ TypeParam m;
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+ TypeParam m(123);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+ using H = typename TypeParam::hasher;
+ H hasher;
+ TypeParam m(123, hasher);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ H hasher;
+ E equal;
+ TypeParam m(123, hasher, equal);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+
+ const auto& cm = m;
+ EXPECT_EQ(cm.hash_function(), hasher);
+ EXPECT_EQ(cm.key_eq(), equal);
+ EXPECT_EQ(cm.get_allocator(), alloc);
+ EXPECT_TRUE(cm.empty());
+ EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre());
+ EXPECT_GE(cm.bucket_count(), 123);
+}
+
+template <typename T>
+struct is_std_unordered_set : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_set<std::unordered_set<T...>> : std::true_type {};
+
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
+template <typename T>
+using expect_cxx14_apis =
+ y_absl::disjunction<y_absl::negation<is_std_unordered_set<T>>,
+ has_cxx14_std_apis>;
+
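+// Tag dispatch: `expect_cxx14_apis<T>` is std::true_type for non-std
+// containers, and for `std::unordered_set` only when the macros above are
+// defined; the `std::false_type` overloads below are deliberate no-ops.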
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+ BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ TypeParam m(123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+ BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+ y_absl::disjunction<y_absl::negation<is_std_unordered_set<T>>,
+ has_alloc_std_constructors>;
+
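+// Same dispatch scheme as `expect_cxx14_apis` above, keyed instead on whether
+// the standard library ships the allocator-taking constructors
+// (ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS comes from the test harness).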
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+ AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+ InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+ InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+ EXPECT_NE(TypeParam(0, hasher, equal, alloc), n);
+}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m, A(11));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+ CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on copy constructors.
+
+TYPED_TEST_P(ConstructorTest, MoveConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t), A(1));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+ MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on move constructors.
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(values, 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ A alloc(0);
+ TypeParam m(values, 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+ InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values, 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+ InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam n;
+ n = m;
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam t(m);
+ TypeParam n;
+ n = std::move(t);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam n({gen()});
+ n = m;
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam t(m);
+ TypeParam n({gen()});
+ n = std::move(t);
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m({gen()});
+ m = values;
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values);
+ m = *&m; // Avoid -Wself-assign.
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+REGISTER_TYPED_TEST_CASE_P(
+ ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+ InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+ MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+ InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment,
+ MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+ MoveAssignmentOverwritesExisting,
+ AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_lookup_test.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_lookup_test.h
new file mode 100644
index 00000000000..05579173d15
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_lookup_test.h
@@ -0,0 +1,91 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "y_absl/container/internal/hash_generator_testing.h"
+#include "y_absl/container/internal/hash_policy_testing.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, Count) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values)
+ EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v);
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values)
+ EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values)
+ EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v);
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values) {
+ typename TypeParam::iterator it = m.find(v);
+ static_assert(std::is_same<const typename TypeParam::value_type&,
+ decltype(*it)>::value,
+ "");
+ static_assert(std::is_same<const typename TypeParam::value_type*,
+ decltype(it.operator->())>::value,
+ "");
+ EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v);
+ EXPECT_EQ(v, *it) << ::testing::PrintToString(v);
+ }
+}
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values) {
+ auto r = m.equal_range(v);
+ ASSERT_EQ(0, std::distance(r.first, r.second));
+ }
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values) {
+ auto r = m.equal_range(v);
+ ASSERT_EQ(1, std::distance(r.first, r.second));
+ EXPECT_EQ(v, *r.first);
+ }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_members_test.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_members_test.h
new file mode 100644
index 00000000000..ad1c88d8b25
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_members_test.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+ EXPECT_TRUE((std::is_same<typename TypeParam::key_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((y_absl::conjunction<
+ y_absl::negation<std::is_signed<typename TypeParam::size_type>>,
+ std::is_integral<typename TypeParam::size_type>>()));
+ EXPECT_TRUE((y_absl::conjunction<
+ std::is_signed<typename TypeParam::difference_type>,
+ std::is_integral<typename TypeParam::difference_type>>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::hasher&>()(
+ std::declval<const typename TypeParam::key_type&>())),
+ size_t>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::key_equal&>()(
+ std::declval<const typename TypeParam::key_type&>(),
+ std::declval<const typename TypeParam::key_type&>())),
+ bool>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+ typename TypeParam::reference>()));
+ EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+ typename TypeParam::const_reference>()));
+ EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::pointer,
+ typename TypeParam::pointer>()));
+ EXPECT_TRUE(
+ (std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::const_pointer,
+ typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+ EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+ TypeParam t = {typename TypeParam::value_type{}};
+ EXPECT_EQ(t.begin(), t.cbegin());
+ EXPECT_EQ(t.end(), t.cend());
+ EXPECT_NE(t.begin(), t.end());
+ EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_modifiers_test.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_modifiers_test.h
new file mode 100644
index 00000000000..2f96a0197be
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/unordered_set_modifiers_test.h
@@ -0,0 +1,221 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "y_absl/container/internal/hash_generator_testing.h"
+#include "y_absl/container/internal/hash_policy_testing.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ m.clear();
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.insert(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ p = m.insert(val);
+ EXPECT_FALSE(p.second);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto it = m.insert(m.end(), val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+ it = m.insert(it, val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ m.insert(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
+#if !defined(__GLIBCXX__)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> base_values;
+ std::generate_n(std::back_inserter(base_values), 10,
+ hash_internal::Generator<T>());
+ std::vector<T> values;
+ while (values.size() != 100) {
+ values.insert(values.end(), base_values.begin(), base_values.end());
+ }
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(values.begin(), values.end());
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.emplace(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ p = m.emplace(val);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.emplace_hint(m.end(), val);
+ EXPECT_EQ(val, *it);
+ it = m.emplace_hint(it, val);
+ EXPECT_EQ(val, *it);
+}
+
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In openmap we chose not to return the iterator from erase because that's
+// more expensive. As such we adapt erase to return an iterator here.
+struct EraseFirst {
+ template <class Map>
+ auto operator()(Map* m, int) const
+ -> IfNotVoid<decltype(m->erase(m->begin()))> {
+ return m->erase(m->begin());
+ }
+ template <class Map>
+ typename Map::iterator operator()(Map* m, ...) const {
+ auto it = m->begin();
+ m->erase(it++);
+ return it;
+ }
+};
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ std::vector<T> values2;
+ for (const auto& val : values)
+ if (val != *m.begin()) values2.push_back(val);
+ auto it = EraseFirst()(&m, 0);
+ ASSERT_TRUE(it != m.end());
+ EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+ values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ auto it = m.erase(m.begin(), m.end());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_EQ(1, m.erase(values[0]));
+ EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+ values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> v1;
+ std::vector<T> v2;
+ std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
+ std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
+ TypeParam m1(v1.begin(), v1.end());
+ TypeParam m2(v2.begin(), v2.end());
+ EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1));
+ EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2));
+ m1.swap(m2);
+ EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2));
+ EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
+ InsertRange, InsertWithinCapacity,
+ InsertRangeWithinCapacity, Emplace, EmplaceHint,
+ Erase, EraseRange, EraseKey, Swap);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/node_hash_map.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/node_hash_map.h
new file mode 100644
index 00000000000..07e26def79c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/node_hash_map.h
@@ -0,0 +1,597 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: node_hash_map.h
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::node_hash_map<K, V>` is an unordered associative container of
+// unique keys and associated values designed to be a more efficient replacement
+// for `std::unordered_map`. Like `unordered_map`, search, insertion, and
+// deletion of map elements can be done as an `O(1)` operation. However,
+// `node_hash_map` (and other unordered associative containers known as the
+// collection of Abseil "Swiss tables") contains other optimizations that result
+// in both memory and computation advantages.
+//
+// In most cases, your default choice for a hash map should be a map of type
+// `flat_hash_map`. However, if you need pointer stability and cannot store
+// a `flat_hash_map` with `unique_ptr` elements, a `node_hash_map` may be a
+// valid alternative. Likewise, if you are migrating your code from using
+// `std::unordered_map`, a `node_hash_map` provides a more straightforward
+// migration, because it guarantees pointer stability. Consider migrating to
+// `node_hash_map` and perhaps converting to a more efficient `flat_hash_map`
+// upon further review.
+
+#ifndef ABSL_CONTAINER_NODE_HASH_MAP_H_
+#define ABSL_CONTAINER_NODE_HASH_MAP_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/algorithm/container.h"
+#include "y_absl/container/internal/container_memory.h"
+#include "y_absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
+#include "y_absl/container/internal/node_hash_policy.h"
+#include "y_absl/container/internal/raw_hash_map.h" // IWYU pragma: export
+#include "y_absl/memory/memory.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <class Key, class Value>
+class NodeHashMapPolicy;
+} // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// y_absl::node_hash_map
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::node_hash_map<K, V>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
+// the following notable differences:
+//
+// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
+//   `insert()`, provided that the map is supplied with a compatible
+//   heterogeneous hashing function and equality operator.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (assigned, deleted, and empty) within the hash map.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `node_hash_map` uses the `y_absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `y_absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `node_hash_map`.
+// If your type is not yet supported by the `y_absl::Hash` framework, see
+// y_absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// Example:
+//
+// // Create a node hash map of three strings (that map to strings)
+// y_absl::node_hash_map<TString, TString> ducks =
+// {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+// // Insert a new element into the node hash map
+//   ducks.insert({"d", "donald"});
+//
+// // Force a rehash of the node hash map
+// ducks.rehash(0);
+//
+// // Find the element with the key "b"
+// TString search_key = "b";
+// auto result = ducks.find(search_key);
+// if (result != ducks.end()) {
+// std::cout << "Result: " << result->second << std::endl;
+// }
+template <class Key, class Value,
+ class Hash = y_absl::container_internal::hash_default_hash<Key>,
+ class Eq = y_absl::container_internal::hash_default_eq<Key>,
+ class Alloc = std::allocator<std::pair<const Key, Value>>>
+class node_hash_map
+ : public y_absl::container_internal::raw_hash_map<
+ y_absl::container_internal::NodeHashMapPolicy<Key, Value>, Hash, Eq,
+ Alloc> {
+ using Base = typename node_hash_map::raw_hash_map;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A node_hash_map supports the same overload set as `std::unordered_map`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // // No allocation for the table's elements is made.
+ // y_absl::node_hash_map<int, TString> map1;
+ //
+ // * Initializer List constructor
+ //
+ // y_absl::node_hash_map<int, TString> map2 =
+ // {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+ //
+ // * Copy constructor
+ //
+ // y_absl::node_hash_map<int, TString> map3(map2);
+ //
+ // * Copy assignment operator
+ //
+ // // Hash functor and Comparator are copied as well
+ // y_absl::node_hash_map<int, TString> map4;
+ // map4 = map3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // y_absl::node_hash_map<int, TString> map5(std::move(map4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // y_absl::node_hash_map<int, TString> map6;
+ // map6 = std::move(map5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::pair<int, TString>> v = {{1, "a"}, {2, "b"}};
+ // y_absl::node_hash_map<int, TString> map7(v.begin(), v.end());
+ node_hash_map() {}
+ using Base::Base;
+
+ // node_hash_map::begin()
+ //
+ // Returns an iterator to the beginning of the `node_hash_map`.
+ using Base::begin;
+
+ // node_hash_map::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `node_hash_map`.
+ using Base::cbegin;
+
+ // node_hash_map::cend()
+ //
+ // Returns a const iterator to the end of the `node_hash_map`.
+ using Base::cend;
+
+ // node_hash_map::end()
+ //
+ // Returns an iterator to the end of the `node_hash_map`.
+ using Base::end;
+
+ // node_hash_map::capacity()
+ //
+ // Returns the number of element slots (assigned, deleted, and empty)
+ // available within the `node_hash_map`.
+ //
+ // NOTE: this member function is particular to `y_absl::node_hash_map` and is
+ // not provided in the `std::unordered_map` API.
+ using Base::capacity;
+
+ // node_hash_map::empty()
+ //
+ // Returns whether or not the `node_hash_map` is empty.
+ using Base::empty;
+
+ // node_hash_map::max_size()
+ //
+ // Returns the largest theoretical possible number of elements within a
+ // `node_hash_map` under current memory constraints. This value can be thought
+ // of as the largest value of `std::distance(begin(), end())` for a
+ // `node_hash_map<K, V>`.
+ using Base::max_size;
+
+ // node_hash_map::size()
+ //
+ // Returns the number of elements currently within the `node_hash_map`.
+ using Base::size;
+
+ // node_hash_map::clear()
+ //
+ // Removes all elements from the `node_hash_map`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ //
+ // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+  // the underlying buffer, call `erase(begin(), end())`.
+ using Base::clear;
+
+ // node_hash_map::erase()
+ //
+ // Erases elements within the `node_hash_map`. Erasing does not trigger a
+ // rehash. Overloads are listed below.
+ //
+ // void erase(const_iterator pos):
+ //
+  //     Erases the element at `pos` of the `node_hash_map`, returning
+ // `void`.
+ //
+ // NOTE: this return behavior is different than that of STL containers in
+ // general and `std::unordered_map` in particular.
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //     Erases the elements in the half-open interval [`first`, `last`),
+  //     returning an iterator pointing to `last`.
+ //
+ // size_type erase(const key_type& key):
+ //
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
+ using Base::erase;
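+
+  // For illustration, a hedged sketch of the `erase()` overloads above (the
+  // map and its contents are hypothetical):
+  //
+  //   y_absl::node_hash_map<int, TString> m = {{1, "a"}, {2, "b"}, {3, "c"}};
+  //   m.erase(m.find(1));                      // by iterator; returns `void`
+  //   size_t n = m.erase(2);                   // by key; n == 1
+  //   auto it = m.erase(m.begin(), m.end());   // by range; it == m.end()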
+
+ // node_hash_map::insert()
+ //
+ // Inserts an element of the specified value into the `node_hash_map`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If rehashing occurs
+ // due to the insertion, all iterators are invalidated. Overloads are listed
+ // below.
+ //
+ // std::pair<iterator,bool> insert(const init_type& value):
+ //
+ // Inserts a value into the `node_hash_map`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a `bool` denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(T&& value):
+ // std::pair<iterator,bool> insert(init_type&& value):
+ //
+ // Inserts a moveable value into the `node_hash_map`. Returns a `std::pair`
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a `bool` denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const init_type& value):
+ // iterator insert(const_iterator hint, T&& value):
+  //   iterator insert(const_iterator hint, init_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently, for `node_hash_map` we guarantee the
+ // first match is inserted.
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently within the initializer list, for
+ // `node_hash_map` we guarantee the first match is inserted.
+ using Base::insert;
+
+ // node_hash_map::insert_or_assign()
+ //
+  // Inserts an element with the specified key into the `node_hash_map`,
+  // provided that no element with that key already exists; otherwise, replaces
+  // the existing element's mapped value. Returns an iterator pointing to the
+  // inserted or updated element. If rehashing occurs due to an insertion, all
+  // iterators are invalidated. Overloads are listed below.
+ //
+ // std::pair<iterator, bool> insert_or_assign(const init_type& k, T&& obj):
+ // std::pair<iterator, bool> insert_or_assign(init_type&& k, T&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `node_hash_map`.
+ //
+ // iterator insert_or_assign(const_iterator hint,
+ // const init_type& k, T&& obj):
+ // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `node_hash_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
+ using Base::insert_or_assign;
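+
+  // A brief hypothetical sketch of `insert_or_assign()`:
+  //
+  //   y_absl::node_hash_map<TString, int> counts;
+  //   counts.insert_or_assign("a", 1);  // inserts {"a", 1}
+  //   counts.insert_or_assign("a", 2);  // key exists; mapped value becomes 2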
+
+ // node_hash_map::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `node_hash_map`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace;
+
+ // node_hash_map::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `node_hash_map`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace_hint;
+
+ // node_hash_map::try_emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `node_hash_map`, provided that no element with the given key
+ // already exists. Unlike `emplace()`, if an element with the given key
+ // already exists, we guarantee that no element is constructed.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args):
+ // std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `node_hash_map`.
+ //
+ // iterator try_emplace(const_iterator hint,
+ // const init_type& k, Args&&... args):
+ // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `node_hash_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
+ //
+ // All `try_emplace()` overloads make the same guarantees regarding rvalue
+ // arguments as `std::unordered_map::try_emplace()`, namely that these
+ // functions will not move from rvalue arguments if insertions do not happen.
+ using Base::try_emplace;
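+
+  // As an illustrative sketch, `try_emplace()` leaves an rvalue key intact
+  // when no insertion happens (the map contents are hypothetical):
+  //
+  //   y_absl::node_hash_map<TString, int> m = {{"k", 1}};
+  //   TString key = "k";
+  //   m.try_emplace(std::move(key), 2);  // key "k" exists; no insertion, and
+  //                                      // `key` is not moved from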
+
+ // node_hash_map::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+  //     Extracts the key/value pair of the element at the indicated position
+  //     and returns a node handle owning that extracted data.
+ //
+ // node_type extract(const key_type& x):
+ //
+  //     Extracts the key/value pair of the element with a key matching the
+  //     passed key value and returns a node handle owning that extracted data.
+  //     If the `node_hash_map` does not contain an element with a matching
+  //     key, this function returns an empty node handle.
+ //
+  // NOTE: when compiled with a C++ standard earlier than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
+ using Base::extract;
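+
+  // A hedged usage sketch of `extract()` and node-handle reinsertion (the map
+  // and keys are hypothetical):
+  //
+  //   y_absl::node_hash_map<int, TString> m = {{1, "a"}, {2, "b"}};
+  //   auto node = m.extract(1);      // m now holds only {2, "b"}
+  //   if (!node.empty()) {
+  //     node.key() = 3;              // rekey outside the map (C++17 or later)
+  //     m.insert(std::move(node));   // reinsert as {3, "a"}
+  //   }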
+
+ // node_hash_map::merge()
+ //
+ // Extracts elements from a given `source` node hash map into this
+ // `node_hash_map`. If the destination `node_hash_map` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // node_hash_map::swap(node_hash_map& other)
+ //
+ // Exchanges the contents of this `node_hash_map` with those of the `other`
+ // node hash map, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+ // All iterators and references on the `node_hash_map` remain valid, excepting
+ // for the past-the-end iterator, which is invalidated.
+ //
+ // `swap()` requires that the node hash map's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+ // non-member `swap()`. If the map's allocator has
+ // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+ // set to `true`, the allocators are also exchanged using an unqualified call
+ // to non-member `swap()`; otherwise, the allocators are not swapped.
+ using Base::swap;
+
+ // node_hash_map::rehash(count)
+ //
+ // Rehashes the `node_hash_map`, setting the number of slots to be at least
+  // the passed value. If the new number of slots would cause the load factor
+  // to exceed the current maximum load factor (that is, if
+  // `count` < `size()` / `max_load_factor()`), then the new number of slots
+  // will be at least `size()` / `max_load_factor()`.
+ //
+ // To force a rehash, pass rehash(0).
+ using Base::rehash;
+
+ // node_hash_map::reserve(count)
+ //
+ // Sets the number of slots in the `node_hash_map` to the number needed to
+ // accommodate at least `count` total elements without exceeding the current
+ // maximum load factor, and may rehash the container if needed.
+ using Base::reserve;
+
+ // node_hash_map::at()
+ //
+ // Returns a reference to the mapped value of the element with key equivalent
+ // to the passed key.
+ using Base::at;
+
+ // node_hash_map::contains()
+ //
+ // Determines whether an element with a key comparing equal to the given `key`
+ // exists within the `node_hash_map`, returning `true` if so or `false`
+ // otherwise.
+ using Base::contains;
+
+ // node_hash_map::count(const Key& key) const
+ //
+ // Returns the number of elements with a key comparing equal to the given
+  // `key` within the `node_hash_map`. Note that this function will return
+ // either `1` or `0` since duplicate keys are not allowed within a
+ // `node_hash_map`.
+ using Base::count;
+
+ // node_hash_map::equal_range()
+ //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `node_hash_map`.
+ using Base::equal_range;
+
+ // node_hash_map::find()
+ //
+ // Finds an element with the passed `key` within the `node_hash_map`.
+ using Base::find;
+
+ // node_hash_map::operator[]()
+ //
+ // Returns a reference to the value mapped to the passed key within the
+ // `node_hash_map`, performing an `insert()` if the key does not already
+ // exist. If an insertion occurs and results in a rehashing of the container,
+ // all iterators are invalidated. Otherwise iterators are not affected and
+ // references are not invalidated. Overloads are listed below.
+ //
+ // T& operator[](const Key& key):
+ //
+ // Inserts an init_type object constructed in-place if the element with the
+ // given key does not exist.
+ //
+ // T& operator[](Key&& key):
+ //
+ // Inserts an init_type object constructed in-place provided that an element
+ // with the given key does not exist.
+ using Base::operator[];
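+
+  // A short hypothetical sketch of `operator[]()`:
+  //
+  //   y_absl::node_hash_map<TString, int> m;
+  //   m["a"] += 1;  // value-initializes the mapped int to 0, then increments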
+
+ // node_hash_map::bucket_count()
+ //
+ // Returns the number of "buckets" within the `node_hash_map`.
+ using Base::bucket_count;
+
+ // node_hash_map::load_factor()
+ //
+ // Returns the current load factor of the `node_hash_map` (the average number
+ // of slots occupied with a value within the hash map).
+ using Base::load_factor;
+
+ // node_hash_map::max_load_factor()
+ //
+ // Manages the maximum load factor of the `node_hash_map`. Overloads are
+ // listed below.
+ //
+ // float node_hash_map::max_load_factor()
+ //
+ // Returns the current maximum load factor of the `node_hash_map`.
+ //
+ // void node_hash_map::max_load_factor(float ml)
+ //
+ // Sets the maximum load factor of the `node_hash_map` to the passed value.
+ //
+ // NOTE: This overload is provided only for API compatibility with the STL;
+ // `node_hash_map` will ignore any set load factor and manage its rehashing
+ // internally as an implementation detail.
+ using Base::max_load_factor;
+
+ // node_hash_map::get_allocator()
+ //
+ // Returns the allocator function associated with this `node_hash_map`.
+ using Base::get_allocator;
+
+ // node_hash_map::hash_function()
+ //
+ // Returns the hashing function used to hash the keys within this
+ // `node_hash_map`.
+ using Base::hash_function;
+
+ // node_hash_map::key_eq()
+ //
+  // Returns the function used for comparing keys for equality.
+ using Base::key_eq;
+};
+
+// erase_if(node_hash_map<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+template <typename K, typename V, typename H, typename E, typename A,
+ typename Predicate>
+void erase_if(node_hash_map<K, V, H, E, A>& c, Predicate pred) {
+ container_internal::EraseIf(pred, &c);
+}
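+
+// For illustration, a hedged sketch of `erase_if()` (the predicate and map
+// contents are hypothetical):
+//
+//   y_absl::node_hash_map<int, TString> m = {{1, "a"}, {2, "b"}, {3, "c"}};
+//   y_absl::erase_if(m, [](const auto& kv) { return kv.first % 2 == 1; });
+//   // m now holds only {2, "b"}.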
+
+namespace container_internal {
+
+template <class Key, class Value>
+class NodeHashMapPolicy
+ : public y_absl::container_internal::node_hash_policy<
+ std::pair<const Key, Value>&, NodeHashMapPolicy<Key, Value>> {
+ using value_type = std::pair<const Key, Value>;
+
+ public:
+ using key_type = Key;
+ using mapped_type = Value;
+ using init_type = std::pair</*non const*/ key_type, mapped_type>;
+
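+  // Allocates storage for a single `value_type` node via the element
+  // allocator rebound to `value_type`, then constructs the pair in place,
+  // forwarding `args`.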
+ template <class Allocator, class... Args>
+ static value_type* new_element(Allocator* alloc, Args&&... args) {
+ using PairAlloc = typename y_absl::allocator_traits<
+ Allocator>::template rebind_alloc<value_type>;
+ PairAlloc pair_alloc(*alloc);
+ value_type* res =
+ y_absl::allocator_traits<PairAlloc>::allocate(pair_alloc, 1);
+ y_absl::allocator_traits<PairAlloc>::construct(pair_alloc, res,
+ std::forward<Args>(args)...);
+ return res;
+ }
+
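+  // Destroys the pair and returns its storage to the rebound allocator; the
+  // inverse of `new_element()`.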
+ template <class Allocator>
+ static void delete_element(Allocator* alloc, value_type* pair) {
+ using PairAlloc = typename y_absl::allocator_traits<
+ Allocator>::template rebind_alloc<value_type>;
+ PairAlloc pair_alloc(*alloc);
+ y_absl::allocator_traits<PairAlloc>::destroy(pair_alloc, pair);
+ y_absl::allocator_traits<PairAlloc>::deallocate(pair_alloc, pair, 1);
+ }
+
+ template <class F, class... Args>
+ static decltype(y_absl::container_internal::DecomposePair(
+ std::declval<F>(), std::declval<Args>()...))
+ apply(F&& f, Args&&... args) {
+ return y_absl::container_internal::DecomposePair(std::forward<F>(f),
+ std::forward<Args>(args)...);
+ }
+
+ static size_t element_space_used(const value_type*) {
+ return sizeof(value_type);
+ }
+
+ static Value& value(value_type* elem) { return elem->second; }
+ static const Value& value(const value_type* elem) { return elem->second; }
+};
+} // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in y_absl/algorithm/container.h
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+ y_absl::node_hash_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+} // namespace container_algorithm_internal
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_NODE_HASH_MAP_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/node_hash_set.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/node_hash_set.h
new file mode 100644
index 00000000000..aa98bb29b87
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/node_hash_set.h
@@ -0,0 +1,493 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: node_hash_set.h
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::node_hash_set<T>` is an unordered associative container designed to
+// be a more efficient replacement for `std::unordered_set`. Like
+// `unordered_set`, search, insertion, and deletion of set elements can be done
+// as an `O(1)` operation. However, `node_hash_set` (and other unordered
+// associative containers known as the collection of Abseil "Swiss tables")
+// contains other optimizations that result in both memory and computation
+// advantages.
+//
+// In most cases, your default choice for a hash table should be a map of type
+// `flat_hash_map` or a set of type `flat_hash_set`. However, if you need
+// pointer stability, a `node_hash_set` should be your preferred choice.
+// Likewise, if you are migrating your code from using `std::unordered_set`, a
+// `node_hash_set` should be an easy migration. Consider migrating to
+// `node_hash_set` and perhaps converting to a more efficient `flat_hash_set`
+// upon further review.
+
+#ifndef ABSL_CONTAINER_NODE_HASH_SET_H_
+#define ABSL_CONTAINER_NODE_HASH_SET_H_
+
+#include <type_traits>
+
+#include "y_absl/algorithm/container.h"
+#include "y_absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
+#include "y_absl/container/internal/node_hash_policy.h"
+#include "y_absl/container/internal/raw_hash_set.h" // IWYU pragma: export
+#include "y_absl/memory/memory.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <typename T>
+struct NodeHashSetPolicy;
+} // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// y_absl::node_hash_set
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::node_hash_set<T>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_set<T>` with the
+// following notable differences:
+//
+// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
+//   that the set is supplied with a compatible heterogeneous hashing function
+//   and equality operator.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (assigned, deleted, and empty) within the hash set.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `node_hash_set` uses the `y_absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `y_absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `node_hash_set`.
+// If your type is not yet supported by the `y_absl::Hash` framework, see
+// y_absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// Example:
+//
+// // Create a node hash set of three strings
+// y_absl::node_hash_set<TString> ducks =
+// {"huey", "dewey", "louie"};
+//
+// // Insert a new element into the node hash set
+// ducks.insert("donald");
+//
+// // Force a rehash of the node hash set
+// ducks.rehash(0);
+//
+// // See if "dewey" is present
+// if (ducks.contains("dewey")) {
+// std::cout << "We found dewey!" << std::endl;
+// }
+template <class T, class Hash = y_absl::container_internal::hash_default_hash<T>,
+ class Eq = y_absl::container_internal::hash_default_eq<T>,
+ class Alloc = std::allocator<T>>
+class node_hash_set
+ : public y_absl::container_internal::raw_hash_set<
+ y_absl::container_internal::NodeHashSetPolicy<T>, Hash, Eq, Alloc> {
+ using Base = typename node_hash_set::raw_hash_set;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A node_hash_set supports the same overload set as `std::unordered_set`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // // No allocation for the table's elements is made.
+ // y_absl::node_hash_set<TString> set1;
+ //
+ // * Initializer List constructor
+ //
+ // y_absl::node_hash_set<TString> set2 =
+ // {{"huey"}, {"dewey"}, {"louie"}};
+ //
+ // * Copy constructor
+ //
+ // y_absl::node_hash_set<TString> set3(set2);
+ //
+ // * Copy assignment operator
+ //
+ // // Hash functor and Comparator are copied as well
+ // y_absl::node_hash_set<TString> set4;
+ // set4 = set3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // y_absl::node_hash_set<TString> set5(std::move(set4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // y_absl::node_hash_set<TString> set6;
+ // set6 = std::move(set5);
+ //
+ // * Range constructor
+ //
+ // std::vector<TString> v = {"a", "b"};
+ // y_absl::node_hash_set<TString> set7(v.begin(), v.end());
+ node_hash_set() {}
+ using Base::Base;
+
+ // node_hash_set::begin()
+ //
+ // Returns an iterator to the beginning of the `node_hash_set`.
+ using Base::begin;
+
+ // node_hash_set::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `node_hash_set`.
+ using Base::cbegin;
+
+ // node_hash_set::cend()
+ //
+ // Returns a const iterator to the end of the `node_hash_set`.
+ using Base::cend;
+
+ // node_hash_set::end()
+ //
+ // Returns an iterator to the end of the `node_hash_set`.
+ using Base::end;
+
+ // node_hash_set::capacity()
+ //
+ // Returns the number of element slots (assigned, deleted, and empty)
+ // available within the `node_hash_set`.
+ //
+ // NOTE: this member function is particular to `y_absl::node_hash_set` and is
+ // not provided in the `std::unordered_set` API.
+ using Base::capacity;
+
+ // node_hash_set::empty()
+ //
+ // Returns whether or not the `node_hash_set` is empty.
+ using Base::empty;
+
+ // node_hash_set::max_size()
+ //
+ // Returns the largest theoretical possible number of elements within a
+ // `node_hash_set` under current memory constraints. This value can be thought
+  // of as the largest value of `std::distance(begin(), end())` for a
+ // `node_hash_set<T>`.
+ using Base::max_size;
+
+ // node_hash_set::size()
+ //
+ // Returns the number of elements currently within the `node_hash_set`.
+ using Base::size;
+
+ // node_hash_set::clear()
+ //
+ // Removes all elements from the `node_hash_set`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ //
+ // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+  // the underlying buffer, call `erase(begin(), end())`.
+ using Base::clear;
+
+ // node_hash_set::erase()
+ //
+ // Erases elements within the `node_hash_set`. Erasing does not trigger a
+ // rehash. Overloads are listed below.
+ //
+ // void erase(const_iterator pos):
+ //
+  //     Erases the element at `pos` of the `node_hash_set`, returning
+ // `void`.
+ //
+ // NOTE: this return behavior is different than that of STL containers in
+ // general and `std::unordered_set` in particular.
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //     Erases the elements in the half-open interval [`first`, `last`),
+  //     returning an iterator pointing to `last`.
+ //
+ // size_type erase(const key_type& key):
+ //
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
+ using Base::erase;
+
+ // node_hash_set::insert()
+ //
+ // Inserts an element of the specified value into the `node_hash_set`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If rehashing occurs
+ // due to the insertion, all iterators are invalidated. Overloads are listed
+ // below.
+ //
+ // std::pair<iterator,bool> insert(const T& value):
+ //
+ // Inserts a value into the `node_hash_set`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a bool denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(T&& value):
+ //
+ // Inserts a moveable value into the `node_hash_set`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const T& value):
+ // iterator insert(const_iterator hint, T&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently, for `node_hash_set` we guarantee the
+ // first match is inserted.
+ //
+ // void insert(std::initializer_list<T> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently within the initializer list, for
+ // `node_hash_set` we guarantee the first match is inserted.
+ using Base::insert;
+
+ // node_hash_set::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `node_hash_set`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace;
+
+ // node_hash_set::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `node_hash_set`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace_hint;
+
+ // node_hash_set::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // node_type extract(const key_type& x):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `node_hash_set`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ using Base::extract;
+
+ // node_hash_set::merge()
+ //
+ // Extracts elements from a given `source` node hash set into this
+ // `node_hash_set`. If the destination `node_hash_set` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
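+
+  // A hedged sketch of `merge()` (the set contents are hypothetical):
+  //
+  //   y_absl::node_hash_set<TString> src = {"a", "b"};
+  //   y_absl::node_hash_set<TString> dst = {"b", "c"};
+  //   dst.merge(src);  // dst == {"a", "b", "c"}; src still holds "b"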
+
+ // node_hash_set::swap(node_hash_set& other)
+ //
+ // Exchanges the contents of this `node_hash_set` with those of the `other`
+ // node hash set, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+ // All iterators and references on the `node_hash_set` remain valid, excepting
+ // for the past-the-end iterator, which is invalidated.
+ //
+ // `swap()` requires that the node hash set's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+ // non-member `swap()`. If the set's allocator has
+ // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+ // set to `true`, the allocators are also exchanged using an unqualified call
+ // to non-member `swap()`; otherwise, the allocators are not swapped.
+ using Base::swap;
+
+ // node_hash_set::rehash(count)
+ //
+ // Rehashes the `node_hash_set`, setting the number of slots to be at least
+  // the passed value. If the new number of slots would cause the load factor
+  // to exceed the current maximum load factor (that is, if
+  // `count` < `size()` / `max_load_factor()`), then the new number of slots
+  // will be at least `size()` / `max_load_factor()`.
+ //
+ // To force a rehash, pass rehash(0).
+  //
+  // NOTE: a `rehash()` invalidates iterators, but since `node_hash_set`
+  // provides pointer stability, pointers and references to elements remain
+  // valid.
+ using Base::rehash;
+
+ // node_hash_set::reserve(count)
+ //
+ // Sets the number of slots in the `node_hash_set` to the number needed to
+ // accommodate at least `count` total elements without exceeding the current
+ // maximum load factor, and may rehash the container if needed.
+ using Base::reserve;
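+
+  // As an illustrative sketch, reserving before a bulk insert avoids
+  // intermediate rehashes (the element count is hypothetical):
+  //
+  //   y_absl::node_hash_set<int> s;
+  //   s.reserve(1000);
+  //   for (int i = 0; i < 1000; ++i) s.insert(i);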
+
+ // node_hash_set::contains()
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `node_hash_set`, returning `true` if so or `false` otherwise.
+ using Base::contains;
+
+ // node_hash_set::count(const Key& key) const
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+  // the `node_hash_set`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `node_hash_set`.
+ using Base::count;
+
+ // node_hash_set::equal_range()
+ //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `node_hash_set`.
+ using Base::equal_range;
+
+ // node_hash_set::find()
+ //
+ // Finds an element with the passed `key` within the `node_hash_set`.
+ using Base::find;
+
+ // node_hash_set::bucket_count()
+ //
+ // Returns the number of "buckets" within the `node_hash_set`. Note that
+  // because a node hash set keeps one slot per element in its backing array,
+  // this value simply equals the current capacity of the `node_hash_set`.
+ using Base::bucket_count;
+
+ // node_hash_set::load_factor()
+ //
+ // Returns the current load factor of the `node_hash_set` (the average number
+ // of slots occupied with a value within the hash set).
+ using Base::load_factor;
+
+ // node_hash_set::max_load_factor()
+ //
+ // Manages the maximum load factor of the `node_hash_set`. Overloads are
+ // listed below.
+ //
+ // float node_hash_set::max_load_factor()
+ //
+ // Returns the current maximum load factor of the `node_hash_set`.
+ //
+ // void node_hash_set::max_load_factor(float ml)
+ //
+ // Sets the maximum load factor of the `node_hash_set` to the passed value.
+ //
+ // NOTE: This overload is provided only for API compatibility with the STL;
+ // `node_hash_set` will ignore any set load factor and manage its rehashing
+ // internally as an implementation detail.
+ using Base::max_load_factor;
+
+ // node_hash_set::get_allocator()
+ //
+ // Returns the allocator function associated with this `node_hash_set`.
+ using Base::get_allocator;
+
+ // node_hash_set::hash_function()
+ //
+ // Returns the hashing function used to hash the keys within this
+ // `node_hash_set`.
+ using Base::hash_function;
+
+ // node_hash_set::key_eq()
+ //
+  // Returns the function used for comparing keys for equality.
+ using Base::key_eq;
+};
+
+// erase_if(node_hash_set<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+template <typename T, typename H, typename E, typename A, typename Predicate>
+void erase_if(node_hash_set<T, H, E, A>& c, Predicate pred) {
+ container_internal::EraseIf(pred, &c);
+}
+
+namespace container_internal {
+
+template <class T>
+struct NodeHashSetPolicy
+ : y_absl::container_internal::node_hash_policy<T&, NodeHashSetPolicy<T>> {
+ using key_type = T;
+ using init_type = T;
+ using constant_iterators = std::true_type;
+
+ template <class Allocator, class... Args>
+ static T* new_element(Allocator* alloc, Args&&... args) {
+ using ValueAlloc =
+ typename y_absl::allocator_traits<Allocator>::template rebind_alloc<T>;
+ ValueAlloc value_alloc(*alloc);
+ T* res = y_absl::allocator_traits<ValueAlloc>::allocate(value_alloc, 1);
+ y_absl::allocator_traits<ValueAlloc>::construct(value_alloc, res,
+ std::forward<Args>(args)...);
+ return res;
+ }
+
+ template <class Allocator>
+ static void delete_element(Allocator* alloc, T* elem) {
+ using ValueAlloc =
+ typename y_absl::allocator_traits<Allocator>::template rebind_alloc<T>;
+ ValueAlloc value_alloc(*alloc);
+ y_absl::allocator_traits<ValueAlloc>::destroy(value_alloc, elem);
+ y_absl::allocator_traits<ValueAlloc>::deallocate(value_alloc, elem, 1);
+ }
+
+ template <class F, class... Args>
+ static decltype(y_absl::container_internal::DecomposeValue(
+ std::declval<F>(), std::declval<Args>()...))
+ apply(F&& f, Args&&... args) {
+ return y_absl::container_internal::DecomposeValue(
+ std::forward<F>(f), std::forward<Args>(args)...);
+ }
+
+ static size_t element_space_used(const T*) { return sizeof(T); }
+};
+} // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in y_absl/algorithm/container.h
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<y_absl::node_hash_set<Key, Hash, KeyEqual, Allocator>>
+ : std::true_type {};
+
+} // namespace container_algorithm_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_CONTAINER_NODE_HASH_SET_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/container/ya.make
new file mode 100644
index 00000000000..b5ead458565
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/ya.make
@@ -0,0 +1,14 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..030c5b94cfe
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/.yandex_meta/licenses.list.txt
@@ -0,0 +1,24 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2018 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2020 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc
new file mode 100644
index 00000000000..fa6e30f935e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc
@@ -0,0 +1,388 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "y_absl/debugging/failure_signal_handler.h"
+
+#include "y_absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sched.h>
+#include <unistd.h>
+#endif
+
+#ifdef __APPLE__
+#include <TargetConditionals.h>
+#endif
+
+#ifdef ABSL_HAVE_MMAP
+#include <sys/mman.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <csignal>
+#include <cstdio>
+#include <cstring>
+#include <ctime>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/errno_saver.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/sysinfo.h"
+#include "y_absl/debugging/internal/examine_stack.h"
+#include "y_absl/debugging/stacktrace.h"
+
+#ifndef _WIN32
+#define ABSL_HAVE_SIGACTION
+// Apple WatchOS and TVOS don't allow sigaltstack
+#if !(defined(TARGET_OS_WATCH) && TARGET_OS_WATCH) && \
+ !(defined(TARGET_OS_TV) && TARGET_OS_TV)
+#define ABSL_HAVE_SIGALTSTACK
+#endif
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+ABSL_CONST_INIT static FailureSignalHandlerOptions fsh_options;
+
+// Resets the signal handler for signo to the default action for that
+// signal, then raises the signal.
+static void RaiseToDefaultHandler(int signo) {
+ signal(signo, SIG_DFL);
+ raise(signo);
+}
+
+struct FailureSignalData {
+ const int signo;
+ const char* const as_string;
+#ifdef ABSL_HAVE_SIGACTION
+ struct sigaction previous_action;
+ // StructSigaction is used to silence -Wmissing-field-initializers.
+ using StructSigaction = struct sigaction;
+ #define FSD_PREVIOUS_INIT FailureSignalData::StructSigaction()
+#else
+ void (*previous_handler)(int);
+ #define FSD_PREVIOUS_INIT SIG_DFL
+#endif
+};
+
+ABSL_CONST_INIT static FailureSignalData failure_signal_data[] = {
+ {SIGSEGV, "SIGSEGV", FSD_PREVIOUS_INIT},
+ {SIGILL, "SIGILL", FSD_PREVIOUS_INIT},
+ {SIGFPE, "SIGFPE", FSD_PREVIOUS_INIT},
+ {SIGABRT, "SIGABRT", FSD_PREVIOUS_INIT},
+ {SIGTERM, "SIGTERM", FSD_PREVIOUS_INIT},
+#ifndef _WIN32
+ {SIGBUS, "SIGBUS", FSD_PREVIOUS_INIT},
+ {SIGTRAP, "SIGTRAP", FSD_PREVIOUS_INIT},
+#endif
+};
+
+#undef FSD_PREVIOUS_INIT
+
+static void RaiseToPreviousHandler(int signo) {
+ // Search for the previous handler.
+ for (const auto& it : failure_signal_data) {
+ if (it.signo == signo) {
+#ifdef ABSL_HAVE_SIGACTION
+ sigaction(signo, &it.previous_action, nullptr);
+#else
+ signal(signo, it.previous_handler);
+#endif
+ raise(signo);
+ return;
+ }
+ }
+
+ // Not found, use the default handler.
+ RaiseToDefaultHandler(signo);
+}
+
+namespace debugging_internal {
+
+const char* FailureSignalToString(int signo) {
+ for (const auto& it : failure_signal_data) {
+ if (it.signo == signo) {
+ return it.as_string;
+ }
+ }
+ return "";
+}
+
+} // namespace debugging_internal
+
+#ifdef ABSL_HAVE_SIGALTSTACK
+
+static bool SetupAlternateStackOnce() {
+#if defined(__wasm__) || defined(__asmjs__)
+ const size_t page_mask = getpagesize() - 1;
+#else
+ const size_t page_mask = sysconf(_SC_PAGESIZE) - 1;
+#endif
+ size_t stack_size =
+ (std::max<size_t>(SIGSTKSZ, 65536) + page_mask) & ~page_mask;
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER)
+ // Account for sanitizer instrumentation requiring additional stack space.
+ stack_size *= 5;
+#endif
+
+ stack_t sigstk;
+ memset(&sigstk, 0, sizeof(sigstk));
+ sigstk.ss_size = stack_size;
+
+#ifdef ABSL_HAVE_MMAP
+#ifndef MAP_STACK
+#define MAP_STACK 0
+#endif
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+ sigstk.ss_sp = mmap(nullptr, sigstk.ss_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+ if (sigstk.ss_sp == MAP_FAILED) {
+ ABSL_RAW_LOG(FATAL, "mmap() for alternate signal stack failed");
+ }
+#else
+ sigstk.ss_sp = malloc(sigstk.ss_size);
+ if (sigstk.ss_sp == nullptr) {
+ ABSL_RAW_LOG(FATAL, "malloc() for alternate signal stack failed");
+ }
+#endif
+
+ if (sigaltstack(&sigstk, nullptr) != 0) {
+ ABSL_RAW_LOG(FATAL, "sigaltstack() failed with errno=%d", errno);
+ }
+ return true;
+}
+
+#endif
+
+#ifdef ABSL_HAVE_SIGACTION
+
+// Sets up an alternate stack for signal handlers once.
+// Returns the appropriate flag for sig_action.sa_flags
+// if the system supports using an alternate stack.
+static int MaybeSetupAlternateStack() {
+#ifdef ABSL_HAVE_SIGALTSTACK
+ ABSL_ATTRIBUTE_UNUSED static const bool kOnce = SetupAlternateStackOnce();
+ return SA_ONSTACK;
+#else
+ return 0;
+#endif
+}
+
+static void InstallOneFailureHandler(FailureSignalData* data,
+ void (*handler)(int, siginfo_t*, void*)) {
+ struct sigaction act;
+ memset(&act, 0, sizeof(act));
+ sigemptyset(&act.sa_mask);
+ act.sa_flags |= SA_SIGINFO;
+ // SA_NODEFER is required to handle SIGABRT from
+ // ImmediateAbortSignalHandler().
+ act.sa_flags |= SA_NODEFER;
+ if (fsh_options.use_alternate_stack) {
+ act.sa_flags |= MaybeSetupAlternateStack();
+ }
+ act.sa_sigaction = handler;
+ ABSL_RAW_CHECK(sigaction(data->signo, &act, &data->previous_action) == 0,
+ "sigaction() failed");
+}
+
+#else
+
+static void InstallOneFailureHandler(FailureSignalData* data,
+ void (*handler)(int)) {
+ data->previous_handler = signal(data->signo, handler);
+ ABSL_RAW_CHECK(data->previous_handler != SIG_ERR, "signal() failed");
+}
+
+#endif
+
+static void WriteToStderr(const char* data) {
+ y_absl::base_internal::ErrnoSaver errno_saver;
+ y_absl::raw_logging_internal::SafeWriteToStderr(data, strlen(data));
+}
+
+static void WriteSignalMessage(int signo, int cpu,
+ void (*writerfn)(const char*)) {
+ char buf[96];
+ char on_cpu[32] = {0};
+ if (cpu != -1) {
+ snprintf(on_cpu, sizeof(on_cpu), " on cpu %d", cpu);
+ }
+ const char* const signal_string =
+ debugging_internal::FailureSignalToString(signo);
+ if (signal_string != nullptr && signal_string[0] != '\0') {
+ snprintf(buf, sizeof(buf), "*** %s received at time=%ld%s ***\n",
+ signal_string,
+ static_cast<long>(time(nullptr)), // NOLINT(runtime/int)
+ on_cpu);
+ } else {
+ snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld%s ***\n",
+ signo, static_cast<long>(time(nullptr)), // NOLINT(runtime/int)
+ on_cpu);
+ }
+ writerfn(buf);
+}
+
+// `void*` might not be big enough to store `void(*)(const char*)`.
+struct WriterFnStruct {
+ void (*writerfn)(const char*);
+};
+
+// Many of the y_absl::debugging_internal::Dump* functions in
+// examine_stack.h take a writer function pointer that has a void* arg
+// for historical reasons. failure_signal_handler_writer only takes a
+// data pointer. This function converts between these types.
+static void WriterFnWrapper(const char* data, void* arg) {
+ static_cast<WriterFnStruct*>(arg)->writerfn(data);
+}
+
+// Convenient wrapper around DumpPCAndFrameSizesAndStackTrace() for signal
+// handlers. "noinline" so that GetStackFrames() skips the top-most stack
+// frame for this function.
+ABSL_ATTRIBUTE_NOINLINE static void WriteStackTrace(
+ void* ucontext, bool symbolize_stacktrace,
+ void (*writerfn)(const char*, void*), void* writerfn_arg) {
+ constexpr int kNumStackFrames = 32;
+ void* stack[kNumStackFrames];
+ int frame_sizes[kNumStackFrames];
+ int min_dropped_frames;
+ int depth = y_absl::GetStackFramesWithContext(
+ stack, frame_sizes, kNumStackFrames,
+ 1, // Do not include this function in stack trace.
+ ucontext, &min_dropped_frames);
+ y_absl::debugging_internal::DumpPCAndFrameSizesAndStackTrace(
+ y_absl::debugging_internal::GetProgramCounter(ucontext), stack, frame_sizes,
+ depth, min_dropped_frames, symbolize_stacktrace, writerfn, writerfn_arg);
+}
+
+// Called by AbslFailureSignalHandler() to write the failure info. It is
+// called once with writerfn set to WriteToStderr() and then possibly
+// with writerfn set to the user provided function.
+static void WriteFailureInfo(int signo, void* ucontext, int cpu,
+ void (*writerfn)(const char*)) {
+ WriterFnStruct writerfn_struct{writerfn};
+ WriteSignalMessage(signo, cpu, writerfn);
+ WriteStackTrace(ucontext, fsh_options.symbolize_stacktrace, WriterFnWrapper,
+ &writerfn_struct);
+}
+
+// y_absl::SleepFor() can't be used here since AbslInternalSleepFor()
+// may be overridden to do something that isn't async-signal-safe on
+// some platforms.
+static void PortableSleepForSeconds(int seconds) {
+#ifdef _WIN32
+ Sleep(seconds * 1000);
+#else
+ struct timespec sleep_time;
+ sleep_time.tv_sec = seconds;
+ sleep_time.tv_nsec = 0;
+ while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {}
+#endif
+}
+
+#ifdef ABSL_HAVE_ALARM
+// AbslFailureSignalHandler() installs this as a signal handler for
+// SIGALRM, then sets an alarm to be delivered to the program after a
+// set amount of time. If AbslFailureSignalHandler() hangs for more than
+// the alarm timeout, ImmediateAbortSignalHandler() will abort the
+// program.
+static void ImmediateAbortSignalHandler(int) {
+ RaiseToDefaultHandler(SIGABRT);
+}
+#endif
+
+// y_absl::base_internal::GetTID() returns pid_t on most platforms, but
+// returns y_absl::base_internal::pid_t on Windows.
+using GetTidType = decltype(y_absl::base_internal::GetTID());
+ABSL_CONST_INIT static std::atomic<GetTidType> failed_tid(0);
+
+#ifndef ABSL_HAVE_SIGACTION
+static void AbslFailureSignalHandler(int signo) {
+ void* ucontext = nullptr;
+#else
+static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) {
+#endif
+
+ const GetTidType this_tid = y_absl::base_internal::GetTID();
+ GetTidType previous_failed_tid = 0;
+ if (!failed_tid.compare_exchange_strong(
+ previous_failed_tid, static_cast<intptr_t>(this_tid),
+ std::memory_order_acq_rel, std::memory_order_relaxed)) {
+ ABSL_RAW_LOG(
+ ERROR,
+ "Signal %d raised at PC=%p while already in AbslFailureSignalHandler()",
+ signo, y_absl::debugging_internal::GetProgramCounter(ucontext));
+ if (this_tid != previous_failed_tid) {
+ // Another thread is already in AbslFailureSignalHandler(), so wait
+ // a bit for it to finish. If the other thread doesn't kill us,
+ // we do so after sleeping.
+ PortableSleepForSeconds(3);
+ RaiseToDefaultHandler(signo);
+ // The recursively raised signal may be blocked until we return.
+ return;
+ }
+ }
+
+ // Increase the chance that the CPU we report was the same CPU on which the
+ // signal was received by doing this as early as possible, i.e. after
+ // verifying that this is not a recursive signal handler invocation.
+ int my_cpu = -1;
+#ifdef ABSL_HAVE_SCHED_GETCPU
+ my_cpu = sched_getcpu();
+#endif
+
+#ifdef ABSL_HAVE_ALARM
+ // Set an alarm to abort the program in case this code hangs or deadlocks.
+ if (fsh_options.alarm_on_failure_secs > 0) {
+ alarm(0); // Cancel any existing alarms.
+ signal(SIGALRM, ImmediateAbortSignalHandler);
+ alarm(fsh_options.alarm_on_failure_secs);
+ }
+#endif
+
+ // First write to stderr.
+ WriteFailureInfo(signo, ucontext, my_cpu, WriteToStderr);
+
+ // Riskier code (because it is less likely to be async-signal-safe)
+ // goes after this point.
+ if (fsh_options.writerfn != nullptr) {
+ WriteFailureInfo(signo, ucontext, my_cpu, fsh_options.writerfn);
+ fsh_options.writerfn(nullptr);
+ }
+
+ if (fsh_options.call_previous_handler) {
+ RaiseToPreviousHandler(signo);
+ } else {
+ RaiseToDefaultHandler(signo);
+ }
+}
+
+void InstallFailureSignalHandler(const FailureSignalHandlerOptions& options) {
+ fsh_options = options;
+ for (auto& it : failure_signal_data) {
+ InstallOneFailureHandler(&it, AbslFailureSignalHandler);
+ }
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.h
new file mode 100644
index 00000000000..879df3c4346
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.h
@@ -0,0 +1,121 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: failure_signal_handler.h
+// -----------------------------------------------------------------------------
+//
+// This file configures the Abseil *failure signal handler* to capture and dump
+// useful debugging information (such as a stacktrace) upon program failure.
+//
+// To use the failure signal handler, call `y_absl::InstallFailureSignalHandler()`
+// very early in your program, usually in the first few lines of main():
+//
+// int main(int argc, char** argv) {
+// // Initialize the symbolizer to get a human-readable stack trace
+// y_absl::InitializeSymbolizer(argv[0]);
+//
+// y_absl::FailureSignalHandlerOptions options;
+// y_absl::InstallFailureSignalHandler(options);
+// DoSomethingInteresting();
+// return 0;
+// }
+//
+// Any program that raises a fatal signal (such as `SIGSEGV`, `SIGILL`,
+// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUS`, and `SIGTRAP`) will call the
+// installed failure signal handler and provide debugging information to stderr.
+//
+// Note that you should *not* install the Abseil failure signal handler more
+// than once. You may, of course, have another (non-Abseil) failure signal
+// handler installed (which will be invoked if the `call_previous_handler`
+// option is set to `true`).
+
+#ifndef ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_
+#define ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// FailureSignalHandlerOptions
+//
+// Struct for holding `y_absl::InstallFailureSignalHandler()` configuration
+// options.
+struct FailureSignalHandlerOptions {
+ // If true, try to symbolize the stacktrace emitted on failure, provided that
+ // you have initialized a symbolizer for that purpose. (See symbolize.h for
+ // more information.)
+ bool symbolize_stacktrace = true;
+
+ // If true, try to run signal handlers on an alternate stack (if supported on
+ // the given platform). An alternate stack is useful for program crashes due
+  // to a stack overflow; by running on an alternate stack, the signal handler
+  // may run even when normal stack space has been exhausted. The downside of
+ // using an alternate stack is that extra memory for the alternate stack needs
+ // to be pre-allocated.
+ bool use_alternate_stack = true;
+
+  // If positive, an alarm is set when the failure signal handler runs, and
+  // the program is aborted this many seconds later if the handler has not yet
+  // finished. Setting such an alarm is useful in cases where the failure
+  // signal handler itself may become hung or deadlocked.
+ int alarm_on_failure_secs = 3;
+
+ // If true, call the previously registered signal handler for the signal that
+ // was received (if one was registered) after the existing signal handler
+ // runs. This mechanism can be used to chain signal handlers together.
+ //
+ // If false, the signal is raised to the default handler for that signal
+ // (which normally terminates the program).
+ //
+ // IMPORTANT: If true, the chained fatal signal handlers must not try to
+ // recover from the fatal signal. Instead, they should terminate the program
+ // via some mechanism, like raising the default handler for the signal, or by
+ // calling `_exit()`. Note that the failure signal handler may put parts of
+ // the Abseil library into a state from which they cannot recover.
+ bool call_previous_handler = false;
+
+ // If non-null, indicates a pointer to a callback function that will be called
+ // upon failure, with a string argument containing failure data. This function
+ // may be used as a hook to write failure data to a secondary location, such
+ // as a log file. This function will also be called with null data, as a hint
+ // to flush any buffered data before the program may be terminated. Consider
+ // flushing any buffered data in all calls to this function.
+ //
+ // Since this function runs within a signal handler, it should be
+ // async-signal-safe if possible.
+ // See http://man7.org/linux/man-pages/man7/signal-safety.7.html
+ void (*writerfn)(const char*) = nullptr;
+};
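+
+// Example `writerfn` hook (an illustrative sketch, not part of this API;
+// `failure_log_fd` is a hypothetical descriptor opened early in main()). It
+// uses only the async-signal-safe write() call and treats the null argument
+// as the flush hint:
+//
+//   void WriteFailureToLog(const char* data) {
+//     if (data == nullptr) return;  // flush hint; nothing is buffered here
+//     size_t len = 0;
+//     while (data[len] != '\0') ++len;  // avoid libc strlen()
+//     write(failure_log_fd, data, len);
+//   }
+//
+//   y_absl::FailureSignalHandlerOptions options;
+//   options.writerfn = WriteFailureToLog;
+//   y_absl::InstallFailureSignalHandler(options);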
+
+// InstallFailureSignalHandler()
+//
+// Installs a signal handler for the common failure signals `SIGSEGV`, `SIGILL`,
+// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUS`, and `SIGTRAP` (provided they exist
+// on the given platform). The failure signal handler dumps program failure data
+// useful for debugging in an unspecified format to stderr. This data may
+// include the program counter, a stacktrace, and register information on some
+// systems; do not rely on an exact format for the output, as it is subject to
+// change.
+void InstallFailureSignalHandler(const FailureSignalHandlerOptions& options);
+
+namespace debugging_internal {
+const char* FailureSignalToString(int signo);
+} // namespace debugging_internal
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler/ya.make
new file mode 100644
index 00000000000..0c71cb55476
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler/ya.make
@@ -0,0 +1,43 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/debugging)
+
+SRCS(
+ failure_signal_handler.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..aac6c71d05e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/.yandex_meta/licenses.list.txt
@@ -0,0 +1,52 @@
+====================Apache-2.0====================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2018 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2021 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.cc
new file mode 100644
index 00000000000..df217335eab
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.cc
@@ -0,0 +1,139 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// base::AddressIsReadable() probes an address to see whether it is readable,
+// without faulting.
+
+#include "y_absl/debugging/internal/address_is_readable.h"
+
+#if !defined(__linux__) || defined(__ANDROID__)
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// On platforms other than Linux, just return true.
+bool AddressIsReadable(const void* /* addr */) { return true; }
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else
+
+#include <fcntl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <cerrno>
+#include <cstdint>
+
+#include "y_absl/base/internal/errno_saver.h"
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Pack a pid and two file descriptors into a 64-bit word,
+// using 16, 24, and 24 bits respectively.
+static uint64_t Pack(uint64_t pid, uint64_t read_fd, uint64_t write_fd) {
+ ABSL_RAW_CHECK((read_fd >> 24) == 0 && (write_fd >> 24) == 0,
+ "fd out of range");
+ return (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff);
+}
+
+// Unpack x into a pid and two file descriptors, where x was created with
+// Pack().
+static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) {
+ *pid = x >> 48;
+ *read_fd = (x >> 24) & 0xffffff;
+ *write_fd = x & 0xffffff;
+}
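+
+// For example (illustrative): Pack(0x1234, 5, 6) yields 0x1234000005000006
+// in the [pid:16][read_fd:24][write_fd:24] layout, and Unpack() recovers
+// pid = 0x1234, read_fd = 5 and write_fd = 6.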
+
+// This is a namespace-scoped variable for correct zero-initialization.
+static std::atomic<uint64_t> pid_and_fds;  // initially 0, an invalid pid.
+
+// Returns whether the byte at *addr is readable, without faulting.
+// Saves and restores errno. (On platforms where probing is unimplemented,
+// the non-Linux branch above simply returns true.)
+bool AddressIsReadable(const void *addr) {
+ y_absl::base_internal::ErrnoSaver errno_saver;
+ // We test whether a byte is readable by using write(). Normally, this would
+  // be done via a cached file descriptor to /dev/null, but Linux fails to
+ // check whether the byte is readable when the destination is /dev/null, so
+ // we use a cached pipe. We store the pid of the process that created the
+ // pipe to handle the case where a process forks, and the child closes all
+ // the file descriptors and then calls this routine. This is not perfect:
+ // the child could use the routine, then close all file descriptors and then
+ // use this routine again. But the likely use of this routine is when
+ // crashing, to test the validity of pages when dumping the stack. Beware
+ // that we may leak file descriptors, but we're unlikely to leak many.
+ int bytes_written;
+ int current_pid = getpid() & 0xffff; // we use only the low order 16 bits
+ do { // until we do not get EBADF trying to use file descriptors
+ int pid;
+ int read_fd;
+ int write_fd;
+ uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
+ Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
+ while (current_pid != pid) {
+ int p[2];
+ // new pipe
+ if (pipe(p) != 0) {
+ ABSL_RAW_LOG(FATAL, "Failed to create pipe, errno=%d", errno);
+ }
+ fcntl(p[0], F_SETFD, FD_CLOEXEC);
+ fcntl(p[1], F_SETFD, FD_CLOEXEC);
+ uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]);
+ if (pid_and_fds.compare_exchange_strong(
+ local_pid_and_fds, new_pid_and_fds, std::memory_order_release,
+ std::memory_order_relaxed)) {
+ local_pid_and_fds = new_pid_and_fds; // fds exposed to other threads
+ } else { // fds not exposed to other threads; we can close them.
+ close(p[0]);
+ close(p[1]);
+ local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
+ }
+ Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
+ }
+ errno = 0;
+ // Use syscall(SYS_write, ...) instead of write() to prevent ASAN
+ // and other checkers from complaining about accesses to arbitrary
+ // memory.
+ do {
+ bytes_written = syscall(SYS_write, write_fd, addr, 1);
+ } while (bytes_written == -1 && errno == EINTR);
+ if (bytes_written == 1) { // remove the byte from the pipe
+ char c;
+ while (read(read_fd, &c, 1) == -1 && errno == EINTR) {
+ }
+ }
+ if (errno == EBADF) { // Descriptors invalid.
+ // If pid_and_fds contains the problematic file descriptors we just used,
+ // this call will forget them, and the loop will try again.
+ pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0,
+ std::memory_order_release,
+ std::memory_order_relaxed);
+ }
+ } while (errno == EBADF);
+ return bytes_written == 1;
+}
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.h
new file mode 100644
index 00000000000..279dcc640bd
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.h
@@ -0,0 +1,32 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
+#define ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Returns whether the byte at *addr is readable, without faulting.
+// Saves and restores errno.
+bool AddressIsReadable(const void *addr);
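+
+// Example (illustrative): probe a frame pointer before dereferencing it when
+// walking a stack in a crash handler:
+//
+//   if (y_absl::debugging_internal::AddressIsReadable(frame_ptr)) {
+//     // reading one byte at frame_ptr will not fault
+//   }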
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/demangle.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/demangle.cc
new file mode 100644
index 00000000000..0a4daa2f5ce
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/demangle.cc
@@ -0,0 +1,1959 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// For reference check out:
+// https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
+//
+// Note that we only have partial C++11 support so far.
+
+#include "y_absl/debugging/internal/demangle.h"
+
+#include <cstdint>
+#include <cstdio>
+#include <limits>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+typedef struct {
+ const char *abbrev;
+ const char *real_name;
+ // Number of arguments in <expression> context, or 0 if disallowed.
+ int arity;
+} AbbrevPair;
+
+// List of operators from Itanium C++ ABI.
+static const AbbrevPair kOperatorList[] = {
+ // New has special syntax (not currently supported).
+ {"nw", "new", 0},
+ {"na", "new[]", 0},
+
+ // Works except that the 'gs' prefix is not supported.
+ {"dl", "delete", 1},
+ {"da", "delete[]", 1},
+
+ {"ps", "+", 1}, // "positive"
+ {"ng", "-", 1}, // "negative"
+ {"ad", "&", 1}, // "address-of"
+ {"de", "*", 1}, // "dereference"
+ {"co", "~", 1},
+
+ {"pl", "+", 2},
+ {"mi", "-", 2},
+ {"ml", "*", 2},
+ {"dv", "/", 2},
+ {"rm", "%", 2},
+ {"an", "&", 2},
+ {"or", "|", 2},
+ {"eo", "^", 2},
+ {"aS", "=", 2},
+ {"pL", "+=", 2},
+ {"mI", "-=", 2},
+ {"mL", "*=", 2},
+ {"dV", "/=", 2},
+ {"rM", "%=", 2},
+ {"aN", "&=", 2},
+ {"oR", "|=", 2},
+ {"eO", "^=", 2},
+ {"ls", "<<", 2},
+ {"rs", ">>", 2},
+ {"lS", "<<=", 2},
+ {"rS", ">>=", 2},
+ {"eq", "==", 2},
+ {"ne", "!=", 2},
+ {"lt", "<", 2},
+ {"gt", ">", 2},
+ {"le", "<=", 2},
+ {"ge", ">=", 2},
+ {"nt", "!", 1},
+ {"aa", "&&", 2},
+ {"oo", "||", 2},
+ {"pp", "++", 1},
+ {"mm", "--", 1},
+ {"cm", ",", 2},
+ {"pm", "->*", 2},
+ {"pt", "->", 0}, // Special syntax
+ {"cl", "()", 0}, // Special syntax
+ {"ix", "[]", 2},
+ {"qu", "?", 3},
+ {"st", "sizeof", 0}, // Special syntax
+ {"sz", "sizeof", 1}, // Not a real operator name, but used in expressions.
+ {nullptr, nullptr, 0},
+};
+
+// List of builtin types from Itanium C++ ABI.
+//
+// Invariant: only one- or two-character type abbreviations here.
+static const AbbrevPair kBuiltinTypeList[] = {
+ {"v", "void", 0},
+ {"w", "wchar_t", 0},
+ {"b", "bool", 0},
+ {"c", "char", 0},
+ {"a", "signed char", 0},
+ {"h", "unsigned char", 0},
+ {"s", "short", 0},
+ {"t", "unsigned short", 0},
+ {"i", "int", 0},
+ {"j", "unsigned int", 0},
+ {"l", "long", 0},
+ {"m", "unsigned long", 0},
+ {"x", "long long", 0},
+ {"y", "unsigned long long", 0},
+ {"n", "__int128", 0},
+ {"o", "unsigned __int128", 0},
+ {"f", "float", 0},
+ {"d", "double", 0},
+ {"e", "long double", 0},
+ {"g", "__float128", 0},
+ {"z", "ellipsis", 0},
+
+ {"De", "decimal128", 0}, // IEEE 754r decimal floating point (128 bits)
+ {"Dd", "decimal64", 0}, // IEEE 754r decimal floating point (64 bits)
+ {"Dc", "decltype(auto)", 0},
+ {"Da", "auto", 0},
+ {"Dn", "std::nullptr_t", 0}, // i.e., decltype(nullptr)
+ {"Df", "decimal32", 0}, // IEEE 754r decimal floating point (32 bits)
+ {"Di", "char32_t", 0},
+ {"Du", "char8_t", 0},
+ {"Ds", "char16_t", 0},
+ {"Dh", "float16", 0}, // IEEE 754r half-precision float (16 bits)
+ {nullptr, nullptr, 0},
+};
+
+// List of substitutions from the Itanium C++ ABI.
+static const AbbrevPair kSubstitutionList[] = {
+ {"St", "", 0},
+ {"Sa", "allocator", 0},
+ {"Sb", "basic_string", 0},
+ // std::basic_string<char, std::char_traits<char>,std::allocator<char> >
+ {"Ss", "string", 0},
+ // std::basic_istream<char, std::char_traits<char> >
+ {"Si", "istream", 0},
+ // std::basic_ostream<char, std::char_traits<char> >
+ {"So", "ostream", 0},
+ // std::basic_iostream<char, std::char_traits<char> >
+ {"Sd", "iostream", 0},
+ {nullptr, nullptr, 0},
+};
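+
+// For example (illustrative): in the mangled name "_ZNSsC1Ev", "Ss" is the
+// substitution for std::string, so the whole name demangles to
+// "std::string::string()".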
+
+// State needed for demangling. This struct is copied in almost every stack
+// frame, so every byte counts.
+typedef struct {
+ int mangled_idx; // Cursor of mangled name.
+ int out_cur_idx; // Cursor of output string.
+ int prev_name_idx; // For constructors/destructors.
+ signed int prev_name_length : 16; // For constructors/destructors.
+ signed int nest_level : 15; // For nested names.
+ unsigned int append : 1; // Append flag.
+ // Note: for some reason MSVC can't pack "bool append : 1" into the same int
+ // with the above two fields, so we use an int instead. Amusingly it can pack
+ // "signed bool" as expected, but relying on that to continue to be a legal
+ // type seems ill-advised (as it's illegal in at least clang).
+} ParseState;
+
+static_assert(sizeof(ParseState) == 4 * sizeof(int),
+ "unexpected size of ParseState");
+
+// One-off state for demangling that's not subject to backtracking -- either
+// constant data, data that's intentionally immune to backtracking (steps), or
+// data that would never be changed by backtracking anyway (recursion_depth).
+//
+// Only one copy of this exists for each call to Demangle, so the size of this
+// struct is nearly inconsequential.
+typedef struct {
+ const char *mangled_begin; // Beginning of input string.
+ char *out; // Beginning of output string.
+ int out_end_idx; // One past last allowed output character.
+ int recursion_depth; // For stack exhaustion prevention.
+ int steps; // Cap how much work we'll do, regardless of depth.
+ ParseState parse_state; // Backtrackable state copied for most frames.
+} State;
+
+namespace {
+// Prevent deep recursion / stack exhaustion.
+// Also prevent unbounded handling of complex inputs.
+class ComplexityGuard {
+ public:
+ explicit ComplexityGuard(State *state) : state_(state) {
+ ++state->recursion_depth;
+ ++state->steps;
+ }
+ ~ComplexityGuard() { --state_->recursion_depth; }
+
+ // 256 levels of recursion seems like a reasonable upper limit on depth.
+  // 128 is not enough to demangle synthetic tests from demangle_unittest.txt:
+ // "_ZaaZZZZ..." and "_ZaaZcvZcvZ..."
+ static constexpr int kRecursionDepthLimit = 256;
+
+ // We're trying to pick a charitable upper-limit on how many parse steps are
+ // necessary to handle something that a human could actually make use of.
+ // This is mostly in place as a bound on how much work we'll do if we are
+  // asked to demangle a mangled name from an untrusted source, so it should be
+ // much larger than the largest expected symbol, but much smaller than the
+ // amount of work we can do in, e.g., a second.
+ //
+ // Some real-world symbols from an arbitrary binary started failing between
+ // 2^12 and 2^13, so we multiply the latter by an extra factor of 16 to set
+ // the limit.
+ //
+ // Spending one second on 2^17 parse steps would require each step to take
+ // 7.6us, or ~30000 clock cycles, so it's safe to say this can be done in
+ // under a second.
+ static constexpr int kParseStepsLimit = 1 << 17;
+
+ bool IsTooComplex() const {
+ return state_->recursion_depth > kRecursionDepthLimit ||
+ state_->steps > kParseStepsLimit;
+ }
+
+ private:
+ State *state_;
+};
+} // namespace
+
+// We don't use strlen() from libc since it's not guaranteed to be
+// async-signal-safe.
+static size_t StrLen(const char *str) {
+ size_t len = 0;
+ while (*str != '\0') {
+ ++str;
+ ++len;
+ }
+ return len;
+}
+
+// Returns true if "str" has at least "n" characters remaining.
+static bool AtLeastNumCharsRemaining(const char *str, int n) {
+ for (int i = 0; i < n; ++i) {
+ if (str[i] == '\0') {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Returns true if "str" has "prefix" as a prefix.
+static bool StrPrefix(const char *str, const char *prefix) {
+ size_t i = 0;
+ while (str[i] != '\0' && prefix[i] != '\0' && str[i] == prefix[i]) {
+ ++i;
+ }
+ return prefix[i] == '\0'; // Consumed everything in "prefix".
+}
+
+static void InitState(State *state, const char *mangled, char *out,
+ int out_size) {
+ state->mangled_begin = mangled;
+ state->out = out;
+ state->out_end_idx = out_size;
+ state->recursion_depth = 0;
+ state->steps = 0;
+
+ state->parse_state.mangled_idx = 0;
+ state->parse_state.out_cur_idx = 0;
+ state->parse_state.prev_name_idx = 0;
+ state->parse_state.prev_name_length = -1;
+ state->parse_state.nest_level = -1;
+ state->parse_state.append = true;
+}
+
+static inline const char *RemainingInput(State *state) {
+ return &state->mangled_begin[state->parse_state.mangled_idx];
+}
+
+// Returns true and advances "mangled_idx" if we find "one_char_token"
+// at "mangled_idx" position. It is assumed that "one_char_token" does
+// not contain '\0'.
+static bool ParseOneCharToken(State *state, const char one_char_token) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (RemainingInput(state)[0] == one_char_token) {
+ ++state->parse_state.mangled_idx;
+ return true;
+ }
+ return false;
+}
+
+// Returns true and advances "mangled_cur" if we find "two_char_token"
+// at "mangled_cur" position. It is assumed that "two_char_token" does
+// not contain '\0'.
+static bool ParseTwoCharToken(State *state, const char *two_char_token) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (RemainingInput(state)[0] == two_char_token[0] &&
+ RemainingInput(state)[1] == two_char_token[1]) {
+ state->parse_state.mangled_idx += 2;
+ return true;
+ }
+ return false;
+}
+
+// Returns true and advances "mangled_cur" if we find any character in
+// "char_class" at "mangled_cur" position.
+static bool ParseCharClass(State *state, const char *char_class) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (RemainingInput(state)[0] == '\0') {
+ return false;
+ }
+ const char *p = char_class;
+ for (; *p != '\0'; ++p) {
+ if (RemainingInput(state)[0] == *p) {
+ ++state->parse_state.mangled_idx;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool ParseDigit(State *state, int *digit) {
+ char c = RemainingInput(state)[0];
+ if (ParseCharClass(state, "0123456789")) {
+ if (digit != nullptr) {
+ *digit = c - '0';
+ }
+ return true;
+ }
+ return false;
+}
+
+// This function is used for handling an optional non-terminal.
+static bool Optional(bool /*status*/) { return true; }
+
+// This function is used for handling <non-terminal>+ syntax.
+typedef bool (*ParseFunc)(State *);
+static bool OneOrMore(ParseFunc parse_func, State *state) {
+ if (parse_func(state)) {
+ while (parse_func(state)) {
+ }
+ return true;
+ }
+ return false;
+}
+
+// This function is used for handling <non-terminal>* syntax. The function
+// always returns true and must be followed by a termination token or a
+// terminating sequence not handled by parse_func (e.g.
+// ParseOneCharToken(state, 'E')).
+static bool ZeroOrMore(ParseFunc parse_func, State *state) {
+ while (parse_func(state)) {
+ }
+ return true;
+}
+
+// Append "str" at "out_cur_idx". If there is an overflow, out_cur_idx is
+// set to out_end_idx+1. The output string is ensured to
+// always terminate with '\0' as long as there is no overflow.
+static void Append(State *state, const char *const str, const int length) {
+ for (int i = 0; i < length; ++i) {
+ if (state->parse_state.out_cur_idx + 1 <
+ state->out_end_idx) { // +1 for '\0'
+ state->out[state->parse_state.out_cur_idx++] = str[i];
+ } else {
+ // signal overflow
+ state->parse_state.out_cur_idx = state->out_end_idx + 1;
+ break;
+ }
+ }
+ if (state->parse_state.out_cur_idx < state->out_end_idx) {
+ state->out[state->parse_state.out_cur_idx] =
+ '\0'; // Terminate it with '\0'
+ }
+}
+
+// We don't use equivalents in libc to avoid locale issues.
+static bool IsLower(char c) { return c >= 'a' && c <= 'z'; }
+
+static bool IsAlpha(char c) {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+static bool IsDigit(char c) { return c >= '0' && c <= '9'; }
+
+// Returns true if "str" is a function clone suffix. These suffixes are used
+// by GCC 4.5.x and later versions (and our locally-modified version of GCC
+// 4.4.x) to indicate functions which have been cloned during optimization.
+// We treat any sequence (.<alpha>+.<digit>+)+ as a function clone suffix.
+// Additionally, '_' is allowed along with the alphanumeric sequence.
+static bool IsFunctionCloneSuffix(const char *str) {
+ size_t i = 0;
+ while (str[i] != '\0') {
+ bool parsed = false;
+ // Consume a single [.<alpha> | _]*[.<digit>]* sequence.
+ if (str[i] == '.' && (IsAlpha(str[i + 1]) || str[i + 1] == '_')) {
+ parsed = true;
+ i += 2;
+ while (IsAlpha(str[i]) || str[i] == '_') {
+ ++i;
+ }
+ }
+ if (str[i] == '.' && IsDigit(str[i + 1])) {
+ parsed = true;
+ i += 2;
+ while (IsDigit(str[i])) {
+ ++i;
+ }
+ }
+ if (!parsed)
+ return false;
+ }
+ return true; // Consumed everything in "str".
+}
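+
+// For example (illustrative): ".clone.3", ".constprop.42", and "._omp_fn.0"
+// all match the rule above, so a symbol such as "_Z3fooi.constprop.42" still
+// demangles (the suffix is simply dropped).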
+
+static bool EndsWith(State *state, const char chr) {
+ return state->parse_state.out_cur_idx > 0 &&
+ state->parse_state.out_cur_idx < state->out_end_idx &&
+ chr == state->out[state->parse_state.out_cur_idx - 1];
+}
+
+// Append "str" with some tweaks, iff "append" state is true.
+static void MaybeAppendWithLength(State *state, const char *const str,
+ const int length) {
+ if (state->parse_state.append && length > 0) {
+ // Append a space if the output buffer ends with '<' and "str"
+ // starts with '<' to avoid <<<.
+ if (str[0] == '<' && EndsWith(state, '<')) {
+ Append(state, " ", 1);
+ }
+ // Remember the last identifier name for ctors/dtors,
+ // but only if we haven't yet overflown the buffer.
+ if (state->parse_state.out_cur_idx < state->out_end_idx &&
+ (IsAlpha(str[0]) || str[0] == '_')) {
+ state->parse_state.prev_name_idx = state->parse_state.out_cur_idx;
+ state->parse_state.prev_name_length = length;
+ }
+ Append(state, str, length);
+ }
+}
+
+// Appends a positive decimal number to the output if appending is enabled.
+static bool MaybeAppendDecimal(State *state, unsigned int val) {
+  // The maximum unsigned 64-bit value takes 20 decimal digits, so 20 chars
+  // suffice for any 32- to 64-bit unsigned int.
+ constexpr size_t kMaxLength = 20;
+ char buf[kMaxLength];
+
+ // We can't use itoa or sprintf as neither is specified to be
+ // async-signal-safe.
+ if (state->parse_state.append) {
+ // We can't have a one-before-the-beginning pointer, so instead start with
+ // one-past-the-end and manipulate one character before the pointer.
+ char *p = &buf[kMaxLength];
+ do { // val=0 is the only input that should write a leading zero digit.
+ *--p = (val % 10) + '0';
+ val /= 10;
+ } while (p > buf && val != 0);
+
+ // 'p' landed on the last character we set. How convenient.
+ Append(state, p, kMaxLength - (p - buf));
+ }
+
+ return true;
+}
+
+// A convenient wrapper around MaybeAppendWithLength().
+// Returns true so that it can be placed in "if" conditions.
+static bool MaybeAppend(State *state, const char *const str) {
+ if (state->parse_state.append) {
+ int length = StrLen(str);
+ MaybeAppendWithLength(state, str, length);
+ }
+ return true;
+}
+
+// This function is used for handling nested names.
+static bool EnterNestedName(State *state) {
+ state->parse_state.nest_level = 0;
+ return true;
+}
+
+// This function is used for handling nested names.
+static bool LeaveNestedName(State *state, int16_t prev_value) {
+ state->parse_state.nest_level = prev_value;
+ return true;
+}
+
+// Disable the append mode so that function parameters, etc. are not printed.
+static bool DisableAppend(State *state) {
+ state->parse_state.append = false;
+ return true;
+}
+
+// Restore the append mode to the previous state.
+static bool RestoreAppend(State *state, bool prev_value) {
+ state->parse_state.append = prev_value;
+ return true;
+}
+
+// Increase the nest level for nested names.
+static void MaybeIncreaseNestLevel(State *state) {
+ if (state->parse_state.nest_level > -1) {
+ ++state->parse_state.nest_level;
+ }
+}
+
+// Appends :: for nested names if necessary.
+static void MaybeAppendSeparator(State *state) {
+ if (state->parse_state.nest_level >= 1) {
+ MaybeAppend(state, "::");
+ }
+}
+
+// Cancel the last separator if necessary.
+static void MaybeCancelLastSeparator(State *state) {
+ if (state->parse_state.nest_level >= 1 && state->parse_state.append &&
+ state->parse_state.out_cur_idx >= 2) {
+ state->parse_state.out_cur_idx -= 2;
+ state->out[state->parse_state.out_cur_idx] = '\0';
+ }
+}
+
+// Returns true if the identifier of the given length pointed to by
+// "mangled_cur" is anonymous namespace.
+static bool IdentifierIsAnonymousNamespace(State *state, int length) {
+ // Returns true if "anon_prefix" is a proper prefix of "mangled_cur".
+ static const char anon_prefix[] = "_GLOBAL__N_";
+ return (length > static_cast<int>(sizeof(anon_prefix) - 1) &&
+ StrPrefix(RemainingInput(state), anon_prefix));
+}
+
+// Forward declarations of our parsing functions.
+static bool ParseMangledName(State *state);
+static bool ParseEncoding(State *state);
+static bool ParseName(State *state);
+static bool ParseUnscopedName(State *state);
+static bool ParseNestedName(State *state);
+static bool ParsePrefix(State *state);
+static bool ParseUnqualifiedName(State *state);
+static bool ParseSourceName(State *state);
+static bool ParseLocalSourceName(State *state);
+static bool ParseUnnamedTypeName(State *state);
+static bool ParseNumber(State *state, int *number_out);
+static bool ParseFloatNumber(State *state);
+static bool ParseSeqId(State *state);
+static bool ParseIdentifier(State *state, int length);
+static bool ParseOperatorName(State *state, int *arity);
+static bool ParseSpecialName(State *state);
+static bool ParseCallOffset(State *state);
+static bool ParseNVOffset(State *state);
+static bool ParseVOffset(State *state);
+static bool ParseCtorDtorName(State *state);
+static bool ParseDecltype(State *state);
+static bool ParseType(State *state);
+static bool ParseCVQualifiers(State *state);
+static bool ParseBuiltinType(State *state);
+static bool ParseFunctionType(State *state);
+static bool ParseBareFunctionType(State *state);
+static bool ParseClassEnumType(State *state);
+static bool ParseArrayType(State *state);
+static bool ParsePointerToMemberType(State *state);
+static bool ParseTemplateParam(State *state);
+static bool ParseTemplateTemplateParam(State *state);
+static bool ParseTemplateArgs(State *state);
+static bool ParseTemplateArg(State *state);
+static bool ParseBaseUnresolvedName(State *state);
+static bool ParseUnresolvedName(State *state);
+static bool ParseExpression(State *state);
+static bool ParseExprPrimary(State *state);
+static bool ParseExprCastValue(State *state);
+static bool ParseLocalName(State *state);
+static bool ParseLocalNameSuffix(State *state);
+static bool ParseDiscriminator(State *state);
+static bool ParseSubstitution(State *state, bool accept_std);
+
+// Implementation note: the following code is a straightforward
+// translation of the Itanium C++ ABI defined in BNF with a couple of
+// exceptions.
+//
+// - Support GNU extensions not defined in the Itanium C++ ABI
+// - <prefix> and <template-prefix> are combined to avoid infinite loop
+// - Reorder patterns to shorten the code
+// - Reorder patterns to give greedier functions precedence
+// We'll mark "Less greedy than" for these cases in the code
+//
+// Each parsing function changes the parse state and returns true on
+// success, or returns false and doesn't change the parse state (note:
+// the parse-steps counter increases regardless of success or failure).
+// To ensure that the parse state isn't changed in the latter case, we
+// save the original state before we call multiple parsing functions
+// consecutively with &&, and restore it if unsuccessful. See
+// ParseEncoding() as an example of this convention. We follow the
+// convention throughout the code.
+//
+// Originally we tried to do demangling without following the full ABI
+// syntax but it turned out we needed to follow the full syntax to
+// parse complicated cases like nested template arguments. Note that
+// implementing a full-fledged demangler isn't trivial (libiberty's
+// cp-demangle.c has over 4300 lines).
+//
+// Note that (foo) in <(foo) ...> is a modifier to be ignored.
+//
+// Reference:
+// - Itanium C++ ABI
+// <https://mentorembedded.github.io/cxx-abi/abi.html#mangling>
+
+// <mangled-name> ::= _Z <encoding>
+static bool ParseMangledName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ return ParseTwoCharToken(state, "_Z") && ParseEncoding(state);
+}
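+
+// For example (illustrative): "_Z3fooi" is _Z followed by the encoding
+// "3fooi", i.e. the name "3foo" ("foo") plus the bare function type "i"
+// (int); the full demangler renders it as "foo()", since parameter types
+// are elided from the output.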
+
+// <encoding> ::= <(function) name> <bare-function-type>
+// ::= <(data) name>
+// ::= <special-name>
+static bool ParseEncoding(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ // Implementing the first two productions together as <name>
+ // [<bare-function-type>] avoids exponential blowup of backtracking.
+ //
+ // Since Optional(...) can't fail, there's no need to copy the state for
+ // backtracking.
+ if (ParseName(state) && Optional(ParseBareFunctionType(state))) {
+ return true;
+ }
+
+ if (ParseSpecialName(state)) {
+ return true;
+ }
+ return false;
+}
+
+// <name> ::= <nested-name>
+// ::= <unscoped-template-name> <template-args>
+// ::= <unscoped-name>
+// ::= <local-name>
+static bool ParseName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseNestedName(state) || ParseLocalName(state)) {
+ return true;
+ }
+
+ // We reorganize the productions to avoid re-parsing unscoped names.
+ // - Inline <unscoped-template-name> productions:
+ // <name> ::= <substitution> <template-args>
+ // ::= <unscoped-name> <template-args>
+ // ::= <unscoped-name>
+ // - Merge the two productions that start with unscoped-name:
+ // <name> ::= <unscoped-name> [<template-args>]
+
+ ParseState copy = state->parse_state;
+ // "std<...>" isn't a valid name.
+ if (ParseSubstitution(state, /*accept_std=*/false) &&
+ ParseTemplateArgs(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Note there's no need to restore state after this since only the first
+ // subparser can fail.
+ return ParseUnscopedName(state) && Optional(ParseTemplateArgs(state));
+}
+
+// <unscoped-name> ::= <unqualified-name>
+// ::= St <unqualified-name>
+static bool ParseUnscopedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseUnqualifiedName(state)) {
+ return true;
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "St") && MaybeAppend(state, "std::") &&
+ ParseUnqualifiedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <ref-qualifier> ::= R // lvalue method reference qualifier
+//                 ::= O // rvalue method reference qualifier
+static inline bool ParseRefQualifier(State *state) {
+ return ParseCharClass(state, "OR");
+}
+
+// <nested-name> ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix>
+// <unqualified-name> E
+// ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
+// <template-args> E
+static bool ParseNestedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'N') && EnterNestedName(state) &&
+ Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseRefQualifier(state)) && ParsePrefix(state) &&
+ LeaveNestedName(state, copy.nest_level) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// This part is tricky. If we translated these productions literally into
+// code, we would end up in an infinite loop. Hence we merge them to avoid
+// that case.
+//
+// <prefix> ::= <prefix> <unqualified-name>
+// ::= <template-prefix> <template-args>
+// ::= <template-param>
+// ::= <substitution>
+// ::= # empty
+// <template-prefix> ::= <prefix> <(template) unqualified-name>
+// ::= <template-param>
+// ::= <substitution>
+static bool ParsePrefix(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ bool has_something = false;
+ while (true) {
+ MaybeAppendSeparator(state);
+ if (ParseTemplateParam(state) ||
+ ParseSubstitution(state, /*accept_std=*/true) ||
+ ParseUnscopedName(state) ||
+ (ParseOneCharToken(state, 'M') && ParseUnnamedTypeName(state))) {
+ has_something = true;
+ MaybeIncreaseNestLevel(state);
+ continue;
+ }
+ MaybeCancelLastSeparator(state);
+ if (has_something && ParseTemplateArgs(state)) {
+ return ParsePrefix(state);
+ } else {
+ break;
+ }
+ }
+ return true;
+}
+
+// <unqualified-name> ::= <operator-name>
+// ::= <ctor-dtor-name>
+// ::= <source-name>
+// ::= <local-source-name> // GCC extension; see below.
+// ::= <unnamed-type-name>
+static bool ParseUnqualifiedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ return (ParseOperatorName(state, nullptr) || ParseCtorDtorName(state) ||
+ ParseSourceName(state) || ParseLocalSourceName(state) ||
+ ParseUnnamedTypeName(state));
+}
+
+// <source-name> ::= <positive length number> <identifier>
+static bool ParseSourceName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ int length = -1;
+ if (ParseNumber(state, &length) && ParseIdentifier(state, length)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <local-source-name> ::= L <source-name> [<discriminator>]
+//
+// References:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=31775
+// https://gcc.gnu.org/viewcvs?view=rev&revision=124467
+static bool ParseLocalSourceName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'L') && ParseSourceName(state) &&
+ Optional(ParseDiscriminator(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <unnamed-type-name> ::= Ut [<(nonnegative) number>] _
+// ::= <closure-type-name>
+// <closure-type-name> ::= Ul <lambda-sig> E [<(nonnegative) number>] _
+// <lambda-sig> ::= <(parameter) type>+
+static bool ParseUnnamedTypeName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ // Type's 1-based index n is encoded as { "", n == 1; itoa(n-2), otherwise }.
+ // Optionally parse the encoded value into 'which' and add 2 to get the index.
+ int which = -1;
+
+ // Unnamed type local to function or class.
+ if (ParseTwoCharToken(state, "Ut") && Optional(ParseNumber(state, &which)) &&
+ which <= std::numeric_limits<int>::max() - 2 && // Don't overflow.
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "{unnamed type#");
+ MaybeAppendDecimal(state, 2 + which);
+ MaybeAppend(state, "}");
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Closure type.
+ which = -1;
+ if (ParseTwoCharToken(state, "Ul") && DisableAppend(state) &&
+ OneOrMore(ParseType, state) && RestoreAppend(state, copy.append) &&
+ ParseOneCharToken(state, 'E') && Optional(ParseNumber(state, &which)) &&
+ which <= std::numeric_limits<int>::max() - 2 && // Don't overflow.
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "{lambda()#");
+ MaybeAppendDecimal(state, 2 + which);
+ MaybeAppend(state, "}");
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
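+
+// For example (illustrative): "Ut_" yields "{unnamed type#1}", "Ut0_" yields
+// "{unnamed type#2}", and "UlvE_" (a closure whose signature is "v") yields
+// "{lambda()#1}".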
+
+// <number> ::= [n] <non-negative decimal integer>
+// If "number_out" is non-null, then *number_out is set to the value of the
+// parsed number on success.
+static bool ParseNumber(State *state, int *number_out) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ bool negative = false;
+ if (ParseOneCharToken(state, 'n')) {
+ negative = true;
+ }
+ const char *p = RemainingInput(state);
+ uint64_t number = 0;
+ for (; *p != '\0'; ++p) {
+ if (IsDigit(*p)) {
+ number = number * 10 + (*p - '0');
+ } else {
+ break;
+ }
+ }
+ // Apply the sign with uint64_t arithmetic so overflows aren't UB. Gives
+ // "incorrect" results for out-of-range inputs, but negative values only
+ // appear for literals, which aren't printed.
+ if (negative) {
+ number = ~number + 1;
+ }
+ if (p != RemainingInput(state)) { // Conversion succeeded.
+ state->parse_state.mangled_idx += p - RemainingInput(state);
+ if (number_out != nullptr) {
+ // Note: possibly truncate "number".
+ *number_out = number;
+ }
+ return true;
+ }
+ return false;
+}
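+
+// For example (illustrative): on input "12abc" ParseNumber() consumes "12"
+// and sets *number_out to 12; on "n5" it consumes both characters and, on
+// the usual two's-complement platforms, stores -5 after the wrap above.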
+
+// Floating-point literals are encoded using a fixed-length lowercase
+// hexadecimal string.
+static bool ParseFloatNumber(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ const char *p = RemainingInput(state);
+ for (; *p != '\0'; ++p) {
+ if (!IsDigit(*p) && !(*p >= 'a' && *p <= 'f')) {
+ break;
+ }
+ }
+ if (p != RemainingInput(state)) { // Conversion succeeded.
+ state->parse_state.mangled_idx += p - RemainingInput(state);
+ return true;
+ }
+ return false;
+}
+
+// The <seq-id> is a sequence number in base 36,
+// using digits and upper case letters.
+static bool ParseSeqId(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ const char *p = RemainingInput(state);
+ for (; *p != '\0'; ++p) {
+ if (!IsDigit(*p) && !(*p >= 'A' && *p <= 'Z')) {
+ break;
+ }
+ }
+ if (p != RemainingInput(state)) { // Conversion succeeded.
+ state->parse_state.mangled_idx += p - RemainingInput(state);
+ return true;
+ }
+ return false;
+}
+
+// <identifier> ::= <unqualified source code identifier> (of given length)
+static bool ParseIdentifier(State *state, int length) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (length < 0 || !AtLeastNumCharsRemaining(RemainingInput(state), length)) {
+ return false;
+ }
+ if (IdentifierIsAnonymousNamespace(state, length)) {
+ MaybeAppend(state, "(anonymous namespace)");
+ } else {
+ MaybeAppendWithLength(state, RemainingInput(state), length);
+ }
+ state->parse_state.mangled_idx += length;
+ return true;
+}
+
+// <operator-name> ::= nw, and other two-letter cases
+// ::= cv <type> # (cast)
+// ::= v <digit> <source-name> # vendor extended operator
+static bool ParseOperatorName(State *state, int *arity) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (!AtLeastNumCharsRemaining(RemainingInput(state), 2)) {
+ return false;
+ }
+  // First check the "cv" (cast) case.
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "cv") && MaybeAppend(state, "operator ") &&
+ EnterNestedName(state) && ParseType(state) &&
+ LeaveNestedName(state, copy.nest_level)) {
+ if (arity != nullptr) {
+ *arity = 1;
+ }
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Then vendor extended operators.
+ if (ParseOneCharToken(state, 'v') && ParseDigit(state, arity) &&
+ ParseSourceName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+  // Other operator names should start with a lowercase letter followed
+  // by a lowercase or uppercase letter.
+ if (!(IsLower(RemainingInput(state)[0]) &&
+ IsAlpha(RemainingInput(state)[1]))) {
+ return false;
+ }
+ // We may want to perform a binary search if we really need speed.
+ const AbbrevPair *p;
+ for (p = kOperatorList; p->abbrev != nullptr; ++p) {
+ if (RemainingInput(state)[0] == p->abbrev[0] &&
+ RemainingInput(state)[1] == p->abbrev[1]) {
+ if (arity != nullptr) {
+ *arity = p->arity;
+ }
+ MaybeAppend(state, "operator");
+ if (IsLower(*p->real_name)) { // new, delete, etc.
+ MaybeAppend(state, " ");
+ }
+ MaybeAppend(state, p->real_name);
+ state->parse_state.mangled_idx += 2;
+ return true;
+ }
+ }
+ return false;
+}
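+
+// For example (illustrative): "pl" maps to "operator+", "nw" maps to
+// "operator new" (note the space inserted before lowercase names), and
+// "cv3Foo" parses as the conversion operator "operator Foo".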
+
+// <special-name> ::= TV <type>
+// ::= TT <type>
+// ::= TI <type>
+// ::= TS <type>
+// ::= TH <type> # thread-local
+// ::= Tc <call-offset> <call-offset> <(base) encoding>
+// ::= GV <(object) name>
+// ::= T <call-offset> <(base) encoding>
+// G++ extensions:
+// ::= TC <type> <(offset) number> _ <(base) type>
+// ::= TF <type>
+// ::= TJ <type>
+// ::= GR <name>
+// ::= GA <encoding>
+// ::= Th <call-offset> <(base) encoding>
+// ::= Tv <call-offset> <(base) encoding>
+//
+// Note: we don't care much about them since they don't appear in
+// stack traces. They are special data.
+static bool ParseSpecialName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTISH") &&
+ ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Tc") && ParseCallOffset(state) &&
+ ParseCallOffset(state) && ParseEncoding(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "GV") && ParseName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'T') && ParseCallOffset(state) &&
+ ParseEncoding(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // G++ extensions
+ if (ParseTwoCharToken(state, "TC") && ParseType(state) &&
+ ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
+ DisableAppend(state) && ParseType(state)) {
+ RestoreAppend(state, copy.append);
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "FJ") &&
+ ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "GR") && ParseName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "hv") &&
+ ParseCallOffset(state) && ParseEncoding(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <call-offset> ::= h <nv-offset> _
+// ::= v <v-offset> _
+static bool ParseCallOffset(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'h') && ParseNVOffset(state) &&
+ ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'v') && ParseVOffset(state) &&
+ ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <nv-offset> ::= <(offset) number>
+static bool ParseNVOffset(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ return ParseNumber(state, nullptr);
+}
+
+// <v-offset> ::= <(offset) number> _ <(virtual offset) number>
+static bool ParseVOffset(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
+ ParseNumber(state, nullptr)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <ctor-dtor-name> ::= C1 | C2 | C3 | CI1 <base-class-type> | CI2
+// <base-class-type>
+// ::= D0 | D1 | D2
+// # GCC extensions: "unified" constructor/destructor. See
+// #
+// https://github.com/gcc-mirror/gcc/blob/7ad17b583c3643bd4557f29b8391ca7ef08391f5/gcc/cp/mangle.c#L1847
+// ::= C4 | D4
+static bool ParseCtorDtorName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'C')) {
+ if (ParseCharClass(state, "1234")) {
+ const char *const prev_name =
+ state->out + state->parse_state.prev_name_idx;
+ MaybeAppendWithLength(state, prev_name,
+ state->parse_state.prev_name_length);
+ return true;
+ } else if (ParseOneCharToken(state, 'I') && ParseCharClass(state, "12") &&
+ ParseClassEnumType(state)) {
+ return true;
+ }
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "0124")) {
+ const char *const prev_name = state->out + state->parse_state.prev_name_idx;
+ MaybeAppend(state, "~");
+ MaybeAppendWithLength(state, prev_name,
+ state->parse_state.prev_name_length);
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
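+
+// For example (illustrative): in "_ZN3FooC1Ev" the previous name recorded
+// while parsing "3Foo" is "Foo", so "C1" appends "Foo" again and the whole
+// name demangles to "Foo::Foo()"; "D1" in the same position would append
+// "~Foo".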
+
+// <decltype> ::= Dt <expression> E # decltype of an id-expression or class
+// # member access (C++0x)
+// ::= DT <expression> E # decltype of an expression (C++0x)
+static bool ParseDecltype(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
+ ParseExpression(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <type> ::= <CV-qualifiers> <type>
+// ::= P <type> # pointer-to
+// ::= R <type> # reference-to
+// ::= O <type> # rvalue reference-to (C++0x)
+// ::= C <type> # complex pair (C 2000)
+// ::= G <type> # imaginary (C 2000)
+// ::= U <source-name> <type> # vendor extended type qualifier
+// ::= <builtin-type>
+// ::= <function-type>
+// ::= <class-enum-type> # note: just an alias for <name>
+// ::= <array-type>
+// ::= <pointer-to-member-type>
+// ::= <template-template-param> <template-args>
+// ::= <template-param>
+// ::= <decltype>
+// ::= <substitution>
+// ::= Dp <type> # pack expansion of (C++0x)
+// ::= Dv <num-elems> _ # GNU vector extension
+//
+static bool ParseType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+
+  // We should check CV-qualifiers and the O/P/R/C/G prefixes first.
+ //
+ // CV-qualifiers overlap with some operator names, but an operator name is not
+ // valid as a type. To avoid an ambiguity that can lead to exponential time
+ // complexity, refuse to backtrack the CV-qualifiers.
+ //
+ // _Z4aoeuIrMvvE
+ // => _Z 4aoeuI rM v v E
+ // aoeu<operator%=, void, void>
+ // => _Z 4aoeuI r Mv v E
+ // aoeu<void void::* restrict>
+ //
+ // By consuming the CV-qualifiers first, the former parse is disabled.
+ if (ParseCVQualifiers(state)) {
+ const bool result = ParseType(state);
+ if (!result) state->parse_state = copy;
+ return result;
+ }
+ state->parse_state = copy;
+
+ // Similarly, these tag characters can overlap with other <name>s resulting in
+ // two different parse prefixes that land on <template-args> in the same
+ // place, such as "C3r1xI...". So, disable the "ctor-name = C3" parse by
+ // refusing to backtrack the tag characters.
+ if (ParseCharClass(state, "OPRCG")) {
+ const bool result = ParseType(state);
+ if (!result) state->parse_state = copy;
+ return result;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Dp") && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'U') && ParseSourceName(state) &&
+ ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseBuiltinType(state) || ParseFunctionType(state) ||
+ ParseClassEnumType(state) || ParseArrayType(state) ||
+ ParsePointerToMemberType(state) || ParseDecltype(state) ||
+ // "std" on its own isn't a type.
+ ParseSubstitution(state, /*accept_std=*/false)) {
+ return true;
+ }
+
+ if (ParseTemplateTemplateParam(state) && ParseTemplateArgs(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Less greedy than <template-template-param> <template-args>.
+ if (ParseTemplateParam(state)) {
+ return true;
+ }
+
+ if (ParseTwoCharToken(state, "Dv") && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <CV-qualifiers> ::= [r] [V] [K]
+// We don't allow empty <CV-qualifiers> to avoid an infinite loop in
+// ParseType().
+static bool ParseCVQualifiers(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ int num_cv_qualifiers = 0;
+ num_cv_qualifiers += ParseOneCharToken(state, 'r');
+ num_cv_qualifiers += ParseOneCharToken(state, 'V');
+ num_cv_qualifiers += ParseOneCharToken(state, 'K');
+ return num_cv_qualifiers > 0;
+}
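+
+// For example (illustrative): the 'K' in "_ZNK3Foo3barEv" (a const member
+// function) is consumed here; 'r' and 'V' likewise mark restrict and
+// volatile. The qualifiers are parsed but not printed by this demangler.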
+
+// <builtin-type> ::= v, etc. # single-character builtin types
+// ::= u <source-name>
+// ::= Dd, etc. # two-character builtin types
+//
+// Not supported:
+// ::= DF <number> _ # _FloatN (N bits)
+//
+static bool ParseBuiltinType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ const AbbrevPair *p;
+ for (p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
+ // Guaranteed only 1- or 2-character strings in kBuiltinTypeList.
+ if (p->abbrev[1] == '\0') {
+ if (ParseOneCharToken(state, p->abbrev[0])) {
+ MaybeAppend(state, p->real_name);
+ return true;
+ }
+ } else if (p->abbrev[2] == '\0' && ParseTwoCharToken(state, p->abbrev)) {
+ MaybeAppend(state, p->real_name);
+ return true;
+ }
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'u') && ParseSourceName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <exception-spec> ::= Do # non-throwing
+// exception-specification (e.g.,
+// noexcept, throw())
+// ::= DO <expression> E # computed (instantiation-dependent)
+// noexcept
+// ::= Dw <type>+ E # dynamic exception specification
+// with instantiation-dependent types
+static bool ParseExceptionSpec(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ if (ParseTwoCharToken(state, "Do")) return true;
+
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "DO") && ParseExpression(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ if (ParseTwoCharToken(state, "Dw") && OneOrMore(ParseType, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <function-type> ::= [exception-spec] F [Y] <bare-function-type> [O] E
+static bool ParseFunctionType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (Optional(ParseExceptionSpec(state)) && ParseOneCharToken(state, 'F') &&
+ Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) &&
+ Optional(ParseOneCharToken(state, 'O')) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <bare-function-type> ::= <(signature) type>+
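+// The parameter types are consumed with output appending disabled, so every
+// signature renders as "()"; e.g. "_Z1fi" (f(int)) demangles to "f()".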
+static bool ParseBareFunctionType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ DisableAppend(state);
+ if (OneOrMore(ParseType, state)) {
+ RestoreAppend(state, copy.append);
+ MaybeAppend(state, "()");
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <class-enum-type> ::= <name>
+static bool ParseClassEnumType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ return ParseName(state);
+}
+
+// <array-type> ::= A <(positive dimension) number> _ <(element) type>
+// ::= A [<(dimension) expression>] _ <(element) type>
+static bool ParseArrayType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'A') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_') && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'A') && Optional(ParseExpression(state)) &&
+ ParseOneCharToken(state, '_') && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <pointer-to-member-type> ::= M <(class) type> <(member) type>
+static bool ParsePointerToMemberType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'M') && ParseType(state) && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <template-param> ::= T_
+// ::= T <parameter-2 non-negative number> _
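+// Both forms render as "?"; e.g. "T_" (the first template parameter) and
+// "T0_" (the second) are consumed but printed identically.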
+static bool ParseTemplateParam(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseTwoCharToken(state, "T_")) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true;
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'T') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <template-template-param> ::= <template-param>
+// ::= <substitution>
+static bool ParseTemplateTemplateParam(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ return (ParseTemplateParam(state) ||
+ // "std" on its own isn't a template.
+ ParseSubstitution(state, /*accept_std=*/false));
+}
+
+// <template-args> ::= I <template-arg>+ E
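+// The argument list is consumed with appending disabled and rendered as "<>";
+// e.g. "_Z1fIiEvi" (void f<int>(int)) demangles to "f<>()".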
+static bool ParseTemplateArgs(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ DisableAppend(state);
+ if (ParseOneCharToken(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
+ ParseOneCharToken(state, 'E')) {
+ RestoreAppend(state, copy.append);
+ MaybeAppend(state, "<>");
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <template-arg> ::= <type>
+// ::= <expr-primary>
+// ::= J <template-arg>* E # argument pack
+// ::= X <expression> E
+static bool ParseTemplateArg(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'J') && ZeroOrMore(ParseTemplateArg, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // There can be significant overlap between the following leading to
+ // exponential backtracking:
+ //
+ // <expr-primary> ::= L <type> <expr-cast-value> E
+ // e.g. L 2xxIvE 1 E
+ // <type> ==> <local-source-name> <template-args>
+ // e.g. L 2xx IvE
+ //
+ // This means parsing an entire <type> twice, and <type> can contain
+ // <template-arg>, so this can generate exponential backtracking. There is
+ // only overlap when the remaining input starts with "L <source-name>", so
+ // parse all cases that can start this way jointly to share the common prefix.
+ //
+ // We have:
+ //
+ // <template-arg> ::= <type>
+ // ::= <expr-primary>
+ //
+ // First, drop all the productions of <type> that must start with something
+ // other than 'L'. All that's left is <class-enum-type>; inline it.
+ //
+ // <type> ::= <nested-name> # starts with 'N'
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ // ::= <local-name> # starts with 'Z'
+ //
+ // Drop and inline again:
+ //
+ // <type> ::= <unscoped-name>
+ // ::= <unscoped-name> <template-args>
+ // ::= <substitution> <template-args> # starts with 'S'
+ //
+ // Merge the first two, inline <unscoped-name>, drop last:
+ //
+ // <type> ::= <unqualified-name> [<template-args>]
+ // ::= St <unqualified-name> [<template-args>] # starts with 'S'
+ //
+ // Drop and inline:
+ //
+ // <type> ::= <operator-name> [<template-args>] # starts with lowercase
+ // ::= <ctor-dtor-name> [<template-args>] # starts with 'C' or 'D'
+ // ::= <source-name> [<template-args>] # starts with digit
+ // ::= <local-source-name> [<template-args>]
+ // ::= <unnamed-type-name> [<template-args>] # starts with 'U'
+ //
+ // One more time:
+ //
+ // <type> ::= L <source-name> [<template-args>]
+ //
+ // Likewise with <expr-primary>:
+ //
+ // <expr-primary> ::= L <type> <expr-cast-value> E
+ // ::= LZ <encoding> E # cannot overlap; drop
+ // ::= L <mangled_name> E # cannot overlap; drop
+ //
+ // By similar reasoning as shown above, the only <type>s starting with
+ // <source-name> are "<source-name> [<template-args>]". Inline this.
+ //
+ // <expr-primary> ::= L <source-name> [<template-args>] <expr-cast-value> E
+ //
+ // Now inline both of these into <template-arg>:
+ //
+ // <template-arg> ::= L <source-name> [<template-args>]
+ // ::= L <source-name> [<template-args>] <expr-cast-value> E
+ //
+ // Merge them and we're done:
+ // <template-arg>
+ // ::= L <source-name> [<template-args>] [<expr-cast-value> E]
+ if (ParseLocalSourceName(state) && Optional(ParseTemplateArgs(state))) {
+ copy = state->parse_state;
+ if (ParseExprCastValue(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ return true;
+ }
+
+ // Now that the overlapping cases can't reach this code, we can safely call
+ // both of these.
+ if (ParseType(state) || ParseExprPrimary(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <unresolved-type> ::= <template-param> [<template-args>]
+// ::= <decltype>
+// ::= <substitution>
+static inline bool ParseUnresolvedType(State *state) {
+ // No ComplexityGuard because we don't copy the state in this stack frame.
+ return (ParseTemplateParam(state) && Optional(ParseTemplateArgs(state))) ||
+ ParseDecltype(state) || ParseSubstitution(state, /*accept_std=*/false);
+}
+
+// <simple-id> ::= <source-name> [<template-args>]
+static inline bool ParseSimpleId(State *state) {
+ // No ComplexityGuard because we don't copy the state in this stack frame.
+
+ // Note: <simple-id> cannot be followed by a parameter pack; see comment in
+ // ParseUnresolvedType.
+ return ParseSourceName(state) && Optional(ParseTemplateArgs(state));
+}
+
+// <base-unresolved-name> ::= <source-name> [<template-args>]
+// ::= on <operator-name> [<template-args>]
+// ::= dn <destructor-name>
+static bool ParseBaseUnresolvedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ if (ParseSimpleId(state)) {
+ return true;
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "on") && ParseOperatorName(state, nullptr) &&
+ Optional(ParseTemplateArgs(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "dn") &&
+ (ParseUnresolvedType(state) || ParseSimpleId(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <unresolved-name> ::= [gs] <base-unresolved-name>
+// ::= sr <unresolved-type> <base-unresolved-name>
+// ::= srN <unresolved-type> <unresolved-qualifier-level>+ E
+// <base-unresolved-name>
+// ::= [gs] sr <unresolved-qualifier-level>+ E
+// <base-unresolved-name>
+static bool ParseUnresolvedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ ParseState copy = state->parse_state;
+ if (Optional(ParseTwoCharToken(state, "gs")) &&
+ ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "sr") && ParseUnresolvedType(state) &&
+ ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "sr") && ParseOneCharToken(state, 'N') &&
+ ParseUnresolvedType(state) &&
+ OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
+ ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (Optional(ParseTwoCharToken(state, "gs")) &&
+ ParseTwoCharToken(state, "sr") &&
+ OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
+ ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <expression> ::= <1-ary operator-name> <expression>
+// ::= <2-ary operator-name> <expression> <expression>
+// ::= <3-ary operator-name> <expression> <expression> <expression>
+// ::= cl <expression>+ E
+// ::= cp <simple-id> <expression>* E # Clang-specific.
+// ::= cv <type> <expression> # type (expression)
+// ::= cv <type> _ <expression>* E # type (expr-list)
+// ::= st <type>
+// ::= <template-param>
+// ::= <function-param>
+// ::= <expr-primary>
+// ::= dt <expression> <unresolved-name> # expr.name
+// ::= pt <expression> <unresolved-name> # expr->name
+// ::= sp <expression> # argument pack expansion
+// ::= sr <type> <unqualified-name> <template-args>
+// ::= sr <type> <unqualified-name>
+// <function-param> ::= fp <(top-level) CV-qualifiers> _
+// ::= fp <(top-level) CV-qualifiers> <number> _
+// ::= fL <number> p <(top-level) CV-qualifiers> _
+// ::= fL <number> p <(top-level) CV-qualifiers> <number> _
+static bool ParseExpression(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseTemplateParam(state) || ParseExprPrimary(state)) {
+ return true;
+ }
+
+ ParseState copy = state->parse_state;
+
+ // Object/function call expression.
+ if (ParseTwoCharToken(state, "cl") && OneOrMore(ParseExpression, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Clang-specific "cp <simple-id> <expression>* E"
+ // https://clang.llvm.org/doxygen/ItaniumMangle_8cpp_source.html#l04338
+ if (ParseTwoCharToken(state, "cp") && ParseSimpleId(state) &&
+ ZeroOrMore(ParseExpression, state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Function-param expression (level 0).
+ if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Function-param expression (level 1+).
+ if (ParseTwoCharToken(state, "fL") && Optional(ParseNumber(state, nullptr)) &&
+ ParseOneCharToken(state, 'p') && Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Parse the conversion expressions jointly to avoid re-parsing the <type> in
+ // their common prefix. Parsed as:
+ // <expression> ::= cv <type> <conversion-args>
+ // <conversion-args> ::= _ <expression>* E
+ // ::= <expression>
+ //
+ // Also don't try ParseOperatorName after seeing "cv", since ParseOperatorName
+ // also needs to accept "cv <type>" in other contexts.
+ if (ParseTwoCharToken(state, "cv")) {
+ if (ParseType(state)) {
+ ParseState copy2 = state->parse_state;
+ if (ParseOneCharToken(state, '_') && ZeroOrMore(ParseExpression, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy2;
+ if (ParseExpression(state)) {
+ return true;
+ }
+ }
+ } else {
+ // Parse unary, binary, and ternary operator expressions jointly, taking
+ // care not to re-parse subexpressions repeatedly. Parse like:
+ // <expression> ::= <operator-name> <expression>
+ // [<one-to-two-expressions>]
+ // <one-to-two-expressions> ::= <expression> [<expression>]
+ int arity = -1;
+ if (ParseOperatorName(state, &arity) &&
+ arity > 0 && // 0 arity => disabled.
+ (arity < 3 || ParseExpression(state)) &&
+ (arity < 2 || ParseExpression(state)) &&
+ (arity < 1 || ParseExpression(state))) {
+ return true;
+ }
+ }
+ state->parse_state = copy;
+
+ // sizeof type
+ if (ParseTwoCharToken(state, "st") && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Object and pointer member access expressions.
+ if ((ParseTwoCharToken(state, "dt") || ParseTwoCharToken(state, "pt")) &&
+ ParseExpression(state) && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Pointer-to-member access expressions. This parses the same as a binary
+ // operator, but it's implemented separately because "ds" shouldn't be
+ // accepted in other contexts that parse an operator name.
+ if (ParseTwoCharToken(state, "ds") && ParseExpression(state) &&
+ ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Parameter pack expansion
+ if (ParseTwoCharToken(state, "sp") && ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return ParseUnresolvedName(state);
+}
+
+// <expr-primary> ::= L <type> <(value) number> E
+// ::= L <type> <(value) float> E
+// ::= L <mangled-name> E
+// // A bug in g++'s C++ ABI version 2 (-fabi-version=2).
+// ::= LZ <encoding> E
+//
+// Warning, subtle: the "bug" LZ production above is ambiguous with the first
+// production where <type> starts with <local-name>, which can lead to
+// exponential backtracking in two scenarios:
+//
+// - When whatever follows the E in the <local-name> in the first production is
+// not a name, we backtrack the whole <encoding> and re-parse the whole thing.
+//
+// - When whatever follows the <local-name> in the first production is not a
+// number and this <expr-primary> may be followed by a name, we backtrack the
+// <name> and re-parse it.
+//
+// Moreover this ambiguity isn't always resolved -- for example, the following
+// has two different parses:
+//
+// _ZaaILZ4aoeuE1x1EvE
+// => operator&&<aoeu, x, E, void>
+// => operator&&<(aoeu::x)(1), void>
+//
+// To resolve this, we just do what GCC's demangler does, and refuse to parse
+// casts to <local-name> types.
+static bool ParseExprPrimary(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+
+ // The "LZ" special case: if we see LZ, we commit to accept "LZ <encoding> E"
+ // or fail, no backtracking.
+ if (ParseTwoCharToken(state, "LZ")) {
+ if (ParseEncoding(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+
+ state->parse_state = copy;
+ return false;
+ }
+
+ // The merged cast production.
+ if (ParseOneCharToken(state, 'L') && ParseType(state) &&
+ ParseExprCastValue(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'L') && ParseMangledName(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <number> or <float>, followed by 'E', as described above ParseExprPrimary.
+static bool ParseExprCastValue(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ // We have to be able to backtrack after accepting a number because we could
+ // have e.g. "7fffE", which will accept "7" as a number but then fail to find
+ // the 'E'.
+ ParseState copy = state->parse_state;
+ if (ParseNumber(state, nullptr) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseFloatNumber(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <local-name> ::= Z <(function) encoding> E <(entity) name> [<discriminator>]
+// ::= Z <(function) encoding> E s [<discriminator>]
+//
+// Parsing a common prefix of these two productions together avoids an
+// exponential blowup of backtracking. Parse like:
+// <local-name> := Z <encoding> E <local-name-suffix>
+// <local-name-suffix> ::= s [<discriminator>]
+// ::= <name> [<discriminator>]
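+//
+// For example, "_ZZ3foovE3bar" parses as Z 3foov E 3bar and is expected to
+// render as "foo()::bar".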
+
+static bool ParseLocalNameSuffix(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ if (MaybeAppend(state, "::") && ParseName(state) &&
+ Optional(ParseDiscriminator(state))) {
+ return true;
+ }
+
+  // Since we're not going to overwrite the above "::" by re-parsing the
+  // <encoding> (whose trailing '\0' byte was in the byte now holding the
+  // first ':'), we have to roll back the "::" if the <name> parse failed.
+ if (state->parse_state.append) {
+ state->out[state->parse_state.out_cur_idx - 2] = '\0';
+ }
+
+ return ParseOneCharToken(state, 's') && Optional(ParseDiscriminator(state));
+}
+
+static bool ParseLocalName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
+ ParseOneCharToken(state, 'E') && ParseLocalNameSuffix(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <discriminator> := _ <(non-negative) number>
+static bool ParseDiscriminator(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <substitution> ::= S_
+// ::= S <seq-id> _
+// ::= St, etc.
+//
+// "St" is special in that it's not valid as a standalone name, and it *is*
+// allowed to precede a name without being wrapped in "N...E". This means that
+// if we accept it on its own, we can accept "St1a" and try to parse
+// template-args, then fail and backtrack, accept "St" on its own, then "1a" as
+// an unqualified name and re-parse the same template-args. To block this
+// exponential backtracking, we disable it with 'accept_std=false' in
+// problematic contexts.
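+//
+// For example, "S_" and "S0_" render as "?" (substitutions are not tracked),
+// while "St" expands to "std" and, assuming the usual kSubstitutionList
+// entries, "Ss" expands to "std::string".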
+static bool ParseSubstitution(State *state, bool accept_std) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseTwoCharToken(state, "S_")) {
+ MaybeAppend(state, "?"); // We don't support substitutions.
+ return true;
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'S') && ParseSeqId(state) &&
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "?"); // We don't support substitutions.
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Expand abbreviations like "St" => "std".
+ if (ParseOneCharToken(state, 'S')) {
+ const AbbrevPair *p;
+ for (p = kSubstitutionList; p->abbrev != nullptr; ++p) {
+ if (RemainingInput(state)[0] == p->abbrev[1] &&
+ (accept_std || p->abbrev[1] != 't')) {
+ MaybeAppend(state, "std");
+ if (p->real_name[0] != '\0') {
+ MaybeAppend(state, "::");
+ MaybeAppend(state, p->real_name);
+ }
+ ++state->parse_state.mangled_idx;
+ return true;
+ }
+ }
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// Parse <mangled-name>, optionally followed by either a function-clone suffix
+// or version suffix. Returns true only if the entire mangled input was
+// consumed.
+static bool ParseTopLevelMangledName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseMangledName(state)) {
+ if (RemainingInput(state)[0] != '\0') {
+ // Drop trailing function clone suffix, if any.
+ if (IsFunctionCloneSuffix(RemainingInput(state))) {
+ return true;
+ }
+ // Append trailing version suffix if any.
+ // ex. _Z3foo@@GLIBCXX_3.4
+ if (RemainingInput(state)[0] == '@') {
+ MaybeAppend(state, RemainingInput(state));
+ return true;
+ }
+ return false; // Unconsumed suffix.
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool Overflowed(const State *state) {
+ return state->parse_state.out_cur_idx >= state->out_end_idx;
+}
+
+// The demangler entry point.
+bool Demangle(const char *mangled, char *out, int out_size) {
+ State state;
+ InitState(&state, mangled, out, out_size);
+ return ParseTopLevelMangledName(&state) && !Overflowed(&state) &&
+ state.parse_state.out_cur_idx > 0;
+}
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/demangle.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/demangle.h
new file mode 100644
index 00000000000..a9b42edb451
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/demangle.h
@@ -0,0 +1,71 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// An async-signal-safe and thread-safe demangler for Itanium C++ ABI
+// (aka G++ V3 ABI).
+//
+// The demangler is implemented to be used in async signal handlers to
+// symbolize stack traces. We cannot use libstdc++'s
+// abi::__cxa_demangle() in such signal handlers since it's not async
+// signal safe (it uses malloc() internally).
+//
+// Note that this demangler doesn't support full demangling. More
+// specifically, it doesn't print types of function parameters and
+// types of template arguments. It just skips them. However, it's
+// still very useful to extract basic information such as class,
+// function, constructor, destructor, and operator names.
+//
+// See the implementation note in demangle.cc if you are interested.
+//
+// Example:
+//
+// | Mangled Name | The Demangler | abi::__cxa_demangle()
+// |---------------|---------------|-----------------------
+// | _Z1fv | f() | f()
+// | _Z1fi | f() | f(int)
+// | _Z3foo3bar | foo() | foo(bar)
+// | _Z1fIiEvi | f<>() | void f<int>(int)
+// | _ZN1N1fE | N::f | N::f
+// | _ZN3Foo3BarEv | Foo::Bar() | Foo::Bar()
+// | _Zrm1XS_ | operator%() | operator%(X, X)
+// | _ZN3FooC1Ev | Foo::Foo() | Foo::Foo()
+// | _Z1fSs | f() | f(std::basic_string<char,
+// | | | std::char_traits<char>,
+// | | | std::allocator<char> >)
+//
+// See the unit test for more examples.
+//
+// Note: we might want to write demanglers for ABIs other than Itanium
+// C++ ABI in the future.
+//
+
+#ifndef ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_
+#define ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Demangle `mangled`. On success, return true and write the
+// demangled symbol name to `out`. Otherwise, return false.
+// `out` is modified even if demangling is unsuccessful.
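+//
+// A minimal usage sketch (the buffer size is chosen arbitrarily):
+//
+//   char out[1024];
+//   if (Demangle("_ZN3Foo3BarEv", out, sizeof(out))) {
+//     // out now holds "Foo::Bar()".
+//   }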
+bool Demangle(const char *mangled, char *out, int out_size);
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.cc
new file mode 100644
index 00000000000..ba4bbc3855a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.cc
@@ -0,0 +1,383 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Allow dynamic symbol lookup in an in-memory Elf image.
+//
+
+#include "y_absl/debugging/internal/elf_mem_image.h"
+
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE // defined in elf_mem_image.h
+
+#include <string.h>
+#include <cassert>
+#include <cstddef>
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+
+// From binutils/include/elf/common.h (this doesn't appear to be documented
+// anywhere else).
+//
+// /* This flag appears in a Versym structure. It means that the symbol
+// is hidden, and is only visible with an explicit version number.
+// This is a GNU extension. */
+// #define VERSYM_HIDDEN 0x8000
+//
+// /* This is the mask for the rest of the Versym information. */
+// #define VERSYM_VERSION 0x7fff
+
+#define VERSYM_VERSION 0x7fff
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+namespace {
+
+#if __SIZEOF_POINTER__ == 4
+const int kElfClass = ELFCLASS32;
+int ElfBind(const ElfW(Sym) *symbol) { return ELF32_ST_BIND(symbol->st_info); }
+int ElfType(const ElfW(Sym) *symbol) { return ELF32_ST_TYPE(symbol->st_info); }
+#elif __SIZEOF_POINTER__ == 8
+const int kElfClass = ELFCLASS64;
+int ElfBind(const ElfW(Sym) *symbol) { return ELF64_ST_BIND(symbol->st_info); }
+int ElfType(const ElfW(Sym) *symbol) { return ELF64_ST_TYPE(symbol->st_info); }
+#else
+const int kElfClass = -1;
+int ElfBind(const ElfW(Sym) *) {
+ ABSL_RAW_LOG(FATAL, "Unexpected word size");
+ return 0;
+}
+int ElfType(const ElfW(Sym) *) {
+ ABSL_RAW_LOG(FATAL, "Unexpected word size");
+ return 0;
+}
+#endif
+
+// Extract an element from one of the ELF tables and cast it to the desired
+// type. This is just simple arithmetic and a glorified cast.
+// Callers are responsible for bounds checking.
+template <typename T>
+const T *GetTableElement(const ElfW(Ehdr) * ehdr, ElfW(Off) table_offset,
+ ElfW(Word) element_size, size_t index) {
+ return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
+ + table_offset
+ + index * element_size);
+}
+
+} // namespace
+
+// The value of this variable doesn't matter; it's used only for its
+// unique address.
+const int ElfMemImage::kInvalidBaseSentinel = 0;
+
+ElfMemImage::ElfMemImage(const void *base) {
+ ABSL_RAW_CHECK(base != kInvalidBase, "bad pointer");
+ Init(base);
+}
+
+int ElfMemImage::GetNumSymbols() const {
+ if (!hash_) {
+ return 0;
+ }
+ // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
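+  // In that layout, hash_[0] is nbucket and hash_[1] is nchain, which equals
+  // the number of entries in the dynamic symbol table.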
+ return hash_[1];
+}
+
+const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
+ ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
+ return dynsym_ + index;
+}
+
+const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
+ ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
+ return versym_ + index;
+}
+
+const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
+ ABSL_RAW_CHECK(index < ehdr_->e_phnum, "index out of range");
+ return GetTableElement<ElfW(Phdr)>(ehdr_,
+ ehdr_->e_phoff,
+ ehdr_->e_phentsize,
+ index);
+}
+
+const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
+ ABSL_RAW_CHECK(offset < strsize_, "offset out of range");
+ return dynstr_ + offset;
+}
+
+const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
+ if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
+    // Symbol corresponds to a "special" section (e.g. SHN_ABS).
+ return reinterpret_cast<const void *>(sym->st_value);
+ }
+ ABSL_RAW_CHECK(link_base_ < sym->st_value, "symbol out of range");
+ return GetTableElement<char>(ehdr_, 0, 1, sym->st_value - link_base_);
+}
+
+const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
+ ABSL_RAW_CHECK(0 <= index && static_cast<size_t>(index) <= verdefnum_,
+ "index out of range");
+ const ElfW(Verdef) *version_definition = verdef_;
+ while (version_definition->vd_ndx < index && version_definition->vd_next) {
+ const char *const version_definition_as_char =
+ reinterpret_cast<const char *>(version_definition);
+ version_definition =
+ reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
+ version_definition->vd_next);
+ }
+ return version_definition->vd_ndx == index ? version_definition : nullptr;
+}
+
+const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
+ const ElfW(Verdef) *verdef) const {
+ return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
+}
+
+const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
+ ABSL_RAW_CHECK(offset < strsize_, "offset out of range");
+ return dynstr_ + offset;
+}
+
+void ElfMemImage::Init(const void *base) {
+ ehdr_ = nullptr;
+ dynsym_ = nullptr;
+ dynstr_ = nullptr;
+ versym_ = nullptr;
+ verdef_ = nullptr;
+ hash_ = nullptr;
+ strsize_ = 0;
+ verdefnum_ = 0;
+ link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
+ if (!base) {
+ return;
+ }
+ const char *const base_as_char = reinterpret_cast<const char *>(base);
+ if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
+ base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
+ assert(false);
+ return;
+ }
+ int elf_class = base_as_char[EI_CLASS];
+ if (elf_class != kElfClass) {
+ assert(false);
+ return;
+ }
+ switch (base_as_char[EI_DATA]) {
+ case ELFDATA2LSB: {
+#ifndef ABSL_IS_LITTLE_ENDIAN
+ assert(false);
+ return;
+#endif
+ break;
+ }
+ case ELFDATA2MSB: {
+#ifndef ABSL_IS_BIG_ENDIAN
+ assert(false);
+ return;
+#endif
+ break;
+ }
+ default: {
+ assert(false);
+ return;
+ }
+ }
+
+ ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
+ const ElfW(Phdr) *dynamic_program_header = nullptr;
+ for (int i = 0; i < ehdr_->e_phnum; ++i) {
+ const ElfW(Phdr) *const program_header = GetPhdr(i);
+ switch (program_header->p_type) {
+ case PT_LOAD:
+ if (!~link_base_) {
+ link_base_ = program_header->p_vaddr;
+ }
+ break;
+ case PT_DYNAMIC:
+ dynamic_program_header = program_header;
+ break;
+ }
+ }
+ if (!~link_base_ || !dynamic_program_header) {
+ assert(false);
+    // Mark this image as not present. This cannot recurse infinitely.
+ Init(nullptr);
+ return;
+ }
+ ptrdiff_t relocation =
+ base_as_char - reinterpret_cast<const char *>(link_base_);
+ ElfW(Dyn) *dynamic_entry =
+ reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
+ relocation);
+ for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
+ const auto value = dynamic_entry->d_un.d_val + relocation;
+ switch (dynamic_entry->d_tag) {
+ case DT_HASH:
+ hash_ = reinterpret_cast<ElfW(Word) *>(value);
+ break;
+ case DT_SYMTAB:
+ dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
+ break;
+ case DT_STRTAB:
+ dynstr_ = reinterpret_cast<const char *>(value);
+ break;
+ case DT_VERSYM:
+ versym_ = reinterpret_cast<ElfW(Versym) *>(value);
+ break;
+ case DT_VERDEF:
+ verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
+ break;
+ case DT_VERDEFNUM:
+ verdefnum_ = dynamic_entry->d_un.d_val;
+ break;
+ case DT_STRSZ:
+ strsize_ = dynamic_entry->d_un.d_val;
+ break;
+ default:
+ // Unrecognized entries explicitly ignored.
+ break;
+ }
+ }
+ if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
+ !verdef_ || !verdefnum_ || !strsize_) {
+ assert(false); // invalid VDSO
+    // Mark this image as not present. This cannot recurse infinitely.
+ Init(nullptr);
+ return;
+ }
+}
+
+bool ElfMemImage::LookupSymbol(const char *name,
+ const char *version,
+ int type,
+ SymbolInfo *info_out) const {
+ for (const SymbolInfo& info : *this) {
+ if (strcmp(info.name, name) == 0 && strcmp(info.version, version) == 0 &&
+ ElfType(info.symbol) == type) {
+ if (info_out) {
+ *info_out = info;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ElfMemImage::LookupSymbolByAddress(const void *address,
+ SymbolInfo *info_out) const {
+ for (const SymbolInfo& info : *this) {
+ const char *const symbol_start =
+ reinterpret_cast<const char *>(info.address);
+ const char *const symbol_end = symbol_start + info.symbol->st_size;
+ if (symbol_start <= address && address < symbol_end) {
+ if (info_out) {
+ // Client wants to know details for that symbol (the usual case).
+ if (ElfBind(info.symbol) == STB_GLOBAL) {
+ // Strong symbol; just return it.
+ *info_out = info;
+ return true;
+ } else {
+ // Weak or local. Record it, but keep looking for a strong one.
+ *info_out = info;
+ }
+ } else {
+ // Client only cares if there is an overlapping symbol.
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
+ : index_(index), image_(image) {
+}
+
+const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
+ return &info_;
+}
+
+const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const {
+ return info_;
+}
+
+bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const {
+ return this->image_ == rhs.image_ && this->index_ == rhs.index_;
+}
+
+bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
+ return !(*this == rhs);
+}
+
+ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() {
+ this->Update(1);
+ return *this;
+}
+
+ElfMemImage::SymbolIterator ElfMemImage::begin() const {
+ SymbolIterator it(this, 0);
+ it.Update(0);
+ return it;
+}
+
+ElfMemImage::SymbolIterator ElfMemImage::end() const {
+ return SymbolIterator(this, GetNumSymbols());
+}
+
+void ElfMemImage::SymbolIterator::Update(int increment) {
+ const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
+ ABSL_RAW_CHECK(image->IsPresent() || increment == 0, "");
+ if (!image->IsPresent()) {
+ return;
+ }
+ index_ += increment;
+ if (index_ >= image->GetNumSymbols()) {
+ index_ = image->GetNumSymbols();
+ return;
+ }
+ const ElfW(Sym) *symbol = image->GetDynsym(index_);
+ const ElfW(Versym) *version_symbol = image->GetVersym(index_);
+ ABSL_RAW_CHECK(symbol && version_symbol, "");
+ const char *const symbol_name = image->GetDynstr(symbol->st_name);
+ const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
+ const ElfW(Verdef) *version_definition = nullptr;
+ const char *version_name = "";
+ if (symbol->st_shndx == SHN_UNDEF) {
+ // Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
+ // version_index could well be greater than verdefnum_, so calling
+ // GetVerdef(version_index) may trigger assertion.
+ } else {
+ version_definition = image->GetVerdef(version_index);
+ }
+ if (version_definition) {
+    // We expect 1 or 2 auxiliary entries: one for the version itself, plus
+    // an optional second if the version has a parent.
+ ABSL_RAW_CHECK(
+ version_definition->vd_cnt == 1 || version_definition->vd_cnt == 2,
+ "wrong number of entries");
+ const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
+ version_name = image->GetVerstr(version_aux->vda_name);
+ }
+ info_.name = symbol_name;
+ info_.version = version_name;
+ info_.address = image->GetSymAddr(symbol);
+ info_.symbol = symbol;
+}
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_HAVE_ELF_MEM_IMAGE
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h
new file mode 100644
index 00000000000..13a6b2e664c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Allow dynamic symbol lookup for in-memory Elf images.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
+#define ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
+
+// Including this will define the __GLIBC__ macro if glibc is being
+// used.
+#include <climits>
+
+#include "y_absl/base/config.h"
+
+// Maybe one day we can rewrite this file not to require the elf
+// symbol extensions in glibc, but for now we need them.
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+#error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set
+#endif
+
+#if defined(__ELF__) && !defined(__native_client__) && !defined(__asmjs__) && \
+ !defined(__wasm__)
+#define ABSL_HAVE_ELF_MEM_IMAGE 1
+#endif
+
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+
+#include <link.h> // for ElfW
+
+#if defined(__FreeBSD__) && !defined(ElfW)
+#define ElfW(x) __ElfN(x)
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// An in-memory ELF image (may not exist on disk).
+class ElfMemImage {
+ private:
+ // Sentinel: there could never be an elf image at &kInvalidBaseSentinel.
+ static const int kInvalidBaseSentinel;
+
+ public:
+ // Sentinel: there could never be an elf image at this address.
+ static constexpr const void *const kInvalidBase =
+ static_cast<const void*>(&kInvalidBaseSentinel);
+
+ // Information about a single vdso symbol.
+ // All pointers are into .dynsym, .dynstr, or .text of the VDSO.
+ // Do not free() them or modify through them.
+ struct SymbolInfo {
+ const char *name; // E.g. "__vdso_getcpu"
+ const char *version; // E.g. "LINUX_2.6", could be ""
+ // for unversioned symbol.
+ const void *address; // Relocated symbol address.
+ const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table.
+ };
+
+ // Supports iteration over all dynamic symbols.
+ class SymbolIterator {
+ public:
+ friend class ElfMemImage;
+ const SymbolInfo *operator->() const;
+ const SymbolInfo &operator*() const;
+ SymbolIterator& operator++();
+ bool operator!=(const SymbolIterator &rhs) const;
+ bool operator==(const SymbolIterator &rhs) const;
+ private:
+ SymbolIterator(const void *const image, int index);
+ void Update(int incr);
+ SymbolInfo info_;
+ int index_;
+ const void *const image_;
+ };
+
+ explicit ElfMemImage(const void *base);
+ void Init(const void *base);
+ bool IsPresent() const { return ehdr_ != nullptr; }
+ const ElfW(Phdr)* GetPhdr(int index) const;
+ const ElfW(Sym)* GetDynsym(int index) const;
+ const ElfW(Versym)* GetVersym(int index) const;
+ const ElfW(Verdef)* GetVerdef(int index) const;
+ const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
+ const char* GetDynstr(ElfW(Word) offset) const;
+ const void* GetSymAddr(const ElfW(Sym) *sym) const;
+ const char* GetVerstr(ElfW(Word) offset) const;
+ int GetNumSymbols() const;
+
+ SymbolIterator begin() const;
+ SymbolIterator end() const;
+
+ // Look up versioned dynamic symbol in the image.
+ // Returns false if image is not present, or doesn't contain given
+ // symbol/version/type combination.
+ // If info_out is non-null, additional details are filled in.
+ bool LookupSymbol(const char *name, const char *version,
+ int symbol_type, SymbolInfo *info_out) const;
+
+ // Find info about symbol (if any) which overlaps given address.
+ // Returns true if symbol was found; false if image isn't present
+ // or doesn't have a symbol overlapping given address.
+ // If info_out is non-null, additional details are filled in.
+ bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
+
+ private:
+ const ElfW(Ehdr) *ehdr_;
+ const ElfW(Sym) *dynsym_;
+ const ElfW(Versym) *versym_;
+ const ElfW(Verdef) *verdef_;
+ const ElfW(Word) *hash_;
+ const char *dynstr_;
+ size_t strsize_;
+ size_t verdefnum_;
+ ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
+};
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_HAVE_ELF_MEM_IMAGE
+
+#endif // ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc
new file mode 100644
index 00000000000..c59d92b6a38
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc
@@ -0,0 +1,203 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "y_absl/debugging/internal/examine_stack.h"
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#ifdef __APPLE__
+#include <sys/ucontext.h>
+#endif
+
+#include <csignal>
+#include <cstdio>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/debugging/stacktrace.h"
+#include "y_absl/debugging/symbolize.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Returns the program counter from signal context, or nullptr if
+// unknown. vuc is a ucontext_t*. We use void* to avoid the use of
+// ucontext_t on non-POSIX systems.
+void* GetProgramCounter(void* vuc) {
+#ifdef __linux__
+ if (vuc != nullptr) {
+ ucontext_t* context = reinterpret_cast<ucontext_t*>(vuc);
+#if defined(__aarch64__)
+ return reinterpret_cast<void*>(context->uc_mcontext.pc);
+#elif defined(__alpha__)
+ return reinterpret_cast<void*>(context->uc_mcontext.sc_pc);
+#elif defined(__arm__)
+ return reinterpret_cast<void*>(context->uc_mcontext.arm_pc);
+#elif defined(__hppa__)
+ return reinterpret_cast<void*>(context->uc_mcontext.sc_iaoq[0]);
+#elif defined(__i386__)
+ if (14 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
+ return reinterpret_cast<void*>(context->uc_mcontext.gregs[14]);
+#elif defined(__ia64__)
+ return reinterpret_cast<void*>(context->uc_mcontext.sc_ip);
+#elif defined(__m68k__)
+ return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
+#elif defined(__mips__)
+ return reinterpret_cast<void*>(context->uc_mcontext.pc);
+#elif defined(__powerpc64__)
+ return reinterpret_cast<void*>(context->uc_mcontext.gp_regs[32]);
+#elif defined(__powerpc__)
+ return reinterpret_cast<void*>(context->uc_mcontext.uc_regs->gregs[32]);
+#elif defined(__riscv)
+ return reinterpret_cast<void*>(context->uc_mcontext.__gregs[REG_PC]);
+#elif defined(__s390__) && !defined(__s390x__)
+ return reinterpret_cast<void*>(context->uc_mcontext.psw.addr & 0x7fffffff);
+#elif defined(__s390__) && defined(__s390x__)
+ return reinterpret_cast<void*>(context->uc_mcontext.psw.addr);
+#elif defined(__sh__)
+ return reinterpret_cast<void*>(context->uc_mcontext.pc);
+#elif defined(__sparc__) && !defined(__arch64__)
+ return reinterpret_cast<void*>(context->uc_mcontext.gregs[19]);
+#elif defined(__sparc__) && defined(__arch64__)
+ return reinterpret_cast<void*>(context->uc_mcontext.mc_gregs[19]);
+#elif defined(__x86_64__)
+ if (16 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
+ return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
+#elif defined(__e2k__)
+ return reinterpret_cast<void*>(context->uc_mcontext.cr0_hi);
+#else
+#error "Undefined Architecture."
+#endif
+ }
+#elif defined(__APPLE__)
+ if (vuc != nullptr) {
+ ucontext_t* signal_ucontext = reinterpret_cast<ucontext_t*>(vuc);
+#if defined(__aarch64__)
+ return reinterpret_cast<void*>(
+ __darwin_arm_thread_state64_get_pc(signal_ucontext->uc_mcontext->__ss));
+#elif defined(__arm__)
+#if __DARWIN_UNIX03
+ return reinterpret_cast<void*>(signal_ucontext->uc_mcontext->__ss.__pc);
+#else
+ return reinterpret_cast<void*>(signal_ucontext->uc_mcontext->ss.pc);
+#endif
+#elif defined(__i386__)
+#if __DARWIN_UNIX03
+ return reinterpret_cast<void*>(signal_ucontext->uc_mcontext->__ss.__eip);
+#else
+ return reinterpret_cast<void*>(signal_ucontext->uc_mcontext->ss.eip);
+#endif
+#elif defined(__x86_64__)
+#if __DARWIN_UNIX03
+ return reinterpret_cast<void*>(signal_ucontext->uc_mcontext->__ss.__rip);
+#else
+ return reinterpret_cast<void*>(signal_ucontext->uc_mcontext->ss.rip);
+#endif
+#endif
+ }
+#elif defined(__akaros__)
+ auto* ctx = reinterpret_cast<struct user_context*>(vuc);
+ return reinterpret_cast<void*>(get_user_ctx_pc(ctx));
+#endif
+ static_cast<void>(vuc);
+ return nullptr;
+}
+
+// The %p field width for printf() functions is two characters per byte,
+// and two extra for the leading "0x".
+static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
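+// (On LP64 targets this evaluates to 18: "0x" plus 16 hex digits.)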
+
+// Print a program counter, its stack frame size, and its symbol name.
+// Note that there is a separate symbolize_pc argument. Return addresses may be
+// at the end of the function, and this allows the caller to back up from pc if
+// appropriate.
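+// With symbolization enabled, each emitted line looks roughly like
+//   "    @ 0x7f8f8e2d1f10 128 MyFunction()"
+// where the address, frame size, and symbol are illustrative and the exact
+// pointer formatting is implementation-defined.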
+static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*),
+ void* writerfn_arg, void* pc,
+ void* symbolize_pc, int framesize,
+ const char* const prefix) {
+ char tmp[1024];
+ const char* symbol = "(unknown)";
+ if (y_absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
+ symbol = tmp;
+ }
+ char buf[1024];
+ if (framesize <= 0) {
+ snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix,
+ kPrintfPointerFieldWidth, pc, symbol);
+ } else {
+ snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix,
+ kPrintfPointerFieldWidth, pc, framesize, symbol);
+ }
+ writerfn(buf, writerfn_arg);
+}
+
+// Print a program counter and the corresponding stack frame size.
+static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*),
+ void* writerfn_arg, void* pc, int framesize,
+ const char* const prefix) {
+ char buf[100];
+ if (framesize <= 0) {
+ snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix,
+ kPrintfPointerFieldWidth, pc);
+ } else {
+ snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix,
+ kPrintfPointerFieldWidth, pc, framesize);
+ }
+ writerfn(buf, writerfn_arg);
+}
+
+void DumpPCAndFrameSizesAndStackTrace(
+ void* pc, void* const stack[], int frame_sizes[], int depth,
+ int min_dropped_frames, bool symbolize_stacktrace,
+ void (*writerfn)(const char*, void*), void* writerfn_arg) {
+ if (pc != nullptr) {
+ // We don't know the stack frame size for PC, use 0.
+ if (symbolize_stacktrace) {
+ DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, pc, pc, 0, "PC: ");
+ } else {
+ DumpPCAndFrameSize(writerfn, writerfn_arg, pc, 0, "PC: ");
+ }
+ }
+ for (int i = 0; i < depth; i++) {
+ if (symbolize_stacktrace) {
+      // Pass the address just before pc as the symbol address because pc is a
+ // return address, and an overrun may occur when the function ends with a
+ // call to a function annotated noreturn (e.g. CHECK). Note that we don't
+ // do this for pc above, as the adjustment is only correct for return
+ // addresses.
+ DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i],
+ reinterpret_cast<char*>(stack[i]) - 1,
+ frame_sizes[i], " ");
+ } else {
+ DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i],
+ " ");
+ }
+ }
+ if (min_dropped_frames > 0) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), " @ ... and at least %d more frames\n",
+ min_dropped_frames);
+ writerfn(buf, writerfn_arg);
+ }
+}
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.h
new file mode 100644
index 00000000000..32892a11504
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.h
@@ -0,0 +1,42 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
+#define ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Returns the program counter from signal context, or nullptr if
+// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
+// ucontext_t on non-POSIX systems.
+void* GetProgramCounter(void* vuc);
+
+// Uses `writerfn` to dump the program counter, stack trace, and stack
+// frame sizes.
+void DumpPCAndFrameSizesAndStackTrace(
+ void* pc, void* const stack[], int frame_sizes[], int depth,
+ int min_dropped_frames, bool symbolize_stacktrace,
+ void (*writerfn)(const char*, void*), void* writerfn_arg);
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stack_consumption.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stack_consumption.h
new file mode 100644
index 00000000000..80137ad0eb5
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stack_consumption.h
@@ -0,0 +1,50 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Helper function for measuring stack consumption of signal handlers.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
+#define ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
+
+#include "y_absl/base/config.h"
+
+// The code in this module is not portable.
+// Use this feature test macro to detect its availability.
+#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
+#error ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION cannot be set directly
+#elif !defined(__APPLE__) && !defined(_WIN32) && \
+ (defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || \
+ defined(__aarch64__) || defined(__riscv))
+#define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Returns the stack consumption in bytes for the code exercised by
+// signal_handler. To measure stack consumption, signal_handler is registered
+// as a signal handler, so the code that it exercises must be async-signal
+// safe. The argument of signal_handler is an implementation detail of signal
+// handlers and should be ignored by the code for signal_handler. Use global
+// variables to pass information between your test code and signal_handler.
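+//
+// A hypothetical use (the handler name and the measured code are
+// illustrative only):
+//
+//   void SimpleHandler(int) { /* async-signal-safe code to measure */ }
+//   ...
+//   int bytes = GetSignalHandlerStackConsumption(SimpleHandler);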
+int GetSignalHandlerStackConsumption(void (*signal_handler)(int));
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc
new file mode 100644
index 00000000000..700f6074885
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -0,0 +1,199 @@
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
+
+// Stack trace generation for aarch64.
+
+#if defined(__linux__)
+#include <sys/mman.h>
+#include <ucontext.h>
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <iostream>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/debugging/internal/address_is_readable.h"
+#include "y_absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
+#include "y_absl/debugging/stacktrace.h"
+
+static const uintptr_t kUnknownFrameSize = 0;
+
+#if defined(__linux__)
+// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
+static const unsigned char* GetKernelRtSigreturnAddress() {
+ constexpr uintptr_t kImpossibleAddress = 1;
+ ABSL_CONST_INIT static std::atomic<uintptr_t> memoized{kImpossibleAddress};
+ uintptr_t address = memoized.load(std::memory_order_relaxed);
+ if (address != kImpossibleAddress) {
+ return reinterpret_cast<const unsigned char*>(address);
+ }
+
+ address = reinterpret_cast<uintptr_t>(nullptr);
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+ y_absl::debugging_internal::VDSOSupport vdso;
+ if (vdso.IsPresent()) {
+ y_absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
+ auto lookup = [&](int type) {
+ return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type,
+ &symbol_info);
+ };
+ if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
+ symbol_info.address == nullptr) {
+ // Unexpected: VDSO is present, yet the expected symbol is missing
+ // or null.
+ assert(false && "VDSO is present, but doesn't have expected symbol");
+ } else {
+ if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
+ kImpossibleAddress) {
+ address = reinterpret_cast<uintptr_t>(symbol_info.address);
+ } else {
+ assert(false && "VDSO returned invalid address");
+ }
+ }
+ }
+#endif
+
+ memoized.store(address, std::memory_order_relaxed);
+ return reinterpret_cast<const unsigned char*>(address);
+}
+#endif // __linux__
+
+// Compute the size of the stack frame in [low..high), assuming low < high.
+// Return kUnknownFrameSize (0) when low >= high.
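+// For example, low == 0x1000 and high == 0x1040 yields 0x40 bytes, while
+// low >= high yields kUnknownFrameSize (0).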
+template<typename T>
+static inline uintptr_t ComputeStackFrameSize(const T* low,
+ const T* high) {
+ const char* low_char_ptr = reinterpret_cast<const char *>(low);
+ const char* high_char_ptr = reinterpret_cast<const char *>(high);
+ return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize;
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
+ void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
+ bool check_frame_size = true;
+
+#if defined(__linux__)
+ if (WITH_CONTEXT && uc != nullptr) {
+ // Check to see if next frame's return address is __kernel_rt_sigreturn.
+ if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
+ const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
+ // old_frame_pointer[0] is not suitable for unwinding, look at
+ // ucontext to discover frame pointer before signal.
+ void **const pre_signal_frame_pointer =
+ reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);
+
+ // Check that alleged frame pointer is actually readable. This is to
+ // prevent "double fault" in case we hit the first fault due to e.g.
+ // stack corruption.
+ if (!y_absl::debugging_internal::AddressIsReadable(
+ pre_signal_frame_pointer))
+ return nullptr;
+
+ // Alleged frame pointer is readable, use it for further unwinding.
+ new_frame_pointer = pre_signal_frame_pointer;
+
+      // Skip the frame size check if we return from a signal. We may be
+      // using an alternate stack for signals.
+ check_frame_size = false;
+ }
+ }
+#endif
+
+ // aarch64 ABI requires stack pointer to be 16-byte-aligned.
+ if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 15) != 0)
+ return nullptr;
+
+ // Check frame size. In strict mode, we assume frames to be under
+ // 100,000 bytes. In non-strict mode, we relax the limit to 1MB.
+ if (check_frame_size) {
+ const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
+ const uintptr_t frame_size =
+ ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
+ if (frame_size == kUnknownFrameSize || frame_size > max_size)
+ return nullptr;
+ }
+
+ return new_frame_pointer;
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+#ifdef __GNUC__
+ void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
+#else
+# error reading stack pointer not yet supported on this platform.
+#endif
+
+ skip_count++; // Skip the frame for this function.
+ int n = 0;
+
+  // The frame pointer points to the low address of a frame. The first
+  // 64-bit word of a frame points to the next frame up the call chain, which
+  // normally is just after the high address of the current frame. The second
+  // word of a frame contains the return address to the caller. To find a pc
+  // value associated with the current frame, we need to go down a level in
+  // the call chain. So we remember the return address of the last frame
+  // seen. This does not work for the first stack frame, which belongs to
+  // UnwindImpl(), but we skip that frame anyway.
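+  //
+  // For illustration only (an editor's sketch, simplified): an AArch64 frame
+  // record can be pictured as
+  //
+  //   struct FrameRecord {
+  //     FrameRecord* next_fp;   // saved x29 -> frame_pointer[0]
+  //     void* return_address;   // saved x30 -> frame_pointer[1]
+  //   };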
+ void* prev_return_address = nullptr;
+
+ while (frame_pointer && n < max_depth) {
+ // The y_absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example).
+ // Use the non-strict unwinding rules to produce a stack trace
+ // that is as complete as possible (even if it contains a few bogus
+ // entries in some rare cases).
+ void **next_frame_pointer =
+ NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n] = prev_return_address;
+ if (IS_STACK_FRAMES) {
+ sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
+ }
+ n++;
+ }
+ prev_return_address = frame_pointer[1];
+ frame_pointer = next_frame_pointer;
+ }
+ if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+ const int kMaxUnwind = 200;
+ int j = 0;
+ for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ frame_pointer =
+ NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+ }
+ *min_dropped_frames = j;
+ }
+ return n;
+}
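+
+// For illustration (an editor's sketch; this mirrors how stacktrace.cc is
+// expected to instantiate UnwindImpl and is not part of this file):
+//
+//   int GetStackTrace(void** result, int max_depth, int skip_count) {
+//     return UnwindImpl<false, false>(result, nullptr, max_depth, skip_count,
+//                                     nullptr, nullptr);
+//   }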
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return true;
+}
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_arm-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_arm-inl.inc
new file mode 100644
index 00000000000..b73307120ee
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_arm-inl.inc
@@ -0,0 +1,134 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This is inspired by Craig Silverstein's PowerPC stacktrace code.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
+
+#include <cstdint>
+
+#include "y_absl/debugging/stacktrace.h"
+
+// WARNING:
+// This only works if all your code is in either ARM or THUMB mode. With
+// interworking, the frame pointer of the caller can either be in r11 (ARM
+// mode) or r7 (THUMB mode). A callee only saves the frame pointer of its
+// mode in a fixed location on its stack frame. If the caller is a different
+// mode, there is no easy way to find the frame pointer. It can either be
+// still in the designated register or saved on stack along with other callee
+// saved registers.
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return nullptr if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING>
+static void **NextStackFrame(void **old_sp) {
+ void **new_sp = (void**) old_sp[-1];
+
+ // Check that the transition from frame pointer old_sp to frame
+ // pointer new_sp isn't clearly bogus
+ if (STRICT_UNWINDING) {
+    // With the stack growing downwards, older stack frames must be
+    // at a greater address than the current one.
+ if (new_sp <= old_sp) return nullptr;
+ // Assume stack frames larger than 100,000 bytes are bogus.
+ if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
+ } else {
+ // In the non-strict mode, allow discontiguous stack frames.
+ // (alternate-signal-stacks for example).
+ if (new_sp == old_sp) return nullptr;
+    // And allow frames of up to about 1MB.
+ if ((new_sp > old_sp)
+ && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
+ }
+ if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return nullptr;
+ return new_sp;
+}
+
+// This ensures that y_absl::GetStackTrace sets up the Link Register properly.
+#ifdef __GNUC__
+void StacktraceArmDummyFunction() __attribute__((noinline));
+void StacktraceArmDummyFunction() { __asm__ volatile(""); }
+#else
+# error StacktraceArmDummyFunction() needs to be ported to this platform.
+#endif
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void * /* ucp */, int *min_dropped_frames) {
+#ifdef __GNUC__
+ void **sp = reinterpret_cast<void**>(__builtin_frame_address(0));
+#else
+# error reading stack pointer not yet supported on this platform.
+#endif
+
+ // On ARM, the return address is stored in the link register (r14).
+ // This is not saved on the stack frame of a leaf function. To
+ // simplify code that reads return addresses, we call a dummy
+ // function so that the return address of this function is also
+ // stored in the stack frame. This works at least for gcc.
+ StacktraceArmDummyFunction();
+
+ int n = 0;
+ while (sp && n < max_depth) {
+ // The y_absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example).
+ // Use the non-strict unwinding rules to produce a stack trace
+ // that is as complete as possible (even if it contains a few bogus
+ // entries in some rare cases).
+ void **next_sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
+
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n] = *sp;
+
+ if (IS_STACK_FRAMES) {
+ if (next_sp > sp) {
+ sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
+ } else {
+ // A frame-size of 0 is used to indicate unknown frame size.
+ sizes[n] = 0;
+ }
+ }
+ n++;
+ }
+ sp = next_sp;
+ }
+ if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+ const int kMaxUnwind = 200;
+ int j = 0;
+ for (; sp != nullptr && j < kMaxUnwind; j++) {
+ sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
+ }
+ *min_dropped_frames = j;
+ }
+ return n;
+}
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return false;
+}
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_config.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_config.h
new file mode 100644
index 00000000000..7ba212ab03f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_config.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Defines ABSL_STACKTRACE_INL_HEADER to the *-inl.inc file containing
+ * the actual unwinder implementation.
+ * This header is "private" to stacktrace.cc.
+ * DO NOT include it into any other files.
+ */
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
+
+#include "y_absl/base/config.h"
+
+#if defined(ABSL_STACKTRACE_INL_HEADER)
+#error ABSL_STACKTRACE_INL_HEADER cannot be directly set
+
+#elif defined(_WIN32)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_win32-inl.inc"
+
+#elif defined(__APPLE__)
+#ifdef ABSL_HAVE_THREAD_LOCAL
+// Thread local support required for UnwindImpl.
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_generic-inl.inc"
+#endif // defined(ABSL_HAVE_THREAD_LOCAL)
+
+#elif defined(__EMSCRIPTEN__)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_emscripten-inl.inc"
+
+#elif defined(__linux__) && !defined(__ANDROID__)
+
+#if defined(NO_FRAME_POINTER) && \
+ (defined(__i386__) || defined(__x86_64__) || defined(__aarch64__))
+// Note: The libunwind-based implementation is not available to open-source
+// users.
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_libunwind-inl.inc"
+#define STACKTRACE_USES_LIBUNWIND 1
+#elif defined(NO_FRAME_POINTER) && defined(__has_include)
+#if __has_include(<execinfo.h>)
+// Note: When using glibc this may require -funwind-tables to function properly.
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_generic-inl.inc"
+#endif // __has_include(<execinfo.h>)
+#elif defined(__i386__) || defined(__x86_64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_x86-inl.inc"
+#elif defined(__ppc__) || defined(__PPC__)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_powerpc-inl.inc"
+#elif defined(__aarch64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_aarch64-inl.inc"
+#elif defined(__riscv)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_riscv-inl.inc"
+#elif defined(__has_include)
+#if __has_include(<execinfo.h>)
+// Note: When using glibc this may require -funwind-tables to function properly.
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_generic-inl.inc"
+#endif // __has_include(<execinfo.h>)
+#endif // defined(__has_include)
+
+#endif // defined(__linux__) && !defined(__ANDROID__)
+
+// Fallback to the empty implementation.
+#if !defined(ABSL_STACKTRACE_INL_HEADER)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "y_absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+#endif
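+
+// For illustration (an editor's sketch; this mirrors how stacktrace.cc is
+// expected to consume the macro and is not part of this header):
+//
+//   #include "y_absl/debugging/internal/stacktrace_config.h"
+//   #if defined(ABSL_STACKTRACE_INL_HEADER)
+//   #include ABSL_STACKTRACE_INL_HEADER
+//   #endif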
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_emscripten-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_emscripten-inl.inc
new file mode 100644
index 00000000000..cdb55571344
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_emscripten-inl.inc
@@ -0,0 +1,110 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Portable implementation - just use glibc
+//
+// Note: The glibc implementation may cause a call to malloc.
+// This can cause a deadlock in HeapProfiler.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_
+
+#error #include <emscripten.h>
+
+#include <atomic>
+#include <cstring>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/debugging/stacktrace.h"
+
+extern "C" {
+uintptr_t emscripten_stack_snapshot();
+uint32_t emscripten_stack_unwind_buffer(uintptr_t pc, void *buffer,
+ uint32_t depth);
+}
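+
+// For illustration (an editor's sketch of the two-call protocol above):
+//
+//   void* buffer[32];
+//   uintptr_t pc = emscripten_stack_snapshot();  // capture the current stack
+//   uint32_t depth = emscripten_stack_unwind_buffer(pc, buffer, 32);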
+
+// Sometimes, we can try to get a stack trace from within a stack
+// trace, which can cause a self-deadlock.
+// Protect against such reentrant call by failing to get a stack trace.
+//
+// We use __thread here because the code here is extremely low level -- it is
+// called while collecting stack traces from within malloc and mmap, and thus
+// can not call anything which might call malloc or mmap itself.
+static __thread int recursive = 0;
+
+// The stack trace function might be invoked very early in the program's
+// execution (e.g. from the very first malloc).
+// As such, we suppress usage of backtrace during this early stage of execution.
+static std::atomic<bool> disable_stacktraces(true); // Disabled until healthy.
+// Waiting until static initializers run seems to be late enough.
+// This file is included into stacktrace.cc so this will only run once.
+ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() {
+ // Check if we can even create stacktraces. If not, bail early and leave
+ // disable_stacktraces set as-is.
+ // clang-format off
+ if (!EM_ASM_INT({ return (typeof wasmOffsetConverter !== 'undefined'); })) {
+ return 0;
+ }
+ // clang-format on
+ disable_stacktraces.store(false, std::memory_order_relaxed);
+ return 0;
+}();
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+ if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) {
+ return 0;
+ }
+ ++recursive;
+
+ static_cast<void>(ucp); // Unused.
+ constexpr int kStackLength = 64;
+ void *stack[kStackLength];
+
+ int size;
+ uintptr_t pc = emscripten_stack_snapshot();
+ size = emscripten_stack_unwind_buffer(pc, stack, kStackLength);
+
+ int result_count = size - skip_count;
+ if (result_count < 0) result_count = 0;
+ if (result_count > max_depth) result_count = max_depth;
+ for (int i = 0; i < result_count; i++) result[i] = stack[i + skip_count];
+
+ if (IS_STACK_FRAMES) {
+ // No implementation for finding out the stack frame sizes yet.
+ memset(sizes, 0, sizeof(*sizes) * result_count);
+ }
+ if (min_dropped_frames != nullptr) {
+ if (size - skip_count - max_depth > 0) {
+ *min_dropped_frames = size - skip_count - max_depth;
+ } else {
+ *min_dropped_frames = 0;
+ }
+ }
+
+ --recursive;
+
+ return result_count;
+}
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() { return true; }
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_generic-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_generic-inl.inc
new file mode 100644
index 00000000000..fa623aa9548
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_generic-inl.inc
@@ -0,0 +1,108 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Portable implementation - just use glibc
+//
+// Note: The glibc implementation may cause a call to malloc.
+// This can cause a deadlock in HeapProfiler.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
+
+#include <execinfo.h>
+#include <atomic>
+#include <cstring>
+
+#include "y_absl/debugging/stacktrace.h"
+#include "y_absl/base/attributes.h"
+
+// Sometimes, we can try to get a stack trace from within a stack
+// trace, because we don't block signals inside this code (which would be too
+// expensive: the two extra system calls per stack trace do matter here).
+// That can cause a self-deadlock.
+// Protect against such reentrant call by failing to get a stack trace.
+//
+// We use __thread here because the code here is extremely low level -- it is
+// called while collecting stack traces from within malloc and mmap, and thus
+// can not call anything which might call malloc or mmap itself.
+static __thread int recursive = 0;
+
+// The stack trace function might be invoked very early in the program's
+// execution (e.g. from the very first malloc if using tcmalloc). Also, the
+// glibc implementation itself will trigger malloc the first time it is called.
+// As such, we suppress usage of backtrace during this early stage of execution.
+static std::atomic<bool> disable_stacktraces(true); // Disabled until healthy.
+// Waiting until static initializers run seems to be late enough.
+// This file is included into stacktrace.cc so this will only run once.
+ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() {
+ void* unused_stack[1];
+ // Force the first backtrace to happen early to get the one-time shared lib
+ // loading (allocation) out of the way. After the first call it is much safer
+ // to use backtrace from a signal handler if we crash somewhere later.
+ backtrace(unused_stack, 1);
+ disable_stacktraces.store(false, std::memory_order_relaxed);
+ return 0;
+}();
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+ if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) {
+ return 0;
+ }
+ ++recursive;
+
+ static_cast<void>(ucp); // Unused.
+ static const int kStackLength = 64;
+ void * stack[kStackLength];
+ int size;
+
+ size = backtrace(stack, kStackLength);
+ skip_count++; // we want to skip the current frame as well
+ int result_count = size - skip_count;
+ if (result_count < 0)
+ result_count = 0;
+ if (result_count > max_depth)
+ result_count = max_depth;
+ for (int i = 0; i < result_count; i++)
+ result[i] = stack[i + skip_count];
+
+ if (IS_STACK_FRAMES) {
+ // No implementation for finding out the stack frame sizes yet.
+ memset(sizes, 0, sizeof(*sizes) * result_count);
+ }
+ if (min_dropped_frames != nullptr) {
+ if (size - skip_count - max_depth > 0) {
+ *min_dropped_frames = size - skip_count - max_depth;
+ } else {
+ *min_dropped_frames = 0;
+ }
+ }
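+  // Worked example (an editor's illustration): if backtrace() captured
+  // size = 8 frames, the caller passed skip_count = 1 (2 after the increment
+  // above) and max_depth = 4, then result_count = 4 and
+  // *min_dropped_frames = 8 - 2 - 4 = 2.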
+
+ --recursive;
+
+ return result_count;
+}
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return true;
+}
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc
new file mode 100644
index 00000000000..1e41c4ebf2b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -0,0 +1,253 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produce stack trace. I'm guessing (hoping!) the code is much like
+// for x86. For apple machines, at least, it seems to be; see
+// https://developer.apple.com/documentation/mac/runtimehtml/RTArch-59.html
+// https://www.linux-foundation.org/spec/ELF/ppc64/PPC-elf64abi-1.9.html#STACK
+// Linux has similar code: http://patchwork.ozlabs.org/linuxppc/patch?id=8882
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
+
+#if defined(__linux__)
+#include <asm/ptrace.h> // for PT_NIP.
+#include <ucontext.h> // for ucontext_t
+#endif
+
+#include <unistd.h>
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/base/port.h"
+#include "y_absl/debugging/stacktrace.h"
+#include "y_absl/debugging/internal/address_is_readable.h"
+#include "y_absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
+
+// Given a stack pointer, return the saved link register value.
+// Note that this is the link register for a callee.
+static inline void *StacktracePowerPCGetLR(void **sp) {
+ // PowerPC has 3 main ABIs, which say where in the stack the
+ // Link Register is. For DARWIN and AIX (used by apple and
+ // linux ppc64), it's in sp[2]. For SYSV (used by linux ppc),
+ // it's in sp[1].
+#if defined(_CALL_AIX) || defined(_CALL_DARWIN)
+ return *(sp+2);
+#elif defined(_CALL_SYSV)
+ return *(sp+1);
+#elif defined(__APPLE__) || defined(__FreeBSD__) || \
+ (defined(__linux__) && defined(__PPC64__))
+ // This check is in case the compiler doesn't define _CALL_AIX/etc.
+ return *(sp+2);
+#elif defined(__linux)
+ // This check is in case the compiler doesn't define _CALL_SYSV.
+ return *(sp+1);
+#else
+#error Need to specify the PPC ABI for your architecture.
+#endif
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static void **NextStackFrame(void **old_sp, const void *uc) {
+ void **new_sp = (void **) *old_sp;
+ enum { kStackAlignment = 16 };
+
+ // Check that the transition from frame pointer old_sp to frame
+ // pointer new_sp isn't clearly bogus
+ if (STRICT_UNWINDING) {
+    // With the stack growing downwards, older stack frames must be
+    // at a greater address than the current one.
+ if (new_sp <= old_sp) return nullptr;
+ // Assume stack frames larger than 100,000 bytes are bogus.
+ if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
+ } else {
+ // In the non-strict mode, allow discontiguous stack frames.
+ // (alternate-signal-stacks for example).
+ if (new_sp == old_sp) return nullptr;
+    // And allow frames of up to about 1MB.
+ if ((new_sp > old_sp)
+ && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
+ }
+ if ((uintptr_t)new_sp % kStackAlignment != 0) return nullptr;
+
+#if defined(__linux__)
+ enum StackTraceKernelSymbolStatus {
+ kNotInitialized = 0, kAddressValid, kAddressInvalid };
+
+ if (IS_WITH_CONTEXT && uc != nullptr) {
+ static StackTraceKernelSymbolStatus kernel_symbol_status =
+ kNotInitialized; // Sentinel: not computed yet.
+    // Initialize with a sentinel value: __kernel_sigtramp_rt64 can not
+    // possibly be at nullptr.
+ static const unsigned char *kernel_sigtramp_rt64_address = nullptr;
+ if (kernel_symbol_status == kNotInitialized) {
+ y_absl::debugging_internal::VDSOSupport vdso;
+ if (vdso.IsPresent()) {
+ y_absl::debugging_internal::VDSOSupport::SymbolInfo
+ sigtramp_rt64_symbol_info;
+ if (!vdso.LookupSymbol(
+ "__kernel_sigtramp_rt64", "LINUX_2.6.15",
+ y_absl::debugging_internal::VDSOSupport::kVDSOSymbolType,
+ &sigtramp_rt64_symbol_info) ||
+ sigtramp_rt64_symbol_info.address == nullptr) {
+ // Unexpected: VDSO is present, yet the expected symbol is missing
+ // or null.
+ assert(false && "VDSO is present, but doesn't have expected symbol");
+ kernel_symbol_status = kAddressInvalid;
+ } else {
+ kernel_sigtramp_rt64_address =
+ reinterpret_cast<const unsigned char *>(
+ sigtramp_rt64_symbol_info.address);
+ kernel_symbol_status = kAddressValid;
+ }
+ } else {
+ kernel_symbol_status = kAddressInvalid;
+ }
+ }
+
+ if (new_sp != nullptr &&
+ kernel_symbol_status == kAddressValid &&
+ StacktracePowerPCGetLR(new_sp) == kernel_sigtramp_rt64_address) {
+ const ucontext_t* signal_context =
+ reinterpret_cast<const ucontext_t*>(uc);
+ void **const sp_before_signal =
+#if defined(__PPC64__)
+ reinterpret_cast<void **>(signal_context->uc_mcontext.gp_regs[PT_R1]);
+#else
+ reinterpret_cast<void **>(
+ signal_context->uc_mcontext.uc_regs->gregs[PT_R1]);
+#endif
+ // Check that alleged sp before signal is nonnull and is reasonably
+ // aligned.
+ if (sp_before_signal != nullptr &&
+ ((uintptr_t)sp_before_signal % kStackAlignment) == 0) {
+ // Check that alleged stack pointer is actually readable. This is to
+ // prevent a "double fault" in case we hit the first fault due to e.g.
+ // a stack corruption.
+ if (y_absl::debugging_internal::AddressIsReadable(sp_before_signal)) {
+ // Alleged stack pointer is readable, use it for further unwinding.
+ new_sp = sp_before_signal;
+ }
+ }
+ }
+ }
+#endif
+
+ return new_sp;
+}
+
+// This ensures that y_absl::GetStackTrace sets up the Link Register properly.
+ABSL_ATTRIBUTE_NOINLINE static void AbslStacktracePowerPCDummyFunction() {
+ ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+ void **sp;
+ // Apple macOS uses an old version of gnu as -- both Darwin 7.9.0 (Panther)
+ // and Darwin 8.8.1 (Tiger) use as 1.38. This means we have to use a
+ // different asm syntax. I don't know quite the best way to discriminate
+ // systems using the old as from the new one; I've gone with __APPLE__.
+#ifdef __APPLE__
+ __asm__ volatile ("mr %0,r1" : "=r" (sp));
+#else
+ __asm__ volatile ("mr %0,1" : "=r" (sp));
+#endif
+
+ // On PowerPC, the "Link Register" or "Link Record" (LR), is a stack
+ // entry that holds the return address of the subroutine call (what
+ // instruction we run after our function finishes). This is the
+ // same as the stack-pointer of our parent routine, which is what we
+ // want here. While the compiler will always(?) set up LR for
+ // subroutine calls, it may not for leaf functions (such as this one).
+ // This routine forces the compiler (at least gcc) to push it anyway.
+ AbslStacktracePowerPCDummyFunction();
+
+ // The LR save area is used by the callee, so the top entry is bogus.
+ skip_count++;
+
+ int n = 0;
+
+ // Unlike ABIs of X86 and ARM, PowerPC ABIs say that return address (in
+ // the link register) of a function call is stored in the caller's stack
+ // frame instead of the callee's. When we look for the return address
+ // associated with a stack frame, we need to make sure that there is a
+ // caller frame before it. So we call NextStackFrame before entering the
+ // loop below and check next_sp instead of sp for loop termination.
+ // The outermost frame is set up by runtimes and it does not have a
+ // caller frame, so it is skipped.
+
+ // The y_absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example).
+ // Use the non-strict unwinding rules to produce a stack trace
+ // that is as complete as possible (even if it contains a few
+ // bogus entries in some rare cases).
+ void **next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
+
+ while (next_sp && n < max_depth) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n] = StacktracePowerPCGetLR(sp);
+ if (IS_STACK_FRAMES) {
+ if (next_sp > sp) {
+ sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
+ } else {
+ // A frame-size of 0 is used to indicate unknown frame size.
+ sizes[n] = 0;
+ }
+ }
+ n++;
+ }
+
+ sp = next_sp;
+ next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
+ }
+
+ if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+ const int kMaxUnwind = 1000;
+ int j = 0;
+ for (; next_sp != nullptr && j < kMaxUnwind; j++) {
+ next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(next_sp, ucp);
+ }
+ *min_dropped_frames = j;
+ }
+ return n;
+}
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return true;
+}
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_riscv-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_riscv-inl.inc
new file mode 100644
index 00000000000..8571c8cbfd6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_riscv-inl.inc
@@ -0,0 +1,234 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_
+
+// Generate stack trace for riscv
+
+#include <sys/ucontext.h>
+
+#include "y_absl/base/config.h"
+#if defined(__linux__)
+#include <sys/mman.h>
+#include <ucontext.h>
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <iostream>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/debugging/internal/address_is_readable.h"
+#include "y_absl/debugging/internal/vdso_support.h"
+#include "y_absl/debugging/stacktrace.h"
+
+static const uintptr_t kUnknownFrameSize = 0;
+
+#if defined(__linux__)
+// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
+static const unsigned char *GetKernelRtSigreturnAddress() {
+ constexpr uintptr_t kImpossibleAddress = 0;
+ ABSL_CONST_INIT static std::atomic<uintptr_t> memoized(kImpossibleAddress);
+ uintptr_t address = memoized.load(std::memory_order_relaxed);
+ if (address != kImpossibleAddress) {
+ return reinterpret_cast<const unsigned char *>(address);
+ }
+
+ address = reinterpret_cast<uintptr_t>(nullptr);
+
+#if ABSL_HAVE_VDSO_SUPPORT
+ y_absl::debugging_internal::VDSOSupport vdso;
+ if (vdso.IsPresent()) {
+ y_absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
+ // Symbol versioning pulled from arch/riscv/kernel/vdso/vdso.lds at v5.10.
+ auto lookup = [&](int type) {
+ return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_4.15", type,
+ &symbol_info);
+ };
+ if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
+ symbol_info.address == nullptr) {
+ // Unexpected: VDSO is present, yet the expected symbol is missing or
+ // null.
+ assert(false && "VDSO is present, but doesn't have expected symbol");
+ } else {
+ if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
+ kImpossibleAddress) {
+ address = reinterpret_cast<uintptr_t>(symbol_info.address);
+ } else {
+ assert(false && "VDSO returned invalid address");
+ }
+ }
+ }
+#endif
+
+ memoized.store(address, std::memory_order_relaxed);
+ return reinterpret_cast<const unsigned char *>(address);
+}
+#endif // __linux__
+
+// Compute the size of a stack frame in [low..high). We assume that low < high;
+// if not, return kUnknownFrameSize.
+template <typename T>
+static inline uintptr_t ComputeStackFrameSize(const T *low, const T *high) {
+ const char *low_char_ptr = reinterpret_cast<const char *>(low);
+ const char *high_char_ptr = reinterpret_cast<const char *>(high);
+ return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize;
+}
+
+// Given a pointer to a stack frame, locate and return the calling stackframe,
+// or return null if no stackframe can be found. Perform sanity checks (the
+// strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
+ // .
+ // .
+ // .
+ // +-> +----------------+
+ // | | return address |
+ // | | previous fp |
+ // | | ... |
+ // | +----------------+ <-+
+ // | | return address | |
+ // +---|- previous fp | |
+ // | ... | |
+ // $fp ->|----------------+ |
+ // | return address | |
+ // | previous fp -|---+
+ // $sp ->| ... |
+ // +----------------+
+ void **new_frame_pointer = reinterpret_cast<void **>(old_frame_pointer[-2]);
+ bool check_frame_size = true;
+
+#if defined(__linux__)
+ if (WITH_CONTEXT && uc != nullptr) {
+ // Check to see if next frame's return address is __kernel_rt_sigreturn.
+ if (old_frame_pointer[-1] == GetKernelRtSigreturnAddress()) {
+ const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
+ // old_frame_pointer is not suitable for unwinding, look at ucontext to
+ // discover frame pointer before signal.
+ //
+ // RISCV ELF psABI has the frame pointer at x8/fp/s0.
+ // -- RISCV psABI Table 18.2
+ void **const pre_signal_frame_pointer =
+ reinterpret_cast<void **>(ucv->uc_mcontext.__gregs[8]);
+
+ // Check the alleged frame pointer is actually readable. This is to
+ // prevent "double fault" in case we hit the first fault due to stack
+ // corruption.
+ if (!y_absl::debugging_internal::AddressIsReadable(
+ pre_signal_frame_pointer))
+ return nullptr;
+
+ // Alleged frame pointer is readable, use it for further unwinding.
+ new_frame_pointer = pre_signal_frame_pointer;
+
+      // Skip the frame size check if we return from a signal. We may be
+      // using an alternate stack for signals.
+ check_frame_size = false;
+ }
+ }
+#endif
+
+ // The RISCV ELF psABI mandates that the stack pointer is always 16-byte
+ // aligned.
+ // FIXME(abdulras) this doesn't hold for ILP32E which only mandates a 4-byte
+ // alignment.
+ if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 15) != 0)
+ return nullptr;
+
+ // Check frame size. In strict mode, we assume frames to be under 100,000
+ // bytes. In non-strict mode, we relax the limit to 1MB.
+ if (check_frame_size) {
+ const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
+ const uintptr_t frame_size =
+ ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
+ if (frame_size == kUnknownFrameSize || frame_size > max_size)
+ return nullptr;
+ }
+
+ return new_frame_pointer;
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+#if defined(__GNUC__)
+ void **frame_pointer = reinterpret_cast<void **>(__builtin_frame_address(0));
+#else
+#error reading stack pointer not yet supported on this platform
+#endif
+
+ skip_count++; // Skip the frame for this function.
+ int n = 0;
+
+ // The `frame_pointer` that is computed here points to the top of the frame.
+ // The two words preceding the address are the return address and the previous
+ // frame pointer. To find a PC value associated with the current frame, we
+ // need to go down a level in the call chain. So we remember the return
+ // address of the last frame seen. This does not work for the first stack
+ // frame, which belongs to `UnwindImp()` but we skip the frame for
+ // `UnwindImp()` anyway.
+ void *prev_return_address = nullptr;
+
+ while (frame_pointer && n < max_depth) {
+    // The y_absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example). Use the
+ // non-strict unwinding rules to produce a stack trace that is as complete
+ // as possible (even if it contains a few bogus entries in some rare cases).
+ void **next_frame_pointer =
+ NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n] = prev_return_address;
+ if (IS_STACK_FRAMES) {
+ sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
+ }
+ n++;
+ }
+ prev_return_address = frame_pointer[-1];
+ frame_pointer = next_frame_pointer;
+ }
+ if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+ const int kMaxUnwind = 200;
+ int j = 0;
+ for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ frame_pointer =
+ NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+ }
+ *min_dropped_frames = j;
+ }
+ return n;
+}
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() { return true; }
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_unimplemented-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_unimplemented-inl.inc
new file mode 100644
index 00000000000..54dccaeae97
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_unimplemented-inl.inc
@@ -0,0 +1,24 @@
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** /* result */, int* /* sizes */,
+ int /* max_depth */, int /* skip_count */,
+ const void* /* ucp */, int *min_dropped_frames) {
+ if (min_dropped_frames != nullptr) {
+ *min_dropped_frames = 0;
+ }
+ return 0;
+}
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return false;
+}
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_win32-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_win32-inl.inc
new file mode 100644
index 00000000000..69bd7e3f2f4
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_win32-inl.inc
@@ -0,0 +1,93 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produces a stack trace for Windows. Normally, one could use
+// stacktrace_x86-inl.h or stacktrace_x86_64-inl.h -- and indeed, that
+// should work for binaries compiled using MSVC in "debug" mode.
+// However, in "release" mode, Windows uses frame-pointer
+// optimization, which makes getting a stack trace very difficult.
+//
+// There are several approaches one can take. One is to use Windows
+// intrinsics like StackWalk64. These can work, but have restrictions
+// on how successful they can be. Another attempt is to write a
+// version of stacktrace_x86-inl.h that has heuristic support for
+// dealing with FPO, similar to what WinDbg does (see
+// http://www.nynaeve.net/?p=97). There are (non-working) examples of
+// these approaches, complete with TODOs, in stacktrace_win32-inl.h#1
+//
+// The solution we've ended up doing is to call the undocumented
+// windows function RtlCaptureStackBackTrace, which probably doesn't
+// work with FPO but at least is fast, and doesn't require a symbol
+// server.
+//
+// This code is inspired by a patch from David Vitek:
+// https://code.google.com/p/google-perftools/issues/detail?id=83
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
+
+#include <windows.h> // for GetProcAddress and GetModuleHandle
+#include <cassert>
+#include <cstring>  // for memset()
+
+typedef USHORT NTAPI RtlCaptureStackBackTrace_Function(
+ IN ULONG frames_to_skip,
+ IN ULONG frames_to_capture,
+ OUT PVOID *backtrace,
+ OUT PULONG backtrace_hash);
+
+// It is not possible to load RtlCaptureStackBackTrace at static init time in
+// UWP. CaptureStackBackTrace is the public version of RtlCaptureStackBackTrace.
+#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
+ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
+ &::CaptureStackBackTrace;
+#else
+// Load the function we need at static init time, where we don't have
+// to worry about someone else holding the loader's lock.
+static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
+ (RtlCaptureStackBackTrace_Function*)GetProcAddress(
+ GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace");
+#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
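+
+// For illustration (an editor's sketch using the documented public alias
+// CaptureStackBackTrace; note that frames-to-skip plus frames-to-capture
+// must stay below 63 on older versions of Windows):
+//
+//   void* frames[32];
+//   USHORT depth = CaptureStackBackTrace(/*FramesToSkip=*/0,
+//                                        /*FramesToCapture=*/32, frames,
+//                                        /*BackTraceHash=*/nullptr);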
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void*, int* min_dropped_frames) {
+ int n = 0;
+ if (!RtlCaptureStackBackTrace_fn) {
+    // Can't produce a stack trace: the capture function could not be loaded.
+ } else {
+ n = (int)RtlCaptureStackBackTrace_fn(skip_count + 2, max_depth, result, 0);
+ }
+ if (IS_STACK_FRAMES) {
+ // No implementation for finding out the stack frame sizes yet.
+ memset(sizes, 0, sizeof(*sizes) * n);
+ }
+ if (min_dropped_frames != nullptr) {
+ // Not implemented.
+ *min_dropped_frames = 0;
+ }
+ return n;
+}
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return false;
+}
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc
new file mode 100644
index 00000000000..8936f8ca70c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc
@@ -0,0 +1,364 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produce stack trace
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
+
+#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
+#include <ucontext.h> // for ucontext_t
+#endif
+
+#if !defined(_WIN32)
+#include <unistd.h>
+#endif
+
+#include <cassert>
+#include <cstdint>
+#include <limits>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+#include "y_absl/debugging/internal/address_is_readable.h"
+#include "y_absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
+#include "y_absl/debugging/stacktrace.h"
+
+#include "y_absl/base/internal/raw_logging.h"
+
+using y_absl::debugging_internal::AddressIsReadable;
+
+#if defined(__linux__) && defined(__i386__)
+// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
+// preceding "syscall" or "sysenter".
+// If __kernel_vsyscall uses frame pointer, answer 0.
+//
+// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
+// to analyze before giving up. Up to kMaxBytes+1 bytes of
+// instructions could be accessed.
+//
+// Here are known __kernel_vsyscall instruction sequences:
+//
+// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S).
+// Used on Intel.
+// 0xffffe400 <__kernel_vsyscall+0>: push %ecx
+// 0xffffe401 <__kernel_vsyscall+1>: push %edx
+// 0xffffe402 <__kernel_vsyscall+2>: push %ebp
+// 0xffffe403 <__kernel_vsyscall+3>: mov %esp,%ebp
+// 0xffffe405 <__kernel_vsyscall+5>: sysenter
+//
+// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S).
+// Used on AMD.
+// 0xffffe400 <__kernel_vsyscall+0>: push %ebp
+// 0xffffe401 <__kernel_vsyscall+1>: mov %ecx,%ebp
+// 0xffffe403 <__kernel_vsyscall+3>: syscall
+//
+// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S)
+// 0xffffe400 <__kernel_vsyscall+0>: int $0x80
+// 0xffffe401 <__kernel_vsyscall+1>: ret
+//
+static const int kMaxBytes = 10;
+
+// We use assert()s instead of DCHECK()s -- this is too low level
+// for DCHECK().
+
+static int CountPushInstructions(const unsigned char *const addr) {
+ int result = 0;
+ for (int i = 0; i < kMaxBytes; ++i) {
+ if (addr[i] == 0x89) {
+ // "mov reg,reg"
+ if (addr[i + 1] == 0xE5) {
+ // Found "mov %esp,%ebp".
+ return 0;
+ }
+ ++i; // Skip register encoding byte.
+ } else if (addr[i] == 0x0F &&
+ (addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) {
+ // Found "sysenter" or "syscall".
+ return result;
+ } else if ((addr[i] & 0xF0) == 0x50) {
+ // Found "push %reg".
+ ++result;
+ } else if (addr[i] == 0xCD && addr[i + 1] == 0x80) {
+ // Found "int $0x80"
+ assert(result == 0);
+ return 0;
+ } else {
+ // Unexpected instruction.
+ assert(false && "unexpected instruction in __kernel_vsyscall");
+ return 0;
+ }
+ }
+ // Unexpected: didn't find SYSENTER or SYSCALL in
+ // [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval.
+ assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall");
+ return 0;
+}
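+
+// Worked example (an editor's note): on the SYSCALL sequence above the loop
+// counts one "push %ebp", skips over the "mov %ecx,%ebp" ModRM byte, and
+// stops at "syscall", returning 1. On the SYSENTER sequence it reaches
+// "mov %esp,%ebp" and returns 0, since that __kernel_vsyscall maintains a
+// frame pointer.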
+#endif
+
+// Assume stack frames larger than 100,000 bytes are bogus.
+static const int kMaxFrameBytes = 100000;
+
+// Returns the stack frame pointer from signal context, 0 if unknown.
+// vuc is a ucontext_t *. We use void* to avoid the use
+// of ucontext_t on non-POSIX systems.
+static uintptr_t GetFP(const void *vuc) {
+#if !defined(__linux__)
+ static_cast<void>(vuc); // Avoid an unused argument compiler warning.
+#else
+ if (vuc != nullptr) {
+ auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
+#if defined(__i386__)
+ const auto bp = uc->uc_mcontext.gregs[REG_EBP];
+ const auto sp = uc->uc_mcontext.gregs[REG_ESP];
+#elif defined(__x86_64__)
+ const auto bp = uc->uc_mcontext.gregs[REG_RBP];
+ const auto sp = uc->uc_mcontext.gregs[REG_RSP];
+#else
+ const uintptr_t bp = 0;
+ const uintptr_t sp = 0;
+#endif
+ // Sanity-check that the base pointer is valid. It's possible that some
+ // code in the process is compiled with --copt=-fomit-frame-pointer or
+ // --copt=-momit-leaf-frame-pointer.
+ //
+ // TODO(bcmills): -momit-leaf-frame-pointer is currently the default
+ // behavior when building with clang. Talk to the C++ toolchain team about
+ // fixing that.
+ if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp;
+
+ // If bp isn't a plausible frame pointer, return the stack pointer instead.
+ // If we're lucky, it points to the start of a stack frame; otherwise, we'll
+ // get one frame of garbage in the stack trace and fail the sanity check on
+ // the next iteration.
+ return sp;
+ }
+#endif
+ return 0;
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static void **NextStackFrame(void **old_fp, const void *uc,
+ size_t stack_low, size_t stack_high) {
+ void **new_fp = (void **)*old_fp;
+
+#if defined(__linux__) && defined(__i386__)
+ if (WITH_CONTEXT && uc != nullptr) {
+ // How many "push %reg" instructions are there at __kernel_vsyscall?
+ // This is constant for a given kernel and processor, so compute
+ // it only once.
+ static int num_push_instructions = -1; // Sentinel: not computed yet.
+ // Initialize with sentinel value: __kernel_rt_sigreturn can not possibly
+ // be there.
+ static const unsigned char *kernel_rt_sigreturn_address = nullptr;
+ static const unsigned char *kernel_vsyscall_address = nullptr;
+ if (num_push_instructions == -1) {
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+ y_absl::debugging_internal::VDSOSupport vdso;
+ if (vdso.IsPresent()) {
+ y_absl::debugging_internal::VDSOSupport::SymbolInfo
+ rt_sigreturn_symbol_info;
+ y_absl::debugging_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info;
+ if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC,
+ &rt_sigreturn_symbol_info) ||
+ !vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC,
+ &vsyscall_symbol_info) ||
+ rt_sigreturn_symbol_info.address == nullptr ||
+ vsyscall_symbol_info.address == nullptr) {
+ // Unexpected: 32-bit VDSO is present, yet one of the expected
+ // symbols is missing or null.
+ assert(false && "VDSO is present, but doesn't have expected symbols");
+ num_push_instructions = 0;
+ } else {
+ kernel_rt_sigreturn_address =
+ reinterpret_cast<const unsigned char *>(
+ rt_sigreturn_symbol_info.address);
+ kernel_vsyscall_address =
+ reinterpret_cast<const unsigned char *>(
+ vsyscall_symbol_info.address);
+ num_push_instructions =
+ CountPushInstructions(kernel_vsyscall_address);
+ }
+ } else {
+ num_push_instructions = 0;
+ }
+#else // ABSL_HAVE_VDSO_SUPPORT
+ num_push_instructions = 0;
+#endif // ABSL_HAVE_VDSO_SUPPORT
+ }
+ if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr &&
+ old_fp[1] == kernel_rt_sigreturn_address) {
+ const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
+ // This kernel does not use frame pointer in its VDSO code,
+ // and so %ebp is not suitable for unwinding.
+ void **const reg_ebp =
+ reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_EBP]);
+ const unsigned char *const reg_eip =
+ reinterpret_cast<unsigned char *>(ucv->uc_mcontext.gregs[REG_EIP]);
+ if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip &&
+ reg_eip - kernel_vsyscall_address < kMaxBytes) {
+ // We "stepped up" to __kernel_vsyscall, but %ebp is not usable.
+ // Restore from 'ucv' instead.
+ void **const reg_esp =
+ reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_ESP]);
+ // Check that alleged %esp is not null and is reasonably aligned.
+ if (reg_esp &&
+ ((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) {
+ // Check that alleged %esp is actually readable. This is to prevent
+ // "double fault" in case we hit the first fault due to e.g. stack
+ // corruption.
+ void *const reg_esp2 = reg_esp[num_push_instructions - 1];
+ if (AddressIsReadable(reg_esp2)) {
+ // Alleged %esp is readable, use it for further unwinding.
+ new_fp = reinterpret_cast<void **>(reg_esp2);
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ const uintptr_t old_fp_u = reinterpret_cast<uintptr_t>(old_fp);
+ const uintptr_t new_fp_u = reinterpret_cast<uintptr_t>(new_fp);
+
+ // Check that the transition from frame pointer old_fp to frame
+ // pointer new_fp isn't clearly bogus. Skip the checks if new_fp
+ // matches the signal context, so that we don't skip out early when
+ // using an alternate signal stack.
+ //
+ // TODO(bcmills): The GetFP call should be completely unnecessary when
+ // ENABLE_COMBINED_UNWINDER is set (because we should be back in the thread's
+ // stack by this point), but it is empirically still needed (e.g. when the
+ // stack includes a call to abort). unw_get_reg returns UNW_EBADREG for some
+ // frames. Figure out why GetValidFrameAddr and/or libunwind isn't doing what
+ // it's supposed to.
+ if (STRICT_UNWINDING &&
+ (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) {
+    // With the stack growing downwards, older stack frames must be
+    // at a greater address than the current one.
+ if (new_fp_u <= old_fp_u) return nullptr;
+ if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr;
+
+ if (stack_low < old_fp_u && old_fp_u <= stack_high) {
+ // Old BP was in the expected stack region...
+ if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
+ // ... but new BP is outside of expected stack region.
+ // It is most likely bogus.
+ return nullptr;
+ }
+ } else {
+ // We may be here if we are executing in a co-routine with a
+ // separate stack. We can't do safety checks in this case.
+ }
+ } else {
+ if (new_fp == nullptr) return nullptr; // skip AddressIsReadable() below
+ // In the non-strict mode, allow discontiguous stack frames.
+ // (alternate-signal-stacks for example).
+ if (new_fp == old_fp) return nullptr;
+ }
+
+ if (new_fp_u & (sizeof(void *) - 1)) return nullptr;
+#ifdef __i386__
+ // On 32-bit machines, the stack pointer can be very close to
+ // 0xffffffff, so we explicitly check for a pointer into the
+  // last two pages in the address space.
+ if (new_fp_u >= 0xffffe000) return nullptr;
+#endif
+#if !defined(_WIN32)
+ if (!STRICT_UNWINDING) {
+ // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
+ // on AMD-based machines with VDSO-enabled kernels.
+    // Make an extra sanity check to ensure new_fp is readable.
+ // Note: NextStackFrame<false>() is only called while the program
+ // is already on its last leg, so it's ok to be slow here.
+
+ if (!AddressIsReadable(new_fp)) {
+ return nullptr;
+ }
+ }
+#endif
+ return new_fp;
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+ABSL_ATTRIBUTE_NOINLINE
+static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+ int n = 0;
+ void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
+
+ size_t stack_low = getpagesize(); // Assume that the first page is not stack.
+ size_t stack_high = std::numeric_limits<size_t>::max() - sizeof(void *);
+
+ while (fp && n < max_depth) {
+ if (*(fp + 1) == reinterpret_cast<void *>(0)) {
+ // In 64-bit code, we often see a frame that
+ // points to itself and has a return address of 0.
+ break;
+ }
+ void **next_fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
+ fp, ucp, stack_low, stack_high);
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n] = *(fp + 1);
+ if (IS_STACK_FRAMES) {
+ if (next_fp > fp) {
+ sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp;
+ } else {
+ // A frame-size of 0 is used to indicate unknown frame size.
+ sizes[n] = 0;
+ }
+ }
+ n++;
+ }
+ fp = next_fp;
+ }
+ if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+ const int kMaxUnwind = 1000;
+ int j = 0;
+ for (; fp != nullptr && j < kMaxUnwind; j++) {
+ fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp, stack_low,
+ stack_high);
+ }
+ *min_dropped_frames = j;
+ }
+ return n;
+}
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return true;
+}
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
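For illustration only (not part of this diff; function name is hypothetical): a minimal sketch of the frame-pointer walk that UnwindImpl() performs, with all of the validity checks done by NextStackFrame() omitted. It assumes a gcc-style x86 frame-pointer chain, as described above.

    #include <cstdio>

    // Sketch: walk gcc-style saved frame pointers. Each frame stores the
    // caller's frame pointer at fp[0] and the return address at fp[1].
    void DumpReturnAddresses(int max_depth) {
      void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
      for (int i = 0; fp != nullptr && i < max_depth; ++i) {
        void *ret = fp[1];
        if (ret == nullptr) break;  // UnwindImpl treats a 0 return address as end
        std::printf("frame %d: return address %p\n", i, ret);
        fp = reinterpret_cast<void **>(fp[0]);  // step to the caller's frame
      }
    }
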
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/symbolize.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/symbolize.h
new file mode 100644
index 00000000000..e6e2b0724a4
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/symbolize.h
@@ -0,0 +1,153 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains internal parts of the Abseil symbolizer.
+// Do not depend on anything in this file; it may change at any time.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
+#define ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
+
+#ifdef __cplusplus
+
+#include <cstddef>
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/string_view.h"
+
+#ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
+#error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set
+#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) \
+ && !defined(__asmjs__) && !defined(__wasm__)
+#define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1
+
+#include <elf.h>
+#include <link.h> // For ElfW() macro.
+#include <functional>
+#include <util/generic/string.h>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Iterates over all sections, invoking callback on each with the section name
+// and the section header.
+//
+// Returns true on success; otherwise returns false in case of errors.
+//
+// This is not async-signal-safe.
+bool ForEachSection(int fd,
+ const std::function<bool(y_absl::string_view name,
+ const ElfW(Shdr) &)>& callback);
+
+// Gets the section header for the given name, if it exists. Returns true on
+// success. Otherwise, returns false.
+bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+ ElfW(Shdr) *out);
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
+
+#ifdef ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE
+#error ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE cannot be directly set
+#elif defined(__APPLE__)
+#define ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE 1
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE
+#error ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE cannot be directly set
+#elif defined(__EMSCRIPTEN__)
+#define ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE 1
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+struct SymbolDecoratorArgs {
+ // The program counter we are getting symbolic name for.
+ const void *pc;
+ // 0 for main executable, load address for shared libraries.
+ ptrdiff_t relocation;
+ // Read-only file descriptor for ELF image covering "pc",
+ // or -1 if no such ELF image exists in /proc/self/maps.
+ int fd;
+ // Output buffer, size.
+ // Note: the buffer may not be empty -- default symbolizer may have already
+ // produced some output, and earlier decorators may have adorned it in
+ // some way. You are free to replace or augment the contents (within the
+ // symbol_buf_size limit).
+ char *const symbol_buf;
+ size_t symbol_buf_size;
+ // Temporary scratch space, size.
+ // Use that space in preference to allocating your own stack buffer to
+ // conserve stack.
+ char *const tmp_buf;
+ size_t tmp_buf_size;
+ // User-provided argument
+ void* arg;
+};
+using SymbolDecorator = void (*)(const SymbolDecoratorArgs *);
+
+// Installs a function-pointer as a decorator. Returns a value less than zero
+// if the system cannot install the decorator. Otherwise, returns a unique
+// identifier corresponding to the decorator. This identifier can be used to
+// uninstall the decorator; see RemoveSymbolDecorator() below.
+int InstallSymbolDecorator(SymbolDecorator decorator, void* arg);
+
+// Removes a previously installed function-pointer decorator. Parameter "ticket"
+// is the return-value from calling InstallSymbolDecorator().
+bool RemoveSymbolDecorator(int ticket);
+
+// Remove all installed decorators. Returns true if successful, false if
+// symbolization is currently in progress.
+bool RemoveAllSymbolDecorators(void);
+
+// Registers an address range to a file mapping.
+//
+// Preconditions:
+// start <= end
+// filename != nullptr
+//
+// Returns true if the file was successfully registered.
+bool RegisterFileMappingHint(const void* start, const void* end,
+ uint64_t offset, const char* filename);
+
+// Looks up the file mapping registered by RegisterFileMappingHint for an
+// address range. If there is one, the file name is stored in *filename and
+// *start and *end are modified to reflect the registered mapping. Returns
+// whether any hint was found.
+bool GetFileMappingHint(const void** start, const void** end, uint64_t* offset,
+ const char** filename);
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // __cplusplus
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C"
+#endif // __cplusplus
+
+ bool
+ YAbslInternalGetFileMappingHint(const void** start, const void** end,
+ uint64_t* offset, const char** filename);
+
+#endif // ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
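For illustration only (not part of this diff; the decorator, its tag string, and the function names are hypothetical): a sketch of the symbol-decorator hooks declared above. A real decorator must stay within symbol_buf_size, as documented.

    #include <cstdio>
    #include <cstring>

    #include "y_absl/debugging/internal/symbolize.h"

    // Hypothetical decorator: appends " [mylib]" to every symbolized name.
    static void TagDecorator(
        const y_absl::debugging_internal::SymbolDecoratorArgs *args) {
      const char *tag = static_cast<const char *>(args->arg);
      size_t used = strlen(args->symbol_buf);
      if (used < args->symbol_buf_size) {
        snprintf(args->symbol_buf + used, args->symbol_buf_size - used, " [%s]",
                 tag);
      }
    }

    void InstallTagDecorator() {
      static char kTag[] = "mylib";
      int ticket =
          y_absl::debugging_internal::InstallSymbolDecorator(&TagDecorator, kTag);
      if (ticket >= 0) {
        // ... symbolize as usual, then:
        y_absl::debugging_internal::RemoveSymbolDecorator(ticket);
      }
    }
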
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.cc
new file mode 100644
index 00000000000..7905eba32fe
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.cc
@@ -0,0 +1,191 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Allow dynamic symbol lookup in the kernel VDSO page.
+//
+// VDSOSupport -- a class representing kernel VDSO (if present).
+
+#include "y_absl/debugging/internal/vdso_support.h"
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT // defined in vdso_support.h
+
+#if !defined(__has_include)
+#define __has_include(header) 0
+#endif
+
+#include <errno.h>
+#include <fcntl.h>
+#if __has_include(<syscall.h>)
+#include <syscall.h>
+#elif __has_include(<sys/syscall.h>)
+#include <sys/syscall.h>
+#endif
+#include <unistd.h>
+
+#if defined(__GLIBC__) && \
+ (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
+#define ABSL_HAVE_GETAUXVAL
+#endif
+
+#ifdef ABSL_HAVE_GETAUXVAL
+#include <sys/auxv.h>
+#endif
+
+#include "y_absl/base/dynamic_annotations.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/port.h"
+
+#ifndef AT_SYSINFO_EHDR
+#define AT_SYSINFO_EHDR 33 // for crosstoolv10
+#endif
+
+#if defined(__FreeBSD__)
+using Elf64_auxv_t = Elf64_Auxinfo;
+using Elf32_auxv_t = Elf32_Auxinfo;
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+ABSL_CONST_INIT
+std::atomic<const void *> VDSOSupport::vdso_base_(
+ debugging_internal::ElfMemImage::kInvalidBase);
+
+std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
+VDSOSupport::VDSOSupport()
+ // If vdso_base_ is still set to kInvalidBase, we got here
+ // before VDSOSupport::Init has been called. Call it now.
+ : image_(vdso_base_.load(std::memory_order_relaxed) ==
+ debugging_internal::ElfMemImage::kInvalidBase
+ ? Init()
+ : vdso_base_.load(std::memory_order_relaxed)) {}
+
+// NOTE: we can't use GoogleOnceInit() below, because we can be
+// called by tcmalloc, and none of the *once* stuff may be functional yet.
+//
+// In addition, we hope that the VDSOSupportHelper constructor
+// causes this code to run before there are any threads, and before
+// InitGoogle() has executed any chroot or setuid calls.
+//
+// Finally, even if there is a race here, it is harmless, because
+// the operation should be idempotent.
+const void *VDSOSupport::Init() {
+ const auto kInvalidBase = debugging_internal::ElfMemImage::kInvalidBase;
+#ifdef ABSL_HAVE_GETAUXVAL
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
+ errno = 0;
+ const void *const sysinfo_ehdr =
+ reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
+ if (errno == 0) {
+ vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
+ }
+ }
+#endif // ABSL_HAVE_GETAUXVAL
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
+ int fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd == -1) {
+ // Kernel too old to have a VDSO.
+ vdso_base_.store(nullptr, std::memory_order_relaxed);
+ getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+ return nullptr;
+ }
+ ElfW(auxv_t) aux;
+ while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
+ if (aux.a_type == AT_SYSINFO_EHDR) {
+ vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
+ std::memory_order_relaxed);
+ break;
+ }
+ }
+ close(fd);
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
+ // Didn't find AT_SYSINFO_EHDR in auxv[].
+ vdso_base_.store(nullptr, std::memory_order_relaxed);
+ }
+ }
+ GetCpuFn fn = &GetCPUViaSyscall; // default if VDSO not present.
+ if (vdso_base_.load(std::memory_order_relaxed)) {
+ VDSOSupport vdso;
+ SymbolInfo info;
+ if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
+ fn = reinterpret_cast<GetCpuFn>(const_cast<void *>(info.address));
+ }
+ }
+ // Subtle: this code runs outside of any locks; prevent compiler
+ // from assigning to getcpu_fn_ more than once.
+ getcpu_fn_.store(fn, std::memory_order_relaxed);
+ return vdso_base_.load(std::memory_order_relaxed);
+}
+
+const void *VDSOSupport::SetBase(const void *base) {
+ ABSL_RAW_CHECK(base != debugging_internal::ElfMemImage::kInvalidBase,
+ "internal error");
+ const void *old_base = vdso_base_.load(std::memory_order_relaxed);
+ vdso_base_.store(base, std::memory_order_relaxed);
+ image_.Init(base);
+ // Also reset getcpu_fn_, so GetCPU could be tested with simulated VDSO.
+ getcpu_fn_.store(&InitAndGetCPU, std::memory_order_relaxed);
+ return old_base;
+}
+
+bool VDSOSupport::LookupSymbol(const char *name,
+ const char *version,
+ int type,
+ SymbolInfo *info) const {
+ return image_.LookupSymbol(name, version, type, info);
+}
+
+bool VDSOSupport::LookupSymbolByAddress(const void *address,
+ SymbolInfo *info_out) const {
+ return image_.LookupSymbolByAddress(address, info_out);
+}
+
+// NOLINT on 'long' because this routine mimics kernel api.
+long VDSOSupport::GetCPUViaSyscall(unsigned *cpu, // NOLINT(runtime/int)
+ void *, void *) {
+#ifdef SYS_getcpu
+ return syscall(SYS_getcpu, cpu, nullptr, nullptr);
+#else
+ // x86_64 never implemented sys_getcpu(), except as a VDSO call.
+ static_cast<void>(cpu); // Avoid an unused argument compiler warning.
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+// Use fast __vdso_getcpu if available.
+long VDSOSupport::InitAndGetCPU(unsigned *cpu, // NOLINT(runtime/int)
+ void *x, void *y) {
+ Init();
+ GetCpuFn fn = getcpu_fn_.load(std::memory_order_relaxed);
+ ABSL_RAW_CHECK(fn != &InitAndGetCPU, "Init() did not set getcpu_fn_");
+ return (*fn)(cpu, x, y);
+}
+
+// This function must be very fast, and may be called from very
+// low level (e.g. tcmalloc). Hence I avoid things like
+// GoogleOnceInit() and ::operator new.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+int GetCPU() {
+ unsigned cpu;
+ int ret_code = (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr);
+ return ret_code == 0 ? cpu : ret_code;
+}
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_HAVE_VDSO_SUPPORT
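For illustration only (not part of this diff; the caller function is hypothetical): given the dispatch above, callers just use GetCPU() and the fast path is picked automatically.

    #include <cstdio>

    #include "y_absl/debugging/internal/vdso_support.h"

    #ifdef ABSL_HAVE_VDSO_SUPPORT
    void ReportCPU() {
      // Dispatches to __vdso_getcpu when present, else syscall(SYS_getcpu).
      int cpu = y_absl::debugging_internal::GetCPU();
      if (cpu >= 0) {
        std::printf("running on CPU %d\n", cpu);
      } else {
        std::printf("SYS_getcpu not supported (errno == ENOSYS)\n");
      }
    }
    #endif  // ABSL_HAVE_VDSO_SUPPORT
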
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.h
new file mode 100644
index 00000000000..c0064b9fe10
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.h
@@ -0,0 +1,158 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Allow dynamic symbol lookup in the kernel VDSO page.
+//
+// VDSO stands for "Virtual Dynamic Shared Object" -- a page of
+// executable code, which looks like a shared library, but doesn't
+// necessarily exist anywhere on disk, and which gets mmap()ed into
+// every process by kernels which support VDSO, such as 2.6.x for 32-bit
+// executables, and 2.6.24 and above for 64-bit executables.
+//
+// More details could be found here:
+// http://www.trilithium.com/johan/2005/08/linux-gate/
+//
+// VDSOSupport -- a class representing kernel VDSO (if present).
+//
+// Example usage:
+// VDSOSupport vdso;
+// VDSOSupport::SymbolInfo info;
+//   typedef long (*FN)(unsigned *, void *, void *);
+// FN fn = nullptr;
+// if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
+// fn = reinterpret_cast<FN>(info.address);
+// }
+
+#ifndef ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
+#define ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
+
+#include <atomic>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/debugging/internal/elf_mem_image.h"
+
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+#error ABSL_HAVE_VDSO_SUPPORT cannot be directly set
+#else
+#define ABSL_HAVE_VDSO_SUPPORT 1
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// NOTE: this class may be used from within tcmalloc, and can not
+// use any memory allocation routines.
+class VDSOSupport {
+ public:
+ VDSOSupport();
+
+ typedef ElfMemImage::SymbolInfo SymbolInfo;
+ typedef ElfMemImage::SymbolIterator SymbolIterator;
+
+ // On PowerPC64 VDSO symbols can either be of type STT_FUNC or STT_NOTYPE
+ // depending on how the kernel is built. The kernel is normally built with
+ // STT_NOTYPE type VDSO symbols. Let's make things simpler first by using a
+ // compile-time constant.
+#ifdef __powerpc64__
+ enum { kVDSOSymbolType = STT_NOTYPE };
+#else
+ enum { kVDSOSymbolType = STT_FUNC };
+#endif
+
+ // Answers whether we have a vdso at all.
+ bool IsPresent() const { return image_.IsPresent(); }
+
+  // Allows iteration over all VDSO symbols.
+ SymbolIterator begin() const { return image_.begin(); }
+ SymbolIterator end() const { return image_.end(); }
+
+ // Look up versioned dynamic symbol in the kernel VDSO.
+ // Returns false if VDSO is not present, or doesn't contain given
+ // symbol/version/type combination.
+ // If info_out != nullptr, additional details are filled in.
+ bool LookupSymbol(const char *name, const char *version,
+ int symbol_type, SymbolInfo *info_out) const;
+
+ // Find info about symbol (if any) which overlaps given address.
+ // Returns true if symbol was found; false if VDSO isn't present
+ // or doesn't have a symbol overlapping given address.
+ // If info_out != nullptr, additional details are filled in.
+ bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
+
+ // Used only for testing. Replace real VDSO base with a mock.
+ // Returns previous value of vdso_base_. After you are done testing,
+  // you are expected to call SetBase() with the previous value, in order to
+  // reset the state to the way it was.
+ const void *SetBase(const void *s);
+
+ // Computes vdso_base_ and returns it. Should be called as early as
+ // possible; before any thread creation, chroot or setuid.
+ static const void *Init();
+
+ private:
+ // image_ represents VDSO ELF image in memory.
+ // image_.ehdr_ == nullptr implies there is no VDSO.
+ ElfMemImage image_;
+
+ // Cached value of auxv AT_SYSINFO_EHDR, computed once.
+ // This is a tri-state:
+ // kInvalidBase => value hasn't been determined yet.
+ // 0 => there is no VDSO.
+ // else => vma of VDSO Elf{32,64}_Ehdr.
+ //
+ // When testing with mock VDSO, low bit is set.
+ // The low bit is always available because vdso_base_ is
+ // page-aligned.
+ static std::atomic<const void *> vdso_base_;
+
+ // NOLINT on 'long' because these routines mimic kernel api.
+ // The 'cache' parameter may be used by some versions of the kernel,
+ // and should be nullptr or point to a static buffer containing at
+ // least two 'long's.
+ static long InitAndGetCPU(unsigned *cpu, void *cache, // NOLINT 'long'.
+ void *unused);
+ static long GetCPUViaSyscall(unsigned *cpu, void *cache, // NOLINT 'long'.
+ void *unused);
+ typedef long (*GetCpuFn)(unsigned *cpu, void *cache, // NOLINT 'long'.
+ void *unused);
+
+ // This function pointer may point to InitAndGetCPU,
+ // GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization.
+ ABSL_CONST_INIT static std::atomic<GetCpuFn> getcpu_fn_;
+
+ friend int GetCPU(void); // Needs access to getcpu_fn_.
+
+ VDSOSupport(const VDSOSupport&) = delete;
+ VDSOSupport& operator=(const VDSOSupport&) = delete;
+};
+
+// Same as sched_getcpu() on later glibc versions.
+// Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present,
+// otherwise use syscall(SYS_getcpu,...).
+// May return -1 with errno == ENOSYS if the kernel doesn't
+// support SYS_getcpu.
+int GetCPU();
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_HAVE_ELF_MEM_IMAGE
+
+#endif // ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
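For illustration only (not part of this diff; the function name is hypothetical, and this assumes SymbolInfo exposes name/version/address fields, as defined in elf_mem_image.h): a sketch of the symbol-iteration interface above.

    #include <cstdio>

    #include "y_absl/debugging/internal/vdso_support.h"

    #ifdef ABSL_HAVE_VDSO_SUPPORT
    void ListVDSOSymbols() {
      y_absl::debugging_internal::VDSOSupport vdso;
      if (!vdso.IsPresent()) return;  // no VDSO mapped (e.g. very old kernel)
      for (auto it = vdso.begin(); it != vdso.end(); ++it) {
        std::printf("%s@%s at %p\n", it->name, it->version, it->address);
      }
    }
    #endif  // ABSL_HAVE_VDSO_SUPPORT
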
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/ya.make
new file mode 100644
index 00000000000..e07162537bb
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/ya.make
@@ -0,0 +1,40 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ examine_stack.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc
new file mode 100644
index 00000000000..ae5cf79fe29
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc
@@ -0,0 +1,69 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Wrappers around lsan_interface functions.
+// When lsan is not linked in, these functions are not available,
+// therefore Abseil code which depends on these functions is conditioned on the
+// definition of LEAK_SANITIZER.
+#include "y_absl/base/attributes.h"
+#include "y_absl/debugging/leak_check.h"
+
+#ifndef LEAK_SANITIZER
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+bool HaveLeakSanitizer() { return false; }
+bool LeakCheckerIsActive() { return false; }
+void DoIgnoreLeak(const void*) { }
+void RegisterLivePointers(const void*, size_t) { }
+void UnRegisterLivePointers(const void*, size_t) { }
+LeakCheckDisabler::LeakCheckDisabler() { }
+LeakCheckDisabler::~LeakCheckDisabler() { }
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else
+
+#include <sanitizer/lsan_interface.h>
+
+#if ABSL_HAVE_ATTRIBUTE_WEAK
+extern "C" ABSL_ATTRIBUTE_WEAK int __lsan_is_turned_off();
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+bool HaveLeakSanitizer() { return true; }
+
+#if ABSL_HAVE_ATTRIBUTE_WEAK
+bool LeakCheckerIsActive() {
+ return !(&__lsan_is_turned_off && __lsan_is_turned_off());
+}
+#else
+bool LeakCheckerIsActive() { return true; }
+#endif
+
+bool FindAndReportLeaks() { return __lsan_do_recoverable_leak_check(); }
+void DoIgnoreLeak(const void* ptr) { __lsan_ignore_object(ptr); }
+void RegisterLivePointers(const void* ptr, size_t size) {
+ __lsan_register_root_region(ptr, size);
+}
+void UnRegisterLivePointers(const void* ptr, size_t size) {
+ __lsan_unregister_root_region(ptr, size);
+}
+LeakCheckDisabler::LeakCheckDisabler() { __lsan_disable(); }
+LeakCheckDisabler::~LeakCheckDisabler() { __lsan_enable(); }
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // LEAK_SANITIZER
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.h
new file mode 100644
index 00000000000..d69f08f2487
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.h
@@ -0,0 +1,133 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: leak_check.h
+// -----------------------------------------------------------------------------
+//
+// This file contains functions that affect leak checking behavior within
+// targets built with the LeakSanitizer (LSan), a memory leak detector that is
+// integrated within the AddressSanitizer (ASan) as an additional component, or
+// which can be used standalone. LSan and ASan are included (or can be provided)
+// as additional components for most compilers such as Clang, gcc and MSVC.
+// Note: this leak checking API is not yet supported in MSVC.
+// Leak checking is enabled by default in all ASan builds.
+//
+// See https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// -----------------------------------------------------------------------------
+#ifndef ABSL_DEBUGGING_LEAK_CHECK_H_
+#define ABSL_DEBUGGING_LEAK_CHECK_H_
+
+#include <cstddef>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// HaveLeakSanitizer()
+//
+// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
+// currently built into this target.
+bool HaveLeakSanitizer();
+
+// LeakCheckerIsActive()
+//
+// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
+// currently built into this target and is turned on.
+bool LeakCheckerIsActive();
+
+// DoIgnoreLeak()
+//
+// Implements `IgnoreLeak()` below. This function should usually
+// not be called directly; calling `IgnoreLeak()` is preferred.
+void DoIgnoreLeak(const void* ptr);
+
+// IgnoreLeak()
+//
+// Instruct the leak sanitizer to ignore leak warnings on the object referenced
+// by the passed pointer, as well as all heap objects transitively referenced
+// by it. The passed object pointer can point to either the beginning of the
+// object or anywhere within it.
+//
+// Example:
+//
+// static T* obj = IgnoreLeak(new T(...));
+//
+// If the passed `ptr` does not point to an actively allocated object at the
+// time `IgnoreLeak()` is called, the call is a no-op; if it is actively
+// allocated, leak sanitizer will assume this object is referenced even if
+// there is no actual reference in user memory.
+//
+template <typename T>
+T* IgnoreLeak(T* ptr) {
+ DoIgnoreLeak(ptr);
+ return ptr;
+}
+
+// FindAndReportLeaks()
+//
+// If any leaks are detected, prints a leak report and returns true. This
+// function may be called repeatedly, and does not affect end-of-process leak
+// checking.
+//
+// Example:
+// if (FindAndReportLeaks()) {
+// ... diagnostic already printed. Exit with failure code.
+// exit(1)
+// }
+bool FindAndReportLeaks();
+
+// LeakCheckDisabler
+//
+// This helper class indicates that any heap allocations done in the code block
+// covered by the scoped object, which should be allocated on the stack, will
+// not be reported as leaks. Leak check disabling will occur within the code
+// block and any nested function calls within the code block.
+//
+// Example:
+//
+// void Foo() {
+// LeakCheckDisabler disabler;
+// ... code that allocates objects whose leaks should be ignored ...
+// }
+//
+// REQUIRES: Destructor runs in same thread as constructor
+class LeakCheckDisabler {
+ public:
+ LeakCheckDisabler();
+ LeakCheckDisabler(const LeakCheckDisabler&) = delete;
+ LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete;
+ ~LeakCheckDisabler();
+};
+
+// RegisterLivePointers()
+//
+// Registers `ptr[0,size-1]` as pointers to memory that is still actively being
+// referenced and for which leak checking should be ignored. This function is
+// useful if you store pointers in mapped memory, for memory ranges that we
+// know are correct but that normal analysis would otherwise flag as leaked.
+void RegisterLivePointers(const void* ptr, size_t size);
+
+// UnRegisterLivePointers()
+//
+// Deregisters the pointers previously marked as active in
+// `RegisterLivePointers()`, enabling leak checking of those pointers.
+void UnRegisterLivePointers(const void* ptr, size_t size);
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_LEAK_CHECK_H_
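For illustration only (not part of this diff; the function name is hypothetical): both suppression mechanisms declared above in one sketch.

    #include "y_absl/debugging/leak_check.h"

    void MakeKnownLeaks() {
      // Suppress one intentionally leaked object (and what it references):
      static int *config = y_absl::IgnoreLeak(new int(42));
      (void)config;

      // Suppress everything allocated while 'disabler' is in scope:
      y_absl::LeakCheckDisabler disabler;
      new int(7);  // not reported as a leak
    }
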
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check/ya.make
new file mode 100644
index 00000000000..f22152ef8a6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check/ya.make
@@ -0,0 +1,26 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/debugging)
+
+SRCS(
+ leak_check.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable.cc
new file mode 100644
index 00000000000..924d6e3d543
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Disable LeakSanitizer when this file is linked in.
+// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h
+extern "C" int __lsan_is_turned_off();
+extern "C" int __lsan_is_turned_off() {
+ return 1;
+}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable/ya.make
new file mode 100644
index 00000000000..41792f1bc66
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable/ya.make
@@ -0,0 +1,26 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/debugging)
+
+SRCS(
+ leak_check_disable.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace.cc
new file mode 100644
index 00000000000..9263d6a2a8c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace.cc
@@ -0,0 +1,142 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Produce stack trace.
+//
+// There are three different ways we can try to get the stack trace:
+//
+// 1) Our hand-coded stack-unwinder. This depends on a certain stack
+// layout, which is used by gcc (and those systems using a
+// gcc-compatible ABI) on x86 systems, at least since gcc 2.95.
+// It uses the frame pointer to do its work.
+//
+// 2) The libunwind library. This is still in development, and as a
+// separate library adds a new dependency, but doesn't need a frame
+// pointer. It also doesn't call malloc.
+//
+// 3) The gdb unwinder -- also the one used by the c++ exception code.
+// It's obviously well-tested, but has a fatal flaw: it can call
+// malloc() from the unwinder. This is a problem because we're
+// trying to use the unwinder to instrument malloc().
+//
+// Note: if you add a new implementation here, make sure it works
+// correctly when y_absl::GetStackTrace() is called with max_depth == 0.
+// Some code may do that.
+
+#include "y_absl/debugging/stacktrace.h"
+
+#include <atomic>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/port.h"
+#include "y_absl/debugging/internal/stacktrace_config.h"
+
+#if defined(ABSL_STACKTRACE_INL_HEADER)
+#include ABSL_STACKTRACE_INL_HEADER
+#else
+# error Cannot calculate stack trace: will need to write for your environment
+
+# include "y_absl/debugging/internal/stacktrace_aarch64-inl.inc"
+# include "y_absl/debugging/internal/stacktrace_arm-inl.inc"
+# include "y_absl/debugging/internal/stacktrace_emscripten-inl.inc"
+# include "y_absl/debugging/internal/stacktrace_generic-inl.inc"
+# include "y_absl/debugging/internal/stacktrace_powerpc-inl.inc"
+# include "y_absl/debugging/internal/stacktrace_riscv-inl.inc"
+# include "y_absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+# include "y_absl/debugging/internal/stacktrace_win32-inl.inc"
+# include "y_absl/debugging/internal/stacktrace_x86-inl.inc"
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+typedef int (*Unwinder)(void**, int*, int, int, const void*, int*);
+std::atomic<Unwinder> custom;
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, int* sizes,
+ int max_depth, int skip_count,
+ const void* uc,
+ int* min_dropped_frames) {
+ Unwinder f = &UnwindImpl<IS_STACK_FRAMES, IS_WITH_CONTEXT>;
+ Unwinder g = custom.load(std::memory_order_acquire);
+ if (g != nullptr) f = g;
+
+ // Add 1 to skip count for the unwinder function itself
+ int size = (*f)(result, sizes, max_depth, skip_count + 1, uc,
+ min_dropped_frames);
+ // To disable tail call to (*f)(...)
+ ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+ return size;
+}
+
+} // anonymous namespace
+
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackFrames(
+ void** result, int* sizes, int max_depth, int skip_count) {
+ return Unwind<true, false>(result, sizes, max_depth, skip_count, nullptr,
+ nullptr);
+}
+
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
+GetStackFramesWithContext(void** result, int* sizes, int max_depth,
+ int skip_count, const void* uc,
+ int* min_dropped_frames) {
+ return Unwind<true, true>(result, sizes, max_depth, skip_count, uc,
+ min_dropped_frames);
+}
+
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace(
+ void** result, int max_depth, int skip_count) {
+ return Unwind<false, false>(result, nullptr, max_depth, skip_count, nullptr,
+ nullptr);
+}
+
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
+GetStackTraceWithContext(void** result, int max_depth, int skip_count,
+ const void* uc, int* min_dropped_frames) {
+ return Unwind<false, true>(result, nullptr, max_depth, skip_count, uc,
+ min_dropped_frames);
+}
+
+void SetStackUnwinder(Unwinder w) {
+ custom.store(w, std::memory_order_release);
+}
+
+int DefaultStackUnwinder(void** pcs, int* sizes, int depth, int skip,
+ const void* uc, int* min_dropped_frames) {
+ skip++; // For this function
+ Unwinder f = nullptr;
+ if (sizes == nullptr) {
+ if (uc == nullptr) {
+ f = &UnwindImpl<false, false>;
+ } else {
+ f = &UnwindImpl<false, true>;
+ }
+ } else {
+ if (uc == nullptr) {
+ f = &UnwindImpl<true, false>;
+ } else {
+ f = &UnwindImpl<true, true>;
+ }
+ }
+ volatile int x = 0;
+ int n = (*f)(pcs, sizes, depth, skip, uc, min_dropped_frames);
+ x = 1; (void) x; // To disable tail call to (*f)(...)
+ return n;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace.h
new file mode 100644
index 00000000000..cbd48b986b0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace.h
@@ -0,0 +1,231 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: stacktrace.h
+// -----------------------------------------------------------------------------
+//
+// This file contains routines to extract the current stack trace and associated
+// stack frames. These functions are thread-safe and async-signal-safe.
+//
+// Note that stack trace functionality is platform dependent and requires
+// additional support from the compiler/build system in most cases. (That is,
+// this functionality generally only works on platforms/builds that have been
+// specifically configured to support it.)
+//
+// Note: stack traces in Abseil that do not utilize a symbolizer will result in
+// frames consisting of function addresses rather than human-readable function
+// names. (See symbolize.h for information on symbolizing these values.)
+
+#ifndef ABSL_DEBUGGING_STACKTRACE_H_
+#define ABSL_DEBUGGING_STACKTRACE_H_
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// GetStackFrames()
+//
+// Records program counter values for up to `max_depth` frames, skipping the
+// most recent `skip_count` stack frames, stores their corresponding values
+// and sizes in `results` and `sizes` buffers, and returns the number of frames
+// stored. (Note that the frame generated for the `y_absl::GetStackFrames()`
+// routine itself is also skipped.)
+//
+// Example:
+//
+// main() { foo(); }
+// foo() { bar(); }
+// bar() {
+// void* result[10];
+// int sizes[10];
+// int depth = y_absl::GetStackFrames(result, sizes, 10, 1);
+// }
+//
+// The current stack frame would consist of three function calls: `bar()`,
+// `foo()`, and then `main()`; however, since the `GetStackFrames()` call sets
+// `skip_count` to `1`, it will skip the frame for `bar()`, the most recently
+// invoked function call. It will therefore return 2 and fill `result` with
+// program counters within the following functions:
+//
+// result[0] foo()
+// result[1] main()
+//
+// (Note: in practice, a few more entries after `main()` may be added to account
+// for startup processes.)
+//
+// Corresponding stack frame sizes will also be recorded:
+//
+// sizes[0] 16
+// sizes[1] 16
+//
+// (Stack frame sizes of `16` above are just for illustration purposes.)
+//
+// Stack frame sizes of 0 or less indicate that those frame sizes couldn't
+// be identified.
+//
+// This routine may return fewer stack frame entries than are
+// available. Also note that `result` and `sizes` must both be non-null.
+extern int GetStackFrames(void** result, int* sizes, int max_depth,
+ int skip_count);
+
+// GetStackFramesWithContext()
+//
+// Records program counter values obtained from a signal handler. Records
+// program counter values for up to `max_depth` frames, skipping the most recent
+// `skip_count` stack frames, stores their corresponding values and sizes in
+// `results` and `sizes` buffers, and returns the number of frames stored. (Note
+// that the frame generated for the `y_absl::GetStackFramesWithContext()` routine
+// itself is also skipped.)
+//
+// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value
+// passed to a signal handler registered via the `sa_sigaction` field of a
+// `sigaction` struct. (See
+// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may
+// help a stack unwinder to provide a better stack trace under certain
+// conditions. `uc` may safely be null.
+//
+// The `min_dropped_frames` output parameter, if non-null, points to the
+// location to note any dropped stack frames, if any, due to buffer limitations
+// or other reasons. (This value will be set to `0` if no frames were dropped.)
+// The number of total stack frames is guaranteed to be >= skip_count +
+// max_depth + *min_dropped_frames.
+extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
+ int skip_count, const void* uc,
+ int* min_dropped_frames);
+
+// GetStackTrace()
+//
+// Records program counter values for up to `max_depth` frames, skipping the
+// most recent `skip_count` stack frames, stores their corresponding values
+// in `results`, and returns the number of frames
+// stored. Note that this function is similar to `y_absl::GetStackFrames()`
+// except that it returns the stack trace only, and not stack frame sizes.
+//
+// Example:
+//
+// main() { foo(); }
+// foo() { bar(); }
+// bar() {
+// void* result[10];
+// int depth = y_absl::GetStackTrace(result, 10, 1);
+// }
+//
+// This produces:
+//
+// result[0] foo
+// result[1] main
+// .... ...
+//
+// `result` must not be null.
+extern int GetStackTrace(void** result, int max_depth, int skip_count);
+
+// GetStackTraceWithContext()
+//
+// Records program counter values obtained from a signal handler. Records
+// program counter values for up to `max_depth` frames, skipping the most recent
+// `skip_count` stack frames, stores their corresponding values in `results`,
+// and returns the number of frames stored. (Note that the frame generated for
+// the `y_absl::GetStackTraceWithContext()` routine itself is also skipped.)
+//
+// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value
+// passed to a signal handler registered via the `sa_sigaction` field of a
+// `sigaction` struct. (See
+// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may
+// help a stack unwinder to provide a better stack trace under certain
+// conditions. `uc` may safely be null.
+//
+// The `min_dropped_frames` output parameter, if non-null, points to the
+// location to note any dropped stack frames, if any, due to buffer limitations
+// or other reasons. (This value will be set to `0` if no frames were dropped.)
+// The number of total stack frames is guaranteed to be >= skip_count +
+// max_depth + *min_dropped_frames.
+extern int GetStackTraceWithContext(void** result, int max_depth,
+ int skip_count, const void* uc,
+ int* min_dropped_frames);
+
+// SetStackUnwinder()
+//
+// Provides a custom function for unwinding stack frames that will be used in
+// place of the default stack unwinder when invoking the static
+// GetStack{Frames,Trace}{,WithContext}() functions above.
+//
+// The arguments passed to the unwinder function will match the
+// arguments passed to `y_absl::GetStackFramesWithContext()` except that sizes
+// will be non-null iff the caller is interested in frame sizes.
+//
+// If unwinder is set to null, we revert to the default stack-tracing behavior.
+//
+// *****************************************************************************
+// WARNING
+// *****************************************************************************
+//
+// y_absl::SetStackUnwinder is not suitable for general purpose use. It is
+// provided for custom runtimes.
+// Some things to watch out for when calling `y_absl::SetStackUnwinder()`:
+//
+// (a) The unwinder may be called from within signal handlers and
+// therefore must be async-signal-safe.
+//
+// (b) Even after a custom stack unwinder has been unregistered, other
+// threads may still be in the process of using that unwinder.
+// Therefore do not clean up any state that may be needed by an old
+// unwinder.
+// *****************************************************************************
+extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes,
+ int max_depth, int skip_count,
+ const void* uc,
+ int* min_dropped_frames));
+
+// DefaultStackUnwinder()
+//
+// Records program counter values of up to `max_depth` frames, skipping the most
+// recent `skip_count` stack frames, and stores their corresponding values in
+// `pcs`. (Note that the frame generated for this call itself is also skipped.)
+// This function acts as a generic stack-unwinder; prefer usage of the more
+// specific `GetStack{Trace,Frames}{,WithContext}()` functions above.
+//
+// If you have set your own stack unwinder (with the `SetStackUnwinder()`
+// function above), you can still get the default stack unwinder by calling
+// `DefaultStackUnwinder()`, which will ignore any previously set stack unwinder
+// and use the default one instead.
+//
+// Because this function is generic, only `pcs` is guaranteed to be non-null
+// upon return. It is legal for `sizes`, `uc`, and `min_dropped_frames` to all
+// be null when called.
+//
+// The semantics are the same as the corresponding `GetStack*()` function in the
+// case where `y_absl::SetStackUnwinder()` was never called. Equivalents are:
+//
+//                |        null sizes          |       non-null sizes
+//  ==============|============================|=============================
+//   null uc      | GetStackTrace()            | GetStackFrames()
+//   non-null uc  | GetStackTraceWithContext() | GetStackFramesWithContext()
+//  ==============|============================|=============================
+extern int DefaultStackUnwinder(void** pcs, int* sizes, int max_depth,
+ int skip_count, const void* uc,
+ int* min_dropped_frames);
+
+namespace debugging_internal {
+// Returns true for platforms which are expected to have functioning stack trace
+// implementations. Intended to be used for tests which want to exclude
+// verification of logic known to be broken because stack traces are not
+// working.
+extern bool StackTraceWorksForTest();
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_STACKTRACE_H_
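For illustration only (not part of this diff; the unwinder and function names are hypothetical): a sketch of installing a custom unwinder. This one merely defers to DefaultStackUnwinder(), which a real replacement would not; per the warning above, the unwinder must be async-signal-safe.

    #include "y_absl/debugging/stacktrace.h"

    // Hypothetical unwinder; a real one would walk frames itself.
    static int MyUnwinder(void **pcs, int *sizes, int max_depth, int skip_count,
                          const void *uc, int *min_dropped_frames) {
      return y_absl::DefaultStackUnwinder(pcs, sizes, max_depth, skip_count, uc,
                                          min_dropped_frames);
    }

    void UseCustomUnwinder() {
      y_absl::SetStackUnwinder(&MyUnwinder);
      // GetStack{Trace,Frames}{,WithContext}() now route through MyUnwinder.
      y_absl::SetStackUnwinder(nullptr);  // revert to the default unwinder
    }
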
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace/ya.make
new file mode 100644
index 00000000000..a56bdc8a4ce
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace/ya.make
@@ -0,0 +1,32 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/debugging)
+
+SRCS(
+ stacktrace.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.cc
new file mode 100644
index 00000000000..88c92a19ea4
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.cc
@@ -0,0 +1,38 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/debugging/symbolize.h"
+
+#ifdef _WIN32
+#include <winapifamily.h>
+#if !(WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)) || \
+ WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+// UWP doesn't have access to win32 APIs.
+#define ABSL_INTERNAL_HAVE_SYMBOLIZE_WIN32
+#endif
+#endif
+
+#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE)
+#include "y_absl/debugging/symbolize_elf.inc"
+#elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WIN32)
+// The Windows Symbolizer only works if PDB files containing the debug info
+// are available to the program at runtime.
+#include "y_absl/debugging/symbolize_win32.inc"
+#elif defined(__APPLE__)
+#include "y_absl/debugging/symbolize_darwin.inc"
+#elif defined(__EMSCRIPTEN__)
+#include "y_absl/debugging/symbolize_emscripten.inc"
+#else
+#include "y_absl/debugging/symbolize_unimplemented.inc"
+#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.h
new file mode 100644
index 00000000000..2a99f65c834
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.h
@@ -0,0 +1,99 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: symbolize.h
+// -----------------------------------------------------------------------------
+//
+// This file configures the Abseil symbolizer for use in converting instruction
+// pointer addresses (program counters) into human-readable names (function
+// calls, etc.) within Abseil code.
+//
+// The symbolizer may be invoked from several sources:
+//
+// * Implicitly, through the installation of an Abseil failure signal handler.
+// (See failure_signal_handler.h for more information.)
+// * By calling `Symbolize()` directly on a program counter you obtain through
+// `y_absl::GetStackTrace()` or `y_absl::GetStackFrames()`. (See stacktrace.h
+//   for more information.)
+// * By calling `Symbolize()` directly on a program counter you obtain through
+// other means (which would be platform-dependent).
+//
+// In all of the above cases, the symbolizer must first be initialized before
+// any program counter values can be symbolized. If you are installing a failure
+// signal handler, initialize the symbolizer before you do so.
+//
+// Example:
+//
+// int main(int argc, char** argv) {
+// // Initialize the Symbolizer before installing the failure signal handler
+// y_absl::InitializeSymbolizer(argv[0]);
+//
+// // Now you may install the failure signal handler
+// y_absl::FailureSignalHandlerOptions options;
+// y_absl::InstallFailureSignalHandler(options);
+//
+// // Start running your main program
+// ...
+// return 0;
+// }
+//
+#ifndef ABSL_DEBUGGING_SYMBOLIZE_H_
+#define ABSL_DEBUGGING_SYMBOLIZE_H_
+
+#include "y_absl/debugging/internal/symbolize.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// InitializeSymbolizer()
+//
+// Initializes the program counter symbolizer, given the path of the program
+// (typically obtained through `main()`s `argv[0]`). The Abseil symbolizer
+// allows you to read program counters (instruction pointer values) using their
+// human-readable names within output such as stack traces.
+//
+// Example:
+//
+// int main(int argc, char *argv[]) {
+// y_absl::InitializeSymbolizer(argv[0]);
+// // Now you can use the symbolizer
+// }
+void InitializeSymbolizer(const char* argv0);
+
+// Symbolize()
+//
+// Symbolizes a program counter (instruction pointer value) `pc` and, on
+// success, writes the name to `out`. The symbol name is demangled, if possible.
+// Note that the symbolized name may be truncated and will be NUL-terminated.
+// Demangling is supported for symbols generated by GCC 3.x or newer. Returns
+// `false` on failure.
+//
+// Example:
+//
+// // Print a program counter and its symbol name.
+// static void DumpPCAndSymbol(void *pc) {
+// char tmp[1024];
+// const char *symbol = "(unknown)";
+// if (y_absl::Symbolize(pc, tmp, sizeof(tmp))) {
+// symbol = tmp;
+// }
+// y_absl::PrintF("%p %s\n", pc, symbol);
+// }
+bool Symbolize(const void *pc, char *out, int out_size);
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_DEBUGGING_SYMBOLIZE_H_
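For illustration only (not part of this diff): a sketch combining stacktrace.h with the API above to print a symbolized backtrace. As documented, InitializeSymbolizer() must run before any Symbolize() call.

    #include <cstdio>

    #include "y_absl/debugging/stacktrace.h"
    #include "y_absl/debugging/symbolize.h"

    int main(int argc, char **argv) {
      (void)argc;
      y_absl::InitializeSymbolizer(argv[0]);  // must precede Symbolize() calls
      void *pcs[16];
      int depth = y_absl::GetStackTrace(pcs, 16, /*skip_count=*/0);
      for (int i = 0; i < depth; ++i) {
        char name[1024];
        const char *symbol = "(unknown)";
        if (y_absl::Symbolize(pcs[i], name, static_cast<int>(sizeof(name)))) {
          symbol = name;
        }
        std::printf("%p\t%s\n", pcs[i], symbol);
      }
      return 0;
    }
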
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize/ya.make
new file mode 100644
index 00000000000..58f9d70682a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize/ya.make
@@ -0,0 +1,40 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/debugging)
+
+SRCS(
+ symbolize.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_darwin.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_darwin.inc
new file mode 100644
index 00000000000..c5960a79383
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_darwin.inc
@@ -0,0 +1,101 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cxxabi.h>
+#include <execinfo.h>
+
+#include <algorithm>
+#include <cstring>
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/debugging/internal/demangle.h"
+#include "y_absl/strings/numbers.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+void InitializeSymbolizer(const char*) {}
+
+namespace debugging_internal {
+namespace {
+
+static TString GetSymbolString(y_absl::string_view backtrace_line) {
+ // Example Backtrace lines:
+ // 0 libimaging_shared.dylib 0x018c152a
+ // _ZNSt11_Deque_baseIN3nik7mediadb4PageESaIS2_EE17_M_initialize_mapEm + 3478
+ //
+ // or
+ // 0 libimaging_shared.dylib 0x0000000001895c39
+ // _ZN3nik4util19register_shared_ptrINS_3gpu7TextureEEEvPKvS5_ + 39
+ //
+ // or
+ // 0 mysterious_app 0x0124000120120009 main + 17
+ auto address_pos = backtrace_line.find(" 0x");
+ if (address_pos == y_absl::string_view::npos) return TString();
+ y_absl::string_view symbol_view = backtrace_line.substr(address_pos + 1);
+
+ auto space_pos = symbol_view.find(" ");
+ if (space_pos == y_absl::string_view::npos) return TString();
+ symbol_view = symbol_view.substr(space_pos + 1); // to mangled symbol
+
+ auto plus_pos = symbol_view.find(" + ");
+ if (plus_pos == y_absl::string_view::npos) return TString();
+  symbol_view = symbol_view.substr(0, plus_pos);  // strip the trailing " + <offset>"
+
+ return TString(symbol_view);
+}
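+
+// Illustrative note: for the second sample line above, GetSymbolString()
+// returns "_ZN3nik4util19register_shared_ptrINS_3gpu7TextureEEEvPKvS5_".
+// It skips past the " 0x..." address, takes the next space-delimited token,
+// and strips the trailing " + <offset>".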
+
+} // namespace
+} // namespace debugging_internal
+
+bool Symbolize(const void* pc, char* out, int out_size) {
+ if (out_size <= 0 || pc == nullptr) {
+ out = nullptr;
+ return false;
+ }
+
+ // This allocates a char* array.
+ char** frame_strings = backtrace_symbols(const_cast<void**>(&pc), 1);
+
+ if (frame_strings == nullptr) return false;
+
+ TString symbol = debugging_internal::GetSymbolString(frame_strings[0]);
+ free(frame_strings);
+
+ char tmp_buf[1024];
+ if (debugging_internal::Demangle(symbol.c_str(), tmp_buf, sizeof(tmp_buf))) {
+ size_t len = strlen(tmp_buf);
+ if (len + 1 <= static_cast<size_t>(out_size)) { // +1 for '\0'
+ assert(len < sizeof(tmp_buf));
+ memmove(out, tmp_buf, len + 1);
+ }
+ } else {
+ strncpy(out, symbol.c_str(), out_size);
+ }
+
+ if (out[out_size - 1] != '\0') {
+ // strncpy() does not '\0' terminate when it truncates.
+ static constexpr char kEllipsis[] = "...";
+ int ellipsis_size = std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
+ memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+ out[out_size - 1] = '\0';
+ }
+
+ return true;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc
new file mode 100644
index 00000000000..18297405e3e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc
@@ -0,0 +1,1574 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This library provides the Symbolize() function, which symbolizes program
+// counters to their corresponding symbol names on Linux platforms.
+// This library has a minimal implementation of an ELF symbol table
+// reader (i.e. it doesn't depend on libelf, etc.).
+//
+// The algorithm used in Symbolize() is as follows.
+//
+// 1. Go through a list of maps in /proc/self/maps and find the map
+// containing the program counter.
+//
+// 2. Open the mapped file and find a regular symbol table inside.
+// Iterate over symbols in the symbol table and look for the symbol
+// containing the program counter. If such a symbol is found,
+// obtain the symbol name, and demangle the symbol if possible.
+// If the symbol isn't found in the regular symbol table (binary is
+// stripped), try the same thing with a dynamic symbol table.
+//
+// Note that Symbolize() was originally implemented to be used in
+// signal handlers, hence it doesn't use malloc() and other unsafe
+// operations. It should be both thread-safe and async-signal-safe.
+//
+// Implementation note:
+//
+// We don't use the heap, only the stack. We want to reduce
+// stack consumption so that the symbolizer can run on small stacks.
+//
+// Here are some numbers collected with GCC 4.1.0 on x86:
+// - sizeof(Elf32_Sym) = 16
+// - sizeof(Elf32_Shdr) = 40
+// - sizeof(Elf64_Sym) = 24
+// - sizeof(Elf64_Shdr) = 64
+//
+// This implementation is intended to be async-signal-safe but uses some
+// functions which are not guaranteed to be so, such as memchr() and
+// memmove(). We assume they are async-signal-safe.
+
+#include <dlfcn.h>
+#include <elf.h>
+#include <fcntl.h>
+#include <link.h> // For ElfW() macro.
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <cerrno>
+#include <cinttypes>
+#include <climits>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "y_absl/base/casts.h"
+#include "y_absl/base/dynamic_annotations.h"
+#include "y_absl/base/internal/low_level_alloc.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/spinlock.h"
+#include "y_absl/base/port.h"
+#include "y_absl/debugging/internal/demangle.h"
+#include "y_absl/debugging/internal/vdso_support.h"
+#include "y_absl/strings/string_view.h"
+
+#if defined(__FreeBSD__) && !defined(ElfW)
+#define ElfW(x) __ElfN(x)
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Value of argv[0]. Used by MaybeInitializeObjFile().
+static char *argv0_value = nullptr;
+
+void InitializeSymbolizer(const char *argv0) {
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+ // We need to make sure VDSOSupport::Init() is called before any setuid or
+ // chroot calls, so InitializeSymbolizer() should be called very early in the
+ // life of a program.
+ y_absl::debugging_internal::VDSOSupport::Init();
+#endif
+ if (argv0_value != nullptr) {
+ free(argv0_value);
+ argv0_value = nullptr;
+ }
+ if (argv0 != nullptr && argv0[0] != '\0') {
+ argv0_value = strdup(argv0);
+ }
+}
+
+namespace debugging_internal {
+namespace {
+
+// Re-runs fn until it doesn't cause EINTR.
+#define NO_INTR(fn) \
+ do { \
+ } while ((fn) < 0 && errno == EINTR)
+
+// On Linux, ELF_ST_* are defined in <linux/elf.h>. To make this portable
+// we define our own ELF_ST_BIND and ELF_ST_TYPE if not available.
+#ifndef ELF_ST_BIND
+#define ELF_ST_BIND(info) (((unsigned char)(info)) >> 4)
+#endif
+
+#ifndef ELF_ST_TYPE
+#define ELF_ST_TYPE(info) (((unsigned char)(info)) & 0xF)
+#endif
+
+// Some platforms use a special .opd section to store function pointers.
+const char kOpdSectionName[] = ".opd";
+
+#if (defined(__powerpc__) && !(_CALL_ELF > 1)) || defined(__ia64)
+// Use opd section for function descriptors on these platforms, the function
+// address is the first word of the descriptor.
+enum { kPlatformUsesOPDSections = 1 };
+#else // not PPC or IA64
+enum { kPlatformUsesOPDSections = 0 };
+#endif
+
+// This works for PowerPC & IA64 only. A function descriptor consists of two
+// pointers, and the first one is the function's entry point.
+const size_t kFunctionDescriptorSize = sizeof(void *) * 2;
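+
+// Schematically (a sketch for illustration, not a type used below):
+//
+//   struct FunctionDescriptor {
+//     void *entry_point;  // first instruction of the function
+//     void *toc_or_gp;    // TOC/GP value; not needed for symbolization
+//   };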
+
+const int kMaxDecorators = 10; // Seems like a reasonable upper limit.
+
+struct InstalledSymbolDecorator {
+ SymbolDecorator fn;
+ void *arg;
+ int ticket;
+};
+
+int g_num_decorators;
+InstalledSymbolDecorator g_decorators[kMaxDecorators];
+
+struct FileMappingHint {
+ const void *start;
+ const void *end;
+ uint64_t offset;
+ const char *filename;
+};
+
+// Protects g_decorators.
+// We are using SpinLock and not a Mutex here, because we may be called
+// from inside Mutex::Lock itself, which prohibits recursive calls.
+// This happens in e.g. base/stacktrace_syscall_unittest.
+// Moreover, we use only TryLock(): if the decorator list
+// is being modified (is busy), we skip all decorators, and possibly
+// lose some info. Sorry, that's the best we could do.
+ABSL_CONST_INIT y_absl::base_internal::SpinLock g_decorators_mu(
+ y_absl::kConstInit, y_absl::base_internal::SCHEDULE_KERNEL_ONLY);
+
+const int kMaxFileMappingHints = 8;
+int g_num_file_mapping_hints;
+FileMappingHint g_file_mapping_hints[kMaxFileMappingHints];
+// Protects g_file_mapping_hints.
+ABSL_CONST_INIT y_absl::base_internal::SpinLock g_file_mapping_mu(
+ y_absl::kConstInit, y_absl::base_internal::SCHEDULE_KERNEL_ONLY);
+
+// Async-signal-safe function to zero a buffer.
+// memset() is not guaranteed to be async-signal-safe.
+static void SafeMemZero(void* p, size_t size) {
+ unsigned char *c = static_cast<unsigned char *>(p);
+ while (size--) {
+ *c++ = 0;
+ }
+}
+
+struct ObjFile {
+ ObjFile()
+ : filename(nullptr),
+ start_addr(nullptr),
+ end_addr(nullptr),
+ offset(0),
+ fd(-1),
+ elf_type(-1) {
+ SafeMemZero(&elf_header, sizeof(elf_header));
+ SafeMemZero(&phdr[0], sizeof(phdr));
+ }
+
+ char *filename;
+ const void *start_addr;
+ const void *end_addr;
+ uint64_t offset;
+
+ // The following fields are initialized on the first access to the
+ // object file.
+ int fd;
+ int elf_type;
+ ElfW(Ehdr) elf_header;
+
+ // PT_LOAD program header describing executable code.
+  // Normally we expect just one, but Swift binaries have two.
+ std::array<ElfW(Phdr), 2> phdr;
+};
+
+// Build 4-way associative cache for symbols. Within each cache line, symbols
+// are replaced in LRU order.
+enum {
+ ASSOCIATIVITY = 4,
+};
+struct SymbolCacheLine {
+ const void *pc[ASSOCIATIVITY];
+ char *name[ASSOCIATIVITY];
+
+  // age[i] is incremented whenever the line is accessed. It is reset to zero
+  // when the i'th entry is read (a cache hit).
+ uint32_t age[ASSOCIATIVITY];
+};
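+
+// Lookup sketch (illustrative; mirrors FindSymbolInCache() below): a probe
+// for pc P scans the ASSOCIATIVITY entries of P's line,
+//
+//   SymbolCacheLine *line = GetCacheLine(P);
+//   for (int i = 0; i < ASSOCIATIVITY; ++i)
+//     if (line->pc[i] == P) { line->age[i] = 0; return line->name[i]; }
+//
+// and on a miss the entry with the largest age is evicted (LRU per line).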
+
+// ---------------------------------------------------------------
+// An async-signal-safe arena for LowLevelAlloc
+static std::atomic<base_internal::LowLevelAlloc::Arena *> g_sig_safe_arena;
+
+static base_internal::LowLevelAlloc::Arena *SigSafeArena() {
+ return g_sig_safe_arena.load(std::memory_order_acquire);
+}
+
+static void InitSigSafeArena() {
+ if (SigSafeArena() == nullptr) {
+ base_internal::LowLevelAlloc::Arena *new_arena =
+ base_internal::LowLevelAlloc::NewArena(
+ base_internal::LowLevelAlloc::kAsyncSignalSafe);
+ base_internal::LowLevelAlloc::Arena *old_value = nullptr;
+ if (!g_sig_safe_arena.compare_exchange_strong(old_value, new_arena,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ // We lost a race to allocate an arena; deallocate.
+ base_internal::LowLevelAlloc::DeleteArena(new_arena);
+ }
+ }
+}
+
+// ---------------------------------------------------------------
+// An AddrMap is a vector of ObjFile, using SigSafeArena() for allocation.
+
+class AddrMap {
+ public:
+ AddrMap() : size_(0), allocated_(0), obj_(nullptr) {}
+ ~AddrMap() { base_internal::LowLevelAlloc::Free(obj_); }
+ int Size() const { return size_; }
+ ObjFile *At(int i) { return &obj_[i]; }
+ ObjFile *Add();
+ void Clear();
+
+ private:
+ int size_; // count of valid elements (<= allocated_)
+ int allocated_; // count of allocated elements
+ ObjFile *obj_; // array of allocated_ elements
+ AddrMap(const AddrMap &) = delete;
+ AddrMap &operator=(const AddrMap &) = delete;
+};
+
+void AddrMap::Clear() {
+ for (int i = 0; i != size_; i++) {
+ At(i)->~ObjFile();
+ }
+ size_ = 0;
+}
+
+ObjFile *AddrMap::Add() {
+ if (size_ == allocated_) {
+ int new_allocated = allocated_ * 2 + 50;
+ ObjFile *new_obj_ =
+ static_cast<ObjFile *>(base_internal::LowLevelAlloc::AllocWithArena(
+ new_allocated * sizeof(*new_obj_), SigSafeArena()));
+ if (obj_) {
+ memcpy(new_obj_, obj_, allocated_ * sizeof(*new_obj_));
+ base_internal::LowLevelAlloc::Free(obj_);
+ }
+ obj_ = new_obj_;
+ allocated_ = new_allocated;
+ }
+ return new (&obj_[size_++]) ObjFile;
+}
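+
+// Growth illustration: allocated_ progresses 0 -> 50 -> 150 -> 350 -> ...,
+// i.e. roughly doubling with a constant floor, so the number of
+// reallocation copies stays logarithmic in the final map size.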
+
+// ---------------------------------------------------------------
+
+enum FindSymbolResult { SYMBOL_NOT_FOUND = 1, SYMBOL_TRUNCATED, SYMBOL_FOUND };
+
+class Symbolizer {
+ public:
+ Symbolizer();
+ ~Symbolizer();
+ const char *GetSymbol(const void *const pc);
+
+ private:
+ char *CopyString(const char *s) {
+ int len = strlen(s);
+ char *dst = static_cast<char *>(
+ base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena()));
+ ABSL_RAW_CHECK(dst != nullptr, "out of memory");
+ memcpy(dst, s, len + 1);
+ return dst;
+ }
+ ObjFile *FindObjFile(const void *const start,
+ size_t size) ABSL_ATTRIBUTE_NOINLINE;
+ static bool RegisterObjFile(const char *filename,
+ const void *const start_addr,
+ const void *const end_addr, uint64_t offset,
+ void *arg);
+ SymbolCacheLine *GetCacheLine(const void *const pc);
+ const char *FindSymbolInCache(const void *const pc);
+ const char *InsertSymbolInCache(const void *const pc, const char *name);
+ void AgeSymbols(SymbolCacheLine *line);
+ void ClearAddrMap();
+ FindSymbolResult GetSymbolFromObjectFile(const ObjFile &obj,
+ const void *const pc,
+ const ptrdiff_t relocation,
+ char *out, int out_size,
+ char *tmp_buf, int tmp_buf_size);
+
+ enum {
+ SYMBOL_BUF_SIZE = 3072,
+ TMP_BUF_SIZE = 1024,
+ SYMBOL_CACHE_LINES = 128,
+ };
+
+ AddrMap addr_map_;
+
+ bool ok_;
+ bool addr_map_read_;
+
+ char symbol_buf_[SYMBOL_BUF_SIZE];
+
+ // tmp_buf_ will be used to store arrays of ElfW(Shdr) and ElfW(Sym)
+ // so we ensure that tmp_buf_ is properly aligned to store either.
+ alignas(16) char tmp_buf_[TMP_BUF_SIZE];
+ static_assert(alignof(ElfW(Shdr)) <= 16,
+ "alignment of tmp buf too small for Shdr");
+ static_assert(alignof(ElfW(Sym)) <= 16,
+ "alignment of tmp buf too small for Sym");
+
+ SymbolCacheLine symbol_cache_[SYMBOL_CACHE_LINES];
+};
+
+static std::atomic<Symbolizer *> g_cached_symbolizer;
+
+} // namespace
+
+static int SymbolizerSize() {
+#if defined(__wasm__) || defined(__asmjs__)
+ int pagesize = getpagesize();
+#else
+ int pagesize = sysconf(_SC_PAGESIZE);
+#endif
+ return ((sizeof(Symbolizer) - 1) / pagesize + 1) * pagesize;
+}
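+
+// For example (assumed figures): with a 4096-byte page and
+// sizeof(Symbolizer) == 5000, this returns ((5000 - 1) / 4096 + 1) * 4096
+// == 8192, i.e. the size rounded up to a whole number of pages.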
+
+// Return (and clear) g_cached_symbolizer if it is not null.
+// Otherwise return a newly allocated symbolizer.
+static Symbolizer *AllocateSymbolizer() {
+ InitSigSafeArena();
+ Symbolizer *symbolizer =
+ g_cached_symbolizer.exchange(nullptr, std::memory_order_acquire);
+ if (symbolizer != nullptr) {
+ return symbolizer;
+ }
+ return new (base_internal::LowLevelAlloc::AllocWithArena(
+ SymbolizerSize(), SigSafeArena())) Symbolizer();
+}
+
+// Set g_cached_symbolizer to s if it is null; otherwise
+// destroy s and free its memory.
+static void FreeSymbolizer(Symbolizer *s) {
+ Symbolizer *old_cached_symbolizer = nullptr;
+ if (!g_cached_symbolizer.compare_exchange_strong(old_cached_symbolizer, s,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ s->~Symbolizer();
+ base_internal::LowLevelAlloc::Free(s);
+ }
+}
+
+Symbolizer::Symbolizer() : ok_(true), addr_map_read_(false) {
+ for (SymbolCacheLine &symbol_cache_line : symbol_cache_) {
+ for (size_t j = 0; j < ABSL_ARRAYSIZE(symbol_cache_line.name); ++j) {
+ symbol_cache_line.pc[j] = nullptr;
+ symbol_cache_line.name[j] = nullptr;
+ symbol_cache_line.age[j] = 0;
+ }
+ }
+}
+
+Symbolizer::~Symbolizer() {
+ for (SymbolCacheLine &symbol_cache_line : symbol_cache_) {
+ for (char *s : symbol_cache_line.name) {
+ base_internal::LowLevelAlloc::Free(s);
+ }
+ }
+ ClearAddrMap();
+}
+
+// We don't use assert() since it's not guaranteed to be
+// async-signal-safe. Instead we define a minimal assertion
+// macro. So far, we don't need pretty printing for __FILE__, etc.
+#define SAFE_ASSERT(expr) ((expr) ? static_cast<void>(0) : abort())
+
+// Read up to "count" bytes from file descriptor "fd" into the buffer
+// starting at "buf" while handling short reads and EINTR. On
+// success, return the number of bytes read. Otherwise, return -1.
+static ssize_t ReadPersistent(int fd, void *buf, size_t count) {
+ SAFE_ASSERT(fd >= 0);
+ SAFE_ASSERT(count <= SSIZE_MAX);
+ char *buf0 = reinterpret_cast<char *>(buf);
+ size_t num_bytes = 0;
+ while (num_bytes < count) {
+ ssize_t len;
+ NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes));
+ if (len < 0) { // There was an error other than EINTR.
+ ABSL_RAW_LOG(WARNING, "read failed: errno=%d", errno);
+ return -1;
+ }
+ if (len == 0) { // Reached EOF.
+ break;
+ }
+ num_bytes += len;
+ }
+ SAFE_ASSERT(num_bytes <= count);
+ return static_cast<ssize_t>(num_bytes);
+}
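+
+// For instance, a request for 4096 bytes may be served by the kernel as a
+// 1024-byte read followed by a 3072-byte read, possibly with EINTR in
+// between; ReadPersistent() keeps reading until all 4096 bytes arrive, EOF
+// is reached, or a real error occurs.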
+
+// Read up to "count" bytes from "offset" in the file pointed by file
+// descriptor "fd" into the buffer starting at "buf". On success,
+// return the number of bytes read. Otherwise, return -1.
+static ssize_t ReadFromOffset(const int fd, void *buf, const size_t count,
+ const off_t offset) {
+ off_t off = lseek(fd, offset, SEEK_SET);
+ if (off == (off_t)-1) {
+ ABSL_RAW_LOG(WARNING, "lseek(%d, %ju, SEEK_SET) failed: errno=%d", fd,
+ static_cast<uintmax_t>(offset), errno);
+ return -1;
+ }
+ return ReadPersistent(fd, buf, count);
+}
+
+// Try reading exactly "count" bytes from "offset" bytes in a file
+// pointed by "fd" into the buffer starting at "buf" while handling
+// short reads and EINTR. On success, return true. Otherwise, return
+// false.
+static bool ReadFromOffsetExact(const int fd, void *buf, const size_t count,
+ const off_t offset) {
+ ssize_t len = ReadFromOffset(fd, buf, count, offset);
+ return len >= 0 && static_cast<size_t>(len) == count;
+}
+
+// Returns elf_header.e_type if the file pointed by fd is an ELF binary.
+static int FileGetElfType(const int fd) {
+ ElfW(Ehdr) elf_header;
+ if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+ return -1;
+ }
+ if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) {
+ return -1;
+ }
+ return elf_header.e_type;
+}
+
+// Read the section headers in the given ELF binary, and if a section
+// of the specified type is found, set the output to this section header
+// and return true. Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static ABSL_ATTRIBUTE_NOINLINE bool GetSectionHeaderByType(
+ const int fd, ElfW(Half) sh_num, const off_t sh_offset, ElfW(Word) type,
+ ElfW(Shdr) * out, char *tmp_buf, int tmp_buf_size) {
+ ElfW(Shdr) *buf = reinterpret_cast<ElfW(Shdr) *>(tmp_buf);
+ const int buf_entries = tmp_buf_size / sizeof(buf[0]);
+ const int buf_bytes = buf_entries * sizeof(buf[0]);
+
+ for (int i = 0; i < sh_num;) {
+ const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
+ const ssize_t num_bytes_to_read =
+ (buf_bytes > num_bytes_left) ? num_bytes_left : buf_bytes;
+ const off_t offset = sh_offset + i * sizeof(buf[0]);
+ const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read, offset);
+ if (len % sizeof(buf[0]) != 0) {
+ ABSL_RAW_LOG(
+ WARNING,
+ "Reading %zd bytes from offset %ju returned %zd which is not a "
+ "multiple of %zu.",
+ num_bytes_to_read, static_cast<uintmax_t>(offset), len,
+ sizeof(buf[0]));
+ return false;
+ }
+ const ssize_t num_headers_in_buf = len / sizeof(buf[0]);
+ SAFE_ASSERT(num_headers_in_buf <= buf_entries);
+ for (int j = 0; j < num_headers_in_buf; ++j) {
+ if (buf[j].sh_type == type) {
+ *out = buf[j];
+ return true;
+ }
+ }
+ i += num_headers_in_buf;
+ }
+ return false;
+}
+
+// There is no particular reason to limit a section name to 63 characters,
+// but there has (as yet) been no need for anything longer either.
+const int kMaxSectionNameLen = 64;
+
+bool ForEachSection(int fd,
+ const std::function<bool(y_absl::string_view name,
+ const ElfW(Shdr) &)> &callback) {
+ ElfW(Ehdr) elf_header;
+ if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+ return false;
+ }
+
+ ElfW(Shdr) shstrtab;
+ off_t shstrtab_offset =
+ (elf_header.e_shoff + elf_header.e_shentsize * elf_header.e_shstrndx);
+ if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+ return false;
+ }
+
+ for (int i = 0; i < elf_header.e_shnum; ++i) {
+ ElfW(Shdr) out;
+ off_t section_header_offset =
+ (elf_header.e_shoff + elf_header.e_shentsize * i);
+ if (!ReadFromOffsetExact(fd, &out, sizeof(out), section_header_offset)) {
+ return false;
+ }
+ off_t name_offset = shstrtab.sh_offset + out.sh_name;
+ char header_name[kMaxSectionNameLen];
+ ssize_t n_read =
+ ReadFromOffset(fd, &header_name, kMaxSectionNameLen, name_offset);
+ if (n_read == -1) {
+ return false;
+ } else if (n_read > kMaxSectionNameLen) {
+ // Long read?
+ return false;
+ }
+
+ y_absl::string_view name(header_name, strnlen(header_name, n_read));
+ if (!callback(name, out)) {
+ break;
+ }
+ }
+ return true;
+}
+
+// name_len should include terminating '\0'.
+bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+ ElfW(Shdr) * out) {
+ char header_name[kMaxSectionNameLen];
+ if (sizeof(header_name) < name_len) {
+ ABSL_RAW_LOG(WARNING,
+ "Section name '%s' is too long (%zu); "
+ "section will not be found (even if present).",
+ name, name_len);
+ // No point in even trying.
+ return false;
+ }
+
+ ElfW(Ehdr) elf_header;
+ if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+ return false;
+ }
+
+ ElfW(Shdr) shstrtab;
+ off_t shstrtab_offset =
+ (elf_header.e_shoff + elf_header.e_shentsize * elf_header.e_shstrndx);
+ if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+ return false;
+ }
+
+ for (int i = 0; i < elf_header.e_shnum; ++i) {
+ off_t section_header_offset =
+ (elf_header.e_shoff + elf_header.e_shentsize * i);
+ if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
+ return false;
+ }
+ off_t name_offset = shstrtab.sh_offset + out->sh_name;
+ ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
+ if (n_read < 0) {
+ return false;
+ } else if (static_cast<size_t>(n_read) != name_len) {
+ // Short read -- name could be at end of file.
+ continue;
+ }
+ if (memcmp(header_name, name, name_len) == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Compare two symbols at the same address.
+// Return true if we should pick symbol1.
+static bool ShouldPickFirstSymbol(const ElfW(Sym) & symbol1,
+ const ElfW(Sym) & symbol2) {
+  // If one of the symbols is weak and the other is not, pick the one
+  // that is not a weak symbol.
+ char bind1 = ELF_ST_BIND(symbol1.st_info);
+  char bind2 = ELF_ST_BIND(symbol2.st_info);
+ if (bind1 == STB_WEAK && bind2 != STB_WEAK) return false;
+ if (bind2 == STB_WEAK && bind1 != STB_WEAK) return true;
+
+  // If one of the symbols has zero size and the other does not, pick the
+  // one that has non-zero size.
+ if (symbol1.st_size != 0 && symbol2.st_size == 0) {
+ return true;
+ }
+ if (symbol1.st_size == 0 && symbol2.st_size != 0) {
+ return false;
+ }
+
+  // If one of the symbols has no type and the other does, pick the
+  // one that has a type.
+ char type1 = ELF_ST_TYPE(symbol1.st_info);
+  char type2 = ELF_ST_TYPE(symbol2.st_info);
+ if (type1 != STT_NOTYPE && type2 == STT_NOTYPE) {
+ return true;
+ }
+ if (type1 == STT_NOTYPE && type2 != STT_NOTYPE) {
+ return false;
+ }
+
+ // Pick the first one, if we still cannot decide.
+ return true;
+}
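+
+// For example (illustrative names): given a weak alias "weak_f" and a
+// strong definition "f" at the same address, the strong symbol wins; given
+// "f" with non-zero size and a zero-sized marker symbol, "f" wins again.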
+
+// Return true if an address is inside a section.
+static bool InSection(const void *address, const ElfW(Shdr) * section) {
+ const char *start = reinterpret_cast<const char *>(section->sh_addr);
+ size_t size = static_cast<size_t>(section->sh_size);
+ return start <= address && address < (start + size);
+}
+
+static const char *ComputeOffset(const char *base, ptrdiff_t offset) {
+ // Note: cast to uintptr_t to avoid undefined behavior when base evaluates to
+ // zero and offset is non-zero.
+ return reinterpret_cast<const char *>(
+ reinterpret_cast<uintptr_t>(base) + offset);
+}
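+
+// For example, ComputeOffset(nullptr, 0x1000) yields the address 0x1000
+// without evaluating "nullptr + 0x1000" as pointer arithmetic, which would
+// be undefined behavior.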
+
+// Read a symbol table and iterate over its symbols, looking for the symbol
+// containing "pc". If the symbol is found and its name fits in
+// out_size, the name is written into out and SYMBOL_FOUND is returned.
+// If the name does not fit, a truncated name is written into out,
+// and SYMBOL_TRUNCATED is returned. Out is NUL-terminated in both cases.
+// If the symbol is not found, SYMBOL_NOT_FOUND is returned.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol(
+ const void *const pc, const int fd, char *out, int out_size,
+ ptrdiff_t relocation, const ElfW(Shdr) * strtab, const ElfW(Shdr) * symtab,
+ const ElfW(Shdr) * opd, char *tmp_buf, int tmp_buf_size) {
+ if (symtab == nullptr) {
+ return SYMBOL_NOT_FOUND;
+ }
+
+ // Read multiple symbols at once to save read() calls.
+ ElfW(Sym) *buf = reinterpret_cast<ElfW(Sym) *>(tmp_buf);
+ const int buf_entries = tmp_buf_size / sizeof(buf[0]);
+
+ const int num_symbols = symtab->sh_size / symtab->sh_entsize;
+
+ // On platforms using an .opd section (PowerPC & IA64), a function symbol
+ // has the address of a function descriptor, which contains the real
+ // starting address. However, we do not always want to use the real
+ // starting address because we sometimes want to symbolize a function
+ // pointer into the .opd section, e.g. FindSymbol(&foo,...).
+ const bool pc_in_opd =
+ kPlatformUsesOPDSections && opd != nullptr && InSection(pc, opd);
+ const bool deref_function_descriptor_pointer =
+ kPlatformUsesOPDSections && opd != nullptr && !pc_in_opd;
+
+ ElfW(Sym) best_match;
+ SafeMemZero(&best_match, sizeof(best_match));
+ bool found_match = false;
+ for (int i = 0; i < num_symbols;) {
+ off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
+ const int num_remaining_symbols = num_symbols - i;
+ const int entries_in_chunk = std::min(num_remaining_symbols, buf_entries);
+ const int bytes_in_chunk = entries_in_chunk * sizeof(buf[0]);
+ const ssize_t len = ReadFromOffset(fd, buf, bytes_in_chunk, offset);
+ SAFE_ASSERT(len % sizeof(buf[0]) == 0);
+ const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
+ SAFE_ASSERT(num_symbols_in_buf <= entries_in_chunk);
+ for (int j = 0; j < num_symbols_in_buf; ++j) {
+ const ElfW(Sym) &symbol = buf[j];
+
+ // For a DSO, a symbol address is relocated by the loading address.
+ // We keep the original address for opd redirection below.
+ const char *const original_start_address =
+ reinterpret_cast<const char *>(symbol.st_value);
+ const char *start_address =
+ ComputeOffset(original_start_address, relocation);
+
+#ifdef __arm__
+ // ARM functions are always aligned to multiples of two bytes; the
+ // lowest-order bit in start_address is ignored by the CPU and indicates
+ // whether the function contains ARM (0) or Thumb (1) code. We don't care
+ // about what encoding is being used; we just want the real start address
+ // of the function.
+ start_address = reinterpret_cast<const char *>(
+ reinterpret_cast<uintptr_t>(start_address) & ~1);
+#endif
+
+ if (deref_function_descriptor_pointer &&
+ InSection(original_start_address, opd)) {
+ // The opd section is mapped into memory. Just dereference
+ // start_address to get the first double word, which points to the
+ // function entry.
+ start_address = *reinterpret_cast<const char *const *>(start_address);
+ }
+
+ // If pc is inside the .opd section, it points to a function descriptor.
+ const size_t size = pc_in_opd ? kFunctionDescriptorSize : symbol.st_size;
+ const void *const end_address = ComputeOffset(start_address, size);
+ if (symbol.st_value != 0 && // Skip null value symbols.
+ symbol.st_shndx != 0 && // Skip undefined symbols.
+#ifdef STT_TLS
+ ELF_ST_TYPE(symbol.st_info) != STT_TLS && // Skip thread-local data.
+#endif // STT_TLS
+ ((start_address <= pc && pc < end_address) ||
+ (start_address == pc && pc == end_address))) {
+ if (!found_match || ShouldPickFirstSymbol(symbol, best_match)) {
+ found_match = true;
+ best_match = symbol;
+ }
+ }
+ }
+ i += num_symbols_in_buf;
+ }
+
+ if (found_match) {
+ const size_t off = strtab->sh_offset + best_match.st_name;
+ const ssize_t n_read = ReadFromOffset(fd, out, out_size, off);
+ if (n_read <= 0) {
+ // This should never happen.
+ ABSL_RAW_LOG(WARNING,
+ "Unable to read from fd %d at offset %zu: n_read = %zd", fd,
+ off, n_read);
+ return SYMBOL_NOT_FOUND;
+ }
+ ABSL_RAW_CHECK(n_read <= out_size, "ReadFromOffset read too much data.");
+
+    // strtab->sh_offset points into a .strtab-like section that contains
+    // NUL-terminated strings: '\0foo\0barbaz\0...'.
+ //
+ // sh_offset+st_name points to the start of symbol name, but we don't know
+ // how long the symbol is, so we try to read as much as we have space for,
+ // and usually over-read (i.e. there is a NUL somewhere before n_read).
+ if (memchr(out, '\0', n_read) == nullptr) {
+ // Either out_size was too small (n_read == out_size and no NUL), or
+ // we tried to read past the EOF (n_read < out_size) and .strtab is
+ // corrupt (missing terminating NUL; should never happen for valid ELF).
+ out[n_read - 1] = '\0';
+ return SYMBOL_TRUNCATED;
+ }
+ return SYMBOL_FOUND;
+ }
+
+ return SYMBOL_NOT_FOUND;
+}
+
+// Get the symbol name of "pc" from the file pointed by "fd". Process
+// both regular and dynamic symbol tables if necessary.
+// See FindSymbol() comment for description of return value.
+FindSymbolResult Symbolizer::GetSymbolFromObjectFile(
+ const ObjFile &obj, const void *const pc, const ptrdiff_t relocation,
+ char *out, int out_size, char *tmp_buf, int tmp_buf_size) {
+ ElfW(Shdr) symtab;
+ ElfW(Shdr) strtab;
+ ElfW(Shdr) opd;
+ ElfW(Shdr) *opd_ptr = nullptr;
+
+  // On platforms using an .opd section for function descriptors, read
+  // the section header. The .opd section is in the data segment and should
+  // be loaded, but we check that it is mapped just to be extra careful.
+ if (kPlatformUsesOPDSections) {
+ if (GetSectionHeaderByName(obj.fd, kOpdSectionName,
+ sizeof(kOpdSectionName) - 1, &opd) &&
+ FindObjFile(reinterpret_cast<const char *>(opd.sh_addr) + relocation,
+ opd.sh_size) != nullptr) {
+ opd_ptr = &opd;
+ } else {
+ return SYMBOL_NOT_FOUND;
+ }
+ }
+
+ // Consult a regular symbol table, then fall back to the dynamic symbol table.
+ for (const auto symbol_table_type : {SHT_SYMTAB, SHT_DYNSYM}) {
+ if (!GetSectionHeaderByType(obj.fd, obj.elf_header.e_shnum,
+ obj.elf_header.e_shoff, symbol_table_type,
+ &symtab, tmp_buf, tmp_buf_size)) {
+ continue;
+ }
+ if (!ReadFromOffsetExact(
+ obj.fd, &strtab, sizeof(strtab),
+ obj.elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) {
+ continue;
+ }
+ const FindSymbolResult rc =
+ FindSymbol(pc, obj.fd, out, out_size, relocation, &strtab, &symtab,
+ opd_ptr, tmp_buf, tmp_buf_size);
+ if (rc != SYMBOL_NOT_FOUND) {
+ return rc;
+ }
+ }
+
+ return SYMBOL_NOT_FOUND;
+}
+
+namespace {
+// Thin wrapper around a file descriptor so that the file descriptor
+// gets closed for sure.
+class FileDescriptor {
+ public:
+ explicit FileDescriptor(int fd) : fd_(fd) {}
+ FileDescriptor(const FileDescriptor &) = delete;
+ FileDescriptor &operator=(const FileDescriptor &) = delete;
+
+ ~FileDescriptor() {
+ if (fd_ >= 0) {
+ NO_INTR(close(fd_));
+ }
+ }
+
+ int get() const { return fd_; }
+
+ private:
+ const int fd_;
+};
+
+// Helper class for reading lines from file.
+//
+// Note: we don't use ProcMapsIterator since the object is big (it has
+// a 5k array member) and uses async-unsafe functions such as sscanf()
+// and snprintf().
+class LineReader {
+ public:
+ explicit LineReader(int fd, char *buf, int buf_len)
+ : fd_(fd),
+ buf_len_(buf_len),
+ buf_(buf),
+ bol_(buf),
+ eol_(buf),
+ eod_(buf) {}
+
+ LineReader(const LineReader &) = delete;
+ LineReader &operator=(const LineReader &) = delete;
+
+ // Read '\n'-terminated line from file. On success, modify "bol"
+ // and "eol", then return true. Otherwise, return false.
+ //
+ // Note: if the last line doesn't end with '\n', the line will be
+ // dropped. It's an intentional behavior to make the code simple.
+ bool ReadLine(const char **bol, const char **eol) {
+ if (BufferIsEmpty()) { // First time.
+ const ssize_t num_bytes = ReadPersistent(fd_, buf_, buf_len_);
+ if (num_bytes <= 0) { // EOF or error.
+ return false;
+ }
+ eod_ = buf_ + num_bytes;
+ bol_ = buf_;
+ } else {
+ bol_ = eol_ + 1; // Advance to the next line in the buffer.
+ SAFE_ASSERT(bol_ <= eod_); // "bol_" can point to "eod_".
+ if (!HasCompleteLine()) {
+ const int incomplete_line_length = eod_ - bol_;
+ // Move the trailing incomplete line to the beginning.
+ memmove(buf_, bol_, incomplete_line_length);
+ // Read text from file and append it.
+ char *const append_pos = buf_ + incomplete_line_length;
+ const int capacity_left = buf_len_ - incomplete_line_length;
+ const ssize_t num_bytes =
+ ReadPersistent(fd_, append_pos, capacity_left);
+ if (num_bytes <= 0) { // EOF or error.
+ return false;
+ }
+ eod_ = append_pos + num_bytes;
+ bol_ = buf_;
+ }
+ }
+ eol_ = FindLineFeed();
+ if (eol_ == nullptr) { // '\n' not found. Malformed line.
+ return false;
+ }
+ *eol_ = '\0'; // Replace '\n' with '\0'.
+
+ *bol = bol_;
+ *eol = eol_;
+ return true;
+ }
+
+ private:
+ char *FindLineFeed() const {
+ return reinterpret_cast<char *>(memchr(bol_, '\n', eod_ - bol_));
+ }
+
+ bool BufferIsEmpty() const { return buf_ == eod_; }
+
+ bool HasCompleteLine() const {
+ return !BufferIsEmpty() && FindLineFeed() != nullptr;
+ }
+
+ const int fd_;
+ const int buf_len_;
+ char *const buf_;
+ char *bol_;
+ char *eol_;
+ const char *eod_; // End of data in "buf_".
+};
+} // namespace
+
+// Place the hex number read from "start" into "*value". The pointer to
+// the first non-hex character or "end" is returned.
+static const char *GetHex(const char *start, const char *end,
+ uint64_t *const value) {
+ uint64_t hex = 0;
+ const char *p;
+ for (p = start; p < end; ++p) {
+ int ch = *p;
+ if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') ||
+ (ch >= 'a' && ch <= 'f')) {
+ hex = (hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9);
+ } else { // Encountered the first non-hex character.
+ break;
+ }
+ }
+ SAFE_ASSERT(p <= end);
+ *value = hex;
+ return p;
+}
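+
+// The digit conversion above relies on the ASCII layout: '0'..'9' yield
+// ch - '0', while 'A'..'F' and 'a'..'f' share low nibbles 1..6, so
+// (ch & 0xF) + 9 maps, e.g., both 'a' (0x61) and 'A' (0x41) to 10.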
+
+static const char *GetHex(const char *start, const char *end,
+ const void **const addr) {
+ uint64_t hex = 0;
+ const char *p = GetHex(start, end, &hex);
+ *addr = reinterpret_cast<void *>(hex);
+ return p;
+}
+
+// Normally we are only interested in "r?x" maps.
+// On the PowerPC, function pointers point to descriptors in the .opd
+// section. The descriptors themselves are not executable code, so
+// we need to relax the check below to "r??".
+static bool ShouldUseMapping(const char *const flags) {
+ return flags[0] == 'r' && (kPlatformUsesOPDSections || flags[2] == 'x');
+}
+
+// Read /proc/self/maps and run "callback" for each mmapped file found. If
+// "callback" returns false, stop scanning and return true. Else continue
+// scanning /proc/self/maps. Return true if no parse error is found.
+static ABSL_ATTRIBUTE_NOINLINE bool ReadAddrMap(
+ bool (*callback)(const char *filename, const void *const start_addr,
+ const void *const end_addr, uint64_t offset, void *arg),
+ void *arg, void *tmp_buf, int tmp_buf_size) {
+  // Use /proc/self/task/<pid>/maps instead of /proc/self/maps. The latter
+  // requires the kernel to stop all threads, and is significantly slower
+  // when there are thousands of threads.
+ char maps_path[80];
+ snprintf(maps_path, sizeof(maps_path), "/proc/self/task/%d/maps", getpid());
+
+ int maps_fd;
+ NO_INTR(maps_fd = open(maps_path, O_RDONLY));
+ FileDescriptor wrapped_maps_fd(maps_fd);
+ if (wrapped_maps_fd.get() < 0) {
+ ABSL_RAW_LOG(WARNING, "%s: errno=%d", maps_path, errno);
+ return false;
+ }
+
+ // Iterate over maps and look for the map containing the pc. Then
+ // look into the symbol tables inside.
+ LineReader reader(wrapped_maps_fd.get(), static_cast<char *>(tmp_buf),
+ tmp_buf_size);
+ while (true) {
+ const char *cursor;
+ const char *eol;
+ if (!reader.ReadLine(&cursor, &eol)) { // EOF or malformed line.
+ break;
+ }
+
+ const char *line = cursor;
+ const void *start_address;
+ // Start parsing line in /proc/self/maps. Here is an example:
+ //
+ // 08048000-0804c000 r-xp 00000000 08:01 2142121 /bin/cat
+ //
+ // We want start address (08048000), end address (0804c000), flags
+ // (r-xp) and file name (/bin/cat).
+
+ // Read start address.
+ cursor = GetHex(cursor, eol, &start_address);
+ if (cursor == eol || *cursor != '-') {
+ ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps line: %s", line);
+ return false;
+ }
+ ++cursor; // Skip '-'.
+
+ // Read end address.
+ const void *end_address;
+ cursor = GetHex(cursor, eol, &end_address);
+ if (cursor == eol || *cursor != ' ') {
+ ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps line: %s", line);
+ return false;
+ }
+ ++cursor; // Skip ' '.
+
+ // Read flags. Skip flags until we encounter a space or eol.
+ const char *const flags_start = cursor;
+ while (cursor < eol && *cursor != ' ') {
+ ++cursor;
+ }
+ // We expect at least four letters for flags (ex. "r-xp").
+ if (cursor == eol || cursor < flags_start + 4) {
+ ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps: %s", line);
+ return false;
+ }
+
+ // Check flags.
+ if (!ShouldUseMapping(flags_start)) {
+ continue; // We skip this map.
+ }
+ ++cursor; // Skip ' '.
+
+ // Read file offset.
+ uint64_t offset;
+ cursor = GetHex(cursor, eol, &offset);
+ ++cursor; // Skip ' '.
+
+ // Skip to file name. "cursor" now points to dev. We need to skip at least
+ // two spaces for dev and inode.
+ int num_spaces = 0;
+ while (cursor < eol) {
+ if (*cursor == ' ') {
+ ++num_spaces;
+ } else if (num_spaces >= 2) {
+ // The first non-space character after skipping two spaces
+ // is the beginning of the file name.
+ break;
+ }
+ ++cursor;
+ }
+
+ // Check whether this entry corresponds to our hint table for the true
+ // filename.
+ bool hinted =
+ GetFileMappingHint(&start_address, &end_address, &offset, &cursor);
+ if (!hinted && (cursor == eol || cursor[0] == '[')) {
+ // not an object file, typically [vdso] or [vsyscall]
+ continue;
+ }
+ if (!callback(cursor, start_address, end_address, offset, arg)) break;
+ }
+ return true;
+}
+
+// Find the objfile mapped in address region containing [addr, addr + len).
+ObjFile *Symbolizer::FindObjFile(const void *const addr, size_t len) {
+ for (int i = 0; i < 2; ++i) {
+ if (!ok_) return nullptr;
+
+ // Read /proc/self/maps if necessary
+ if (!addr_map_read_) {
+ addr_map_read_ = true;
+ if (!ReadAddrMap(RegisterObjFile, this, tmp_buf_, TMP_BUF_SIZE)) {
+ ok_ = false;
+ return nullptr;
+ }
+ }
+
+ int lo = 0;
+ int hi = addr_map_.Size();
+ while (lo < hi) {
+ int mid = (lo + hi) / 2;
+ if (addr < addr_map_.At(mid)->end_addr) {
+ hi = mid;
+ } else {
+ lo = mid + 1;
+ }
+ }
+ if (lo != addr_map_.Size()) {
+ ObjFile *obj = addr_map_.At(lo);
+ SAFE_ASSERT(obj->end_addr > addr);
+ if (addr >= obj->start_addr &&
+ reinterpret_cast<const char *>(addr) + len <= obj->end_addr)
+ return obj;
+ }
+
+ // The address mapping may have changed since it was last read. Retry.
+ ClearAddrMap();
+ }
+ return nullptr;
+}
+
+void Symbolizer::ClearAddrMap() {
+ for (int i = 0; i != addr_map_.Size(); i++) {
+ ObjFile *o = addr_map_.At(i);
+ base_internal::LowLevelAlloc::Free(o->filename);
+ if (o->fd >= 0) {
+ NO_INTR(close(o->fd));
+ }
+ }
+ addr_map_.Clear();
+ addr_map_read_ = false;
+}
+
+// Callback for ReadAddrMap to register objfiles in an in-memory table.
+bool Symbolizer::RegisterObjFile(const char *filename,
+ const void *const start_addr,
+ const void *const end_addr, uint64_t offset,
+ void *arg) {
+ Symbolizer *impl = static_cast<Symbolizer *>(arg);
+
+  // Files are supposed to be added in increasing address order. Make
+  // sure that's the case.
+ int addr_map_size = impl->addr_map_.Size();
+ if (addr_map_size != 0) {
+ ObjFile *old = impl->addr_map_.At(addr_map_size - 1);
+ if (old->end_addr > end_addr) {
+ ABSL_RAW_LOG(ERROR,
+ "Unsorted addr map entry: 0x%" PRIxPTR ": %s <-> 0x%" PRIxPTR
+ ": %s",
+ reinterpret_cast<uintptr_t>(end_addr), filename,
+ reinterpret_cast<uintptr_t>(old->end_addr), old->filename);
+ return true;
+ } else if (old->end_addr == end_addr) {
+ // The same entry appears twice. This sometimes happens for [vdso].
+ if (old->start_addr != start_addr ||
+ strcmp(old->filename, filename) != 0) {
+ ABSL_RAW_LOG(ERROR,
+ "Duplicate addr 0x%" PRIxPTR ": %s <-> 0x%" PRIxPTR ": %s",
+ reinterpret_cast<uintptr_t>(end_addr), filename,
+ reinterpret_cast<uintptr_t>(old->end_addr), old->filename);
+ }
+ return true;
+ }
+ }
+ ObjFile *obj = impl->addr_map_.Add();
+ obj->filename = impl->CopyString(filename);
+ obj->start_addr = start_addr;
+ obj->end_addr = end_addr;
+ obj->offset = offset;
+ obj->elf_type = -1; // filled on demand
+ obj->fd = -1; // opened on demand
+ return true;
+}
+
+// This function wraps the Demangle function to provide an interface
+// where the input symbol is demangled in-place.
+// To keep stack consumption low, we would like this function to not
+// get inlined.
+static ABSL_ATTRIBUTE_NOINLINE void DemangleInplace(char *out, int out_size,
+ char *tmp_buf,
+ int tmp_buf_size) {
+ if (Demangle(out, tmp_buf, tmp_buf_size)) {
+ // Demangling succeeded. Copy to out if the space allows.
+ int len = strlen(tmp_buf);
+ if (len + 1 <= out_size) { // +1 for '\0'.
+ SAFE_ASSERT(len < tmp_buf_size);
+ memmove(out, tmp_buf, len + 1);
+ }
+ }
+}
+
+SymbolCacheLine *Symbolizer::GetCacheLine(const void *const pc) {
+ uintptr_t pc0 = reinterpret_cast<uintptr_t>(pc);
+ pc0 >>= 3; // drop the low 3 bits
+
+ // Shuffle bits.
+ pc0 ^= (pc0 >> 6) ^ (pc0 >> 12) ^ (pc0 >> 18);
+ return &symbol_cache_[pc0 % SYMBOL_CACHE_LINES];
+}
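+
+// Worked example (assumed pc value): for pc == 0x1000, pc0 becomes
+// 0x1000 >> 3 == 0x200; the shuffle gives 0x200 ^ 0x8 ^ 0x0 ^ 0x0 == 0x208
+// (520 decimal), and 520 % SYMBOL_CACHE_LINES (128) selects cache line 8.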
+
+void Symbolizer::AgeSymbols(SymbolCacheLine *line) {
+ for (uint32_t &age : line->age) {
+ ++age;
+ }
+}
+
+const char *Symbolizer::FindSymbolInCache(const void *const pc) {
+ if (pc == nullptr) return nullptr;
+
+ SymbolCacheLine *line = GetCacheLine(pc);
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(line->pc); ++i) {
+ if (line->pc[i] == pc) {
+ AgeSymbols(line);
+ line->age[i] = 0;
+ return line->name[i];
+ }
+ }
+ return nullptr;
+}
+
+const char *Symbolizer::InsertSymbolInCache(const void *const pc,
+ const char *name) {
+ SAFE_ASSERT(pc != nullptr);
+
+ SymbolCacheLine *line = GetCacheLine(pc);
+ uint32_t max_age = 0;
+ int oldest_index = -1;
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(line->pc); ++i) {
+ if (line->pc[i] == nullptr) {
+ AgeSymbols(line);
+ line->pc[i] = pc;
+ line->name[i] = CopyString(name);
+ line->age[i] = 0;
+ return line->name[i];
+ }
+ if (line->age[i] >= max_age) {
+ max_age = line->age[i];
+ oldest_index = i;
+ }
+ }
+
+ AgeSymbols(line);
+ ABSL_RAW_CHECK(oldest_index >= 0, "Corrupt cache");
+ base_internal::LowLevelAlloc::Free(line->name[oldest_index]);
+ line->pc[oldest_index] = pc;
+ line->name[oldest_index] = CopyString(name);
+ line->age[oldest_index] = 0;
+ return line->name[oldest_index];
+}
+
+static void MaybeOpenFdFromSelfExe(ObjFile *obj) {
+ if (memcmp(obj->start_addr, ELFMAG, SELFMAG) != 0) {
+ return;
+ }
+ int fd = open("/proc/self/exe", O_RDONLY);
+ if (fd == -1) {
+ return;
+ }
+ // Verify that contents of /proc/self/exe matches in-memory image of
+ // the binary. This can fail if the "deleted" binary is in fact not
+ // the main executable, or for binaries that have the first PT_LOAD
+ // segment smaller than 4K. We do it in four steps so that the
+ // buffer is smaller and we don't consume too much stack space.
+ const char *mem = reinterpret_cast<const char *>(obj->start_addr);
+ for (int i = 0; i < 4; ++i) {
+ char buf[1024];
+ ssize_t n = read(fd, buf, sizeof(buf));
+ if (n != sizeof(buf) || memcmp(buf, mem, sizeof(buf)) != 0) {
+ close(fd);
+ return;
+ }
+ mem += sizeof(buf);
+ }
+ obj->fd = fd;
+}
+
+static bool MaybeInitializeObjFile(ObjFile *obj) {
+ if (obj->fd < 0) {
+ obj->fd = open(obj->filename, O_RDONLY);
+
+ if (obj->fd < 0) {
+ // Getting /proc/self/exe here means that we were hinted.
+ if (strcmp(obj->filename, "/proc/self/exe") == 0) {
+ // /proc/self/exe may be inaccessible (due to setuid, etc.), so try
+ // accessing the binary via argv0.
+ if (argv0_value != nullptr) {
+ obj->fd = open(argv0_value, O_RDONLY);
+ }
+ } else {
+ MaybeOpenFdFromSelfExe(obj);
+ }
+ }
+
+ if (obj->fd < 0) {
+ ABSL_RAW_LOG(WARNING, "%s: open failed: errno=%d", obj->filename, errno);
+ return false;
+ }
+ obj->elf_type = FileGetElfType(obj->fd);
+ if (obj->elf_type < 0) {
+ ABSL_RAW_LOG(WARNING, "%s: wrong elf type: %d", obj->filename,
+ obj->elf_type);
+ return false;
+ }
+
+ if (!ReadFromOffsetExact(obj->fd, &obj->elf_header, sizeof(obj->elf_header),
+ 0)) {
+ ABSL_RAW_LOG(WARNING, "%s: failed to read elf header", obj->filename);
+ return false;
+ }
+ const int phnum = obj->elf_header.e_phnum;
+ const int phentsize = obj->elf_header.e_phentsize;
+ size_t phoff = obj->elf_header.e_phoff;
+ size_t num_executable_load_segments = 0;
+ for (int j = 0; j < phnum; j++) {
+ ElfW(Phdr) phdr;
+ if (!ReadFromOffsetExact(obj->fd, &phdr, sizeof(phdr), phoff)) {
+ ABSL_RAW_LOG(WARNING, "%s: failed to read program header %d",
+ obj->filename, j);
+ return false;
+ }
+ phoff += phentsize;
+ constexpr int rx = PF_X | PF_R;
+ if (phdr.p_type != PT_LOAD || (phdr.p_flags & rx) != rx) {
+ // Not a LOAD segment, or not executable code.
+ continue;
+ }
+ if (num_executable_load_segments < obj->phdr.size()) {
+ memcpy(&obj->phdr[num_executable_load_segments++], &phdr, sizeof(phdr));
+ } else {
+ ABSL_RAW_LOG(WARNING, "%s: too many executable LOAD segments",
+ obj->filename);
+ break;
+ }
+ }
+ if (num_executable_load_segments == 0) {
+ // This object has no "r-x" LOAD segments. That's unexpected.
+ ABSL_RAW_LOG(WARNING, "%s: no executable LOAD segments", obj->filename);
+ return false;
+ }
+ }
+ return true;
+}
+
+// The implementation of our symbolization routine. If it
+// successfully finds the symbol containing "pc" and obtains the
+// symbol name, returns a pointer to that symbol. Otherwise, returns nullptr.
+// If any symbol decorators have been installed via InstallSymbolDecorator(),
+// they are called here as well.
+// To keep stack consumption low, we would like this function to not
+// get inlined.
+const char *Symbolizer::GetSymbol(const void *const pc) {
+ const char *entry = FindSymbolInCache(pc);
+ if (entry != nullptr) {
+ return entry;
+ }
+ symbol_buf_[0] = '\0';
+
+ ObjFile *const obj = FindObjFile(pc, 1);
+ ptrdiff_t relocation = 0;
+ int fd = -1;
+ if (obj != nullptr) {
+ if (MaybeInitializeObjFile(obj)) {
+ const size_t start_addr = reinterpret_cast<size_t>(obj->start_addr);
+ if (obj->elf_type == ET_DYN && start_addr >= obj->offset) {
+ // This object was relocated.
+ //
+ // For obj->offset > 0, adjust the relocation since a mapping at offset
+ // X in the file will have a start address of [true relocation]+X.
+ relocation = start_addr - obj->offset;
+
+ // Note: some binaries have multiple "rx" LOAD segments. We must
+ // find the right one.
+ ElfW(Phdr) *phdr = nullptr;
+ for (size_t j = 0; j < obj->phdr.size(); j++) {
+ ElfW(Phdr) &p = obj->phdr[j];
+ if (p.p_type != PT_LOAD) {
+            // We only expect PT_LOADs. This must be a zero-initialized
+            // PT_NULL entry that we didn't write over (i.e. we exhausted
+            // all interesting PT_LOADs).
+ ABSL_RAW_CHECK(p.p_type == PT_NULL, "unexpected p_type");
+ break;
+ }
+ if (pc < reinterpret_cast<void *>(start_addr + p.p_memsz)) {
+ phdr = &p;
+ break;
+ }
+ }
+ if (phdr == nullptr) {
+ // That's unexpected. Hope for the best.
+ ABSL_RAW_LOG(
+ WARNING,
+ "%s: unable to find LOAD segment for pc: %p, start_addr: %zx",
+ obj->filename, pc, start_addr);
+ } else {
+ // Adjust relocation in case phdr.p_vaddr != 0.
+ // This happens for binaries linked with `lld --rosegment`, and for
+ // binaries linked with BFD `ld -z separate-code`.
+ relocation -= phdr->p_vaddr - phdr->p_offset;
+ }
+ }
+
+ fd = obj->fd;
+ if (GetSymbolFromObjectFile(*obj, pc, relocation, symbol_buf_,
+ sizeof(symbol_buf_), tmp_buf_,
+ sizeof(tmp_buf_)) == SYMBOL_FOUND) {
+ // Only try to demangle the symbol name if it fit into symbol_buf_.
+ DemangleInplace(symbol_buf_, sizeof(symbol_buf_), tmp_buf_,
+ sizeof(tmp_buf_));
+ }
+ }
+ } else {
+#if ABSL_HAVE_VDSO_SUPPORT
+ VDSOSupport vdso;
+ if (vdso.IsPresent()) {
+ VDSOSupport::SymbolInfo symbol_info;
+ if (vdso.LookupSymbolByAddress(pc, &symbol_info)) {
+ // All VDSO symbols are known to be short.
+ size_t len = strlen(symbol_info.name);
+ ABSL_RAW_CHECK(len + 1 < sizeof(symbol_buf_),
+ "VDSO symbol unexpectedly long");
+ memcpy(symbol_buf_, symbol_info.name, len + 1);
+ }
+ }
+#endif
+ }
+
+ if (g_decorators_mu.TryLock()) {
+ if (g_num_decorators > 0) {
+ SymbolDecoratorArgs decorator_args = {
+ pc, relocation, fd, symbol_buf_, sizeof(symbol_buf_),
+ tmp_buf_, sizeof(tmp_buf_), nullptr};
+ for (int i = 0; i < g_num_decorators; ++i) {
+ decorator_args.arg = g_decorators[i].arg;
+ g_decorators[i].fn(&decorator_args);
+ }
+ }
+ g_decorators_mu.Unlock();
+ }
+ if (symbol_buf_[0] == '\0') {
+ return nullptr;
+ }
+ symbol_buf_[sizeof(symbol_buf_) - 1] = '\0'; // Paranoia.
+ return InsertSymbolInCache(pc, symbol_buf_);
+}
+
+bool RemoveAllSymbolDecorators(void) {
+ if (!g_decorators_mu.TryLock()) {
+ // Someone else is using decorators. Get out.
+ return false;
+ }
+ g_num_decorators = 0;
+ g_decorators_mu.Unlock();
+ return true;
+}
+
+bool RemoveSymbolDecorator(int ticket) {
+ if (!g_decorators_mu.TryLock()) {
+ // Someone else is using decorators. Get out.
+ return false;
+ }
+ for (int i = 0; i < g_num_decorators; ++i) {
+ if (g_decorators[i].ticket == ticket) {
+ while (i < g_num_decorators - 1) {
+ g_decorators[i] = g_decorators[i + 1];
+ ++i;
+ }
+ g_num_decorators = i;
+ break;
+ }
+ }
+ g_decorators_mu.Unlock();
+ return true; // Decorator is known to be removed.
+}
+
+int InstallSymbolDecorator(SymbolDecorator decorator, void *arg) {
+ static int ticket = 0;
+
+ if (!g_decorators_mu.TryLock()) {
+ // Someone else is using decorators. Get out.
+ return -2;
+ }
+ int ret = ticket;
+ if (g_num_decorators >= kMaxDecorators) {
+ ret = -1;
+ } else {
+ g_decorators[g_num_decorators] = {decorator, arg, ticket++};
+ ++g_num_decorators;
+ }
+ g_decorators_mu.Unlock();
+ return ret;
+}
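+
+// Usage sketch (the decorator below is hypothetical, not part of this
+// file); a decorator may append an annotation when the buffer has room:
+//
+//   void MyDecorator(const SymbolDecoratorArgs *args) {
+//     if (strlen(args->symbol_buf) + 8 <= args->symbol_buf_size)
+//       strcat(args->symbol_buf, " [deco]");  // 7 chars + '\0'
+//   }
+//   ...
+//   int ticket = InstallSymbolDecorator(MyDecorator, nullptr);
+//   if (ticket >= 0) RemoveSymbolDecorator(ticket);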
+
+bool RegisterFileMappingHint(const void *start, const void *end, uint64_t offset,
+ const char *filename) {
+ SAFE_ASSERT(start <= end);
+ SAFE_ASSERT(filename != nullptr);
+
+ InitSigSafeArena();
+
+ if (!g_file_mapping_mu.TryLock()) {
+ return false;
+ }
+
+ bool ret = true;
+ if (g_num_file_mapping_hints >= kMaxFileMappingHints) {
+ ret = false;
+ } else {
+ // TODO(ckennelly): Move this into a string copy routine.
+ int len = strlen(filename);
+ char *dst = static_cast<char *>(
+ base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena()));
+ ABSL_RAW_CHECK(dst != nullptr, "out of memory");
+ memcpy(dst, filename, len + 1);
+
+ auto &hint = g_file_mapping_hints[g_num_file_mapping_hints++];
+ hint.start = start;
+ hint.end = end;
+ hint.offset = offset;
+ hint.filename = dst;
+ }
+
+ g_file_mapping_mu.Unlock();
+ return ret;
+}
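+
+// Usage sketch (addresses and path are assumed, for illustration): a loader
+// that maps part of a file by hand can tell the symbolizer where the bytes
+// came from,
+//
+//   RegisterFileMappingHint(mapped_base, mapped_base + mapped_len,
+//                           /*offset=*/0, "/path/to/original_binary");
+//
+// after which ReadAddrMap() reports that address range under the given
+// filename even if /proc/self/maps names it differently.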
+
+bool GetFileMappingHint(const void **start, const void **end, uint64_t *offset,
+ const char **filename) {
+ if (!g_file_mapping_mu.TryLock()) {
+ return false;
+ }
+ bool found = false;
+ for (int i = 0; i < g_num_file_mapping_hints; i++) {
+ if (g_file_mapping_hints[i].start <= *start &&
+ *end <= g_file_mapping_hints[i].end) {
+ // We assume that the start_address for the mapping is the base
+ // address of the ELF section, but when [start_address,end_address) is
+ // not strictly equal to [hint.start, hint.end), that assumption is
+ // invalid.
+ //
+ // This uses the hint's start address (even though hint.start is not
+ // necessarily equal to start_address) to ensure the correct
+ // relocation is computed later.
+ *start = g_file_mapping_hints[i].start;
+ *end = g_file_mapping_hints[i].end;
+ *offset = g_file_mapping_hints[i].offset;
+ *filename = g_file_mapping_hints[i].filename;
+ found = true;
+ break;
+ }
+ }
+ g_file_mapping_mu.Unlock();
+ return found;
+}
+
+} // namespace debugging_internal
+
+bool Symbolize(const void *pc, char *out, int out_size) {
+ // Symbolization is very slow under tsan.
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ SAFE_ASSERT(out_size >= 0);
+ debugging_internal::Symbolizer *s = debugging_internal::AllocateSymbolizer();
+ const char *name = s->GetSymbol(pc);
+ bool ok = false;
+ if (name != nullptr && out_size > 0) {
+ strncpy(out, name, out_size);
+ ok = true;
+ if (out[out_size - 1] != '\0') {
+ // strncpy() does not '\0' terminate when it truncates. Do so, with
+ // trailing ellipsis.
+ static constexpr char kEllipsis[] = "...";
+ int ellipsis_size =
+ std::min(implicit_cast<int>(strlen(kEllipsis)), out_size - 1);
+ memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+ out[out_size - 1] = '\0';
+ }
+ }
+ debugging_internal::FreeSymbolizer(s);
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
+ return ok;
+}
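+
+// Truncation illustration (assumed values): with out_size == 8 and name
+// "LongFunctionName", strncpy() fills all 8 bytes without a NUL; the fixup
+// above then writes "..." over bytes 4..6 and '\0' at byte 7, so the caller
+// sees "Long...".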
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+extern "C" bool YAbslInternalGetFileMappingHint(const void **start,
+ const void **end, uint64_t *offset,
+ const char **filename) {
+ return y_absl::debugging_internal::GetFileMappingHint(start, end, offset,
+ filename);
+}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_emscripten.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_emscripten.inc
new file mode 100644
index 00000000000..6ddb5b4a7fc
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_emscripten.inc
@@ -0,0 +1,72 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cxxabi.h>
+#error #include <emscripten.h>
+
+#include <algorithm>
+#include <cstring>
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/debugging/internal/demangle.h"
+#include "y_absl/strings/numbers.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/string_view.h"
+
+extern "C" {
+const char* emscripten_pc_get_function(const void* pc);
+}
+
+// clang-format off
+EM_JS(bool, HaveOffsetConverter, (),
+ { return typeof wasmOffsetConverter !== 'undefined'; });
+// clang-format on
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+void InitializeSymbolizer(const char*) {
+ if (!HaveOffsetConverter()) {
+ ABSL_RAW_LOG(INFO,
+ "Symbolization unavailable. Rebuild with -sWASM=1 "
+ "and -sUSE_OFFSET_CONVERTER=1.");
+ }
+}
+
+bool Symbolize(const void* pc, char* out, int out_size) {
+ // Check if we have the offset converter necessary for pc_get_function.
+ // Without it, the program will abort().
+ if (!HaveOffsetConverter()) {
+ return false;
+ }
+ const char* func_name = emscripten_pc_get_function(pc);
+ if (func_name == nullptr) {
+ return false;
+ }
+
+ strncpy(out, func_name, out_size);
+
+ if (out[out_size - 1] != '\0') {
+ // strncpy() does not '\0' terminate when it truncates.
+ static constexpr char kEllipsis[] = "...";
+ int ellipsis_size = std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
+ memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+ out[out_size - 1] = '\0';
+ }
+
+ return true;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_unimplemented.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_unimplemented.inc
new file mode 100644
index 00000000000..4e55ed122f8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_unimplemented.inc
@@ -0,0 +1,40 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdint>
+
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace debugging_internal {
+
+int InstallSymbolDecorator(SymbolDecorator, void*) { return -1; }
+bool RemoveSymbolDecorator(int) { return false; }
+bool RemoveAllSymbolDecorators(void) { return false; }
+bool RegisterFileMappingHint(const void *, const void *, uint64_t, const char *) {
+ return false;
+}
+bool GetFileMappingHint(const void **, const void **, uint64_t *, const char **) {
+ return false;
+}
+
+} // namespace debugging_internal
+
+void InitializeSymbolizer(const char*) {}
+bool Symbolize(const void *, char *, int) { return false; }
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_win32.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_win32.inc
new file mode 100644
index 00000000000..81e0473f88c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_win32.inc
@@ -0,0 +1,81 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// See "Retrieving Symbol Information by Address":
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms680578(v=vs.85).aspx
+
+#include <windows.h>
+
+// MSVC header dbghelp.h has a warning for an ignored typedef.
+#pragma warning(push)
+#pragma warning(disable:4091)
+#include <dbghelp.h>
+#pragma warning(pop)
+
+#pragma comment(lib, "dbghelp.lib")
+
+#include <algorithm>
+#include <cstring>
+
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+static HANDLE process = nullptr;
+
+void InitializeSymbolizer(const char*) {
+ if (process != nullptr) {
+ return;
+ }
+ process = GetCurrentProcess();
+
+  // Symbols are not loaded until a reference that requires them is made.
+  // This is the fastest, most efficient way to use the symbol handler.
+ SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME);
+ if (!SymInitialize(process, nullptr, true)) {
+ // GetLastError() returns a Win32 DWORD, but we assign to
+ // unsigned long long to simplify the ABSL_RAW_LOG case below. The uniform
+ // initialization guarantees this is not a narrowing conversion.
+ const unsigned long long error{GetLastError()}; // NOLINT(runtime/int)
+ ABSL_RAW_LOG(FATAL, "SymInitialize() failed: %llu", error);
+ }
+}
+
+bool Symbolize(const void* pc, char* out, int out_size) {
+ if (out_size <= 0) {
+ return false;
+ }
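+  // SYMBOL_INFO is a variable-length struct: SymFromAddr() writes the symbol
+  // name into the bytes past the fixed-size header, so reserve MAX_SYM_NAME
+  // extra bytes for it.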
+ alignas(SYMBOL_INFO) char buf[sizeof(SYMBOL_INFO) + MAX_SYM_NAME];
+ SYMBOL_INFO* symbol = reinterpret_cast<SYMBOL_INFO*>(buf);
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ symbol->MaxNameLen = MAX_SYM_NAME;
+ if (!SymFromAddr(process, reinterpret_cast<DWORD64>(pc), nullptr, symbol)) {
+ return false;
+ }
+ strncpy(out, symbol->Name, out_size);
+ if (out[out_size - 1] != '\0') {
+ // strncpy() does not '\0' terminate when it truncates.
+ static constexpr char kEllipsis[] = "...";
+ int ellipsis_size =
+ std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
+ memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+ out[out_size - 1] = '\0';
+ }
+ return true;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/ya.make
new file mode 100644
index 00000000000..d768f195c25
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/ya.make
@@ -0,0 +1,31 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ internal/address_is_readable.cc
+ internal/elf_mem_image.cc
+ internal/vdso_support.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/demangle/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/demangle/ya.make
new file mode 100644
index 00000000000..a1abc9f6154
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/demangle/ya.make
@@ -0,0 +1,33 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal)
+
+SRCS(
+ demangle.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..7b6169efacd
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/.yandex_meta/licenses.list.txt
@@ -0,0 +1,20 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2018 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2019 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h
new file mode 100644
index 00000000000..457b1fa7342
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h
@@ -0,0 +1,184 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: bind_front.h
+// -----------------------------------------------------------------------------
+//
+// `y_absl::bind_front()` returns a functor by binding a number of arguments to
+// the front of a provided (usually more generic) functor. Unlike `std::bind`,
+// it does not require the use of argument placeholders. The simpler syntax of
+// `y_absl::bind_front()` allows you to avoid known misuses with `std::bind()`.
+//
+// `y_absl::bind_front()` is meant as a drop-in replacement for C++20's upcoming
+// `std::bind_front()`, which similarly resolves these issues with
+// `std::bind()`. Both `bind_front()` alternatives, unlike `std::bind()`, allow
+// partial function application. (See
+// https://en.wikipedia.org/wiki/Partial_application).
+
+#ifndef ABSL_FUNCTIONAL_BIND_FRONT_H_
+#define ABSL_FUNCTIONAL_BIND_FRONT_H_
+
+#include "y_absl/functional/internal/front_binder.h"
+#include "y_absl/utility/utility.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// bind_front()
+//
+// Binds the first N arguments of an invocable object and stores them by value.
+//
+// Like `std::bind()`, `y_absl::bind_front()` is implicitly convertible to
+// `std::function`. In particular, it may be used as a simpler replacement for
+// `std::bind()` in most cases, as it does not require placeholders to be
+// specified. More importantly, it provides more reliable correctness guarantees
+// than `std::bind()`: for example, while `std::bind()` will silently ignore
+// extra parameters, `y_absl::bind_front()` will report such misuses as errors.
+//
+// y_absl::bind_front(a...) can be seen as storing the results of
+// std::make_tuple(a...).
+//
+// Example: Binding a free function.
+//
+// int Minus(int a, int b) { return a - b; }
+//
+// assert(y_absl::bind_front(Minus)(3, 2) == 3 - 2);
+// assert(y_absl::bind_front(Minus, 3)(2) == 3 - 2);
+// assert(y_absl::bind_front(Minus, 3, 2)() == 3 - 2);
+//
+// Example: Binding a member function.
+//
+// struct Math {
+// int Double(int a) const { return 2 * a; }
+// };
+//
+// Math math;
+//
+// assert(y_absl::bind_front(&Math::Double)(&math, 3) == 2 * 3);
+// // Stores a pointer to math inside the functor.
+// assert(y_absl::bind_front(&Math::Double, &math)(3) == 2 * 3);
+// // Stores a copy of math inside the functor.
+// assert(y_absl::bind_front(&Math::Double, math)(3) == 2 * 3);
+// // Stores std::unique_ptr<Math> inside the functor.
+// assert(y_absl::bind_front(&Math::Double,
+// std::unique_ptr<Math>(new Math))(3) == 2 * 3);
+//
+// Example: Using `y_absl::bind_front()`, instead of `std::bind()`, with
+// `std::function`.
+//
+// class FileReader {
+// public:
+// void ReadFileAsync(const TString& filename, TString* content,
+// const std::function<void()>& done) {
+// // Calls Executor::Schedule(std::function<void()>).
+// Executor::DefaultExecutor()->Schedule(
+// y_absl::bind_front(&FileReader::BlockingRead, this,
+// filename, content, done));
+// }
+//
+// private:
+// void BlockingRead(const TString& filename, TString* content,
+// const std::function<void()>& done) {
+// CHECK_OK(file::GetContents(filename, content, {}));
+// done();
+// }
+// };
+//
+// `y_absl::bind_front()` stores bound arguments explicitly using the type passed
+// rather than implicitly based on the type accepted by its functor.
+//
+// Example: Binding arguments explicitly.
+//
+// void LogStringView(y_absl::string_view sv) {
+// LOG(INFO) << sv;
+// }
+//
+// Executor* e = Executor::DefaultExecutor();
+// TString s = "hello";
+// y_absl::string_view sv = s;
+//
+// // y_absl::bind_front(LogStringView, arg) makes a copy of arg and stores it.
+// e->Schedule(y_absl::bind_front(LogStringView, sv)); // ERROR: dangling
+// // string_view.
+//
+// e->Schedule(y_absl::bind_front(LogStringView, s)); // OK: stores a copy of
+// // s.
+//
+// To store some of the arguments passed to `y_absl::bind_front()` by reference,
+// use `std::ref()` and `std::cref()`.
+//
+// Example: Storing some of the bound arguments by reference.
+//
+// class Service {
+// public:
+// void Serve(const Request& req, std::function<void()>* done) {
+// // The request protocol buffer won't be deleted until done is called.
+// // It's safe to store a reference to it inside the functor.
+// Executor::DefaultExecutor()->Schedule(
+// y_absl::bind_front(&Service::BlockingServe, this, std::cref(req),
+// done));
+// }
+//
+// private:
+// void BlockingServe(const Request& req, std::function<void()>* done);
+// };
+//
+// Example: Storing bound arguments by reference.
+//
+// void Print(const TString& a, const TString& b) {
+// std::cerr << a << b;
+// }
+//
+// TString hi = "Hello, ";
+// std::vector<TString> names = {"Chuk", "Gek"};
+// // Doesn't copy hi.
+// for_each(names.begin(), names.end(),
+// y_absl::bind_front(Print, std::ref(hi)));
+//
+// // DO NOT DO THIS: the functor may outlive "hi", resulting in
+// // dangling references.
+// foo->DoInFuture(y_absl::bind_front(Print, std::ref(hi), "Guest")); // BAD!
+// auto f = y_absl::bind_front(Print, std::ref(hi), "Guest"); // BAD!
+//
+// Example: Storing reference-like types.
+//
+// void Print(y_absl::string_view a, const TString& b) {
+// std::cerr << a << b;
+// }
+//
+// TString hi = "Hello, ";
+// // Copies "hi".
+// y_absl::bind_front(Print, hi)("Chuk");
+//
+//   // Compile error: std::reference_wrapper<const TString> is not implicitly
+// // convertible to string_view.
+// // y_absl::bind_front(Print, std::cref(hi))("Chuk");
+//
+// // Doesn't copy "hi".
+// y_absl::bind_front(Print, y_absl::string_view(hi))("Chuk");
+//
+template <class F, class... BoundArgs>
+constexpr functional_internal::bind_front_t<F, BoundArgs...> bind_front(
+ F&& func, BoundArgs&&... args) {
+ return functional_internal::bind_front_t<F, BoundArgs...>(
+ y_absl::in_place, y_absl::forward<F>(func),
+ y_absl::forward<BoundArgs>(args)...);
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_FUNCTIONAL_BIND_FRONT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h
new file mode 100644
index 00000000000..6ae4c63c9e0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h
@@ -0,0 +1,142 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: function_ref.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the `y_absl::FunctionRef` type for holding a
+// non-owning reference to an object of any invocable type. This function
+// reference is typically most useful as a type-erased argument type for
+// accepting function types that neither take ownership nor copy the type; using
+// the reference type in this case avoids a copy and an allocation. Best
+// practices of other non-owning reference-like objects (such as
+// `y_absl::string_view`) apply here.
+//
+// An `y_absl::FunctionRef` is similar in usage to a `std::function` but has the
+// following differences:
+//
+// * It doesn't own the underlying object.
+// * It doesn't have a null or empty state.
+// * It never performs deep copies or allocations.
+// * It's much faster and cheaper to construct.
+// * It's trivially copyable and destructible.
+//
+// Generally, `y_absl::FunctionRef` should not be used as a return value, data
+// member, or to initialize a `std::function`. Such usages will often lead to
+// problematic lifetime issues. Once you convert something to an
+// `y_absl::FunctionRef` you cannot make a deep copy later.
+//
+// This class is suitable for use wherever a "const std::function<>&"
+// would be used without making a copy. ForEach functions and other versions of
+// the visitor pattern are a good example of when this class should be used.
+//
+// This class is trivial to copy and should be passed by value.
+#ifndef ABSL_FUNCTIONAL_FUNCTION_REF_H_
+#define ABSL_FUNCTIONAL_FUNCTION_REF_H_
+
+#include <cassert>
+#include <functional>
+#include <type_traits>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/functional/internal/function_ref.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// FunctionRef
+//
+// Dummy class declaration to allow the partial specialization based on function
+// types below.
+template <typename T>
+class FunctionRef;
+
+// FunctionRef
+//
+// An `y_absl::FunctionRef` is a lightweight wrapper around any invocable object
+// with a compatible signature. Generally, an `y_absl::FunctionRef` should only
+// be used as an argument type and should be preferred as an argument over a
+// const reference to a `std::function`.
+//
+// Example:
+//
+// // The following function takes a function callback by const reference
+// bool Visitor(const std::function<void(my_proto&,
+// y_absl::string_view)>& callback);
+//
+// // Assuming that the function is not stored or otherwise copied, it can be
+// // replaced by an `y_absl::FunctionRef`:
+// bool Visitor(y_absl::FunctionRef<void(my_proto&, y_absl::string_view)>
+// callback);
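+//
+//   // A caller can then pass any compatible invocable, such as a lambda:
+//   Visitor([](my_proto& proto, y_absl::string_view prefix) {
+//     ...
+//   });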
+//
+// Note: the assignment operator within an `y_absl::FunctionRef` is intentionally
+// deleted to prevent misuse; because the `y_absl::FunctionRef` does not own the
+// underlying type, assignment likely indicates misuse.
+template <typename R, typename... Args>
+class FunctionRef<R(Args...)> {
+ private:
+ // Used to disable constructors for objects that are not compatible with the
+ // signature of this FunctionRef.
+ template <typename F,
+ typename FR = y_absl::base_internal::invoke_result_t<F, Args&&...>>
+ using EnableIfCompatible =
+ typename std::enable_if<std::is_void<R>::value ||
+ std::is_convertible<FR, R>::value>::type;
+
+ public:
+  // Constructs a FunctionRef from any invocable type.
+ template <typename F, typename = EnableIfCompatible<const F&>>
+ // NOLINTNEXTLINE(runtime/explicit)
+ FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND)
+ : invoker_(&y_absl::functional_internal::InvokeObject<F, R, Args...>) {
+ y_absl::functional_internal::AssertNonNull(f);
+ ptr_.obj = &f;
+ }
+
+ // Overload for function pointers. This eliminates a level of indirection that
+  // would happen if the above overload were used (it lets us store the pointer
+ // instead of a pointer to a pointer).
+ //
+ // This overload is also used for references to functions, since references to
+ // functions can decay to function pointers implicitly.
+ template <
+ typename F, typename = EnableIfCompatible<F*>,
+ y_absl::functional_internal::EnableIf<y_absl::is_function<F>::value> = 0>
+ FunctionRef(F* f) // NOLINT(runtime/explicit)
+ : invoker_(&y_absl::functional_internal::InvokeFunction<F*, R, Args...>) {
+ assert(f != nullptr);
+ ptr_.fun = reinterpret_cast<decltype(ptr_.fun)>(f);
+ }
+
+ // To help prevent subtle lifetime bugs, FunctionRef is not assignable.
+ // Typically, it should only be used as an argument type.
+ FunctionRef& operator=(const FunctionRef& rhs) = delete;
+ FunctionRef(const FunctionRef& rhs) = default;
+
+ // Call the underlying object.
+ R operator()(Args... args) const {
+ return invoker_(ptr_, std::forward<Args>(args)...);
+ }
+
+ private:
+ y_absl::functional_internal::VoidPtr ptr_;
+ y_absl::functional_internal::Invoker<R, Args...> invoker_;
+};
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_FUNCTIONAL_FUNCTION_REF_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/front_binder.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/front_binder.h
new file mode 100644
index 00000000000..b2fff442741
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/front_binder.h
@@ -0,0 +1,95 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Implementation details for `y_absl::bind_front()`.
+
+#ifndef ABSL_FUNCTIONAL_INTERNAL_FRONT_BINDER_H_
+#define ABSL_FUNCTIONAL_INTERNAL_FRONT_BINDER_H_
+
+#include <cstddef>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/internal/invoke.h"
+#include "y_absl/container/internal/compressed_tuple.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/utility/utility.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace functional_internal {
+
+// Invoke the bound functor, expanding the tuple of bound arguments.
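+// The tuple packs the functor at index 0 followed by the bound arguments, so
+// the expansion passes the functor itself as the first argument to invoke().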
+template <class R, class Tuple, size_t... Idx, class... Args>
+R Apply(Tuple&& bound, y_absl::index_sequence<Idx...>, Args&&... free) {
+ return base_internal::invoke(
+ y_absl::forward<Tuple>(bound).template get<Idx>()...,
+ y_absl::forward<Args>(free)...);
+}
+
+template <class F, class... BoundArgs>
+class FrontBinder {
+ using BoundArgsT = y_absl::container_internal::CompressedTuple<F, BoundArgs...>;
+ using Idx = y_absl::make_index_sequence<sizeof...(BoundArgs) + 1>;
+
+ BoundArgsT bound_args_;
+
+ public:
+ template <class... Ts>
+ constexpr explicit FrontBinder(y_absl::in_place_t, Ts&&... ts)
+ : bound_args_(y_absl::forward<Ts>(ts)...) {}
+
+ template <class... FreeArgs, class R = base_internal::invoke_result_t<
+ F&, BoundArgs&..., FreeArgs&&...>>
+ R operator()(FreeArgs&&... free_args) & {
+ return functional_internal::Apply<R>(bound_args_, Idx(),
+ y_absl::forward<FreeArgs>(free_args)...);
+ }
+
+ template <class... FreeArgs,
+ class R = base_internal::invoke_result_t<
+ const F&, const BoundArgs&..., FreeArgs&&...>>
+ R operator()(FreeArgs&&... free_args) const& {
+ return functional_internal::Apply<R>(bound_args_, Idx(),
+ y_absl::forward<FreeArgs>(free_args)...);
+ }
+
+ template <class... FreeArgs, class R = base_internal::invoke_result_t<
+ F&&, BoundArgs&&..., FreeArgs&&...>>
+ R operator()(FreeArgs&&... free_args) && {
+ // This overload is called when *this is an rvalue. If some of the bound
+ // arguments are stored by value or rvalue reference, we move them.
+ return functional_internal::Apply<R>(y_absl::move(bound_args_), Idx(),
+ y_absl::forward<FreeArgs>(free_args)...);
+ }
+
+ template <class... FreeArgs,
+ class R = base_internal::invoke_result_t<
+ const F&&, const BoundArgs&&..., FreeArgs&&...>>
+ R operator()(FreeArgs&&... free_args) const&& {
+ // This overload is called when *this is an rvalue. If some of the bound
+ // arguments are stored by value or rvalue reference, we move them.
+ return functional_internal::Apply<R>(y_absl::move(bound_args_), Idx(),
+ y_absl::forward<FreeArgs>(free_args)...);
+ }
+};
+
+template <class F, class... BoundArgs>
+using bind_front_t = FrontBinder<decay_t<F>, y_absl::decay_t<BoundArgs>...>;
+
+} // namespace functional_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_FUNCTIONAL_INTERNAL_FRONT_BINDER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/function_ref.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/function_ref.h
new file mode 100644
index 00000000000..1ba81ee96ab
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/function_ref.h
@@ -0,0 +1,106 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_FUNCTIONAL_INTERNAL_FUNCTION_REF_H_
+#define ABSL_FUNCTIONAL_INTERNAL_FUNCTION_REF_H_
+
+#include <cassert>
+#include <functional>
+#include <type_traits>
+
+#include "y_absl/base/internal/invoke.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace functional_internal {
+
+// Like a void* that can handle function pointers as well. The standard does not
+// allow function pointers to round-trip through void*, but void(*)() is fine.
+//
+// Note: It's important that this class remains trivial and is the same size as
+// a pointer, since this allows the compiler to perform tail-call optimizations
+// when the underlying function is a callable object with a matching signature.
+union VoidPtr {
+ const void* obj;
+ void (*fun)();
+};
+
+// Chooses the best type for passing T as an argument: we attempt to stay
+// close to the System V AMD64 ABI, under which small objects with a trivial
+// copy constructor are passed by value.
+template <typename T>
+constexpr bool PassByValue() {
+ return !std::is_lvalue_reference<T>::value &&
+ y_absl::is_trivially_copy_constructible<T>::value &&
+ y_absl::is_trivially_copy_assignable<
+ typename std::remove_cv<T>::type>::value &&
+ std::is_trivially_destructible<T>::value &&
+ sizeof(T) <= 2 * sizeof(void*);
+}
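+// For example, `int`, pointers, and `y_absl::string_view` (small and trivially
+// copyable) satisfy PassByValue(), while a non-trivial type such as `TString`
+// is forwarded by reference via ForwardT below.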
+
+template <typename T>
+struct ForwardT : std::conditional<PassByValue<T>(), T, T&&> {};
+
+// An Invoker takes a pointer to the type-erased invokable object, followed by
+// the arguments that the invokable object expects.
+//
+// Note: The order of arguments here is an optimization, since member functions
+// have an implicit "this" pointer as their first argument, putting VoidPtr
+// first allows the compiler to perform tail-call optimization in many cases.
+template <typename R, typename... Args>
+using Invoker = R (*)(VoidPtr, typename ForwardT<Args>::type...);
+
+//
+// InvokeObject and InvokeFunction provide static "Invoke" functions that can be
+// used as Invokers for objects or functions respectively.
+//
+// static_cast<R> handles the case where the return type is void.
+template <typename Obj, typename R, typename... Args>
+R InvokeObject(VoidPtr ptr, typename ForwardT<Args>::type... args) {
+ auto o = static_cast<const Obj*>(ptr.obj);
+ return static_cast<R>(
+ y_absl::base_internal::invoke(*o, std::forward<Args>(args)...));
+}
+
+template <typename Fun, typename R, typename... Args>
+R InvokeFunction(VoidPtr ptr, typename ForwardT<Args>::type... args) {
+ auto f = reinterpret_cast<Fun>(ptr.fun);
+ return static_cast<R>(
+ y_absl::base_internal::invoke(f, std::forward<Args>(args)...));
+}
+
+template <typename Sig>
+void AssertNonNull(const std::function<Sig>& f) {
+ assert(f != nullptr);
+ (void)f;
+}
+
+template <typename F>
+void AssertNonNull(const F&) {}
+
+template <typename F, typename C>
+void AssertNonNull(F C::*f) {
+ assert(f != nullptr);
+ (void)f;
+}
+
+template <bool C>
+using EnableIf = typename ::std::enable_if<C, int>::type;
+
+} // namespace functional_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_FUNCTIONAL_INTERNAL_FUNCTION_REF_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/ya.make
new file mode 100644
index 00000000000..b5ead458565
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/ya.make
@@ -0,0 +1,14 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..6e983121940
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/.yandex_meta/licenses.list.txt
@@ -0,0 +1,16 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2018 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h
new file mode 100644
index 00000000000..1e93ee96d90
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h
@@ -0,0 +1,347 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: hash.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the Abseil `hash` library and the Abseil hashing
+// framework. This framework consists of the following:
+//
+// * The `y_absl::Hash` functor, which is used to invoke the hasher within the
+// Abseil hashing framework. `y_absl::Hash<T>` supports most basic types and
+// a number of Abseil types out of the box.
+// * `AbslHashValue`, an extension point that allows you to extend types to
+// support Abseil hashing without requiring you to define a hashing
+// algorithm.
+// * `HashState`, a type-erased class which implements the manipulation of the
+// hash state (H) itself, contains member functions `combine()` and
+// `combine_contiguous()`, which you can use to contribute to an existing
+// hash state when hashing your types.
+//
+// Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework
+// provides most of its utility by abstracting away the hash algorithm (and its
+// implementation) entirely. Instead, a type invokes the Abseil hashing
+// framework by simply combining its state with the state of known, hashable
+// types. Hashing of that combined state is separately done by `y_absl::Hash`.
+//
+// One should assume that a hash algorithm is chosen randomly at the start of
+// each process. E.g., `y_absl::Hash<int>{}(9)` in one process and
+// `y_absl::Hash<int>{}(9)` in another process are likely to differ.
+//
+// `y_absl::Hash` is intended to strongly mix input bits with a target of passing
+// an [Avalanche Test](https://en.wikipedia.org/wiki/Avalanche_effect).
+//
+// Example:
+//
+// // Suppose we have a class `Circle` for which we want to add hashing:
+// class Circle {
+// public:
+// ...
+// private:
+// std::pair<int, int> center_;
+// int radius_;
+// };
+//
+// // To add hashing support to `Circle`, we simply need to add a free
+// // (non-member) function `AbslHashValue()`, and return the combined hash
+// // state of the existing hash state and the class state. You can add such a
+// // free function using a friend declaration within the body of the class:
+// class Circle {
+// public:
+// ...
+// template <typename H>
+// friend H AbslHashValue(H h, const Circle& c) {
+// return H::combine(std::move(h), c.center_, c.radius_);
+// }
+// ...
+// };
+//
+// For more information, see Adding Type Support to `y_absl::Hash` below.
+//
+#ifndef ABSL_HASH_HASH_H_
+#define ABSL_HASH_HASH_H_
+
+#include <tuple>
+
+#include "y_absl/hash/internal/hash.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// -----------------------------------------------------------------------------
+// `y_absl::Hash`
+// -----------------------------------------------------------------------------
+//
+// `y_absl::Hash<T>` is a convenient general-purpose hash functor for any type `T`
+// satisfying any of the following conditions (in order):
+//
+// * T is an arithmetic or pointer type
+// * T defines an overload for `AbslHashValue(H, const T&)` for an arbitrary
+// hash state `H`.
+// * T defines a specialization of `std::hash<T>`
+//
+// `y_absl::Hash` intrinsically supports the following types:
+//
+// * All integral types (including bool)
+// * All enum types
+// * All floating-point types (although hashing them is discouraged)
+// * All pointer types, including nullptr_t
+// * std::pair<T1, T2>, if T1 and T2 are hashable
+// * std::tuple<Ts...>, if all the Ts... are hashable
+// * std::unique_ptr and std::shared_ptr
+// * All string-like types including:
+// * y_absl::Cord
+// * TString
+// * std::string_view (as well as any instance of std::basic_string that
+// uses char and std::char_traits)
+// * All the standard sequence containers (provided the elements are hashable)
+// * All the standard ordered associative containers (provided the elements are
+// hashable)
+// * y_absl types such as the following:
+// * y_absl::string_view
+// * y_absl::InlinedVector
+// * y_absl::FixedArray
+// * y_absl::uint128
+// * y_absl::Time, y_absl::Duration, and y_absl::TimeZone
+//
+// Note: the list above is not meant to be exhaustive. Additional type support
+// may be added, in which case the above list will be updated.
+//
+// -----------------------------------------------------------------------------
+// y_absl::Hash Invocation Evaluation
+// -----------------------------------------------------------------------------
+//
+// When invoked, `y_absl::Hash<T>` searches for supplied hash functions in the
+// following order:
+//
+// * Natively supported types out of the box (see above)
+// * Types for which an `AbslHashValue()` overload is provided (such as
+// user-defined types). See "Adding Type Support to `y_absl::Hash`" below.
+// * Types which define a `std::hash<T>` specialization
+//
+// The fallback to legacy hash functions exists mainly for backwards
+// compatibility. If you have a choice, prefer defining an `AbslHashValue`
+// overload instead of specializing any legacy hash functors.
+//
+// -----------------------------------------------------------------------------
+// The Hash State Concept, and using `HashState` for Type Erasure
+// -----------------------------------------------------------------------------
+//
+// The `y_absl::Hash` framework relies on the Concept of a "hash state." Such a
+// hash state is used in several places:
+//
+// * Within existing implementations of `y_absl::Hash<T>` to store the hashed
+// state of an object. Note that it is up to the implementation how it stores
+// such state. A hash table, for example, may mix the state to produce an
+// integer value; a testing framework may simply hold a vector of that state.
+// * Within implementations of `AbslHashValue()` used to extend user-defined
+// types. (See "Adding Type Support to y_absl::Hash" below.)
+// * Inside a `HashState`, providing type erasure for the concept of a hash
+// state, which you can use to extend the `y_absl::Hash` framework for types
+// that are otherwise difficult to extend using `AbslHashValue()`. (See the
+// `HashState` class below.)
+//
+// The "hash state" concept contains two member functions for mixing hash state:
+//
+// * `H::combine(state, values...)`
+//
+// Combines an arbitrary number of values into a hash state, returning the
+// updated state. Note that the existing hash state is move-only and must be
+// passed by value.
+//
+// Each of the value types T must be hashable by H.
+//
+// NOTE:
+//
+// state = H::combine(std::move(state), value1, value2, value3);
+//
+// must be guaranteed to produce the same hash expansion as
+//
+// state = H::combine(std::move(state), value1);
+// state = H::combine(std::move(state), value2);
+// state = H::combine(std::move(state), value3);
+//
+// * `H::combine_contiguous(state, data, size)`
+//
+// Combines a contiguous array of `size` elements into a hash state,
+// returning the updated state. Note that the existing hash state is
+// move-only and must be passed by value.
+//
+// NOTE:
+//
+// state = H::combine_contiguous(std::move(state), data, size);
+//
+// need NOT be guaranteed to produce the same hash expansion as a loop
+// (it may perform internal optimizations). If you need this guarantee, use a
+// loop instead.
+//
+// -----------------------------------------------------------------------------
+// Adding Type Support to `y_absl::Hash`
+// -----------------------------------------------------------------------------
+//
+// To add support for your user-defined type, add a proper `AbslHashValue()`
+// overload as a free (non-member) function. The overload will take an
+// existing hash state and should combine that state with state from the type.
+//
+// Example:
+//
+// template <typename H>
+// H AbslHashValue(H state, const MyType& v) {
+// return H::combine(std::move(state), v.field1, ..., v.fieldN);
+// }
+//
+// where `(field1, ..., fieldN)` are the members you would use on your
+// `operator==` to define equality.
+//
+// Notice that `AbslHashValue` is not a class member, but an ordinary function.
+// An `AbslHashValue` overload for a type should only be declared in the same
+// file and namespace as said type. The proper `AbslHashValue` implementation
+// for a given type will be discovered via ADL.
+//
+// Note: unlike `std::hash`, `y_absl::Hash` should never be specialized. It must
+// only be extended by adding `AbslHashValue()` overloads.
+//
+template <typename T>
+using Hash = y_absl::hash_internal::Hash<T>;
+
+// HashOf
+//
+// y_absl::HashOf() is a helper that generates a hash from the values of its
+// arguments. It dispatches to y_absl::Hash directly, as follows:
+// * HashOf(t) == y_absl::Hash<T>{}(t)
+// * HashOf(a, b, c) == HashOf(std::make_tuple(a, b, c))
+//
+// HashOf(a1, a2, ...) == HashOf(b1, b2, ...) is guaranteed when
+// * The argument lists have pairwise identical C++ types
+// * a1 == b1 && a2 == b2 && ...
+//
+// The requirement that the arguments match in both type and value is critical.
+// It means that `a == b` does not necessarily imply `HashOf(a) == HashOf(b)` if
+// `a` and `b` have different types. For example, `HashOf(2) != HashOf(2.0)`.
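+//
+// Example:
+//
+//   TString s = "hello";
+//   size_t h1 = y_absl::HashOf(s);      // == y_absl::Hash<TString>{}(s)
+//   size_t h2 = y_absl::HashOf(s, 42);  // == y_absl::HashOf(std::make_tuple(s, 42))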
+template <int&... ExplicitArgumentBarrier, typename... Types>
+size_t HashOf(const Types&... values) {
+ auto tuple = std::tie(values...);
+ return y_absl::Hash<decltype(tuple)>{}(tuple);
+}
+
+// HashState
+//
+// A type erased version of the hash state concept, for use in user-defined
+// `AbslHashValue` implementations that can't use templates (such as PImpl
+// classes, virtual functions, etc.). The type erasure adds overhead, so it
+// should be avoided unless necessary.
+//
+// Note: This wrapper will only erase calls to:
+// combine_contiguous(H, const unsigned char*, size_t)
+//
+// All other calls will be handled internally and will not invoke overloads
+// provided by the wrapped class.
+//
+// Users of this class should still define a template `AbslHashValue` function,
+// but can use `y_absl::HashState::Create(&state)` to erase the type of the hash
+// state and dispatch to their private hashing logic.
+//
+// This state can be used like any other hash state. In particular, you can call
+// `HashState::combine()` and `HashState::combine_contiguous()` on it.
+//
+// Example:
+//
+// class Interface {
+// public:
+// template <typename H>
+// friend H AbslHashValue(H state, const Interface& value) {
+// state = H::combine(std::move(state), std::type_index(typeid(*this)));
+// value.HashValue(y_absl::HashState::Create(&state));
+// return state;
+// }
+// private:
+// virtual void HashValue(y_absl::HashState state) const = 0;
+// };
+//
+// class Impl : Interface {
+// private:
+// void HashValue(y_absl::HashState state) const override {
+// y_absl::HashState::combine(std::move(state), v1_, v2_);
+// }
+// int v1_;
+// TString v2_;
+// };
+class HashState : public hash_internal::HashStateBase<HashState> {
+ public:
+ // HashState::Create()
+ //
+ // Create a new `HashState` instance that wraps `state`. All calls to
+ // `combine()` and `combine_contiguous()` on the new instance will be
+ // redirected to the original `state` object. The `state` object must outlive
+ // the `HashState` instance.
+ template <typename T>
+ static HashState Create(T* state) {
+ HashState s;
+ s.Init(state);
+ return s;
+ }
+
+ HashState(const HashState&) = delete;
+ HashState& operator=(const HashState&) = delete;
+ HashState(HashState&&) = default;
+ HashState& operator=(HashState&&) = default;
+
+ // HashState::combine()
+ //
+ // Combines an arbitrary number of values into a hash state, returning the
+ // updated state.
+ using HashState::HashStateBase::combine;
+
+ // HashState::combine_contiguous()
+ //
+ // Combines a contiguous array of `size` elements into a hash state, returning
+ // the updated state.
+ static HashState combine_contiguous(HashState hash_state,
+ const unsigned char* first, size_t size) {
+ hash_state.combine_contiguous_(hash_state.state_, first, size);
+ return hash_state;
+ }
+ using HashState::HashStateBase::combine_contiguous;
+
+ private:
+ HashState() = default;
+
+ template <typename T>
+ static void CombineContiguousImpl(void* p, const unsigned char* first,
+ size_t size) {
+ T& state = *static_cast<T*>(p);
+ state = T::combine_contiguous(std::move(state), first, size);
+ }
+
+ template <typename T>
+ void Init(T* state) {
+ state_ = state;
+ combine_contiguous_ = &CombineContiguousImpl<T>;
+ }
+
+ // Do not erase an already erased state.
+ void Init(HashState* state) {
+ state_ = state->state_;
+ combine_contiguous_ = state->combine_contiguous_;
+ }
+
+ void* state_;
+ void (*combine_contiguous_)(void*, const unsigned char*, size_t);
+};
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_HASH_HASH_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash_testing.h b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash_testing.h
new file mode 100644
index 00000000000..2f22d370398
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash_testing.h
@@ -0,0 +1,378 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_HASH_HASH_TESTING_H_
+#define ABSL_HASH_HASH_TESTING_H_
+
+#include <initializer_list>
+#include <tuple>
+#include <type_traits>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "y_absl/hash/internal/spy_hash_state.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/types/variant.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Run the y_absl::Hash algorithm over all the elements passed in and verify that
+// their hash expansion is congruent with their `==` operator.
+//
+// It is used in conjunction with EXPECT_TRUE. Failures will output information
+// on what requirement failed and on which objects.
+//
+// Users should pass a collection of types as either an initializer list or a
+// container of cases.
+//
+// EXPECT_TRUE(y_absl::VerifyTypeImplementsAbslHashCorrectly(
+// {v1, v2, ..., vN}));
+//
+// std::vector<MyType> cases;
+// // Fill cases...
+// EXPECT_TRUE(y_absl::VerifyTypeImplementsAbslHashCorrectly(cases));
+//
+// Users can pass a variety of types for testing heterogeneous lookup with
+// `std::make_tuple`:
+//
+// EXPECT_TRUE(y_absl::VerifyTypeImplementsAbslHashCorrectly(
+// std::make_tuple(v1, v2, ..., vN)));
+//
+//
+// Ideally, the values passed should provide enough coverage of the `==`
+// operator and the AbslHashValue implementations.
+// For dynamically sized types, the empty state should usually be included in
+// the values.
+//
+// The function accepts an optional comparator function, in case `==` is not
+// enough for the values provided.
+//
+// Usage:
+//
+// EXPECT_TRUE(y_absl::VerifyTypeImplementsAbslHashCorrectly(
+// std::make_tuple(v1, v2, ..., vN), MyCustomEq{}));
+//
+// It checks the following requirements:
+// 1. The expansion for a value is deterministic.
+// 2. For any two objects `a` and `b` in the sequence, if `a == b` evaluates
+// to true, then their hash expansion must be equal.
+//  3. If `a == b` evaluates to false, their hash expansion must be unequal.
+//  4. If `a == b` evaluates to false, neither hash expansion can be a
+//     suffix of the other.
+// 5. AbslHashValue overloads should not be called by the user. They are only
+// meant to be called by the framework. Users should call H::combine() and
+// H::combine_contiguous().
+// 6. No moved-from instance of the hash state is used in the implementation
+// of AbslHashValue.
+//
+// The values do not have to have the same type. This can be useful for
+// equivalent types that support heterogeneous lookup.
+//
+// A possible reason for breaking (2) is combining state in the hash expansion
+// that was not used in `==`.
+// For example:
+//
+// struct Bad2 {
+// int a, b;
+// template <typename H>
+// friend H AbslHashValue(H state, Bad2 x) {
+// // Uses a and b.
+// return H::combine(std::move(state), x.a, x.b);
+// }
+// friend bool operator==(Bad2 x, Bad2 y) {
+// // Only uses a.
+// return x.a == y.a;
+// }
+// };
+//
+// As for (3), breaking this usually means that there is state being passed to
+// the `==` operator that is not used in the hash expansion.
+// For example:
+//
+// struct Bad3 {
+// int a, b;
+// template <typename H>
+// friend H AbslHashValue(H state, Bad3 x) {
+// // Only uses a.
+// return H::combine(std::move(state), x.a);
+// }
+// friend bool operator==(Bad3 x, Bad3 y) {
+// // Uses a and b.
+// return x.a == y.a && x.b == y.b;
+// }
+// };
+//
+// Finally, a common way to break (4) is by combining dynamic ranges without
+// combining the size of the range.
+// For example:
+//
+// struct Bad4 {
+// int *p, size;
+// template <typename H>
+// friend H AbslHashValue(H state, Bad4 x) {
+// return H::combine_contiguous(std::move(state), x.p, x.p + x.size);
+// }
+// friend bool operator==(Bad4 x, Bad4 y) {
+// // Compare two ranges for equality. C++14 code can instead use std::equal.
+// return y_absl::equal(x.p, x.p + x.size, y.p, y.p + y.size);
+// }
+// };
+//
+// An easy solution to this is to combine the size after combining the range,
+// like so:
+// template <typename H>
+// friend H AbslHashValue(H state, Bad4 x) {
+// return H::combine(
+// H::combine_contiguous(std::move(state), x.p, x.p + x.size), x.size);
+// }
+//
+template <int&... ExplicitBarrier, typename Container>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(const Container& values);
+
+template <int&... ExplicitBarrier, typename Container, typename Eq>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals);
+
+template <int&..., typename T>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values);
+
+template <int&..., typename T, typename Eq>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values,
+ Eq equals);
+
+namespace hash_internal {
+
+struct PrintVisitor {
+ size_t index;
+ template <typename T>
+ TString operator()(const T* value) const {
+ return y_absl::StrCat("#", index, "(", testing::PrintToString(*value), ")");
+ }
+};
+
+template <typename Eq>
+struct EqVisitor {
+ Eq eq;
+ template <typename T, typename U>
+ bool operator()(const T* t, const U* u) const {
+ return eq(*t, *u);
+ }
+};
+
+struct ExpandVisitor {
+ template <typename T>
+ SpyHashState operator()(const T* value) const {
+ return SpyHashState::combine(SpyHashState(), *value);
+ }
+};
+
+template <typename Container, typename Eq>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) {
+ using V = typename Container::value_type;
+
+ struct Info {
+ const V& value;
+ size_t index;
+ TString ToString() const {
+ return y_absl::visit(PrintVisitor{index}, value);
+ }
+ SpyHashState expand() const { return y_absl::visit(ExpandVisitor{}, value); }
+ };
+
+ using EqClass = std::vector<Info>;
+ std::vector<EqClass> classes;
+
+ // Gather the values in equivalence classes.
+ size_t i = 0;
+ for (const auto& value : values) {
+ EqClass* c = nullptr;
+ for (auto& eqclass : classes) {
+ if (y_absl::visit(EqVisitor<Eq>{equals}, value, eqclass[0].value)) {
+ c = &eqclass;
+ break;
+ }
+ }
+ if (c == nullptr) {
+ classes.emplace_back();
+ c = &classes.back();
+ }
+ c->push_back({value, i});
+ ++i;
+
+ // Verify potential errors captured by SpyHashState.
+ if (auto error = c->back().expand().error()) {
+ return testing::AssertionFailure() << *error;
+ }
+ }
+
+ if (classes.size() < 2) {
+ return testing::AssertionFailure()
+ << "At least two equivalence classes are expected.";
+ }
+
+ // We assume that equality is correctly implemented.
+ // Now we verify that AbslHashValue is also correctly implemented.
+
+ for (const auto& c : classes) {
+ // All elements of the equivalence class must have the same hash
+ // expansion.
+ const SpyHashState expected = c[0].expand();
+ for (const Info& v : c) {
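+      // Expanding the same value twice must produce identical results;
+      // otherwise the hash expansion is non-deterministic.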
+ if (v.expand() != v.expand()) {
+ return testing::AssertionFailure()
+ << "Hash expansion for " << v.ToString()
+ << " is non-deterministic.";
+ }
+ if (v.expand() != expected) {
+ return testing::AssertionFailure()
+ << "Values " << c[0].ToString() << " and " << v.ToString()
+ << " evaluate as equal but have an unequal hash expansion.";
+ }
+ }
+
+ // Elements from other classes must have different hash expansion.
+ for (const auto& c2 : classes) {
+ if (&c == &c2) continue;
+ const SpyHashState c2_hash = c2[0].expand();
+ switch (SpyHashState::Compare(expected, c2_hash)) {
+ case SpyHashState::CompareResult::kEqual:
+ return testing::AssertionFailure()
+ << "Values " << c[0].ToString() << " and " << c2[0].ToString()
+ << " evaluate as unequal but have an equal hash expansion.";
+ case SpyHashState::CompareResult::kBSuffixA:
+ return testing::AssertionFailure()
+ << "Hash expansion of " << c2[0].ToString()
+ << " is a suffix of the hash expansion of " << c[0].ToString()
+ << ".";
+ case SpyHashState::CompareResult::kASuffixB:
+ return testing::AssertionFailure()
+ << "Hash expansion of " << c[0].ToString()
+ << " is a suffix of the hash expansion of " << c2[0].ToString()
+ << ".";
+ case SpyHashState::CompareResult::kUnequal:
+ break;
+ }
+ }
+ }
+ return testing::AssertionSuccess();
+}
+
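+// TypeSet is a compile-time set of unique types: Insert<U> prepends U unless
+// it is already present, and `apply` instantiates a template with the
+// accumulated types. MakeTypeSet folds a parameter pack into such a set,
+// dropping duplicates; VariantForTypes then builds a y_absl::variant over the
+// distinct decayed pointer types.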
+template <typename... T>
+struct TypeSet {
+ template <typename U, bool = disjunction<std::is_same<T, U>...>::value>
+ struct Insert {
+ using type = TypeSet<U, T...>;
+ };
+ template <typename U>
+ struct Insert<U, true> {
+ using type = TypeSet;
+ };
+
+ template <template <typename...> class C>
+ using apply = C<T...>;
+};
+
+template <typename... T>
+struct MakeTypeSet : TypeSet<> {};
+template <typename T, typename... Ts>
+struct MakeTypeSet<T, Ts...> : MakeTypeSet<Ts...>::template Insert<T>::type {};
+
+template <typename... T>
+using VariantForTypes = typename MakeTypeSet<
+ const typename std::decay<T>::type*...>::template apply<y_absl::variant>;
+
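+// ContainerAsVector normalizes the input values into a vector of variants of
+// pointers to the elements, so heterogeneous tuples and ordinary containers
+// share the verification overload above.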
+template <typename Container>
+struct ContainerAsVector {
+ using V = y_absl::variant<const typename Container::value_type*>;
+ using Out = std::vector<V>;
+
+ static Out Do(const Container& values) {
+ Out out;
+ for (const auto& v : values) out.push_back(&v);
+ return out;
+ }
+};
+
+template <typename... T>
+struct ContainerAsVector<std::tuple<T...>> {
+ using V = VariantForTypes<T...>;
+ using Out = std::vector<V>;
+
+ template <size_t... I>
+ static Out DoImpl(const std::tuple<T...>& tuple, y_absl::index_sequence<I...>) {
+ return Out{&std::get<I>(tuple)...};
+ }
+
+ static Out Do(const std::tuple<T...>& values) {
+ return DoImpl(values, y_absl::index_sequence_for<T...>());
+ }
+};
+
+template <>
+struct ContainerAsVector<std::tuple<>> {
+ static std::vector<VariantForTypes<int>> Do(std::tuple<>) { return {}; }
+};
+
+struct DefaultEquals {
+ template <typename T, typename U>
+ bool operator()(const T& t, const U& u) const {
+ return t == u;
+ }
+};
+
+} // namespace hash_internal
+
+template <int&..., typename Container>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(const Container& values) {
+ return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
+ hash_internal::ContainerAsVector<Container>::Do(values),
+ hash_internal::DefaultEquals{});
+}
+
+template <int&..., typename Container, typename Eq>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) {
+ return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
+ hash_internal::ContainerAsVector<Container>::Do(values), equals);
+}
+
+template <int&..., typename T>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values) {
+ return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
+ hash_internal::ContainerAsVector<std::initializer_list<T>>::Do(values),
+ hash_internal::DefaultEquals{});
+}
+
+template <int&..., typename T, typename Eq>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values,
+ Eq equals) {
+ return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
+ hash_internal::ContainerAsVector<std::initializer_list<T>>::Do(values),
+ equals);
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_HASH_HASH_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..bbc98ff778c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/.yandex_meta/licenses.list.txt
@@ -0,0 +1,34 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2018 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2020 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/city.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/city.cc
new file mode 100644
index 00000000000..5f1b655e7ef
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/city.cc
@@ -0,0 +1,349 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file provides CityHash64() and related functions.
+//
+// It's probably possible to create even faster hash functions by
+// writing a program that systematically explores some of the space of
+// possible hash functions, by using SIMD instructions, or by
+// compromising on hash quality.
+
+#include "y_absl/hash/internal/city.h"
+
+#include <string.h> // for memcpy and memset
+#include <algorithm>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/endian.h"
+#include "y_absl/base/internal/unaligned_access.h"
+#include "y_absl/base/optimization.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+#ifdef ABSL_IS_BIG_ENDIAN
+#define uint32_in_expected_order(x) (y_absl::gbswap_32(x))
+#define uint64_in_expected_order(x) (y_absl::gbswap_64(x))
+#else
+#define uint32_in_expected_order(x) (x)
+#define uint64_in_expected_order(x) (x)
+#endif
+
+static uint64_t Fetch64(const char *p) {
+ return uint64_in_expected_order(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+static uint32_t Fetch32(const char *p) {
+ return uint32_in_expected_order(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+// Some primes between 2^63 and 2^64 for various uses.
+static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
+static const uint64_t k1 = 0xb492b66fbe98f273ULL;
+static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
+
+// Magic numbers for 32-bit hashing. Copied from Murmur3.
+static const uint32_t c1 = 0xcc9e2d51;
+static const uint32_t c2 = 0x1b873593;
+
+// A 32-bit to 32-bit integer hash copied from Murmur3.
+static uint32_t fmix(uint32_t h) {
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+ return h;
+}
+
+static uint32_t Rotate32(uint32_t val, int shift) {
+ // Avoid shifting by 32: doing so yields an undefined result.
+ return shift == 0 ? val : ((val >> shift) | (val << (32 - shift)));
+}
+
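+// PERMUTE3(a, b, c) cyclically rotates its arguments: (a, b, c) becomes
+// (c, a, b).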
+#undef PERMUTE3
+#define PERMUTE3(a, b, c) \
+ do { \
+ std::swap(a, b); \
+ std::swap(a, c); \
+ } while (0)
+
+static uint32_t Mur(uint32_t a, uint32_t h) {
+ // Helper from Murmur3 for combining two 32-bit values.
+ a *= c1;
+ a = Rotate32(a, 17);
+ a *= c2;
+ h ^= a;
+ h = Rotate32(h, 19);
+ return h * 5 + 0xe6546b64;
+}
+
+static uint32_t Hash32Len13to24(const char *s, size_t len) {
+ uint32_t a = Fetch32(s - 4 + (len >> 1));
+ uint32_t b = Fetch32(s + 4);
+ uint32_t c = Fetch32(s + len - 8);
+ uint32_t d = Fetch32(s + (len >> 1));
+ uint32_t e = Fetch32(s);
+ uint32_t f = Fetch32(s + len - 4);
+ uint32_t h = len;
+
+ return fmix(Mur(f, Mur(e, Mur(d, Mur(c, Mur(b, Mur(a, h)))))));
+}
+
+static uint32_t Hash32Len0to4(const char *s, size_t len) {
+ uint32_t b = 0;
+ uint32_t c = 9;
+ for (size_t i = 0; i < len; i++) {
+ signed char v = s[i];
+ b = b * c1 + v;
+ c ^= b;
+ }
+ return fmix(Mur(b, Mur(len, c)));
+}
+
+static uint32_t Hash32Len5to12(const char *s, size_t len) {
+ uint32_t a = len, b = len * 5, c = 9, d = b;
+ a += Fetch32(s);
+ b += Fetch32(s + len - 4);
+ c += Fetch32(s + ((len >> 1) & 4));
+ return fmix(Mur(c, Mur(b, Mur(a, d))));
+}
+
+uint32_t CityHash32(const char *s, size_t len) {
+ if (len <= 24) {
+ return len <= 12
+ ? (len <= 4 ? Hash32Len0to4(s, len) : Hash32Len5to12(s, len))
+ : Hash32Len13to24(s, len);
+ }
+
+ // len > 24
+ uint32_t h = len, g = c1 * len, f = g;
+
+ uint32_t a0 = Rotate32(Fetch32(s + len - 4) * c1, 17) * c2;
+ uint32_t a1 = Rotate32(Fetch32(s + len - 8) * c1, 17) * c2;
+ uint32_t a2 = Rotate32(Fetch32(s + len - 16) * c1, 17) * c2;
+ uint32_t a3 = Rotate32(Fetch32(s + len - 12) * c1, 17) * c2;
+ uint32_t a4 = Rotate32(Fetch32(s + len - 20) * c1, 17) * c2;
+ h ^= a0;
+ h = Rotate32(h, 19);
+ h = h * 5 + 0xe6546b64;
+ h ^= a2;
+ h = Rotate32(h, 19);
+ h = h * 5 + 0xe6546b64;
+ g ^= a1;
+ g = Rotate32(g, 19);
+ g = g * 5 + 0xe6546b64;
+ g ^= a3;
+ g = Rotate32(g, 19);
+ g = g * 5 + 0xe6546b64;
+ f += a4;
+ f = Rotate32(f, 19);
+ f = f * 5 + 0xe6546b64;
+ size_t iters = (len - 1) / 20;
+ do {
+ uint32_t b0 = Rotate32(Fetch32(s) * c1, 17) * c2;
+ uint32_t b1 = Fetch32(s + 4);
+ uint32_t b2 = Rotate32(Fetch32(s + 8) * c1, 17) * c2;
+ uint32_t b3 = Rotate32(Fetch32(s + 12) * c1, 17) * c2;
+ uint32_t b4 = Fetch32(s + 16);
+ h ^= b0;
+ h = Rotate32(h, 18);
+ h = h * 5 + 0xe6546b64;
+ f += b1;
+ f = Rotate32(f, 19);
+ f = f * c1;
+ g += b2;
+ g = Rotate32(g, 18);
+ g = g * 5 + 0xe6546b64;
+ h ^= b3 + b1;
+ h = Rotate32(h, 19);
+ h = h * 5 + 0xe6546b64;
+ g ^= b4;
+ g = y_absl::gbswap_32(g) * 5;
+ h += b4 * 5;
+ h = y_absl::gbswap_32(h);
+ f += b0;
+ PERMUTE3(f, h, g);
+ s += 20;
+ } while (--iters != 0);
+ g = Rotate32(g, 11) * c1;
+ g = Rotate32(g, 17) * c1;
+ f = Rotate32(f, 11) * c1;
+ f = Rotate32(f, 17) * c1;
+ h = Rotate32(h + g, 19);
+ h = h * 5 + 0xe6546b64;
+ h = Rotate32(h, 17) * c1;
+ h = Rotate32(h + f, 19);
+ h = h * 5 + 0xe6546b64;
+ h = Rotate32(h, 17) * c1;
+ return h;
+}
+
+// Bitwise right rotate. Normally this will compile to a single
+// instruction, especially if the shift is a manifest constant.
+static uint64_t Rotate(uint64_t val, int shift) {
+ // Avoid shifting by 64: doing so yields an undefined result.
+ return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+static uint64_t ShiftMix(uint64_t val) { return val ^ (val >> 47); }
+
+static uint64_t HashLen16(uint64_t u, uint64_t v, uint64_t mul) {
+ // Murmur-inspired hashing.
+ uint64_t a = (u ^ v) * mul;
+ a ^= (a >> 47);
+ uint64_t b = (v ^ a) * mul;
+ b ^= (b >> 47);
+ b *= mul;
+ return b;
+}
+
+static uint64_t HashLen16(uint64_t u, uint64_t v) {
+ const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+ return HashLen16(u, v, kMul);
+}
+
+static uint64_t HashLen0to16(const char *s, size_t len) {
+ if (len >= 8) {
+ uint64_t mul = k2 + len * 2;
+ uint64_t a = Fetch64(s) + k2;
+ uint64_t b = Fetch64(s + len - 8);
+ uint64_t c = Rotate(b, 37) * mul + a;
+ uint64_t d = (Rotate(a, 25) + b) * mul;
+ return HashLen16(c, d, mul);
+ }
+ if (len >= 4) {
+ uint64_t mul = k2 + len * 2;
+ uint64_t a = Fetch32(s);
+ return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul);
+ }
+ if (len > 0) {
+ uint8_t a = s[0];
+ uint8_t b = s[len >> 1];
+ uint8_t c = s[len - 1];
+ uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
+ uint32_t z = len + (static_cast<uint32_t>(c) << 2);
+ return ShiftMix(y * k2 ^ z * k0) * k2;
+ }
+ return k2;
+}
+
+// This probably works well for 16-byte strings as well, but it may be overkill
+// in that case.
+static uint64_t HashLen17to32(const char *s, size_t len) {
+ uint64_t mul = k2 + len * 2;
+ uint64_t a = Fetch64(s) * k1;
+ uint64_t b = Fetch64(s + 8);
+ uint64_t c = Fetch64(s + len - 8) * mul;
+ uint64_t d = Fetch64(s + len - 16) * k2;
+ return HashLen16(Rotate(a + b, 43) + Rotate(c, 30) + d,
+ a + Rotate(b + k2, 18) + c, mul);
+}
+
+// Return a 16-byte hash for 48 bytes. Quick and dirty.
+// Callers should use "random-looking" values for a and b.
+static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(
+ uint64_t w, uint64_t x, uint64_t y, uint64_t z, uint64_t a, uint64_t b) {
+ a += w;
+ b = Rotate(b + a + z, 21);
+ uint64_t c = a;
+ a += x;
+ a += y;
+ b += Rotate(a, 44);
+ return std::make_pair(a + z, b + c);
+}
+
+// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
+static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(const char *s,
+ uint64_t a,
+ uint64_t b) {
+ return WeakHashLen32WithSeeds(Fetch64(s), Fetch64(s + 8), Fetch64(s + 16),
+ Fetch64(s + 24), a, b);
+}
+
+// Return an 8-byte hash for 33 to 64 bytes.
+static uint64_t HashLen33to64(const char *s, size_t len) {
+ uint64_t mul = k2 + len * 2;
+ uint64_t a = Fetch64(s) * k2;
+ uint64_t b = Fetch64(s + 8);
+ uint64_t c = Fetch64(s + len - 24);
+ uint64_t d = Fetch64(s + len - 32);
+ uint64_t e = Fetch64(s + 16) * k2;
+ uint64_t f = Fetch64(s + 24) * 9;
+ uint64_t g = Fetch64(s + len - 8);
+ uint64_t h = Fetch64(s + len - 16) * mul;
+ uint64_t u = Rotate(a + g, 43) + (Rotate(b, 30) + c) * 9;
+ uint64_t v = ((a + g) ^ d) + f + 1;
+ uint64_t w = y_absl::gbswap_64((u + v) * mul) + h;
+ uint64_t x = Rotate(e + f, 42) + c;
+ uint64_t y = (y_absl::gbswap_64((v + w) * mul) + g) * mul;
+ uint64_t z = e + f + c;
+ a = y_absl::gbswap_64((x + z) * mul + y) + b;
+ b = ShiftMix((z + a) * mul + d + h) * mul;
+ return b + x;
+}
+
+uint64_t CityHash64(const char *s, size_t len) {
+ if (len <= 32) {
+ if (len <= 16) {
+ return HashLen0to16(s, len);
+ } else {
+ return HashLen17to32(s, len);
+ }
+ } else if (len <= 64) {
+ return HashLen33to64(s, len);
+ }
+
+ // For strings over 64 bytes we hash the end first, and then as we
+ // loop we keep 56 bytes of state: v, w, x, y, and z.
+ uint64_t x = Fetch64(s + len - 40);
+ uint64_t y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
+ uint64_t z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
+ std::pair<uint64_t, uint64_t> v =
+ WeakHashLen32WithSeeds(s + len - 64, len, z);
+ std::pair<uint64_t, uint64_t> w =
+ WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
+ x = x * k1 + Fetch64(s);
+
+ // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
+ len = (len - 1) & ~static_cast<size_t>(63);
+ do {
+ x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
+ y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
+ x ^= w.second;
+ y += v.first + Fetch64(s + 40);
+ z = Rotate(z + w.first, 33) * k1;
+ v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
+ w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
+ std::swap(z, x);
+ s += 64;
+ len -= 64;
+ } while (len != 0);
+ return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
+ HashLen16(v.second, w.second) + x);
+}
+
+uint64_t CityHash64WithSeed(const char *s, size_t len, uint64_t seed) {
+ return CityHash64WithSeeds(s, len, k2, seed);
+}
+
+uint64_t CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0,
+ uint64_t seed1) {
+ return HashLen16(CityHash64(s, len) - seed0, seed1);
+}
+
+} // namespace hash_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/city.h b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/city.h
new file mode 100644
index 00000000000..d2b32f00689
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/city.h
@@ -0,0 +1,78 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// https://code.google.com/p/cityhash/
+//
+// This file provides a few functions for hashing strings. All of them are
+// high-quality functions in the sense that they pass standard tests such
+// as Austin Appleby's SMHasher. They are also fast.
+//
+// For 64-bit x86 code, on short strings, we don't know of anything faster than
+// CityHash64 that is of comparable quality. We believe our nearest competitor
+// is Murmur3. For 64-bit x86 code, CityHash64 is an excellent choice for hash
+// tables and most other hashing (excluding cryptography).
+//
+// For 32-bit x86 code, we don't know of anything faster than CityHash32 that
+// is of comparable quality. We believe our nearest competitor is Murmur3A.
+// (On 64-bit CPUs, it is typically faster to use the other CityHash variants.)
+//
+// Functions in the CityHash family are not suitable for cryptography.
+//
+// Please see CityHash's README file for more details on our performance
+// measurements and so on.
+//
+// WARNING: This code has been only lightly tested on big-endian platforms!
+// It is known to work well on little-endian platforms that have a small penalty
+// for unaligned reads, such as current Intel and AMD moderate-to-high-end CPUs.
+// It should work on all 32-bit and 64-bit platforms that allow unaligned reads;
+// bug reports are welcome.
+//
+// By the way, for some hash functions, given strings a and b, the hash
+// of a+b is easily derived from the hashes of a and b. This property
+// doesn't hold for any hash functions in this file.
+
+#ifndef ABSL_HASH_INTERNAL_CITY_H_
+#define ABSL_HASH_INTERNAL_CITY_H_
+
+#include <stdint.h>
+#include <stdlib.h> // for size_t.
+
+#include <utility>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+// Hash function for a byte array.
+uint64_t CityHash64(const char *s, size_t len);
+
+// Hash function for a byte array. For convenience, a 64-bit seed is also
+// hashed into the result.
+uint64_t CityHash64WithSeed(const char *s, size_t len, uint64_t seed);
+
+// Hash function for a byte array. For convenience, two seeds are also
+// hashed into the result.
+uint64_t CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0,
+ uint64_t seed1);
+
+// Hash function for a byte array. Most useful in 32-bit binaries.
+uint32_t CityHash32(const char *s, size_t len);
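+
+// Example (a sketch; `data` is a hypothetical TString):
+//
+//   uint64_t h64 = y_absl::hash_internal::CityHash64(data.data(), data.size());
+//   uint32_t h32 = y_absl::hash_internal::CityHash32(data.data(), data.size());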
+
+} // namespace hash_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_HASH_INTERNAL_CITY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.cc
new file mode 100644
index 00000000000..fe075de43aa
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.cc
@@ -0,0 +1,69 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/hash/internal/hash.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+uint64_t MixingHashState::CombineLargeContiguousImpl32(
+ uint64_t state, const unsigned char* first, size_t len) {
+ while (len >= PiecewiseChunkSize()) {
+ state = Mix(state,
+ hash_internal::CityHash32(reinterpret_cast<const char*>(first),
+ PiecewiseChunkSize()));
+ len -= PiecewiseChunkSize();
+ first += PiecewiseChunkSize();
+ }
+ // Handle the remainder.
+ return CombineContiguousImpl(state, first, len,
+ std::integral_constant<int, 4>{});
+}
+
+uint64_t MixingHashState::CombineLargeContiguousImpl64(
+ uint64_t state, const unsigned char* first, size_t len) {
+ while (len >= PiecewiseChunkSize()) {
+ state = Mix(state, Hash64(first, PiecewiseChunkSize()));
+ len -= PiecewiseChunkSize();
+ first += PiecewiseChunkSize();
+ }
+ // Handle the remainder.
+ return CombineContiguousImpl(state, first, len,
+ std::integral_constant<int, 8>{});
+}
+
+ABSL_CONST_INIT const void* const MixingHashState::kSeed = &kSeed;
+
+// The salt array used by LowLevelHash. This array is NOT the mechanism used to
+// make y_absl::Hash non-deterministic between program invocations. See `Seed()`
+// for that mechanism.
+//
+// Any random values are fine. These values are just digits from the decimal
+// part of pi.
+// https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number
+constexpr uint64_t kHashSalt[5] = {
+ uint64_t{0x243F6A8885A308D3}, uint64_t{0x13198A2E03707344},
+ uint64_t{0xA4093822299F31D0}, uint64_t{0x082EFA98EC4E6C89},
+ uint64_t{0x452821E638D01377},
+};
+
+uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data,
+ size_t len) {
+ return LowLevelHash(data, len, Seed(), kHashSalt);
+}
+
+} // namespace hash_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h
new file mode 100644
index 00000000000..fcbe43accdc
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h
@@ -0,0 +1,1096 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: hash.h
+// -----------------------------------------------------------------------------
+//
+#ifndef ABSL_HASH_INTERNAL_HASH_H_
+#define ABSL_HASH_INTERNAL_HASH_H_
+
+#include <algorithm>
+#include <array>
+#include <bitset>
+#include <cmath>
+#include <cstring>
+#include <deque>
+#include <forward_list>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <list>
+#include <map>
+#include <memory>
+#include <set>
+#include <util/generic/string.h>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/unaligned_access.h"
+#include "y_absl/base/port.h"
+#include "y_absl/container/fixed_array.h"
+#include "y_absl/hash/internal/city.h"
+#include "y_absl/hash/internal/low_level_hash.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/numeric/int128.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/types/optional.h"
+#include "y_absl/types/variant.h"
+#include "y_absl/utility/utility.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+// Internal detail: Large buffers are hashed in smaller chunks. This function
+// returns the size of these chunks.
+constexpr size_t PiecewiseChunkSize() { return 1024; }
+
+// PiecewiseCombiner
+//
+// PiecewiseCombiner is an internal-only helper class for hashing a piecewise
+// buffer of `char` or `unsigned char` as though it were contiguous. This class
+// provides two methods:
+//
+// H add_buffer(state, data, size)
+// H finalize(state)
+//
+// `add_buffer` can be called zero or more times, followed by a single call to
+// `finalize`. This will produce the same hash expansion as concatenating each
+// buffer piece into a single contiguous buffer, and passing this to
+// `H::combine_contiguous`.
+//
+// Example usage:
+// PiecewiseCombiner combiner;
+// for (const auto& piece : pieces) {
+// state = combiner.add_buffer(std::move(state), piece.data, piece.size);
+// }
+// return combiner.finalize(std::move(state));
+class PiecewiseCombiner {
+ public:
+ PiecewiseCombiner() : position_(0) {}
+ PiecewiseCombiner(const PiecewiseCombiner&) = delete;
+ PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete;
+
+ // PiecewiseCombiner::add_buffer()
+ //
+ // Appends the given range of bytes to the sequence to be hashed, which may
+ // modify the provided hash state.
+ template <typename H>
+ H add_buffer(H state, const unsigned char* data, size_t size);
+ template <typename H>
+ H add_buffer(H state, const char* data, size_t size) {
+ return add_buffer(std::move(state),
+ reinterpret_cast<const unsigned char*>(data), size);
+ }
+
+ // PiecewiseCombiner::finalize()
+ //
+  // Finishes combining the hash sequence, which may modify the provided
+ // hash state.
+ //
+ // Once finalize() is called, add_buffer() may no longer be called. The
+ // resulting hash state will be the same as if the pieces passed to
+ // add_buffer() were concatenated into a single flat buffer, and then provided
+ // to H::combine_contiguous().
+ template <typename H>
+ H finalize(H state);
+
+ private:
+ unsigned char buf_[PiecewiseChunkSize()];
+ size_t position_;
+};
+
+// HashStateBase
+//
+// A hash state object represents an intermediate state in the computation
+// of an unspecified hash algorithm. `HashStateBase` provides a CRTP style
+// base class for hash state implementations. Developers adding type support
+// for `y_absl::Hash` should not rely on any parts of the state object other than
+// the following member functions:
+//
+// * HashStateBase::combine()
+// * HashStateBase::combine_contiguous()
+//
+// A derived hash state class of type `H` must provide a static member function
+// with a signature similar to the following:
+//
+// `static H combine_contiguous(H state, const unsigned char*, size_t)`.
+//
+// `HashStateBase` will provide a complete implementation for a hash state
+// object in terms of this method.
+//
+// Example:
+//
+// // Use CRTP to define your derived class.
+// struct MyHashState : HashStateBase<MyHashState> {
+// static H combine_contiguous(H state, const unsigned char*, size_t);
+// using MyHashState::HashStateBase::combine;
+// using MyHashState::HashStateBase::combine_contiguous;
+// };
+template <typename H>
+class HashStateBase {
+ public:
+ // HashStateBase::combine()
+ //
+ // Combines an arbitrary number of values into a hash state, returning the
+ // updated state.
+ //
+ // Each of the value types `T` must be separately hashable by the Abseil
+ // hashing framework.
+ //
+ // NOTE:
+ //
+ // state = H::combine(std::move(state), value1, value2, value3);
+ //
+ // is guaranteed to produce the same hash expansion as:
+ //
+ // state = H::combine(std::move(state), value1);
+ // state = H::combine(std::move(state), value2);
+ // state = H::combine(std::move(state), value3);
+ template <typename T, typename... Ts>
+ static H combine(H state, const T& value, const Ts&... values);
+ static H combine(H state) { return state; }
+
+ // HashStateBase::combine_contiguous()
+ //
+ // Combines a contiguous array of `size` elements into a hash state, returning
+ // the updated state.
+ //
+ // NOTE:
+ //
+ // state = H::combine_contiguous(std::move(state), data, size);
+ //
+ // is NOT guaranteed to produce the same hash expansion as a for-loop (it may
+ // perform internal optimizations). If you need this guarantee, use the
+ // for-loop instead.
+ template <typename T>
+ static H combine_contiguous(H state, const T* data, size_t size);
+
+ using AbslInternalPiecewiseCombiner = PiecewiseCombiner;
+};
+
+// is_uniquely_represented
+//
+// `is_uniquely_represented<T>` is a trait class that indicates whether `T`
+// is uniquely represented.
+//
+// A type is "uniquely represented" if two equal values of that type are
+// guaranteed to have the same bytes in their underlying storage. In other
+// words, if `a == b`, then `memcmp(&a, &b, sizeof(T))` is guaranteed to be
+// zero. This property cannot be detected automatically, so this trait is false
+// by default, but can be specialized by types that wish to assert that they are
+// uniquely represented. This makes them eligible for certain optimizations.
+//
+// If you have any doubt whatsoever, do not specialize this template.
+// The default is completely safe, and merely disables some optimizations
+// that will not matter for most types. Specializing this template,
+// on the other hand, can be very hazardous.
+//
+// To be uniquely represented, a type must not have multiple ways of
+// representing the same value; for example, float and double are not
+// uniquely represented, because they have distinct representations for
+// +0 and -0. Furthermore, the type's byte representation must consist
+// solely of user-controlled data, with no padding bits and no compiler-
+// controlled data such as vptrs or sanitizer metadata. This is usually
+// very difficult to guarantee, because in most cases the compiler can
+// insert data and padding bits at its own discretion.
+//
+// If you specialize this template for a type `T`, you must do so in the file
+// that defines that type (or in this file). If you define that specialization
+// anywhere else, `is_uniquely_represented<T>` could have different meanings
+// in different places.
+//
+// The Enable parameter is meaningless; it is provided as a convenience,
+// to support certain SFINAE techniques when defining specializations.
+template <typename T, typename Enable = void>
+struct is_uniquely_represented : std::false_type {};
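+
+// For illustration only: a hypothetical type with a provably padding-free,
+// canonical representation could opt in as below. Do not do this unless the
+// guarantees described above truly hold.
+//
+//   struct PackedId { uint64_t value; };
+//   template <>
+//   struct is_uniquely_represented<PackedId> : std::true_type {};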
+
+// is_uniquely_represented<unsigned char>
+//
+// unsigned char is a synonym for "byte", so it is guaranteed to be
+// uniquely represented.
+template <>
+struct is_uniquely_represented<unsigned char> : std::true_type {};
+
+// is_uniquely_represented for non-standard integral types
+//
+// Integral types other than bool should be uniquely represented on any
+// platform that this will plausibly be ported to.
+template <typename Integral>
+struct is_uniquely_represented<
+ Integral, typename std::enable_if<std::is_integral<Integral>::value>::type>
+ : std::true_type {};
+
+// is_uniquely_represented<bool>
+//
+// bool is not uniquely represented: only its low bit carries the value, and
+// the remaining bits of its object representation are unspecified.
+template <>
+struct is_uniquely_represented<bool> : std::false_type {};
+
+// hash_bytes()
+//
+// Convenience function that combines `hash_state` with the byte representation
+// of `value`.
+template <typename H, typename T>
+H hash_bytes(H hash_state, const T& value) {
+ const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);
+ return H::combine_contiguous(std::move(hash_state), start, sizeof(value));
+}
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for Basic Types
+// -----------------------------------------------------------------------------
+
+// Note: Default `AbslHashValue` implementations live in `hash_internal`. This
+// allows us to block lexical scope lookup when doing an unqualified call to
+// `AbslHashValue` below. User-defined implementations of `AbslHashValue` can
+// only be found via ADL.
+
+// AbslHashValue() for hashing bool values
+//
+// We use SFINAE to ensure that this overload only accepts bool, not types that
+// are convertible to bool.
+template <typename H, typename B>
+typename std::enable_if<std::is_same<B, bool>::value, H>::type AbslHashValue(
+ H hash_state, B value) {
+ return H::combine(std::move(hash_state),
+ static_cast<unsigned char>(value ? 1 : 0));
+}
+
+// AbslHashValue() for hashing enum values
+template <typename H, typename Enum>
+typename std::enable_if<std::is_enum<Enum>::value, H>::type AbslHashValue(
+ H hash_state, Enum e) {
+ // In practice, we could almost certainly just invoke hash_bytes directly,
+ // but it's possible that a sanitizer might one day want to
+ // store data in the unused bits of an enum. To avoid that risk, we
+ // convert to the underlying type before hashing. Hopefully this will get
+ // optimized away; if not, we can reopen discussion with c-toolchain-team.
+ return H::combine(std::move(hash_state),
+ static_cast<typename std::underlying_type<Enum>::type>(e));
+}
+// AbslHashValue() for hashing floating-point values
+template <typename H, typename Float>
+typename std::enable_if<std::is_same<Float, float>::value ||
+ std::is_same<Float, double>::value,
+ H>::type
+AbslHashValue(H hash_state, Float value) {
+ return hash_internal::hash_bytes(std::move(hash_state),
+ value == 0 ? 0 : value);
+}
+
+// Long double has the property that it might have extra unused bytes in it.
+// For example, on x86 sizeof(long double) == 16, but only 80 bits of it are
+// actually used. This means we can't use hash_bytes on a long double and have
+// to convert it to something else first.
+template <typename H, typename LongDouble>
+typename std::enable_if<std::is_same<LongDouble, long double>::value, H>::type
+AbslHashValue(H hash_state, LongDouble value) {
+ const int category = std::fpclassify(value);
+ switch (category) {
+ case FP_INFINITE:
+ // Add the sign bit to differentiate between +Inf and -Inf
+ hash_state = H::combine(std::move(hash_state), std::signbit(value));
+ break;
+
+ case FP_NAN:
+ case FP_ZERO:
+ default:
+ // Category is enough for these.
+ break;
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ // We can't convert `value` directly to double because this would have
+ // undefined behavior if the value is out of range.
+ // std::frexp gives us a value in the range (-1, -.5] or [.5, 1) that is
+ // guaranteed to be in range for `double`. The truncation is
+ // implementation defined, but that works as long as it is deterministic.
+ int exp;
+ auto mantissa = static_cast<double>(std::frexp(value, &exp));
+ hash_state = H::combine(std::move(hash_state), mantissa, exp);
+ }
+
+ return H::combine(std::move(hash_state), category);
+}
+
+// AbslHashValue() for hashing pointers
+template <typename H, typename T>
+H AbslHashValue(H hash_state, T* ptr) {
+ auto v = reinterpret_cast<uintptr_t>(ptr);
+ // Due to alignment, pointers tend to have low bits as zero, and the next few
+ // bits follow a pattern since they are also multiples of some base value.
+ // Mixing the pointer twice helps prevent stuck low bits for certain alignment
+ // values.
+ return H::combine(std::move(hash_state), v, v);
+}
+
+// AbslHashValue() for hashing nullptr_t
+template <typename H>
+H AbslHashValue(H hash_state, std::nullptr_t) {
+ return H::combine(std::move(hash_state), static_cast<void*>(nullptr));
+}
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for Composite Types
+// -----------------------------------------------------------------------------
+
+// is_hashable()
+//
+// Trait class which returns true if T is hashable by the y_absl::Hash framework.
+// Used for the AbslHashValue implementations for composite types below.
+template <typename T>
+struct is_hashable;
+
+// AbslHashValue() for hashing pairs
+template <typename H, typename T1, typename T2>
+typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,
+ H>::type
+AbslHashValue(H hash_state, const std::pair<T1, T2>& p) {
+ return H::combine(std::move(hash_state), p.first, p.second);
+}
+
+// hash_tuple()
+//
+// Helper function for hashing a tuple. The third argument should
+// be an index_sequence running from 0 to tuple_size<Tuple> - 1.
+template <typename H, typename Tuple, size_t... Is>
+H hash_tuple(H hash_state, const Tuple& t, y_absl::index_sequence<Is...>) {
+ return H::combine(std::move(hash_state), std::get<Is>(t)...);
+}
+
+// AbslHashValue for hashing tuples
+template <typename H, typename... Ts>
+#if defined(_MSC_VER)
+// This SFINAE gets MSVC confused under some conditions. Let's just disable it
+// for now.
+H
+#else // _MSC_VER
+typename std::enable_if<y_absl::conjunction<is_hashable<Ts>...>::value, H>::type
+#endif // _MSC_VER
+AbslHashValue(H hash_state, const std::tuple<Ts...>& t) {
+ return hash_internal::hash_tuple(std::move(hash_state), t,
+ y_absl::make_index_sequence<sizeof...(Ts)>());
+}
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for Pointers
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing unique_ptr
+template <typename H, typename T, typename D>
+H AbslHashValue(H hash_state, const std::unique_ptr<T, D>& ptr) {
+ return H::combine(std::move(hash_state), ptr.get());
+}
+
+// AbslHashValue for hashing shared_ptr
+template <typename H, typename T>
+H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) {
+ return H::combine(std::move(hash_state), ptr.get());
+}
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for String-Like Types
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing strings
+//
+// All the string-like types supported here provide the same hash expansion for
+// the same character sequence. These types are:
+//
+// - `y_absl::Cord`
+// - `TString` (and std::basic_string<char, std::char_traits<char>, A> for
+// any allocator A)
+// - `y_absl::string_view` and `std::string_view`
+//
+// For simplicity, we currently support only `char` strings. This support may
+// be broadened, if necessary, but with some caution - this overload would
+// misbehave in cases where the traits' `eq()` member isn't equivalent to `==`
+// on the underlying character type.
+template <typename H>
+H AbslHashValue(H hash_state, y_absl::string_view str) {
+ return H::combine(
+ H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
+ str.size());
+}
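+
+// As a consequence of the guarantee above, the following holds for any
+// character sequence (a sketch, using the public y_absl::Hash interface):
+//
+//   y_absl::Hash<y_absl::string_view>{}("abc") ==
+//       y_absl::Hash<TString>{}(TString("abc"))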
+
+// Support std::wstring, std::u16string and std::u32string.
+template <typename Char, typename Alloc, typename H,
+ typename = y_absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
+ std::is_same<Char, char16_t>::value ||
+ std::is_same<Char, char32_t>::value>>
+H AbslHashValue(
+ H hash_state,
+ const std::basic_string<Char, std::char_traits<Char>, Alloc>& str) {
+ return H::combine(
+ H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
+ str.size());
+}
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for Sequence Containers
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing std::array
+template <typename H, typename T, size_t N>
+typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
+ H hash_state, const std::array<T, N>& array) {
+ return H::combine_contiguous(std::move(hash_state), array.data(),
+ array.size());
+}
+
+// AbslHashValue for hashing std::deque
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
+ H hash_state, const std::deque<T, Allocator>& deque) {
+ // TODO(gromer): investigate a more efficient implementation taking
+ // advantage of the chunk structure.
+ for (const auto& t : deque) {
+ hash_state = H::combine(std::move(hash_state), t);
+ }
+ return H::combine(std::move(hash_state), deque.size());
+}
+
+// AbslHashValue for hashing std::forward_list
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
+ H hash_state, const std::forward_list<T, Allocator>& list) {
+ size_t size = 0;
+ for (const T& t : list) {
+ hash_state = H::combine(std::move(hash_state), t);
+ ++size;
+ }
+ return H::combine(std::move(hash_state), size);
+}
+
+// AbslHashValue for hashing std::list
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
+ H hash_state, const std::list<T, Allocator>& list) {
+ for (const auto& t : list) {
+ hash_state = H::combine(std::move(hash_state), t);
+ }
+ return H::combine(std::move(hash_state), list.size());
+}
+
+// AbslHashValue for hashing std::vector
+//
+// Do not use this for vector<bool> on platforms that have a working
+// implementation of std::hash. It does not have a .data(), and a fallback for
+// std::hash<> is most likely faster.
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,
+ H>::type
+AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
+ return H::combine(H::combine_contiguous(std::move(hash_state), vector.data(),
+ vector.size()),
+ vector.size());
+}
+
+#if defined(ABSL_IS_BIG_ENDIAN) && \
+ (defined(__GLIBCXX__) || defined(__GLIBCPP__))
+// AbslHashValue for hashing std::vector<bool>
+//
+// std::hash in libstdc++ does not work correctly with vector<bool> on Big
+// Endian platforms therefore we need to implement a custom AbslHashValue for
+// it. More details on the bug:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
+ H>::type
+AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
+ typename H::AbslInternalPiecewiseCombiner combiner;
+ for (const auto& i : vector) {
+ unsigned char c = static_cast<unsigned char>(i);
+ hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
+ }
+ return H::combine(combiner.finalize(std::move(hash_state)), vector.size());
+}
+#endif
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for Ordered Associative Containers
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing std::map
+template <typename H, typename Key, typename T, typename Compare,
+ typename Allocator>
+typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
+ H>::type
+AbslHashValue(H hash_state, const std::map<Key, T, Compare, Allocator>& map) {
+ for (const auto& t : map) {
+ hash_state = H::combine(std::move(hash_state), t);
+ }
+ return H::combine(std::move(hash_state), map.size());
+}
+
+// AbslHashValue for hashing std::multimap
+template <typename H, typename Key, typename T, typename Compare,
+ typename Allocator>
+typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
+ H>::type
+AbslHashValue(H hash_state,
+ const std::multimap<Key, T, Compare, Allocator>& map) {
+ for (const auto& t : map) {
+ hash_state = H::combine(std::move(hash_state), t);
+ }
+ return H::combine(std::move(hash_state), map.size());
+}
+
+// AbslHashValue for hashing std::set
+template <typename H, typename Key, typename Compare, typename Allocator>
+typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
+ H hash_state, const std::set<Key, Compare, Allocator>& set) {
+ for (const auto& t : set) {
+ hash_state = H::combine(std::move(hash_state), t);
+ }
+ return H::combine(std::move(hash_state), set.size());
+}
+
+// AbslHashValue for hashing std::multiset
+template <typename H, typename Key, typename Compare, typename Allocator>
+typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
+ H hash_state, const std::multiset<Key, Compare, Allocator>& set) {
+ for (const auto& t : set) {
+ hash_state = H::combine(std::move(hash_state), t);
+ }
+ return H::combine(std::move(hash_state), set.size());
+}
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for Wrapper Types
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing std::reference_wrapper
+template <typename H, typename T>
+typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
+ H hash_state, std::reference_wrapper<T> opt) {
+ return H::combine(std::move(hash_state), opt.get());
+}
+
+// AbslHashValue for hashing y_absl::optional
+template <typename H, typename T>
+typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
+ H hash_state, const y_absl::optional<T>& opt) {
+ if (opt) hash_state = H::combine(std::move(hash_state), *opt);
+ return H::combine(std::move(hash_state), opt.has_value());
+}
+
+// VariantVisitor
+template <typename H>
+struct VariantVisitor {
+ H&& hash_state;
+ template <typename T>
+ H operator()(const T& t) const {
+ return H::combine(std::move(hash_state), t);
+ }
+};
+
+// AbslHashValue for hashing y_absl::variant
+template <typename H, typename... T>
+typename std::enable_if<conjunction<is_hashable<T>...>::value, H>::type
+AbslHashValue(H hash_state, const y_absl::variant<T...>& v) {
+ if (!v.valueless_by_exception()) {
+ hash_state = y_absl::visit(VariantVisitor<H>{std::move(hash_state)}, v);
+ }
+ return H::combine(std::move(hash_state), v.index());
+}
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for Other Types
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing std::bitset is not defined on Little Endian
+// platforms, for the same reason as for vector<bool> (see std::vector above):
+// It does not expose the raw bytes, and a fallback to std::hash<> is most
+// likely faster.
+
+#if defined(ABSL_IS_BIG_ENDIAN) && \
+ (defined(__GLIBCXX__) || defined(__GLIBCPP__))
+// AbslHashValue for hashing std::bitset
+//
+// std::hash in libstdc++ does not work correctly with std::bitset on Big Endian
+// platforms therefore we need to implement a custom AbslHashValue for it. More
+// details on the bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
+template <typename H, size_t N>
+H AbslHashValue(H hash_state, const std::bitset<N>& set) {
+ typename H::AbslInternalPiecewiseCombiner combiner;
+  for (size_t i = 0; i < N; ++i) {
+ unsigned char c = static_cast<unsigned char>(set[i]);
+ hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
+ }
+ return H::combine(combiner.finalize(std::move(hash_state)), N);
+}
+#endif
+
+// -----------------------------------------------------------------------------
+
+// hash_range_or_bytes()
+//
+// Mixes all values in the range [data, data+size) into the hash state.
+// This overload accepts only uniquely-represented types, and hashes them by
+// hashing the entire range of bytes.
+template <typename H, typename T>
+typename std::enable_if<is_uniquely_represented<T>::value, H>::type
+hash_range_or_bytes(H hash_state, const T* data, size_t size) {
+ const auto* bytes = reinterpret_cast<const unsigned char*>(data);
+ return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size);
+}
+
+// hash_range_or_bytes()
+template <typename H, typename T>
+typename std::enable_if<!is_uniquely_represented<T>::value, H>::type
+hash_range_or_bytes(H hash_state, const T* data, size_t size) {
+ for (const auto end = data + size; data < end; ++data) {
+ hash_state = H::combine(std::move(hash_state), *data);
+ }
+ return hash_state;
+}
+
+#if defined(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \
+ ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
+#define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 1
+#else
+#define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 0
+#endif
+
+// HashSelect
+//
+// Type trait to select the appropriate hash implementation to use.
+// HashSelect::type<T> will give the proper hash implementation, to be invoked
+// as:
+// HashSelect::type<T>::Invoke(state, value)
+// Also, HashSelect::type<T>::value is a boolean equal to `true` if there is a
+// valid `Invoke` function. Types that are not hashable will have a ::value of
+// `false`.
+struct HashSelect {
+ private:
+ struct State : HashStateBase<State> {
+ static State combine_contiguous(State hash_state, const unsigned char*,
+ size_t);
+ using State::HashStateBase::combine_contiguous;
+ };
+
+ struct UniquelyRepresentedProbe {
+ template <typename H, typename T>
+ static auto Invoke(H state, const T& value)
+ -> y_absl::enable_if_t<is_uniquely_represented<T>::value, H> {
+ return hash_internal::hash_bytes(std::move(state), value);
+ }
+ };
+
+ struct HashValueProbe {
+ template <typename H, typename T>
+ static auto Invoke(H state, const T& value) -> y_absl::enable_if_t<
+ std::is_same<H,
+ decltype(AbslHashValue(std::move(state), value))>::value,
+ H> {
+ return AbslHashValue(std::move(state), value);
+ }
+ };
+
+ struct LegacyHashProbe {
+#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
+ template <typename H, typename T>
+ static auto Invoke(H state, const T& value) -> y_absl::enable_if_t<
+ std::is_convertible<
+ decltype(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>()(value)),
+ size_t>::value,
+ H> {
+ return hash_internal::hash_bytes(
+ std::move(state),
+ ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>{}(value));
+ }
+#endif // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
+ };
+
+ struct StdHashProbe {
+ template <typename H, typename T>
+ static auto Invoke(H state, const T& value)
+ -> y_absl::enable_if_t<type_traits_internal::IsHashable<T>::value, H> {
+ return hash_internal::hash_bytes(std::move(state), std::hash<T>{}(value));
+ }
+ };
+
+ template <typename Hash, typename T>
+ struct Probe : Hash {
+ private:
+ template <typename H, typename = decltype(H::Invoke(
+ std::declval<State>(), std::declval<const T&>()))>
+ static std::true_type Test(int);
+ template <typename U>
+ static std::false_type Test(char);
+
+ public:
+ static constexpr bool value = decltype(Test<Hash>(0))::value;
+ };
+
+ public:
+ // Probe each implementation in order.
+ // disjunction provides short circuiting wrt instantiation.
+ template <typename T>
+ using Apply = y_absl::disjunction< //
+ Probe<UniquelyRepresentedProbe, T>, //
+ Probe<HashValueProbe, T>, //
+ Probe<LegacyHashProbe, T>, //
+ Probe<StdHashProbe, T>, //
+ std::false_type>;
+};
+
+template <typename T>
+struct is_hashable
+ : std::integral_constant<bool, HashSelect::template Apply<T>::value> {};
+
+// MixingHashState
+class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
+ // y_absl::uint128 is not an alias or a thin wrapper around the intrinsic.
+ // We use the intrinsic when available to improve performance.
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ using uint128 = __uint128_t;
+#else // ABSL_HAVE_INTRINSIC_INT128
+ using uint128 = y_absl::uint128;
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
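+  // Multiplicative mixing constant used by Mix(): Murmur3's c1 on 32-bit
+  // targets, and CityHash's HashLen16 multiplier on 64-bit targets (both also
+  // appear in city.cc above).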
+ static constexpr uint64_t kMul =
+ sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51}
+ : uint64_t{0x9ddfea08eb382d69};
+
+ template <typename T>
+ using IntegralFastPath =
+ conjunction<std::is_integral<T>, is_uniquely_represented<T>>;
+
+ public:
+ // Move only
+ MixingHashState(MixingHashState&&) = default;
+ MixingHashState& operator=(MixingHashState&&) = default;
+
+ // MixingHashState::combine_contiguous()
+ //
+ // Fundamental base case for hash recursion: mixes the given range of bytes
+ // into the hash state.
+ static MixingHashState combine_contiguous(MixingHashState hash_state,
+ const unsigned char* first,
+ size_t size) {
+ return MixingHashState(
+ CombineContiguousImpl(hash_state.state_, first, size,
+ std::integral_constant<int, sizeof(size_t)>{}));
+ }
+ using MixingHashState::HashStateBase::combine_contiguous;
+
+ // MixingHashState::hash()
+ //
+ // For performance reasons in non-opt mode, we specialize this for
+ // integral types.
+ // Otherwise we would be instantiating and calling dozens of functions for
+  // something that is just one multiplication and a couple of XORs.
+ // The result should be the same as running the whole algorithm, but faster.
+ template <typename T, y_absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
+ static size_t hash(T value) {
+ return static_cast<size_t>(Mix(Seed(), static_cast<uint64_t>(value)));
+ }
+
+ // Overload of MixingHashState::hash()
+ template <typename T, y_absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
+ static size_t hash(const T& value) {
+ return static_cast<size_t>(combine(MixingHashState{}, value).state_);
+ }
+
+ private:
+ // Invoked only once for a given argument; that plus the fact that this is
+ // move-only ensures that there is only one non-moved-from object.
+ MixingHashState() : state_(Seed()) {}
+
+ // Workaround for MSVC bug.
+ // We make the type copyable to fix the calling convention, even though we
+ // never actually copy it. Keep it private to not affect the public API of the
+ // type.
+ MixingHashState(const MixingHashState&) = default;
+
+ explicit MixingHashState(uint64_t state) : state_(state) {}
+
+ // Implementation of the base case for combine_contiguous where we actually
+ // mix the bytes into the state.
+ // Dispatch to different implementations of the combine_contiguous depending
+ // on the value of `sizeof(size_t)`.
+ static uint64_t CombineContiguousImpl(uint64_t state,
+ const unsigned char* first, size_t len,
+ std::integral_constant<int, 4>
+ /* sizeof_size_t */);
+ static uint64_t CombineContiguousImpl(uint64_t state,
+ const unsigned char* first, size_t len,
+ std::integral_constant<int, 8>
+ /* sizeof_size_t */);
+
+ // Slow dispatch path for calls to CombineContiguousImpl with a size argument
+ // larger than PiecewiseChunkSize(). Has the same effect as calling
+ // CombineContiguousImpl() repeatedly with the chunk stride size.
+ static uint64_t CombineLargeContiguousImpl32(uint64_t state,
+ const unsigned char* first,
+ size_t len);
+ static uint64_t CombineLargeContiguousImpl64(uint64_t state,
+ const unsigned char* first,
+ size_t len);
+
+ // Reads 9 to 16 bytes from p.
+  // The least significant 8 bytes are in .first; the remaining bytes,
+  // zero-padded, are in .second.
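+  // The two 8-byte loads overlap when len < 16; the right shift discards the
+  // overlapping low bytes from the high word, leaving bytes [8, len) in
+  // .second, zero-extended.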
+ static std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
+ size_t len) {
+ uint64_t low_mem = y_absl::base_internal::UnalignedLoad64(p);
+ uint64_t high_mem = y_absl::base_internal::UnalignedLoad64(p + len - 8);
+#ifdef ABSL_IS_LITTLE_ENDIAN
+ uint64_t most_significant = high_mem;
+ uint64_t least_significant = low_mem;
+#else
+ uint64_t most_significant = low_mem;
+ uint64_t least_significant = high_mem;
+#endif
+ return {least_significant, most_significant >> (128 - len * 8)};
+ }
+
+ // Reads 4 to 8 bytes from p. Zero pads to fill uint64_t.
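+  // The two 4-byte loads may overlap when len < 8; each overlapping byte ends
+  // up at the same bit position in both words, so OR-ing them is harmless.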
+ static uint64_t Read4To8(const unsigned char* p, size_t len) {
+ uint32_t low_mem = y_absl::base_internal::UnalignedLoad32(p);
+ uint32_t high_mem = y_absl::base_internal::UnalignedLoad32(p + len - 4);
+#ifdef ABSL_IS_LITTLE_ENDIAN
+ uint32_t most_significant = high_mem;
+ uint32_t least_significant = low_mem;
+#else
+ uint32_t most_significant = low_mem;
+ uint32_t least_significant = high_mem;
+#endif
+ return (static_cast<uint64_t>(most_significant) << (len - 4) * 8) |
+ least_significant;
+ }
+
+ // Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
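+  // The three byte reads (first, middle, last) cover every byte for len <= 3,
+  // reading some bytes more than once when len < 3.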
+ static uint32_t Read1To3(const unsigned char* p, size_t len) {
+ unsigned char mem0 = p[0];
+ unsigned char mem1 = p[len / 2];
+ unsigned char mem2 = p[len - 1];
+#ifdef ABSL_IS_LITTLE_ENDIAN
+ unsigned char significant2 = mem2;
+ unsigned char significant1 = mem1;
+ unsigned char significant0 = mem0;
+#else
+ unsigned char significant2 = mem0;
+ unsigned char significant1 = mem1;
+ unsigned char significant0 = mem2;
+#endif
+ return static_cast<uint32_t>(significant0 | //
+ (significant1 << (len / 2 * 8)) | //
+ (significant2 << ((len - 1) * 8)));
+ }
+
+ ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) {
+#if defined(__aarch64__)
+ // On AArch64, calculating a 128-bit product is inefficient, because it
+ // requires a sequence of two instructions to calculate the upper and lower
+ // halves of the result.
+ using MultType = uint64_t;
+#else
+ using MultType =
+ y_absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
+#endif
+ // We do the addition in 64-bit space to make sure the 128-bit
+ // multiplication is fast. If we were to do it as MultType the compiler has
+ // to assume that the high word is non-zero and needs to perform 2
+ // multiplications instead of one.
+ MultType m = state + v;
+ m *= kMul;
+ return static_cast<uint64_t>(m ^ (m >> (sizeof(m) * 8 / 2)));
+ }
+
+ // An extern to avoid bloat on a direct call to LowLevelHash() with fixed
+ // values for both the seed and salt parameters.
+ static uint64_t LowLevelHashImpl(const unsigned char* data, size_t len);
+
+ ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data,
+ size_t len) {
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ return LowLevelHashImpl(data, len);
+#else
+ return hash_internal::CityHash64(reinterpret_cast<const char*>(data), len);
+#endif
+ }
+
+ // Seed()
+ //
+ // A non-deterministic seed.
+ //
+ // The current purpose of this seed is to generate non-deterministic results
+ // and prevent having users depend on the particular hash values.
+ // It is not meant as a security feature right now, but it leaves the door
+ // open to upgrade it to a true per-process random seed. A true random seed
+ // costs more and we don't need to pay for that right now.
+ //
+ // On platforms with ASLR, we take advantage of it to make a per-process
+ // random value.
+ // See https://en.wikipedia.org/wiki/Address_space_layout_randomization
+ //
+ // On other platforms this is still going to be non-deterministic but most
+ // probably per-build and not per-process.
+ ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() {
+#if (!defined(__clang__) || __clang_major__ > 11) && \
+ !defined(__apple_build_version__)
+ return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&kSeed));
+#else
+ // Workaround the absence of
+ // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021.
+ return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(kSeed));
+#endif
+ }
+ static const void* const kSeed;
+
+ uint64_t state_;
+};
+
+// MixingHashState::CombineContiguousImpl()
+inline uint64_t MixingHashState::CombineContiguousImpl(
+ uint64_t state, const unsigned char* first, size_t len,
+ std::integral_constant<int, 4> /* sizeof_size_t */) {
+ // For large values we use CityHash, for small ones we just use a
+ // multiplicative hash.
+ uint64_t v;
+ if (len > 8) {
+ if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
+ return CombineLargeContiguousImpl32(state, first, len);
+ }
+ v = hash_internal::CityHash32(reinterpret_cast<const char*>(first), len);
+ } else if (len >= 4) {
+ v = Read4To8(first, len);
+ } else if (len > 0) {
+ v = Read1To3(first, len);
+ } else {
+ // Empty ranges have no effect.
+ return state;
+ }
+ return Mix(state, v);
+}
+
+// Overload of MixingHashState::CombineContiguousImpl()
+inline uint64_t MixingHashState::CombineContiguousImpl(
+ uint64_t state, const unsigned char* first, size_t len,
+ std::integral_constant<int, 8> /* sizeof_size_t */) {
+ // For large values we use LowLevelHash or CityHash depending on the platform,
+ // for small ones we just use a multiplicative hash.
+ uint64_t v;
+ if (len > 16) {
+ if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
+ return CombineLargeContiguousImpl64(state, first, len);
+ }
+ v = Hash64(first, len);
+ } else if (len > 8) {
+ auto p = Read9To16(first, len);
+ state = Mix(state, p.first);
+ v = p.second;
+ } else if (len >= 4) {
+ v = Read4To8(first, len);
+ } else if (len > 0) {
+ v = Read1To3(first, len);
+ } else {
+ // Empty ranges have no effect.
+ return state;
+ }
+ return Mix(state, v);
+}
+
+struct AggregateBarrier {};
+
+// HashImpl
+
+// Add a private base class to make sure this type is not an aggregate.
+// Aggregates can be aggregate initialized even if the default constructor is
+// deleted.
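+// For example, without the base class, `PoisonedHash{}` would still compile
+// as aggregate initialization (before C++20) despite the deleted constructors.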
+struct PoisonedHash : private AggregateBarrier {
+ PoisonedHash() = delete;
+ PoisonedHash(const PoisonedHash&) = delete;
+ PoisonedHash& operator=(const PoisonedHash&) = delete;
+};
+
+template <typename T>
+struct HashImpl {
+ size_t operator()(const T& value) const {
+ return MixingHashState::hash(value);
+ }
+};
+
+template <typename T>
+struct Hash
+ : y_absl::conditional_t<is_hashable<T>::value, HashImpl<T>, PoisonedHash> {};
+
+template <typename H>
+template <typename T, typename... Ts>
+H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) {
+ return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(
+ std::move(state), value),
+ values...);
+}
+
+// HashStateBase::combine_contiguous()
+template <typename H>
+template <typename T>
+H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) {
+ return hash_internal::hash_range_or_bytes(std::move(state), data, size);
+}
+
+// HashStateBase::PiecewiseCombiner::add_buffer()
+template <typename H>
+H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,
+ size_t size) {
+ if (position_ + size < PiecewiseChunkSize()) {
+    // This partial chunk does not fill our existing buffer.
+ memcpy(buf_ + position_, data, size);
+ position_ += size;
+ return state;
+ }
+
+ // If the buffer is partially filled we need to complete the buffer
+ // and hash it.
+ if (position_ != 0) {
+ const size_t bytes_needed = PiecewiseChunkSize() - position_;
+ memcpy(buf_ + position_, data, bytes_needed);
+ state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize());
+ data += bytes_needed;
+ size -= bytes_needed;
+ }
+
+ // Hash whatever chunks we can without copying
+ while (size >= PiecewiseChunkSize()) {
+ state = H::combine_contiguous(std::move(state), data, PiecewiseChunkSize());
+ data += PiecewiseChunkSize();
+ size -= PiecewiseChunkSize();
+ }
+ // Fill the buffer with the remainder
+ memcpy(buf_, data, size);
+ position_ = size;
+ return state;
+}
+
+// HashStateBase::PiecewiseCombiner::finalize()
+template <typename H>
+H PiecewiseCombiner::finalize(H state) {
+ // Hash the remainder left in the buffer, which may be empty
+ return H::combine_contiguous(std::move(state), buf_, position_);
+}
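+
+// For illustration, the intended driver loop for the combiner above (`H`,
+// `state`, and `pieces` are assumed here, and `piece.data()` is assumed to
+// yield `const unsigned char*`):
+//
+//   PiecewiseCombiner combiner;
+//   for (const auto& piece : pieces) {
+//     state = combiner.add_buffer(std::move(state), piece.data(),
+//                                 piece.size());
+//   }
+//   return combiner.finalize(std::move(state));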
+
+} // namespace hash_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_HASH_INTERNAL_HASH_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.cc
new file mode 100644
index 00000000000..08b6dd85d40
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.cc
@@ -0,0 +1,123 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/hash/internal/low_level_hash.h"
+
+#include "y_absl/base/internal/unaligned_access.h"
+#include "y_absl/numeric/bits.h"
+#include "y_absl/numeric/int128.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+static uint64_t Mix(uint64_t v0, uint64_t v1) {
+#if !defined(__aarch64__)
+ // The default bit-mixer uses 64x64->128-bit multiplication.
+ y_absl::uint128 p = v0;
+ p *= v1;
+ return y_absl::Uint128Low64(p) ^ y_absl::Uint128High64(p);
+#else
+ // The default bit-mixer above would perform poorly on some ARM microarchs,
+ // where calculating a 128-bit product requires a sequence of two
+ // instructions with a high combined latency and poor throughput.
+ // Instead, we mix bits using only 64-bit arithmetic, which is faster.
+ uint64_t p = v0 ^ y_absl::rotl(v1, 40);
+ p *= v1 ^ y_absl::rotl(v0, 39);
+ return p ^ (p >> 11);
+#endif
+}
+
+uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
+ const uint64_t salt[]) {
+ const uint8_t* ptr = static_cast<const uint8_t*>(data);
+ uint64_t starting_length = static_cast<uint64_t>(len);
+ uint64_t current_state = seed ^ salt[0];
+
+ if (len > 64) {
+ // If we have more than 64 bytes, we're going to handle chunks of 64
+ // bytes at a time. We're going to build up two separate hash states
+ // which we will then hash together.
+ uint64_t duplicated_state = current_state;
+
+ do {
+ uint64_t a = y_absl::base_internal::UnalignedLoad64(ptr);
+ uint64_t b = y_absl::base_internal::UnalignedLoad64(ptr + 8);
+ uint64_t c = y_absl::base_internal::UnalignedLoad64(ptr + 16);
+ uint64_t d = y_absl::base_internal::UnalignedLoad64(ptr + 24);
+ uint64_t e = y_absl::base_internal::UnalignedLoad64(ptr + 32);
+ uint64_t f = y_absl::base_internal::UnalignedLoad64(ptr + 40);
+ uint64_t g = y_absl::base_internal::UnalignedLoad64(ptr + 48);
+ uint64_t h = y_absl::base_internal::UnalignedLoad64(ptr + 56);
+
+ uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
+ uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
+ current_state = (cs0 ^ cs1);
+
+ uint64_t ds0 = Mix(e ^ salt[3], f ^ duplicated_state);
+ uint64_t ds1 = Mix(g ^ salt[4], h ^ duplicated_state);
+ duplicated_state = (ds0 ^ ds1);
+
+ ptr += 64;
+ len -= 64;
+ } while (len > 64);
+
+ current_state = current_state ^ duplicated_state;
+ }
+
+  // `ptr` now points to at most 64 bytes of data, and the current state of
+  // the hashing state machine is stored in `current_state`.
+ while (len > 16) {
+ uint64_t a = y_absl::base_internal::UnalignedLoad64(ptr);
+ uint64_t b = y_absl::base_internal::UnalignedLoad64(ptr + 8);
+
+ current_state = Mix(a ^ salt[1], b ^ current_state);
+
+ ptr += 16;
+ len -= 16;
+ }
+
+  // `ptr` now points to at most 16 bytes of data.
+ uint64_t a = 0;
+ uint64_t b = 0;
+ if (len > 8) {
+ // When we have at least 9 and at most 16 bytes, set A to the first 64
+ // bits of the input and B to the last 64 bits of the input. Yes, they will
+ // overlap in the middle if we are working with less than the full 16
+ // bytes.
+ a = y_absl::base_internal::UnalignedLoad64(ptr);
+ b = y_absl::base_internal::UnalignedLoad64(ptr + len - 8);
+ } else if (len > 3) {
+ // If we have at least 4 and at most 8 bytes, set A to the first 32
+ // bits and B to the last 32 bits.
+ a = y_absl::base_internal::UnalignedLoad32(ptr);
+ b = y_absl::base_internal::UnalignedLoad32(ptr + len - 4);
+ } else if (len > 0) {
+    // If we have at least 1 and at most 3 bytes, pack the first, middle, and
+    // last bytes into A (for len < 3 some of these reads hit the same byte).
+ a = ((ptr[0] << 16) | (ptr[len >> 1] << 8) | ptr[len - 1]);
+ b = 0;
+ } else {
+ a = 0;
+ b = 0;
+ }
+
+ uint64_t w = Mix(a ^ salt[1], b ^ current_state);
+ uint64_t z = salt[1] ^ starting_length;
+ return Mix(w, z);
+}
+
+} // namespace hash_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.h b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.h
new file mode 100644
index 00000000000..4a71ab94180
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.h
@@ -0,0 +1,50 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file provides the Google-internal implementation of LowLevelHash.
+//
+// LowLevelHash is a fast hash function for hash tables, the fastest we've
+// currently (late 2020) found that passes the SMHasher tests. The algorithm
+// relies on intrinsic 128-bit multiplication for speed. This is not meant to be
+// secure - just fast.
+//
+// It is closely based on a version of wyhash, but does not maintain or
+// guarantee future compatibility with it.
+
+#ifndef ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_
+#define ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+// Hash function for a byte array. A 64-bit seed and a set of five 64-bit
+// integers are hashed into the result.
+//
+// To allow all hashable types (including string_view and Span) to depend on
+// this algorithm, we keep the API low-level, with as few dependencies as
+// possible.
+uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
+ const uint64_t salt[5]);
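+
+// A minimal usage sketch (illustrative only; `buf`, `buf_len`, and the salt
+// values below are placeholders, not the salts used by y_absl::Hash):
+//
+//   static constexpr uint64_t kIllustrativeSalt[5] = {1, 2, 3, 4, 5};
+//   uint64_t h = LowLevelHash(buf, buf_len, /*seed=*/0, kIllustrativeSalt);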
+
+} // namespace hash_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/spy_hash_state.h b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/spy_hash_state.h
new file mode 100644
index 00000000000..520cbf904f6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/spy_hash_state.h
@@ -0,0 +1,231 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
+#define ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
+
+#include <ostream>
+#include <util/generic/string.h>
+#include <vector>
+
+#include "y_absl/hash/hash.h"
+#include "y_absl/strings/match.h"
+#include "y_absl/strings/str_format.h"
+#include "y_absl/strings/str_join.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+// SpyHashState is an implementation of the HashState API that simply
+// accumulates all input bytes in an internal buffer. This makes it useful
+// for testing AbslHashValue overloads (so long as they are templated on the
+// HashState parameter), since it can report the exact hash representation
+// that the AbslHashValue overload produces.
+//
+// Sample usage:
+// EXPECT_EQ(SpyHashState::combine(SpyHashState(), foo),
+// SpyHashState::combine(SpyHashState(), bar));
+template <typename T>
+class SpyHashStateImpl : public HashStateBase<SpyHashStateImpl<T>> {
+ public:
+ SpyHashStateImpl() : error_(std::make_shared<y_absl::optional<TString>>()) {
+ static_assert(std::is_void<T>::value, "");
+ }
+
+ // Move-only
+ SpyHashStateImpl(const SpyHashStateImpl&) = delete;
+ SpyHashStateImpl& operator=(const SpyHashStateImpl&) = delete;
+
+ SpyHashStateImpl(SpyHashStateImpl&& other) noexcept {
+ *this = std::move(other);
+ }
+
+ SpyHashStateImpl& operator=(SpyHashStateImpl&& other) noexcept {
+ hash_representation_ = std::move(other.hash_representation_);
+ error_ = other.error_;
+ moved_from_ = other.moved_from_;
+ other.moved_from_ = true;
+ return *this;
+ }
+
+ template <typename U>
+ SpyHashStateImpl(SpyHashStateImpl<U>&& other) { // NOLINT
+ hash_representation_ = std::move(other.hash_representation_);
+ error_ = other.error_;
+ moved_from_ = other.moved_from_;
+ other.moved_from_ = true;
+ }
+
+ template <typename A, typename... Args>
+ static SpyHashStateImpl combine(SpyHashStateImpl s, const A& a,
+ const Args&... args) {
+ // Pass an instance of SpyHashStateImpl<A> when trying to combine `A`. This
+ // allows us to test that the user only uses this instance for combine calls
+ // and does not call AbslHashValue directly.
+ // See AbslHashValue implementation at the bottom.
+ s = SpyHashStateImpl<A>::HashStateBase::combine(std::move(s), a);
+ return SpyHashStateImpl::combine(std::move(s), args...);
+ }
+ static SpyHashStateImpl combine(SpyHashStateImpl s) {
+ if (direct_absl_hash_value_error_) {
+ *s.error_ = "AbslHashValue should not be invoked directly.";
+ } else if (s.moved_from_) {
+ *s.error_ = "Used moved-from instance of the hash state object.";
+ }
+ return s;
+ }
+
+ static void SetDirectAbslHashValueError() {
+ direct_absl_hash_value_error_ = true;
+ }
+
+ // Two SpyHashStateImpl objects are equal if they hold equal hash
+ // representations.
+ friend bool operator==(const SpyHashStateImpl& lhs,
+ const SpyHashStateImpl& rhs) {
+ return lhs.hash_representation_ == rhs.hash_representation_;
+ }
+
+ friend bool operator!=(const SpyHashStateImpl& lhs,
+ const SpyHashStateImpl& rhs) {
+ return !(lhs == rhs);
+ }
+
+ enum class CompareResult {
+ kEqual,
+ kASuffixB,
+ kBSuffixA,
+ kUnequal,
+ };
+
+ static CompareResult Compare(const SpyHashStateImpl& a,
+ const SpyHashStateImpl& b) {
+ const TString a_flat = y_absl::StrJoin(a.hash_representation_, "");
+ const TString b_flat = y_absl::StrJoin(b.hash_representation_, "");
+ if (a_flat == b_flat) return CompareResult::kEqual;
+ if (y_absl::EndsWith(a_flat, b_flat)) return CompareResult::kBSuffixA;
+ if (y_absl::EndsWith(b_flat, a_flat)) return CompareResult::kASuffixB;
+ return CompareResult::kUnequal;
+ }
+
+  // operator<< prints the hash representation as a hex dump to facilitate
+  // debugging.
+ friend std::ostream& operator<<(std::ostream& out,
+ const SpyHashStateImpl& hash_state) {
+ out << "[\n";
+ for (auto& s : hash_state.hash_representation_) {
+ size_t offset = 0;
+ for (char c : s) {
+ if (offset % 16 == 0) {
+ out << y_absl::StreamFormat("\n0x%04x: ", offset);
+ }
+ if (offset % 2 == 0) {
+ out << " ";
+ }
+ out << y_absl::StreamFormat("%02x", c);
+ ++offset;
+ }
+ out << "\n";
+ }
+ return out << "]";
+ }
+
+ // The base case of the combine recursion, which writes raw bytes into the
+ // internal buffer.
+ static SpyHashStateImpl combine_contiguous(SpyHashStateImpl hash_state,
+ const unsigned char* begin,
+ size_t size) {
+ const size_t large_chunk_stride = PiecewiseChunkSize();
+ if (size > large_chunk_stride) {
+ // Combining a large contiguous buffer must have the same effect as
+ // doing it piecewise by the stride length, followed by the (possibly
+ // empty) remainder.
+ while (size >= large_chunk_stride) {
+ hash_state = SpyHashStateImpl::combine_contiguous(
+ std::move(hash_state), begin, large_chunk_stride);
+ begin += large_chunk_stride;
+ size -= large_chunk_stride;
+ }
+ }
+
+ hash_state.hash_representation_.emplace_back(
+ reinterpret_cast<const char*>(begin), size);
+ return hash_state;
+ }
+
+ using SpyHashStateImpl::HashStateBase::combine_contiguous;
+
+ y_absl::optional<TString> error() const {
+ if (moved_from_) {
+ return "Returned a moved-from instance of the hash state object.";
+ }
+ return *error_;
+ }
+
+ private:
+ template <typename U>
+ friend class SpyHashStateImpl;
+
+ // This is true if SpyHashStateImpl<T> has been passed to a call of
+ // AbslHashValue with the wrong type. This detects that the user called
+ // AbslHashValue directly (because the hash state type does not match).
+ static bool direct_absl_hash_value_error_;
+
+ std::vector<TString> hash_representation_;
+  // This is a shared_ptr because we want all instances within a particular
+  // SpyHashState run to share the field. This way we can set the error on a
+  // use-after-move and all the copies will see it.
+ std::shared_ptr<y_absl::optional<TString>> error_;
+ bool moved_from_ = false;
+};
+
+template <typename T>
+bool SpyHashStateImpl<T>::direct_absl_hash_value_error_;
+
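+// The two helpers below force a function to run during dynamic
+// initialization: RunOnStartup<f>::run is initialized to `(f(), true)`, and
+// the constexpr member kOdrUse odr-uses `run` so that this initialization is
+// not optimized away.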
+template <bool& B>
+struct OdrUse {
+ constexpr OdrUse() {}
+ bool& b = B;
+};
+
+template <void (*)()>
+struct RunOnStartup {
+ static bool run;
+ static constexpr OdrUse<run> kOdrUse{};
+};
+
+template <void (*f)()>
+bool RunOnStartup<f>::run = (f(), true);
+
+template <
+ typename T, typename U,
+    // Only trigger when (T != U),
+ typename = y_absl::enable_if_t<!std::is_same<T, U>::value>,
+ // This statement works in two ways:
+    // - First, it instantiates RunOnStartup and forces the initialization of
+    //   `run`, which sets the global variable.
+ // - Second, it triggers a SFINAE error disabling the overload to prevent
+ // compile time errors. If we didn't disable the overload we would get
+ // ambiguous overload errors, which we don't want.
+ int = RunOnStartup<SpyHashStateImpl<T>::SetDirectAbslHashValueError>::run>
+void AbslHashValue(SpyHashStateImpl<T>, const U&);
+
+using SpyHashState = SpyHashStateImpl<void>;
+
+} // namespace hash_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/ya.make
new file mode 100644
index 00000000000..7f3aae37518
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/ya.make
@@ -0,0 +1,32 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ low_level_hash.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/ya.make
new file mode 100644
index 00000000000..576811ee62f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/ya.make
@@ -0,0 +1,41 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/city
+ contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access
+ contrib/restricted/abseil-cpp-tstring/y_absl/types
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/internal
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ internal/hash.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/memory/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/memory/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..7be6b428485
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/memory/.yandex_meta/licenses.list.txt
@@ -0,0 +1,16 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/memory/memory.h b/contrib/restricted/abseil-cpp-tstring/y_absl/memory/memory.h
new file mode 100644
index 00000000000..134a614b33a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/memory/memory.h
@@ -0,0 +1,698 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: memory.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains utility functions for managing the creation and
+// conversion of smart pointers. This file is an extension to the C++
+// standard <memory> library header file.
+
+#ifndef ABSL_MEMORY_MEMORY_H_
+#define ABSL_MEMORY_MEMORY_H_
+
+#include <cstddef>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// -----------------------------------------------------------------------------
+// Function Template: WrapUnique()
+// -----------------------------------------------------------------------------
+//
+// Adopts ownership from a raw pointer and transfers it to the returned
+// `std::unique_ptr`, whose type is deduced. Because of this deduction, *do not*
+// specify the template type `T` when calling `WrapUnique`.
+//
+// Example:
+// X* NewX(int, int);
+// auto x = WrapUnique(NewX(1, 2)); // 'x' is std::unique_ptr<X>.
+//
+// Do not call WrapUnique with an explicit type, as in
+// `WrapUnique<X>(NewX(1, 2))`. The purpose of WrapUnique is to automatically
+// deduce the pointer type. If you wish to make the type explicit, just use
+// `std::unique_ptr` directly.
+//
+// auto x = std::unique_ptr<X>(NewX(1, 2));
+// - or -
+// std::unique_ptr<X> x(NewX(1, 2));
+//
+// While `y_absl::WrapUnique` is useful for capturing the output of a raw
+// pointer factory, prefer 'y_absl::make_unique<T>(args...)' over
+// 'y_absl::WrapUnique(new T(args...))'.
+//
+// auto x = WrapUnique(new X(1, 2)); // works, but nonideal.
+// auto x = make_unique<X>(1, 2); // safer, standard, avoids raw 'new'.
+//
+// Note that `y_absl::WrapUnique(p)` is valid only if `delete p` is a valid
+// expression. In particular, `y_absl::WrapUnique()` cannot wrap pointers to
+// arrays, functions or void, and it must not be used to capture pointers
+// obtained from array-new expressions (even though that would compile!).
+template <typename T>
+std::unique_ptr<T> WrapUnique(T* ptr) {
+ static_assert(!std::is_array<T>::value, "array types are unsupported");
+ static_assert(std::is_object<T>::value, "non-object types are unsupported");
+ return std::unique_ptr<T>(ptr);
+}
+
+namespace memory_internal {
+
+// Traits to select proper overload and return type for `y_absl::make_unique<>`.
+template <typename T>
+struct MakeUniqueResult {
+ using scalar = std::unique_ptr<T>;
+};
+template <typename T>
+struct MakeUniqueResult<T[]> {
+ using array = std::unique_ptr<T[]>;
+};
+template <typename T, size_t N>
+struct MakeUniqueResult<T[N]> {
+ using invalid = void;
+};
+
+} // namespace memory_internal
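+
+// For illustration, the traits above route each spelling of make_unique
+// (defined below) to a different overload:
+//
+//   y_absl::make_unique<Foo>(a, b);  // MakeUniqueResult<Foo>::scalar
+//   y_absl::make_unique<Foo[]>(n);   // MakeUniqueResult<Foo[]>::array
+//   y_absl::make_unique<Foo[5]>();   // MakeUniqueResult<Foo[5]>::invalid,
+//                                    // which is deleted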
+
+// gcc 4.8 has __cplusplus at 201301 but the libstdc++ shipped with it doesn't
+// define make_unique. Other supported compilers either just define __cplusplus
+// as 201103 but have make_unique (msvc), or have make_unique whenever
+// __cplusplus > 201103 (clang).
+#if defined(__cpp_lib_make_unique)
+using std::make_unique;
+#else
+// -----------------------------------------------------------------------------
+// Function Template: make_unique<T>()
+// -----------------------------------------------------------------------------
+//
+// Creates a `std::unique_ptr<>`, while avoiding issues creating temporaries
+// during the construction process. `y_absl::make_unique<>` also avoids redundant
+// type declarations, by avoiding the need to explicitly use the `new` operator.
+//
+// This implementation of `y_absl::make_unique<>` is designed for C++11 code and
+// will be replaced in C++14 by the equivalent `std::make_unique<>` abstraction.
+// `y_absl::make_unique<>` is designed to be 100% compatible with
+// `std::make_unique<>` so that the eventual migration will involve a simple
+// rename operation.
+//
+// For more background on why `std::unique_ptr<T>(new T(a,b))` is problematic,
+// see Herb Sutter's explanation on
+// [Exception-Safe Function Calls](https://herbsutter.com/gotw/_102/).
+// (In general, reviewers should treat `new T(a,b)` with scrutiny.)
+//
+// Example usage:
+//
+// auto p = make_unique<X>(args...); // 'p' is a std::unique_ptr<X>
+// auto pa = make_unique<X[]>(5); // 'pa' is a std::unique_ptr<X[]>
+//
+// Three overloads of `y_absl::make_unique` are required:
+//
+// - For non-array T:
+//
+//     Allocates a T with `new T(std::forward<Args>(args)...)`,
+// forwarding all `args` to T's constructor.
+// Returns a `std::unique_ptr<T>` owning that object.
+//
+// - For an array of unknown bounds T[]:
+//
+// `y_absl::make_unique<>` will allocate an array T of type U[] with
+// `new U[n]()` and return a `std::unique_ptr<U[]>` owning that array.
+//
+// Note that 'U[n]()' is different from 'U[n]', and elements will be
+// value-initialized. Note as well that `std::unique_ptr` will perform its
+// own destruction of the array elements upon leaving scope, even though
+// the array [] does not have a default destructor.
+//
+// NOTE: an array of unknown bounds T[] may still be (and often will be)
+//   initialized to have a size, and will still use this overload. E.g.:
+//
+// auto my_array = y_absl::make_unique<int[]>(10);
+//
+// - For an array of known bounds T[N]:
+//
+// `y_absl::make_unique<>` is deleted (like with `std::make_unique<>`) as
+// this overload is not useful.
+//
+// NOTE: an array of known bounds T[N] is not considered a useful
+//   construction, and may cause undefined behavior in templates. E.g.:
+//
+// auto my_array = y_absl::make_unique<int[10]>();
+//
+// In those cases, of course, you can still use the overload above and
+// simply initialize it to its desired size:
+//
+// auto my_array = y_absl::make_unique<int[]>(10);
+
+// `y_absl::make_unique` overload for non-array types.
+template <typename T, typename... Args>
+typename memory_internal::MakeUniqueResult<T>::scalar make_unique(
+ Args&&... args) {
+ return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+// `y_absl::make_unique` overload for an array T[] of unknown bounds.
+// The array allocation needs to use the `new T[size]` form and cannot take
+// element constructor arguments. The `std::unique_ptr` will manage destructing
+// these array elements.
+template <typename T>
+typename memory_internal::MakeUniqueResult<T>::array make_unique(size_t n) {
+ return std::unique_ptr<T>(new typename y_absl::remove_extent_t<T>[n]());
+}
+
+// `y_absl::make_unique` overload for an array T[N] of known bounds.
+// This construction will be rejected.
+template <typename T, typename... Args>
+typename memory_internal::MakeUniqueResult<T>::invalid make_unique(
+ Args&&... /* args */) = delete;
+#endif
+
+// -----------------------------------------------------------------------------
+// Function Template: RawPtr()
+// -----------------------------------------------------------------------------
+//
+// Extracts the raw pointer from a pointer-like value `ptr`. `y_absl::RawPtr` is
+// useful within templates that need to handle a complement of raw pointers,
+// `std::nullptr_t`, and smart pointers.
+template <typename T>
+auto RawPtr(T&& ptr) -> decltype(std::addressof(*ptr)) {
+ // ptr is a forwarding reference to support Ts with non-const operators.
+ return (ptr != nullptr) ? std::addressof(*ptr) : nullptr;
+}
+inline std::nullptr_t RawPtr(std::nullptr_t) { return nullptr; }
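+
+// For illustration (not part of the original header):
+//
+//   auto up = y_absl::make_unique<int>(1);
+//   int* p1 = y_absl::RawPtr(up);  // equivalent to up.get()
+//   int* p2 = y_absl::RawPtr(p1);  // raw pointers pass through
+//   std::nullptr_t n = y_absl::RawPtr(nullptr);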
+
+// -----------------------------------------------------------------------------
+// Function Template: ShareUniquePtr()
+// -----------------------------------------------------------------------------
+//
+// Adopts a `std::unique_ptr` rvalue and returns a `std::shared_ptr` of deduced
+// type. Ownership (if any) of the held value is transferred to the returned
+// shared pointer.
+//
+// Example:
+//
+// auto up = y_absl::make_unique<int>(10);
+// auto sp = y_absl::ShareUniquePtr(std::move(up)); // shared_ptr<int>
+// CHECK_EQ(*sp, 10);
+// CHECK(up == nullptr);
+//
+// Note that this conversion is correct even when T is an array type, and more
+// generally it works for *any* deleter of the `unique_ptr` (single-object
+// deleter, array deleter, or any custom deleter), since the deleter is adopted
+// by the shared pointer as well. The deleter is copied (unless it is a
+// reference).
+//
+// Implements the resolution of [LWG 2415](http://wg21.link/lwg2415), by which a
+// null shared pointer does not attempt to call the deleter.
+template <typename T, typename D>
+std::shared_ptr<T> ShareUniquePtr(std::unique_ptr<T, D>&& ptr) {
+ return ptr ? std::shared_ptr<T>(std::move(ptr)) : std::shared_ptr<T>();
+}
+
+// -----------------------------------------------------------------------------
+// Function Template: WeakenPtr()
+// -----------------------------------------------------------------------------
+//
+// Creates a weak pointer associated with a given shared pointer. The returned
+// value is a `std::weak_ptr` of deduced type.
+//
+// Example:
+//
+// auto sp = std::make_shared<int>(10);
+// auto wp = y_absl::WeakenPtr(sp);
+// CHECK_EQ(sp.get(), wp.lock().get());
+// sp.reset();
+// CHECK(wp.lock() == nullptr);
+//
+template <typename T>
+std::weak_ptr<T> WeakenPtr(const std::shared_ptr<T>& ptr) {
+ return std::weak_ptr<T>(ptr);
+}
+
+namespace memory_internal {
+
+// ExtractOr<E, O, D>::type evaluates to E<O> if possible. Otherwise, D.
+template <template <typename> class Extract, typename Obj, typename Default,
+ typename>
+struct ExtractOr {
+ using type = Default;
+};
+
+template <template <typename> class Extract, typename Obj, typename Default>
+struct ExtractOr<Extract, Obj, Default, void_t<Extract<Obj>>> {
+ using type = Extract<Obj>;
+};
+
+template <template <typename> class Extract, typename Obj, typename Default>
+using ExtractOrT = typename ExtractOr<Extract, Obj, Default, void>::type;
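+
+// For instance (illustrative): with the extractors below,
+// ExtractOrT<GetPointer, MyAlloc, T*> names MyAlloc::pointer when that
+// nested type exists, and T* otherwise.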
+
+// Extractors for the features of allocators.
+template <typename T>
+using GetPointer = typename T::pointer;
+
+template <typename T>
+using GetConstPointer = typename T::const_pointer;
+
+template <typename T>
+using GetVoidPointer = typename T::void_pointer;
+
+template <typename T>
+using GetConstVoidPointer = typename T::const_void_pointer;
+
+template <typename T>
+using GetDifferenceType = typename T::difference_type;
+
+template <typename T>
+using GetSizeType = typename T::size_type;
+
+template <typename T>
+using GetPropagateOnContainerCopyAssignment =
+ typename T::propagate_on_container_copy_assignment;
+
+template <typename T>
+using GetPropagateOnContainerMoveAssignment =
+ typename T::propagate_on_container_move_assignment;
+
+template <typename T>
+using GetPropagateOnContainerSwap = typename T::propagate_on_container_swap;
+
+template <typename T>
+using GetIsAlwaysEqual = typename T::is_always_equal;
+
+template <typename T>
+struct GetFirstArg;
+
+template <template <typename...> class Class, typename T, typename... Args>
+struct GetFirstArg<Class<T, Args...>> {
+ using type = T;
+};
+
+template <typename Ptr, typename = void>
+struct ElementType {
+ using type = typename GetFirstArg<Ptr>::type;
+};
+
+template <typename T>
+struct ElementType<T, void_t<typename T::element_type>> {
+ using type = typename T::element_type;
+};
+
+template <typename T, typename U>
+struct RebindFirstArg;
+
+template <template <typename...> class Class, typename T, typename... Args,
+ typename U>
+struct RebindFirstArg<Class<T, Args...>, U> {
+ using type = Class<U, Args...>;
+};
+
+template <typename T, typename U, typename = void>
+struct RebindPtr {
+ using type = typename RebindFirstArg<T, U>::type;
+};
+
+template <typename T, typename U>
+struct RebindPtr<T, U, void_t<typename T::template rebind<U>>> {
+ using type = typename T::template rebind<U>;
+};
+
+template <typename T, typename U>
+constexpr bool HasRebindAlloc(...) {
+ return false;
+}
+
+template <typename T, typename U>
+constexpr bool HasRebindAlloc(typename T::template rebind<U>::other*) {
+ return true;
+}
+
+template <typename T, typename U, bool = HasRebindAlloc<T, U>(nullptr)>
+struct RebindAlloc {
+ using type = typename RebindFirstArg<T, U>::type;
+};
+
+template <typename T, typename U>
+struct RebindAlloc<T, U, true> {
+ using type = typename T::template rebind<U>::other;
+};
+
+} // namespace memory_internal
+
+// -----------------------------------------------------------------------------
+// Class Template: pointer_traits
+// -----------------------------------------------------------------------------
+//
+// An implementation of C++11's std::pointer_traits.
+//
+// Provided for portability on toolchains that have a working C++11 compiler
+// but a standard library lacking C++11 support, such as some versions of the
+// Android NDK.
+//
+
+template <typename Ptr>
+struct pointer_traits {
+ using pointer = Ptr;
+
+ // element_type:
+ // Ptr::element_type if present. Otherwise T if Ptr is a template
+ // instantiation Template<T, Args...>
+ using element_type = typename memory_internal::ElementType<Ptr>::type;
+
+ // difference_type:
+ // Ptr::difference_type if present, otherwise std::ptrdiff_t
+ using difference_type =
+ memory_internal::ExtractOrT<memory_internal::GetDifferenceType, Ptr,
+ std::ptrdiff_t>;
+
+ // rebind:
+ // Ptr::rebind<U> if exists, otherwise Template<U, Args...> if Ptr is a
+ // template instantiation Template<T, Args...>
+ template <typename U>
+ using rebind = typename memory_internal::RebindPtr<Ptr, U>::type;
+
+ // pointer_to:
+ // Calls Ptr::pointer_to(r)
+ static pointer pointer_to(element_type& r) { // NOLINT(runtime/references)
+ return Ptr::pointer_to(r);
+ }
+};
+
+// Specialization for T*.
+template <typename T>
+struct pointer_traits<T*> {
+ using pointer = T*;
+ using element_type = T;
+ using difference_type = std::ptrdiff_t;
+
+ template <typename U>
+ using rebind = U*;
+
+ // pointer_to:
+ // Calls std::addressof(r)
+ static pointer pointer_to(
+ element_type& r) noexcept { // NOLINT(runtime/references)
+ return std::addressof(r);
+ }
+};
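+
+// A few illustrative consequences of the specialization above:
+//
+//   static_assert(std::is_same<
+//       y_absl::pointer_traits<int*>::element_type, int>::value, "");
+//   static_assert(std::is_same<
+//       y_absl::pointer_traits<int*>::rebind<long>, long*>::value, "");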
+
+// -----------------------------------------------------------------------------
+// Class Template: allocator_traits
+// -----------------------------------------------------------------------------
+//
+// A C++11 compatible implementation of C++17's std::allocator_traits.
+//
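+// A minimal usage sketch (illustrative; `MyAlloc` stands for a hypothetical
+// allocator providing only `value_type = int`, allocate(), and deallocate()):
+//
+//   MyAlloc a;
+//   using Traits = y_absl::allocator_traits<MyAlloc>;
+//   int* p = Traits::allocate(a, 1);  // calls a.allocate(1)
+//   Traits::construct(a, p, 42);      // placement-new, as MyAlloc has no
+//                                     // construct()
+//   Traits::destroy(a, p);            // destructor call, as MyAlloc has no
+//                                     // destroy()
+//   Traits::deallocate(a, p, 1);      // calls a.deallocate(p, 1)
+//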
+#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
+using std::allocator_traits;
+#else // __cplusplus >= 201703L
+template <typename Alloc>
+struct allocator_traits {
+ using allocator_type = Alloc;
+
+ // value_type:
+ // Alloc::value_type
+ using value_type = typename Alloc::value_type;
+
+ // pointer:
+ // Alloc::pointer if present, otherwise value_type*
+ using pointer = memory_internal::ExtractOrT<memory_internal::GetPointer,
+ Alloc, value_type*>;
+
+ // const_pointer:
+ // Alloc::const_pointer if present, otherwise
+ // y_absl::pointer_traits<pointer>::rebind<const value_type>
+ using const_pointer =
+ memory_internal::ExtractOrT<memory_internal::GetConstPointer, Alloc,
+ typename y_absl::pointer_traits<pointer>::
+ template rebind<const value_type>>;
+
+ // void_pointer:
+ // Alloc::void_pointer if present, otherwise
+ // y_absl::pointer_traits<pointer>::rebind<void>
+ using void_pointer = memory_internal::ExtractOrT<
+ memory_internal::GetVoidPointer, Alloc,
+ typename y_absl::pointer_traits<pointer>::template rebind<void>>;
+
+ // const_void_pointer:
+ // Alloc::const_void_pointer if present, otherwise
+ // y_absl::pointer_traits<pointer>::rebind<const void>
+ using const_void_pointer = memory_internal::ExtractOrT<
+ memory_internal::GetConstVoidPointer, Alloc,
+ typename y_absl::pointer_traits<pointer>::template rebind<const void>>;
+
+ // difference_type:
+ // Alloc::difference_type if present, otherwise
+ // y_absl::pointer_traits<pointer>::difference_type
+ using difference_type = memory_internal::ExtractOrT<
+ memory_internal::GetDifferenceType, Alloc,
+ typename y_absl::pointer_traits<pointer>::difference_type>;
+
+ // size_type:
+ // Alloc::size_type if present, otherwise
+ // std::make_unsigned<difference_type>::type
+ using size_type = memory_internal::ExtractOrT<
+ memory_internal::GetSizeType, Alloc,
+ typename std::make_unsigned<difference_type>::type>;
+
+ // propagate_on_container_copy_assignment:
+ // Alloc::propagate_on_container_copy_assignment if present, otherwise
+ // std::false_type
+ using propagate_on_container_copy_assignment = memory_internal::ExtractOrT<
+ memory_internal::GetPropagateOnContainerCopyAssignment, Alloc,
+ std::false_type>;
+
+ // propagate_on_container_move_assignment:
+ // Alloc::propagate_on_container_move_assignment if present, otherwise
+ // std::false_type
+ using propagate_on_container_move_assignment = memory_internal::ExtractOrT<
+ memory_internal::GetPropagateOnContainerMoveAssignment, Alloc,
+ std::false_type>;
+
+ // propagate_on_container_swap:
+ // Alloc::propagate_on_container_swap if present, otherwise std::false_type
+ using propagate_on_container_swap =
+ memory_internal::ExtractOrT<memory_internal::GetPropagateOnContainerSwap,
+ Alloc, std::false_type>;
+
+ // is_always_equal:
+ // Alloc::is_always_equal if present, otherwise std::is_empty<Alloc>::type
+ using is_always_equal =
+ memory_internal::ExtractOrT<memory_internal::GetIsAlwaysEqual, Alloc,
+ typename std::is_empty<Alloc>::type>;
+
+ // rebind_alloc:
+ // Alloc::rebind<T>::other if present, otherwise Alloc<T, Args> if this Alloc
+ // is Alloc<U, Args>
+ template <typename T>
+ using rebind_alloc = typename memory_internal::RebindAlloc<Alloc, T>::type;
+
+ // rebind_traits:
+ // y_absl::allocator_traits<rebind_alloc<T>>
+ template <typename T>
+ using rebind_traits = y_absl::allocator_traits<rebind_alloc<T>>;
+
+ // allocate(Alloc& a, size_type n):
+ // Calls a.allocate(n)
+ static pointer allocate(Alloc& a, // NOLINT(runtime/references)
+ size_type n) {
+ return a.allocate(n);
+ }
+
+ // allocate(Alloc& a, size_type n, const_void_pointer hint):
+ // Calls a.allocate(n, hint) if possible.
+ // If not possible, calls a.allocate(n)
+ static pointer allocate(Alloc& a, size_type n, // NOLINT(runtime/references)
+ const_void_pointer hint) {
+ return allocate_impl(0, a, n, hint);
+ }
+
+ // deallocate(Alloc& a, pointer p, size_type n):
+ // Calls a.deallocate(p, n)
+ static void deallocate(Alloc& a, pointer p, // NOLINT(runtime/references)
+ size_type n) {
+ a.deallocate(p, n);
+ }
+
+ // construct(Alloc& a, T* p, Args&&... args):
+ // Calls a.construct(p, std::forward<Args>(args)...) if possible.
+ // If not possible, calls
+ // ::new (static_cast<void*>(p)) T(std::forward<Args>(args)...)
+ template <typename T, typename... Args>
+ static void construct(Alloc& a, T* p, // NOLINT(runtime/references)
+ Args&&... args) {
+ construct_impl(0, a, p, std::forward<Args>(args)...);
+ }
+
+ // destroy(Alloc& a, T* p):
+ // Calls a.destroy(p) if possible. If not possible, calls p->~T().
+ template <typename T>
+ static void destroy(Alloc& a, T* p) { // NOLINT(runtime/references)
+ destroy_impl(0, a, p);
+ }
+
+ // max_size(const Alloc& a):
+ // Returns a.max_size() if possible. If not possible, returns
+ // std::numeric_limits<size_type>::max() / sizeof(value_type)
+ static size_type max_size(const Alloc& a) { return max_size_impl(0, a); }
+
+ // select_on_container_copy_construction(const Alloc& a):
+ // Returns a.select_on_container_copy_construction() if possible.
+ // If not possible, returns a.
+ static Alloc select_on_container_copy_construction(const Alloc& a) {
+ return select_on_container_copy_construction_impl(0, a);
+ }
+
+ private:
+ template <typename A>
+ static auto allocate_impl(int, A& a, // NOLINT(runtime/references)
+ size_type n, const_void_pointer hint)
+ -> decltype(a.allocate(n, hint)) {
+ return a.allocate(n, hint);
+ }
+ static pointer allocate_impl(char, Alloc& a, // NOLINT(runtime/references)
+ size_type n, const_void_pointer) {
+ return a.allocate(n);
+ }
+
+ template <typename A, typename... Args>
+ static auto construct_impl(int, A& a, // NOLINT(runtime/references)
+ Args&&... args)
+ -> decltype(a.construct(std::forward<Args>(args)...)) {
+ a.construct(std::forward<Args>(args)...);
+ }
+
+ template <typename T, typename... Args>
+ static void construct_impl(char, Alloc&, T* p, Args&&... args) {
+ ::new (static_cast<void*>(p)) T(std::forward<Args>(args)...);
+ }
+
+ template <typename A, typename T>
+ static auto destroy_impl(int, A& a, // NOLINT(runtime/references)
+ T* p) -> decltype(a.destroy(p)) {
+ a.destroy(p);
+ }
+ template <typename T>
+ static void destroy_impl(char, Alloc&, T* p) {
+ p->~T();
+ }
+
+ template <typename A>
+ static auto max_size_impl(int, const A& a) -> decltype(a.max_size()) {
+ return a.max_size();
+ }
+ static size_type max_size_impl(char, const Alloc&) {
+ return (std::numeric_limits<size_type>::max)() / sizeof(value_type);
+ }
+
+ template <typename A>
+ static auto select_on_container_copy_construction_impl(int, const A& a)
+ -> decltype(a.select_on_container_copy_construction()) {
+ return a.select_on_container_copy_construction();
+ }
+ static Alloc select_on_container_copy_construction_impl(char,
+ const Alloc& a) {
+ return a;
+ }
+};
+#endif // __cplusplus >= 201703L
+
+namespace memory_internal {
+
+// This template alias transforms Alloc::is_nothrow into a metafunction with
+// Alloc as a parameter so it can be used with ExtractOrT<>.
+template <typename Alloc>
+using GetIsNothrow = typename Alloc::is_nothrow;
+
+} // namespace memory_internal
+
+// ABSL_ALLOCATOR_NOTHROW is a build-time configuration macro for users to
+// specify whether the default allocation function can throw or never throws.
+// If the allocation function never throws, users should define it to a
+// non-zero value (e.g. via `-DABSL_ALLOCATOR_NOTHROW`).
+// If the allocation function can throw, users should leave it undefined or
+// define it to zero.
+//
+// allocator_is_nothrow<Alloc> is a traits class that derives from
+// Alloc::is_nothrow if present, otherwise std::false_type. It's specialized
+// for Alloc = std::allocator<T> for any type T according to the state of
+// ABSL_ALLOCATOR_NOTHROW.
+//
+// default_allocator_is_nothrow is a class that derives from std::true_type
+// when the default allocator (global operator new) never throws, and
+// std::false_type when it can throw. It is a convenience shorthand for writing
+// allocator_is_nothrow<std::allocator<T>> (T can be any type).
+// NOTE: allocator_is_nothrow<std::allocator<T>> is guaranteed to derive from
+// the same type for all T, because users should specialize neither
+// allocator_is_nothrow nor std::allocator.
+template <typename Alloc>
+struct allocator_is_nothrow
+ : memory_internal::ExtractOrT<memory_internal::GetIsNothrow, Alloc,
+ std::false_type> {};
+
+#if defined(ABSL_ALLOCATOR_NOTHROW) && ABSL_ALLOCATOR_NOTHROW
+template <typename T>
+struct allocator_is_nothrow<std::allocator<T>> : std::true_type {};
+struct default_allocator_is_nothrow : std::true_type {};
+#else
+struct default_allocator_is_nothrow : std::false_type {};
+#endif
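+
+// For illustration: when built with `-DABSL_ALLOCATOR_NOTHROW=1`, both
+// y_absl::allocator_is_nothrow<std::allocator<int>>::value and
+// y_absl::default_allocator_is_nothrow::value are true; without the macro
+// both are false, since std::allocator does not define `is_nothrow`.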
+
+namespace memory_internal {
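+
+// Constructs each element of [first, last) from `args` via allocator_traits.
+// If any constructor throws, the elements constructed so far are destroyed
+// in reverse order before rethrowing, so the range is constructed either
+// entirely or not at all.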
+template <typename Allocator, typename Iterator, typename... Args>
+void ConstructRange(Allocator& alloc, Iterator first, Iterator last,
+ const Args&... args) {
+ for (Iterator cur = first; cur != last; ++cur) {
+ ABSL_INTERNAL_TRY {
+ std::allocator_traits<Allocator>::construct(alloc, std::addressof(*cur),
+ args...);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ while (cur != first) {
+ --cur;
+ std::allocator_traits<Allocator>::destroy(alloc, std::addressof(*cur));
+ }
+ ABSL_INTERNAL_RETHROW;
+ }
+ }
+}
+
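+// Copies [first, last) into the range starting at `destination` in the same
+// way: if any copy constructor throws, the elements already constructed are
+// destroyed in reverse order before rethrowing.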
+template <typename Allocator, typename Iterator, typename InputIterator>
+void CopyRange(Allocator& alloc, Iterator destination, InputIterator first,
+ InputIterator last) {
+ for (Iterator cur = destination; first != last;
+ static_cast<void>(++cur), static_cast<void>(++first)) {
+ ABSL_INTERNAL_TRY {
+ std::allocator_traits<Allocator>::construct(alloc, std::addressof(*cur),
+ *first);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ while (cur != destination) {
+ --cur;
+ std::allocator_traits<Allocator>::destroy(alloc, std::addressof(*cur));
+ }
+ ABSL_INTERNAL_RETHROW;
+ }
+ }
+}
+} // namespace memory_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_MEMORY_MEMORY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/memory/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/memory/ya.make
new file mode 100644
index 00000000000..87b37c9e1c9
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/memory/ya.make
@@ -0,0 +1,18 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/meta
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/meta/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/meta/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..7be6b428485
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/meta/.yandex_meta/licenses.list.txt
@@ -0,0 +1,16 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/meta/type_traits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/meta/type_traits.h
new file mode 100644
index 00000000000..eb9649060b7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/meta/type_traits.h
@@ -0,0 +1,797 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// type_traits.h
+// -----------------------------------------------------------------------------
+//
+// This file contains C++11-compatible versions of standard <type_traits> API
+// functions for determining the characteristics of types. Such traits can
+// support type inference, classification, and transformation, as well as
+// make it easier to write templates based on generic type behavior.
+//
+// See https://en.cppreference.com/w/cpp/header/type_traits
+//
+// WARNING: use of many of the constructs in this header will count as "complex
+// template metaprogramming", so before proceeding, please carefully consider
+// https://google.github.io/styleguide/cppguide.html#Template_metaprogramming
+//
+// WARNING: using template metaprogramming to detect or depend on API
+// features is brittle and not guaranteed. Neither the standard library nor
+// Abseil provides any guarantee that APIs are stable in the face of template
+// metaprogramming. Use with caution.
+#ifndef ABSL_META_TYPE_TRAITS_H_
+#define ABSL_META_TYPE_TRAITS_H_
+
+#include <cstddef>
+#include <functional>
+#include <type_traits>
+
+#include "y_absl/base/config.h"
+
+// MSVC constructibility traits do not detect destructor properties and so our
+// implementations should not use them as a source-of-truth.
+#if defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__)
+#define ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION 1
+#endif
+
+// Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17
+// feature.
+#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT __STDCPP_DEFAULT_NEW_ALIGNMENT__
+#else // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT alignof(std::max_align_t)
+#endif // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Defined and documented later on in this file.
+template <typename T>
+struct is_trivially_destructible;
+
+// Defined and documented later on in this file.
+template <typename T>
+struct is_trivially_move_assignable;
+
+namespace type_traits_internal {
+
+// Silence MSVC warnings about the destructor being defined as deleted.
+#if defined(_MSC_VER) && !defined(__GNUC__)
+#pragma warning(push)
+#pragma warning(disable : 4624)
+#endif // defined(_MSC_VER) && !defined(__GNUC__)
+
+template <class T>
+union SingleMemberUnion {
+ T t;
+};
+
+// Restore the state of the destructor warning that was silenced above.
+#if defined(_MSC_VER) && !defined(__GNUC__)
+#pragma warning(pop)
+#endif // defined(_MSC_VER) && !defined(__GNUC__)
+
+template <class T>
+struct IsTriviallyMoveConstructibleObject
+ : std::integral_constant<
+ bool, std::is_move_constructible<
+ type_traits_internal::SingleMemberUnion<T>>::value &&
+ y_absl::is_trivially_destructible<T>::value> {};
+
+template <class T>
+struct IsTriviallyCopyConstructibleObject
+ : std::integral_constant<
+ bool, std::is_copy_constructible<
+ type_traits_internal::SingleMemberUnion<T>>::value &&
+ y_absl::is_trivially_destructible<T>::value> {};
+
+template <class T>
+struct IsTriviallyMoveAssignableReference : std::false_type {};
+
+template <class T>
+struct IsTriviallyMoveAssignableReference<T&>
+ : y_absl::is_trivially_move_assignable<T>::type {};
+
+template <class T>
+struct IsTriviallyMoveAssignableReference<T&&>
+ : y_absl::is_trivially_move_assignable<T>::type {};
+
+template <typename... Ts>
+struct VoidTImpl {
+ using type = void;
+};
+
+// This trick to retrieve a default alignment is necessary for our
+// implementation of aligned_storage_t to be consistent with any implementation
+// of std::aligned_storage.
+template <size_t Len, typename T = std::aligned_storage<Len>>
+struct default_alignment_of_aligned_storage;
+
+template <size_t Len, size_t Align>
+struct default_alignment_of_aligned_storage<Len,
+ std::aligned_storage<Len, Align>> {
+ static constexpr size_t value = Align;
+};
+
+////////////////////////////////
+// Library Fundamentals V2 TS //
+////////////////////////////////
+
+// NOTE: The `is_detected` family of templates here differ from the library
+// fundamentals specification in that for library fundamentals, `Op<Args...>` is
+// evaluated as soon as the type `is_detected<Op, Args...>` undergoes
+// substitution, regardless of whether or not the `::value` is accessed. That
+// is inconsistent with all other standard traits and prevents lazy evaluation
+// in larger contexts (such as if the `is_detected` check is a trailing argument
+// of a `conjunction`). This implementation opts to instead be lazy in the same
+// way that the standard traits are (this "defect" of the detection idiom
+// specifications has been reported).
+
+template <class Enabler, template <class...> class Op, class... Args>
+struct is_detected_impl {
+ using type = std::false_type;
+};
+
+template <template <class...> class Op, class... Args>
+struct is_detected_impl<typename VoidTImpl<Op<Args...>>::type, Op, Args...> {
+ using type = std::true_type;
+};
+
+template <template <class...> class Op, class... Args>
+struct is_detected : is_detected_impl<void, Op, Args...>::type {};
+
+template <class Enabler, class To, template <class...> class Op, class... Args>
+struct is_detected_convertible_impl {
+ using type = std::false_type;
+};
+
+template <class To, template <class...> class Op, class... Args>
+struct is_detected_convertible_impl<
+ typename std::enable_if<std::is_convertible<Op<Args...>, To>::value>::type,
+ To, Op, Args...> {
+ using type = std::true_type;
+};
+
+template <class To, template <class...> class Op, class... Args>
+struct is_detected_convertible
+ : is_detected_convertible_impl<void, To, Op, Args...>::type {};
+
+template <typename T>
+using IsCopyAssignableImpl =
+ decltype(std::declval<T&>() = std::declval<const T&>());
+
+template <typename T>
+using IsMoveAssignableImpl = decltype(std::declval<T&>() = std::declval<T&&>());
+
+} // namespace type_traits_internal
+
+// MSVC 19.20 has a regression that causes our workarounds to fail, but their
+// std forms now appear to be compliant.
+#if defined(_MSC_VER) && !defined(__clang__) && (_MSC_VER >= 1920)
+
+template <typename T>
+using is_copy_assignable = std::is_copy_assignable<T>;
+
+template <typename T>
+using is_move_assignable = std::is_move_assignable<T>;
+
+#else
+
+template <typename T>
+struct is_copy_assignable : type_traits_internal::is_detected<
+ type_traits_internal::IsCopyAssignableImpl, T> {
+};
+
+template <typename T>
+struct is_move_assignable : type_traits_internal::is_detected<
+ type_traits_internal::IsMoveAssignableImpl, T> {
+};
+
+#endif
+
+// void_t()
+//
+// Ignores the types of its arguments and returns `void`. In general, this
+// metafunction allows you to create a general case that maps to `void` while
+// allowing specializations that map to specific types.
+//
+// This metafunction is designed to be a drop-in replacement for the C++17
+// `std::void_t` metafunction.
+//
+// NOTE: `y_absl::void_t` does not use the standard-specified implementation so
+// that it can remain compatible with gcc < 5.1. This can introduce slightly
+// different behavior, such as when ordering partial specializations.
+template <typename... Ts>
+using void_t = typename type_traits_internal::VoidTImpl<Ts...>::type;
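+
+// For illustration (a common detection-idiom use of void_t, not part of the
+// original header):
+//
+//   template <typename T, typename = y_absl::void_t<>>
+//   struct HasValueType : std::false_type {};
+//
+//   template <typename T>
+//   struct HasValueType<T, y_absl::void_t<typename T::value_type>>
+//       : std::true_type {};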
+
+// conjunction
+//
+// Performs a compile-time logical AND operation on the passed types (which
+// must have `::value` members convertible to `bool`). Short-circuits if it
+// encounters any `false` members (and does not compare the `::value` members
+// of any remaining arguments).
+//
+// This metafunction is designed to be a drop-in replacement for the C++17
+// `std::conjunction` metafunction.
+template <typename... Ts>
+struct conjunction : std::true_type {};
+
+template <typename T, typename... Ts>
+struct conjunction<T, Ts...>
+ : std::conditional<T::value, conjunction<Ts...>, T>::type {};
+
+template <typename T>
+struct conjunction<T> : T {};
+
+// disjunction
+//
+// Performs a compile-time logical OR operation on the passed types (which
+// must have `::value` members convertible to `bool`). Short-circuits if it
+// encounters any `true` members (and does not compare the `::value` members
+// of any remaining arguments).
+//
+// This metafunction is designed to be a drop-in replacement for the C++17
+// `std::disjunction` metafunction.
+template <typename... Ts>
+struct disjunction : std::false_type {};
+
+template <typename T, typename... Ts>
+struct disjunction<T, Ts...> :
+ std::conditional<T::value, T, disjunction<Ts...>>::type {};
+
+template <typename T>
+struct disjunction<T> : T {};
+
+// negation
+//
+// Performs a compile-time logical NOT operation on the passed type (which
+// must have a `::value` member convertible to `bool`).
+//
+// This metafunction is designed to be a drop-in replacement for the C++17
+// `std::negation` metafunction.
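+//
+// Example (illustrative):
+//
+//   static_assert(y_absl::negation<std::is_const<int>>::value,
+//                 "int is not const-qualified");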
+template <typename T>
+struct negation : std::integral_constant<bool, !T::value> {};
+
+// is_function()
+//
+// Determines whether the passed type `T` is a function type.
+//
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_function()` metafunction for platforms that have incomplete C++11
+// support (such as libstdc++ 4.x).
+//
+// This metafunction works because appending `const` to a type does nothing to
+// function types and reference types (and forms a const-qualified type
+// otherwise).
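+//
+// Example (illustrative):
+//
+//   static_assert(y_absl::is_function<int(const char*)>::value, "");
+//   static_assert(!y_absl::is_function<int>::value, "");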
+template <typename T>
+struct is_function
+ : std::integral_constant<
+ bool, !(std::is_reference<T>::value ||
+ std::is_const<typename std::add_const<T>::type>::value)> {};
+
+// is_trivially_destructible()
+//
+// Determines whether the passed type `T` is trivially destructible.
+//
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_destructible()` metafunction for platforms that have
+// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
+// fully support C++11, we check whether this yields the same result as the std
+// implementation.
+//
+// NOTE: the extensions (__has_trivial_xxx) are implemented in gcc (version >=
+// 4.3) and clang. Since we are supporting libstdc++ > 4.7, they should always
+// be present. These extensions are documented at
+// https://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html#Type-Traits.
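+//
+// Example (illustrative; `Logged` is a hypothetical type with a user-provided
+// destructor):
+//
+//   struct Logged { ~Logged() {} };
+//   static_assert(y_absl::is_trivially_destructible<int>::value, "");
+//   static_assert(!y_absl::is_trivially_destructible<Logged>::value, "");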
+template <typename T>
+struct is_trivially_destructible
+ : std::integral_constant<bool, __has_trivial_destructor(T) &&
+ std::is_destructible<T>::value> {
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+ private:
+ static constexpr bool compliant = std::is_trivially_destructible<T>::value ==
+ is_trivially_destructible::value;
+ static_assert(compliant || std::is_trivially_destructible<T>::value,
+ "Not compliant with std::is_trivially_destructible; "
+ "Standard: false, Implementation: true");
+ static_assert(compliant || !std::is_trivially_destructible<T>::value,
+ "Not compliant with std::is_trivially_destructible; "
+ "Standard: true, Implementation: false");
+#endif // ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+};
+
+// is_trivially_default_constructible()
+//
+// Determines whether the passed type `T` is trivially default constructible.
+//
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_default_constructible()` metafunction for platforms that
+// have incomplete C++11 support (such as libstdc++ 4.x). On any platforms that
+// do fully support C++11, we check whether this yields the same result as the
+// std implementation.
+//
+// NOTE: according to the C++ standard, Section: 20.15.4.3 [meta.unary.prop]
+// "The predicate condition for a template specialization is_constructible<T,
+// Args...> shall be satisfied if and only if the following variable
+// definition would be well-formed for some invented variable t:
+//
+// T t(declval<Args>()...);
+//
+// is_trivially_constructible<T, Args...> additionally requires that the
+// variable definition does not call any operation that is not trivial.
+// For the purposes of this check, the call to std::declval is considered
+// trivial."
+//
+// Notes from https://en.cppreference.com/w/cpp/types/is_constructible:
+// In many implementations, is_nothrow_constructible also checks if the
+// destructor throws because it is effectively noexcept(T(arg)). Same
+// applies to is_trivially_constructible, which, in these implementations, also
+// requires that the destructor is trivial.
+// GCC bug 51452: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51452
+// LWG issue 2116: http://cplusplus.github.io/LWG/lwg-active.html#2116.
+//
+// "T obj();" need to be well-formed and not call any nontrivial operation.
+// Nontrivially destructible types will cause the expression to be nontrivial.
+template <typename T>
+struct is_trivially_default_constructible
+ : std::integral_constant<bool, __has_trivial_constructor(T) &&
+ std::is_default_constructible<T>::value &&
+ is_trivially_destructible<T>::value> {
+#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
+ !defined( \
+ ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
+ private:
+ static constexpr bool compliant =
+ std::is_trivially_default_constructible<T>::value ==
+ is_trivially_default_constructible::value;
+ static_assert(compliant || std::is_trivially_default_constructible<T>::value,
+ "Not compliant with std::is_trivially_default_constructible; "
+ "Standard: false, Implementation: true");
+ static_assert(compliant || !std::is_trivially_default_constructible<T>::value,
+ "Not compliant with std::is_trivially_default_constructible; "
+ "Standard: true, Implementation: false");
+#endif // ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+};
+
+// is_trivially_move_constructible()
+//
+// Determines whether the passed type `T` is trivially move constructible.
+//
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_move_constructible()` metafunction for platforms that have
+// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
+// fully support C++11, we check whether this yields the same result as the std
+// implementation.
+//
+// NOTE: `T obj(declval<T>());` needs to be well-formed and not call any
+// nontrivial operation. Nontrivially destructible types will cause the
+// expression to be nontrivial.
+template <typename T>
+struct is_trivially_move_constructible
+ : std::conditional<
+ std::is_object<T>::value && !std::is_array<T>::value,
+ type_traits_internal::IsTriviallyMoveConstructibleObject<T>,
+ std::is_reference<T>>::type::type {
+#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
+ !defined( \
+ ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
+ private:
+ static constexpr bool compliant =
+ std::is_trivially_move_constructible<T>::value ==
+ is_trivially_move_constructible::value;
+ static_assert(compliant || std::is_trivially_move_constructible<T>::value,
+ "Not compliant with std::is_trivially_move_constructible; "
+ "Standard: false, Implementation: true");
+ static_assert(compliant || !std::is_trivially_move_constructible<T>::value,
+ "Not compliant with std::is_trivially_move_constructible; "
+ "Standard: true, Implementation: false");
+#endif // ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+};
+
+// is_trivially_copy_constructible()
+//
+// Determines whether the passed type `T` is trivially copy constructible.
+//
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_copy_constructible()` metafunction for platforms that have
+// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
+// fully support C++11, we check whether this yields the same result as the std
+// implementation.
+//
+// NOTE: `T obj(declval<const T&>());` needs to be well-formed and not call any
+// nontrivial operation. Nontrivially destructible types will cause the
+// expression to be nontrivial.
+template <typename T>
+struct is_trivially_copy_constructible
+ : std::conditional<
+ std::is_object<T>::value && !std::is_array<T>::value,
+ type_traits_internal::IsTriviallyCopyConstructibleObject<T>,
+ std::is_lvalue_reference<T>>::type::type {
+#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
+ !defined( \
+ ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
+ private:
+ static constexpr bool compliant =
+ std::is_trivially_copy_constructible<T>::value ==
+ is_trivially_copy_constructible::value;
+ static_assert(compliant || std::is_trivially_copy_constructible<T>::value,
+ "Not compliant with std::is_trivially_copy_constructible; "
+ "Standard: false, Implementation: true");
+ static_assert(compliant || !std::is_trivially_copy_constructible<T>::value,
+ "Not compliant with std::is_trivially_copy_constructible; "
+ "Standard: true, Implementation: false");
+#endif // ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+};
+
+// is_trivially_move_assignable()
+//
+// Determines whether the passed type `T` is trivially move assignable.
+//
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_move_assignable()` metafunction for platforms that have
+// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
+// fully support C++11, we check whether this yields the same result as the std
+// implementation.
+//
+// NOTE: `is_assignable<T, U>::value` is `true` if the expression
+// `declval<T>() = declval<U>()` is well-formed when treated as an unevaluated
+// operand. `is_trivially_assignable<T, U>` requires the assignment to call no
+// operation that is not trivial. `is_trivially_copy_assignable<T>` is simply
+// `is_trivially_assignable<T&, T>`.
+template <typename T>
+struct is_trivially_move_assignable
+ : std::conditional<
+ std::is_object<T>::value && !std::is_array<T>::value &&
+ std::is_move_assignable<T>::value,
+ std::is_move_assignable<type_traits_internal::SingleMemberUnion<T>>,
+ type_traits_internal::IsTriviallyMoveAssignableReference<T>>::type::
+ type {
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+ private:
+ static constexpr bool compliant =
+ std::is_trivially_move_assignable<T>::value ==
+ is_trivially_move_assignable::value;
+ static_assert(compliant || std::is_trivially_move_assignable<T>::value,
+ "Not compliant with std::is_trivially_move_assignable; "
+ "Standard: false, Implementation: true");
+ static_assert(compliant || !std::is_trivially_move_assignable<T>::value,
+ "Not compliant with std::is_trivially_move_assignable; "
+ "Standard: true, Implementation: false");
+#endif // ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+};
+
+// is_trivially_copy_assignable()
+//
+// Determines whether the passed type `T` is trivially copy assignable.
+//
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_copy_assignable()` metafunction for platforms that have
+// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
+// fully support C++11, we check whether this yields the same result as the std
+// implementation.
+//
+// NOTE: `is_assignable<T, U>::value` is `true` if the expression
+// `declval<T>() = declval<U>()` is well-formed when treated as an unevaluated
+// operand. `is_trivially_assignable<T, U>` requires the assignment to call no
+// operation that is not trivial. `is_trivially_copy_assignable<T>` is simply
+// `is_trivially_assignable<T&, const T&>`.
+template <typename T>
+struct is_trivially_copy_assignable
+ : std::integral_constant<
+ bool, __has_trivial_assign(typename std::remove_reference<T>::type) &&
+ y_absl::is_copy_assignable<T>::value> {
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+ private:
+ static constexpr bool compliant =
+ std::is_trivially_copy_assignable<T>::value ==
+ is_trivially_copy_assignable::value;
+ static_assert(compliant || std::is_trivially_copy_assignable<T>::value,
+ "Not compliant with std::is_trivially_copy_assignable; "
+ "Standard: false, Implementation: true");
+ static_assert(compliant || !std::is_trivially_copy_assignable<T>::value,
+ "Not compliant with std::is_trivially_copy_assignable; "
+ "Standard: true, Implementation: false");
+#endif // ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+};
+
+#if defined(__cpp_lib_remove_cvref) && __cpp_lib_remove_cvref >= 201711L
+template <typename T>
+using remove_cvref = std::remove_cvref<T>;
+
+template <typename T>
+using remove_cvref_t = typename std::remove_cvref<T>::type;
+#else
+// remove_cvref()
+//
+// C++11 compatible implementation of std::remove_cvref which was added in
+// C++20.
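+//
+// Example (illustrative):
+//
+//   static_assert(
+//       std::is_same<y_absl::remove_cvref_t<const int&>, int>::value, "");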
+template <typename T>
+struct remove_cvref {
+ using type =
+ typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+};
+
+template <typename T>
+using remove_cvref_t = typename remove_cvref<T>::type;
+#endif
+
+namespace type_traits_internal {
+// is_trivially_copyable()
+//
+// Determines whether the passed type `T` is trivially copyable.
+//
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_copyable()` metafunction for platforms that have
+// incomplete C++11 support (such as libstdc++ 4.x). We use the C++17 definition
+// of TriviallyCopyable.
+//
+// NOTE: `is_trivially_copyable<T>::value` is `true` if all of T's copy/move
+// constructors/assignment operators are trivial or deleted, T has at least
+// one non-deleted copy/move constructor/assignment operator, and T is trivially
+// destructible. Arrays of trivially copyable types are trivially copyable.
+//
+// We expose this metafunction only for internal use within y_absl.
+template <typename T>
+class is_trivially_copyable_impl {
+ using ExtentsRemoved = typename std::remove_all_extents<T>::type;
+ static constexpr bool kIsCopyOrMoveConstructible =
+ std::is_copy_constructible<ExtentsRemoved>::value ||
+ std::is_move_constructible<ExtentsRemoved>::value;
+ static constexpr bool kIsCopyOrMoveAssignable =
+ y_absl::is_copy_assignable<ExtentsRemoved>::value ||
+ y_absl::is_move_assignable<ExtentsRemoved>::value;
+
+ public:
+ static constexpr bool kValue =
+ (__has_trivial_copy(ExtentsRemoved) || !kIsCopyOrMoveConstructible) &&
+ (__has_trivial_assign(ExtentsRemoved) || !kIsCopyOrMoveAssignable) &&
+ (kIsCopyOrMoveConstructible || kIsCopyOrMoveAssignable) &&
+ is_trivially_destructible<ExtentsRemoved>::value &&
+      // We need to check for this explicitly because otherwise we'll say
+      // references are trivially copyable when compiled by MSVC.
+ !std::is_reference<ExtentsRemoved>::value;
+};
+
+template <typename T>
+struct is_trivially_copyable
+ : std::integral_constant<
+ bool, type_traits_internal::is_trivially_copyable_impl<T>::kValue> {};
+} // namespace type_traits_internal
+
+// -----------------------------------------------------------------------------
+// C++14 "_t" trait aliases
+// -----------------------------------------------------------------------------
+
+template <typename T>
+using remove_cv_t = typename std::remove_cv<T>::type;
+
+template <typename T>
+using remove_const_t = typename std::remove_const<T>::type;
+
+template <typename T>
+using remove_volatile_t = typename std::remove_volatile<T>::type;
+
+template <typename T>
+using add_cv_t = typename std::add_cv<T>::type;
+
+template <typename T>
+using add_const_t = typename std::add_const<T>::type;
+
+template <typename T>
+using add_volatile_t = typename std::add_volatile<T>::type;
+
+template <typename T>
+using remove_reference_t = typename std::remove_reference<T>::type;
+
+template <typename T>
+using add_lvalue_reference_t = typename std::add_lvalue_reference<T>::type;
+
+template <typename T>
+using add_rvalue_reference_t = typename std::add_rvalue_reference<T>::type;
+
+template <typename T>
+using remove_pointer_t = typename std::remove_pointer<T>::type;
+
+template <typename T>
+using add_pointer_t = typename std::add_pointer<T>::type;
+
+template <typename T>
+using make_signed_t = typename std::make_signed<T>::type;
+
+template <typename T>
+using make_unsigned_t = typename std::make_unsigned<T>::type;
+
+template <typename T>
+using remove_extent_t = typename std::remove_extent<T>::type;
+
+template <typename T>
+using remove_all_extents_t = typename std::remove_all_extents<T>::type;
+
+template <size_t Len, size_t Align = type_traits_internal::
+ default_alignment_of_aligned_storage<Len>::value>
+using aligned_storage_t = typename std::aligned_storage<Len, Align>::type;
+
+template <typename T>
+using decay_t = typename std::decay<T>::type;
+
+template <bool B, typename T = void>
+using enable_if_t = typename std::enable_if<B, T>::type;
+
+template <bool B, typename T, typename F>
+using conditional_t = typename std::conditional<B, T, F>::type;
+
+template <typename... T>
+using common_type_t = typename std::common_type<T...>::type;
+
+template <typename T>
+using underlying_type_t = typename std::underlying_type<T>::type;
+
+
+namespace type_traits_internal {
+
+#if (defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L) || \
+ (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
+// std::result_of is deprecated (C++17) or removed (C++20)
+template<typename> struct result_of;
+template<typename F, typename... Args>
+struct result_of<F(Args...)> : std::invoke_result<F, Args...> {};
+#else
+template<typename F> using result_of = std::result_of<F>;
+#endif
+
+} // namespace type_traits_internal
+
+template<typename F>
+using result_of_t = typename type_traits_internal::result_of<F>::type;
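+
+// A minimal usage sketch (illustrative only): for a pointer to function taking
+// a `double` and returning `int`, `result_of_t` yields `int`.
+//
+//   using Fp = int (*)(double);
+//   static_assert(std::is_same<y_absl::result_of_t<Fp(double)>, int>::value,
+//                 "");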
+
+namespace type_traits_internal {
+// In MSVC we can't probe std::hash or stdext::hash because it triggers a
+// static_assert instead of failing substitution. Libc++ prior to 4.0
+// also used a static_assert.
+//
+#if defined(_MSC_VER) || (defined(_LIBCPP_VERSION) && \
+ _LIBCPP_VERSION < 4000 && _LIBCPP_STD_VER > 11)
+#define ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_ 0
+#else
+#define ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_ 1
+#endif
+
+#if !ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
+template <typename Key, typename = size_t>
+struct IsHashable : std::true_type {};
+#else // ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
+template <typename Key, typename = void>
+struct IsHashable : std::false_type {};
+
+template <typename Key>
+struct IsHashable<
+ Key,
+ y_absl::enable_if_t<std::is_convertible<
+ decltype(std::declval<std::hash<Key>&>()(std::declval<Key const&>())),
+ std::size_t>::value>> : std::true_type {};
+#endif // !ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
+
+struct AssertHashEnabledHelper {
+ private:
+ static void Sink(...) {}
+ struct NAT {};
+
+ template <class Key>
+ static auto GetReturnType(int)
+ -> decltype(std::declval<std::hash<Key>>()(std::declval<Key const&>()));
+ template <class Key>
+ static NAT GetReturnType(...);
+
+ template <class Key>
+ static std::nullptr_t DoIt() {
+ static_assert(IsHashable<Key>::value,
+ "std::hash<Key> does not provide a call operator");
+ static_assert(
+ std::is_default_constructible<std::hash<Key>>::value,
+ "std::hash<Key> must be default constructible when it is enabled");
+ static_assert(
+ std::is_copy_constructible<std::hash<Key>>::value,
+ "std::hash<Key> must be copy constructible when it is enabled");
+ static_assert(y_absl::is_copy_assignable<std::hash<Key>>::value,
+ "std::hash<Key> must be copy assignable when it is enabled");
+ // is_destructible is unchecked as it's implied by each of the
+ // is_constructible checks.
+ using ReturnType = decltype(GetReturnType<Key>(0));
+ static_assert(std::is_same<ReturnType, NAT>::value ||
+ std::is_same<ReturnType, size_t>::value,
+ "std::hash<Key> must return size_t");
+ return nullptr;
+ }
+
+ template <class... Ts>
+ friend void AssertHashEnabled();
+};
+
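+// AssertHashEnabled()
+//
+// Triggers the static_asserts in `AssertHashEnabledHelper::DoIt()` for each
+// type in `Ts`. A minimal usage sketch (illustrative only):
+//
+//   y_absl::type_traits_internal::AssertHashEnabled<int>();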
+template <class... Ts>
+inline void AssertHashEnabled() {
+ using Helper = AssertHashEnabledHelper;
+ Helper::Sink(Helper::DoIt<Ts>()...);
+}
+
+} // namespace type_traits_internal
+
+// An internal namespace that is required to implement the C++17 swap traits.
+// It is not further nested in type_traits_internal to avoid long symbol names.
+namespace swap_internal {
+
+// Necessary for the traits.
+using std::swap;
+
+// This declaration prevents global `swap` and `y_absl::swap` overloads from being
+// considered unless ADL picks them up.
+void swap();
+
+template <class T>
+using IsSwappableImpl = decltype(swap(std::declval<T&>(), std::declval<T&>()));
+
+// NOTE: This dance with the default template parameter is for MSVC.
+template <class T,
+ class IsNoexcept = std::integral_constant<
+ bool, noexcept(swap(std::declval<T&>(), std::declval<T&>()))>>
+using IsNothrowSwappableImpl = typename std::enable_if<IsNoexcept::value>::type;
+
+// IsSwappable
+//
+// Determines whether the standard swap idiom is a valid expression for
+// arguments of type `T`.
+template <class T>
+struct IsSwappable
+ : y_absl::type_traits_internal::is_detected<IsSwappableImpl, T> {};
+
+// IsNothrowSwappable
+//
+// Determines whether the standard swap idiom is a valid expression for
+// arguments of type `T` and is noexcept.
+template <class T>
+struct IsNothrowSwappable
+ : y_absl::type_traits_internal::is_detected<IsNothrowSwappableImpl, T> {};
+
+// Swap()
+//
+// Performs the swap idiom from a namespace where valid candidates may only be
+// found in `std` or via ADL.
+template <class T, y_absl::enable_if_t<IsSwappable<T>::value, int> = 0>
+void Swap(T& lhs, T& rhs) noexcept(IsNothrowSwappable<T>::value) {
+ swap(lhs, rhs);
+}
+
+// StdSwapIsUnconstrained
+//
+// Some standard library implementations are broken in that they do not
+// constrain `std::swap`. This will effectively tell us if we are dealing with
+// one of those implementations.
+using StdSwapIsUnconstrained = IsSwappable<void()>;
+
+} // namespace swap_internal
+
+namespace type_traits_internal {
+
+// Make the swap-related traits/function accessible from this namespace.
+using swap_internal::IsNothrowSwappable;
+using swap_internal::IsSwappable;
+using swap_internal::Swap;
+using swap_internal::StdSwapIsUnconstrained;
+
+} // namespace type_traits_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_META_TYPE_TRAITS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/meta/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/meta/ya.make
new file mode 100644
index 00000000000..8e771a5b75b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/meta/ya.make
@@ -0,0 +1,18 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..3d70f7ab0ea
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/.yandex_meta/licenses.list.txt
@@ -0,0 +1,38 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2020 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2021 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h
new file mode 100644
index 00000000000..92cb0c3a91a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h
@@ -0,0 +1,177 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: bits.h
+// -----------------------------------------------------------------------------
+//
+// This file contains implementations of C++20's bitwise math functions, as
+// defined by:
+//
+// P0553R4:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p0553r4.html
+// P0556R3:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0556r3.html
+// P1355R2:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1355r2.html
+// P1956R1:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1956r1.pdf
+//
+// When using a standard library that implements these functions, we use the
+// standard library's implementation.
+
+#ifndef ABSL_NUMERIC_BITS_H_
+#define ABSL_NUMERIC_BITS_H_
+
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+#if (defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L) || \
+ (defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
+#include <bit>
+#endif
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/numeric/internal/bits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+#if !(defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
+// rotating
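+//
+// For example (illustrative): rotl(uint8_t{0x12}, 4) == 0x21 and
+// rotr(uint8_t{0x12}, 1) == 0x09.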
+template <class T>
+ABSL_MUST_USE_RESULT constexpr
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ rotl(T x, int s) noexcept {
+ return numeric_internal::RotateLeft(x, s);
+}
+
+template <class T>
+ABSL_MUST_USE_RESULT constexpr
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ rotr(T x, int s) noexcept {
+ return numeric_internal::RotateRight(x, s);
+}
+
+// Counting functions
+//
+// While these functions are typically constexpr, on some platforms, they may
+// not be marked as constexpr due to constraints of the compiler/available
+// intrinsics.
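+//
+// For example (illustrative):
+//
+//   y_absl::countl_zero(uint8_t{1}) == 7
+//   y_absl::countr_zero(uint8_t{8}) == 3
+//   y_absl::popcount(uint8_t{0xFF}) == 8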
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, int>::type
+ countl_zero(T x) noexcept {
+ return numeric_internal::CountLeadingZeroes(x);
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, int>::type
+ countl_one(T x) noexcept {
+ // Avoid integer promotion to a wider type
+ return countl_zero(static_cast<T>(~x));
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CTZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, int>::type
+ countr_zero(T x) noexcept {
+ return numeric_internal::CountTrailingZeroes(x);
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CTZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, int>::type
+ countr_one(T x) noexcept {
+ // Avoid integer promotion to a wider type
+ return countr_zero(static_cast<T>(~x));
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline
+ typename std::enable_if<std::is_unsigned<T>::value, int>::type
+ popcount(T x) noexcept {
+ return numeric_internal::Popcount(x);
+}
+#else // defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L
+
+using std::countl_one;
+using std::countl_zero;
+using std::countr_one;
+using std::countr_zero;
+using std::popcount;
+using std::rotl;
+using std::rotr;
+
+#endif
+
+#if !(defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L)
+// Returns: true if x is an integral power of two; false otherwise.
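+//
+// For example (illustrative): has_single_bit(16u) is true, while
+// has_single_bit(18u) and has_single_bit(0u) are false.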
+template <class T>
+constexpr inline typename std::enable_if<std::is_unsigned<T>::value, bool>::type
+has_single_bit(T x) noexcept {
+ return x != 0 && (x & (x - 1)) == 0;
+}
+
+// Returns: If x == 0, 0; otherwise one plus the base-2 logarithm of x, with any
+// fractional part discarded.
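+//
+// For example (illustrative): bit_width(16u) == 5, while bit_width(0u) == 0.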
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ bit_width(T x) noexcept {
+ return std::numeric_limits<T>::digits - countl_zero(x);
+}
+
+// Returns: If x == 0, 0; otherwise the maximal value y such that
+// has_single_bit(y) is true and y <= x.
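+//
+// For example (illustrative): bit_floor(5u) == 4, while bit_floor(0u) == 0.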
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ bit_floor(T x) noexcept {
+ return x == 0 ? 0 : T{1} << (bit_width(x) - 1);
+}
+
+// Returns: N, where N is the smallest power of 2 greater than or equal to x.
+//
+// Preconditions: N is representable as a value of type T.
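+//
+// For example (illustrative): bit_ceil(5u) == 8, while bit_ceil(1u) == 1.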
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ bit_ceil(T x) {
+ // If T is narrower than unsigned, T{1} << bit_width will be promoted. We
+  // want to force it to wrap around so that bit_ceil of an invalid value is
+  // not a core constant expression.
+ //
+ // BitCeilNonPowerOf2 triggers an overflow in constexpr contexts if we would
+ // undergo promotion to unsigned but not fit the result into T without
+ // truncation.
+ return has_single_bit(x) ? T{1} << (bit_width(x) - 1)
+ : numeric_internal::BitCeilNonPowerOf2(x);
+}
+#else // defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L
+
+using std::bit_ceil;
+using std::bit_floor;
+using std::bit_width;
+using std::has_single_bit;
+
+#endif
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_NUMERIC_BITS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc
new file mode 100644
index 00000000000..6172372d757
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc
@@ -0,0 +1,383 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/numeric/int128.h"
+
+#include <stddef.h>
+
+#include <cassert>
+#include <iomanip>
+#include <ostream> // NOLINT(readability/streams)
+#include <sstream>
+#include <util/generic/string.h>
+#include <type_traits>
+
+#include "y_absl/base/optimization.h"
+#include "y_absl/numeric/bits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+ABSL_DLL const uint128 kuint128max = MakeUint128(
+ std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max());
+
+namespace {
+
+// Returns the 0-based position of the last set bit (i.e., most significant bit)
+// in the given uint128. The argument must not be 0.
+//
+// For example:
+// Given: 5 (decimal) == 101 (binary)
+// Returns: 2
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE int Fls128(uint128 n) {
+ if (uint64_t hi = Uint128High64(n)) {
+ ABSL_INTERNAL_ASSUME(hi != 0);
+ return 127 - countl_zero(hi);
+ }
+ const uint64_t low = Uint128Low64(n);
+ ABSL_INTERNAL_ASSUME(low != 0);
+ return 63 - countl_zero(low);
+}
+
+// Long division/modulo for uint128 implemented using the shift-subtract
+// division algorithm adapted from:
+// https://stackoverflow.com/questions/5386377/division-without-using
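+//
+// For example (illustrative): DivModImpl(100, 7, &q, &r) leaves q == 14 and
+// r == 2.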
+inline void DivModImpl(uint128 dividend, uint128 divisor, uint128* quotient_ret,
+ uint128* remainder_ret) {
+ assert(divisor != 0);
+
+ if (divisor > dividend) {
+ *quotient_ret = 0;
+ *remainder_ret = dividend;
+ return;
+ }
+
+ if (divisor == dividend) {
+ *quotient_ret = 1;
+ *remainder_ret = 0;
+ return;
+ }
+
+ uint128 denominator = divisor;
+ uint128 quotient = 0;
+
+ // Left aligns the MSB of the denominator and the dividend.
+ const int shift = Fls128(dividend) - Fls128(denominator);
+ denominator <<= shift;
+
+ // Uses shift-subtract algorithm to divide dividend by denominator. The
+ // remainder will be left in dividend.
+ for (int i = 0; i <= shift; ++i) {
+ quotient <<= 1;
+ if (dividend >= denominator) {
+ dividend -= denominator;
+ quotient |= 1;
+ }
+ denominator >>= 1;
+ }
+
+ *quotient_ret = quotient;
+ *remainder_ret = dividend;
+}
+
+template <typename T>
+uint128 MakeUint128FromFloat(T v) {
+ static_assert(std::is_floating_point<T>::value, "");
+
+ // Rounding behavior is towards zero, same as for built-in types.
+
+ // Undefined behavior if v is NaN or cannot fit into uint128.
+ assert(std::isfinite(v) && v > -1 &&
+ (std::numeric_limits<T>::max_exponent <= 128 ||
+ v < std::ldexp(static_cast<T>(1), 128)));
+
+ if (v >= std::ldexp(static_cast<T>(1), 64)) {
+ uint64_t hi = static_cast<uint64_t>(std::ldexp(v, -64));
+ uint64_t lo = static_cast<uint64_t>(v - std::ldexp(static_cast<T>(hi), 64));
+ return MakeUint128(hi, lo);
+ }
+
+ return MakeUint128(0, static_cast<uint64_t>(v));
+}
+
+#if defined(__clang__) && !defined(__SSE3__)
+// Workaround for clang bug: https://bugs.llvm.org/show_bug.cgi?id=38289
+// Casting from long double to uint64_t is miscompiled and drops bits.
+// It is more work, so only use when we need the workaround.
+uint128 MakeUint128FromFloat(long double v) {
+  // Go 50 bits at a time; that fits in a double.
+ static_assert(std::numeric_limits<double>::digits >= 50, "");
+ static_assert(std::numeric_limits<long double>::digits <= 150, "");
+ // Undefined behavior if v is not finite or cannot fit into uint128.
+ assert(std::isfinite(v) && v > -1 && v < std::ldexp(1.0L, 128));
+
+ v = std::ldexp(v, -100);
+ uint64_t w0 = static_cast<uint64_t>(static_cast<double>(std::trunc(v)));
+ v = std::ldexp(v - static_cast<double>(w0), 50);
+ uint64_t w1 = static_cast<uint64_t>(static_cast<double>(std::trunc(v)));
+ v = std::ldexp(v - static_cast<double>(w1), 50);
+ uint64_t w2 = static_cast<uint64_t>(static_cast<double>(std::trunc(v)));
+ return (static_cast<uint128>(w0) << 100) | (static_cast<uint128>(w1) << 50) |
+ static_cast<uint128>(w2);
+}
+#endif // __clang__ && !__SSE3__
+} // namespace
+
+uint128::uint128(float v) : uint128(MakeUint128FromFloat(v)) {}
+uint128::uint128(double v) : uint128(MakeUint128FromFloat(v)) {}
+uint128::uint128(long double v) : uint128(MakeUint128FromFloat(v)) {}
+
+#if !defined(ABSL_HAVE_INTRINSIC_INT128)
+uint128 operator/(uint128 lhs, uint128 rhs) {
+ uint128 quotient = 0;
+ uint128 remainder = 0;
+ DivModImpl(lhs, rhs, &quotient, &remainder);
+ return quotient;
+}
+
+uint128 operator%(uint128 lhs, uint128 rhs) {
+ uint128 quotient = 0;
+ uint128 remainder = 0;
+ DivModImpl(lhs, rhs, &quotient, &remainder);
+ return remainder;
+}
+#endif // !defined(ABSL_HAVE_INTRINSIC_INT128)
+
+namespace {
+
+TString Uint128ToFormattedString(uint128 v, std::ios_base::fmtflags flags) {
+ // Select a divisor which is the largest power of the base < 2^64.
+ uint128 div;
+ int div_base_log;
+ switch (flags & std::ios::basefield) {
+ case std::ios::hex:
+ div = 0x1000000000000000; // 16^15
+ div_base_log = 15;
+ break;
+ case std::ios::oct:
+ div = 01000000000000000000000; // 8^21
+ div_base_log = 21;
+ break;
+ default: // std::ios::dec
+ div = 10000000000000000000u; // 10^19
+ div_base_log = 19;
+ break;
+ }
+
+ // Now piece together the uint128 representation from three chunks of the
+ // original value, each less than "div" and therefore representable as a
+ // uint64_t.
+ std::ostringstream os;
+ std::ios_base::fmtflags copy_mask =
+ std::ios::basefield | std::ios::showbase | std::ios::uppercase;
+ os.setf(flags & copy_mask, copy_mask);
+ uint128 high = v;
+ uint128 low;
+ DivModImpl(high, div, &high, &low);
+ uint128 mid;
+ DivModImpl(high, div, &high, &mid);
+ if (Uint128Low64(high) != 0) {
+ os << Uint128Low64(high);
+ os << std::noshowbase << std::setfill('0') << std::setw(div_base_log);
+ os << Uint128Low64(mid);
+ os << std::setw(div_base_log);
+ } else if (Uint128Low64(mid) != 0) {
+ os << Uint128Low64(mid);
+ os << std::noshowbase << std::setfill('0') << std::setw(div_base_log);
+ }
+ os << Uint128Low64(low);
+ return TString(os.str());
+}
+
+} // namespace
+
+std::ostream& operator<<(std::ostream& os, uint128 v) {
+ std::ios_base::fmtflags flags = os.flags();
+ TString rep = Uint128ToFormattedString(v, flags);
+
+ // Add the requisite padding.
+ std::streamsize width = os.width(0);
+ if (static_cast<size_t>(width) > rep.size()) {
+ std::ios::fmtflags adjustfield = flags & std::ios::adjustfield;
+ if (adjustfield == std::ios::left) {
+ rep.append(width - rep.size(), os.fill());
+ } else if (adjustfield == std::ios::internal &&
+ (flags & std::ios::showbase) &&
+ (flags & std::ios::basefield) == std::ios::hex && v != 0) {
+ rep.insert((size_t)2, width - rep.size(), os.fill());
+ } else {
+ rep.insert((size_t)0, width - rep.size(), os.fill());
+ }
+ }
+
+ return os << rep;
+}
+
+namespace {
+
+uint128 UnsignedAbsoluteValue(int128 v) {
+ // Cast to uint128 before possibly negating because -Int128Min() is undefined.
+ return Int128High64(v) < 0 ? -uint128(v) : uint128(v);
+}
+
+} // namespace
+
+#if !defined(ABSL_HAVE_INTRINSIC_INT128)
+namespace {
+
+template <typename T>
+int128 MakeInt128FromFloat(T v) {
+ // Conversion when v is NaN or cannot fit into int128 would be undefined
+ // behavior if using an intrinsic 128-bit integer.
+ assert(std::isfinite(v) && (std::numeric_limits<T>::max_exponent <= 127 ||
+ (v >= -std::ldexp(static_cast<T>(1), 127) &&
+ v < std::ldexp(static_cast<T>(1), 127))));
+
+ // We must convert the absolute value and then negate as needed, because
+ // floating point types are typically sign-magnitude. Otherwise, the
+ // difference between the high and low 64 bits when interpreted as two's
+ // complement overwhelms the precision of the mantissa.
+ uint128 result = v < 0 ? -MakeUint128FromFloat(-v) : MakeUint128FromFloat(v);
+ return MakeInt128(int128_internal::BitCastToSigned(Uint128High64(result)),
+ Uint128Low64(result));
+}
+
+} // namespace
+
+int128::int128(float v) : int128(MakeInt128FromFloat(v)) {}
+int128::int128(double v) : int128(MakeInt128FromFloat(v)) {}
+int128::int128(long double v) : int128(MakeInt128FromFloat(v)) {}
+
+int128 operator/(int128 lhs, int128 rhs) {
+ assert(lhs != Int128Min() || rhs != -1); // UB on two's complement.
+
+ uint128 quotient = 0;
+ uint128 remainder = 0;
+ DivModImpl(UnsignedAbsoluteValue(lhs), UnsignedAbsoluteValue(rhs),
+ &quotient, &remainder);
+ if ((Int128High64(lhs) < 0) != (Int128High64(rhs) < 0)) quotient = -quotient;
+ return MakeInt128(int128_internal::BitCastToSigned(Uint128High64(quotient)),
+ Uint128Low64(quotient));
+}
+
+int128 operator%(int128 lhs, int128 rhs) {
+ assert(lhs != Int128Min() || rhs != -1); // UB on two's complement.
+
+ uint128 quotient = 0;
+ uint128 remainder = 0;
+ DivModImpl(UnsignedAbsoluteValue(lhs), UnsignedAbsoluteValue(rhs),
+ &quotient, &remainder);
+ if (Int128High64(lhs) < 0) remainder = -remainder;
+ return MakeInt128(int128_internal::BitCastToSigned(Uint128High64(remainder)),
+ Uint128Low64(remainder));
+}
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
+std::ostream& operator<<(std::ostream& os, int128 v) {
+ std::ios_base::fmtflags flags = os.flags();
+ TString rep;
+
+ // Add the sign if needed.
+ bool print_as_decimal =
+ (flags & std::ios::basefield) == std::ios::dec ||
+ (flags & std::ios::basefield) == std::ios_base::fmtflags();
+ if (print_as_decimal) {
+ if (Int128High64(v) < 0) {
+ rep = "-";
+ } else if (flags & std::ios::showpos) {
+ rep = "+";
+ }
+ }
+
+ rep.append(Uint128ToFormattedString(
+ print_as_decimal ? UnsignedAbsoluteValue(v) : uint128(v), os.flags()));
+
+ // Add the requisite padding.
+ std::streamsize width = os.width(0);
+ if (static_cast<size_t>(width) > rep.size()) {
+ switch (flags & std::ios::adjustfield) {
+ case std::ios::left:
+ rep.append(width - rep.size(), os.fill());
+ break;
+ case std::ios::internal:
+ if (print_as_decimal && (rep[0] == '+' || rep[0] == '-')) {
+ rep.insert(1, width - rep.size(), os.fill());
+ } else if ((flags & std::ios::basefield) == std::ios::hex &&
+ (flags & std::ios::showbase) && v != 0) {
+ rep.insert((size_t)2, width - rep.size(), os.fill());
+ } else {
+ rep.insert((size_t)0, width - rep.size(), os.fill());
+ }
+ break;
+ default: // std::ios::right
+ rep.insert((size_t)0, width - rep.size(), os.fill());
+ break;
+ }
+ }
+
+ return os << rep;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+namespace std {
+constexpr bool numeric_limits<y_absl::uint128>::is_specialized;
+constexpr bool numeric_limits<y_absl::uint128>::is_signed;
+constexpr bool numeric_limits<y_absl::uint128>::is_integer;
+constexpr bool numeric_limits<y_absl::uint128>::is_exact;
+constexpr bool numeric_limits<y_absl::uint128>::has_infinity;
+constexpr bool numeric_limits<y_absl::uint128>::has_quiet_NaN;
+constexpr bool numeric_limits<y_absl::uint128>::has_signaling_NaN;
+constexpr float_denorm_style numeric_limits<y_absl::uint128>::has_denorm;
+constexpr bool numeric_limits<y_absl::uint128>::has_denorm_loss;
+constexpr float_round_style numeric_limits<y_absl::uint128>::round_style;
+constexpr bool numeric_limits<y_absl::uint128>::is_iec559;
+constexpr bool numeric_limits<y_absl::uint128>::is_bounded;
+constexpr bool numeric_limits<y_absl::uint128>::is_modulo;
+constexpr int numeric_limits<y_absl::uint128>::digits;
+constexpr int numeric_limits<y_absl::uint128>::digits10;
+constexpr int numeric_limits<y_absl::uint128>::max_digits10;
+constexpr int numeric_limits<y_absl::uint128>::radix;
+constexpr int numeric_limits<y_absl::uint128>::min_exponent;
+constexpr int numeric_limits<y_absl::uint128>::min_exponent10;
+constexpr int numeric_limits<y_absl::uint128>::max_exponent;
+constexpr int numeric_limits<y_absl::uint128>::max_exponent10;
+constexpr bool numeric_limits<y_absl::uint128>::traps;
+constexpr bool numeric_limits<y_absl::uint128>::tinyness_before;
+
+constexpr bool numeric_limits<y_absl::int128>::is_specialized;
+constexpr bool numeric_limits<y_absl::int128>::is_signed;
+constexpr bool numeric_limits<y_absl::int128>::is_integer;
+constexpr bool numeric_limits<y_absl::int128>::is_exact;
+constexpr bool numeric_limits<y_absl::int128>::has_infinity;
+constexpr bool numeric_limits<y_absl::int128>::has_quiet_NaN;
+constexpr bool numeric_limits<y_absl::int128>::has_signaling_NaN;
+constexpr float_denorm_style numeric_limits<y_absl::int128>::has_denorm;
+constexpr bool numeric_limits<y_absl::int128>::has_denorm_loss;
+constexpr float_round_style numeric_limits<y_absl::int128>::round_style;
+constexpr bool numeric_limits<y_absl::int128>::is_iec559;
+constexpr bool numeric_limits<y_absl::int128>::is_bounded;
+constexpr bool numeric_limits<y_absl::int128>::is_modulo;
+constexpr int numeric_limits<y_absl::int128>::digits;
+constexpr int numeric_limits<y_absl::int128>::digits10;
+constexpr int numeric_limits<y_absl::int128>::max_digits10;
+constexpr int numeric_limits<y_absl::int128>::radix;
+constexpr int numeric_limits<y_absl::int128>::min_exponent;
+constexpr int numeric_limits<y_absl::int128>::min_exponent10;
+constexpr int numeric_limits<y_absl::int128>::max_exponent;
+constexpr int numeric_limits<y_absl::int128>::max_exponent10;
+constexpr bool numeric_limits<y_absl::int128>::traps;
+constexpr bool numeric_limits<y_absl::int128>::tinyness_before;
+} // namespace std
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h
new file mode 100644
index 00000000000..b54d614ce9f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h
@@ -0,0 +1,1165 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: int128.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines 128-bit integer types, `uint128` and `int128`.
+//
+// TODO(y_absl-team): This module is inconsistent as many inline `uint128` methods
+// are defined in this file, while many inline `int128` methods are defined in
+// the `int128_*_intrinsic.inc` files.
+
+#ifndef ABSL_NUMERIC_INT128_H_
+#define ABSL_NUMERIC_INT128_H_
+
+#include <cassert>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <iosfwd>
+#include <limits>
+#include <utility>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+
+#if defined(_MSC_VER)
+// In very old versions of MSVC and when the /Zc:wchar_t flag is off, wchar_t is
+// a typedef for unsigned short. Otherwise wchar_t is mapped to the __wchar_t
+// builtin type. We need to make sure not to define operator wchar_t()
+// alongside operator unsigned short() in these instances.
+#define ABSL_INTERNAL_WCHAR_T __wchar_t
+#if defined(_M_X64)
+#include <intrin.h>
+#pragma intrinsic(_umul128)
+#endif // defined(_M_X64)
+#else // defined(_MSC_VER)
+#define ABSL_INTERNAL_WCHAR_T wchar_t
+#endif // defined(_MSC_VER)
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+class int128;
+
+// uint128
+//
+// An unsigned 128-bit integer type. The API is meant to mimic an intrinsic type
+// as closely as is practical, including exhibiting undefined behavior in
+// analogous cases (e.g. division by zero). This type is intended to be a
+// drop-in replacement once C++ supports an intrinsic `uint128_t` type; when
+// that occurs, existing well-behaved uses of `uint128` will continue to work
+// using that new type.
+//
+// Note: code written with this type will continue to compile once `uint128_t`
+// is introduced, provided the replacement helper functions
+// `Uint128(Low|High)64()` and `MakeUint128()` are provided.
+//
+// A `uint128` supports the following:
+//
+// * Implicit construction from integral types
+// * Explicit conversion to integral types
+//
+// Additionally, if your compiler supports `__int128`, `uint128` is
+// interoperable with that type. (Abseil checks for this compatibility through
+// the `ABSL_HAVE_INTRINSIC_INT128` macro.)
+//
+// However, a `uint128` differs from intrinsic integral types in the following
+// ways:
+//
+// * Errors on implicit conversions that do not preserve value (such as
+// loss of precision when converting to float values).
+// * Requires explicit construction from and conversion to floating point
+// types.
+// * Conversion to integral types requires an explicit static_cast() to
+// mimic use of the `-Wnarrowing` compiler flag.
+// * The alignment requirement of `uint128` may differ from that of an
+// intrinsic 128-bit integer type depending on platform and build
+// configuration.
+//
+// Example:
+//
+// float y = y_absl::Uint128Max(); // Error. uint128 cannot be implicitly
+// // converted to float.
+//
+// y_absl::uint128 v;
+// uint64_t i = v; // Error
+// uint64_t i = static_cast<uint64_t>(v); // OK
+//
+class
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ alignas(unsigned __int128)
+#endif // ABSL_HAVE_INTRINSIC_INT128
+ uint128 {
+ public:
+ uint128() = default;
+
+ // Constructors from arithmetic types
+ constexpr uint128(int v); // NOLINT(runtime/explicit)
+ constexpr uint128(unsigned int v); // NOLINT(runtime/explicit)
+ constexpr uint128(long v); // NOLINT(runtime/int)
+ constexpr uint128(unsigned long v); // NOLINT(runtime/int)
+ constexpr uint128(long long v); // NOLINT(runtime/int)
+ constexpr uint128(unsigned long long v); // NOLINT(runtime/int)
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ constexpr uint128(__int128 v); // NOLINT(runtime/explicit)
+ constexpr uint128(unsigned __int128 v); // NOLINT(runtime/explicit)
+#endif // ABSL_HAVE_INTRINSIC_INT128
+ constexpr uint128(int128 v); // NOLINT(runtime/explicit)
+ explicit uint128(float v);
+ explicit uint128(double v);
+ explicit uint128(long double v);
+
+ // Assignment operators from arithmetic types
+ uint128& operator=(int v);
+ uint128& operator=(unsigned int v);
+ uint128& operator=(long v); // NOLINT(runtime/int)
+ uint128& operator=(unsigned long v); // NOLINT(runtime/int)
+ uint128& operator=(long long v); // NOLINT(runtime/int)
+ uint128& operator=(unsigned long long v); // NOLINT(runtime/int)
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ uint128& operator=(__int128 v);
+ uint128& operator=(unsigned __int128 v);
+#endif // ABSL_HAVE_INTRINSIC_INT128
+ uint128& operator=(int128 v);
+
+ // Conversion operators to other arithmetic types
+ constexpr explicit operator bool() const;
+ constexpr explicit operator char() const;
+ constexpr explicit operator signed char() const;
+ constexpr explicit operator unsigned char() const;
+ constexpr explicit operator char16_t() const;
+ constexpr explicit operator char32_t() const;
+ constexpr explicit operator ABSL_INTERNAL_WCHAR_T() const;
+ constexpr explicit operator short() const; // NOLINT(runtime/int)
+ // NOLINTNEXTLINE(runtime/int)
+ constexpr explicit operator unsigned short() const;
+ constexpr explicit operator int() const;
+ constexpr explicit operator unsigned int() const;
+ constexpr explicit operator long() const; // NOLINT(runtime/int)
+ // NOLINTNEXTLINE(runtime/int)
+ constexpr explicit operator unsigned long() const;
+ // NOLINTNEXTLINE(runtime/int)
+ constexpr explicit operator long long() const;
+ // NOLINTNEXTLINE(runtime/int)
+ constexpr explicit operator unsigned long long() const;
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ constexpr explicit operator __int128() const;
+ constexpr explicit operator unsigned __int128() const;
+#endif // ABSL_HAVE_INTRINSIC_INT128
+ explicit operator float() const;
+ explicit operator double() const;
+ explicit operator long double() const;
+
+ // Trivial copy constructor, assignment operator and destructor.
+
+ // Arithmetic operators.
+ uint128& operator+=(uint128 other);
+ uint128& operator-=(uint128 other);
+ uint128& operator*=(uint128 other);
+ // Long division/modulo for uint128.
+ uint128& operator/=(uint128 other);
+ uint128& operator%=(uint128 other);
+ uint128 operator++(int);
+ uint128 operator--(int);
+ uint128& operator<<=(int);
+ uint128& operator>>=(int);
+ uint128& operator&=(uint128 other);
+ uint128& operator|=(uint128 other);
+ uint128& operator^=(uint128 other);
+ uint128& operator++();
+ uint128& operator--();
+
+ // Uint128Low64()
+ //
+ // Returns the lower 64-bit value of a `uint128` value.
+ friend constexpr uint64_t Uint128Low64(uint128 v);
+
+ // Uint128High64()
+ //
+ // Returns the higher 64-bit value of a `uint128` value.
+ friend constexpr uint64_t Uint128High64(uint128 v);
+
+  // MakeUint128()
+ //
+ // Constructs a `uint128` numeric value from two 64-bit unsigned integers.
+ // Note that this factory function is the only way to construct a `uint128`
+ // from integer values greater than 2^64.
+ //
+ // Example:
+ //
+ // y_absl::uint128 big = y_absl::MakeUint128(1, 0);
+ friend constexpr uint128 MakeUint128(uint64_t high, uint64_t low);
+
+ // Uint128Max()
+ //
+ // Returns the highest value for a 128-bit unsigned integer.
+ friend constexpr uint128 Uint128Max();
+
+ // Support for y_absl::Hash.
+ template <typename H>
+ friend H AbslHashValue(H h, uint128 v) {
+ return H::combine(std::move(h), Uint128High64(v), Uint128Low64(v));
+ }
+
+ private:
+ constexpr uint128(uint64_t high, uint64_t low);
+
+ // TODO(strel) Update implementation to use __int128 once all users of
+ // uint128 are fixed to not depend on alignof(uint128) == 8. Also add
+ // alignas(16) to class definition to keep alignment consistent across
+ // platforms.
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+ uint64_t lo_;
+ uint64_t hi_;
+#elif defined(ABSL_IS_BIG_ENDIAN)
+ uint64_t hi_;
+ uint64_t lo_;
+#else // byte order
+#error "Unsupported byte order: must be little-endian or big-endian."
+#endif // byte order
+};
+
+// Prefer to use the constexpr `Uint128Max()`.
+//
+// TODO(y_absl-team) deprecate kuint128max once migration tool is released.
+ABSL_DLL extern const uint128 kuint128max;
+
+// allow uint128 to be logged
+std::ostream& operator<<(std::ostream& os, uint128 v);
+
+// TODO(strel) add operator>>(std::istream&, uint128)
+
+constexpr uint128 Uint128Max() {
+ return uint128((std::numeric_limits<uint64_t>::max)(),
+ (std::numeric_limits<uint64_t>::max)());
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+// Specialized numeric_limits for uint128.
+namespace std {
+template <>
+class numeric_limits<y_absl::uint128> {
+ public:
+ static constexpr bool is_specialized = true;
+ static constexpr bool is_signed = false;
+ static constexpr bool is_integer = true;
+ static constexpr bool is_exact = true;
+ static constexpr bool has_infinity = false;
+ static constexpr bool has_quiet_NaN = false;
+ static constexpr bool has_signaling_NaN = false;
+ static constexpr float_denorm_style has_denorm = denorm_absent;
+ static constexpr bool has_denorm_loss = false;
+ static constexpr float_round_style round_style = round_toward_zero;
+ static constexpr bool is_iec559 = false;
+ static constexpr bool is_bounded = true;
+ static constexpr bool is_modulo = true;
+ static constexpr int digits = 128;
+ static constexpr int digits10 = 38;
+ static constexpr int max_digits10 = 0;
+ static constexpr int radix = 2;
+ static constexpr int min_exponent = 0;
+ static constexpr int min_exponent10 = 0;
+ static constexpr int max_exponent = 0;
+ static constexpr int max_exponent10 = 0;
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ static constexpr bool traps = numeric_limits<unsigned __int128>::traps;
+#else // ABSL_HAVE_INTRINSIC_INT128
+ static constexpr bool traps = numeric_limits<uint64_t>::traps;
+#endif // ABSL_HAVE_INTRINSIC_INT128
+ static constexpr bool tinyness_before = false;
+
+ static constexpr y_absl::uint128 (min)() { return 0; }
+ static constexpr y_absl::uint128 lowest() { return 0; }
+ static constexpr y_absl::uint128 (max)() { return y_absl::Uint128Max(); }
+ static constexpr y_absl::uint128 epsilon() { return 0; }
+ static constexpr y_absl::uint128 round_error() { return 0; }
+ static constexpr y_absl::uint128 infinity() { return 0; }
+ static constexpr y_absl::uint128 quiet_NaN() { return 0; }
+ static constexpr y_absl::uint128 signaling_NaN() { return 0; }
+ static constexpr y_absl::uint128 denorm_min() { return 0; }
+};
+} // namespace std
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// int128
+//
+// A signed 128-bit integer type. The API is meant to mimic an intrinsic
+// integral type as closely as is practical, including exhibiting undefined
+// behavior in analogous cases (e.g. division by zero).
+//
+// An `int128` supports the following:
+//
+// * Implicit construction from integral types
+// * Explicit conversion to integral types
+//
+// However, an `int128` differs from intrinsic integral types in the following
+// ways:
+//
+// * It is not implicitly convertible to other integral types.
+// * Requires explicit construction from and conversion to floating point
+// types.
+//
+// Additionally, if your compiler supports `__int128`, `int128` is
+// interoperable with that type. (Abseil checks for this compatibility through
+// the `ABSL_HAVE_INTRINSIC_INT128` macro.)
+//
+// The design goal for `int128` is that it will be compatible with a future
+// `int128_t`, if that type becomes a part of the standard.
+//
+// Example:
+//
+// float y = y_absl::int128(17); // Error. int128 cannot be implicitly
+// // converted to float.
+//
+// y_absl::int128 v;
+// int64_t i = v; // Error
+// int64_t i = static_cast<int64_t>(v); // OK
+//
+class int128 {
+ public:
+ int128() = default;
+
+ // Constructors from arithmetic types
+ constexpr int128(int v); // NOLINT(runtime/explicit)
+ constexpr int128(unsigned int v); // NOLINT(runtime/explicit)
+ constexpr int128(long v); // NOLINT(runtime/int)
+ constexpr int128(unsigned long v); // NOLINT(runtime/int)
+ constexpr int128(long long v); // NOLINT(runtime/int)
+ constexpr int128(unsigned long long v); // NOLINT(runtime/int)
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ constexpr int128(__int128 v); // NOLINT(runtime/explicit)
+ constexpr explicit int128(unsigned __int128 v);
+#endif // ABSL_HAVE_INTRINSIC_INT128
+ constexpr explicit int128(uint128 v);
+ explicit int128(float v);
+ explicit int128(double v);
+ explicit int128(long double v);
+
+ // Assignment operators from arithmetic types
+ int128& operator=(int v);
+ int128& operator=(unsigned int v);
+ int128& operator=(long v); // NOLINT(runtime/int)
+ int128& operator=(unsigned long v); // NOLINT(runtime/int)
+ int128& operator=(long long v); // NOLINT(runtime/int)
+ int128& operator=(unsigned long long v); // NOLINT(runtime/int)
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ int128& operator=(__int128 v);
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
+ // Conversion operators to other arithmetic types
+ constexpr explicit operator bool() const;
+ constexpr explicit operator char() const;
+ constexpr explicit operator signed char() const;
+ constexpr explicit operator unsigned char() const;
+ constexpr explicit operator char16_t() const;
+ constexpr explicit operator char32_t() const;
+ constexpr explicit operator ABSL_INTERNAL_WCHAR_T() const;
+ constexpr explicit operator short() const; // NOLINT(runtime/int)
+ // NOLINTNEXTLINE(runtime/int)
+ constexpr explicit operator unsigned short() const;
+ constexpr explicit operator int() const;
+ constexpr explicit operator unsigned int() const;
+ constexpr explicit operator long() const; // NOLINT(runtime/int)
+ // NOLINTNEXTLINE(runtime/int)
+ constexpr explicit operator unsigned long() const;
+ // NOLINTNEXTLINE(runtime/int)
+ constexpr explicit operator long long() const;
+ // NOLINTNEXTLINE(runtime/int)
+ constexpr explicit operator unsigned long long() const;
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ constexpr explicit operator __int128() const;
+ constexpr explicit operator unsigned __int128() const;
+#endif // ABSL_HAVE_INTRINSIC_INT128
+ explicit operator float() const;
+ explicit operator double() const;
+ explicit operator long double() const;
+
+ // Trivial copy constructor, assignment operator and destructor.
+
+ // Arithmetic operators
+ int128& operator+=(int128 other);
+ int128& operator-=(int128 other);
+ int128& operator*=(int128 other);
+ int128& operator/=(int128 other);
+ int128& operator%=(int128 other);
+ int128 operator++(int); // postfix increment: i++
+ int128 operator--(int); // postfix decrement: i--
+ int128& operator++(); // prefix increment: ++i
+ int128& operator--(); // prefix decrement: --i
+ int128& operator&=(int128 other);
+ int128& operator|=(int128 other);
+ int128& operator^=(int128 other);
+ int128& operator<<=(int amount);
+ int128& operator>>=(int amount);
+
+ // Int128Low64()
+ //
+  // Returns the lower 64-bit value of an `int128` value.
+ friend constexpr uint64_t Int128Low64(int128 v);
+
+ // Int128High64()
+ //
+  // Returns the higher 64-bit value of an `int128` value.
+ friend constexpr int64_t Int128High64(int128 v);
+
+ // MakeInt128()
+ //
+  // Constructs an `int128` numeric value from two 64-bit integers. Note that
+  // signedness is conveyed in the upper `high` value.
+  //
+  //   (y_absl::int128(1) << 64) * high + low
+  //
+  // Note that this factory function is the only way to construct an `int128`
+  // from integer values greater than 2^64 or less than -2^64.
+  //
+  // Example:
+  //
+  //   y_absl::int128 big = y_absl::MakeInt128(1, 0);
+  //   y_absl::int128 big_n = y_absl::MakeInt128(-1, 0);
+ friend constexpr int128 MakeInt128(int64_t high, uint64_t low);
+
+ // Int128Max()
+ //
+ // Returns the maximum value for a 128-bit signed integer.
+ friend constexpr int128 Int128Max();
+
+ // Int128Min()
+ //
+ // Returns the minimum value for a 128-bit signed integer.
+ friend constexpr int128 Int128Min();
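+
+  // For example (a usage sketch): these mirror the `std::numeric_limits`
+  // specialization defined below, so
+  //
+  //   y_absl::Int128Max() == std::numeric_limits<y_absl::int128>::max()
+  //   y_absl::Int128Min() == std::numeric_limits<y_absl::int128>::min()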
+
+ // Support for y_absl::Hash.
+ template <typename H>
+ friend H AbslHashValue(H h, int128 v) {
+ return H::combine(std::move(h), Int128High64(v), Int128Low64(v));
+ }
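+
+  // A usage sketch (assumes y_absl's hash containers are available):
+  //
+  //   y_absl::flat_hash_set<y_absl::int128> ids;
+  //   ids.insert(y_absl::MakeInt128(1, 0));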
+
+ private:
+ constexpr int128(int64_t high, uint64_t low);
+
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ __int128 v_;
+#else // ABSL_HAVE_INTRINSIC_INT128
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+ uint64_t lo_;
+ int64_t hi_;
+#elif defined(ABSL_IS_BIG_ENDIAN)
+ int64_t hi_;
+ uint64_t lo_;
+#else // byte order
+#error "Unsupported byte order: must be little-endian or big-endian."
+#endif // byte order
+#endif // ABSL_HAVE_INTRINSIC_INT128
+};
+
+std::ostream& operator<<(std::ostream& os, int128 v);
+
+// TODO(y_absl-team) add operator>>(std::istream&, int128)
+
+constexpr int128 Int128Max() {
+ return int128((std::numeric_limits<int64_t>::max)(),
+ (std::numeric_limits<uint64_t>::max)());
+}
+
+constexpr int128 Int128Min() {
+ return int128((std::numeric_limits<int64_t>::min)(), 0);
+}
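+
+// In decimal, Int128Max() is 170141183460469231731687303715884105727
+// (2^127 - 1) and Int128Min() is -170141183460469231731687303715884105728
+// (-2^127).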
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+// Specialized numeric_limits for int128.
+namespace std {
+template <>
+class numeric_limits<y_absl::int128> {
+ public:
+ static constexpr bool is_specialized = true;
+ static constexpr bool is_signed = true;
+ static constexpr bool is_integer = true;
+ static constexpr bool is_exact = true;
+ static constexpr bool has_infinity = false;
+ static constexpr bool has_quiet_NaN = false;
+ static constexpr bool has_signaling_NaN = false;
+ static constexpr float_denorm_style has_denorm = denorm_absent;
+ static constexpr bool has_denorm_loss = false;
+ static constexpr float_round_style round_style = round_toward_zero;
+ static constexpr bool is_iec559 = false;
+ static constexpr bool is_bounded = true;
+ static constexpr bool is_modulo = false;
+ static constexpr int digits = 127;
+ static constexpr int digits10 = 38;
+ static constexpr int max_digits10 = 0;
+ static constexpr int radix = 2;
+ static constexpr int min_exponent = 0;
+ static constexpr int min_exponent10 = 0;
+ static constexpr int max_exponent = 0;
+ static constexpr int max_exponent10 = 0;
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ static constexpr bool traps = numeric_limits<__int128>::traps;
+#else // ABSL_HAVE_INTRINSIC_INT128
+ static constexpr bool traps = numeric_limits<uint64_t>::traps;
+#endif // ABSL_HAVE_INTRINSIC_INT128
+ static constexpr bool tinyness_before = false;
+
+ static constexpr y_absl::int128 (min)() { return y_absl::Int128Min(); }
+ static constexpr y_absl::int128 lowest() { return y_absl::Int128Min(); }
+ static constexpr y_absl::int128 (max)() { return y_absl::Int128Max(); }
+ static constexpr y_absl::int128 epsilon() { return 0; }
+ static constexpr y_absl::int128 round_error() { return 0; }
+ static constexpr y_absl::int128 infinity() { return 0; }
+ static constexpr y_absl::int128 quiet_NaN() { return 0; }
+ static constexpr y_absl::int128 signaling_NaN() { return 0; }
+ static constexpr y_absl::int128 denorm_min() { return 0; }
+};
+} // namespace std
+
+// --------------------------------------------------------------------------
+// Implementation details follow
+// --------------------------------------------------------------------------
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+constexpr uint128 MakeUint128(uint64_t high, uint64_t low) {
+ return uint128(high, low);
+}
+
+// Assignment from integer types.
+
+inline uint128& uint128::operator=(int v) { return *this = uint128(v); }
+
+inline uint128& uint128::operator=(unsigned int v) {
+ return *this = uint128(v);
+}
+
+inline uint128& uint128::operator=(long v) { // NOLINT(runtime/int)
+ return *this = uint128(v);
+}
+
+// NOLINTNEXTLINE(runtime/int)
+inline uint128& uint128::operator=(unsigned long v) {
+ return *this = uint128(v);
+}
+
+// NOLINTNEXTLINE(runtime/int)
+inline uint128& uint128::operator=(long long v) {
+ return *this = uint128(v);
+}
+
+// NOLINTNEXTLINE(runtime/int)
+inline uint128& uint128::operator=(unsigned long long v) {
+ return *this = uint128(v);
+}
+
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+inline uint128& uint128::operator=(__int128 v) {
+ return *this = uint128(v);
+}
+
+inline uint128& uint128::operator=(unsigned __int128 v) {
+ return *this = uint128(v);
+}
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
+inline uint128& uint128::operator=(int128 v) {
+ return *this = uint128(v);
+}
+
+// Arithmetic operators.
+
+constexpr uint128 operator<<(uint128 lhs, int amount);
+constexpr uint128 operator>>(uint128 lhs, int amount);
+constexpr uint128 operator+(uint128 lhs, uint128 rhs);
+constexpr uint128 operator-(uint128 lhs, uint128 rhs);
+uint128 operator*(uint128 lhs, uint128 rhs);
+uint128 operator/(uint128 lhs, uint128 rhs);
+uint128 operator%(uint128 lhs, uint128 rhs);
+
+inline uint128& uint128::operator<<=(int amount) {
+ *this = *this << amount;
+ return *this;
+}
+
+inline uint128& uint128::operator>>=(int amount) {
+ *this = *this >> amount;
+ return *this;
+}
+
+inline uint128& uint128::operator+=(uint128 other) {
+ *this = *this + other;
+ return *this;
+}
+
+inline uint128& uint128::operator-=(uint128 other) {
+ *this = *this - other;
+ return *this;
+}
+
+inline uint128& uint128::operator*=(uint128 other) {
+ *this = *this * other;
+ return *this;
+}
+
+inline uint128& uint128::operator/=(uint128 other) {
+ *this = *this / other;
+ return *this;
+}
+
+inline uint128& uint128::operator%=(uint128 other) {
+ *this = *this % other;
+ return *this;
+}
+
+constexpr uint64_t Uint128Low64(uint128 v) { return v.lo_; }
+
+constexpr uint64_t Uint128High64(uint128 v) { return v.hi_; }
+
+// Constructors from integer types.
+
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+
+constexpr uint128::uint128(uint64_t high, uint64_t low)
+ : lo_{low}, hi_{high} {}
+
+constexpr uint128::uint128(int v)
+ : lo_{static_cast<uint64_t>(v)},
+ hi_{v < 0 ? (std::numeric_limits<uint64_t>::max)() : 0} {}
+constexpr uint128::uint128(long v) // NOLINT(runtime/int)
+ : lo_{static_cast<uint64_t>(v)},
+ hi_{v < 0 ? (std::numeric_limits<uint64_t>::max)() : 0} {}
+constexpr uint128::uint128(long long v) // NOLINT(runtime/int)
+ : lo_{static_cast<uint64_t>(v)},
+ hi_{v < 0 ? (std::numeric_limits<uint64_t>::max)() : 0} {}
+
+constexpr uint128::uint128(unsigned int v) : lo_{v}, hi_{0} {}
+// NOLINTNEXTLINE(runtime/int)
+constexpr uint128::uint128(unsigned long v) : lo_{v}, hi_{0} {}
+// NOLINTNEXTLINE(runtime/int)
+constexpr uint128::uint128(unsigned long long v) : lo_{v}, hi_{0} {}
+
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+constexpr uint128::uint128(__int128 v)
+ : lo_{static_cast<uint64_t>(v & ~uint64_t{0})},
+ hi_{static_cast<uint64_t>(static_cast<unsigned __int128>(v) >> 64)} {}
+constexpr uint128::uint128(unsigned __int128 v)
+ : lo_{static_cast<uint64_t>(v & ~uint64_t{0})},
+ hi_{static_cast<uint64_t>(v >> 64)} {}
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
+constexpr uint128::uint128(int128 v)
+ : lo_{Int128Low64(v)}, hi_{static_cast<uint64_t>(Int128High64(v))} {}
+
+#elif defined(ABSL_IS_BIG_ENDIAN)
+
+constexpr uint128::uint128(uint64_t high, uint64_t low)
+ : hi_{high}, lo_{low} {}
+
+constexpr uint128::uint128(int v)
+ : hi_{v < 0 ? (std::numeric_limits<uint64_t>::max)() : 0},
+ lo_{static_cast<uint64_t>(v)} {}
+constexpr uint128::uint128(long v) // NOLINT(runtime/int)
+ : hi_{v < 0 ? (std::numeric_limits<uint64_t>::max)() : 0},
+ lo_{static_cast<uint64_t>(v)} {}
+constexpr uint128::uint128(long long v) // NOLINT(runtime/int)
+ : hi_{v < 0 ? (std::numeric_limits<uint64_t>::max)() : 0},
+ lo_{static_cast<uint64_t>(v)} {}
+
+constexpr uint128::uint128(unsigned int v) : hi_{0}, lo_{v} {}
+// NOLINTNEXTLINE(runtime/int)
+constexpr uint128::uint128(unsigned long v) : hi_{0}, lo_{v} {}
+// NOLINTNEXTLINE(runtime/int)
+constexpr uint128::uint128(unsigned long long v) : hi_{0}, lo_{v} {}
+
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+constexpr uint128::uint128(__int128 v)
+ : hi_{static_cast<uint64_t>(static_cast<unsigned __int128>(v) >> 64)},
+ lo_{static_cast<uint64_t>(v & ~uint64_t{0})} {}
+constexpr uint128::uint128(unsigned __int128 v)
+ : hi_{static_cast<uint64_t>(v >> 64)},
+ lo_{static_cast<uint64_t>(v & ~uint64_t{0})} {}
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
+constexpr uint128::uint128(int128 v)
+ : hi_{static_cast<uint64_t>(Int128High64(v))}, lo_{Int128Low64(v)} {}
+
+#else // byte order
+#error "Unsupported byte order: must be little-endian or big-endian."
+#endif // byte order
+
+// Conversion operators to integer types.
+
+constexpr uint128::operator bool() const { return lo_ || hi_; }
+
+constexpr uint128::operator char() const { return static_cast<char>(lo_); }
+
+constexpr uint128::operator signed char() const {
+ return static_cast<signed char>(lo_);
+}
+
+constexpr uint128::operator unsigned char() const {
+ return static_cast<unsigned char>(lo_);
+}
+
+constexpr uint128::operator char16_t() const {
+ return static_cast<char16_t>(lo_);
+}
+
+constexpr uint128::operator char32_t() const {
+ return static_cast<char32_t>(lo_);
+}
+
+constexpr uint128::operator ABSL_INTERNAL_WCHAR_T() const {
+ return static_cast<ABSL_INTERNAL_WCHAR_T>(lo_);
+}
+
+// NOLINTNEXTLINE(runtime/int)
+constexpr uint128::operator short() const { return static_cast<short>(lo_); }
+
+constexpr uint128::operator unsigned short() const { // NOLINT(runtime/int)
+ return static_cast<unsigned short>(lo_); // NOLINT(runtime/int)
+}
+
+constexpr uint128::operator int() const { return static_cast<int>(lo_); }
+
+constexpr uint128::operator unsigned int() const {
+ return static_cast<unsigned int>(lo_);
+}
+
+// NOLINTNEXTLINE(runtime/int)
+constexpr uint128::operator long() const { return static_cast<long>(lo_); }
+
+constexpr uint128::operator unsigned long() const { // NOLINT(runtime/int)
+ return static_cast<unsigned long>(lo_); // NOLINT(runtime/int)
+}
+
+constexpr uint128::operator long long() const { // NOLINT(runtime/int)
+ return static_cast<long long>(lo_); // NOLINT(runtime/int)
+}
+
+constexpr uint128::operator unsigned long long() const { // NOLINT(runtime/int)
+ return static_cast<unsigned long long>(lo_); // NOLINT(runtime/int)
+}
+
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+constexpr uint128::operator __int128() const {
+ return (static_cast<__int128>(hi_) << 64) + lo_;
+}
+
+constexpr uint128::operator unsigned __int128() const {
+ return (static_cast<unsigned __int128>(hi_) << 64) + lo_;
+}
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
+// Conversion operators to floating point types.
+
+inline uint128::operator float() const {
+ return static_cast<float>(lo_) + std::ldexp(static_cast<float>(hi_), 64);
+}
+
+inline uint128::operator double() const {
+ return static_cast<double>(lo_) + std::ldexp(static_cast<double>(hi_), 64);
+}
+
+inline uint128::operator long double() const {
+ return static_cast<long double>(lo_) +
+ std::ldexp(static_cast<long double>(hi_), 64);
+}
+
+// Comparison operators.
+
+constexpr bool operator==(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ return static_cast<unsigned __int128>(lhs) ==
+ static_cast<unsigned __int128>(rhs);
+#else
+ return (Uint128Low64(lhs) == Uint128Low64(rhs) &&
+ Uint128High64(lhs) == Uint128High64(rhs));
+#endif
+}
+
+constexpr bool operator!=(uint128 lhs, uint128 rhs) { return !(lhs == rhs); }
+
+constexpr bool operator<(uint128 lhs, uint128 rhs) {
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ return static_cast<unsigned __int128>(lhs) <
+ static_cast<unsigned __int128>(rhs);
+#else
+ return (Uint128High64(lhs) == Uint128High64(rhs))
+ ? (Uint128Low64(lhs) < Uint128Low64(rhs))
+ : (Uint128High64(lhs) < Uint128High64(rhs));
+#endif
+}
+
+constexpr bool operator>(uint128 lhs, uint128 rhs) { return rhs < lhs; }
+
+constexpr bool operator<=(uint128 lhs, uint128 rhs) { return !(rhs < lhs); }
+
+constexpr bool operator>=(uint128 lhs, uint128 rhs) { return !(lhs < rhs); }
+
+// Unary operators.
+
+constexpr inline uint128 operator+(uint128 val) {
+ return val;
+}
+
+constexpr inline int128 operator+(int128 val) {
+ return val;
+}
+
+constexpr uint128 operator-(uint128 val) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ return -static_cast<unsigned __int128>(val);
+#else
+ return MakeUint128(
+ ~Uint128High64(val) + static_cast<unsigned long>(Uint128Low64(val) == 0),
+ ~Uint128Low64(val) + 1);
+#endif
+}
+
+constexpr inline bool operator!(uint128 val) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ return !static_cast<unsigned __int128>(val);
+#else
+ return !Uint128High64(val) && !Uint128Low64(val);
+#endif
+}
+
+// Bitwise operators.
+
+constexpr inline uint128 operator~(uint128 val) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ return ~static_cast<unsigned __int128>(val);
+#else
+ return MakeUint128(~Uint128High64(val), ~Uint128Low64(val));
+#endif
+}
+
+constexpr inline uint128 operator|(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ return static_cast<unsigned __int128>(lhs) |
+ static_cast<unsigned __int128>(rhs);
+#else
+ return MakeUint128(Uint128High64(lhs) | Uint128High64(rhs),
+ Uint128Low64(lhs) | Uint128Low64(rhs));
+#endif
+}
+
+constexpr inline uint128 operator&(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ return static_cast<unsigned __int128>(lhs) &
+ static_cast<unsigned __int128>(rhs);
+#else
+ return MakeUint128(Uint128High64(lhs) & Uint128High64(rhs),
+ Uint128Low64(lhs) & Uint128Low64(rhs));
+#endif
+}
+
+constexpr inline uint128 operator^(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ return static_cast<unsigned __int128>(lhs) ^
+ static_cast<unsigned __int128>(rhs);
+#else
+ return MakeUint128(Uint128High64(lhs) ^ Uint128High64(rhs),
+ Uint128Low64(lhs) ^ Uint128Low64(rhs));
+#endif
+}
+
+inline uint128& uint128::operator|=(uint128 other) {
+ *this = *this | other;
+ return *this;
+}
+
+inline uint128& uint128::operator&=(uint128 other) {
+ *this = *this & other;
+ return *this;
+}
+
+inline uint128& uint128::operator^=(uint128 other) {
+ *this = *this ^ other;
+ return *this;
+}
+
+// Arithmetic operators.
+
+constexpr uint128 operator<<(uint128 lhs, int amount) {
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ return static_cast<unsigned __int128>(lhs) << amount;
+#else
+  // uint64_t shifts of >= 64 are undefined, so we need some special-casing.
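+  // For example (illustrative): lhs << 68 takes the amount >= 64 branch and
+  // yields MakeUint128(Uint128Low64(lhs) << 4, 0).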
+ return amount >= 64 ? MakeUint128(Uint128Low64(lhs) << (amount - 64), 0)
+ : amount == 0 ? lhs
+ : MakeUint128((Uint128High64(lhs) << amount) |
+ (Uint128Low64(lhs) >> (64 - amount)),
+ Uint128Low64(lhs) << amount);
+#endif
+}
+
+constexpr uint128 operator>>(uint128 lhs, int amount) {
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+ return static_cast<unsigned __int128>(lhs) >> amount;
+#else
+  // uint64_t shifts of >= 64 are undefined, so we need some special-casing.
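+  // For example (illustrative): lhs >> 68 takes the amount >= 64 branch and
+  // yields MakeUint128(0, Uint128High64(lhs) >> 4).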
+ return amount >= 64 ? MakeUint128(0, Uint128High64(lhs) >> (amount - 64))
+ : amount == 0 ? lhs
+ : MakeUint128(Uint128High64(lhs) >> amount,
+ (Uint128Low64(lhs) >> amount) |
+ (Uint128High64(lhs) << (64 - amount)));
+#endif
+}
+
+#if !defined(ABSL_HAVE_INTRINSIC_INT128)
+namespace int128_internal {
+constexpr uint128 AddResult(uint128 result, uint128 lhs) {
+ // check for carry
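+  // Unsigned addition wraps, so the low word overflowed iff the computed low
+  // word compares less than either operand's low word.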
+ return (Uint128Low64(result) < Uint128Low64(lhs))
+ ? MakeUint128(Uint128High64(result) + 1, Uint128Low64(result))
+ : result;
+}
+} // namespace int128_internal
+#endif
+
+constexpr uint128 operator+(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ return static_cast<unsigned __int128>(lhs) +
+ static_cast<unsigned __int128>(rhs);
+#else
+ return int128_internal::AddResult(
+ MakeUint128(Uint128High64(lhs) + Uint128High64(rhs),
+ Uint128Low64(lhs) + Uint128Low64(rhs)),
+ lhs);
+#endif
+}
+
+#if !defined(ABSL_HAVE_INTRINSIC_INT128)
+namespace int128_internal {
+constexpr uint128 SubtractResult(uint128 result, uint128 lhs, uint128 rhs) {
+  // check for borrow
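+  // Unsigned subtraction wraps, so a borrow out of the low word occurred iff
+  // lhs's low word compares less than rhs's low word.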
+ return (Uint128Low64(lhs) < Uint128Low64(rhs))
+ ? MakeUint128(Uint128High64(result) - 1, Uint128Low64(result))
+ : result;
+}
+} // namespace int128_internal
+#endif
+
+constexpr uint128 operator-(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ return static_cast<unsigned __int128>(lhs) -
+ static_cast<unsigned __int128>(rhs);
+#else
+  return int128_internal::SubtractResult(
+ MakeUint128(Uint128High64(lhs) - Uint128High64(rhs),
+ Uint128Low64(lhs) - Uint128Low64(rhs)),
+ lhs, rhs);
+#endif
+}
+
+inline uint128 operator*(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ // TODO(strel) Remove once alignment issues are resolved and unsigned __int128
+ // can be used for uint128 storage.
+ return static_cast<unsigned __int128>(lhs) *
+ static_cast<unsigned __int128>(rhs);
+#elif defined(_MSC_VER) && defined(_M_X64)
+ uint64_t carry;
+ uint64_t low = _umul128(Uint128Low64(lhs), Uint128Low64(rhs), &carry);
+ return MakeUint128(Uint128Low64(lhs) * Uint128High64(rhs) +
+ Uint128High64(lhs) * Uint128Low64(rhs) + carry,
+ low);
+#else  // ABSL_HAVE_INTRINSIC_INT128
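+  // Portable fallback: schoolbook multiplication. Splitting each low word
+  // into 32-bit halves keeps every partial product within uint64_t; the two
+  // cross terms are then shifted into place with 128-bit additions.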
+ uint64_t a32 = Uint128Low64(lhs) >> 32;
+ uint64_t a00 = Uint128Low64(lhs) & 0xffffffff;
+ uint64_t b32 = Uint128Low64(rhs) >> 32;
+ uint64_t b00 = Uint128Low64(rhs) & 0xffffffff;
+ uint128 result =
+ MakeUint128(Uint128High64(lhs) * Uint128Low64(rhs) +
+ Uint128Low64(lhs) * Uint128High64(rhs) + a32 * b32,
+ a00 * b00);
+ result += uint128(a32 * b00) << 32;
+ result += uint128(a00 * b32) << 32;
+ return result;
+#endif  // ABSL_HAVE_INTRINSIC_INT128
+}
+
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+inline uint128 operator/(uint128 lhs, uint128 rhs) {
+ return static_cast<unsigned __int128>(lhs) /
+ static_cast<unsigned __int128>(rhs);
+}
+
+inline uint128 operator%(uint128 lhs, uint128 rhs) {
+ return static_cast<unsigned __int128>(lhs) %
+ static_cast<unsigned __int128>(rhs);
+}
+#endif
+
+// Increment/decrement operators.
+
+inline uint128 uint128::operator++(int) {
+ uint128 tmp(*this);
+ *this += 1;
+ return tmp;
+}
+
+inline uint128 uint128::operator--(int) {
+ uint128 tmp(*this);
+ *this -= 1;
+ return tmp;
+}
+
+inline uint128& uint128::operator++() {
+ *this += 1;
+ return *this;
+}
+
+inline uint128& uint128::operator--() {
+ *this -= 1;
+ return *this;
+}
+
+constexpr int128 MakeInt128(int64_t high, uint64_t low) {
+ return int128(high, low);
+}
+
+// Assignment from integer types.
+inline int128& int128::operator=(int v) {
+ return *this = int128(v);
+}
+
+inline int128& int128::operator=(unsigned int v) {
+ return *this = int128(v);
+}
+
+inline int128& int128::operator=(long v) { // NOLINT(runtime/int)
+ return *this = int128(v);
+}
+
+// NOLINTNEXTLINE(runtime/int)
+inline int128& int128::operator=(unsigned long v) {
+ return *this = int128(v);
+}
+
+// NOLINTNEXTLINE(runtime/int)
+inline int128& int128::operator=(long long v) {
+ return *this = int128(v);
+}
+
+// NOLINTNEXTLINE(runtime/int)
+inline int128& int128::operator=(unsigned long long v) {
+ return *this = int128(v);
+}
+
+// Arithmetic operators.
+constexpr int128 operator-(int128 v);
+constexpr int128 operator+(int128 lhs, int128 rhs);
+constexpr int128 operator-(int128 lhs, int128 rhs);
+int128 operator*(int128 lhs, int128 rhs);
+int128 operator/(int128 lhs, int128 rhs);
+int128 operator%(int128 lhs, int128 rhs);
+constexpr int128 operator|(int128 lhs, int128 rhs);
+constexpr int128 operator&(int128 lhs, int128 rhs);
+constexpr int128 operator^(int128 lhs, int128 rhs);
+constexpr int128 operator<<(int128 lhs, int amount);
+constexpr int128 operator>>(int128 lhs, int amount);
+
+inline int128& int128::operator+=(int128 other) {
+ *this = *this + other;
+ return *this;
+}
+
+inline int128& int128::operator-=(int128 other) {
+ *this = *this - other;
+ return *this;
+}
+
+inline int128& int128::operator*=(int128 other) {
+ *this = *this * other;
+ return *this;
+}
+
+inline int128& int128::operator/=(int128 other) {
+ *this = *this / other;
+ return *this;
+}
+
+inline int128& int128::operator%=(int128 other) {
+ *this = *this % other;
+ return *this;
+}
+
+inline int128& int128::operator|=(int128 other) {
+ *this = *this | other;
+ return *this;
+}
+
+inline int128& int128::operator&=(int128 other) {
+ *this = *this & other;
+ return *this;
+}
+
+inline int128& int128::operator^=(int128 other) {
+ *this = *this ^ other;
+ return *this;
+}
+
+inline int128& int128::operator<<=(int amount) {
+ *this = *this << amount;
+ return *this;
+}
+
+inline int128& int128::operator>>=(int amount) {
+ *this = *this >> amount;
+ return *this;
+}
+
+// Forward declaration for comparison operators.
+constexpr bool operator!=(int128 lhs, int128 rhs);
+
+namespace int128_internal {
+
+// Casts from unsigned to signed while preserving the underlying binary
+// representation.
+constexpr int64_t BitCastToSigned(uint64_t v) {
+  // Casting an unsigned integer to a signed integer of the same width is
+  // implementation-defined behavior if the source value would not fit in the
+  // destination type. We step around it with a round-trip bitwise-not
+  // operation to keep this function constexpr. Clang, GCC, and MSVC optimize
+  // this to a no-op on x86-64.
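+  //
+  // For example (illustrative): v = 0xFFFFFFFFFFFFFFFF has the high bit set,
+  // so this computes ~static_cast<int64_t>(~v) = ~int64_t{0} = -1, the two's
+  // complement reinterpretation of the bits.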
+ return v & (uint64_t{1} << 63) ? ~static_cast<int64_t>(~v)
+ : static_cast<int64_t>(v);
+}
+
+} // namespace int128_internal
+
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+#include "y_absl/numeric/int128_have_intrinsic.inc" // IWYU pragma: export
+#else // ABSL_HAVE_INTRINSIC_INT128
+#include "y_absl/numeric/int128_no_intrinsic.inc" // IWYU pragma: export
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#undef ABSL_INTERNAL_WCHAR_T
+
+#endif // ABSL_NUMERIC_INT128_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_have_intrinsic.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_have_intrinsic.inc
new file mode 100644
index 00000000000..3945fa29837
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_have_intrinsic.inc
@@ -0,0 +1,296 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains :int128 implementation details that depend on internal
+// representation when ABSL_HAVE_INTRINSIC_INT128 is defined. This file is
+// included by int128.h and relies on ABSL_INTERNAL_WCHAR_T being defined.
+
+namespace int128_internal {
+
+// Casts from unsigned to signed while preserving the underlying binary
+// representation.
+constexpr __int128 BitCastToSigned(unsigned __int128 v) {
+  // Casting an unsigned integer to a signed integer of the same width is
+  // implementation-defined behavior if the source value would not fit in the
+  // destination type. We step around it with a round-trip bitwise-not
+  // operation to keep this function constexpr. Clang and GCC optimize this
+  // to a no-op on x86-64.
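+  //
+  // For example (illustrative): an all-ones bit pattern round-trips through
+  // ~v == 0 and converts to __int128{-1} without ever casting a value that
+  // is out of range for the signed type.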
+ return v & (static_cast<unsigned __int128>(1) << 127)
+ ? ~static_cast<__int128>(~v)
+ : static_cast<__int128>(v);
+}
+
+} // namespace int128_internal
+
+inline int128& int128::operator=(__int128 v) {
+ v_ = v;
+ return *this;
+}
+
+constexpr uint64_t Int128Low64(int128 v) {
+ return static_cast<uint64_t>(v.v_ & ~uint64_t{0});
+}
+
+constexpr int64_t Int128High64(int128 v) {
+ // Initially cast to unsigned to prevent a right shift on a negative value.
+ return int128_internal::BitCastToSigned(
+ static_cast<uint64_t>(static_cast<unsigned __int128>(v.v_) >> 64));
+}
+
+constexpr int128::int128(int64_t high, uint64_t low)
+ // Initially cast to unsigned to prevent a left shift that overflows.
+ : v_(int128_internal::BitCastToSigned(static_cast<unsigned __int128>(high)
+ << 64) |
+ low) {}
+
+constexpr int128::int128(int v) : v_{v} {}
+
+constexpr int128::int128(long v) : v_{v} {} // NOLINT(runtime/int)
+
+constexpr int128::int128(long long v) : v_{v} {} // NOLINT(runtime/int)
+
+constexpr int128::int128(__int128 v) : v_{v} {}
+
+constexpr int128::int128(unsigned int v) : v_{v} {}
+
+constexpr int128::int128(unsigned long v) : v_{v} {} // NOLINT(runtime/int)
+
+// NOLINTNEXTLINE(runtime/int)
+constexpr int128::int128(unsigned long long v) : v_{v} {}
+
+constexpr int128::int128(unsigned __int128 v) : v_{static_cast<__int128>(v)} {}
+
+inline int128::int128(float v) {
+ v_ = static_cast<__int128>(v);
+}
+
+inline int128::int128(double v) {
+ v_ = static_cast<__int128>(v);
+}
+
+inline int128::int128(long double v) {
+ v_ = static_cast<__int128>(v);
+}
+
+constexpr int128::int128(uint128 v) : v_{static_cast<__int128>(v)} {}
+
+constexpr int128::operator bool() const { return static_cast<bool>(v_); }
+
+constexpr int128::operator char() const { return static_cast<char>(v_); }
+
+constexpr int128::operator signed char() const {
+ return static_cast<signed char>(v_);
+}
+
+constexpr int128::operator unsigned char() const {
+ return static_cast<unsigned char>(v_);
+}
+
+constexpr int128::operator char16_t() const {
+ return static_cast<char16_t>(v_);
+}
+
+constexpr int128::operator char32_t() const {
+ return static_cast<char32_t>(v_);
+}
+
+constexpr int128::operator ABSL_INTERNAL_WCHAR_T() const {
+ return static_cast<ABSL_INTERNAL_WCHAR_T>(v_);
+}
+
+constexpr int128::operator short() const { // NOLINT(runtime/int)
+ return static_cast<short>(v_); // NOLINT(runtime/int)
+}
+
+constexpr int128::operator unsigned short() const { // NOLINT(runtime/int)
+ return static_cast<unsigned short>(v_); // NOLINT(runtime/int)
+}
+
+constexpr int128::operator int() const {
+ return static_cast<int>(v_);
+}
+
+constexpr int128::operator unsigned int() const {
+ return static_cast<unsigned int>(v_);
+}
+
+constexpr int128::operator long() const { // NOLINT(runtime/int)
+ return static_cast<long>(v_); // NOLINT(runtime/int)
+}
+
+constexpr int128::operator unsigned long() const { // NOLINT(runtime/int)
+ return static_cast<unsigned long>(v_); // NOLINT(runtime/int)
+}
+
+constexpr int128::operator long long() const { // NOLINT(runtime/int)
+ return static_cast<long long>(v_); // NOLINT(runtime/int)
+}
+
+constexpr int128::operator unsigned long long() const { // NOLINT(runtime/int)
+ return static_cast<unsigned long long>(v_); // NOLINT(runtime/int)
+}
+
+constexpr int128::operator __int128() const { return v_; }
+
+constexpr int128::operator unsigned __int128() const {
+ return static_cast<unsigned __int128>(v_);
+}
+
+// Clang on PowerPC sometimes produces incorrect __int128 to floating point
+// conversions. In that case, we do the conversion with an implementation
+// similar to the conversion operators in int128_no_intrinsic.inc.
+#if defined(__clang__) && !defined(__ppc64__)
+inline int128::operator float() const { return static_cast<float>(v_); }
+
+inline int128::operator double() const { return static_cast<double>(v_); }
+
+inline int128::operator long double() const {
+ return static_cast<long double>(v_);
+}
+
+#else // Clang on PowerPC
+// Forward declaration for conversion operators to floating point types.
+constexpr int128 operator-(int128 v);
+constexpr bool operator!=(int128 lhs, int128 rhs);
+
+inline int128::operator float() const {
+ // We must convert the absolute value and then negate as needed, because
+ // floating point types are typically sign-magnitude. Otherwise, the
+ // difference between the high and low 64 bits when interpreted as two's
+ // complement overwhelms the precision of the mantissa.
+ //
+ // Also check to make sure we don't negate Int128Min()
+ return v_ < 0 && *this != Int128Min()
+ ? -static_cast<float>(-*this)
+ : static_cast<float>(Int128Low64(*this)) +
+ std::ldexp(static_cast<float>(Int128High64(*this)), 64);
+}
+
+inline int128::operator double() const {
+ // See comment in int128::operator float() above.
+ return v_ < 0 && *this != Int128Min()
+ ? -static_cast<double>(-*this)
+ : static_cast<double>(Int128Low64(*this)) +
+ std::ldexp(static_cast<double>(Int128High64(*this)), 64);
+}
+
+inline int128::operator long double() const {
+ // See comment in int128::operator float() above.
+ return v_ < 0 && *this != Int128Min()
+ ? -static_cast<long double>(-*this)
+ : static_cast<long double>(Int128Low64(*this)) +
+ std::ldexp(static_cast<long double>(Int128High64(*this)),
+ 64);
+}
+#endif // Clang on PowerPC
+
+// Comparison operators.
+
+constexpr bool operator==(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) == static_cast<__int128>(rhs);
+}
+
+constexpr bool operator!=(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) != static_cast<__int128>(rhs);
+}
+
+constexpr bool operator<(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) < static_cast<__int128>(rhs);
+}
+
+constexpr bool operator>(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) > static_cast<__int128>(rhs);
+}
+
+constexpr bool operator<=(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) <= static_cast<__int128>(rhs);
+}
+
+constexpr bool operator>=(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) >= static_cast<__int128>(rhs);
+}
+
+// Unary operators.
+
+constexpr int128 operator-(int128 v) { return -static_cast<__int128>(v); }
+
+constexpr bool operator!(int128 v) { return !static_cast<__int128>(v); }
+
+constexpr int128 operator~(int128 val) { return ~static_cast<__int128>(val); }
+
+// Arithmetic operators.
+
+constexpr int128 operator+(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) + static_cast<__int128>(rhs);
+}
+
+constexpr int128 operator-(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) - static_cast<__int128>(rhs);
+}
+
+inline int128 operator*(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) * static_cast<__int128>(rhs);
+}
+
+inline int128 operator/(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) / static_cast<__int128>(rhs);
+}
+
+inline int128 operator%(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) % static_cast<__int128>(rhs);
+}
+
+inline int128 int128::operator++(int) {
+ int128 tmp(*this);
+ ++v_;
+ return tmp;
+}
+
+inline int128 int128::operator--(int) {
+ int128 tmp(*this);
+ --v_;
+ return tmp;
+}
+
+inline int128& int128::operator++() {
+ ++v_;
+ return *this;
+}
+
+inline int128& int128::operator--() {
+ --v_;
+ return *this;
+}
+
+constexpr int128 operator|(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) | static_cast<__int128>(rhs);
+}
+
+constexpr int128 operator&(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) & static_cast<__int128>(rhs);
+}
+
+constexpr int128 operator^(int128 lhs, int128 rhs) {
+ return static_cast<__int128>(lhs) ^ static_cast<__int128>(rhs);
+}
+
+constexpr int128 operator<<(int128 lhs, int amount) {
+ return static_cast<__int128>(lhs) << amount;
+}
+
+constexpr int128 operator>>(int128 lhs, int amount) {
+ return static_cast<__int128>(lhs) >> amount;
+}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_no_intrinsic.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_no_intrinsic.inc
new file mode 100644
index 00000000000..8834804cec4
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_no_intrinsic.inc
@@ -0,0 +1,311 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains :int128 implementation details that depend on internal
+// representation when ABSL_HAVE_INTRINSIC_INT128 is *not* defined. This file
+// is included by int128.h and relies on ABSL_INTERNAL_WCHAR_T being defined.
+
+constexpr uint64_t Int128Low64(int128 v) { return v.lo_; }
+
+constexpr int64_t Int128High64(int128 v) { return v.hi_; }
+
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+
+constexpr int128::int128(int64_t high, uint64_t low)
+    : lo_{low}, hi_{high} {}
+
+constexpr int128::int128(int v)
+ : lo_{static_cast<uint64_t>(v)}, hi_{v < 0 ? ~int64_t{0} : 0} {}
+constexpr int128::int128(long v) // NOLINT(runtime/int)
+ : lo_{static_cast<uint64_t>(v)}, hi_{v < 0 ? ~int64_t{0} : 0} {}
+constexpr int128::int128(long long v) // NOLINT(runtime/int)
+ : lo_{static_cast<uint64_t>(v)}, hi_{v < 0 ? ~int64_t{0} : 0} {}
+
+constexpr int128::int128(unsigned int v) : lo_{v}, hi_{0} {}
+// NOLINTNEXTLINE(runtime/int)
+constexpr int128::int128(unsigned long v) : lo_{v}, hi_{0} {}
+// NOLINTNEXTLINE(runtime/int)
+constexpr int128::int128(unsigned long long v) : lo_{v}, hi_{0} {}
+
+constexpr int128::int128(uint128 v)
+ : lo_{Uint128Low64(v)}, hi_{static_cast<int64_t>(Uint128High64(v))} {}
+
+#elif defined(ABSL_IS_BIG_ENDIAN)
+
+constexpr int128::int128(int64_t high, uint64_t low)
+    : hi_{high}, lo_{low} {}
+
+constexpr int128::int128(int v)
+ : hi_{v < 0 ? ~int64_t{0} : 0}, lo_{static_cast<uint64_t>(v)} {}
+constexpr int128::int128(long v) // NOLINT(runtime/int)
+ : hi_{v < 0 ? ~int64_t{0} : 0}, lo_{static_cast<uint64_t>(v)} {}
+constexpr int128::int128(long long v) // NOLINT(runtime/int)
+ : hi_{v < 0 ? ~int64_t{0} : 0}, lo_{static_cast<uint64_t>(v)} {}
+
+constexpr int128::int128(unsigned int v) : hi_{0}, lo_{v} {}
+// NOLINTNEXTLINE(runtime/int)
+constexpr int128::int128(unsigned long v) : hi_{0}, lo_{v} {}
+// NOLINTNEXTLINE(runtime/int)
+constexpr int128::int128(unsigned long long v) : hi_{0}, lo_{v} {}
+
+constexpr int128::int128(uint128 v)
+ : hi_{static_cast<int64_t>(Uint128High64(v))}, lo_{Uint128Low64(v)} {}
+
+#else // byte order
+#error "Unsupported byte order: must be little-endian or big-endian."
+#endif // byte order
+
+constexpr int128::operator bool() const { return lo_ || hi_; }
+
+constexpr int128::operator char() const {
+ // NOLINTNEXTLINE(runtime/int)
+ return static_cast<char>(static_cast<long long>(*this));
+}
+
+constexpr int128::operator signed char() const {
+ // NOLINTNEXTLINE(runtime/int)
+ return static_cast<signed char>(static_cast<long long>(*this));
+}
+
+constexpr int128::operator unsigned char() const {
+ return static_cast<unsigned char>(lo_);
+}
+
+constexpr int128::operator char16_t() const {
+ return static_cast<char16_t>(lo_);
+}
+
+constexpr int128::operator char32_t() const {
+ return static_cast<char32_t>(lo_);
+}
+
+constexpr int128::operator ABSL_INTERNAL_WCHAR_T() const {
+ // NOLINTNEXTLINE(runtime/int)
+ return static_cast<ABSL_INTERNAL_WCHAR_T>(static_cast<long long>(*this));
+}
+
+constexpr int128::operator short() const { // NOLINT(runtime/int)
+ // NOLINTNEXTLINE(runtime/int)
+ return static_cast<short>(static_cast<long long>(*this));
+}
+
+constexpr int128::operator unsigned short() const { // NOLINT(runtime/int)
+ return static_cast<unsigned short>(lo_); // NOLINT(runtime/int)
+}
+
+constexpr int128::operator int() const {
+ // NOLINTNEXTLINE(runtime/int)
+ return static_cast<int>(static_cast<long long>(*this));
+}
+
+constexpr int128::operator unsigned int() const {
+ return static_cast<unsigned int>(lo_);
+}
+
+constexpr int128::operator long() const { // NOLINT(runtime/int)
+ // NOLINTNEXTLINE(runtime/int)
+ return static_cast<long>(static_cast<long long>(*this));
+}
+
+constexpr int128::operator unsigned long() const { // NOLINT(runtime/int)
+ return static_cast<unsigned long>(lo_); // NOLINT(runtime/int)
+}
+
+constexpr int128::operator long long() const { // NOLINT(runtime/int)
+ // We don't bother checking the value of hi_. If *this < 0, lo_'s high bit
+ // must be set in order for the value to fit into a long long. Conversely, if
+ // lo_'s high bit is set, *this must be < 0 for the value to fit.
+ return int128_internal::BitCastToSigned(lo_);
+}
+
+constexpr int128::operator unsigned long long() const { // NOLINT(runtime/int)
+ return static_cast<unsigned long long>(lo_); // NOLINT(runtime/int)
+}
+
+inline int128::operator float() const {
+ // We must convert the absolute value and then negate as needed, because
+ // floating point types are typically sign-magnitude. Otherwise, the
+ // difference between the high and low 64 bits when interpreted as two's
+ // complement overwhelms the precision of the mantissa.
+ //
+ // Also check to make sure we don't negate Int128Min()
+ return hi_ < 0 && *this != Int128Min()
+ ? -static_cast<float>(-*this)
+ : static_cast<float>(lo_) +
+ std::ldexp(static_cast<float>(hi_), 64);
+}
+
+inline int128::operator double() const {
+ // See comment in int128::operator float() above.
+ return hi_ < 0 && *this != Int128Min()
+ ? -static_cast<double>(-*this)
+ : static_cast<double>(lo_) +
+ std::ldexp(static_cast<double>(hi_), 64);
+}
+
+inline int128::operator long double() const {
+ // See comment in int128::operator float() above.
+ return hi_ < 0 && *this != Int128Min()
+ ? -static_cast<long double>(-*this)
+ : static_cast<long double>(lo_) +
+ std::ldexp(static_cast<long double>(hi_), 64);
+}
+
+// Comparison operators.
+
+constexpr bool operator==(int128 lhs, int128 rhs) {
+ return (Int128Low64(lhs) == Int128Low64(rhs) &&
+ Int128High64(lhs) == Int128High64(rhs));
+}
+
+constexpr bool operator!=(int128 lhs, int128 rhs) { return !(lhs == rhs); }
+
+constexpr bool operator<(int128 lhs, int128 rhs) {
+ return (Int128High64(lhs) == Int128High64(rhs))
+ ? (Int128Low64(lhs) < Int128Low64(rhs))
+ : (Int128High64(lhs) < Int128High64(rhs));
+}
+
+constexpr bool operator>(int128 lhs, int128 rhs) {
+ return (Int128High64(lhs) == Int128High64(rhs))
+ ? (Int128Low64(lhs) > Int128Low64(rhs))
+ : (Int128High64(lhs) > Int128High64(rhs));
+}
+
+constexpr bool operator<=(int128 lhs, int128 rhs) { return !(lhs > rhs); }
+
+constexpr bool operator>=(int128 lhs, int128 rhs) { return !(lhs < rhs); }
+
+// Unary operators.
+
+constexpr int128 operator-(int128 v) {
+ return MakeInt128(~Int128High64(v) + (Int128Low64(v) == 0),
+ ~Int128Low64(v) + 1);
+}
+
+constexpr bool operator!(int128 v) {
+ return !Int128Low64(v) && !Int128High64(v);
+}
+
+constexpr int128 operator~(int128 val) {
+ return MakeInt128(~Int128High64(val), ~Int128Low64(val));
+}
+
+// Arithmetic operators.
+
+namespace int128_internal {
+constexpr int128 SignedAddResult(int128 result, int128 lhs) {
+ // check for carry
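+  // Same wraparound check as int128_internal::AddResult in int128.h: the low
+  // word overflowed iff the computed low word is less than lhs's low word.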
+ return (Int128Low64(result) < Int128Low64(lhs))
+ ? MakeInt128(Int128High64(result) + 1, Int128Low64(result))
+ : result;
+}
+} // namespace int128_internal
+constexpr int128 operator+(int128 lhs, int128 rhs) {
+ return int128_internal::SignedAddResult(
+ MakeInt128(Int128High64(lhs) + Int128High64(rhs),
+ Int128Low64(lhs) + Int128Low64(rhs)),
+ lhs);
+}
+
+namespace int128_internal {
+constexpr int128 SignedSubtractResult(int128 result, int128 lhs, int128 rhs) {
+  // check for borrow
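+  // A borrow out of the low word occurred iff Int128Low64(lhs) compares less
+  // than Int128Low64(rhs).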
+ return (Int128Low64(lhs) < Int128Low64(rhs))
+ ? MakeInt128(Int128High64(result) - 1, Int128Low64(result))
+ : result;
+}
+} // namespace int128_internal
+constexpr int128 operator-(int128 lhs, int128 rhs) {
+  return int128_internal::SignedSubtractResult(
+ MakeInt128(Int128High64(lhs) - Int128High64(rhs),
+ Int128Low64(lhs) - Int128Low64(rhs)),
+ lhs, rhs);
+}
+
+inline int128 operator*(int128 lhs, int128 rhs) {
+ return MakeInt128(
+ int128_internal::BitCastToSigned(Uint128High64(uint128(lhs) * rhs)),
+ Uint128Low64(uint128(lhs) * rhs));
+}
+
+inline int128 int128::operator++(int) {
+ int128 tmp(*this);
+ *this += 1;
+ return tmp;
+}
+
+inline int128 int128::operator--(int) {
+ int128 tmp(*this);
+ *this -= 1;
+ return tmp;
+}
+
+inline int128& int128::operator++() {
+ *this += 1;
+ return *this;
+}
+
+inline int128& int128::operator--() {
+ *this -= 1;
+ return *this;
+}
+
+constexpr int128 operator|(int128 lhs, int128 rhs) {
+ return MakeInt128(Int128High64(lhs) | Int128High64(rhs),
+ Int128Low64(lhs) | Int128Low64(rhs));
+}
+
+constexpr int128 operator&(int128 lhs, int128 rhs) {
+ return MakeInt128(Int128High64(lhs) & Int128High64(rhs),
+ Int128Low64(lhs) & Int128Low64(rhs));
+}
+
+constexpr int128 operator^(int128 lhs, int128 rhs) {
+ return MakeInt128(Int128High64(lhs) ^ Int128High64(rhs),
+ Int128Low64(lhs) ^ Int128Low64(rhs));
+}
+
+constexpr int128 operator<<(int128 lhs, int amount) {
+ // int64_t shifts of >= 64 are undefined, so we need some special-casing.
+ return amount >= 64
+ ? MakeInt128(
+ static_cast<int64_t>(Int128Low64(lhs) << (amount - 64)), 0)
+ : amount == 0
+ ? lhs
+ : MakeInt128(
+ (Int128High64(lhs) << amount) |
+ static_cast<int64_t>(Int128Low64(lhs) >> (64 - amount)),
+ Int128Low64(lhs) << amount);
+}
+
+constexpr int128 operator>>(int128 lhs, int amount) {
+ // int64_t shifts of >= 64 are undefined, so we need some special-casing.
+  // The (Int128High64(lhs) >> 32) >> 32 "trick" correctly initializes the
+  // most significant int64 with all zeros or all ones. It accounts for
+  // whether the number is negative or positive, and for whether the current
+  // architecture does arithmetic or logical right shifts of negative numbers.
+ return amount >= 64
+ ? MakeInt128(
+ (Int128High64(lhs) >> 32) >> 32,
+ static_cast<uint64_t>(Int128High64(lhs) >> (amount - 64)))
+ : amount == 0
+ ? lhs
+ : MakeInt128(Int128High64(lhs) >> amount,
+ (Int128Low64(lhs) >> amount) |
+ (static_cast<uint64_t>(Int128High64(lhs))
+ << (64 - amount)));
+}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/internal/bits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/internal/bits.h
new file mode 100644
index 00000000000..2a517399b04
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/internal/bits.h
@@ -0,0 +1,358 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_NUMERIC_INTERNAL_BITS_H_
+#define ABSL_NUMERIC_INTERNAL_BITS_H_
+
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+// Clang on Windows has __builtin_clzll; otherwise we need to use the
+// Windows intrinsic functions.
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <intrin.h>
+#endif
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+
+#if defined(__GNUC__) && !defined(__clang__)
+// GCC
+#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) 1
+#else
+#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) ABSL_HAVE_BUILTIN(x)
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountl) && \
+ ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
+#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT
+#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 0
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz) && \
+ ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
+#define ABSL_INTERNAL_CONSTEXPR_CLZ constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_CLZ
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 0
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz) && \
+ ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
+#define ABSL_INTERNAL_CONSTEXPR_CTZ constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_CTZ
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 0
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace numeric_internal {
+
+constexpr bool IsPowerOf2(unsigned int x) noexcept {
+ return x != 0 && (x & (x - 1)) == 0;
+}
+
+template <class T>
+ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
+ T x, int s) noexcept {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+ static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+ "T must have a power-of-2 size");
+
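+  // Masking the shift count with (digits - 1) reduces it modulo the bit width
+  // (digits is a power of 2), so negative and oversized counts rotate safely
+  // instead of invoking undefined shift behavior.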
+ return static_cast<T>(x >> (s & (std::numeric_limits<T>::digits - 1))) |
+ static_cast<T>(x << ((-s) & (std::numeric_limits<T>::digits - 1)));
+}
+
+template <class T>
+ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateLeft(
+ T x, int s) noexcept {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+ static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+ "T must have a power-of-2 size");
+
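+  // See RotateRight above: the (digits - 1) masks make any shift count safe.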
+ return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
+ static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+Popcount32(uint32_t x) noexcept {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcount)
+ static_assert(sizeof(unsigned int) == sizeof(x),
+ "__builtin_popcount does not take 32-bit arg");
+ return __builtin_popcount(x);
+#else
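+  // Portable SWAR fallback: accumulate bit counts within 2-bit and then 4-bit
+  // fields in parallel, then sum the per-byte counts with a multiply and read
+  // the total out of the top byte.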
+ x -= ((x >> 1) & 0x55555555);
+ x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
+ return static_cast<int>((((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24);
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+Popcount64(uint64_t x) noexcept {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
+ static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
+ "__builtin_popcount does not take 64-bit arg");
+ return __builtin_popcountll(x);
+#else
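+  // 64-bit variant of the SWAR fallback in Popcount32 above.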
+ x -= (x >> 1) & 0x5555555555555555ULL;
+ x = ((x >> 2) & 0x3333333333333333ULL) + (x & 0x3333333333333333ULL);
+ return static_cast<int>(
+ (((x + (x >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
+#endif
+}
+
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+Popcount(T x) noexcept {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+ static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+ "T must have a power-of-2 size");
+ static_assert(sizeof(x) <= sizeof(uint64_t), "T is too large");
+ return sizeof(x) <= sizeof(uint32_t) ? Popcount32(x) : Popcount64(x);
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes32(uint32_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz)
+ // Use __builtin_clz, which uses the following instructions:
+ // x86: bsr, lzcnt
+ // ARM64: clz
+  // PPC: cntlzw
+
+ static_assert(sizeof(unsigned int) == sizeof(x),
+ "__builtin_clz does not take 32-bit arg");
+ // Handle 0 as a special case because __builtin_clz(0) is undefined.
+ return x == 0 ? 32 : __builtin_clz(x);
+#elif defined(_MSC_VER) && !defined(__clang__)
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if (_BitScanReverse(&result, x)) {
+ return 31 - result;
+ }
+ return 32;
+#else
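+  // Portable fallback: binary-search for the highest set bit by halving the
+  // candidate range, then finish with a 16-entry lookup table encoded as a
+  // string literal (its terminating NUL serves as the entry for x == 15).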
+ int zeroes = 28;
+ if (x >> 16) {
+ zeroes -= 16;
+ x >>= 16;
+ }
+ if (x >> 8) {
+ zeroes -= 8;
+ x >>= 8;
+ }
+ if (x >> 4) {
+ zeroes -= 4;
+ x >>= 4;
+ }
+ return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes16(uint16_t x) {
+#if ABSL_HAVE_BUILTIN(__builtin_clzs)
+ static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
+ "__builtin_clzs does not take 16-bit arg");
+ return x == 0 ? 16 : __builtin_clzs(x);
+#else
+ return CountLeadingZeroes32(x) - 16;
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes64(uint64_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
+ // Use __builtin_clzll, which uses the following instructions:
+ // x86: bsr, lzcnt
+ // ARM64: clz
+ // PPC: cntlzd
+ static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
+ "__builtin_clzll does not take 64-bit arg");
+
+ // Handle 0 as a special case because __builtin_clzll(0) is undefined.
+ return x == 0 ? 64 : __builtin_clzll(x);
+#elif defined(_MSC_VER) && !defined(__clang__) && \
+ (defined(_M_X64) || defined(_M_ARM64))
+  // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if (_BitScanReverse64(&result, x)) {
+ return 63 - result;
+ }
+ return 64;
+#elif defined(_MSC_VER) && !defined(__clang__)
+  // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse.
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if ((x >> 32) &&
+ _BitScanReverse(&result, static_cast<unsigned long>(x >> 32))) {
+ return 31 - result;
+ }
+ if (_BitScanReverse(&result, static_cast<unsigned long>(x))) {
+ return 63 - result;
+ }
+ return 64;
+#else
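+  // Same binary-search fallback as CountLeadingZeroes32, with one extra
+  // halving step for the upper 32 bits.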
+ int zeroes = 60;
+ if (x >> 32) {
+ zeroes -= 32;
+ x >>= 32;
+ }
+ if (x >> 16) {
+ zeroes -= 16;
+ x >>= 16;
+ }
+ if (x >> 8) {
+ zeroes -= 8;
+ x >>= 8;
+ }
+ if (x >> 4) {
+ zeroes -= 4;
+ x >>= 4;
+ }
+ return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
+#endif
+}
+
+template <typename T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes(T x) {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+ static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+ "T must have a power-of-2 size");
+ static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
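+  // Dispatch to the narrowest primitive that fits T, then subtract the extra
+  // leading zeros contributed by promoting T to that wider width.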
+ return sizeof(T) <= sizeof(uint16_t)
+ ? CountLeadingZeroes16(static_cast<uint16_t>(x)) -
+ (std::numeric_limits<uint16_t>::digits -
+ std::numeric_limits<T>::digits)
+ : (sizeof(T) <= sizeof(uint32_t)
+ ? CountLeadingZeroes32(static_cast<uint32_t>(x)) -
+ (std::numeric_limits<uint32_t>::digits -
+ std::numeric_limits<T>::digits)
+ : CountLeadingZeroes64(x));
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroesNonzero32(uint32_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz)
+ static_assert(sizeof(unsigned int) == sizeof(x),
+ "__builtin_ctz does not take 32-bit arg");
+ return __builtin_ctz(x);
+#elif defined(_MSC_VER) && !defined(__clang__)
+ unsigned long result = 0; // NOLINT(runtime/int)
+ _BitScanForward(&result, x);
+ return result;
+#else
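+  // Portable fallback: x &= ~x + 1 isolates the lowest set bit; each mask
+  // then tests which interleaved bit group that bit falls in and subtracts
+  // the corresponding offset from 31.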
+ int c = 31;
+ x &= ~x + 1;
+ if (x & 0x0000FFFF) c -= 16;
+ if (x & 0x00FF00FF) c -= 8;
+ if (x & 0x0F0F0F0F) c -= 4;
+ if (x & 0x33333333) c -= 2;
+ if (x & 0x55555555) c -= 1;
+ return c;
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroesNonzero64(uint64_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
+ static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
+ "__builtin_ctzll does not take 64-bit arg");
+ return __builtin_ctzll(x);
+#elif defined(_MSC_VER) && !defined(__clang__) && \
+ (defined(_M_X64) || defined(_M_ARM64))
+ unsigned long result = 0; // NOLINT(runtime/int)
+ _BitScanForward64(&result, x);
+ return result;
+#elif defined(_MSC_VER) && !defined(__clang__)
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if (static_cast<uint32_t>(x) == 0) {
+ _BitScanForward(&result, static_cast<unsigned long>(x >> 32));
+ return result + 32;
+ }
+ _BitScanForward(&result, static_cast<unsigned long>(x));
+ return result;
+#else
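+  // 64-bit variant of the isolate-lowest-set-bit fallback above.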
+ int c = 63;
+ x &= ~x + 1;
+ if (x & 0x00000000FFFFFFFF) c -= 32;
+ if (x & 0x0000FFFF0000FFFF) c -= 16;
+ if (x & 0x00FF00FF00FF00FF) c -= 8;
+ if (x & 0x0F0F0F0F0F0F0F0F) c -= 4;
+ if (x & 0x3333333333333333) c -= 2;
+ if (x & 0x5555555555555555) c -= 1;
+ return c;
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroesNonzero16(uint16_t x) {
+#if ABSL_HAVE_BUILTIN(__builtin_ctzs)
+ static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
+ "__builtin_ctzs does not take 16-bit arg");
+ return __builtin_ctzs(x);
+#else
+ return CountTrailingZeroesNonzero32(x);
+#endif
+}
+
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroes(T x) noexcept {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+ static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+ "T must have a power-of-2 size");
+ static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
+ return x == 0 ? std::numeric_limits<T>::digits
+ : (sizeof(T) <= sizeof(uint16_t)
+ ? CountTrailingZeroesNonzero16(static_cast<uint16_t>(x))
+ : (sizeof(T) <= sizeof(uint32_t)
+ ? CountTrailingZeroesNonzero32(
+ static_cast<uint32_t>(x))
+ : CountTrailingZeroesNonzero64(x)));
+}
+
+// If T is narrower than unsigned, T{1} << bit_width will be promoted. We
+// want to force it to wrap around so that bit_ceil of an invalid value is not
+// a core constant expression.
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ BitCeilPromotionHelper(T x, T promotion) {
+ return (T{1} << (x + promotion)) >> promotion;
+}
+
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ BitCeilNonPowerOf2(T x) {
+ // If T is narrower than unsigned, it undergoes promotion to unsigned when we
+ // shift. We calculate the number of bits added by the wider type.
+ return BitCeilPromotionHelper(
+ static_cast<T>(std::numeric_limits<T>::digits - CountLeadingZeroes(x)),
+ T{sizeof(T) >= sizeof(unsigned) ? 0
+ : std::numeric_limits<unsigned>::digits -
+ std::numeric_limits<T>::digits});
+}
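+
+// A minimal sketch of the intended behavior of the helpers above; these are
+// illustrative values, usable in static_assert on toolchains where the
+// builtin intrinsics make these functions constexpr:
+//
+//   static_assert(CountLeadingZeroes(uint8_t{1}) == 7, "");
+//   static_assert(CountTrailingZeroes(uint32_t{8}) == 3, "");
+//   static_assert(CountTrailingZeroes(uint16_t{0}) == 16, "");  // zero input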
+
+} // namespace numeric_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_NUMERIC_INTERNAL_BITS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/internal/representation.h b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/internal/representation.h
new file mode 100644
index 00000000000..0761cf39bce
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/internal/representation.h
@@ -0,0 +1,55 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
+#define ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
+
+#include <limits>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace numeric_internal {
+
+// Returns true iff long double is represented as a pair of doubles added
+// together.
+inline constexpr bool IsDoubleDouble() {
+ // A double-double value always has exactly twice the precision of a double
+ // value--one double carries the high digits and one double carries the low
+ // digits. This property is not shared with any other common floating-point
+ // representation, so this test won't trigger false positives. For reference,
+ // this table gives the number of bits of precision of each common
+ // floating-point representation:
+ //
+  //   type               precision
+  //   IEEE single        24
+  //   IEEE double        53
+  //   x86 long double    64
+  //   double-double      106
+  //   IEEE quadruple     113
+ //
+ // Note in particular that a quadruple-precision float has greater precision
+ // than a double-double float despite taking up the same amount of memory; the
+ // quad has more of its bits allocated to the mantissa than the double-double
+ // has.
+ return std::numeric_limits<long double>::digits ==
+ 2 * std::numeric_limits<double>::digits;
+}
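+
+// For example, on typical x86-64 Linux targets long double is the 80-bit x87
+// format (64 digits of precision), so IsDoubleDouble() is false; on PowerPC
+// targets that default to the IBM double-double ABI it is true.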
+
+} // namespace numeric_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/ya.make
new file mode 100644
index 00000000000..9f3280c39dc
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/ya.make
@@ -0,0 +1,24 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ int128.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased.cc
new file mode 100644
index 00000000000..02f5dfd9da8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased.cc
@@ -0,0 +1,93 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/profiling/internal/exponential_biased.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cmath>
+#include <limits>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/optimization.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace profiling_internal {
+
+// The algorithm generates a random number between 0 and 1 and applies the
+// inverse cumulative distribution function for an exponential. Specifically:
+// Let m be the inverse of the sample period, then the probability
+// distribution function is m*exp(-mx) so the CDF is
+// p = 1 - exp(-mx), so
+// q = 1 - p = exp(-mx)
+// log_e(q) = -mx
+// -log_e(q)/m = x
+// log_2(q) * (-log_e(2) * 1/m) = x
+// In the code, q is actually in the range 1 to 2**26, hence the -26 below
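+//
+// Worked example: with mean = 1000 and a draw of q = 2**25 (the midpoint of
+// the range, so log_2(q) = 25), the interval below evaluates to
+// (25 - 26) * (-log_e(2) * 1000) ~= 693, which matches the median of an
+// exponential distribution with mean 1000.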
+int64_t ExponentialBiased::GetSkipCount(int64_t mean) {
+ if (ABSL_PREDICT_FALSE(!initialized_)) {
+ Initialize();
+ }
+
+ uint64_t rng = NextRandom(rng_);
+ rng_ = rng;
+
+ // Take the top 26 bits as the random number
+  // (This plus the 1<<58 sampling bound gives a max possible step of
+ // 5194297183973780480 bytes.)
+ // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
+ // under piii debug for some binaries.
+ double q = static_cast<uint32_t>(rng >> (kPrngNumBits - 26)) + 1.0;
+ // Put the computed p-value through the CDF of a geometric.
+ double interval = bias_ + (std::log2(q) - 26) * (-std::log(2.0) * mean);
+ // Very large values of interval overflow int64_t. To avoid that, we will
+ // cheat and clamp any huge values to (int64_t max)/2. This is a potential
+ // source of bias, but the mean would need to be such a large value that it's
+ // not likely to come up. For example, with a mean of 1e18, the probability of
+ // hitting this condition is about 1/1000. For a mean of 1e17, standard
+ // calculators claim that this event won't happen.
+ if (interval > static_cast<double>(std::numeric_limits<int64_t>::max() / 2)) {
+ // Assume huge values are bias neutral, retain bias for next call.
+ return std::numeric_limits<int64_t>::max() / 2;
+ }
+ double value = std::rint(interval);
+ bias_ = interval - value;
+ return value;
+}
+
+int64_t ExponentialBiased::GetStride(int64_t mean) {
+ return GetSkipCount(mean - 1) + 1;
+}
+
+void ExponentialBiased::Initialize() {
+ // We don't get well distributed numbers from `this` so we call NextRandom() a
+ // bunch to mush the bits around. We use a global_rand to handle the case
+ // where the same thread (by memory address) gets created and destroyed
+ // repeatedly.
+ ABSL_CONST_INIT static std::atomic<uint32_t> global_rand(0);
+ uint64_t r = reinterpret_cast<uint64_t>(this) +
+ global_rand.fetch_add(1, std::memory_order_relaxed);
+ for (int i = 0; i < 20; ++i) {
+ r = NextRandom(r);
+ }
+ rng_ = r;
+ initialized_ = true;
+}
+
+} // namespace profiling_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased.h b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased.h
new file mode 100644
index 00000000000..ee7954a535e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased.h
@@ -0,0 +1,130 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_
+#define ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_
+
+#include <stdint.h>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/macros.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace profiling_internal {
+
+// ExponentialBiased provides a small and fast random number generator for a
+// rounded exponential distribution. This generator manages very little state
+// and imposes no synchronization overhead, which makes it useful in
+// specialized scenarios requiring minimal overhead, such as stride-based
+// periodic sampling.
+//
+// ExponentialBiased provides two closely related functions, GetSkipCount() and
+// GetStride(), both returning a rounded integer defining a number of events
+// required before some event with a given mean probability occurs.
+//
+// The distribution is useful for generating a random wait time or periodic
+// event with a given mean probability. For example, if an action is supposed
+// to happen on average once every 'N' events, we can draw a random 'stride'
+// that counts down until the event should happen. Say we want to sample one
+// in every 1000 'Frobber' calls; our code could look like this:
+//
+// Frobber::Frobber() {
+// stride_ = exponential_biased_.GetStride(1000);
+// }
+//
+// void Frobber::Frob(int arg) {
+//     if (--stride_ == 0) {
+// SampleFrob(arg);
+// stride_ = exponential_biased_.GetStride(1000);
+// }
+// ...
+// }
+//
+// The rounding of the return value creates a bias, especially for smaller
+// means, where the rounded-off fraction is not evenly distributed. We correct
+// this bias by tracking the fraction we rounded up or down on each iteration,
+// effectively tracking the distance between the cumulative value, and the
+// rounded cumulative value. For example, given a mean of 2:
+//
+// raw = 1.63076, cumulative = 1.63076, rounded = 2, bias = -0.36923
+// raw = 0.14624, cumulative = 1.77701, rounded = 2, bias = 0.14624
+// raw = 4.93194, cumulative = 6.70895, rounded = 7, bias = -0.06805
+// raw = 0.24206, cumulative = 6.95101, rounded = 7, bias = 0.24206
+// etc...
+//
+// Adjusting with rounding bias is relatively trivial:
+//
+// double value = bias_ + exponential_distribution(mean)();
+// double rounded_value = std::rint(value);
+// bias_ = value - rounded_value;
+// return rounded_value;
+//
+// This class is thread-compatible.
+class ExponentialBiased {
+ public:
+  // The number of random bits produced by NextRandom.
+ static constexpr int kPrngNumBits = 48;
+
+ // `GetSkipCount()` returns the number of events to skip before some chosen
+ // event happens. For example, randomly tossing a coin, we will on average
+ // throw heads once before we get tails. We can simulate random coin tosses
+ // using GetSkipCount() as:
+ //
+ // ExponentialBiased eb;
+ // for (...) {
+ // int number_of_heads_before_tail = eb.GetSkipCount(1);
+ // for (int flips = 0; flips < number_of_heads_before_tail; ++flips) {
+ // printf("head...");
+ // }
+ // printf("tail\n");
+ // }
+ //
+ int64_t GetSkipCount(int64_t mean);
+
+ // GetStride() returns the number of events required for a specific event to
+ // happen. See the class comments for a usage example. `GetStride()` is
+ // equivalent to `GetSkipCount(mean - 1) + 1`. When to use `GetStride()` or
+ // `GetSkipCount()` depends mostly on what best fits the use case.
+ int64_t GetStride(int64_t mean);
+
+  // Computes a random number in the range [0, (1 << kPrngNumBits) - 1].
+ //
+ // This is public to enable testing.
+ static uint64_t NextRandom(uint64_t rnd);
+
+ private:
+ void Initialize();
+
+ uint64_t rng_{0};
+ double bias_{0};
+ bool initialized_{false};
+};
+
+// Returns the next prng value.
+// The PRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48.
+// These are the constants of the lrand48/drand48 generator family.
+inline uint64_t ExponentialBiased::NextRandom(uint64_t rnd) {
+ const uint64_t prng_mult = uint64_t{0x5DEECE66D};
+ const uint64_t prng_add = 0xB;
+ const uint64_t prng_mod_power = 48;
+ const uint64_t prng_mod_mask =
+ ~((~static_cast<uint64_t>(0)) << prng_mod_power);
+ return (prng_mult * rnd + prng_add) & prng_mod_mask;
+}
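+
+// For example, NextRandom(1) == 0x5DEECE66D * 1 + 0xB == 0x5DEECE678; that
+// value stays well below 2**48, so the mask leaves it unchanged.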
+
+} // namespace profiling_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased/ya.make
new file mode 100644
index 00000000000..ddfe147041f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased/ya.make
@@ -0,0 +1,26 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal)
+
+SRCS(
+ exponential_biased.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/periodic_sampler.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/periodic_sampler.cc
new file mode 100644
index 00000000000..39fb3660f0b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/periodic_sampler.cc
@@ -0,0 +1,53 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/profiling/internal/periodic_sampler.h"
+
+#include <atomic>
+
+#include "y_absl/profiling/internal/exponential_biased.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace profiling_internal {
+
+int64_t PeriodicSamplerBase::GetExponentialBiased(int period) noexcept {
+ return rng_.GetStride(period);
+}
+
+bool PeriodicSamplerBase::SubtleConfirmSample() noexcept {
+ int current_period = period();
+
+  // Deal with the period cases 0 (always off) and 1 (always on)
+ if (ABSL_PREDICT_FALSE(current_period < 2)) {
+ stride_ = 0;
+ return current_period == 1;
+ }
+
+ // Check if this is the first call to Sample()
+ if (ABSL_PREDICT_FALSE(stride_ == 1)) {
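+    // The drawn stride is stored negated, so the unsigned increment performed
+    // in SubtleMaybeSample() walks it up toward zero, and a sample triggers
+    // once the value turns non-negative. If the freshly drawn stride is more
+    // than one event away, consume this event without sampling it.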
+ stride_ = static_cast<uint64_t>(-GetExponentialBiased(current_period));
+ if (static_cast<int64_t>(stride_) < -1) {
+ ++stride_;
+ return false;
+ }
+ }
+
+ stride_ = static_cast<uint64_t>(-GetExponentialBiased(current_period));
+ return true;
+}
+
+} // namespace profiling_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/periodic_sampler.h b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/periodic_sampler.h
new file mode 100644
index 00000000000..37796a9e00e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/periodic_sampler.h
@@ -0,0 +1,211 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_
+#define ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "y_absl/base/optimization.h"
+#include "y_absl/profiling/internal/exponential_biased.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace profiling_internal {
+
+// PeriodicSamplerBase provides the basic periodic sampler implementation.
+//
+// This is the base class for the templated PeriodicSampler class, which holds
+// a global std::atomic value identified by a user-defined tag, such that
+// each specific PeriodicSampler instantiation holds its own global period.
+//
+// PeriodicSamplerBase is thread-compatible except where stated otherwise.
+class PeriodicSamplerBase {
+ public:
+ // PeriodicSamplerBase is trivial / copyable / movable / destructible.
+ PeriodicSamplerBase() = default;
+ PeriodicSamplerBase(PeriodicSamplerBase&&) = default;
+ PeriodicSamplerBase(const PeriodicSamplerBase&) = default;
+
+ // Returns true roughly once every `period` calls. This is established by a
+ // randomly picked `stride` that is counted down on each call to `Sample`.
+ // This stride is picked such that the probability of `Sample()` returning
+ // true is 1 in `period`.
+ inline bool Sample() noexcept;
+
+ // The below methods are intended for optimized use cases where the
+ // size of the inlined fast path code is highly important. Applications
+ // should use the `Sample()` method unless they have proof that their
+ // specific use case requires the optimizations offered by these methods.
+ //
+ // An example of such a use case is SwissTable sampling. All sampling checks
+ // are in inlined SwissTable methods, and the number of call sites is huge.
+ // In this case, the inlined code size added to each translation unit calling
+ // SwissTable methods is non-trivial.
+ //
+  // The `SubtleMaybeSample()` function may spuriously return true even if the
+  // call should not be sampled. Applications MUST therefore match each call to
+  // `SubtleMaybeSample()` returning true with a `SubtleConfirmSample()` call,
+  // and use the result of the latter as the sampling decision.
+ // In other words: the code should logically be equivalent to:
+ //
+ // if (SubtleMaybeSample() && SubtleConfirmSample()) {
+ // // Sample this call
+ // }
+ //
+ // In the 'inline-size' optimized case, the `SubtleConfirmSample()` call can
+ // be placed out of line, for example, the typical use case looks as follows:
+ //
+ // // --- frobber.h -----------
+ // void FrobberSampled();
+ //
+ // inline void FrobberImpl() {
+ // // ...
+ // }
+ //
+ // inline void Frobber() {
+ // if (ABSL_PREDICT_FALSE(sampler.SubtleMaybeSample())) {
+ // FrobberSampled();
+ // } else {
+ // FrobberImpl();
+ // }
+ // }
+ //
+ // // --- frobber.cc -----------
+ // void FrobberSampled() {
+  //     if (!sampler.SubtleConfirmSample()) {
+ // // Spurious false positive
+ // FrobberImpl();
+ // return;
+ // }
+ //
+ // // Sampled execution
+ // // ...
+ // }
+ inline bool SubtleMaybeSample() noexcept;
+ bool SubtleConfirmSample() noexcept;
+
+ protected:
+ // We explicitly don't use a virtual destructor as this class is never
+ // virtually destroyed, and it keeps the class trivial, which avoids TLS
+ // prologue and epilogue code for our TLS instances.
+ ~PeriodicSamplerBase() = default;
+
+ // Returns the next stride for our sampler.
+ // This function is virtual for testing purposes only.
+ virtual int64_t GetExponentialBiased(int period) noexcept;
+
+ private:
+ // Returns the current period of this sampler. Thread-safe.
+ virtual int period() const noexcept = 0;
+
+  // Keep and increment stride_ as an unsigned integer, but compare the value
+  // to zero cast as a signed int. Clang and MSVC do not generate optimal code
+  // if we use a signed type for the combined increment and sign comparison.
+  //
+  // Of the 3 alternative options below, all compilers generate the best code
+  // with the unsigned increment <---> signed int comparison option.
+ //
+ // Option 1:
+ // int64_t stride_;
+ // if (ABSL_PREDICT_TRUE(++stride_ < 0)) { ... }
+ //
+ // GCC x64 (OK) : https://gcc.godbolt.org/z/R5MzzA
+ // GCC ppc (OK) : https://gcc.godbolt.org/z/z7NZAt
+ // Clang x64 (BAD): https://gcc.godbolt.org/z/t4gPsd
+ // ICC x64 (OK) : https://gcc.godbolt.org/z/rE6s8W
+ // MSVC x64 (OK) : https://gcc.godbolt.org/z/ARMXqS
+ //
+ // Option 2:
+ // int64_t stride_ = 0;
+ // if (ABSL_PREDICT_TRUE(--stride_ >= 0)) { ... }
+ //
+ // GCC x64 (OK) : https://gcc.godbolt.org/z/jSQxYK
+ // GCC ppc (OK) : https://gcc.godbolt.org/z/VJdYaA
+ // Clang x64 (BAD): https://gcc.godbolt.org/z/Xm4NjX
+ // ICC x64 (OK) : https://gcc.godbolt.org/z/4snaFd
+ // MSVC x64 (BAD): https://gcc.godbolt.org/z/BgnEKE
+ //
+ // Option 3:
+ // uint64_t stride_;
+ // if (ABSL_PREDICT_TRUE(static_cast<int64_t>(++stride_) < 0)) { ... }
+ //
+ // GCC x64 (OK) : https://gcc.godbolt.org/z/bFbfPy
+ // GCC ppc (OK) : https://gcc.godbolt.org/z/S9KkUE
+ // Clang x64 (OK) : https://gcc.godbolt.org/z/UYzRb4
+ // ICC x64 (OK) : https://gcc.godbolt.org/z/ptTNfD
+ // MSVC x64 (OK) : https://gcc.godbolt.org/z/76j4-5
+ uint64_t stride_ = 0;
+ y_absl::profiling_internal::ExponentialBiased rng_;
+};
+
+inline bool PeriodicSamplerBase::SubtleMaybeSample() noexcept {
+ // See comments on `stride_` for the unsigned increment / signed compare.
+ if (ABSL_PREDICT_TRUE(static_cast<int64_t>(++stride_) < 0)) {
+ return false;
+ }
+ return true;
+}
+
+inline bool PeriodicSamplerBase::Sample() noexcept {
+ return ABSL_PREDICT_FALSE(SubtleMaybeSample()) ? SubtleConfirmSample()
+ : false;
+}
+
+// PeriodicSampler is a concrete periodic sampler implementation.
+// The user-provided Tag identifies the implementation, and is required to
+// isolate the global state of this instance from other instances.
+//
+// Typical use case:
+//
+// struct HashTablezTag {};
+//   thread_local PeriodicSampler<HashTablezTag, 100> sampler;
+//
+// void HashTableSamplingLogic(...) {
+// if (sampler.Sample()) {
+// HashTableSlowSamplePath(...);
+// }
+// }
+//
+template <typename Tag, int default_period = 0>
+class PeriodicSampler final : public PeriodicSamplerBase {
+ public:
+ ~PeriodicSampler() = default;
+
+ int period() const noexcept final {
+ return period_.load(std::memory_order_relaxed);
+ }
+
+ // Sets the global period for this sampler. Thread-safe.
+ // Setting a period of 0 disables the sampler, i.e., every call to Sample()
+ // will return false. Setting a period of 1 puts the sampler in 'always on'
+ // mode, i.e., every call to Sample() returns true.
+ static void SetGlobalPeriod(int period) {
+ period_.store(period, std::memory_order_relaxed);
+ }
+
+ private:
+ static std::atomic<int> period_;
+};
+
+template <typename Tag, int default_period>
+std::atomic<int> PeriodicSampler<Tag, default_period>::period_(default_period);
+
+} // namespace profiling_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/sample_recorder.h b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/sample_recorder.h
new file mode 100644
index 00000000000..449d17a76dc
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/sample_recorder.h
@@ -0,0 +1,230 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: sample_recorder.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a lock-free linked list for recording samples
+// collected from a random/stochastic process.
+//
+// This utility is internal-only. Use at your own risk.
+
+#ifndef ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
+#define ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
+
+#include <atomic>
+#include <cstddef>
+#include <functional>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/thread_annotations.h"
+#include "y_absl/synchronization/mutex.h"
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace profiling_internal {
+
+// Sample<T> that has members required for linking samples in the linked list of
+// samples maintained by the SampleRecorder. Type T defines the sampled data.
+template <typename T>
+struct Sample {
+ // Guards the ability to restore the sample to a pristine state. This
+ // prevents races with sampling and resurrecting an object.
+ y_absl::Mutex init_mu;
+ T* next = nullptr;
+ T* dead ABSL_GUARDED_BY(init_mu) = nullptr;
+};
+
+// Holds samples and their associated data, with a soft limit on the number
+// of samples; the limit can be adjusted via `SetMaxSamples()`.
+//
+// Thread safe.
+template <typename T>
+class SampleRecorder {
+ public:
+ SampleRecorder();
+ ~SampleRecorder();
+
+ // Registers for sampling. Returns an opaque registration info.
+ T* Register();
+
+ // Unregisters the sample.
+ void Unregister(T* sample);
+
+  // The dispose callback will be called on all samples at the moment they are
+  // unregistered. It only affects samples that are unregistered after the
+ // callback has been set.
+ // Returns the previous callback.
+ using DisposeCallback = void (*)(const T&);
+ DisposeCallback SetDisposeCallback(DisposeCallback f);
+
+  // Iterates over all the registered samples, returning the number of
+  // samples that have been dropped.
+ int64_t Iterate(const std::function<void(const T& stack)>& f);
+
+ void SetMaxSamples(int32_t max);
+
+ private:
+ void PushNew(T* sample);
+ void PushDead(T* sample);
+ T* PopDead();
+
+ std::atomic<size_t> dropped_samples_;
+ std::atomic<size_t> size_estimate_;
+ std::atomic<int32_t> max_samples_{1 << 20};
+
+ // Intrusive lock free linked lists for tracking samples.
+ //
+ // `all_` records all samples (they are never removed from this list) and is
+ // terminated with a `nullptr`.
+ //
+ // `graveyard_.dead` is a circular linked list. When it is empty,
+  // `graveyard_.dead == &graveyard_`. The list is circular so that
+ // every item on it (even the last) has a non-null dead pointer. This allows
+ // `Iterate` to determine if a given sample is live or dead using only
+ // information on the sample itself.
+ //
+ // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
+ // looks like this (G is the Graveyard):
+ //
+ // +---+ +---+ +---+ +---+ +---+
+ // all -->| A |--->| B |--->| C |--->| D |--->| E |
+ // | | | | | | | | | |
+ // +---+ | | +->| |-+ | | +->| |-+ | |
+ // | G | +---+ | +---+ | +---+ | +---+ | +---+
+ // | | | | | |
+ // | | --------+ +--------+ |
+ // +---+ |
+ // ^ |
+ // +--------------------------------------+
+ //
+ std::atomic<T*> all_;
+ T graveyard_;
+
+ std::atomic<DisposeCallback> dispose_;
+};
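+
+// A minimal usage sketch. `MySample` is a hypothetical type; it must derive
+// from Sample<MySample> and provide the PrepareForSampling() hook invoked
+// when a dead sample is resurrected:
+//
+//   struct MySample : Sample<MySample> {
+//     void PrepareForSampling() { count = 0; }
+//     int64_t count = 0;
+//   };
+//
+//   SampleRecorder<MySample> recorder;
+//   MySample* s = recorder.Register();  // nullptr if over the sample limit
+//   if (s != nullptr) {
+//     ++s->count;
+//     recorder.Unregister(s);
+//   }
+//   int64_t dropped = recorder.Iterate([](const MySample& sample) {
+//     // Read-only access to each live sample.
+//   });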
+
+template <typename T>
+typename SampleRecorder<T>::DisposeCallback
+SampleRecorder<T>::SetDisposeCallback(DisposeCallback f) {
+ return dispose_.exchange(f, std::memory_order_relaxed);
+}
+
+template <typename T>
+SampleRecorder<T>::SampleRecorder()
+ : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
+ y_absl::MutexLock l(&graveyard_.init_mu);
+ graveyard_.dead = &graveyard_;
+}
+
+template <typename T>
+SampleRecorder<T>::~SampleRecorder() {
+ T* s = all_.load(std::memory_order_acquire);
+ while (s != nullptr) {
+ T* next = s->next;
+ delete s;
+ s = next;
+ }
+}
+
+template <typename T>
+void SampleRecorder<T>::PushNew(T* sample) {
+ sample->next = all_.load(std::memory_order_relaxed);
+ while (!all_.compare_exchange_weak(sample->next, sample,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ }
+}
+
+template <typename T>
+void SampleRecorder<T>::PushDead(T* sample) {
+ if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
+ dispose(*sample);
+ }
+
+ y_absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+ y_absl::MutexLock sample_lock(&sample->init_mu);
+ sample->dead = graveyard_.dead;
+ graveyard_.dead = sample;
+}
+
+template <typename T>
+T* SampleRecorder<T>::PopDead() {
+ y_absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+
+ // The list is circular, so eventually it collapses down to
+ // graveyard_.dead == &graveyard_
+ // when it is empty.
+ T* sample = graveyard_.dead;
+ if (sample == &graveyard_) return nullptr;
+
+ y_absl::MutexLock sample_lock(&sample->init_mu);
+ graveyard_.dead = sample->dead;
+ sample->dead = nullptr;
+ sample->PrepareForSampling();
+ return sample;
+}
+
+template <typename T>
+T* SampleRecorder<T>::Register() {
+ int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
+ if (size > max_samples_.load(std::memory_order_relaxed)) {
+ size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+ dropped_samples_.fetch_add(1, std::memory_order_relaxed);
+ return nullptr;
+ }
+
+ T* sample = PopDead();
+ if (sample == nullptr) {
+ // Resurrection failed. Hire a new warlock.
+ sample = new T();
+ PushNew(sample);
+ }
+
+ return sample;
+}
+
+template <typename T>
+void SampleRecorder<T>::Unregister(T* sample) {
+ PushDead(sample);
+ size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+}
+
+template <typename T>
+int64_t SampleRecorder<T>::Iterate(
+ const std::function<void(const T& stack)>& f) {
+ T* s = all_.load(std::memory_order_acquire);
+ while (s != nullptr) {
+ y_absl::MutexLock l(&s->init_mu);
+ if (s->dead == nullptr) {
+ f(*s);
+ }
+ s = s->next;
+ }
+
+ return dropped_samples_.load(std::memory_order_relaxed);
+}
+
+template <typename T>
+void SampleRecorder<T>::SetMaxSamples(int32_t max) {
+ max_samples_.store(max, std::memory_order_release);
+}
+
+} // namespace profiling_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/status/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..1c58023e4ba
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/.yandex_meta/licenses.list.txt
@@ -0,0 +1,20 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2019 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2020 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h
new file mode 100644
index 00000000000..6754d7d5184
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h
@@ -0,0 +1,69 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_
+#define ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/container/inlined_vector.h"
+#include "y_absl/strings/cord.h"
+
+#ifndef SWIG
+// Disabled for SWIG as it doesn't parse attributes correctly.
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+// Returned Status objects may not be ignored. Codesearch doesn't handle ifdefs
+// as part of class definitions (b/6995610), so we use a forward declaration.
+class ABSL_MUST_USE_RESULT Status;
+ABSL_NAMESPACE_END
+} // namespace y_absl
+#endif // !SWIG
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+enum class StatusCode : int;
+
+namespace status_internal {
+
+// Container for status payloads.
+struct Payload {
+ TString type_url;
+ y_absl::Cord payload;
+};
+
+using Payloads = y_absl::InlinedVector<Payload, 1>;
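+
+// For example, a Status carrying a single payload might store
+//   {type_url = "type.googleapis.com/google.rpc.RetryInfo", payload = <Cord>}
+// as the one inlined element of this vector.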
+
+// Reference-counted representation of Status data.
+struct StatusRep {
+ StatusRep(y_absl::StatusCode code_arg, y_absl::string_view message_arg,
+ std::unique_ptr<status_internal::Payloads> payloads_arg)
+ : ref(int32_t{1}),
+ code(code_arg),
+ message(message_arg),
+ payloads(std::move(payloads_arg)) {}
+
+ std::atomic<int32_t> ref;
+ y_absl::StatusCode code;
+ TString message;
+ std::unique_ptr<status_internal::Payloads> payloads;
+};
+
+y_absl::StatusCode MapToLocalCode(int value);
+} // namespace status_internal
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/statusor_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/statusor_internal.h
new file mode 100644
index 00000000000..c4d78e28d20
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/statusor_internal.h
@@ -0,0 +1,396 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef ABSL_STATUS_INTERNAL_STATUSOR_INTERNAL_H_
+#define ABSL_STATUS_INTERNAL_STATUSOR_INTERNAL_H_
+
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/status/status.h"
+#include "y_absl/utility/utility.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+template <typename T>
+class ABSL_MUST_USE_RESULT StatusOr;
+
+namespace internal_statusor {
+
+// Detects whether `U` has a conversion operator to `StatusOr<T>`, i.e. `operator
+// StatusOr<T>()`.
+template <typename T, typename U, typename = void>
+struct HasConversionOperatorToStatusOr : std::false_type {};
+
+template <typename T, typename U>
+void test(char (*)[sizeof(std::declval<U>().operator y_absl::StatusOr<T>())]);
+
+template <typename T, typename U>
+struct HasConversionOperatorToStatusOr<T, U, decltype(test<T, U>(0))>
+ : std::true_type {};
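+
+// For example, given a hypothetical type
+//   struct Widget { operator y_absl::StatusOr<int>() const; };
+// HasConversionOperatorToStatusOr<int, Widget>::value is true, while it is
+// false for types without such a conversion operator.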
+
+// Detects whether `T` is constructible or convertible from `StatusOr<U>`.
+template <typename T, typename U>
+using IsConstructibleOrConvertibleFromStatusOr =
+ y_absl::disjunction<std::is_constructible<T, StatusOr<U>&>,
+ std::is_constructible<T, const StatusOr<U>&>,
+ std::is_constructible<T, StatusOr<U>&&>,
+ std::is_constructible<T, const StatusOr<U>&&>,
+ std::is_convertible<StatusOr<U>&, T>,
+ std::is_convertible<const StatusOr<U>&, T>,
+ std::is_convertible<StatusOr<U>&&, T>,
+ std::is_convertible<const StatusOr<U>&&, T>>;
+
+// Detects whether `T` is constructible or convertible or assignable from
+// `StatusOr<U>`.
+template <typename T, typename U>
+using IsConstructibleOrConvertibleOrAssignableFromStatusOr =
+ y_absl::disjunction<IsConstructibleOrConvertibleFromStatusOr<T, U>,
+ std::is_assignable<T&, StatusOr<U>&>,
+ std::is_assignable<T&, const StatusOr<U>&>,
+ std::is_assignable<T&, StatusOr<U>&&>,
+ std::is_assignable<T&, const StatusOr<U>&&>>;
+
+// Detects whether direct initializing `StatusOr<T>` from `U` is ambiguous, i.e.
+// when `U` is `StatusOr<V>` and `T` is constructible or convertible from `V`.
+template <typename T, typename U>
+struct IsDirectInitializationAmbiguous
+ : public y_absl::conditional_t<
+ std::is_same<y_absl::remove_cv_t<y_absl::remove_reference_t<U>>,
+ U>::value,
+ std::false_type,
+ IsDirectInitializationAmbiguous<
+ T, y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>> {};
+
+template <typename T, typename V>
+struct IsDirectInitializationAmbiguous<T, y_absl::StatusOr<V>>
+ : public IsConstructibleOrConvertibleFromStatusOr<T, V> {};
+
+// Checks against the constraints of direct initialization, i.e. when
+// `StatusOr<T>::StatusOr(U&&)` should participate in overload resolution.
+template <typename T, typename U>
+using IsDirectInitializationValid = y_absl::disjunction<
+ // Short circuits if T is basically U.
+ std::is_same<T, y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ y_absl::negation<y_absl::disjunction<
+ std::is_same<y_absl::StatusOr<T>,
+ y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ std::is_same<y_absl::Status,
+ y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ std::is_same<y_absl::in_place_t,
+ y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ IsDirectInitializationAmbiguous<T, U>>>>;
+
+// This trait detects whether `StatusOr<T>::operator=(U&&)` is ambiguous, which
+// is equivalent to whether all the following conditions are met:
+// 1. `U` is `StatusOr<V>`.
+// 2. `T` is constructible and assignable from `V`.
+// 3. `T` is constructible and assignable from `U` (i.e. `StatusOr<V>`).
+// For example, the following code is considered ambiguous:
+// (`T` is `bool`, `U` is `StatusOr<bool>`, `V` is `bool`)
+// StatusOr<bool> s1 = true; // s1.ok() && s1.ValueOrDie() == true
+// StatusOr<bool> s2 = false; // s2.ok() && s2.ValueOrDie() == false
+// s1 = s2; // ambiguous, `s1 = s2.ValueOrDie()` or `s1 = bool(s2)`?
+template <typename T, typename U>
+struct IsForwardingAssignmentAmbiguous
+ : public y_absl::conditional_t<
+ std::is_same<y_absl::remove_cv_t<y_absl::remove_reference_t<U>>,
+ U>::value,
+ std::false_type,
+ IsForwardingAssignmentAmbiguous<
+ T, y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>> {};
+
+template <typename T, typename U>
+struct IsForwardingAssignmentAmbiguous<T, y_absl::StatusOr<U>>
+ : public IsConstructibleOrConvertibleOrAssignableFromStatusOr<T, U> {};
+
+// Checks against the constraints of the forwarding assignment, i.e. whether
+// `StatusOr<T>::operator=(U&&)` should participate in overload resolution.
+template <typename T, typename U>
+using IsForwardingAssignmentValid = y_absl::disjunction<
+ // Short circuits if T is basically U.
+ std::is_same<T, y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ y_absl::negation<y_absl::disjunction<
+ std::is_same<y_absl::StatusOr<T>,
+ y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ std::is_same<y_absl::Status,
+ y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ std::is_same<y_absl::in_place_t,
+ y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ IsForwardingAssignmentAmbiguous<T, U>>>>;
+
+class Helper {
+ public:
+ // Move type-agnostic error handling to the .cc.
+ static void HandleInvalidStatusCtorArg(Status*);
+ ABSL_ATTRIBUTE_NORETURN static void Crash(const y_absl::Status& status);
+};
+
+// Construct an instance of T in `p` through placement new, passing Args... to
+// the constructor.
+// This abstraction is here mostly for the gcc performance fix.
+template <typename T, typename... Args>
+ABSL_ATTRIBUTE_NONNULL(1) void PlacementNew(void* p, Args&&... args) {
+ new (p) T(std::forward<Args>(args)...);
+}
+
+// Helper base class to hold the data and all operations.
+// We move all this to a base class to allow mixing with the appropriate
+// TraitsBase specialization.
+template <typename T>
+class StatusOrData {
+ template <typename U>
+ friend class StatusOrData;
+
+ public:
+ StatusOrData() = delete;
+
+ StatusOrData(const StatusOrData& other) {
+ if (other.ok()) {
+ MakeValue(other.data_);
+ MakeStatus();
+ } else {
+ MakeStatus(other.status_);
+ }
+ }
+
+ StatusOrData(StatusOrData&& other) noexcept {
+ if (other.ok()) {
+ MakeValue(std::move(other.data_));
+ MakeStatus();
+ } else {
+ MakeStatus(std::move(other.status_));
+ }
+ }
+
+ template <typename U>
+ explicit StatusOrData(const StatusOrData<U>& other) {
+ if (other.ok()) {
+ MakeValue(other.data_);
+ MakeStatus();
+ } else {
+ MakeStatus(other.status_);
+ }
+ }
+
+ template <typename U>
+ explicit StatusOrData(StatusOrData<U>&& other) {
+ if (other.ok()) {
+ MakeValue(std::move(other.data_));
+ MakeStatus();
+ } else {
+ MakeStatus(std::move(other.status_));
+ }
+ }
+
+ template <typename... Args>
+ explicit StatusOrData(y_absl::in_place_t, Args&&... args)
+ : data_(std::forward<Args>(args)...) {
+ MakeStatus();
+ }
+
+ explicit StatusOrData(const T& value) : data_(value) {
+ MakeStatus();
+ }
+ explicit StatusOrData(T&& value) : data_(std::move(value)) {
+ MakeStatus();
+ }
+
+ template <typename U,
+ y_absl::enable_if_t<std::is_constructible<y_absl::Status, U&&>::value,
+ int> = 0>
+ explicit StatusOrData(U&& v) : status_(std::forward<U>(v)) {
+ EnsureNotOk();
+ }
+
+ StatusOrData& operator=(const StatusOrData& other) {
+ if (this == &other) return *this;
+ if (other.ok())
+ Assign(other.data_);
+ else
+ AssignStatus(other.status_);
+ return *this;
+ }
+
+ StatusOrData& operator=(StatusOrData&& other) {
+ if (this == &other) return *this;
+ if (other.ok())
+ Assign(std::move(other.data_));
+ else
+ AssignStatus(std::move(other.status_));
+ return *this;
+ }
+
+ ~StatusOrData() {
+ if (ok()) {
+ status_.~Status();
+ data_.~T();
+ } else {
+ status_.~Status();
+ }
+ }
+
+ template <typename U>
+ void Assign(U&& value) {
+ if (ok()) {
+ data_ = std::forward<U>(value);
+ } else {
+ MakeValue(std::forward<U>(value));
+ status_ = OkStatus();
+ }
+ }
+
+ template <typename U>
+ void AssignStatus(U&& v) {
+ Clear();
+ status_ = static_cast<y_absl::Status>(std::forward<U>(v));
+ EnsureNotOk();
+ }
+
+ bool ok() const { return status_.ok(); }
+
+ protected:
+ // status_ will always be active after the constructor.
+ // We make it a union to be able to initialize exactly how we need without
+ // waste.
+  // E.g. in the copy constructor we use the default constructor of Status in
+ // the ok() path to avoid an extra Ref call.
+ union {
+ Status status_;
+ };
+
+ // data_ is active iff status_.ok()==true
+ struct Dummy {};
+ union {
+ // When T is const, we need some non-const object we can cast to void* for
+ // the placement new. dummy_ is that object.
+ Dummy dummy_;
+ T data_;
+ };
+
+ void Clear() {
+ if (ok()) data_.~T();
+ }
+
+ void EnsureOk() const {
+ if (ABSL_PREDICT_FALSE(!ok())) Helper::Crash(status_);
+ }
+
+ void EnsureNotOk() {
+ if (ABSL_PREDICT_FALSE(ok())) Helper::HandleInvalidStatusCtorArg(&status_);
+ }
+
+  // Construct the value (i.e. data_) through placement new with the passed
+ // argument.
+ template <typename... Arg>
+ void MakeValue(Arg&&... arg) {
+ internal_statusor::PlacementNew<T>(&dummy_, std::forward<Arg>(arg)...);
+ }
+
+  // Construct the status (i.e. status_) through placement new with the passed
+ // argument.
+ template <typename... Args>
+ void MakeStatus(Args&&... args) {
+ internal_statusor::PlacementNew<Status>(&status_,
+ std::forward<Args>(args)...);
+ }
+};
+
+// Helper base classes to allow implicitly deleted constructors and assignment
+// operators in `StatusOr`. For example, `CopyCtorBase` will explicitly delete
+// the copy constructor when T is not copy constructible and `StatusOr` will
+// inherit that behavior implicitly.
+template <typename T, bool = std::is_copy_constructible<T>::value>
+struct CopyCtorBase {
+ CopyCtorBase() = default;
+ CopyCtorBase(const CopyCtorBase&) = default;
+ CopyCtorBase(CopyCtorBase&&) = default;
+ CopyCtorBase& operator=(const CopyCtorBase&) = default;
+ CopyCtorBase& operator=(CopyCtorBase&&) = default;
+};
+
+template <typename T>
+struct CopyCtorBase<T, false> {
+ CopyCtorBase() = default;
+ CopyCtorBase(const CopyCtorBase&) = delete;
+ CopyCtorBase(CopyCtorBase&&) = default;
+ CopyCtorBase& operator=(const CopyCtorBase&) = default;
+ CopyCtorBase& operator=(CopyCtorBase&&) = default;
+};
+
+template <typename T, bool = std::is_move_constructible<T>::value>
+struct MoveCtorBase {
+ MoveCtorBase() = default;
+ MoveCtorBase(const MoveCtorBase&) = default;
+ MoveCtorBase(MoveCtorBase&&) = default;
+ MoveCtorBase& operator=(const MoveCtorBase&) = default;
+ MoveCtorBase& operator=(MoveCtorBase&&) = default;
+};
+
+template <typename T>
+struct MoveCtorBase<T, false> {
+ MoveCtorBase() = default;
+ MoveCtorBase(const MoveCtorBase&) = default;
+ MoveCtorBase(MoveCtorBase&&) = delete;
+ MoveCtorBase& operator=(const MoveCtorBase&) = default;
+ MoveCtorBase& operator=(MoveCtorBase&&) = default;
+};
+
+template <typename T, bool = std::is_copy_constructible<T>::value&&
+ std::is_copy_assignable<T>::value>
+struct CopyAssignBase {
+ CopyAssignBase() = default;
+ CopyAssignBase(const CopyAssignBase&) = default;
+ CopyAssignBase(CopyAssignBase&&) = default;
+ CopyAssignBase& operator=(const CopyAssignBase&) = default;
+ CopyAssignBase& operator=(CopyAssignBase&&) = default;
+};
+
+template <typename T>
+struct CopyAssignBase<T, false> {
+ CopyAssignBase() = default;
+ CopyAssignBase(const CopyAssignBase&) = default;
+ CopyAssignBase(CopyAssignBase&&) = default;
+ CopyAssignBase& operator=(const CopyAssignBase&) = delete;
+ CopyAssignBase& operator=(CopyAssignBase&&) = default;
+};
+
+template <typename T, bool = std::is_move_constructible<T>::value&&
+ std::is_move_assignable<T>::value>
+struct MoveAssignBase {
+ MoveAssignBase() = default;
+ MoveAssignBase(const MoveAssignBase&) = default;
+ MoveAssignBase(MoveAssignBase&&) = default;
+ MoveAssignBase& operator=(const MoveAssignBase&) = default;
+ MoveAssignBase& operator=(MoveAssignBase&&) = default;
+};
+
+template <typename T>
+struct MoveAssignBase<T, false> {
+ MoveAssignBase() = default;
+ MoveAssignBase(const MoveAssignBase&) = default;
+ MoveAssignBase(MoveAssignBase&&) = default;
+ MoveAssignBase& operator=(const MoveAssignBase&) = default;
+ MoveAssignBase& operator=(MoveAssignBase&&) = delete;
+};
+
+ABSL_ATTRIBUTE_NORETURN void ThrowBadStatusOrAccess(y_absl::Status status);
+
+} // namespace internal_statusor
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STATUS_INTERNAL_STATUSOR_INTERNAL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc
new file mode 100644
index 00000000000..3b7fe28e084
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc
@@ -0,0 +1,444 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "y_absl/status/status.h"
+
+#include <cassert>
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/debugging/stacktrace.h"
+#include "y_absl/debugging/symbolize.h"
+#include "y_absl/status/status_payload_printer.h"
+#include "y_absl/strings/escaping.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/str_format.h"
+#include "y_absl/strings/str_split.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+TString StatusCodeToString(StatusCode code) {
+ switch (code) {
+ case StatusCode::kOk:
+ return "OK";
+ case StatusCode::kCancelled:
+ return "CANCELLED";
+ case StatusCode::kUnknown:
+ return "UNKNOWN";
+ case StatusCode::kInvalidArgument:
+ return "INVALID_ARGUMENT";
+ case StatusCode::kDeadlineExceeded:
+ return "DEADLINE_EXCEEDED";
+ case StatusCode::kNotFound:
+ return "NOT_FOUND";
+ case StatusCode::kAlreadyExists:
+ return "ALREADY_EXISTS";
+ case StatusCode::kPermissionDenied:
+ return "PERMISSION_DENIED";
+ case StatusCode::kUnauthenticated:
+ return "UNAUTHENTICATED";
+ case StatusCode::kResourceExhausted:
+ return "RESOURCE_EXHAUSTED";
+ case StatusCode::kFailedPrecondition:
+ return "FAILED_PRECONDITION";
+ case StatusCode::kAborted:
+ return "ABORTED";
+ case StatusCode::kOutOfRange:
+ return "OUT_OF_RANGE";
+ case StatusCode::kUnimplemented:
+ return "UNIMPLEMENTED";
+ case StatusCode::kInternal:
+ return "INTERNAL";
+ case StatusCode::kUnavailable:
+ return "UNAVAILABLE";
+ case StatusCode::kDataLoss:
+ return "DATA_LOSS";
+ default:
+ return "";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, StatusCode code) {
+ return os << StatusCodeToString(code);
+}
+
+namespace status_internal {
+
+static int FindPayloadIndexByUrl(const Payloads* payloads,
+ y_absl::string_view type_url) {
+ if (payloads == nullptr) return -1;
+
+ for (size_t i = 0; i < payloads->size(); ++i) {
+ if ((*payloads)[i].type_url == type_url) return i;
+ }
+
+ return -1;
+}
+
+// Convert canonical code to a value known to this binary.
+y_absl::StatusCode MapToLocalCode(int value) {
+ y_absl::StatusCode code = static_cast<y_absl::StatusCode>(value);
+ switch (code) {
+ case y_absl::StatusCode::kOk:
+ case y_absl::StatusCode::kCancelled:
+ case y_absl::StatusCode::kUnknown:
+ case y_absl::StatusCode::kInvalidArgument:
+ case y_absl::StatusCode::kDeadlineExceeded:
+ case y_absl::StatusCode::kNotFound:
+ case y_absl::StatusCode::kAlreadyExists:
+ case y_absl::StatusCode::kPermissionDenied:
+ case y_absl::StatusCode::kResourceExhausted:
+ case y_absl::StatusCode::kFailedPrecondition:
+ case y_absl::StatusCode::kAborted:
+ case y_absl::StatusCode::kOutOfRange:
+ case y_absl::StatusCode::kUnimplemented:
+ case y_absl::StatusCode::kInternal:
+ case y_absl::StatusCode::kUnavailable:
+ case y_absl::StatusCode::kDataLoss:
+ case y_absl::StatusCode::kUnauthenticated:
+ return code;
+ default:
+ return y_absl::StatusCode::kUnknown;
+ }
+}
+} // namespace status_internal
+
+y_absl::optional<y_absl::Cord> Status::GetPayload(
+ y_absl::string_view type_url) const {
+ const auto* payloads = GetPayloads();
+ int index = status_internal::FindPayloadIndexByUrl(payloads, type_url);
+ if (index != -1) return (*payloads)[index].payload;
+
+ return y_absl::nullopt;
+}
+
+void Status::SetPayload(y_absl::string_view type_url, y_absl::Cord payload) {
+ if (ok()) return;
+
+ PrepareToModify();
+
+ status_internal::StatusRep* rep = RepToPointer(rep_);
+ if (!rep->payloads) {
+ rep->payloads = y_absl::make_unique<status_internal::Payloads>();
+ }
+
+ int index =
+ status_internal::FindPayloadIndexByUrl(rep->payloads.get(), type_url);
+ if (index != -1) {
+ (*rep->payloads)[index].payload = std::move(payload);
+ return;
+ }
+
+ rep->payloads->push_back({TString(type_url), std::move(payload)});
+}
+
+bool Status::ErasePayload(y_absl::string_view type_url) {
+ int index = status_internal::FindPayloadIndexByUrl(GetPayloads(), type_url);
+ if (index != -1) {
+ PrepareToModify();
+ GetPayloads()->erase(GetPayloads()->begin() + index);
+ if (GetPayloads()->empty() && message().empty()) {
+ // Special case: If this can be represented inlined, it MUST be
+ // inlined (EqualsSlow depends on this behavior).
+ StatusCode c = static_cast<StatusCode>(raw_code());
+ Unref(rep_);
+ rep_ = CodeToInlinedRep(c);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+void Status::ForEachPayload(
+ y_absl::FunctionRef<void(y_absl::string_view, const y_absl::Cord&)> visitor)
+ const {
+ if (auto* payloads = GetPayloads()) {
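+    // Visit payloads in reverse for roughly half of all Status objects
+    // (chosen from the vector's address), so callers cannot come to rely on
+    // any particular visitation order.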
+ bool in_reverse =
+ payloads->size() > 1 && reinterpret_cast<uintptr_t>(payloads) % 13 > 6;
+
+ for (size_t index = 0; index < payloads->size(); ++index) {
+ const auto& elem =
+ (*payloads)[in_reverse ? payloads->size() - 1 - index : index];
+
+#ifdef NDEBUG
+ visitor(elem.type_url, elem.payload);
+#else
+ // In debug mode invalidate the type url to prevent users from relying on
+ // this string lifetime.
+
+ // NOLINTNEXTLINE intentional extra conversion to force temporary.
+ visitor(TString(elem.type_url), elem.payload);
+#endif // NDEBUG
+ }
+ }
+}
+
+const TString* Status::EmptyString() {
+ static TString* empty_string = new TString();
+ return empty_string;
+}
+
+constexpr const char Status::kMovedFromString[];
+
+const TString* Status::MovedFromString() {
+ static TString* moved_from_string = new TString(kMovedFromString);
+ return moved_from_string;
+}
+
+void Status::UnrefNonInlined(uintptr_t rep) {
+ status_internal::StatusRep* r = RepToPointer(rep);
+ // Fast path: if ref==1, there is no need for a RefCountDec (since
+ // this is the only reference and therefore no other thread is
+ // allowed to be mucking with r).
+ if (r->ref.load(std::memory_order_acquire) == 1 ||
+ r->ref.fetch_sub(1, std::memory_order_acq_rel) - 1 == 0) {
+ delete r;
+ }
+}
+
+Status::Status(y_absl::StatusCode code, y_absl::string_view msg)
+ : rep_(CodeToInlinedRep(code)) {
+ if (code != y_absl::StatusCode::kOk && !msg.empty()) {
+ rep_ = PointerToRep(new status_internal::StatusRep(code, msg, nullptr));
+ }
+}
+
+int Status::raw_code() const {
+ if (IsInlined(rep_)) {
+ return static_cast<int>(InlinedRepToCode(rep_));
+ }
+ status_internal::StatusRep* rep = RepToPointer(rep_);
+ return static_cast<int>(rep->code);
+}
+
+y_absl::StatusCode Status::code() const {
+ return status_internal::MapToLocalCode(raw_code());
+}
+
+void Status::PrepareToModify() {
+ ABSL_RAW_CHECK(!ok(), "PrepareToModify shouldn't be called on OK status.");
+ if (IsInlined(rep_)) {
+ rep_ = PointerToRep(new status_internal::StatusRep(
+ static_cast<y_absl::StatusCode>(raw_code()), y_absl::string_view(),
+ nullptr));
+ return;
+ }
+
+ uintptr_t rep_i = rep_;
+ status_internal::StatusRep* rep = RepToPointer(rep_);
+ if (rep->ref.load(std::memory_order_acquire) != 1) {
+ std::unique_ptr<status_internal::Payloads> payloads;
+ if (rep->payloads) {
+ payloads = y_absl::make_unique<status_internal::Payloads>(*rep->payloads);
+ }
+ status_internal::StatusRep* const new_rep = new status_internal::StatusRep(
+ rep->code, message(), std::move(payloads));
+ rep_ = PointerToRep(new_rep);
+ UnrefNonInlined(rep_i);
+ }
+}
+
+bool Status::EqualsSlow(const y_absl::Status& a, const y_absl::Status& b) {
+ if (IsInlined(a.rep_) != IsInlined(b.rep_)) return false;
+ if (a.message() != b.message()) return false;
+ if (a.raw_code() != b.raw_code()) return false;
+ if (a.GetPayloads() == b.GetPayloads()) return true;
+
+ const status_internal::Payloads no_payloads;
+ const status_internal::Payloads* larger_payloads =
+ a.GetPayloads() ? a.GetPayloads() : &no_payloads;
+ const status_internal::Payloads* smaller_payloads =
+ b.GetPayloads() ? b.GetPayloads() : &no_payloads;
+ if (larger_payloads->size() < smaller_payloads->size()) {
+ std::swap(larger_payloads, smaller_payloads);
+ }
+ if ((larger_payloads->size() - smaller_payloads->size()) > 1) return false;
+ // Payloads can be ordered differently, so we can't just compare payload
+ // vectors.
+ for (const auto& payload : *larger_payloads) {
+ bool found = false;
+ for (const auto& other_payload : *smaller_payloads) {
+ if (payload.type_url == other_payload.type_url) {
+ if (payload.payload != other_payload.payload) {
+ return false;
+ }
+ found = true;
+ break;
+ }
+ }
+ if (!found) return false;
+ }
+ return true;
+}
+
+TString Status::ToStringSlow(StatusToStringMode mode) const {
+ TString text;
+ y_absl::StrAppend(&text, y_absl::StatusCodeToString(code()), ": ", message());
+
+ const bool with_payload = (mode & StatusToStringMode::kWithPayload) ==
+ StatusToStringMode::kWithPayload;
+
+ if (with_payload) {
+ status_internal::StatusPayloadPrinter printer =
+ status_internal::GetStatusPayloadPrinter();
+ this->ForEachPayload([&](y_absl::string_view type_url,
+ const y_absl::Cord& payload) {
+ y_absl::optional<TString> result;
+ if (printer) result = printer(type_url, payload);
+ y_absl::StrAppend(
+ &text, " [", type_url, "='",
+ result.has_value() ? *result : y_absl::CHexEscape(TString(payload)),
+ "']");
+ });
+ }
+
+ return text;
+}
+
+std::ostream& operator<<(std::ostream& os, const Status& x) {
+ os << x.ToString(StatusToStringMode::kWithEverything);
+ return os;
+}
+
+Status AbortedError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kAborted, message);
+}
+
+Status AlreadyExistsError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kAlreadyExists, message);
+}
+
+Status CancelledError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kCancelled, message);
+}
+
+Status DataLossError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kDataLoss, message);
+}
+
+Status DeadlineExceededError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kDeadlineExceeded, message);
+}
+
+Status FailedPreconditionError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kFailedPrecondition, message);
+}
+
+Status InternalError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kInternal, message);
+}
+
+Status InvalidArgumentError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kInvalidArgument, message);
+}
+
+Status NotFoundError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kNotFound, message);
+}
+
+Status OutOfRangeError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kOutOfRange, message);
+}
+
+Status PermissionDeniedError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kPermissionDenied, message);
+}
+
+Status ResourceExhaustedError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kResourceExhausted, message);
+}
+
+Status UnauthenticatedError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kUnauthenticated, message);
+}
+
+Status UnavailableError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kUnavailable, message);
+}
+
+Status UnimplementedError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kUnimplemented, message);
+}
+
+Status UnknownError(y_absl::string_view message) {
+ return Status(y_absl::StatusCode::kUnknown, message);
+}
+
+bool IsAborted(const Status& status) {
+ return status.code() == y_absl::StatusCode::kAborted;
+}
+
+bool IsAlreadyExists(const Status& status) {
+ return status.code() == y_absl::StatusCode::kAlreadyExists;
+}
+
+bool IsCancelled(const Status& status) {
+ return status.code() == y_absl::StatusCode::kCancelled;
+}
+
+bool IsDataLoss(const Status& status) {
+ return status.code() == y_absl::StatusCode::kDataLoss;
+}
+
+bool IsDeadlineExceeded(const Status& status) {
+ return status.code() == y_absl::StatusCode::kDeadlineExceeded;
+}
+
+bool IsFailedPrecondition(const Status& status) {
+ return status.code() == y_absl::StatusCode::kFailedPrecondition;
+}
+
+bool IsInternal(const Status& status) {
+ return status.code() == y_absl::StatusCode::kInternal;
+}
+
+bool IsInvalidArgument(const Status& status) {
+ return status.code() == y_absl::StatusCode::kInvalidArgument;
+}
+
+bool IsNotFound(const Status& status) {
+ return status.code() == y_absl::StatusCode::kNotFound;
+}
+
+bool IsOutOfRange(const Status& status) {
+ return status.code() == y_absl::StatusCode::kOutOfRange;
+}
+
+bool IsPermissionDenied(const Status& status) {
+ return status.code() == y_absl::StatusCode::kPermissionDenied;
+}
+
+bool IsResourceExhausted(const Status& status) {
+ return status.code() == y_absl::StatusCode::kResourceExhausted;
+}
+
+bool IsUnauthenticated(const Status& status) {
+ return status.code() == y_absl::StatusCode::kUnauthenticated;
+}
+
+bool IsUnavailable(const Status& status) {
+ return status.code() == y_absl::StatusCode::kUnavailable;
+}
+
+bool IsUnimplemented(const Status& status) {
+ return status.code() == y_absl::StatusCode::kUnimplemented;
+}
+
+bool IsUnknown(const Status& status) {
+ return status.code() == y_absl::StatusCode::kUnknown;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h
new file mode 100644
index 00000000000..fa461e9f76a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h
@@ -0,0 +1,882 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: status.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the Abseil `status` library, consisting of:
+//
+// * An `y_absl::Status` class for holding error handling information
+// * A set of canonical `y_absl::StatusCode` error codes, and associated
+// utilities for generating and propagating status codes.
+// * A set of helper functions for creating status codes and checking their
+// values
+//
+// Within Google, `y_absl::Status` is the primary mechanism for gracefully
+// handling errors across API boundaries (and in particular across RPC
+// boundaries). Some of these errors may be recoverable, but others may not.
+// Most functions that can produce a recoverable error should be designed to
+// return an `y_absl::Status` (or `y_absl::StatusOr`).
+//
+// Example:
+//
+// y_absl::Status myFunction(y_absl::string_view fname, ...) {
+// ...
+// // encounter error
+// if (error condition) {
+// return y_absl::InvalidArgumentError("bad mode");
+// }
+// // else, return OK
+// return y_absl::OkStatus();
+// }
+//
+// An `y_absl::Status` is designed to either return "OK" or one of a number of
+// different error codes, corresponding to typical error conditions.
+// In almost all cases, when using `y_absl::Status` you should use the canonical
+// error codes (of type `y_absl::StatusCode`) enumerated in this header file.
+// These canonical codes are understood across the codebase and will be
+// accepted across all API and RPC boundaries.
+#ifndef ABSL_STATUS_STATUS_H_
+#define ABSL_STATUS_STATUS_H_
+
+#include <iostream>
+#include <util/generic/string.h>
+
+#include "y_absl/container/inlined_vector.h"
+#include "y_absl/functional/function_ref.h"
+#include "y_absl/status/internal/status_internal.h"
+#include "y_absl/strings/cord.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/types/optional.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// y_absl::StatusCode
+//
+// An `y_absl::StatusCode` is an enumerated type indicating either no error ("OK")
+// or an error condition. In most cases, an `y_absl::Status` indicates a
+// recoverable error, and the purpose of signalling an error is to indicate what
+// action to take in response to that error. These error codes map to the proto
+// RPC error codes indicated in https://cloud.google.com/apis/design/errors.
+//
+// The errors listed below are the canonical errors associated with
+// `y_absl::Status` and are used throughout the codebase. As a result, these
+// error codes are somewhat generic.
+//
+// In general, try to return the most specific error that applies if more than
+// one error may pertain. For example, prefer `kOutOfRange` over
+// `kFailedPrecondition` if both codes apply. Similarly prefer `kNotFound` or
+// `kAlreadyExists` over `kFailedPrecondition`.
+//
+// Because these errors may cross RPC boundaries, these codes are tied to the
+// `google.rpc.Code` definitions within
+// https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+// The string value of these RPC codes is denoted within each enum below.
+//
+// If your error handling code requires more context, you can attach payloads
+// to your status. See `y_absl::Status::SetPayload()` and
+// `y_absl::Status::GetPayload()` below.
+enum class StatusCode : int {
+ // StatusCode::kOk
+ //
+ // kOk (gRPC code "OK") does not indicate an error; this value is returned on
+ // success. It is typical to check for this value before proceeding on any
+ // given call across an API or RPC boundary. To check this value, use the
+ // `y_absl::Status::ok()` member function rather than inspecting the raw code.
+ kOk = 0,
+
+ // StatusCode::kCancelled
+ //
+ // kCancelled (gRPC code "CANCELLED") indicates the operation was cancelled,
+ // typically by the caller.
+ kCancelled = 1,
+
+ // StatusCode::kUnknown
+ //
+ // kUnknown (gRPC code "UNKNOWN") indicates an unknown error occurred. In
+ // general, more specific errors should be raised, if possible. Errors raised
+ // by APIs that do not return enough error information may be converted to
+ // this error.
+ kUnknown = 2,
+
+ // StatusCode::kInvalidArgument
+ //
+ // kInvalidArgument (gRPC code "INVALID_ARGUMENT") indicates the caller
+ // specified an invalid argument, such as a malformed filename. Note that use
+ // of such errors should be narrowly limited to indicate the invalid nature of
+ // the arguments themselves. Errors with validly formed arguments that may
+ // cause errors with the state of the receiving system should be denoted with
+ // `kFailedPrecondition` instead.
+ kInvalidArgument = 3,
+
+ // StatusCode::kDeadlineExceeded
+ //
+ // kDeadlineExceeded (gRPC code "DEADLINE_EXCEEDED") indicates a deadline
+ // expired before the operation could complete. For operations that may change
+ // state within a system, this error may be returned even if the operation has
+ // completed successfully. For example, a successful response from a server
+ // could have been delayed long enough for the deadline to expire.
+ kDeadlineExceeded = 4,
+
+ // StatusCode::kNotFound
+ //
+ // kNotFound (gRPC code "NOT_FOUND") indicates some requested entity (such as
+ // a file or directory) was not found.
+ //
+ // `kNotFound` is useful if a request should be denied for an entire class of
+ // users, such as during a gradual feature rollout or undocumented allowlist.
+ // If a request should be denied for specific sets of users, such as through
+ // user-based access control, use `kPermissionDenied` instead.
+ kNotFound = 5,
+
+ // StatusCode::kAlreadyExists
+ //
+ // kAlreadyExists (gRPC code "ALREADY_EXISTS") indicates that the entity a
+ // caller attempted to create (such as a file or directory) is already
+ // present.
+ kAlreadyExists = 6,
+
+ // StatusCode::kPermissionDenied
+ //
+ // kPermissionDenied (gRPC code "PERMISSION_DENIED") indicates that the caller
+ // does not have permission to execute the specified operation. Note that this
+ // error is different from an error due to an *un*authenticated user. This
+ // error code does not imply the request is valid or the requested entity
+ // exists or satisfies any other pre-conditions.
+ //
+ // `kPermissionDenied` must not be used for rejections caused by exhausting
+ // some resource. Instead, use `kResourceExhausted` for those errors.
+ // `kPermissionDenied` must not be used if the caller cannot be identified.
+ // Instead, use `kUnauthenticated` for those errors.
+ kPermissionDenied = 7,
+
+ // StatusCode::kResourceExhausted
+ //
+ // kResourceExhausted (gRPC code "RESOURCE_EXHAUSTED") indicates some resource
+ // has been exhausted, perhaps a per-user quota, or perhaps the entire file
+ // system is out of space.
+ kResourceExhausted = 8,
+
+ // StatusCode::kFailedPrecondition
+ //
+ // kFailedPrecondition (gRPC code "FAILED_PRECONDITION") indicates that the
+ // operation was rejected because the system is not in a state required for
+ // the operation's execution. For example, a directory to be deleted may be
+ // non-empty, an "rmdir" operation is applied to a non-directory, etc.
+ //
+ // Some guidelines that may help a service implementer in deciding between
+ // `kFailedPrecondition`, `kAborted`, and `kUnavailable`:
+ //
+ // (a) Use `kUnavailable` if the client can retry just the failing call.
+ // (b) Use `kAborted` if the client should retry at a higher transaction
+ // level (such as when a client-specified test-and-set fails, indicating
+ // the client should restart a read-modify-write sequence).
+ // (c) Use `kFailedPrecondition` if the client should not retry until
+ // the system state has been explicitly fixed. For example, if an "rmdir"
+ // fails because the directory is non-empty, `kFailedPrecondition`
+ // should be returned since the client should not retry unless
+ // the files are deleted from the directory.
+ kFailedPrecondition = 9,
+
+ // StatusCode::kAborted
+ //
+ // kAborted (gRPC code "ABORTED") indicates the operation was aborted,
+ // typically due to a concurrency issue such as a sequencer check failure or a
+ // failed transaction.
+ //
+ // See the guidelines above for deciding between `kFailedPrecondition`,
+ // `kAborted`, and `kUnavailable`.
+ kAborted = 10,
+
+ // StatusCode::kOutOfRange
+ //
+ // kOutOfRange (gRPC code "OUT_OF_RANGE") indicates the operation was
+ // attempted past the valid range, such as seeking or reading past an
+ // end-of-file.
+ //
+ // Unlike `kInvalidArgument`, this error indicates a problem that may
+ // be fixed if the system state changes. For example, a 32-bit file
+ // system will generate `kInvalidArgument` if asked to read at an
+ // offset that is not in the range [0,2^32-1], but it will generate
+ // `kOutOfRange` if asked to read from an offset past the current
+ // file size.
+ //
+ // There is a fair bit of overlap between `kFailedPrecondition` and
+ // `kOutOfRange`. We recommend using `kOutOfRange` (the more specific
+ // error) when it applies so that callers who are iterating through
+ // a space can easily look for a `kOutOfRange` error to detect when
+ // they are done.
+ kOutOfRange = 11,
+
+ // StatusCode::kUnimplemented
+ //
+ // kUnimplemented (gRPC code "UNIMPLEMENTED") indicates the operation is not
+ // implemented or supported in this service. In this case, the operation
+ // should not be re-attempted.
+ kUnimplemented = 12,
+
+ // StatusCode::kInternal
+ //
+ // kInternal (gRPC code "INTERNAL") indicates an internal error has occurred
+ // and some invariants expected by the underlying system have not been
+ // satisfied. This error code is reserved for serious errors.
+ kInternal = 13,
+
+ // StatusCode::kUnavailable
+ //
+ // kUnavailable (gRPC code "UNAVAILABLE") indicates the service is currently
+ // unavailable and that this is most likely a transient condition. An error
+ // such as this can be corrected by retrying with a backoff scheme. Note that
+ // it is not always safe to retry non-idempotent operations.
+ //
+ // See the guidelines above for deciding between `kFailedPrecondition`,
+ // `kAborted`, and `kUnavailable`.
+ kUnavailable = 14,
+
+ // StatusCode::kDataLoss
+ //
+ // kDataLoss (gRPC code "DATA_LOSS") indicates that unrecoverable data loss or
+ // corruption has occurred. As this error is serious, proper alerting should
+ // be attached to errors such as this.
+ kDataLoss = 15,
+
+ // StatusCode::kUnauthenticated
+ //
+ // kUnauthenticated (gRPC code "UNAUTHENTICATED") indicates that the request
+ // does not have valid authentication credentials for the operation. Correct
+ // the authentication and try again.
+ kUnauthenticated = 16,
+
+ // StatusCode::DoNotUseReservedForFutureExpansionUseDefaultInSwitchInstead_
+ //
+ // NOTE: this error code entry should not be used and you should not rely on
+ // its value, which may change.
+ //
+ // The purpose of this enumerated value is to force people who handle status
+ // codes with `switch()` statements to *not* simply enumerate all possible
+ // values, but instead provide a "default:" case. Providing such a default
+ // case ensures that code will compile when new codes are added.
+ kDoNotUseReservedForFutureExpansionUseDefaultInSwitchInstead_ = 20
+};
+
+// StatusCodeToString()
+//
+// Returns the name for the status code, or "" if it is an unknown value.
+TString StatusCodeToString(StatusCode code);
+
+// operator<<
+//
+// Streams StatusCodeToString(code) to `os`.
+std::ostream& operator<<(std::ostream& os, StatusCode code);
+
+// y_absl::StatusToStringMode
+//
+// An `y_absl::StatusToStringMode` is an enumerated type indicating how
+// `y_absl::Status::ToString()` should construct the output string for a non-ok
+// status.
+enum class StatusToStringMode : int {
+ // ToString will not contain any extra data (such as payloads). It will only
+ // contain the error code and message, if any.
+ kWithNoExtraData = 0,
+ // ToString will contain the payloads.
+ kWithPayload = 1 << 0,
+ // ToString will include all the extra data this Status has.
+ kWithEverything = ~kWithNoExtraData,
+ // Default mode used by ToString. Its exact value might change in the future.
+ kDefault = kWithPayload,
+};
+
+// y_absl::StatusToStringMode is specified as a bitmask type, which means the
+// following operations must be provided:
+inline constexpr StatusToStringMode operator&(StatusToStringMode lhs,
+ StatusToStringMode rhs) {
+ return static_cast<StatusToStringMode>(static_cast<int>(lhs) &
+ static_cast<int>(rhs));
+}
+inline constexpr StatusToStringMode operator|(StatusToStringMode lhs,
+ StatusToStringMode rhs) {
+ return static_cast<StatusToStringMode>(static_cast<int>(lhs) |
+ static_cast<int>(rhs));
+}
+inline constexpr StatusToStringMode operator^(StatusToStringMode lhs,
+ StatusToStringMode rhs) {
+ return static_cast<StatusToStringMode>(static_cast<int>(lhs) ^
+ static_cast<int>(rhs));
+}
+inline constexpr StatusToStringMode operator~(StatusToStringMode arg) {
+ return static_cast<StatusToStringMode>(~static_cast<int>(arg));
+}
+inline StatusToStringMode& operator&=(StatusToStringMode& lhs,
+ StatusToStringMode rhs) {
+ lhs = lhs & rhs;
+ return lhs;
+}
+inline StatusToStringMode& operator|=(StatusToStringMode& lhs,
+ StatusToStringMode rhs) {
+ lhs = lhs | rhs;
+ return lhs;
+}
+inline StatusToStringMode& operator^=(StatusToStringMode& lhs,
+ StatusToStringMode rhs) {
+ lhs = lhs ^ rhs;
+ return lhs;
+}
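+
+// For example, to print everything except payloads, mask the payload bit out
+// of `kWithEverything` (a sketch; `DoSomething()` is a hypothetical function
+// returning a non-OK status):
+//
+// y_absl::Status result = DoSomething();
+// TString text = result.ToString(StatusToStringMode::kWithEverything &
+// ~StatusToStringMode::kWithPayload);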
+
+// y_absl::Status
+//
+// The `y_absl::Status` class is generally used to gracefully handle errors
+// across API boundaries (and in particular across RPC boundaries). Some of
+// these errors may be recoverable, but others may not. Most
+// functions which can produce a recoverable error should be designed to return
+// either an `y_absl::Status` (or the similar `y_absl::StatusOr<T>`, which holds
+// either an object of type `T` or an error).
+//
+// API developers should construct their functions to return `y_absl::OkStatus()`
+// upon success, or an `y_absl::StatusCode` upon another type of error (e.g.
+// an `y_absl::StatusCode::kInvalidArgument` error). The API provides convenience
+// functions to construct each status code.
+//
+// Example:
+//
+// y_absl::Status myFunction(y_absl::string_view fname, ...) {
+// ...
+// // encounter error
+// if (error condition) {
+// // Construct an y_absl::StatusCode::kInvalidArgument error
+// return y_absl::InvalidArgumentError("bad mode");
+// }
+// // else, return OK
+// return y_absl::OkStatus();
+// }
+//
+// Users handling status error codes should prefer checking for an OK status
+// using the `ok()` member function. Handling multiple error codes may justify
+// use of a switch statement, but only check for error codes you know how to
+// handle; do not try to exhaustively match against all canonical error codes.
+// Errors that cannot be handled should be logged and/or propagated for higher
+// levels to deal with. If you do use a switch statement, make sure that you
+// also provide a `default:` switch case, so that code does not break as other
+// canonical codes are added to the API.
+//
+// Example:
+//
+// y_absl::Status result = DoSomething();
+// if (!result.ok()) {
+// LOG(ERROR) << result;
+// }
+//
+// // Provide a default if switching on multiple error codes
+// switch (result.code()) {
+// // The user hasn't authenticated. Ask them to reauth
+// case y_absl::StatusCode::kUnauthenticated:
+// DoReAuth();
+// break;
+// // The user does not have permission. Log an error.
+// case y_absl::StatusCode::kPermissionDenied:
+// LOG(ERROR) << result;
+// break;
+// // Propagate the error otherwise.
+// default:
+// return true;
+// }
+//
+// An `y_absl::Status` can optionally include a payload with more information
+// about the error. Typically, this payload serves one of several purposes:
+//
+// * It may provide more fine-grained semantic information about the error to
+// facilitate actionable remedies.
+// * It may provide human-readable contextual information that is more
+// appropriate to display to an end user.
+//
+// Example:
+//
+// y_absl::Status result = DoSomething();
+// // Inform user to retry after 30 seconds
+// // See more error details in googleapis/google/rpc/error_details.proto
+// if (y_absl::IsResourceExhausted(result)) {
+// google::rpc::RetryInfo info;
+// info.mutable_retry_delay()->set_seconds(30);
+// // Payloads require a unique key (a URL to ensure no collisions with
+// // other payloads), and an `y_absl::Cord` to hold the encoded data.
+// y_absl::string_view url = "type.googleapis.com/google.rpc.RetryInfo";
+// result.SetPayload(url, info.SerializeAsCord());
+// return result;
+// }
+//
+// For documentation see https://abseil.io/docs/cpp/guides/status.
+//
+// Returned Status objects may not be ignored. status_internal.h has a forward
+// declaration of the form
+// class ABSL_MUST_USE_RESULT Status;
+class Status final {
+ public:
+ // Constructors
+
+ // This default constructor creates an OK status with no message or payload.
+ // Avoid this constructor and prefer explicit construction of an OK status
+ // with `y_absl::OkStatus()`.
+ Status();
+
+ // Creates a status in the canonical error space with the specified
+ // `y_absl::StatusCode` and error message. If `code == y_absl::StatusCode::kOk`, // NOLINT
+ // `msg` is ignored and an object identical to an OK status is constructed.
+ //
+ // The `msg` string must be in UTF-8. The implementation may complain (e.g., // NOLINT
+ // by printing a warning) if it is not.
+ Status(y_absl::StatusCode code, y_absl::string_view msg);
+
+ Status(const Status&);
+ Status& operator=(const Status& x);
+
+ // Move operators
+
+ // The moved-from state is valid but unspecified.
+ Status(Status&&) noexcept;
+ Status& operator=(Status&&);
+
+ ~Status();
+
+ // Status::Update()
+ //
+ // Updates the existing status with `new_status` provided that `this->ok()`.
+ // If the existing status already contains a non-OK error, this update has no
+ // effect and preserves the current data. Note that this behavior may change
+ // in the future to augment a current non-ok status with additional
+ // information about `new_status`.
+ //
+ // `Update()` provides a convenient way of keeping track of the first error
+ // encountered.
+ //
+ // Example:
+ // // Instead of "if (overall_status.ok()) overall_status = new_status"
+ // overall_status.Update(new_status);
+ //
+ void Update(const Status& new_status);
+ void Update(Status&& new_status);
+
+ // Status::ok()
+ //
+ // Returns `true` if this status is OK (i.e., its code is
+ // `y_absl::StatusCode::kOk`). Prefer checking for an OK status using this
+ // member function.
+ ABSL_MUST_USE_RESULT bool ok() const;
+
+ // Status::code()
+ //
+ // Returns the canonical error code of type `y_absl::StatusCode` of this status.
+ y_absl::StatusCode code() const;
+
+ // Status::raw_code()
+ //
+ // Returns a raw (canonical) error code corresponding to the enum value of
+ // `google.rpc.Code` definitions within
+ // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto.
+ // These values could be out of the range of canonical `y_absl::StatusCode`
+ // enum values.
+ //
+ // NOTE: This function should only be called when converting to an associated
+ // wire format. Use `Status::code()` for error handling.
+ int raw_code() const;
+
+ // Status::message()
+ //
+ // Returns the error message associated with this error code, if available.
+ // Note that this message rarely describes the error code. It is not unusual
+ // for the error message to be the empty string. As a result, prefer
+ // `operator<<` or `Status::ToString()` for debug logging.
+ y_absl::string_view message() const;
+
+ friend bool operator==(const Status&, const Status&);
+ friend bool operator!=(const Status&, const Status&);
+
+ // Status::ToString()
+ //
+ // Returns a string based on the `mode`. By default, it returns a combination
+ // of the error code name, the message, and any associated payload messages.
+ // This string is designed simply to be human readable; its exact format is
+ // not load-bearing and is subject to change, so do not depend on it.
+ //
+ // The printed code name and the message are generally substrings of the
+ // result, and the payloads to be printed use the status payload printer
+ // mechanism (which is internal).
+ TString ToString(
+ StatusToStringMode mode = StatusToStringMode::kDefault) const;
+
+ // Status::IgnoreError()
+ //
+ // Ignores any errors. This method does nothing except potentially suppress
+ // complaints from any tools that are checking that errors are not dropped on
+ // the floor.
+ void IgnoreError() const;
+
+ // swap()
+ //
+ // Swap the contents of one status with another.
+ friend void swap(Status& a, Status& b);
+
+ //----------------------------------------------------------------------------
+ // Payload Management APIs
+ //----------------------------------------------------------------------------
+
+ // A payload may be attached to a status to provide additional context to an
+ // error that may not be satisfied by an existing `y_absl::StatusCode`.
+ // Typically, this payload serves one of several purposes:
+ //
+ // * It may provide more fine-grained semantic information about the error
+ // to facilitate actionable remedies.
+ // * It may provide human-readable contextual information that is more
+ // appropriate to display to an end user.
+ //
+ // A payload consists of a [key,value] pair, where the key is a string
+ // referring to a unique "type URL" and the value is an object of type
+ // `y_absl::Cord` to hold the contextual data.
+ //
+ // The "type URL" should be unique and follow the format of a URL
+ // (https://en.wikipedia.org/wiki/URL) and, ideally, provide some
+ // documentation or schema on how to interpret its associated data. For
+ // example, the default type URL for a protobuf message type is
+ // "type.googleapis.com/packagename.messagename". Other custom wire formats
+ // should define the format of the type URL in a similar manner so as to
+ // minimize the chance of conflicts between type URLs.
+ // Users should ensure that the type URL can be mapped to a concrete
+ // C++ type if they want to deserialize the payload and read it effectively.
+ //
+ // To attach a payload to a status object, call `Status::SetPayload()`,
+ // passing it the type URL and an `y_absl::Cord` of associated data. Similarly,
+ // to extract the payload from a status, call `Status::GetPayload()`. You
+ // may attach multiple payloads (with differing type URLs) to any given
+ // status object, provided that the status is currently exhibiting an error
+ // code (i.e. is not OK).
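+ //
+ // For example (a sketch; the type URL and payload bytes below are
+ // illustrative, not a format this library defines):
+ //
+ // y_absl::Status s = y_absl::NotFoundError("no such user");
+ // s.SetPayload("type.example.com/DebugInfo", y_absl::Cord("uid=42"));
+ // if (y_absl::optional<y_absl::Cord> p =
+ // s.GetPayload("type.example.com/DebugInfo")) {
+ // // `*p` holds the attached `y_absl::Cord`.
+ // }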
+
+ // Status::GetPayload()
+ //
+ // Gets the payload of a status given its unique `type_url` key, if present.
+ y_absl::optional<y_absl::Cord> GetPayload(y_absl::string_view type_url) const;
+
+ // Status::SetPayload()
+ //
+ // Sets the payload for a non-ok status using a `type_url` key, overwriting
+ // any existing payload for that `type_url`.
+ //
+ // NOTE: This function does nothing if the Status is ok.
+ void SetPayload(y_absl::string_view type_url, y_absl::Cord payload);
+
+ // Status::ErasePayload()
+ //
+ // Erases the payload corresponding to the `type_url` key. Returns `true` if
+ // the payload was present.
+ bool ErasePayload(y_absl::string_view type_url);
+
+ // Status::ForEachPayload()
+ //
+ // Iterates over the stored payloads and calls the
+ // `visitor(type_key, payload)` callable for each one.
+ //
+ // NOTE: The order of calls to `visitor()` is not specified and may change at
+ // any time.
+ //
+ // NOTE: Any mutation on the same `y_absl::Status` object during visitation is
+ // forbidden and could result in undefined behavior.
+ void ForEachPayload(
+ y_absl::FunctionRef<void(y_absl::string_view, const y_absl::Cord&)> visitor)
+ const;
+
+ private:
+ friend Status CancelledError();
+
+ // Creates a status in the canonical error space with the specified
+ // code, and an empty error message.
+ explicit Status(y_absl::StatusCode code);
+
+ static void UnrefNonInlined(uintptr_t rep);
+ static void Ref(uintptr_t rep);
+ static void Unref(uintptr_t rep);
+
+ // REQUIRES: !ok()
+ // Ensures rep_ is not shared with any other Status.
+ void PrepareToModify();
+
+ const status_internal::Payloads* GetPayloads() const;
+ status_internal::Payloads* GetPayloads();
+
+ // Takes ownership of payload.
+ static uintptr_t NewRep(
+ y_absl::StatusCode code, y_absl::string_view msg,
+ std::unique_ptr<status_internal::Payloads> payload);
+ static bool EqualsSlow(const y_absl::Status& a, const y_absl::Status& b);
+
+ // MSVC 14.0 limitation requires the const.
+ static constexpr const char kMovedFromString[] =
+ "Status accessed after move.";
+
+ static const TString* EmptyString();
+ static const TString* MovedFromString();
+
+ // Returns whether rep contains an inlined representation.
+ // See rep_ for details.
+ static bool IsInlined(uintptr_t rep);
+
+ // Indicates whether this Status was the rhs of a move operation. See rep_
+ // for details.
+ static bool IsMovedFrom(uintptr_t rep);
+ static uintptr_t MovedFromRep();
+
+ // Convert between error::Code and the inlined uintptr_t representation used
+ // by rep_. See rep_ for details.
+ static uintptr_t CodeToInlinedRep(y_absl::StatusCode code);
+ static y_absl::StatusCode InlinedRepToCode(uintptr_t rep);
+
+ // Converts between StatusRep* and the external uintptr_t representation used
+ // by rep_. See rep_ for details.
+ static uintptr_t PointerToRep(status_internal::StatusRep* r);
+ static status_internal::StatusRep* RepToPointer(uintptr_t r);
+
+ TString ToStringSlow(StatusToStringMode mode) const;
+
+ // Status supports two different representations.
+ // - When the low bit is off it is an inlined representation.
+ // It uses the canonical error space, no message or payload.
+ // The error code is (rep_ >> 2).
+ // The (rep_ & 2) bit is the "moved from" indicator, used in IsMovedFrom().
+ // - When the low bit is on it is an external representation.
+ // In this case all the data comes from a heap allocated Rep object.
+ // (rep_ - 1) is a status_internal::StatusRep* pointer to that structure.
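+ //
+ // For example, an inlined `kNotFound` status (code 5) is stored as
+ // rep_ == 0b10100 (5 << 2): the low bit is clear (inlined) and bit 1 is
+ // clear (not moved-from).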
+ uintptr_t rep_;
+};
+
+// OkStatus()
+//
+// Returns an OK status, equivalent to a default constructed instance. Prefer
+// usage of `y_absl::OkStatus()` when constructing such an OK status.
+Status OkStatus();
+
+// operator<<()
+//
+// Prints a human-readable representation of `x` to `os`.
+std::ostream& operator<<(std::ostream& os, const Status& x);
+
+// IsAborted()
+// IsAlreadyExists()
+// IsCancelled()
+// IsDataLoss()
+// IsDeadlineExceeded()
+// IsFailedPrecondition()
+// IsInternal()
+// IsInvalidArgument()
+// IsNotFound()
+// IsOutOfRange()
+// IsPermissionDenied()
+// IsResourceExhausted()
+// IsUnauthenticated()
+// IsUnavailable()
+// IsUnimplemented()
+// IsUnknown()
+//
+// These convenience functions return `true` if a given status matches the
+// `y_absl::StatusCode` error code of its associated function.
+ABSL_MUST_USE_RESULT bool IsAborted(const Status& status);
+ABSL_MUST_USE_RESULT bool IsAlreadyExists(const Status& status);
+ABSL_MUST_USE_RESULT bool IsCancelled(const Status& status);
+ABSL_MUST_USE_RESULT bool IsDataLoss(const Status& status);
+ABSL_MUST_USE_RESULT bool IsDeadlineExceeded(const Status& status);
+ABSL_MUST_USE_RESULT bool IsFailedPrecondition(const Status& status);
+ABSL_MUST_USE_RESULT bool IsInternal(const Status& status);
+ABSL_MUST_USE_RESULT bool IsInvalidArgument(const Status& status);
+ABSL_MUST_USE_RESULT bool IsNotFound(const Status& status);
+ABSL_MUST_USE_RESULT bool IsOutOfRange(const Status& status);
+ABSL_MUST_USE_RESULT bool IsPermissionDenied(const Status& status);
+ABSL_MUST_USE_RESULT bool IsResourceExhausted(const Status& status);
+ABSL_MUST_USE_RESULT bool IsUnauthenticated(const Status& status);
+ABSL_MUST_USE_RESULT bool IsUnavailable(const Status& status);
+ABSL_MUST_USE_RESULT bool IsUnimplemented(const Status& status);
+ABSL_MUST_USE_RESULT bool IsUnknown(const Status& status);
+
+// AbortedError()
+// AlreadyExistsError()
+// CancelledError()
+// DataLossError()
+// DeadlineExceededError()
+// FailedPreconditionError()
+// InternalError()
+// InvalidArgumentError()
+// NotFoundError()
+// OutOfRangeError()
+// PermissionDeniedError()
+// ResourceExhaustedError()
+// UnauthenticatedError()
+// UnavailableError()
+// UnimplementedError()
+// UnknownError()
+//
+// These convenience functions create an `y_absl::Status` object with an error
+// code as indicated by the associated function name, using the error message
+// passed in `message`.
+Status AbortedError(y_absl::string_view message);
+Status AlreadyExistsError(y_absl::string_view message);
+Status CancelledError(y_absl::string_view message);
+Status DataLossError(y_absl::string_view message);
+Status DeadlineExceededError(y_absl::string_view message);
+Status FailedPreconditionError(y_absl::string_view message);
+Status InternalError(y_absl::string_view message);
+Status InvalidArgumentError(y_absl::string_view message);
+Status NotFoundError(y_absl::string_view message);
+Status OutOfRangeError(y_absl::string_view message);
+Status PermissionDeniedError(y_absl::string_view message);
+Status ResourceExhaustedError(y_absl::string_view message);
+Status UnauthenticatedError(y_absl::string_view message);
+Status UnavailableError(y_absl::string_view message);
+Status UnimplementedError(y_absl::string_view message);
+Status UnknownError(y_absl::string_view message);
+
+//------------------------------------------------------------------------------
+// Implementation details follow
+//------------------------------------------------------------------------------
+
+inline Status::Status() : rep_(CodeToInlinedRep(y_absl::StatusCode::kOk)) {}
+
+inline Status::Status(y_absl::StatusCode code) : rep_(CodeToInlinedRep(code)) {}
+
+inline Status::Status(const Status& x) : rep_(x.rep_) { Ref(rep_); }
+
+inline Status& Status::operator=(const Status& x) {
+ uintptr_t old_rep = rep_;
+ if (x.rep_ != old_rep) {
+ Ref(x.rep_);
+ rep_ = x.rep_;
+ Unref(old_rep);
+ }
+ return *this;
+}
+
+inline Status::Status(Status&& x) noexcept : rep_(x.rep_) {
+ x.rep_ = MovedFromRep();
+}
+
+inline Status& Status::operator=(Status&& x) {
+ uintptr_t old_rep = rep_;
+ if (x.rep_ != old_rep) {
+ rep_ = x.rep_;
+ x.rep_ = MovedFromRep();
+ Unref(old_rep);
+ }
+ return *this;
+}
+
+inline void Status::Update(const Status& new_status) {
+ if (ok()) {
+ *this = new_status;
+ }
+}
+
+inline void Status::Update(Status&& new_status) {
+ if (ok()) {
+ *this = std::move(new_status);
+ }
+}
+
+inline Status::~Status() { Unref(rep_); }
+
+inline bool Status::ok() const {
+ return rep_ == CodeToInlinedRep(y_absl::StatusCode::kOk);
+}
+
+inline y_absl::string_view Status::message() const {
+ return !IsInlined(rep_)
+ ? RepToPointer(rep_)->message
+ : (IsMovedFrom(rep_) ? y_absl::string_view(kMovedFromString)
+ : y_absl::string_view());
+}
+
+inline bool operator==(const Status& lhs, const Status& rhs) {
+ return lhs.rep_ == rhs.rep_ || Status::EqualsSlow(lhs, rhs);
+}
+
+inline bool operator!=(const Status& lhs, const Status& rhs) {
+ return !(lhs == rhs);
+}
+
+inline TString Status::ToString(StatusToStringMode mode) const {
+ return ok() ? "OK" : ToStringSlow(mode);
+}
+
+inline void Status::IgnoreError() const {
+ // no-op
+}
+
+inline void swap(y_absl::Status& a, y_absl::Status& b) {
+ using std::swap;
+ swap(a.rep_, b.rep_);
+}
+
+inline const status_internal::Payloads* Status::GetPayloads() const {
+ return IsInlined(rep_) ? nullptr : RepToPointer(rep_)->payloads.get();
+}
+
+inline status_internal::Payloads* Status::GetPayloads() {
+ return IsInlined(rep_) ? nullptr : RepToPointer(rep_)->payloads.get();
+}
+
+inline bool Status::IsInlined(uintptr_t rep) { return (rep & 1) == 0; }
+
+inline bool Status::IsMovedFrom(uintptr_t rep) {
+ return IsInlined(rep) && (rep & 2) != 0;
+}
+
+inline uintptr_t Status::MovedFromRep() {
+ return CodeToInlinedRep(y_absl::StatusCode::kInternal) | 2;
+}
+
+inline uintptr_t Status::CodeToInlinedRep(y_absl::StatusCode code) {
+ return static_cast<uintptr_t>(code) << 2;
+}
+
+inline y_absl::StatusCode Status::InlinedRepToCode(uintptr_t rep) {
+ assert(IsInlined(rep));
+ return static_cast<y_absl::StatusCode>(rep >> 2);
+}
+
+inline status_internal::StatusRep* Status::RepToPointer(uintptr_t rep) {
+ assert(!IsInlined(rep));
+ return reinterpret_cast<status_internal::StatusRep*>(rep - 1);
+}
+
+inline uintptr_t Status::PointerToRep(status_internal::StatusRep* rep) {
+ return reinterpret_cast<uintptr_t>(rep) + 1;
+}
+
+inline void Status::Ref(uintptr_t rep) {
+ if (!IsInlined(rep)) {
+ RepToPointer(rep)->ref.fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+inline void Status::Unref(uintptr_t rep) {
+ if (!IsInlined(rep)) {
+ UnrefNonInlined(rep);
+ }
+}
+
+inline Status OkStatus() { return Status(); }
+
+// Creates a `Status` object with the `y_absl::StatusCode::kCancelled` error code
+// and an empty message. It is provided only for efficiency, given that
+// message-less kCancelled errors are common in the infrastructure.
+inline Status CancelledError() { return Status(y_absl::StatusCode::kCancelled); }
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STATUS_STATUS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status_payload_printer.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status_payload_printer.cc
new file mode 100644
index 00000000000..6990ff6e8d5
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status_payload_printer.cc
@@ -0,0 +1,38 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "y_absl/status/status_payload_printer.h"
+
+#include <atomic>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/atomic_hook.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace status_internal {
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+static y_absl::base_internal::AtomicHook<StatusPayloadPrinter> storage;
+
+void SetStatusPayloadPrinter(StatusPayloadPrinter printer) {
+ storage.Store(printer);
+}
+
+StatusPayloadPrinter GetStatusPayloadPrinter() {
+ return storage.Load();
+}
+
+} // namespace status_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status_payload_printer.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status_payload_printer.h
new file mode 100644
index 00000000000..6968e0f40b6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status_payload_printer.h
@@ -0,0 +1,51 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef ABSL_STATUS_STATUS_PAYLOAD_PRINTER_H_
+#define ABSL_STATUS_STATUS_PAYLOAD_PRINTER_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/strings/cord.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/types/optional.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace status_internal {
+
+// By default, `Status::ToString` and `operator<<(Status)` print a payload by
+// dumping the type URL and the raw bytes. To help debugging, we provide an
+// extension point, which is a global printer function that can be set by users
+// to specify how to print payloads. The function takes the type URL and the
+// payload as input, and should return a valid human-readable string on success
+// or `y_absl::nullopt` on failure (in which case it falls back to the default
+// approach of printing the raw bytes).
+// NOTE: This is an internal API and the design is subject to change in the
+// future in a non-backward-compatible way. Since it's only meant for debugging
+// purposes, you should not rely on it in any critical logic.
+using StatusPayloadPrinter = y_absl::optional<TString> (*)(y_absl::string_view,
+ const y_absl::Cord&);
+
+// Sets the global payload printer. Only one printer should be set per process.
+// If multiple printers are set, it's undefined which one will be used.
+void SetStatusPayloadPrinter(StatusPayloadPrinter);
+
+// Returns the global payload printer if previously set, otherwise `nullptr`.
+StatusPayloadPrinter GetStatusPayloadPrinter();
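+
+// A minimal usage sketch (the `MyPrinter` function and type URL are
+// hypothetical; a real printer would decode the payload bytes it knows
+// about):
+//
+// y_absl::optional<TString> MyPrinter(y_absl::string_view type_url,
+// const y_absl::Cord& payload) {
+// if (type_url != "type.example.com/DebugInfo") return y_absl::nullopt;
+// return TString(payload);
+// }
+// ...
+// y_absl::status_internal::SetStatusPayloadPrinter(&MyPrinter);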
+
+} // namespace status_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STATUS_STATUS_PAYLOAD_PRINTER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.cc
new file mode 100644
index 00000000000..03e6d1cec41
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.cc
@@ -0,0 +1,103 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "y_absl/status/statusor.h"
+
+#include <cstdlib>
+#include <utility>
+
+#include "y_absl/base/call_once.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/status/status.h"
+#include "y_absl/strings/str_cat.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+BadStatusOrAccess::BadStatusOrAccess(y_absl::Status status)
+ : status_(std::move(status)) {}
+
+BadStatusOrAccess::BadStatusOrAccess(const BadStatusOrAccess& other)
+ : status_(other.status_) {}
+
+BadStatusOrAccess& BadStatusOrAccess::operator=(
+ const BadStatusOrAccess& other) {
+ // Ensure assignment is correct regardless of whether this->InitWhat() has
+ // already been called.
+ other.InitWhat();
+ status_ = other.status_;
+ what_ = other.what_;
+ return *this;
+}
+
+BadStatusOrAccess& BadStatusOrAccess::operator=(BadStatusOrAccess&& other) {
+ // Ensure assignment is correct regardless of whether this->InitWhat() has
+ // already been called.
+ other.InitWhat();
+ status_ = std::move(other.status_);
+ what_ = std::move(other.what_);
+ return *this;
+}
+
+BadStatusOrAccess::BadStatusOrAccess(BadStatusOrAccess&& other)
+ : status_(std::move(other.status_)) {}
+
+const char* BadStatusOrAccess::what() const noexcept {
+ InitWhat();
+ return what_.c_str();
+}
+
+const y_absl::Status& BadStatusOrAccess::status() const { return status_; }
+
+void BadStatusOrAccess::InitWhat() const {
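+ // Lazily format the explanatory string exactly once; `what_` and
+ // `init_what_` are mutable so this can run from the const `what()`
+ // accessor.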
+ y_absl::call_once(init_what_, [this] {
+ what_ = y_absl::StrCat("Bad StatusOr access: ", status_.ToString());
+ });
+}
+
+namespace internal_statusor {
+
+void Helper::HandleInvalidStatusCtorArg(y_absl::Status* status) {
+ const char* kMessage =
+ "An OK status is not a valid constructor argument to StatusOr<T>";
+#ifdef NDEBUG
+ ABSL_INTERNAL_LOG(ERROR, kMessage);
+#else
+ ABSL_INTERNAL_LOG(FATAL, kMessage);
+#endif
+ // In optimized builds, we will fall back to InternalError.
+ *status = y_absl::InternalError(kMessage);
+}
+
+void Helper::Crash(const y_absl::Status& status) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ y_absl::StrCat("Attempting to fetch value instead of handling error ",
+ status.ToString()));
+}
+
+void ThrowBadStatusOrAccess(y_absl::Status status) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ throw y_absl::BadStatusOrAccess(std::move(status));
+#else
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ y_absl::StrCat("Attempting to fetch value instead of handling error ",
+ status.ToString()));
+ std::abort();
+#endif
+}
+
+} // namespace internal_statusor
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h
new file mode 100644
index 00000000000..23e1b8a6e46
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h
@@ -0,0 +1,770 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: statusor.h
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::StatusOr<T>` represents a union of an `y_absl::Status` object
+// and an object of type `T`. The `y_absl::StatusOr<T>` will either contain an
+// object of type `T` (indicating a successful operation), or an error (of type
+// `y_absl::Status`) explaining why such a value is not present.
+//
+// In general, check the success of an operation returning an
+// `y_absl::StatusOr<T>` like you would an `y_absl::Status` by using the `ok()`
+// member function.
+//
+// Example:
+//
+// StatusOr<Foo> result = Calculation();
+// if (result.ok()) {
+// result->DoSomethingCool();
+// } else {
+// LOG(ERROR) << result.status();
+// }
+#ifndef ABSL_STATUS_STATUSOR_H_
+#define ABSL_STATUS_STATUSOR_H_
+
+#include <exception>
+#include <initializer_list>
+#include <new>
+#include <util/generic/string.h>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/call_once.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/status/internal/statusor_internal.h"
+#include "y_absl/status/status.h"
+#include "y_absl/types/variant.h"
+#include "y_absl/utility/utility.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// BadStatusOrAccess
+//
+// This class defines the type of object to throw (if exceptions are enabled),
+// when accessing the value of an `y_absl::StatusOr<T>` object that does not
+// contain a value. This behavior is analogous to that of
+// `std::bad_optional_access` in the case of accessing an invalid
+// `std::optional` value.
+//
+// Example:
+//
+// try {
+// y_absl::StatusOr<int> v = FetchInt();
+// DoWork(v.value()); // Accessing value() when not "OK" may throw
+// } catch (y_absl::BadStatusOrAccess& ex) {
+// LOG(ERROR) << ex.status();
+// }
+class BadStatusOrAccess : public std::exception {
+ public:
+ explicit BadStatusOrAccess(y_absl::Status status);
+ ~BadStatusOrAccess() override = default;
+
+ BadStatusOrAccess(const BadStatusOrAccess& other);
+ BadStatusOrAccess& operator=(const BadStatusOrAccess& other);
+ BadStatusOrAccess(BadStatusOrAccess&& other);
+ BadStatusOrAccess& operator=(BadStatusOrAccess&& other);
+
+ // BadStatusOrAccess::what()
+ //
+ // Returns the associated explanatory string of the `y_absl::StatusOr<T>`
+ // object's error code. This function contains information about the failing
+ // status, but its exact formatting may change and should not be depended on.
+ //
+ // The returned pointer is guaranteed to remain valid until any non-const
+ // member function is invoked on the exception object.
+ const char* what() const noexcept override;
+
+ // BadStatusOrAccess::status()
+ //
+ // Returns the associated `y_absl::Status` of the `y_absl::StatusOr<T>` object's
+ // error.
+ const y_absl::Status& status() const;
+
+ private:
+ void InitWhat() const;
+
+ y_absl::Status status_;
+ mutable y_absl::once_flag init_what_;
+ mutable TString what_;
+};
+
+// Returned StatusOr objects may not be ignored.
+template <typename T>
+class ABSL_MUST_USE_RESULT StatusOr;
+
+// y_absl::StatusOr<T>
+//
+// The `y_absl::StatusOr<T>` class template is a union of an `y_absl::Status` object
+// and an object of type `T`. The `y_absl::StatusOr<T>` models an object that is
+// either a usable object, or an error (of type `y_absl::Status`) explaining why
+// such an object is not present. An `y_absl::StatusOr<T>` is typically the return
+// value of a function which may fail.
+//
+// An `y_absl::StatusOr<T>` can never hold an "OK" status (an
+// `y_absl::StatusCode::kOk` value); instead, the presence of an object of type
+// `T` indicates success. Instead of checking for a `kOk` value, use the
+// `y_absl::StatusOr<T>::ok()` member function. (It is for this reason, and code
+// readability, that using the `ok()` function is preferred for `y_absl::Status`
+// as well.)
+//
+// Example:
+//
+// StatusOr<Foo> result = DoBigCalculationThatCouldFail();
+// if (result.ok()) {
+// result->DoSomethingCool();
+// } else {
+// LOG(ERROR) << result.status();
+// }
+//
+// Accessing the object held by an `y_absl::StatusOr<T>` should be performed via
+// `operator*` or `operator->`, after a call to `ok()` confirms that the
+// `y_absl::StatusOr<T>` holds an object of type `T`:
+//
+// Example:
+//
+// y_absl::StatusOr<int> i = GetCount();
+// if (i.ok()) {
+// updated_total += *i
+// }
+//
+// NOTE: using `y_absl::StatusOr<T>::value()` when no valid value is present will
+// throw an exception if exceptions are enabled or terminate the process when
+// exceptions are not enabled.
+//
+// Example:
+//
+// StatusOr<Foo> result = DoBigCalculationThatCouldFail();
+// const Foo& foo = result.value(); // Crash/exception if no value present
+// foo.DoSomethingCool();
+//
+// A `y_absl::StatusOr<T*>` can be constructed from a null pointer like any other
+// pointer value, and the result will be that `ok()` returns `true` and
+// `value()` returns `nullptr`. Checking the value of the pointer in an
+// `y_absl::StatusOr<T>` generally requires a bit more care, to ensure both that
+// a value is present and that the value is not null:
+//
+// StatusOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg);
+// if (!result.ok()) {
+// LOG(ERROR) << result.status();
+// } else if (*result == nullptr) {
+// LOG(ERROR) << "Unexpected null pointer";
+// } else {
+// (*result)->DoSomethingCool();
+// }
+//
+// Example factory implementation returning StatusOr<T>:
+//
+// StatusOr<Foo> FooFactory::MakeFoo(int arg) {
+// if (arg <= 0) {
+// return y_absl::Status(y_absl::StatusCode::kInvalidArgument,
+// "Arg must be positive");
+// }
+// return Foo(arg);
+// }
+template <typename T>
+class StatusOr : private internal_statusor::StatusOrData<T>,
+ private internal_statusor::CopyCtorBase<T>,
+ private internal_statusor::MoveCtorBase<T>,
+ private internal_statusor::CopyAssignBase<T>,
+ private internal_statusor::MoveAssignBase<T> {
+ template <typename U>
+ friend class StatusOr;
+
+ typedef internal_statusor::StatusOrData<T> Base;
+
+ public:
+ // StatusOr<T>::value_type
+ //
+ // This instance data provides a generic `value_type` member for use within
+ // generic programming. This usage is analogous to that of
+ // `optional::value_type` in the case of `std::optional`.
+ typedef T value_type;
+
+ // Constructors
+
+ // Constructs a new `y_absl::StatusOr` with an `y_absl::StatusCode::kUnknown`
+ // status. This constructor is marked `explicit` to prevent usages in return
+ // values such as `return {};`, under the misconception that
+ // `y_absl::StatusOr<std::vector<int>>` will be initialized with an empty
+ // vector, instead of an `y_absl::StatusCode::kUnknown` error code.
+ explicit StatusOr();
+
+ // `StatusOr<T>` is copy constructible if `T` is copy constructible.
+ StatusOr(const StatusOr&) = default;
+ // `StatusOr<T>` is copy assignable if `T` is copy constructible and copy
+ // assignable.
+ StatusOr& operator=(const StatusOr&) = default;
+
+ // `StatusOr<T>` is move constructible if `T` is move constructible.
+ StatusOr(StatusOr&&) = default;
+ // `StatusOr<T>` is move assignable if `T` is move constructible and move
+ // assignable.
+ StatusOr& operator=(StatusOr&&) = default;
+
+ // Converting Constructors
+
+ // Constructs a new `y_absl::StatusOr<T>` from an `y_absl::StatusOr<U>`, when `T`
+ // is constructible from `U`. To avoid ambiguity, these constructors are
+ // disabled if `T` is also constructible from `StatusOr<U>`. This constructor
+ // is explicit if and only if the corresponding construction of `T` from `U`
+ // is explicit; that is, it inherits its explicitness from the underlying
+ // constructor.
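+ //
+ // For example (sketch), a `StatusOr<double>` can be constructed from a
+ // `StatusOr<int>` because `double` is implicitly constructible from `int`:
+ //
+ // y_absl::StatusOr<int> i = 42;
+ // y_absl::StatusOr<double> d = i; // d.ok() && *d == 42.0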
+ template <
+ typename U,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U>>,
+ std::is_constructible<T, const U&>,
+ std::is_convertible<const U&, T>,
+ y_absl::negation<
+ internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
+ T, U>>>::value,
+ int> = 0>
+ StatusOr(const StatusOr<U>& other) // NOLINT
+ : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
+ template <
+ typename U,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U>>,
+ std::is_constructible<T, const U&>,
+ y_absl::negation<std::is_convertible<const U&, T>>,
+ y_absl::negation<
+ internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
+ T, U>>>::value,
+ int> = 0>
+ explicit StatusOr(const StatusOr<U>& other)
+ : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
+
+ template <
+ typename U,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
+ std::is_convertible<U&&, T>,
+ y_absl::negation<
+ internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
+ T, U>>>::value,
+ int> = 0>
+ StatusOr(StatusOr<U>&& other) // NOLINT
+ : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
+ template <
+ typename U,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
+ y_absl::negation<std::is_convertible<U&&, T>>,
+ y_absl::negation<
+ internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
+ T, U>>>::value,
+ int> = 0>
+ explicit StatusOr(StatusOr<U>&& other)
+ : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
+
+ // Converting Assignment Operators
+
+ // Creates an `y_absl::StatusOr<T>` through assignment from an
+ // `y_absl::StatusOr<U>` when:
+ //
+ // * Both `y_absl::StatusOr<T>` and `y_absl::StatusOr<U>` are OK by assigning
+ // `U` to `T` directly.
+ // * `y_absl::StatusOr<T>` is OK and `y_absl::StatusOr<U>` contains an error
+ // code by destroying `y_absl::StatusOr<T>`'s value and assigning from
+  //   `y_absl::StatusOr<U>`.
+ // * `y_absl::StatusOr<T>` contains an error code and `y_absl::StatusOr<U>` is
+ // OK by directly initializing `T` from `U`.
+ // * Both `y_absl::StatusOr<T>` and `y_absl::StatusOr<U>` contain an error
+ // code by assigning the `Status` in `y_absl::StatusOr<U>` to
+  //   `y_absl::StatusOr<T>`.
+ //
+ // These overloads only apply if `y_absl::StatusOr<T>` is constructible and
+ // assignable from `y_absl::StatusOr<U>` and `StatusOr<T>` cannot be directly
+ // assigned from `StatusOr<U>`.
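+  //
+  // Example (illustrative; assumes the `UnknownError()` factory from
+  // `y_absl/status/status.h`):
+  //
+  //   y_absl::StatusOr<int> a = 1;
+  //   y_absl::StatusOr<long> b = y_absl::UnknownError("");
+  //   b = a;  // b.ok() && *b == 1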
+ template <
+ typename U,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U>>,
+ std::is_constructible<T, const U&>,
+ std::is_assignable<T, const U&>,
+ y_absl::negation<
+ internal_statusor::
+ IsConstructibleOrConvertibleOrAssignableFromStatusOr<
+ T, U>>>::value,
+ int> = 0>
+ StatusOr& operator=(const StatusOr<U>& other) {
+ this->Assign(other);
+ return *this;
+ }
+ template <
+ typename U,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
+ std::is_assignable<T, U&&>,
+ y_absl::negation<
+ internal_statusor::
+ IsConstructibleOrConvertibleOrAssignableFromStatusOr<
+ T, U>>>::value,
+ int> = 0>
+ StatusOr& operator=(StatusOr<U>&& other) {
+ this->Assign(std::move(other));
+ return *this;
+ }
+
+ // Constructs a new `y_absl::StatusOr<T>` with a non-ok status. After calling
+  // this constructor, `this->ok()` will be `false` and calls to `value()` will
+  // throw an exception if exceptions are enabled, or crash if they are not.
+ //
+ // The constructor also takes any type `U` that is convertible to
+  // `y_absl::Status`. This constructor is explicit if and only if `U` is not of
+ // type `y_absl::Status` and the conversion from `U` to `Status` is explicit.
+ //
+ // REQUIRES: !Status(std::forward<U>(v)).ok(). This requirement is DCHECKed.
+ // In optimized builds, passing y_absl::OkStatus() here will have the effect
+ // of passing y_absl::StatusCode::kInternal as a fallback.
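+  //
+  // Example (illustrative; assumes the `NotFoundError()` factory from
+  // `y_absl/status/status.h`):
+  //
+  //   y_absl::StatusOr<int> result = y_absl::NotFoundError("no such key");
+  //   assert(!result.ok());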
+ template <
+ typename U = y_absl::Status,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ std::is_convertible<U&&, y_absl::Status>,
+ std::is_constructible<y_absl::Status, U&&>,
+ y_absl::negation<std::is_same<y_absl::decay_t<U>, y_absl::StatusOr<T>>>,
+ y_absl::negation<std::is_same<y_absl::decay_t<U>, T>>,
+ y_absl::negation<std::is_same<y_absl::decay_t<U>, y_absl::in_place_t>>,
+ y_absl::negation<internal_statusor::HasConversionOperatorToStatusOr<
+ T, U&&>>>::value,
+ int> = 0>
+ StatusOr(U&& v) : Base(std::forward<U>(v)) {}
+
+ template <
+ typename U = y_absl::Status,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ y_absl::negation<std::is_convertible<U&&, y_absl::Status>>,
+ std::is_constructible<y_absl::Status, U&&>,
+ y_absl::negation<std::is_same<y_absl::decay_t<U>, y_absl::StatusOr<T>>>,
+ y_absl::negation<std::is_same<y_absl::decay_t<U>, T>>,
+ y_absl::negation<std::is_same<y_absl::decay_t<U>, y_absl::in_place_t>>,
+ y_absl::negation<internal_statusor::HasConversionOperatorToStatusOr<
+ T, U&&>>>::value,
+ int> = 0>
+ explicit StatusOr(U&& v) : Base(std::forward<U>(v)) {}
+
+ template <
+ typename U = y_absl::Status,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ std::is_convertible<U&&, y_absl::Status>,
+ std::is_constructible<y_absl::Status, U&&>,
+ y_absl::negation<std::is_same<y_absl::decay_t<U>, y_absl::StatusOr<T>>>,
+ y_absl::negation<std::is_same<y_absl::decay_t<U>, T>>,
+ y_absl::negation<std::is_same<y_absl::decay_t<U>, y_absl::in_place_t>>,
+ y_absl::negation<internal_statusor::HasConversionOperatorToStatusOr<
+ T, U&&>>>::value,
+ int> = 0>
+ StatusOr& operator=(U&& v) {
+ this->AssignStatus(std::forward<U>(v));
+ return *this;
+ }
+
+ // Perfect-forwarding value assignment operator.
+
+  // If `*this` contains a `T` value before the call, the contained value is
+  // assigned from `std::forward<U>(v)`; otherwise, it is direct-initialized
+  // from `std::forward<U>(v)`.
+  //
+  // This function does not participate in overload resolution unless:
+  // 1. `std::is_constructible_v<T, U>` is true,
+  // 2. `std::is_assignable_v<T&, U>` is true,
+  // 3. `std::is_same_v<StatusOr<T>, std::remove_cvref_t<U>>` is false, and
+  // 4. Assigning `U` to `T` is not ambiguous:
+ // If `U` is `StatusOr<V>` and `T` is constructible and assignable from
+ // both `StatusOr<V>` and `V`, the assignment is considered bug-prone and
+ // ambiguous thus will fail to compile. For example:
+ // StatusOr<bool> s1 = true; // s1.ok() && *s1 == true
+ // StatusOr<bool> s2 = false; // s2.ok() && *s2 == false
+ // s1 = s2; // ambiguous, `s1 = *s2` or `s1 = bool(s2)`?
+ template <
+ typename U = T,
+ typename = typename std::enable_if<y_absl::conjunction<
+ std::is_constructible<T, U&&>, std::is_assignable<T&, U&&>,
+ y_absl::disjunction<
+ std::is_same<y_absl::remove_cv_t<y_absl::remove_reference_t<U>>, T>,
+ y_absl::conjunction<
+ y_absl::negation<std::is_convertible<U&&, y_absl::Status>>,
+ y_absl::negation<internal_statusor::
+ HasConversionOperatorToStatusOr<T, U&&>>>>,
+ internal_statusor::IsForwardingAssignmentValid<T, U&&>>::value>::type>
+ StatusOr& operator=(U&& v) {
+ this->Assign(std::forward<U>(v));
+ return *this;
+ }
+
+ // Constructs the inner value `T` in-place using the provided args, using the
+ // `T(args...)` constructor.
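+  //
+  // Example (illustrative):
+  //
+  //   y_absl::StatusOr<std::vector<int>> v(y_absl::in_place, 3, 7);
+  //   // v.ok() && v->size() == 3, with every element equal to 7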
+ template <typename... Args>
+ explicit StatusOr(y_absl::in_place_t, Args&&... args);
+ template <typename U, typename... Args>
+ explicit StatusOr(y_absl::in_place_t, std::initializer_list<U> ilist,
+ Args&&... args);
+
+  // Constructs the inner value `T` in-place using the provided args, using the
+  // `T(U)` (direct-initialization) constructor. This constructor is only valid
+  // if `T` can be constructed from a `U`. The argument may be moved or copied
+  // into the constructed `T`.
+ //
+ // This constructor is explicit if `U` is not convertible to `T`. To avoid
+ // ambiguity, this constructor is disabled if `U` is a `StatusOr<J>`, where
+ // `J` is convertible to `T`.
+ template <
+ typename U = T,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ internal_statusor::IsDirectInitializationValid<T, U&&>,
+ std::is_constructible<T, U&&>, std::is_convertible<U&&, T>,
+ y_absl::disjunction<
+ std::is_same<y_absl::remove_cv_t<y_absl::remove_reference_t<U>>,
+ T>,
+ y_absl::conjunction<
+ y_absl::negation<std::is_convertible<U&&, y_absl::Status>>,
+ y_absl::negation<
+ internal_statusor::HasConversionOperatorToStatusOr<
+ T, U&&>>>>>::value,
+ int> = 0>
+ StatusOr(U&& u) // NOLINT
+ : StatusOr(y_absl::in_place, std::forward<U>(u)) {}
+
+ template <
+ typename U = T,
+ y_absl::enable_if_t<
+ y_absl::conjunction<
+ internal_statusor::IsDirectInitializationValid<T, U&&>,
+ y_absl::disjunction<
+ std::is_same<y_absl::remove_cv_t<y_absl::remove_reference_t<U>>,
+ T>,
+ y_absl::conjunction<
+ y_absl::negation<std::is_constructible<y_absl::Status, U&&>>,
+ y_absl::negation<
+ internal_statusor::HasConversionOperatorToStatusOr<
+ T, U&&>>>>,
+ std::is_constructible<T, U&&>,
+ y_absl::negation<std::is_convertible<U&&, T>>>::value,
+ int> = 0>
+ explicit StatusOr(U&& u) // NOLINT
+ : StatusOr(y_absl::in_place, std::forward<U>(u)) {}
+
+ // StatusOr<T>::ok()
+ //
+ // Returns whether or not this `y_absl::StatusOr<T>` holds a `T` value. This
+  // member function is analogous to `y_absl::Status::ok()` and should be used
+ // similarly to check the status of return values.
+ //
+ // Example:
+ //
+ // StatusOr<Foo> result = DoBigCalculationThatCouldFail();
+ // if (result.ok()) {
+ // // Handle result
+  //   } else {
+ // // Handle error
+ // }
+ ABSL_MUST_USE_RESULT bool ok() const { return this->status_.ok(); }
+
+ // StatusOr<T>::status()
+ //
+ // Returns a reference to the current `y_absl::Status` contained within the
+ // `y_absl::StatusOr<T>`. If `y_absl::StatusOr<T>` contains a `T`, then this
+ // function returns `y_absl::OkStatus()`.
+ const Status& status() const&;
+ Status status() &&;
+
+ // StatusOr<T>::value()
+ //
+ // Returns a reference to the held value if `this->ok()`. Otherwise, throws
+ // `y_absl::BadStatusOrAccess` if exceptions are enabled, or is guaranteed to
+ // terminate the process if exceptions are disabled.
+ //
+ // If you have already checked the status using `this->ok()`, you probably
+ // want to use `operator*()` or `operator->()` to access the value instead of
+ // `value`.
+ //
+ // Note: for value types that are cheap to copy, prefer simple code:
+ //
+ // T value = statusor.value();
+ //
+ // Otherwise, if the value type is expensive to copy, but can be left
+ // in the StatusOr, simply assign to a reference:
+ //
+ // T& value = statusor.value(); // or `const T&`
+ //
+ // Otherwise, if the value type supports an efficient move, it can be
+ // used as follows:
+ //
+ // T value = std::move(statusor).value();
+ //
+ // The `std::move` on statusor instead of on the whole expression enables
+ // warnings about possible uses of the statusor object after the move.
+ const T& value() const& ABSL_ATTRIBUTE_LIFETIME_BOUND;
+ T& value() & ABSL_ATTRIBUTE_LIFETIME_BOUND;
+ const T&& value() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND;
+ T&& value() && ABSL_ATTRIBUTE_LIFETIME_BOUND;
+
+ // StatusOr<T>:: operator*()
+ //
+ // Returns a reference to the current value.
+ //
+ // REQUIRES: `this->ok() == true`, otherwise the behavior is undefined.
+ //
+ // Use `this->ok()` to verify that there is a current value within the
+ // `y_absl::StatusOr<T>`. Alternatively, see the `value()` member function for a
+ // similar API that guarantees crashing or throwing an exception if there is
+ // no current value.
+ const T& operator*() const& ABSL_ATTRIBUTE_LIFETIME_BOUND;
+ T& operator*() & ABSL_ATTRIBUTE_LIFETIME_BOUND;
+ const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND;
+ T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND;
+
+ // StatusOr<T>::operator->()
+ //
+ // Returns a pointer to the current value.
+ //
+ // REQUIRES: `this->ok() == true`, otherwise the behavior is undefined.
+ //
+ // Use `this->ok()` to verify that there is a current value.
+ const T* operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND;
+ T* operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND;
+
+ // StatusOr<T>::value_or()
+ //
+ // Returns the current value if `this->ok() == true`. Otherwise constructs a
+ // value using the provided `default_value`.
+ //
+ // Unlike `value`, this function returns by value, copying the current value
+ // if necessary. If the value type supports an efficient move, it can be used
+ // as follows:
+ //
+ // T value = std::move(statusor).value_or(def);
+ //
+ // Unlike with `value`, calling `std::move()` on the result of `value_or` will
+ // still trigger a copy.
+ template <typename U>
+ T value_or(U&& default_value) const&;
+ template <typename U>
+ T value_or(U&& default_value) &&;
+
+ // StatusOr<T>::IgnoreError()
+ //
+ // Ignores any errors. This method does nothing except potentially suppress
+ // complaints from any tools that are checking that errors are not dropped on
+ // the floor.
+ void IgnoreError() const;
+
+ // StatusOr<T>::emplace()
+ //
+ // Reconstructs the inner value T in-place using the provided args, using the
+  // T(args...) constructor. Returns a reference to the reconstructed `T`.
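+  //
+  // Example (illustrative; assumes the `InternalError()` factory from
+  // `y_absl/status/status.h`):
+  //
+  //   y_absl::StatusOr<std::string> s = y_absl::InternalError("boom");
+  //   s.emplace("hello");  // s.ok() && *s == "hello"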
+ template <typename... Args>
+ T& emplace(Args&&... args) {
+ if (ok()) {
+ this->Clear();
+ this->MakeValue(std::forward<Args>(args)...);
+ } else {
+ this->MakeValue(std::forward<Args>(args)...);
+ this->status_ = y_absl::OkStatus();
+ }
+ return this->data_;
+ }
+
+ template <
+ typename U, typename... Args,
+ y_absl::enable_if_t<
+ std::is_constructible<T, std::initializer_list<U>&, Args&&...>::value,
+ int> = 0>
+ T& emplace(std::initializer_list<U> ilist, Args&&... args) {
+ if (ok()) {
+ this->Clear();
+ this->MakeValue(ilist, std::forward<Args>(args)...);
+ } else {
+ this->MakeValue(ilist, std::forward<Args>(args)...);
+ this->status_ = y_absl::OkStatus();
+ }
+ return this->data_;
+ }
+
+ private:
+ using internal_statusor::StatusOrData<T>::Assign;
+ template <typename U>
+ void Assign(const y_absl::StatusOr<U>& other);
+ template <typename U>
+ void Assign(y_absl::StatusOr<U>&& other);
+};
+
+// operator==()
+//
+// This operator checks the equality of two `y_absl::StatusOr<T>` objects.
+template <typename T>
+bool operator==(const StatusOr<T>& lhs, const StatusOr<T>& rhs) {
+ if (lhs.ok() && rhs.ok()) return *lhs == *rhs;
+ return lhs.status() == rhs.status();
+}
+
+// operator!=()
+//
+// This operator checks the inequality of two `y_absl::StatusOr<T>` objects.
+template <typename T>
+bool operator!=(const StatusOr<T>& lhs, const StatusOr<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+//------------------------------------------------------------------------------
+// Implementation details for StatusOr<T>
+//------------------------------------------------------------------------------
+
+// TODO(sbenza): avoid the string here completely.
+template <typename T>
+StatusOr<T>::StatusOr() : Base(Status(y_absl::StatusCode::kUnknown, "")) {}
+
+template <typename T>
+template <typename U>
+inline void StatusOr<T>::Assign(const StatusOr<U>& other) {
+ if (other.ok()) {
+ this->Assign(*other);
+ } else {
+ this->AssignStatus(other.status());
+ }
+}
+
+template <typename T>
+template <typename U>
+inline void StatusOr<T>::Assign(StatusOr<U>&& other) {
+ if (other.ok()) {
+ this->Assign(*std::move(other));
+ } else {
+ this->AssignStatus(std::move(other).status());
+ }
+}
+template <typename T>
+template <typename... Args>
+StatusOr<T>::StatusOr(y_absl::in_place_t, Args&&... args)
+ : Base(y_absl::in_place, std::forward<Args>(args)...) {}
+
+template <typename T>
+template <typename U, typename... Args>
+StatusOr<T>::StatusOr(y_absl::in_place_t, std::initializer_list<U> ilist,
+ Args&&... args)
+ : Base(y_absl::in_place, ilist, std::forward<Args>(args)...) {}
+
+template <typename T>
+const Status& StatusOr<T>::status() const& {
+ return this->status_;
+}
+template <typename T>
+Status StatusOr<T>::status() && {
+ return ok() ? OkStatus() : std::move(this->status_);
+}
+
+template <typename T>
+const T& StatusOr<T>::value() const& {
+ if (!this->ok()) internal_statusor::ThrowBadStatusOrAccess(this->status_);
+ return this->data_;
+}
+
+template <typename T>
+T& StatusOr<T>::value() & {
+ if (!this->ok()) internal_statusor::ThrowBadStatusOrAccess(this->status_);
+ return this->data_;
+}
+
+template <typename T>
+const T&& StatusOr<T>::value() const&& {
+ if (!this->ok()) {
+ internal_statusor::ThrowBadStatusOrAccess(std::move(this->status_));
+ }
+ return std::move(this->data_);
+}
+
+template <typename T>
+T&& StatusOr<T>::value() && {
+ if (!this->ok()) {
+ internal_statusor::ThrowBadStatusOrAccess(std::move(this->status_));
+ }
+ return std::move(this->data_);
+}
+
+template <typename T>
+const T& StatusOr<T>::operator*() const& {
+ this->EnsureOk();
+ return this->data_;
+}
+
+template <typename T>
+T& StatusOr<T>::operator*() & {
+ this->EnsureOk();
+ return this->data_;
+}
+
+template <typename T>
+const T&& StatusOr<T>::operator*() const&& {
+ this->EnsureOk();
+ return std::move(this->data_);
+}
+
+template <typename T>
+T&& StatusOr<T>::operator*() && {
+ this->EnsureOk();
+ return std::move(this->data_);
+}
+
+template <typename T>
+const T* StatusOr<T>::operator->() const {
+ this->EnsureOk();
+ return &this->data_;
+}
+
+template <typename T>
+T* StatusOr<T>::operator->() {
+ this->EnsureOk();
+ return &this->data_;
+}
+
+template <typename T>
+template <typename U>
+T StatusOr<T>::value_or(U&& default_value) const& {
+ if (ok()) {
+ return this->data_;
+ }
+ return std::forward<U>(default_value);
+}
+
+template <typename T>
+template <typename U>
+T StatusOr<T>::value_or(U&& default_value) && {
+ if (ok()) {
+ return std::move(this->data_);
+ }
+ return std::forward<U>(default_value);
+}
+
+template <typename T>
+void StatusOr<T>::IgnoreError() const {
+ // no-op
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STATUS_STATUSOR_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor/ya.make
new file mode 100644
index 00000000000..a29c7ae5b3b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor/ya.make
@@ -0,0 +1,57 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased
+ contrib/restricted/abseil-cpp-tstring/y_absl/status
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/status)
+
+SRCS(
+ statusor.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/status/ya.make
new file mode 100644
index 00000000000..9414dfc26b7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/ya.make
@@ -0,0 +1,54 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ status.cc
+ status_payload_printer.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..9d8552c68c7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/.yandex_meta/licenses.list.txt
@@ -0,0 +1,46 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2018 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2019 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2020 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2021 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.cc
new file mode 100644
index 00000000000..959d6c27ff7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.cc
@@ -0,0 +1,200 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/ascii.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace ascii_internal {
+
+// TODO(mbar) Move Python code for generation of table to BUILD and link here.
+
+// NOTE: The kPropertyBits table used within this code was generated by
+// Python code of the following form. (Bit 0x02 is currently unused and
+// available.)
+//
+// def Hex2(n):
+// return '0x' + hex(n/16)[2:] + hex(n%16)[2:]
+// def IsPunct(ch):
+// return (ord(ch) >= 32 and ord(ch) < 127 and
+// not ch.isspace() and not ch.isalnum())
+// def IsBlank(ch):
+// return ch in ' \t'
+// def IsCntrl(ch):
+// return ord(ch) < 32 or ord(ch) == 127
+// def IsXDigit(ch):
+// return ch.isdigit() or ch.lower() in 'abcdef'
+// for i in range(128):
+// ch = chr(i)
+// mask = ((ch.isalpha() and 0x01 or 0) |
+// (ch.isalnum() and 0x04 or 0) |
+// (ch.isspace() and 0x08 or 0) |
+// (IsPunct(ch) and 0x10 or 0) |
+// (IsBlank(ch) and 0x20 or 0) |
+// (IsCntrl(ch) and 0x40 or 0) |
+// (IsXDigit(ch) and 0x80 or 0))
+// print Hex2(mask) + ',',
+// if i % 16 == 7:
+// print ' //', Hex2(i & 0x78)
+// elif i % 16 == 15:
+// print
+
+// clang-format off
+// Array of bitfields holding character information. Each bit value corresponds
+// to a particular character feature. For readability, and because the value
+// of these bits is tightly coupled to this implementation, the individual bits
+// are not named. Note that bitfields for all characters above ASCII 127 are
+// zero-initialized.
+ABSL_DLL const unsigned char kPropertyBits[256] = {
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, // 0x00
+ 0x40, 0x68, 0x48, 0x48, 0x48, 0x48, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, // 0x10
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x28, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, // 0x20
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, // 0x30
+ 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x05, // 0x40
+ 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, // 0x50
+ 0x05, 0x05, 0x05, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x05, // 0x60
+ 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, // 0x70
+ 0x05, 0x05, 0x05, 0x10, 0x10, 0x10, 0x10, 0x40,
+};
+
+// Array of characters for the ascii_tolower() function. For values 'A'
+// through 'Z', return the lower-case character; otherwise, return the
+// identity of the passed character.
+ABSL_DLL const char kToLower[256] = {
+ '\x00', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x07',
+ '\x08', '\x09', '\x0a', '\x0b', '\x0c', '\x0d', '\x0e', '\x0f',
+ '\x10', '\x11', '\x12', '\x13', '\x14', '\x15', '\x16', '\x17',
+ '\x18', '\x19', '\x1a', '\x1b', '\x1c', '\x1d', '\x1e', '\x1f',
+ '\x20', '\x21', '\x22', '\x23', '\x24', '\x25', '\x26', '\x27',
+ '\x28', '\x29', '\x2a', '\x2b', '\x2c', '\x2d', '\x2e', '\x2f',
+ '\x30', '\x31', '\x32', '\x33', '\x34', '\x35', '\x36', '\x37',
+ '\x38', '\x39', '\x3a', '\x3b', '\x3c', '\x3d', '\x3e', '\x3f',
+ '\x40', 'a', 'b', 'c', 'd', 'e', 'f', 'g',
+ 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
+ 'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
+ 'x', 'y', 'z', '\x5b', '\x5c', '\x5d', '\x5e', '\x5f',
+ '\x60', '\x61', '\x62', '\x63', '\x64', '\x65', '\x66', '\x67',
+ '\x68', '\x69', '\x6a', '\x6b', '\x6c', '\x6d', '\x6e', '\x6f',
+ '\x70', '\x71', '\x72', '\x73', '\x74', '\x75', '\x76', '\x77',
+ '\x78', '\x79', '\x7a', '\x7b', '\x7c', '\x7d', '\x7e', '\x7f',
+ '\x80', '\x81', '\x82', '\x83', '\x84', '\x85', '\x86', '\x87',
+ '\x88', '\x89', '\x8a', '\x8b', '\x8c', '\x8d', '\x8e', '\x8f',
+ '\x90', '\x91', '\x92', '\x93', '\x94', '\x95', '\x96', '\x97',
+ '\x98', '\x99', '\x9a', '\x9b', '\x9c', '\x9d', '\x9e', '\x9f',
+ '\xa0', '\xa1', '\xa2', '\xa3', '\xa4', '\xa5', '\xa6', '\xa7',
+ '\xa8', '\xa9', '\xaa', '\xab', '\xac', '\xad', '\xae', '\xaf',
+ '\xb0', '\xb1', '\xb2', '\xb3', '\xb4', '\xb5', '\xb6', '\xb7',
+ '\xb8', '\xb9', '\xba', '\xbb', '\xbc', '\xbd', '\xbe', '\xbf',
+ '\xc0', '\xc1', '\xc2', '\xc3', '\xc4', '\xc5', '\xc6', '\xc7',
+ '\xc8', '\xc9', '\xca', '\xcb', '\xcc', '\xcd', '\xce', '\xcf',
+ '\xd0', '\xd1', '\xd2', '\xd3', '\xd4', '\xd5', '\xd6', '\xd7',
+ '\xd8', '\xd9', '\xda', '\xdb', '\xdc', '\xdd', '\xde', '\xdf',
+ '\xe0', '\xe1', '\xe2', '\xe3', '\xe4', '\xe5', '\xe6', '\xe7',
+ '\xe8', '\xe9', '\xea', '\xeb', '\xec', '\xed', '\xee', '\xef',
+ '\xf0', '\xf1', '\xf2', '\xf3', '\xf4', '\xf5', '\xf6', '\xf7',
+ '\xf8', '\xf9', '\xfa', '\xfb', '\xfc', '\xfd', '\xfe', '\xff',
+};
+
+// Array of characters for the ascii_toupper() function. For values 'a'
+// through 'z', return the upper-case character; otherwise, return the
+// identity of the passed character.
+ABSL_DLL const char kToUpper[256] = {
+ '\x00', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x07',
+ '\x08', '\x09', '\x0a', '\x0b', '\x0c', '\x0d', '\x0e', '\x0f',
+ '\x10', '\x11', '\x12', '\x13', '\x14', '\x15', '\x16', '\x17',
+ '\x18', '\x19', '\x1a', '\x1b', '\x1c', '\x1d', '\x1e', '\x1f',
+ '\x20', '\x21', '\x22', '\x23', '\x24', '\x25', '\x26', '\x27',
+ '\x28', '\x29', '\x2a', '\x2b', '\x2c', '\x2d', '\x2e', '\x2f',
+ '\x30', '\x31', '\x32', '\x33', '\x34', '\x35', '\x36', '\x37',
+ '\x38', '\x39', '\x3a', '\x3b', '\x3c', '\x3d', '\x3e', '\x3f',
+ '\x40', '\x41', '\x42', '\x43', '\x44', '\x45', '\x46', '\x47',
+ '\x48', '\x49', '\x4a', '\x4b', '\x4c', '\x4d', '\x4e', '\x4f',
+ '\x50', '\x51', '\x52', '\x53', '\x54', '\x55', '\x56', '\x57',
+ '\x58', '\x59', '\x5a', '\x5b', '\x5c', '\x5d', '\x5e', '\x5f',
+ '\x60', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
+ 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
+ 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
+ 'X', 'Y', 'Z', '\x7b', '\x7c', '\x7d', '\x7e', '\x7f',
+ '\x80', '\x81', '\x82', '\x83', '\x84', '\x85', '\x86', '\x87',
+ '\x88', '\x89', '\x8a', '\x8b', '\x8c', '\x8d', '\x8e', '\x8f',
+ '\x90', '\x91', '\x92', '\x93', '\x94', '\x95', '\x96', '\x97',
+ '\x98', '\x99', '\x9a', '\x9b', '\x9c', '\x9d', '\x9e', '\x9f',
+ '\xa0', '\xa1', '\xa2', '\xa3', '\xa4', '\xa5', '\xa6', '\xa7',
+ '\xa8', '\xa9', '\xaa', '\xab', '\xac', '\xad', '\xae', '\xaf',
+ '\xb0', '\xb1', '\xb2', '\xb3', '\xb4', '\xb5', '\xb6', '\xb7',
+ '\xb8', '\xb9', '\xba', '\xbb', '\xbc', '\xbd', '\xbe', '\xbf',
+ '\xc0', '\xc1', '\xc2', '\xc3', '\xc4', '\xc5', '\xc6', '\xc7',
+ '\xc8', '\xc9', '\xca', '\xcb', '\xcc', '\xcd', '\xce', '\xcf',
+ '\xd0', '\xd1', '\xd2', '\xd3', '\xd4', '\xd5', '\xd6', '\xd7',
+ '\xd8', '\xd9', '\xda', '\xdb', '\xdc', '\xdd', '\xde', '\xdf',
+ '\xe0', '\xe1', '\xe2', '\xe3', '\xe4', '\xe5', '\xe6', '\xe7',
+ '\xe8', '\xe9', '\xea', '\xeb', '\xec', '\xed', '\xee', '\xef',
+ '\xf0', '\xf1', '\xf2', '\xf3', '\xf4', '\xf5', '\xf6', '\xf7',
+ '\xf8', '\xf9', '\xfa', '\xfb', '\xfc', '\xfd', '\xfe', '\xff',
+};
+// clang-format on
+
+} // namespace ascii_internal
+
+void AsciiStrToLower(TString* s) {
+ for (auto& ch : *s) {
+ ch = y_absl::ascii_tolower(ch);
+ }
+}
+
+void AsciiStrToUpper(TString* s) {
+ for (auto& ch : *s) {
+ ch = y_absl::ascii_toupper(ch);
+ }
+}
+
+void RemoveExtraAsciiWhitespace(TString* str) {
+ auto stripped = StripAsciiWhitespace(*str);
+
+ if (stripped.empty()) {
+ str->clear();
+ return;
+ }
+
+ auto input_it = stripped.begin();
+ auto input_end = stripped.end();
+ auto output_it = &(*str)[0];
+ bool is_ws = false;
+
+ for (; input_it < input_end; ++input_it) {
+ if (is_ws) {
+ // Consecutive whitespace? Keep only the last.
+ is_ws = y_absl::ascii_isspace(*input_it);
+ if (is_ws) --output_it;
+ } else {
+ is_ws = y_absl::ascii_isspace(*input_it);
+ }
+
+ *output_it = *input_it;
+ ++output_it;
+ }
+
+ str->erase(output_it - &(*str)[0]);
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.h
new file mode 100644
index 00000000000..bc04710d8c7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.h
@@ -0,0 +1,242 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: ascii.h
+// -----------------------------------------------------------------------------
+//
+// This package contains functions operating on characters and strings
+// restricted to standard ASCII. These include character classification
+// functions analogous to those found in the ANSI C Standard Library <ctype.h>
+// header file.
+//
+// C++ implementations provide <ctype.h> functionality based on their
+// C environment locale. In general, reliance on such a locale is not ideal, as
+// the locale standard is problematic (and may not return invariant information
+// for the same character set, for example). These `ascii_*()` functions are
+// hard-wired for standard ASCII, much faster, and guaranteed to behave
+// consistently. They will never be overloaded, nor will their function
+// signature change.
+//
+// `ascii_isalnum()`, `ascii_isalpha()`, `ascii_isascii()`, `ascii_isblank()`,
+// `ascii_iscntrl()`, `ascii_isdigit()`, `ascii_isgraph()`, `ascii_islower()`,
+// `ascii_isprint()`, `ascii_ispunct()`, `ascii_isspace()`, `ascii_isupper()`,
+// `ascii_isxdigit()`
+// Analogous to the <ctype.h> functions with similar names, these
+// functions take an unsigned char and return a bool, based on whether the
+// character matches the condition specified.
+//
+// If the input character has a numerical value greater than 127, these
+// functions return `false`.
+//
+// `ascii_tolower()`, `ascii_toupper()`
+// Analogous to the <ctype.h> functions with similar names, these functions
+// take an unsigned char and return a char.
+//
+// If the input character is not an ASCII {lower,upper}-case letter (including
+// numerical values greater than 127) then the functions return the same value
+// as the input character.
+
+#ifndef ABSL_STRINGS_ASCII_H_
+#define ABSL_STRINGS_ASCII_H_
+
+#include <algorithm>
+#include <util/generic/string.h>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace ascii_internal {
+
+// Declaration for an array of bitfields holding character information.
+ABSL_DLL extern const unsigned char kPropertyBits[256];
+
+// Declaration for the array of characters to upper-case characters.
+ABSL_DLL extern const char kToUpper[256];
+
+// Declaration for the array of characters to lower-case characters.
+ABSL_DLL extern const char kToLower[256];
+
+} // namespace ascii_internal
+
+// ascii_isalpha()
+//
+// Determines whether the given character is an alphabetic character.
+inline bool ascii_isalpha(unsigned char c) {
+ return (ascii_internal::kPropertyBits[c] & 0x01) != 0;
+}
+
+// ascii_isalnum()
+//
+// Determines whether the given character is an alphanumeric character.
+inline bool ascii_isalnum(unsigned char c) {
+ return (ascii_internal::kPropertyBits[c] & 0x04) != 0;
+}
+
+// ascii_isspace()
+//
+// Determines whether the given character is a whitespace character (space,
+// tab, vertical tab, formfeed, linefeed, or carriage return).
+inline bool ascii_isspace(unsigned char c) {
+ return (ascii_internal::kPropertyBits[c] & 0x08) != 0;
+}
+
+// ascii_ispunct()
+//
+// Determines whether the given character is a punctuation character.
+inline bool ascii_ispunct(unsigned char c) {
+ return (ascii_internal::kPropertyBits[c] & 0x10) != 0;
+}
+
+// ascii_isblank()
+//
+// Determines whether the given character is a blank character (tab or space).
+inline bool ascii_isblank(unsigned char c) {
+ return (ascii_internal::kPropertyBits[c] & 0x20) != 0;
+}
+
+// ascii_iscntrl()
+//
+// Determines whether the given character is a control character.
+inline bool ascii_iscntrl(unsigned char c) {
+ return (ascii_internal::kPropertyBits[c] & 0x40) != 0;
+}
+
+// ascii_isxdigit()
+//
+// Determines whether the given character can be represented as a hexadecimal
+// digit character (i.e. {0-9}, {a-f}, or {A-F}).
+inline bool ascii_isxdigit(unsigned char c) {
+ return (ascii_internal::kPropertyBits[c] & 0x80) != 0;
+}
+
+// ascii_isdigit()
+//
+// Determines whether the given character can be represented as a decimal
+// digit character (i.e. {0-9}).
+inline bool ascii_isdigit(unsigned char c) { return c >= '0' && c <= '9'; }
+
+// ascii_isprint()
+//
+// Determines whether the given character is printable, including whitespace.
+inline bool ascii_isprint(unsigned char c) { return c >= 32 && c < 127; }
+
+// ascii_isgraph()
+//
+// Determines whether the given character has a graphical representation.
+inline bool ascii_isgraph(unsigned char c) { return c > 32 && c < 127; }
+
+// ascii_isupper()
+//
+// Determines whether the given character is uppercase.
+inline bool ascii_isupper(unsigned char c) { return c >= 'A' && c <= 'Z'; }
+
+// ascii_islower()
+//
+// Determines whether the given character is lowercase.
+inline bool ascii_islower(unsigned char c) { return c >= 'a' && c <= 'z'; }
+
+// ascii_isascii()
+//
+// Determines whether the given character is ASCII.
+inline bool ascii_isascii(unsigned char c) { return c < 128; }
+
+// ascii_tolower()
+//
+// Returns an ASCII character, converting to lowercase if uppercase is
+// passed. Note that character values > 127 are simply returned.
+inline char ascii_tolower(unsigned char c) {
+ return ascii_internal::kToLower[c];
+}
+
+// Converts the characters in `s` to lowercase, changing the contents of `s`.
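+//
+// Example (illustrative):
+//
+//   TString s("Hello World");
+//   y_absl::AsciiStrToLower(&s);  // s == "hello world"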
+void AsciiStrToLower(TString* s);
+
+// Creates a lowercase string from a given y_absl::string_view.
+ABSL_MUST_USE_RESULT inline TString AsciiStrToLower(y_absl::string_view s) {
+ TString result(s);
+ y_absl::AsciiStrToLower(&result);
+ return result;
+}
+
+// ascii_toupper()
+//
+// Returns the ASCII character, converting to upper-case if lower-case is
+// passed. Note that character values > 127 are simply returned.
+inline char ascii_toupper(unsigned char c) {
+ return ascii_internal::kToUpper[c];
+}
+
+// Converts the characters in `s` to uppercase, changing the contents of `s`.
+void AsciiStrToUpper(TString* s);
+
+// Creates an uppercase string from a given y_absl::string_view.
+ABSL_MUST_USE_RESULT inline TString AsciiStrToUpper(y_absl::string_view s) {
+ TString result(s);
+ y_absl::AsciiStrToUpper(&result);
+ return result;
+}
+
+// Returns y_absl::string_view with whitespace stripped from the beginning of the
+// given string_view.
+ABSL_MUST_USE_RESULT inline y_absl::string_view StripLeadingAsciiWhitespace(
+ y_absl::string_view str) {
+ auto it = std::find_if_not(str.begin(), str.end(), y_absl::ascii_isspace);
+ return str.substr(it - str.begin());
+}
+
+// Strips in place whitespace from the beginning of the given string.
+inline void StripLeadingAsciiWhitespace(TString* str) {
+ auto it = std::find_if_not(str->cbegin(), str->cend(), y_absl::ascii_isspace);
+ str->erase(str->begin(), it);
+}
+
+// Returns y_absl::string_view with whitespace stripped from the end of the given
+// string_view.
+ABSL_MUST_USE_RESULT inline y_absl::string_view StripTrailingAsciiWhitespace(
+ y_absl::string_view str) {
+ auto it = std::find_if_not(str.rbegin(), str.rend(), y_absl::ascii_isspace);
+ return str.substr(0, str.rend() - it);
+}
+
+// Strips in place whitespace from the end of the given string.
+inline void StripTrailingAsciiWhitespace(TString* str) {
+ auto it = std::find_if_not(str->rbegin(), str->rend(), y_absl::ascii_isspace);
+ str->erase(str->rend() - it);
+}
+
+// Returns y_absl::string_view with whitespace stripped from both ends of the
+// given string_view.
+ABSL_MUST_USE_RESULT inline y_absl::string_view StripAsciiWhitespace(
+ y_absl::string_view str) {
+ return StripTrailingAsciiWhitespace(StripLeadingAsciiWhitespace(str));
+}
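+
+// Example of the view-based strip functions (illustrative):
+//
+//   y_absl::string_view v = "  foo  ";
+//   v = y_absl::StripAsciiWhitespace(v);  // v == "foo"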
+
+// Strips in place whitespace from both ends of the given string.
+inline void StripAsciiWhitespace(TString* str) {
+ StripTrailingAsciiWhitespace(str);
+ StripLeadingAsciiWhitespace(str);
+}
+
+// Removes leading, trailing, and consecutive internal whitespace.
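+//
+// Example (illustrative):
+//
+//   TString s("  foo   bar  ");
+//   y_absl::RemoveExtraAsciiWhitespace(&s);  // s == "foo bar"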
+void RemoveExtraAsciiWhitespace(TString*);
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_ASCII_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.cc
new file mode 100644
index 00000000000..9515ca24dd4
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.cc
@@ -0,0 +1,984 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/charconv.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstring>
+
+#include "y_absl/base/casts.h"
+#include "y_absl/numeric/bits.h"
+#include "y_absl/numeric/int128.h"
+#include "y_absl/strings/internal/charconv_bigint.h"
+#include "y_absl/strings/internal/charconv_parse.h"
+
+// The macro ABSL_BIT_PACK_FLOATS is defined on x86-64, where IEEE floating
+// point numbers have the same endianness in memory as a bitfield struct
+// containing the corresponding parts.
+//
+// When set, we replace calls to ldexp() with manual bit packing, which is
+// faster and is unaffected by floating point environment.
+#ifdef ABSL_BIT_PACK_FLOATS
+#error ABSL_BIT_PACK_FLOATS cannot be directly set
+#elif defined(__x86_64__) || defined(_M_X64)
+#define ABSL_BIT_PACK_FLOATS 1
+#endif
+
+// A note about subnormals:
+//
+// The code below talks about "normals" and "subnormals". A normal IEEE float
+// has a fixed-width mantissa and power of two exponent. For example, a normal
+// `double` has a 53-bit mantissa. Because the high bit is always 1, it is not
+// stored in the representation. The implicit bit buys an extra bit of
+// resolution in the datatype.
+//
+// The downside of this scheme is that there is a large gap between DBL_MIN and
+// zero. (Large, at least, relative to the difference between DBL_MIN and the
+// next representable number). This gap is softened by the "subnormal" numbers,
+// which have the same power-of-two exponent as DBL_MIN, but no implicit 53rd
+// bit. An all-bits-zero exponent in the encoding represents subnormals. (Zero
+// is represented as a subnormal with an all-bits-zero mantissa.)
+//
+// The code below, in calculations, represents the mantissa as a uint64_t. The
+// end result normally has the 53rd bit set. It represents subnormals by using
+// narrower mantissas.
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+template <typename FloatType>
+struct FloatTraits;
+
+template <>
+struct FloatTraits<double> {
+ // The number of mantissa bits in the given float type. This includes the
+ // implied high bit.
+ static constexpr int kTargetMantissaBits = 53;
+
+ // The largest supported IEEE exponent, in our integral mantissa
+ // representation.
+ //
+  // If `m` is the largest integer representable in kTargetMantissaBits bits,
+  // then m * 2**kMaxExponent is exactly equal to DBL_MAX.
+ static constexpr int kMaxExponent = 971;
+
+ // The smallest supported IEEE normal exponent, in our integral mantissa
+ // representation.
+ //
+  // If `m` is the smallest integer that is kTargetMantissaBits bits wide (that
+  // is, 2**(kTargetMantissaBits - 1)), then m * 2**kMinNormalExponent is
+  // exactly equal to DBL_MIN.
+ static constexpr int kMinNormalExponent = -1074;
+
+ static double MakeNan(const char* tagp) {
+ // Support nan no matter which namespace it's in. Some platforms
+ // incorrectly don't put it in namespace std.
+ using namespace std; // NOLINT
+ return nan(tagp);
+ }
+
+ // Builds a nonzero floating point number out of the provided parts.
+ //
+ // This is intended to do the same operation as ldexp(mantissa, exponent),
+ // but using purely integer math, to avoid -ffastmath and floating
+ // point environment issues. Using type punning is also faster. We fall back
+ // to ldexp on a per-platform basis for portability.
+ //
+ // `exponent` must be between kMinNormalExponent and kMaxExponent.
+ //
+ // `mantissa` must either be exactly kTargetMantissaBits wide, in which case
+ // a normal value is made, or it must be less narrow than that, in which case
+ // `exponent` must be exactly kMinNormalExponent, and a subnormal value is
+ // made.
+ static double Make(uint64_t mantissa, int exponent, bool sign) {
+#ifndef ABSL_BIT_PACK_FLOATS
+ // Support ldexp no matter which namespace it's in. Some platforms
+ // incorrectly don't put it in namespace std.
+ using namespace std; // NOLINT
+ return sign ? -ldexp(mantissa, exponent) : ldexp(mantissa, exponent);
+#else
+ constexpr uint64_t kMantissaMask =
+ (uint64_t{1} << (kTargetMantissaBits - 1)) - 1;
+ uint64_t dbl = static_cast<uint64_t>(sign) << 63;
+ if (mantissa > kMantissaMask) {
+ // Normal value.
+ // Adjust by 1023 for the exponent representation bias, and an additional
+      // 52 due to the implied decimal point in the IEEE mantissa representation.
+ dbl += uint64_t{exponent + 1023u + kTargetMantissaBits - 1} << 52;
+ mantissa &= kMantissaMask;
+ } else {
+ // subnormal value
+ assert(exponent == kMinNormalExponent);
+ }
+ dbl += mantissa;
+ return y_absl::bit_cast<double>(dbl);
+#endif // ABSL_BIT_PACK_FLOATS
+ }
+};
+
+// Specialization of floating point traits for the `float` type. See the
+// FloatTraits<double> specialization above for meaning of each of the following
+// members and methods.
+template <>
+struct FloatTraits<float> {
+ static constexpr int kTargetMantissaBits = 24;
+ static constexpr int kMaxExponent = 104;
+ static constexpr int kMinNormalExponent = -149;
+ static float MakeNan(const char* tagp) {
+ // Support nanf no matter which namespace it's in. Some platforms
+ // incorrectly don't put it in namespace std.
+ using namespace std; // NOLINT
+ return nanf(tagp);
+ }
+ static float Make(uint32_t mantissa, int exponent, bool sign) {
+#ifndef ABSL_BIT_PACK_FLOATS
+ // Support ldexpf no matter which namespace it's in. Some platforms
+ // incorrectly don't put it in namespace std.
+ using namespace std; // NOLINT
+ return sign ? -ldexpf(mantissa, exponent) : ldexpf(mantissa, exponent);
+#else
+ constexpr uint32_t kMantissaMask =
+ (uint32_t{1} << (kTargetMantissaBits - 1)) - 1;
+ uint32_t flt = static_cast<uint32_t>(sign) << 31;
+ if (mantissa > kMantissaMask) {
+ // Normal value.
+ // Adjust by 127 for the exponent representation bias, and an additional
+      // 23 due to the implied decimal point in the IEEE mantissa representation.
+ flt += uint32_t{exponent + 127u + kTargetMantissaBits - 1} << 23;
+ mantissa &= kMantissaMask;
+ } else {
+ // subnormal value
+ assert(exponent == kMinNormalExponent);
+ }
+ flt += mantissa;
+ return y_absl::bit_cast<float>(flt);
+#endif // ABSL_BIT_PACK_FLOATS
+ }
+};
+
+// Decimal-to-binary conversions require coercing powers of 10 into a mantissa
+// and a power of 2. The two helper functions Power10Mantissa(n) and
+// Power10Exponent(n) perform this task. Together, these represent a hand-
+// rolled floating point value which is equal to or just less than 10**n.
+//
+// The return values satisfy two range guarantees:
+//
+// Power10Mantissa(n) * 2**Power10Exponent(n) <= 10**n
+// < (Power10Mantissa(n) + 1) * 2**Power10Exponent(n)
+//
+// 2**63 <= Power10Mantissa(n) < 2**64.
+//
+// Lookups into the power-of-10 table must first check the Power10Overflow() and
+// Power10Underflow() functions, to avoid out-of-bounds table access.
+//
+// Indexes into these tables are biased by -kPower10TableMin, and the tables
+// cover exponents in the range [kPower10TableMin, kPower10TableMax].
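+//
+// As an illustrative check (implied by the invariants above): for n == 0 the
+// tables must hold Power10Mantissa(0) == 2**63 and Power10Exponent(0) == -63,
+// since 2**63 * 2**-63 == 1 == 10**0 exactly.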
+extern const uint64_t kPower10MantissaTable[];
+extern const int16_t kPower10ExponentTable[];
+
+// The smallest allowed value for use with the Power10Mantissa() and
+// Power10Exponent() functions below. (If a smaller exponent is needed in
+// calculations, the end result is guaranteed to underflow.)
+constexpr int kPower10TableMin = -342;
+
+// The largest allowed value for use with the Power10Mantissa() and
+// Power10Exponent() functions below. (If a larger exponent is needed in
+// calculations, the end result is guaranteed to overflow.)
+constexpr int kPower10TableMax = 308;
+
+uint64_t Power10Mantissa(int n) {
+ return kPower10MantissaTable[n - kPower10TableMin];
+}
+
+int Power10Exponent(int n) {
+ return kPower10ExponentTable[n - kPower10TableMin];
+}
+
+// Returns true if n is large enough that 10**n always results in an IEEE
+// overflow.
+bool Power10Overflow(int n) { return n > kPower10TableMax; }
+
+// Returns true if n is small enough that 10**n times a ParsedFloat mantissa
+// always results in an IEEE underflow.
+bool Power10Underflow(int n) { return n < kPower10TableMin; }
+
+// Returns true if Power10Mantissa(n) * 2**Power10Exponent(n) is exactly equal
+// to 10**n numerically. Put another way, this returns true if there is no
+// truncation error in Power10Mantissa(n).
+bool Power10Exact(int n) { return n >= 0 && n <= 27; }
+
+// Sentinel exponent values for representing numbers too large or too close to
+// zero to represent in a double.
+constexpr int kOverflow = 99999;
+constexpr int kUnderflow = -99999;
+
+// Struct representing the calculated conversion result of a positive (nonzero)
+// floating point number.
+//
+// The calculated number is mantissa * 2**exponent (mantissa is treated as an
+// integer.) `mantissa` is chosen to be the correct width for the IEEE float
+// representation being calculated. (`mantissa` will always have the same bit
+// width for normal values, and narrower bit widths for subnormals.)
+//
+// If the result of conversion was an underflow or overflow, exponent is set
+// to kUnderflow or kOverflow.
+struct CalculatedFloat {
+ uint64_t mantissa = 0;
+ int exponent = 0;
+};
+
+// Returns the bit width of the given uint128. (Equivalently, returns 128
+// minus the number of leading zero bits.)
+unsigned BitWidth(uint128 value) {
+ if (Uint128High64(value) == 0) {
+ return static_cast<unsigned>(bit_width(Uint128Low64(value)));
+ }
+ return 128 - countl_zero(Uint128High64(value));
+}
+
+// Calculates how far to the right a mantissa needs to be shifted to create a
+// properly adjusted mantissa for an IEEE floating point number.
+//
+// `mantissa_width` is the bit width of the mantissa to be shifted, and
+// `binary_exponent` is the exponent of the number before the shift.
+//
+// This accounts for subnormal values, and will return a larger-than-normal
+// shift if binary_exponent would otherwise be too low.
+template <typename FloatType>
+int NormalizedShiftSize(int mantissa_width, int binary_exponent) {
+ const int normal_shift =
+ mantissa_width - FloatTraits<FloatType>::kTargetMantissaBits;
+ const int minimum_shift =
+ FloatTraits<FloatType>::kMinNormalExponent - binary_exponent;
+ return std::max(normal_shift, minimum_shift);
+}
+
+// Right shifts a uint128 so that it has the requested bit width. (The
+// resulting value will have 128 - bit_width leading zeroes.) The initial
+// `value` must be wider than the requested bit width.
+//
+// Returns the number of bits shifted.
+int TruncateToBitWidth(int bit_width, uint128* value) {
+ const int current_bit_width = BitWidth(*value);
+ const int shift = current_bit_width - bit_width;
+ *value >>= shift;
+ return shift;
+}
+
+// Checks if the given ParsedFloat represents one of the edge cases that are
+// not dependent on number base: zero, infinity, or NaN. If so, sets *value to
+// the appropriate double, and returns true.
+template <typename FloatType>
+bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative,
+ FloatType* value) {
+ if (input.type == strings_internal::FloatType::kNan) {
+ // A bug in both clang and gcc would cause the compiler to optimize away the
+ // buffer we are building below. Declaring the buffer volatile avoids the
+ // issue, and has no measurable performance impact in microbenchmarks.
+ //
+ // https://bugs.llvm.org/show_bug.cgi?id=37778
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86113
+ constexpr ptrdiff_t kNanBufferSize = 128;
+ volatile char n_char_sequence[kNanBufferSize];
+ if (input.subrange_begin == nullptr) {
+ n_char_sequence[0] = '\0';
+ } else {
+ ptrdiff_t nan_size = input.subrange_end - input.subrange_begin;
+ nan_size = std::min(nan_size, kNanBufferSize - 1);
+ std::copy_n(input.subrange_begin, nan_size, n_char_sequence);
+ n_char_sequence[nan_size] = '\0';
+ }
+ char* nan_argument = const_cast<char*>(n_char_sequence);
+ *value = negative ? -FloatTraits<FloatType>::MakeNan(nan_argument)
+ : FloatTraits<FloatType>::MakeNan(nan_argument);
+ return true;
+ }
+ if (input.type == strings_internal::FloatType::kInfinity) {
+ *value = negative ? -std::numeric_limits<FloatType>::infinity()
+ : std::numeric_limits<FloatType>::infinity();
+ return true;
+ }
+ if (input.mantissa == 0) {
+ *value = negative ? -0.0 : 0.0;
+ return true;
+ }
+ return false;
+}
+
+// Given a CalculatedFloat result of a from_chars conversion, generate the
+// correct output values.
+//
+// CalculatedFloat can represent an underflow or overflow, in which case the
+// error code in *result is set. Otherwise, the calculated floating point
+// number is stored in *value.
+template <typename FloatType>
+void EncodeResult(const CalculatedFloat& calculated, bool negative,
+ y_absl::from_chars_result* result, FloatType* value) {
+ if (calculated.exponent == kOverflow) {
+ result->ec = std::errc::result_out_of_range;
+ *value = negative ? -std::numeric_limits<FloatType>::max()
+ : std::numeric_limits<FloatType>::max();
+ return;
+ } else if (calculated.mantissa == 0 || calculated.exponent == kUnderflow) {
+ result->ec = std::errc::result_out_of_range;
+ *value = negative ? -0.0 : 0.0;
+ return;
+ }
+ *value = FloatTraits<FloatType>::Make(calculated.mantissa,
+ calculated.exponent, negative);
+}
+
+// Returns the given uint128 shifted to the right by `shift` bits, and rounds
+// the remaining bits using round_to_nearest logic. The value is returned as a
+// uint64_t, since this is the type used by this library for storing calculated
+// floating point mantissas.
+//
+// It is expected that the width of the input value shifted by `shift` will
+// be the correct bit-width for the target mantissa, which is strictly narrower
+// than a uint64_t.
+//
+// If `input_exact` is false, then a nonzero error epsilon is assumed. For
+// rounding purposes, the true value being rounded is strictly greater than the
+// input value. The error may represent a single lost carry bit.
+//
+// When input_exact, shifted bits of the form 1000000... represent a tie, which
+// is broken by rounding to even -- the rounding direction is chosen so the low
+// bit of the returned value is 0.
+//
+// When !input_exact, shifted bits of the form 10000000... represent a value
+// strictly greater than one half (due to the error epsilon), and so ties are
+// always broken by rounding up.
+//
+// When !input_exact, shifted bits of the form 01111111... are uncertain;
+// the true value may or may not be greater than 10000000..., due to the
+// possible lost carry bit. The correct rounding direction is unknown. In this
+// case, the result is rounded down, and `output_exact` is set to false.
+//
+// Zero and negative values of `shift` are accepted, in which case the word is
+// shifted left, as necessary.
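+//
+// Illustrative example: ShiftRightAndRound(uint128(11), 2,
+// /*input_exact=*/true, &e) yields 3 and leaves e == true, since the
+// shifted-away bits 0b11 exceed the halfway point 0b10 (11 / 4 == 2.75,
+// which rounds up to 3).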
+uint64_t ShiftRightAndRound(uint128 value, int shift, bool input_exact,
+ bool* output_exact) {
+ if (shift <= 0) {
+ *output_exact = input_exact;
+ return static_cast<uint64_t>(value << -shift);
+ }
+ if (shift >= 128) {
+ // Exponent is so small that we are shifting away all significant bits.
+ // Answer will not be representable, even as a subnormal, so return a zero
+ // mantissa (which represents underflow).
+ *output_exact = true;
+ return 0;
+ }
+
+ *output_exact = true;
+ const uint128 shift_mask = (uint128(1) << shift) - 1;
+ const uint128 halfway_point = uint128(1) << (shift - 1);
+
+ const uint128 shifted_bits = value & shift_mask;
+ value >>= shift;
+ if (shifted_bits > halfway_point) {
+ // Shifted bits greater than 10000... require rounding up.
+ return static_cast<uint64_t>(value + 1);
+ }
+ if (shifted_bits == halfway_point) {
+ // In exact mode, shifted bits of 10000... mean we're exactly halfway
+ // between two numbers, and we must round to even. So only round up if
+ // the low bit of `value` is set.
+ //
+ // In inexact mode, the nonzero error means the actual value is greater
+    // than the halfway point and we must always round up.
+ if ((value & 1) == 1 || !input_exact) {
+ ++value;
+ }
+ return static_cast<uint64_t>(value);
+ }
+ if (!input_exact && shifted_bits == halfway_point - 1) {
+ // Rounding direction is unclear, due to error.
+ *output_exact = false;
+ }
+ // Otherwise, round down.
+ return static_cast<uint64_t>(value);
+}
+
+// Checks if a floating point guess needs to be rounded up, using high precision
+// math.
+//
+// `guess_mantissa` and `guess_exponent` represent a candidate guess for the
+// number represented by `parsed_decimal`.
+//
+// The exact number represented by `parsed_decimal` must lie between the two
+// numbers:
+// A = `guess_mantissa * 2**guess_exponent`
+// B = `(guess_mantissa + 1) * 2**guess_exponent`
+//
+// This function returns false if `A` is the better guess, and true if `B` is
+// the better guess, with rounding ties broken by rounding to even.
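+//
+// For example (illustrative): for guess_mantissa = 5 and guess_exponent = 0,
+// A = 5 and B = 6; the adjustment below rewrites the guess as 11 * 2**-1 =
+// 5.5, the midpoint of A and B, so a single big-integer comparison against
+// the exact decimal value decides the rounding direction.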
+bool MustRoundUp(uint64_t guess_mantissa, int guess_exponent,
+ const strings_internal::ParsedFloat& parsed_decimal) {
+ // 768 is the number of digits needed in the worst case. We could determine a
+ // better limit dynamically based on the value of parsed_decimal.exponent.
+ // This would optimize pathological input cases only. (Sane inputs won't have
+ // hundreds of digits of mantissa.)
+ y_absl::strings_internal::BigUnsigned<84> exact_mantissa;
+ int exact_exponent = exact_mantissa.ReadFloatMantissa(parsed_decimal, 768);
+
+ // Adjust the `guess` arguments to be halfway between A and B.
+ guess_mantissa = guess_mantissa * 2 + 1;
+ guess_exponent -= 1;
+
+ // In our comparison:
+ // lhs = exact = exact_mantissa * 10**exact_exponent
+ // = exact_mantissa * 5**exact_exponent * 2**exact_exponent
+ // rhs = guess = guess_mantissa * 2**guess_exponent
+ //
+ // Because we are doing integer math, we can't directly deal with negative
+ // exponents. We instead move these to the other side of the inequality.
+ y_absl::strings_internal::BigUnsigned<84>& lhs = exact_mantissa;
+ int comparison;
+ if (exact_exponent >= 0) {
+ lhs.MultiplyByFiveToTheNth(exact_exponent);
+ y_absl::strings_internal::BigUnsigned<84> rhs(guess_mantissa);
+ // There are powers of 2 on both sides of the inequality; reduce this to
+ // a single bit-shift.
+ if (exact_exponent > guess_exponent) {
+ lhs.ShiftLeft(exact_exponent - guess_exponent);
+ } else {
+ rhs.ShiftLeft(guess_exponent - exact_exponent);
+ }
+ comparison = Compare(lhs, rhs);
+ } else {
+ // Move the power of 5 to the other side of the equation, giving us:
+ // lhs = exact_mantissa * 2**exact_exponent
+ // rhs = guess_mantissa * 5**(-exact_exponent) * 2**guess_exponent
+ y_absl::strings_internal::BigUnsigned<84> rhs =
+ y_absl::strings_internal::BigUnsigned<84>::FiveToTheNth(-exact_exponent);
+ rhs.MultiplyBy(guess_mantissa);
+ if (exact_exponent > guess_exponent) {
+ lhs.ShiftLeft(exact_exponent - guess_exponent);
+ } else {
+ rhs.ShiftLeft(guess_exponent - exact_exponent);
+ }
+ comparison = Compare(lhs, rhs);
+ }
+ if (comparison < 0) {
+ return false;
+ } else if (comparison > 0) {
+ return true;
+ } else {
+ // When lhs == rhs, the decimal input is exactly between A and B.
+ // Round towards even -- round up only if the low bit of the initial
+ // `guess_mantissa` was a 1. We shifted guess_mantissa left 1 bit at
+ // the beginning of this function, so test the 2nd bit here.
+ return (guess_mantissa & 2) == 2;
+ }
+}
+
+// Constructs a CalculatedFloat from a given mantissa and exponent, but
+// with the following normalizations applied:
+//
+// If rounding has caused mantissa to increase just past the allowed bit
+// width, shift and adjust exponent.
+//
+// If exponent is too high, sets kOverflow.
+//
+// If mantissa is zero (representing a non-zero value not representable, even
+// as a subnormal), sets kUnderflow.
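+//
+// For example (illustrative): for `double`, whose target mantissa is 53 bits,
+// a rounded mantissa of exactly 2**53 is renormalized to 2**52 with the
+// exponent incremented by one.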
+template <typename FloatType>
+CalculatedFloat CalculatedFloatFromRawValues(uint64_t mantissa, int exponent) {
+ CalculatedFloat result;
+ if (mantissa == uint64_t{1} << FloatTraits<FloatType>::kTargetMantissaBits) {
+ mantissa >>= 1;
+ exponent += 1;
+ }
+ if (exponent > FloatTraits<FloatType>::kMaxExponent) {
+ result.exponent = kOverflow;
+ } else if (mantissa == 0) {
+ result.exponent = kUnderflow;
+ } else {
+ result.exponent = exponent;
+ result.mantissa = mantissa;
+ }
+ return result;
+}
+
+template <typename FloatType>
+CalculatedFloat CalculateFromParsedHexadecimal(
+ const strings_internal::ParsedFloat& parsed_hex) {
+ uint64_t mantissa = parsed_hex.mantissa;
+ int exponent = parsed_hex.exponent;
+ auto mantissa_width = static_cast<unsigned>(bit_width(mantissa));
+ const int shift = NormalizedShiftSize<FloatType>(mantissa_width, exponent);
+ bool result_exact;
+ exponent += shift;
+ mantissa = ShiftRightAndRound(mantissa, shift,
+ /* input exact= */ true, &result_exact);
+ // ParseFloat handles rounding in the hexadecimal case, so we don't have to
+ // check `result_exact` here.
+ return CalculatedFloatFromRawValues<FloatType>(mantissa, exponent);
+}
+
+template <typename FloatType>
+CalculatedFloat CalculateFromParsedDecimal(
+ const strings_internal::ParsedFloat& parsed_decimal) {
+ CalculatedFloat result;
+
+ // Large or small enough decimal exponents will always result in overflow
+ // or underflow.
+ if (Power10Underflow(parsed_decimal.exponent)) {
+ result.exponent = kUnderflow;
+ return result;
+ } else if (Power10Overflow(parsed_decimal.exponent)) {
+ result.exponent = kOverflow;
+ return result;
+ }
+
+ // Otherwise convert our power of 10 into a power of 2 times an integer
+ // mantissa, and multiply this by our parsed decimal mantissa.
+ uint128 wide_binary_mantissa = parsed_decimal.mantissa;
+ wide_binary_mantissa *= Power10Mantissa(parsed_decimal.exponent);
+ int binary_exponent = Power10Exponent(parsed_decimal.exponent);
+
+ // Discard bits that are inaccurate due to truncation error. The magic
+ // `mantissa_width` constants below are justified in
+ // https://abseil.io/about/design/charconv. They represent the number of bits
+ // in `wide_binary_mantissa` that are guaranteed to be unaffected by error
+ // propagation.
+ bool mantissa_exact;
+ int mantissa_width;
+ if (parsed_decimal.subrange_begin) {
+ // Truncated mantissa
+ mantissa_width = 58;
+ mantissa_exact = false;
+ binary_exponent +=
+ TruncateToBitWidth(mantissa_width, &wide_binary_mantissa);
+ } else if (!Power10Exact(parsed_decimal.exponent)) {
+ // Exact mantissa, truncated power of ten
+ mantissa_width = 63;
+ mantissa_exact = false;
+ binary_exponent +=
+ TruncateToBitWidth(mantissa_width, &wide_binary_mantissa);
+ } else {
+ // Product is exact
+ mantissa_width = BitWidth(wide_binary_mantissa);
+ mantissa_exact = true;
+ }
+
+  // Shift into a FloatType-sized mantissa, and round to nearest.
+ const int shift =
+ NormalizedShiftSize<FloatType>(mantissa_width, binary_exponent);
+ bool result_exact;
+ binary_exponent += shift;
+ uint64_t binary_mantissa = ShiftRightAndRound(wide_binary_mantissa, shift,
+ mantissa_exact, &result_exact);
+ if (!result_exact) {
+ // We could not determine the rounding direction using int128 math. Use
+ // full resolution math instead.
+ if (MustRoundUp(binary_mantissa, binary_exponent, parsed_decimal)) {
+ binary_mantissa += 1;
+ }
+ }
+
+ return CalculatedFloatFromRawValues<FloatType>(binary_mantissa,
+ binary_exponent);
+}
+
+template <typename FloatType>
+from_chars_result FromCharsImpl(const char* first, const char* last,
+ FloatType& value, chars_format fmt_flags) {
+ from_chars_result result;
+ result.ptr = first; // overwritten on successful parse
+ result.ec = std::errc();
+
+ bool negative = false;
+ if (first != last && *first == '-') {
+ ++first;
+ negative = true;
+ }
+ // If the `hex` flag is *not* set, then we will accept a 0x prefix and try
+ // to parse a hexadecimal float.
+ if ((fmt_flags & chars_format::hex) == chars_format{} && last - first >= 2 &&
+ *first == '0' && (first[1] == 'x' || first[1] == 'X')) {
+ const char* hex_first = first + 2;
+ strings_internal::ParsedFloat hex_parse =
+ strings_internal::ParseFloat<16>(hex_first, last, fmt_flags);
+ if (hex_parse.end == nullptr ||
+ hex_parse.type != strings_internal::FloatType::kNumber) {
+ // Either we failed to parse a hex float after the "0x", or we read
+ // "0xinf" or "0xnan" which we don't want to match.
+ //
+ // However, a string that begins with "0x" also begins with "0", which
+ // is normally a valid match for the number zero. So we want these
+ // strings to match zero unless fmt_flags is `scientific`. (This flag
+ // means an exponent is required, which the string "0" does not have.)
+ if (fmt_flags == chars_format::scientific) {
+ result.ec = std::errc::invalid_argument;
+ } else {
+ result.ptr = first + 1;
+ value = negative ? -0.0 : 0.0;
+ }
+ return result;
+ }
+ // We matched a value.
+ result.ptr = hex_parse.end;
+ if (HandleEdgeCase(hex_parse, negative, &value)) {
+ return result;
+ }
+ CalculatedFloat calculated =
+ CalculateFromParsedHexadecimal<FloatType>(hex_parse);
+ EncodeResult(calculated, negative, &result, &value);
+ return result;
+ }
+ // Otherwise, we choose the number base based on the flags.
+ if ((fmt_flags & chars_format::hex) == chars_format::hex) {
+ strings_internal::ParsedFloat hex_parse =
+ strings_internal::ParseFloat<16>(first, last, fmt_flags);
+ if (hex_parse.end == nullptr) {
+ result.ec = std::errc::invalid_argument;
+ return result;
+ }
+ result.ptr = hex_parse.end;
+ if (HandleEdgeCase(hex_parse, negative, &value)) {
+ return result;
+ }
+ CalculatedFloat calculated =
+ CalculateFromParsedHexadecimal<FloatType>(hex_parse);
+ EncodeResult(calculated, negative, &result, &value);
+ return result;
+ } else {
+ strings_internal::ParsedFloat decimal_parse =
+ strings_internal::ParseFloat<10>(first, last, fmt_flags);
+ if (decimal_parse.end == nullptr) {
+ result.ec = std::errc::invalid_argument;
+ return result;
+ }
+ result.ptr = decimal_parse.end;
+ if (HandleEdgeCase(decimal_parse, negative, &value)) {
+ return result;
+ }
+ CalculatedFloat calculated =
+ CalculateFromParsedDecimal<FloatType>(decimal_parse);
+ EncodeResult(calculated, negative, &result, &value);
+ return result;
+ }
+}
+} // namespace
+
+from_chars_result from_chars(const char* first, const char* last, double& value,
+ chars_format fmt) {
+ return FromCharsImpl(first, last, value, fmt);
+}
+
+from_chars_result from_chars(const char* first, const char* last, float& value,
+ chars_format fmt) {
+ return FromCharsImpl(first, last, value, fmt);
+}
+
+namespace {
+
+// Table of powers of 10, from kPower10TableMin to kPower10TableMax.
+//
+// kPower10MantissaTable[i - kPower10TableMin] stores the 64-bit mantissa (high
+// bit always on), and kPower10ExponentTable[i - kPower10TableMin] stores the
+// power-of-two exponent. For a given number i, this gives the unique mantissa
+// and exponent such that mantissa * 2**exponent <= 10**i < (mantissa + 1) *
+// 2**exponent.
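+//
+// For example (illustrative): 10**0 is stored as mantissa 0x8000000000000000
+// (i.e., 2**63) with exponent -63, since 2**63 * 2**-63 == 1 exactly.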
+
+const uint64_t kPower10MantissaTable[] = {
+ 0xeef453d6923bd65aU, 0x9558b4661b6565f8U, 0xbaaee17fa23ebf76U,
+ 0xe95a99df8ace6f53U, 0x91d8a02bb6c10594U, 0xb64ec836a47146f9U,
+ 0xe3e27a444d8d98b7U, 0x8e6d8c6ab0787f72U, 0xb208ef855c969f4fU,
+ 0xde8b2b66b3bc4723U, 0x8b16fb203055ac76U, 0xaddcb9e83c6b1793U,
+ 0xd953e8624b85dd78U, 0x87d4713d6f33aa6bU, 0xa9c98d8ccb009506U,
+ 0xd43bf0effdc0ba48U, 0x84a57695fe98746dU, 0xa5ced43b7e3e9188U,
+ 0xcf42894a5dce35eaU, 0x818995ce7aa0e1b2U, 0xa1ebfb4219491a1fU,
+ 0xca66fa129f9b60a6U, 0xfd00b897478238d0U, 0x9e20735e8cb16382U,
+ 0xc5a890362fddbc62U, 0xf712b443bbd52b7bU, 0x9a6bb0aa55653b2dU,
+ 0xc1069cd4eabe89f8U, 0xf148440a256e2c76U, 0x96cd2a865764dbcaU,
+ 0xbc807527ed3e12bcU, 0xeba09271e88d976bU, 0x93445b8731587ea3U,
+ 0xb8157268fdae9e4cU, 0xe61acf033d1a45dfU, 0x8fd0c16206306babU,
+ 0xb3c4f1ba87bc8696U, 0xe0b62e2929aba83cU, 0x8c71dcd9ba0b4925U,
+ 0xaf8e5410288e1b6fU, 0xdb71e91432b1a24aU, 0x892731ac9faf056eU,
+ 0xab70fe17c79ac6caU, 0xd64d3d9db981787dU, 0x85f0468293f0eb4eU,
+ 0xa76c582338ed2621U, 0xd1476e2c07286faaU, 0x82cca4db847945caU,
+ 0xa37fce126597973cU, 0xcc5fc196fefd7d0cU, 0xff77b1fcbebcdc4fU,
+ 0x9faacf3df73609b1U, 0xc795830d75038c1dU, 0xf97ae3d0d2446f25U,
+ 0x9becce62836ac577U, 0xc2e801fb244576d5U, 0xf3a20279ed56d48aU,
+ 0x9845418c345644d6U, 0xbe5691ef416bd60cU, 0xedec366b11c6cb8fU,
+ 0x94b3a202eb1c3f39U, 0xb9e08a83a5e34f07U, 0xe858ad248f5c22c9U,
+ 0x91376c36d99995beU, 0xb58547448ffffb2dU, 0xe2e69915b3fff9f9U,
+ 0x8dd01fad907ffc3bU, 0xb1442798f49ffb4aU, 0xdd95317f31c7fa1dU,
+ 0x8a7d3eef7f1cfc52U, 0xad1c8eab5ee43b66U, 0xd863b256369d4a40U,
+ 0x873e4f75e2224e68U, 0xa90de3535aaae202U, 0xd3515c2831559a83U,
+ 0x8412d9991ed58091U, 0xa5178fff668ae0b6U, 0xce5d73ff402d98e3U,
+ 0x80fa687f881c7f8eU, 0xa139029f6a239f72U, 0xc987434744ac874eU,
+ 0xfbe9141915d7a922U, 0x9d71ac8fada6c9b5U, 0xc4ce17b399107c22U,
+ 0xf6019da07f549b2bU, 0x99c102844f94e0fbU, 0xc0314325637a1939U,
+ 0xf03d93eebc589f88U, 0x96267c7535b763b5U, 0xbbb01b9283253ca2U,
+ 0xea9c227723ee8bcbU, 0x92a1958a7675175fU, 0xb749faed14125d36U,
+ 0xe51c79a85916f484U, 0x8f31cc0937ae58d2U, 0xb2fe3f0b8599ef07U,
+ 0xdfbdcece67006ac9U, 0x8bd6a141006042bdU, 0xaecc49914078536dU,
+ 0xda7f5bf590966848U, 0x888f99797a5e012dU, 0xaab37fd7d8f58178U,
+ 0xd5605fcdcf32e1d6U, 0x855c3be0a17fcd26U, 0xa6b34ad8c9dfc06fU,
+ 0xd0601d8efc57b08bU, 0x823c12795db6ce57U, 0xa2cb1717b52481edU,
+ 0xcb7ddcdda26da268U, 0xfe5d54150b090b02U, 0x9efa548d26e5a6e1U,
+ 0xc6b8e9b0709f109aU, 0xf867241c8cc6d4c0U, 0x9b407691d7fc44f8U,
+ 0xc21094364dfb5636U, 0xf294b943e17a2bc4U, 0x979cf3ca6cec5b5aU,
+ 0xbd8430bd08277231U, 0xece53cec4a314ebdU, 0x940f4613ae5ed136U,
+ 0xb913179899f68584U, 0xe757dd7ec07426e5U, 0x9096ea6f3848984fU,
+ 0xb4bca50b065abe63U, 0xe1ebce4dc7f16dfbU, 0x8d3360f09cf6e4bdU,
+ 0xb080392cc4349decU, 0xdca04777f541c567U, 0x89e42caaf9491b60U,
+ 0xac5d37d5b79b6239U, 0xd77485cb25823ac7U, 0x86a8d39ef77164bcU,
+ 0xa8530886b54dbdebU, 0xd267caa862a12d66U, 0x8380dea93da4bc60U,
+ 0xa46116538d0deb78U, 0xcd795be870516656U, 0x806bd9714632dff6U,
+ 0xa086cfcd97bf97f3U, 0xc8a883c0fdaf7df0U, 0xfad2a4b13d1b5d6cU,
+ 0x9cc3a6eec6311a63U, 0xc3f490aa77bd60fcU, 0xf4f1b4d515acb93bU,
+ 0x991711052d8bf3c5U, 0xbf5cd54678eef0b6U, 0xef340a98172aace4U,
+ 0x9580869f0e7aac0eU, 0xbae0a846d2195712U, 0xe998d258869facd7U,
+ 0x91ff83775423cc06U, 0xb67f6455292cbf08U, 0xe41f3d6a7377eecaU,
+ 0x8e938662882af53eU, 0xb23867fb2a35b28dU, 0xdec681f9f4c31f31U,
+ 0x8b3c113c38f9f37eU, 0xae0b158b4738705eU, 0xd98ddaee19068c76U,
+ 0x87f8a8d4cfa417c9U, 0xa9f6d30a038d1dbcU, 0xd47487cc8470652bU,
+ 0x84c8d4dfd2c63f3bU, 0xa5fb0a17c777cf09U, 0xcf79cc9db955c2ccU,
+ 0x81ac1fe293d599bfU, 0xa21727db38cb002fU, 0xca9cf1d206fdc03bU,
+ 0xfd442e4688bd304aU, 0x9e4a9cec15763e2eU, 0xc5dd44271ad3cdbaU,
+ 0xf7549530e188c128U, 0x9a94dd3e8cf578b9U, 0xc13a148e3032d6e7U,
+ 0xf18899b1bc3f8ca1U, 0x96f5600f15a7b7e5U, 0xbcb2b812db11a5deU,
+ 0xebdf661791d60f56U, 0x936b9fcebb25c995U, 0xb84687c269ef3bfbU,
+ 0xe65829b3046b0afaU, 0x8ff71a0fe2c2e6dcU, 0xb3f4e093db73a093U,
+ 0xe0f218b8d25088b8U, 0x8c974f7383725573U, 0xafbd2350644eeacfU,
+ 0xdbac6c247d62a583U, 0x894bc396ce5da772U, 0xab9eb47c81f5114fU,
+ 0xd686619ba27255a2U, 0x8613fd0145877585U, 0xa798fc4196e952e7U,
+ 0xd17f3b51fca3a7a0U, 0x82ef85133de648c4U, 0xa3ab66580d5fdaf5U,
+ 0xcc963fee10b7d1b3U, 0xffbbcfe994e5c61fU, 0x9fd561f1fd0f9bd3U,
+ 0xc7caba6e7c5382c8U, 0xf9bd690a1b68637bU, 0x9c1661a651213e2dU,
+ 0xc31bfa0fe5698db8U, 0xf3e2f893dec3f126U, 0x986ddb5c6b3a76b7U,
+ 0xbe89523386091465U, 0xee2ba6c0678b597fU, 0x94db483840b717efU,
+ 0xba121a4650e4ddebU, 0xe896a0d7e51e1566U, 0x915e2486ef32cd60U,
+ 0xb5b5ada8aaff80b8U, 0xe3231912d5bf60e6U, 0x8df5efabc5979c8fU,
+ 0xb1736b96b6fd83b3U, 0xddd0467c64bce4a0U, 0x8aa22c0dbef60ee4U,
+ 0xad4ab7112eb3929dU, 0xd89d64d57a607744U, 0x87625f056c7c4a8bU,
+ 0xa93af6c6c79b5d2dU, 0xd389b47879823479U, 0x843610cb4bf160cbU,
+ 0xa54394fe1eedb8feU, 0xce947a3da6a9273eU, 0x811ccc668829b887U,
+ 0xa163ff802a3426a8U, 0xc9bcff6034c13052U, 0xfc2c3f3841f17c67U,
+ 0x9d9ba7832936edc0U, 0xc5029163f384a931U, 0xf64335bcf065d37dU,
+ 0x99ea0196163fa42eU, 0xc06481fb9bcf8d39U, 0xf07da27a82c37088U,
+ 0x964e858c91ba2655U, 0xbbe226efb628afeaU, 0xeadab0aba3b2dbe5U,
+ 0x92c8ae6b464fc96fU, 0xb77ada0617e3bbcbU, 0xe55990879ddcaabdU,
+ 0x8f57fa54c2a9eab6U, 0xb32df8e9f3546564U, 0xdff9772470297ebdU,
+ 0x8bfbea76c619ef36U, 0xaefae51477a06b03U, 0xdab99e59958885c4U,
+ 0x88b402f7fd75539bU, 0xaae103b5fcd2a881U, 0xd59944a37c0752a2U,
+ 0x857fcae62d8493a5U, 0xa6dfbd9fb8e5b88eU, 0xd097ad07a71f26b2U,
+ 0x825ecc24c873782fU, 0xa2f67f2dfa90563bU, 0xcbb41ef979346bcaU,
+ 0xfea126b7d78186bcU, 0x9f24b832e6b0f436U, 0xc6ede63fa05d3143U,
+ 0xf8a95fcf88747d94U, 0x9b69dbe1b548ce7cU, 0xc24452da229b021bU,
+ 0xf2d56790ab41c2a2U, 0x97c560ba6b0919a5U, 0xbdb6b8e905cb600fU,
+ 0xed246723473e3813U, 0x9436c0760c86e30bU, 0xb94470938fa89bceU,
+ 0xe7958cb87392c2c2U, 0x90bd77f3483bb9b9U, 0xb4ecd5f01a4aa828U,
+ 0xe2280b6c20dd5232U, 0x8d590723948a535fU, 0xb0af48ec79ace837U,
+ 0xdcdb1b2798182244U, 0x8a08f0f8bf0f156bU, 0xac8b2d36eed2dac5U,
+ 0xd7adf884aa879177U, 0x86ccbb52ea94baeaU, 0xa87fea27a539e9a5U,
+ 0xd29fe4b18e88640eU, 0x83a3eeeef9153e89U, 0xa48ceaaab75a8e2bU,
+ 0xcdb02555653131b6U, 0x808e17555f3ebf11U, 0xa0b19d2ab70e6ed6U,
+ 0xc8de047564d20a8bU, 0xfb158592be068d2eU, 0x9ced737bb6c4183dU,
+ 0xc428d05aa4751e4cU, 0xf53304714d9265dfU, 0x993fe2c6d07b7fabU,
+ 0xbf8fdb78849a5f96U, 0xef73d256a5c0f77cU, 0x95a8637627989aadU,
+ 0xbb127c53b17ec159U, 0xe9d71b689dde71afU, 0x9226712162ab070dU,
+ 0xb6b00d69bb55c8d1U, 0xe45c10c42a2b3b05U, 0x8eb98a7a9a5b04e3U,
+ 0xb267ed1940f1c61cU, 0xdf01e85f912e37a3U, 0x8b61313bbabce2c6U,
+ 0xae397d8aa96c1b77U, 0xd9c7dced53c72255U, 0x881cea14545c7575U,
+ 0xaa242499697392d2U, 0xd4ad2dbfc3d07787U, 0x84ec3c97da624ab4U,
+ 0xa6274bbdd0fadd61U, 0xcfb11ead453994baU, 0x81ceb32c4b43fcf4U,
+ 0xa2425ff75e14fc31U, 0xcad2f7f5359a3b3eU, 0xfd87b5f28300ca0dU,
+ 0x9e74d1b791e07e48U, 0xc612062576589ddaU, 0xf79687aed3eec551U,
+ 0x9abe14cd44753b52U, 0xc16d9a0095928a27U, 0xf1c90080baf72cb1U,
+ 0x971da05074da7beeU, 0xbce5086492111aeaU, 0xec1e4a7db69561a5U,
+ 0x9392ee8e921d5d07U, 0xb877aa3236a4b449U, 0xe69594bec44de15bU,
+ 0x901d7cf73ab0acd9U, 0xb424dc35095cd80fU, 0xe12e13424bb40e13U,
+ 0x8cbccc096f5088cbU, 0xafebff0bcb24aafeU, 0xdbe6fecebdedd5beU,
+ 0x89705f4136b4a597U, 0xabcc77118461cefcU, 0xd6bf94d5e57a42bcU,
+ 0x8637bd05af6c69b5U, 0xa7c5ac471b478423U, 0xd1b71758e219652bU,
+ 0x83126e978d4fdf3bU, 0xa3d70a3d70a3d70aU, 0xccccccccccccccccU,
+ 0x8000000000000000U, 0xa000000000000000U, 0xc800000000000000U,
+ 0xfa00000000000000U, 0x9c40000000000000U, 0xc350000000000000U,
+ 0xf424000000000000U, 0x9896800000000000U, 0xbebc200000000000U,
+ 0xee6b280000000000U, 0x9502f90000000000U, 0xba43b74000000000U,
+ 0xe8d4a51000000000U, 0x9184e72a00000000U, 0xb5e620f480000000U,
+ 0xe35fa931a0000000U, 0x8e1bc9bf04000000U, 0xb1a2bc2ec5000000U,
+ 0xde0b6b3a76400000U, 0x8ac7230489e80000U, 0xad78ebc5ac620000U,
+ 0xd8d726b7177a8000U, 0x878678326eac9000U, 0xa968163f0a57b400U,
+ 0xd3c21bcecceda100U, 0x84595161401484a0U, 0xa56fa5b99019a5c8U,
+ 0xcecb8f27f4200f3aU, 0x813f3978f8940984U, 0xa18f07d736b90be5U,
+ 0xc9f2c9cd04674edeU, 0xfc6f7c4045812296U, 0x9dc5ada82b70b59dU,
+ 0xc5371912364ce305U, 0xf684df56c3e01bc6U, 0x9a130b963a6c115cU,
+ 0xc097ce7bc90715b3U, 0xf0bdc21abb48db20U, 0x96769950b50d88f4U,
+ 0xbc143fa4e250eb31U, 0xeb194f8e1ae525fdU, 0x92efd1b8d0cf37beU,
+ 0xb7abc627050305adU, 0xe596b7b0c643c719U, 0x8f7e32ce7bea5c6fU,
+ 0xb35dbf821ae4f38bU, 0xe0352f62a19e306eU, 0x8c213d9da502de45U,
+ 0xaf298d050e4395d6U, 0xdaf3f04651d47b4cU, 0x88d8762bf324cd0fU,
+ 0xab0e93b6efee0053U, 0xd5d238a4abe98068U, 0x85a36366eb71f041U,
+ 0xa70c3c40a64e6c51U, 0xd0cf4b50cfe20765U, 0x82818f1281ed449fU,
+ 0xa321f2d7226895c7U, 0xcbea6f8ceb02bb39U, 0xfee50b7025c36a08U,
+ 0x9f4f2726179a2245U, 0xc722f0ef9d80aad6U, 0xf8ebad2b84e0d58bU,
+ 0x9b934c3b330c8577U, 0xc2781f49ffcfa6d5U, 0xf316271c7fc3908aU,
+ 0x97edd871cfda3a56U, 0xbde94e8e43d0c8ecU, 0xed63a231d4c4fb27U,
+ 0x945e455f24fb1cf8U, 0xb975d6b6ee39e436U, 0xe7d34c64a9c85d44U,
+ 0x90e40fbeea1d3a4aU, 0xb51d13aea4a488ddU, 0xe264589a4dcdab14U,
+ 0x8d7eb76070a08aecU, 0xb0de65388cc8ada8U, 0xdd15fe86affad912U,
+ 0x8a2dbf142dfcc7abU, 0xacb92ed9397bf996U, 0xd7e77a8f87daf7fbU,
+ 0x86f0ac99b4e8dafdU, 0xa8acd7c0222311bcU, 0xd2d80db02aabd62bU,
+ 0x83c7088e1aab65dbU, 0xa4b8cab1a1563f52U, 0xcde6fd5e09abcf26U,
+ 0x80b05e5ac60b6178U, 0xa0dc75f1778e39d6U, 0xc913936dd571c84cU,
+ 0xfb5878494ace3a5fU, 0x9d174b2dcec0e47bU, 0xc45d1df942711d9aU,
+ 0xf5746577930d6500U, 0x9968bf6abbe85f20U, 0xbfc2ef456ae276e8U,
+ 0xefb3ab16c59b14a2U, 0x95d04aee3b80ece5U, 0xbb445da9ca61281fU,
+ 0xea1575143cf97226U, 0x924d692ca61be758U, 0xb6e0c377cfa2e12eU,
+ 0xe498f455c38b997aU, 0x8edf98b59a373fecU, 0xb2977ee300c50fe7U,
+ 0xdf3d5e9bc0f653e1U, 0x8b865b215899f46cU, 0xae67f1e9aec07187U,
+ 0xda01ee641a708de9U, 0x884134fe908658b2U, 0xaa51823e34a7eedeU,
+ 0xd4e5e2cdc1d1ea96U, 0x850fadc09923329eU, 0xa6539930bf6bff45U,
+ 0xcfe87f7cef46ff16U, 0x81f14fae158c5f6eU, 0xa26da3999aef7749U,
+ 0xcb090c8001ab551cU, 0xfdcb4fa002162a63U, 0x9e9f11c4014dda7eU,
+ 0xc646d63501a1511dU, 0xf7d88bc24209a565U, 0x9ae757596946075fU,
+ 0xc1a12d2fc3978937U, 0xf209787bb47d6b84U, 0x9745eb4d50ce6332U,
+ 0xbd176620a501fbffU, 0xec5d3fa8ce427affU, 0x93ba47c980e98cdfU,
+ 0xb8a8d9bbe123f017U, 0xe6d3102ad96cec1dU, 0x9043ea1ac7e41392U,
+ 0xb454e4a179dd1877U, 0xe16a1dc9d8545e94U, 0x8ce2529e2734bb1dU,
+ 0xb01ae745b101e9e4U, 0xdc21a1171d42645dU, 0x899504ae72497ebaU,
+ 0xabfa45da0edbde69U, 0xd6f8d7509292d603U, 0x865b86925b9bc5c2U,
+ 0xa7f26836f282b732U, 0xd1ef0244af2364ffU, 0x8335616aed761f1fU,
+ 0xa402b9c5a8d3a6e7U, 0xcd036837130890a1U, 0x802221226be55a64U,
+ 0xa02aa96b06deb0fdU, 0xc83553c5c8965d3dU, 0xfa42a8b73abbf48cU,
+ 0x9c69a97284b578d7U, 0xc38413cf25e2d70dU, 0xf46518c2ef5b8cd1U,
+ 0x98bf2f79d5993802U, 0xbeeefb584aff8603U, 0xeeaaba2e5dbf6784U,
+ 0x952ab45cfa97a0b2U, 0xba756174393d88dfU, 0xe912b9d1478ceb17U,
+ 0x91abb422ccb812eeU, 0xb616a12b7fe617aaU, 0xe39c49765fdf9d94U,
+ 0x8e41ade9fbebc27dU, 0xb1d219647ae6b31cU, 0xde469fbd99a05fe3U,
+ 0x8aec23d680043beeU, 0xada72ccc20054ae9U, 0xd910f7ff28069da4U,
+ 0x87aa9aff79042286U, 0xa99541bf57452b28U, 0xd3fa922f2d1675f2U,
+ 0x847c9b5d7c2e09b7U, 0xa59bc234db398c25U, 0xcf02b2c21207ef2eU,
+ 0x8161afb94b44f57dU, 0xa1ba1ba79e1632dcU, 0xca28a291859bbf93U,
+ 0xfcb2cb35e702af78U, 0x9defbf01b061adabU, 0xc56baec21c7a1916U,
+ 0xf6c69a72a3989f5bU, 0x9a3c2087a63f6399U, 0xc0cb28a98fcf3c7fU,
+ 0xf0fdf2d3f3c30b9fU, 0x969eb7c47859e743U, 0xbc4665b596706114U,
+ 0xeb57ff22fc0c7959U, 0x9316ff75dd87cbd8U, 0xb7dcbf5354e9beceU,
+ 0xe5d3ef282a242e81U, 0x8fa475791a569d10U, 0xb38d92d760ec4455U,
+ 0xe070f78d3927556aU, 0x8c469ab843b89562U, 0xaf58416654a6babbU,
+ 0xdb2e51bfe9d0696aU, 0x88fcf317f22241e2U, 0xab3c2fddeeaad25aU,
+ 0xd60b3bd56a5586f1U, 0x85c7056562757456U, 0xa738c6bebb12d16cU,
+ 0xd106f86e69d785c7U, 0x82a45b450226b39cU, 0xa34d721642b06084U,
+ 0xcc20ce9bd35c78a5U, 0xff290242c83396ceU, 0x9f79a169bd203e41U,
+ 0xc75809c42c684dd1U, 0xf92e0c3537826145U, 0x9bbcc7a142b17ccbU,
+ 0xc2abf989935ddbfeU, 0xf356f7ebf83552feU, 0x98165af37b2153deU,
+ 0xbe1bf1b059e9a8d6U, 0xeda2ee1c7064130cU, 0x9485d4d1c63e8be7U,
+ 0xb9a74a0637ce2ee1U, 0xe8111c87c5c1ba99U, 0x910ab1d4db9914a0U,
+ 0xb54d5e4a127f59c8U, 0xe2a0b5dc971f303aU, 0x8da471a9de737e24U,
+ 0xb10d8e1456105dadU, 0xdd50f1996b947518U, 0x8a5296ffe33cc92fU,
+ 0xace73cbfdc0bfb7bU, 0xd8210befd30efa5aU, 0x8714a775e3e95c78U,
+ 0xa8d9d1535ce3b396U, 0xd31045a8341ca07cU, 0x83ea2b892091e44dU,
+ 0xa4e4b66b68b65d60U, 0xce1de40642e3f4b9U, 0x80d2ae83e9ce78f3U,
+ 0xa1075a24e4421730U, 0xc94930ae1d529cfcU, 0xfb9b7cd9a4a7443cU,
+ 0x9d412e0806e88aa5U, 0xc491798a08a2ad4eU, 0xf5b5d7ec8acb58a2U,
+ 0x9991a6f3d6bf1765U, 0xbff610b0cc6edd3fU, 0xeff394dcff8a948eU,
+ 0x95f83d0a1fb69cd9U, 0xbb764c4ca7a4440fU, 0xea53df5fd18d5513U,
+ 0x92746b9be2f8552cU, 0xb7118682dbb66a77U, 0xe4d5e82392a40515U,
+ 0x8f05b1163ba6832dU, 0xb2c71d5bca9023f8U, 0xdf78e4b2bd342cf6U,
+ 0x8bab8eefb6409c1aU, 0xae9672aba3d0c320U, 0xda3c0f568cc4f3e8U,
+ 0x8865899617fb1871U, 0xaa7eebfb9df9de8dU, 0xd51ea6fa85785631U,
+ 0x8533285c936b35deU, 0xa67ff273b8460356U, 0xd01fef10a657842cU,
+ 0x8213f56a67f6b29bU, 0xa298f2c501f45f42U, 0xcb3f2f7642717713U,
+ 0xfe0efb53d30dd4d7U, 0x9ec95d1463e8a506U, 0xc67bb4597ce2ce48U,
+ 0xf81aa16fdc1b81daU, 0x9b10a4e5e9913128U, 0xc1d4ce1f63f57d72U,
+ 0xf24a01a73cf2dccfU, 0x976e41088617ca01U, 0xbd49d14aa79dbc82U,
+ 0xec9c459d51852ba2U, 0x93e1ab8252f33b45U, 0xb8da1662e7b00a17U,
+ 0xe7109bfba19c0c9dU, 0x906a617d450187e2U, 0xb484f9dc9641e9daU,
+ 0xe1a63853bbd26451U, 0x8d07e33455637eb2U, 0xb049dc016abc5e5fU,
+ 0xdc5c5301c56b75f7U, 0x89b9b3e11b6329baU, 0xac2820d9623bf429U,
+ 0xd732290fbacaf133U, 0x867f59a9d4bed6c0U, 0xa81f301449ee8c70U,
+ 0xd226fc195c6a2f8cU, 0x83585d8fd9c25db7U, 0xa42e74f3d032f525U,
+ 0xcd3a1230c43fb26fU, 0x80444b5e7aa7cf85U, 0xa0555e361951c366U,
+ 0xc86ab5c39fa63440U, 0xfa856334878fc150U, 0x9c935e00d4b9d8d2U,
+ 0xc3b8358109e84f07U, 0xf4a642e14c6262c8U, 0x98e7e9cccfbd7dbdU,
+ 0xbf21e44003acdd2cU, 0xeeea5d5004981478U, 0x95527a5202df0ccbU,
+ 0xbaa718e68396cffdU, 0xe950df20247c83fdU, 0x91d28b7416cdd27eU,
+ 0xb6472e511c81471dU, 0xe3d8f9e563a198e5U, 0x8e679c2f5e44ff8fU,
+};
+
+const int16_t kPower10ExponentTable[] = {
+ -1200, -1196, -1193, -1190, -1186, -1183, -1180, -1176, -1173, -1170, -1166,
+ -1163, -1160, -1156, -1153, -1150, -1146, -1143, -1140, -1136, -1133, -1130,
+ -1127, -1123, -1120, -1117, -1113, -1110, -1107, -1103, -1100, -1097, -1093,
+ -1090, -1087, -1083, -1080, -1077, -1073, -1070, -1067, -1063, -1060, -1057,
+ -1053, -1050, -1047, -1043, -1040, -1037, -1034, -1030, -1027, -1024, -1020,
+ -1017, -1014, -1010, -1007, -1004, -1000, -997, -994, -990, -987, -984,
+ -980, -977, -974, -970, -967, -964, -960, -957, -954, -950, -947,
+ -944, -940, -937, -934, -931, -927, -924, -921, -917, -914, -911,
+ -907, -904, -901, -897, -894, -891, -887, -884, -881, -877, -874,
+ -871, -867, -864, -861, -857, -854, -851, -847, -844, -841, -838,
+ -834, -831, -828, -824, -821, -818, -814, -811, -808, -804, -801,
+ -798, -794, -791, -788, -784, -781, -778, -774, -771, -768, -764,
+ -761, -758, -754, -751, -748, -744, -741, -738, -735, -731, -728,
+ -725, -721, -718, -715, -711, -708, -705, -701, -698, -695, -691,
+ -688, -685, -681, -678, -675, -671, -668, -665, -661, -658, -655,
+ -651, -648, -645, -642, -638, -635, -632, -628, -625, -622, -618,
+ -615, -612, -608, -605, -602, -598, -595, -592, -588, -585, -582,
+ -578, -575, -572, -568, -565, -562, -558, -555, -552, -549, -545,
+ -542, -539, -535, -532, -529, -525, -522, -519, -515, -512, -509,
+ -505, -502, -499, -495, -492, -489, -485, -482, -479, -475, -472,
+ -469, -465, -462, -459, -455, -452, -449, -446, -442, -439, -436,
+ -432, -429, -426, -422, -419, -416, -412, -409, -406, -402, -399,
+ -396, -392, -389, -386, -382, -379, -376, -372, -369, -366, -362,
+ -359, -356, -353, -349, -346, -343, -339, -336, -333, -329, -326,
+ -323, -319, -316, -313, -309, -306, -303, -299, -296, -293, -289,
+ -286, -283, -279, -276, -273, -269, -266, -263, -259, -256, -253,
+ -250, -246, -243, -240, -236, -233, -230, -226, -223, -220, -216,
+ -213, -210, -206, -203, -200, -196, -193, -190, -186, -183, -180,
+ -176, -173, -170, -166, -163, -160, -157, -153, -150, -147, -143,
+ -140, -137, -133, -130, -127, -123, -120, -117, -113, -110, -107,
+ -103, -100, -97, -93, -90, -87, -83, -80, -77, -73, -70,
+ -67, -63, -60, -57, -54, -50, -47, -44, -40, -37, -34,
+ -30, -27, -24, -20, -17, -14, -10, -7, -4, 0, 3,
+ 6, 10, 13, 16, 20, 23, 26, 30, 33, 36, 39,
+ 43, 46, 49, 53, 56, 59, 63, 66, 69, 73, 76,
+ 79, 83, 86, 89, 93, 96, 99, 103, 106, 109, 113,
+ 116, 119, 123, 126, 129, 132, 136, 139, 142, 146, 149,
+ 152, 156, 159, 162, 166, 169, 172, 176, 179, 182, 186,
+ 189, 192, 196, 199, 202, 206, 209, 212, 216, 219, 222,
+ 226, 229, 232, 235, 239, 242, 245, 249, 252, 255, 259,
+ 262, 265, 269, 272, 275, 279, 282, 285, 289, 292, 295,
+ 299, 302, 305, 309, 312, 315, 319, 322, 325, 328, 332,
+ 335, 338, 342, 345, 348, 352, 355, 358, 362, 365, 368,
+ 372, 375, 378, 382, 385, 388, 392, 395, 398, 402, 405,
+ 408, 412, 415, 418, 422, 425, 428, 431, 435, 438, 441,
+ 445, 448, 451, 455, 458, 461, 465, 468, 471, 475, 478,
+ 481, 485, 488, 491, 495, 498, 501, 505, 508, 511, 515,
+ 518, 521, 524, 528, 531, 534, 538, 541, 544, 548, 551,
+ 554, 558, 561, 564, 568, 571, 574, 578, 581, 584, 588,
+ 591, 594, 598, 601, 604, 608, 611, 614, 617, 621, 624,
+ 627, 631, 634, 637, 641, 644, 647, 651, 654, 657, 661,
+ 664, 667, 671, 674, 677, 681, 684, 687, 691, 694, 697,
+ 701, 704, 707, 711, 714, 717, 720, 724, 727, 730, 734,
+ 737, 740, 744, 747, 750, 754, 757, 760, 764, 767, 770,
+ 774, 777, 780, 784, 787, 790, 794, 797, 800, 804, 807,
+ 810, 813, 817, 820, 823, 827, 830, 833, 837, 840, 843,
+ 847, 850, 853, 857, 860, 863, 867, 870, 873, 877, 880,
+ 883, 887, 890, 893, 897, 900, 903, 907, 910, 913, 916,
+ 920, 923, 926, 930, 933, 936, 940, 943, 946, 950, 953,
+ 956, 960,
+};
+
+} // namespace
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.h
new file mode 100644
index 00000000000..1a115aa2514
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.h
@@ -0,0 +1,120 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_CHARCONV_H_
+#define ABSL_STRINGS_CHARCONV_H_
+
+#include <system_error> // NOLINT(build/c++11)
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Workalike compatibility version of std::chars_format from C++17.
+//
+// This is a bitfield enumerator which can be passed to y_absl::from_chars to
+// configure the string-to-float conversion.
+enum class chars_format {
+ scientific = 1,
+ fixed = 2,
+ hex = 4,
+ general = fixed | scientific,
+};
+
+// The return result of a string-to-number conversion.
+//
+// `ec` will be set to `invalid_argument` if a well-formed number was not found
+// at the start of the input range, `result_out_of_range` if a well-formed
+// number was found, but it was out of the representable range of the requested
+// type, or to std::errc() otherwise.
+//
+// If a well-formed number was found, `ptr` is set to one past the sequence of
+// characters that were successfully parsed. If none was found, `ptr` is set
+// to the `first` argument to from_chars.
+struct from_chars_result {
+ const char* ptr;
+ std::errc ec;
+};
+
+// Workalike compatibility version of std::from_chars from C++17. Currently
+// this only supports the `double` and `float` types.
+//
+// This interface incorporates the proposed resolutions for library issues
+// DR 3080 and DR 3081. If these are adopted with different wording,
+// Abseil's behavior will change to match the standard. (The behavior most
+// likely to change is for DR 3081, which says what `value` will be set to in
+// the case of overflow and underflow. Code that wants to avoid possible
+// breaking changes in this area should not depend on `value` when the returned
+// from_chars_result indicates a range error.)
+//
+// Searches the range [first, last) for the longest matching pattern beginning
+// at `first` that represents a floating point number. If one is found, store
+// the result in `value`.
+//
+// The matching pattern format is almost the same as that of strtod(), except
+// that (1) C locale is not respected, (2) an initial '+' character in the
+// input range will never be matched, and (3) leading whitespace is not
+// ignored.
+//
+// If `fmt` is set, it must be one of the enumerator values of the chars_format.
+// (This is despite the fact that chars_format is a bitmask type.) If set to
+// `scientific`, a matching number must contain an exponent. If set to `fixed`,
+// then an exponent will never match. (For example, the string "1e5" will be
+// parsed as "1".) If set to `hex`, then a hexadecimal float is parsed in the
+// format that strtod() accepts, except that a "0x" prefix is NOT matched.
+// (In particular, in `hex` mode, the input "0xff" results in the largest
+// matching pattern "0".)
+y_absl::from_chars_result from_chars(const char* first, const char* last,
+ double& value, // NOLINT
+ chars_format fmt = chars_format::general);
+
+y_absl::from_chars_result from_chars(const char* first, const char* last,
+ float& value, // NOLINT
+ chars_format fmt = chars_format::general);
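+// Example usage (illustrative):
+//
+//   double d;
+//   y_absl::from_chars_result res =
+//       y_absl::from_chars(s.data(), s.data() + s.size(), d);
+//   if (res.ec == std::errc()) {
+//     // `d` holds the parsed value; `res.ptr` points one past the number.
+//   }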
+
+// std::chars_format is specified as a bitmask type, which means the following
+// operations must be provided:
+inline constexpr chars_format operator&(chars_format lhs, chars_format rhs) {
+ return static_cast<chars_format>(static_cast<int>(lhs) &
+ static_cast<int>(rhs));
+}
+inline constexpr chars_format operator|(chars_format lhs, chars_format rhs) {
+ return static_cast<chars_format>(static_cast<int>(lhs) |
+ static_cast<int>(rhs));
+}
+inline constexpr chars_format operator^(chars_format lhs, chars_format rhs) {
+ return static_cast<chars_format>(static_cast<int>(lhs) ^
+ static_cast<int>(rhs));
+}
+inline constexpr chars_format operator~(chars_format arg) {
+ return static_cast<chars_format>(~static_cast<int>(arg));
+}
+inline chars_format& operator&=(chars_format& lhs, chars_format rhs) {
+ lhs = lhs & rhs;
+ return lhs;
+}
+inline chars_format& operator|=(chars_format& lhs, chars_format rhs) {
+ lhs = lhs | rhs;
+ return lhs;
+}
+inline chars_format& operator^=(chars_format& lhs, chars_format rhs) {
+ lhs = lhs ^ rhs;
+ return lhs;
+}
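+
+// For example (illustrative), these operators enable the mask tests used by
+// the implementation, e.g.:
+//
+//   if ((fmt & y_absl::chars_format::hex) == y_absl::chars_format::hex) {
+//     // Hexadecimal parsing was requested.
+//   }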
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_CHARCONV_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc
new file mode 100644
index 00000000000..0de4ea1b3c1
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc
@@ -0,0 +1,2047 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/cord.h"
+
+#include <algorithm>
+#include <atomic>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+#include <iomanip>
+#include <iostream>
+#include <limits>
+#include <ostream>
+#include <sstream>
+#include <type_traits>
+#include <unordered_set>
+#include <vector>
+
+#include "y_absl/base/casts.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+#include "y_absl/container/fixed_array.h"
+#include "y_absl/container/inlined_vector.h"
+#include "y_absl/strings/escaping.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+#include "y_absl/strings/internal/cordz_statistics.h"
+#include "y_absl/strings/internal/cordz_update_scope.h"
+#include "y_absl/strings/internal/cordz_update_tracker.h"
+#include "y_absl/strings/internal/resize_uninitialized.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/str_format.h"
+#include "y_absl/strings/str_join.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+using ::y_absl::cord_internal::CordRep;
+using ::y_absl::cord_internal::CordRepBtree;
+using ::y_absl::cord_internal::CordRepConcat;
+using ::y_absl::cord_internal::CordRepExternal;
+using ::y_absl::cord_internal::CordRepFlat;
+using ::y_absl::cord_internal::CordRepSubstring;
+using ::y_absl::cord_internal::CordzUpdateTracker;
+using ::y_absl::cord_internal::InlineData;
+using ::y_absl::cord_internal::kMaxFlatLength;
+using ::y_absl::cord_internal::kMinFlatLength;
+
+using ::y_absl::cord_internal::kInlinedVectorSize;
+using ::y_absl::cord_internal::kMaxBytesToCopy;
+
+constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) {
+ return n == 0 ? a : Fibonacci(n - 1, b, a + b);
+}
+
+static_assert(Fibonacci(63) == 6557470319842,
+ "Fibonacci values computed incorrectly");
+
+// Minimum length required for a tree of a given depth -- a tree is considered
+// balanced if
+// length(t) >= min_length[depth(t)]
+// The root node depth is allowed to become twice as large to reduce rebalancing
+// for larger strings (see IsRootBalanced).
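+// For example (illustrative): min_length[3] == Fibonacci(5) == 5, so a concat
+// tree of depth 3 is considered balanced only if it spans at least 5 bytes.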
+static constexpr uint64_t min_length[] = {
+ Fibonacci(2), Fibonacci(3), Fibonacci(4), Fibonacci(5),
+ Fibonacci(6), Fibonacci(7), Fibonacci(8), Fibonacci(9),
+ Fibonacci(10), Fibonacci(11), Fibonacci(12), Fibonacci(13),
+ Fibonacci(14), Fibonacci(15), Fibonacci(16), Fibonacci(17),
+ Fibonacci(18), Fibonacci(19), Fibonacci(20), Fibonacci(21),
+ Fibonacci(22), Fibonacci(23), Fibonacci(24), Fibonacci(25),
+ Fibonacci(26), Fibonacci(27), Fibonacci(28), Fibonacci(29),
+ Fibonacci(30), Fibonacci(31), Fibonacci(32), Fibonacci(33),
+ Fibonacci(34), Fibonacci(35), Fibonacci(36), Fibonacci(37),
+ Fibonacci(38), Fibonacci(39), Fibonacci(40), Fibonacci(41),
+ Fibonacci(42), Fibonacci(43), Fibonacci(44), Fibonacci(45),
+ Fibonacci(46), Fibonacci(47),
+ 0xffffffffffffffffull, // Avoid overflow
+};
+
+static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length);
+
+static inline bool btree_enabled() {
+ return cord_internal::cord_btree_enabled.load(
+ std::memory_order_relaxed);
+}
+
+static inline bool IsRootBalanced(CordRep* node) {
+ if (!node->IsConcat()) {
+ return true;
+ } else if (node->concat()->depth() <= 15) {
+ return true;
+ } else if (node->concat()->depth() > kMinLengthSize) {
+ return false;
+ } else {
+    // Allow depth to become twice as large as implied by the Fibonacci rule
+    // to reduce rebalancing for larger strings.
+ // reduce rebalancing for larger strings.
+ return (node->length >= min_length[node->concat()->depth() / 2]);
+ }
+}
+
+static CordRep* Rebalance(CordRep* node);
+static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
+ int indent = 0);
+static bool VerifyNode(CordRep* root, CordRep* start_node,
+ bool full_validation);
+
+static inline CordRep* VerifyTree(CordRep* node) {
+ // Verification is expensive, so only do it in debug mode.
+ // Even in debug mode we normally do only light validation.
+ // If you are debugging Cord itself, you should define the
+ // macro EXTRA_CORD_VALIDATION, e.g. by adding
+ // --copt=-DEXTRA_CORD_VALIDATION to the blaze line.
+#ifdef EXTRA_CORD_VALIDATION
+ assert(node == nullptr || VerifyNode(node, node, /*full_validation=*/true));
+#else // EXTRA_CORD_VALIDATION
+ assert(node == nullptr || VerifyNode(node, node, /*full_validation=*/false));
+#endif // EXTRA_CORD_VALIDATION
+ static_cast<void>(&VerifyNode);
+
+ return node;
+}
+
+// Return the depth of a node
+static int Depth(const CordRep* rep) {
+ if (rep->IsConcat()) {
+ return rep->concat()->depth();
+ } else {
+ return 0;
+ }
+}
+
+static void SetConcatChildren(CordRepConcat* concat, CordRep* left,
+ CordRep* right) {
+ concat->left = left;
+ concat->right = right;
+
+ concat->length = left->length + right->length;
+ concat->set_depth(1 + std::max(Depth(left), Depth(right)));
+}
+
+// Create a concatenation of the specified nodes.
+// Does not change the refcounts of "left" and "right".
+// The returned node has a refcount of 1.
+static CordRep* RawConcat(CordRep* left, CordRep* right) {
+ // Avoid making degenerate concat nodes (one child is empty)
+ if (left == nullptr) return right;
+ if (right == nullptr) return left;
+ if (left->length == 0) {
+ CordRep::Unref(left);
+ return right;
+ }
+ if (right->length == 0) {
+ CordRep::Unref(right);
+ return left;
+ }
+
+ CordRepConcat* rep = new CordRepConcat();
+ rep->tag = cord_internal::CONCAT;
+ SetConcatChildren(rep, left, right);
+
+ return rep;
+}
+
+static CordRep* Concat(CordRep* left, CordRep* right) {
+ CordRep* rep = RawConcat(left, right);
+ if (rep != nullptr && !IsRootBalanced(rep)) {
+ rep = Rebalance(rep);
+ }
+ return VerifyTree(rep);
+}
+
+// Make a balanced tree out of an array of leaf nodes.
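+// For example (illustrative): five leaves [a, b, c, d, e] merge pairwise to
+// [ab, cd, e], then to [abcd, e], then to a single root.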
+static CordRep* MakeBalancedTree(CordRep** reps, size_t n) {
+ // Make repeated passes over the array, merging adjacent pairs
+ // until we are left with just a single node.
+ while (n > 1) {
+ size_t dst = 0;
+ for (size_t src = 0; src < n; src += 2) {
+ if (src + 1 < n) {
+ reps[dst] = Concat(reps[src], reps[src + 1]);
+ } else {
+ reps[dst] = reps[src];
+ }
+ dst++;
+ }
+ n = dst;
+ }
+
+ return reps[0];
+}
+
+static CordRepFlat* CreateFlat(const char* data, size_t length,
+ size_t alloc_hint) {
+ CordRepFlat* flat = CordRepFlat::New(length + alloc_hint);
+ flat->length = length;
+ memcpy(flat->Data(), data, length);
+ return flat;
+}
+
+// Creates a new flat or Btree out of the specified array.
+// The returned node has a refcount of 1.
+static CordRep* NewBtree(const char* data, size_t length, size_t alloc_hint) {
+ if (length <= kMaxFlatLength) {
+ return CreateFlat(data, length, alloc_hint);
+ }
+ CordRepFlat* flat = CreateFlat(data, kMaxFlatLength, 0);
+ data += kMaxFlatLength;
+ length -= kMaxFlatLength;
+ auto* root = CordRepBtree::Create(flat);
+ return CordRepBtree::Append(root, {data, length}, alloc_hint);
+}
+
+// Create a new tree out of the specified array.
+// The returned node has a refcount of 1.
+static CordRep* NewTree(const char* data, size_t length, size_t alloc_hint) {
+ if (length == 0) return nullptr;
+ if (btree_enabled()) {
+ return NewBtree(data, length, alloc_hint);
+ }
+ y_absl::FixedArray<CordRep*> reps((length - 1) / kMaxFlatLength + 1);
+ size_t n = 0;
+ do {
+ const size_t len = std::min(length, kMaxFlatLength);
+ CordRepFlat* rep = CordRepFlat::New(len + alloc_hint);
+ rep->length = len;
+ memcpy(rep->Data(), data, len);
+ reps[n++] = VerifyTree(rep);
+ data += len;
+ length -= len;
+ } while (length != 0);
+ return MakeBalancedTree(reps.data(), n);
+}
+
+namespace cord_internal {
+
+void InitializeCordRepExternal(y_absl::string_view data, CordRepExternal* rep) {
+ assert(!data.empty());
+ rep->length = data.size();
+ rep->tag = EXTERNAL;
+ rep->base = data.data();
+ VerifyTree(rep);
+}
+
+} // namespace cord_internal
+
+static CordRep* NewSubstring(CordRep* child, size_t offset, size_t length) {
+ // Never create empty substring nodes
+ if (length == 0) {
+ CordRep::Unref(child);
+ return nullptr;
+ } else {
+ CordRepSubstring* rep = new CordRepSubstring();
+ assert((offset + length) <= child->length);
+ rep->length = length;
+ rep->tag = cord_internal::SUBSTRING;
+ rep->start = offset;
+ rep->child = child;
+ return VerifyTree(rep);
+ }
+}
+
+// Creates a CordRep from the provided string. If the string is large enough,
+// and not wasteful, we move the string into an external cord rep, preserving
+// the already allocated string contents.
+// Requires the provided string length to be larger than `kMaxInline`.
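+//
+// For example (illustrative): a large TString whose capacity is mostly in
+// use moves into an external rep without copying its contents, while a
+// string no larger than kMaxBytesToCopy is copied into the tree instead.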
+static CordRep* CordRepFromString(TString&& src) {
+ assert(src.length() > cord_internal::kMaxInline);
+ if (
+ // String is short: copy data to avoid external block overhead.
+ src.size() <= kMaxBytesToCopy ||
+ // String is wasteful: copy data to avoid pinning too much unused memory.
+ src.size() < src.capacity() / 2
+ ) {
+ return NewTree(src.data(), src.size(), 0);
+ }
+
+ struct StringReleaser {
+ void operator()(y_absl::string_view /* data */) {}
+ TString data;
+ };
+ const y_absl::string_view original_data = src;
+ auto* rep =
+ static_cast<::y_absl::cord_internal::CordRepExternalImpl<StringReleaser>*>(
+ y_absl::cord_internal::NewExternalRep(original_data,
+ StringReleaser{std::move(src)}));
+ // Moving src may have invalidated its data pointer, so adjust it.
+ rep->base = rep->template get<0>().data.data();
+ return rep;
+}
+
+// --------------------------------------------------------------------
+// Cord::InlineRep functions
+
+constexpr unsigned char Cord::InlineRep::kMaxInline;
+
+inline void Cord::InlineRep::set_data(const char* data, size_t n,
+ bool nullify_tail) {
+ static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15");
+
+ cord_internal::SmallMemmove(data_.as_chars(), data, n, nullify_tail);
+ set_inline_size(n);
+}
+
+inline char* Cord::InlineRep::set_data(size_t n) {
+ assert(n <= kMaxInline);
+ ResetToEmpty();
+ set_inline_size(n);
+ return data_.as_chars();
+}
+
+inline void Cord::InlineRep::reduce_size(size_t n) {
+ size_t tag = inline_size();
+ assert(tag <= kMaxInline);
+ assert(tag >= n);
+ tag -= n;
+ memset(data_.as_chars() + tag, 0, n);
+ set_inline_size(static_cast<char>(tag));
+}
+
+inline void Cord::InlineRep::remove_prefix(size_t n) {
+ cord_internal::SmallMemmove(data_.as_chars(), data_.as_chars() + n,
+ inline_size() - n);
+ reduce_size(n);
+}
+
+// Returns `rep` converted into a CordRepBtree.
+// Directly returns `rep` if `rep` is already a CordRepBtree.
+static CordRepBtree* ForceBtree(CordRep* rep) {
+ return rep->IsBtree() ? rep->btree() : CordRepBtree::Create(rep);
+}
+
+void Cord::InlineRep::AppendTreeToInlined(CordRep* tree,
+ MethodIdentifier method) {
+ assert(!is_tree());
+ if (!data_.is_empty()) {
+ CordRepFlat* flat = MakeFlatWithExtraCapacity(0);
+ if (btree_enabled()) {
+ tree = CordRepBtree::Append(CordRepBtree::Create(flat), tree);
+ } else {
+ tree = Concat(flat, tree);
+ }
+ }
+ EmplaceTree(tree, method);
+}
+
+void Cord::InlineRep::AppendTreeToTree(CordRep* tree, MethodIdentifier method) {
+ assert(is_tree());
+ const CordzUpdateScope scope(data_.cordz_info(), method);
+ if (btree_enabled()) {
+ tree = CordRepBtree::Append(ForceBtree(data_.as_tree()), tree);
+ } else {
+ tree = Concat(data_.as_tree(), tree);
+ }
+ SetTree(tree, scope);
+}
+
+void Cord::InlineRep::AppendTree(CordRep* tree, MethodIdentifier method) {
+ if (tree == nullptr) return;
+ if (data_.is_tree()) {
+ AppendTreeToTree(tree, method);
+ } else {
+ AppendTreeToInlined(tree, method);
+ }
+}
+
+void Cord::InlineRep::PrependTreeToInlined(CordRep* tree,
+ MethodIdentifier method) {
+ assert(!is_tree());
+ if (!data_.is_empty()) {
+ CordRepFlat* flat = MakeFlatWithExtraCapacity(0);
+ if (btree_enabled()) {
+ tree = CordRepBtree::Prepend(CordRepBtree::Create(flat), tree);
+ } else {
+ tree = Concat(tree, flat);
+ }
+ }
+ EmplaceTree(tree, method);
+}
+
+void Cord::InlineRep::PrependTreeToTree(CordRep* tree,
+ MethodIdentifier method) {
+ assert(is_tree());
+ const CordzUpdateScope scope(data_.cordz_info(), method);
+ if (btree_enabled()) {
+ tree = CordRepBtree::Prepend(ForceBtree(data_.as_tree()), tree);
+ } else {
+ tree = Concat(tree, data_.as_tree());
+ }
+ SetTree(tree, scope);
+}
+
+void Cord::InlineRep::PrependTree(CordRep* tree, MethodIdentifier method) {
+ assert(tree != nullptr);
+ if (data_.is_tree()) {
+ PrependTreeToTree(tree, method);
+ } else {
+ PrependTreeToInlined(tree, method);
+ }
+}
+
+// Searches for a non-full flat node at the rightmost leaf of the tree. If a
+// suitable leaf is found, the function will update the length field for all
+// nodes to account for the size increase. The append region address will be
+// written to region and the actual size increase will be written to size.
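+//
+// For example (illustrative): if the rightmost FLAT leaf uses 10 of its 32
+// capacity bytes, the append region is min(22, max_length) bytes, and each
+// node on the right spine grows by that amount.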
+static inline bool PrepareAppendRegion(CordRep* root, char** region,
+ size_t* size, size_t max_length) {
+ if (root->IsBtree() && root->refcount.IsMutable()) {
+ Span<char> span = root->btree()->GetAppendBuffer(max_length);
+ if (!span.empty()) {
+ *region = span.data();
+ *size = span.size();
+ return true;
+ }
+ }
+
+ // Search down the right-hand path for a non-full FLAT node.
+ CordRep* dst = root;
+ while (dst->IsConcat() && dst->refcount.IsMutable()) {
+ dst = dst->concat()->right;
+ }
+
+ if (!dst->IsFlat() || !dst->refcount.IsMutable()) {
+ *region = nullptr;
+ *size = 0;
+ return false;
+ }
+
+ const size_t in_use = dst->length;
+ const size_t capacity = dst->flat()->Capacity();
+ if (in_use == capacity) {
+ *region = nullptr;
+ *size = 0;
+ return false;
+ }
+
+ size_t size_increase = std::min(capacity - in_use, max_length);
+
+ // We need to update the length fields for all nodes, including the leaf node.
+ for (CordRep* rep = root; rep != dst; rep = rep->concat()->right) {
+ rep->length += size_increase;
+ }
+ dst->length += size_increase;
+
+ *region = dst->flat()->Data() + in_use;
+ *size = size_increase;
+ return true;
+}
+
+template <bool has_length>
+void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
+ size_t length) {
+ auto constexpr method = CordzUpdateTracker::kGetAppendRegion;
+
+ CordRep* root = tree();
+ size_t sz = root ? root->length : inline_size();
+ if (root == nullptr) {
+ size_t available = kMaxInline - sz;
+ if (available >= (has_length ? length : 1)) {
+ *region = data_.as_chars() + sz;
+ *size = has_length ? length : available;
+ set_inline_size(has_length ? sz + length : kMaxInline);
+ return;
+ }
+ }
+
+ size_t extra = has_length ? length : (std::max)(sz, kMinFlatLength);
+ CordRep* rep = root ? root : MakeFlatWithExtraCapacity(extra);
+ CordzUpdateScope scope(root ? data_.cordz_info() : nullptr, method);
+ if (PrepareAppendRegion(rep, region, size, length)) {
+ CommitTree(root, rep, scope, method);
+ return;
+ }
+
+ // Allocate new node.
+ CordRepFlat* new_node = CordRepFlat::New(extra);
+ new_node->length = std::min(new_node->Capacity(), length);
+ *region = new_node->Data();
+ *size = new_node->length;
+
+ if (btree_enabled()) {
+ rep = CordRepBtree::Append(ForceBtree(rep), new_node);
+ } else {
+ rep = Concat(rep, new_node);
+ }
+ CommitTree(root, rep, scope, method);
+}
+
+// Computes the memory size of the provided edge, which must be a valid data
+// edge for a btree, i.e., a FLAT, EXTERNAL or SUBSTRING of a FLAT or EXTERNAL
+// node.
+static bool RepMemoryUsageDataEdge(const CordRep* rep,
+ size_t* total_mem_usage) {
+ size_t maybe_sub_size = 0;
+ if (ABSL_PREDICT_FALSE(rep->IsSubstring())) {
+ maybe_sub_size = sizeof(cord_internal::CordRepSubstring);
+ rep = rep->substring()->child;
+ }
+ if (rep->IsFlat()) {
+ *total_mem_usage += maybe_sub_size + rep->flat()->AllocatedSize();
+ return true;
+ }
+ if (rep->IsExternal()) {
+ // We don't know anything about the embedded / bound data, but we can safely
+ // assume it is 'at least' a word / pointer to data. In the future we may
+ // choose to use the 'data' byte as a tag to identify the types of some
+ // well-known externals, such as a TString instance.
+ *total_mem_usage += maybe_sub_size +
+ sizeof(cord_internal::CordRepExternalImpl<intptr_t>) +
+ rep->length;
+ return true;
+ }
+ return false;
+}
+
+// If the rep is a leaf, this will increment the value at total_mem_usage and
+// will return true.
+static bool RepMemoryUsageLeaf(const CordRep* rep, size_t* total_mem_usage) {
+ if (rep->IsFlat()) {
+ *total_mem_usage += rep->flat()->AllocatedSize();
+ return true;
+ }
+ if (rep->IsExternal()) {
+ // We don't know anything about the embedded / bound data, but we can safely
+ // assume it is 'at least' a word / pointer to data. In the future we may
+ // choose to use the 'data' byte as a tag to identify the types of some
+ // well-known externals, such as a TString instance.
+ *total_mem_usage +=
+ sizeof(cord_internal::CordRepExternalImpl<intptr_t>) + rep->length;
+ return true;
+ }
+ return false;
+}
+
+void Cord::InlineRep::AssignSlow(const Cord::InlineRep& src) {
+ assert(&src != this);
+ assert(is_tree() || src.is_tree());
+ auto constexpr method = CordzUpdateTracker::kAssignCord;
+ if (ABSL_PREDICT_TRUE(!is_tree())) {
+ EmplaceTree(CordRep::Ref(src.as_tree()), src.data_, method);
+ return;
+ }
+
+ CordRep* tree = as_tree();
+ if (CordRep* src_tree = src.tree()) {
+ // Leave any existing `cordz_info` in place, and let MaybeTrackCord()
+ // decide if this cord should be (or remains to be) sampled or not.
+ data_.set_tree(CordRep::Ref(src_tree));
+ CordzInfo::MaybeTrackCord(data_, src.data_, method);
+ } else {
+ CordzInfo::MaybeUntrackCord(data_.cordz_info());
+ data_ = src.data_;
+ }
+ CordRep::Unref(tree);
+}
+
+void Cord::InlineRep::UnrefTree() {
+ if (is_tree()) {
+ CordzInfo::MaybeUntrackCord(data_.cordz_info());
+ CordRep::Unref(tree());
+ }
+}
+
+// --------------------------------------------------------------------
+// Constructors and destructors
+
+Cord::Cord(y_absl::string_view src, MethodIdentifier method)
+ : contents_(InlineData::kDefaultInit) {
+ const size_t n = src.size();
+ if (n <= InlineRep::kMaxInline) {
+ contents_.set_data(src.data(), n, true);
+ } else {
+ CordRep* rep = NewTree(src.data(), n, 0);
+ contents_.EmplaceTree(rep, method);
+ }
+}
+
+template <typename T, Cord::EnableIfString<T>>
+Cord::Cord(T&& src) : contents_(InlineData::kDefaultInit) {
+ if (src.size() <= InlineRep::kMaxInline) {
+ contents_.set_data(src.data(), src.size(), true);
+ } else {
+ CordRep* rep = CordRepFromString(std::forward<T>(src));
+ contents_.EmplaceTree(rep, CordzUpdateTracker::kConstructorString);
+ }
+}
+
+template Cord::Cord(TString&& src);
+
+// The destruction code is separate so that the compiler can determine
+// that it does not need to call the destructor on a moved-from Cord.
+void Cord::DestroyCordSlow() {
+ assert(contents_.is_tree());
+ CordzInfo::MaybeUntrackCord(contents_.cordz_info());
+ CordRep::Unref(VerifyTree(contents_.as_tree()));
+}
+
+// --------------------------------------------------------------------
+// Mutators
+
+void Cord::Clear() {
+ if (CordRep* tree = contents_.clear()) {
+ CordRep::Unref(tree);
+ }
+}
+
+Cord& Cord::AssignLargeString(TString&& src) {
+ auto constexpr method = CordzUpdateTracker::kAssignString;
+ assert(src.size() > kMaxBytesToCopy);
+ CordRep* rep = CordRepFromString(std::move(src));
+ if (CordRep* tree = contents_.tree()) {
+ CordzUpdateScope scope(contents_.cordz_info(), method);
+ contents_.SetTree(rep, scope);
+ CordRep::Unref(tree);
+ } else {
+ contents_.EmplaceTree(rep, method);
+ }
+ return *this;
+}
+
+Cord& Cord::operator=(y_absl::string_view src) {
+ auto constexpr method = CordzUpdateTracker::kAssignString;
+ const char* data = src.data();
+ size_t length = src.size();
+ CordRep* tree = contents_.tree();
+ if (length <= InlineRep::kMaxInline) {
+ // Embed into this->contents_, which is somewhat subtle:
+ // - MaybeUntrackCord must be called before Unref(tree).
+ // - MaybeUntrackCord must be called before set_data() clobbers cordz_info.
+ // - set_data() must be called before Unref(tree) as it may reference tree.
+ if (tree != nullptr) CordzInfo::MaybeUntrackCord(contents_.cordz_info());
+ contents_.set_data(data, length, true);
+ if (tree != nullptr) CordRep::Unref(tree);
+ return *this;
+ }
+ if (tree != nullptr) {
+ CordzUpdateScope scope(contents_.cordz_info(), method);
+ if (tree->IsFlat() && tree->flat()->Capacity() >= length &&
+ tree->refcount.IsMutable()) {
+ // Copy in place if the existing FLAT node is reusable.
+ memmove(tree->flat()->Data(), data, length);
+ tree->length = length;
+ VerifyTree(tree);
+ return *this;
+ }
+ contents_.SetTree(NewTree(data, length, 0), scope);
+ CordRep::Unref(tree);
+ } else {
+ contents_.EmplaceTree(NewTree(data, length, 0), method);
+ }
+ return *this;
+}
+
+// TODO(sanjay): Move to Cord::InlineRep section of file. For now,
+// we keep it here to make diffs easier.
+void Cord::InlineRep::AppendArray(y_absl::string_view src,
+ MethodIdentifier method) {
+ if (src.empty()) return; // memcpy(_, nullptr, 0) is undefined.
+
+ size_t appended = 0;
+ CordRep* rep = tree();
+ const CordRep* const root = rep;
+ CordzUpdateScope scope(root ? cordz_info() : nullptr, method);
+ if (root != nullptr) {
+ char* region;
+ if (PrepareAppendRegion(rep, &region, &appended, src.size())) {
+ memcpy(region, src.data(), appended);
+ }
+ } else {
+ // Try to fit in the inline buffer if possible.
+ size_t inline_length = inline_size();
+ if (src.size() <= kMaxInline - inline_length) {
+ // Append new data to embedded array
+ memcpy(data_.as_chars() + inline_length, src.data(), src.size());
+ set_inline_size(inline_length + src.size());
+ return;
+ }
+
+    // Allocate a flat rep that is a perfect fit on the first append exceeding
+    // the inlined size. Subsequent growth will use amortized growth until we
+    // reach the maximum flat size.
+ rep = CordRepFlat::New(inline_length + src.size());
+ appended = std::min(src.size(), rep->flat()->Capacity() - inline_length);
+ memcpy(rep->flat()->Data(), data_.as_chars(), inline_length);
+ memcpy(rep->flat()->Data() + inline_length, src.data(), appended);
+ rep->length = inline_length + appended;
+ }
+
+ src.remove_prefix(appended);
+ if (src.empty()) {
+ CommitTree(root, rep, scope, method);
+ return;
+ }
+
+ if (btree_enabled()) {
+ // TODO(b/192061034): keep legacy 10% growth rate: consider other rates.
+ rep = ForceBtree(rep);
+ const size_t min_growth = std::max<size_t>(rep->length / 10, src.size());
+ rep = CordRepBtree::Append(rep->btree(), src, min_growth - src.size());
+ } else {
+ // Use new block(s) for any remaining bytes that were not handled above.
+ // Alloc extra memory only if the right child of the root of the new tree
+ // is going to be a FLAT node, which will permit further inplace appends.
+ size_t length = src.size();
+ if (src.size() < kMaxFlatLength) {
+      // The new total length is either
+      //   - the old length + 10% (when src is relatively small), or
+      //   - the old length + src.size() (when src dominates the growth).
+      // This gives a reasonably conservative step-up in size that is still
+      // large enough to avoid excessive amounts of small fragments being
+      // added.
+ length = std::max<size_t>(rep->length / 10, src.size());
+ }
+ rep = Concat(rep, NewTree(src.data(), src.size(), length - src.size()));
+ }
+ CommitTree(root, rep, scope, method);
+}
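+
+// Worked example of the growth policy above (illustrative numbers): appending
+// 4 bytes to a 1000-byte legacy (non-btree) cord computes
+//   length = max(1000 / 10, 4) = 100,
+// so the new node is sized for 96 extra bytes that later small appends can
+// fill in place before another node must be allocated.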
+
+inline CordRep* Cord::TakeRep() const& {
+ return CordRep::Ref(contents_.tree());
+}
+
+inline CordRep* Cord::TakeRep() && {
+ CordRep* rep = contents_.tree();
+ contents_.clear();
+ return rep;
+}
+
+template <typename C>
+inline void Cord::AppendImpl(C&& src) {
+ auto constexpr method = CordzUpdateTracker::kAppendCord;
+ if (empty()) {
+ // Since destination is empty, we can avoid allocating a node,
+ if (src.contents_.is_tree()) {
+ // by taking the tree directly
+ CordRep* rep = std::forward<C>(src).TakeRep();
+ contents_.EmplaceTree(rep, method);
+ } else {
+ // or copying over inline data
+ contents_.data_ = src.contents_.data_;
+ }
+ return;
+ }
+
+ // For short cords, it is faster to copy data if there is room in dst.
+ const size_t src_size = src.contents_.size();
+ if (src_size <= kMaxBytesToCopy) {
+ CordRep* src_tree = src.contents_.tree();
+ if (src_tree == nullptr) {
+ // src has embedded data.
+ contents_.AppendArray({src.contents_.data(), src_size}, method);
+ return;
+ }
+ if (src_tree->IsFlat()) {
+ // src tree just has one flat node.
+ contents_.AppendArray({src_tree->flat()->Data(), src_size}, method);
+ return;
+ }
+ if (&src == this) {
+ // ChunkIterator below assumes that src is not modified during traversal.
+ Append(Cord(src));
+ return;
+ }
+ // TODO(mec): Should we only do this if "dst" has space?
+ for (y_absl::string_view chunk : src.Chunks()) {
+ Append(chunk);
+ }
+ return;
+ }
+
+ // Guaranteed to be a tree (kMaxBytesToCopy > kInlinedSize)
+ CordRep* rep = std::forward<C>(src).TakeRep();
+ contents_.AppendTree(rep, CordzUpdateTracker::kAppendCord);
+}
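+
+// Note on the fast paths above: for sources up to kMaxBytesToCopy, copying
+// the bytes is typically cheaper than referencing a shared tree, since the
+// copy can often land in existing flat capacity or the inline buffer.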
+
+void Cord::Append(const Cord& src) {
+ AppendImpl(src);
+}
+
+void Cord::Append(Cord&& src) {
+ AppendImpl(std::move(src));
+}
+
+template <typename T, Cord::EnableIfString<T>>
+void Cord::Append(T&& src) {
+ if (src.size() <= kMaxBytesToCopy) {
+ Append(y_absl::string_view(src));
+ } else {
+ CordRep* rep = CordRepFromString(std::forward<T>(src));
+ contents_.AppendTree(rep, CordzUpdateTracker::kAppendString);
+ }
+}
+
+template void Cord::Append(TString&& src);
+
+void Cord::Prepend(const Cord& src) {
+ CordRep* src_tree = src.contents_.tree();
+ if (src_tree != nullptr) {
+ CordRep::Ref(src_tree);
+ contents_.PrependTree(src_tree, CordzUpdateTracker::kPrependCord);
+ return;
+ }
+
+ // `src` cord is inlined.
+ y_absl::string_view src_contents(src.contents_.data(), src.contents_.size());
+ return Prepend(src_contents);
+}
+
+void Cord::PrependArray(y_absl::string_view src, MethodIdentifier method) {
+ if (src.empty()) return; // memcpy(_, nullptr, 0) is undefined.
+ if (!contents_.is_tree()) {
+ size_t cur_size = contents_.inline_size();
+ if (cur_size + src.size() <= InlineRep::kMaxInline) {
+ // Use embedded storage.
+ char data[InlineRep::kMaxInline + 1] = {0};
+ memcpy(data, src.data(), src.size());
+ memcpy(data + src.size(), contents_.data(), cur_size);
+ memcpy(contents_.data_.as_chars(), data, InlineRep::kMaxInline + 1);
+ contents_.set_inline_size(cur_size + src.size());
+ return;
+ }
+ }
+ CordRep* rep = NewTree(src.data(), src.size(), 0);
+ contents_.PrependTree(rep, method);
+}
+
+template <typename T, Cord::EnableIfString<T>>
+inline void Cord::Prepend(T&& src) {
+ if (src.size() <= kMaxBytesToCopy) {
+ Prepend(y_absl::string_view(src));
+ } else {
+ CordRep* rep = CordRepFromString(std::forward<T>(src));
+ contents_.PrependTree(rep, CordzUpdateTracker::kPrependString);
+ }
+}
+
+template void Cord::Prepend(TString&& src);
+
+static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
+ if (n >= node->length) return nullptr;
+ if (n == 0) return CordRep::Ref(node);
+ y_absl::InlinedVector<CordRep*, kInlinedVectorSize> rhs_stack;
+
+ while (node->IsConcat()) {
+ assert(n <= node->length);
+ if (n < node->concat()->left->length) {
+ // Push right to stack, descend left.
+ rhs_stack.push_back(node->concat()->right);
+ node = node->concat()->left;
+ } else {
+ // Drop left, descend right.
+ n -= node->concat()->left->length;
+ node = node->concat()->right;
+ }
+ }
+ assert(n <= node->length);
+
+ if (n == 0) {
+ CordRep::Ref(node);
+ } else {
+ size_t start = n;
+ size_t len = node->length - n;
+ if (node->IsSubstring()) {
+      // Consider an in-place update of the node, as in RemoveSuffixFrom().
+ start += node->substring()->start;
+ node = node->substring()->child;
+ }
+ node = NewSubstring(CordRep::Ref(node), start, len);
+ }
+ while (!rhs_stack.empty()) {
+ node = Concat(node, CordRep::Ref(rhs_stack.back()));
+ rhs_stack.pop_back();
+ }
+ return node;
+}
+
+// RemoveSuffixFrom() is very similar to RemovePrefixFrom(), with the
+// exception that removing a suffix has an optimization where a node may be
+// edited in place iff that node and all its ancestors have a refcount of 1.
+static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) {
+ if (n >= node->length) return nullptr;
+ if (n == 0) return CordRep::Ref(node);
+ y_absl::InlinedVector<CordRep*, kInlinedVectorSize> lhs_stack;
+ bool inplace_ok = node->refcount.IsMutable();
+
+ while (node->IsConcat()) {
+ assert(n <= node->length);
+ if (n < node->concat()->right->length) {
+ // Push left to stack, descend right.
+ lhs_stack.push_back(node->concat()->left);
+ node = node->concat()->right;
+ } else {
+ // Drop right, descend left.
+ n -= node->concat()->right->length;
+ node = node->concat()->left;
+ }
+ inplace_ok = inplace_ok && node->refcount.IsMutable();
+ }
+ assert(n <= node->length);
+
+ if (n == 0) {
+ CordRep::Ref(node);
+ } else if (inplace_ok && !node->IsExternal()) {
+ // Consider making a new buffer if the current node capacity is much
+ // larger than the new length.
+ CordRep::Ref(node);
+ node->length -= n;
+ } else {
+ size_t start = 0;
+ size_t len = node->length - n;
+ if (node->IsSubstring()) {
+ start = node->substring()->start;
+ node = node->substring()->child;
+ }
+ node = NewSubstring(CordRep::Ref(node), start, len);
+ }
+ while (!lhs_stack.empty()) {
+ node = Concat(CordRep::Ref(lhs_stack.back()), node);
+ lhs_stack.pop_back();
+ }
+ return node;
+}
+
+void Cord::RemovePrefix(size_t n) {
+ ABSL_INTERNAL_CHECK(n <= size(),
+ y_absl::StrCat("Requested prefix size ", n,
+ " exceeds Cord's size ", size()));
+ CordRep* tree = contents_.tree();
+ if (tree == nullptr) {
+ contents_.remove_prefix(n);
+ } else {
+ auto constexpr method = CordzUpdateTracker::kRemovePrefix;
+ CordzUpdateScope scope(contents_.cordz_info(), method);
+ if (tree->IsBtree()) {
+ CordRep* old = tree;
+ tree = tree->btree()->SubTree(n, tree->length - n);
+ CordRep::Unref(old);
+ } else {
+ CordRep* newrep = RemovePrefixFrom(tree, n);
+ CordRep::Unref(tree);
+ tree = VerifyTree(newrep);
+ }
+ contents_.SetTreeOrEmpty(tree, scope);
+ }
+}
+
+void Cord::RemoveSuffix(size_t n) {
+ ABSL_INTERNAL_CHECK(n <= size(),
+ y_absl::StrCat("Requested suffix size ", n,
+ " exceeds Cord's size ", size()));
+ CordRep* tree = contents_.tree();
+ if (tree == nullptr) {
+ contents_.reduce_size(n);
+ } else {
+ auto constexpr method = CordzUpdateTracker::kRemoveSuffix;
+ CordzUpdateScope scope(contents_.cordz_info(), method);
+ if (tree->IsBtree()) {
+ tree = CordRepBtree::RemoveSuffix(tree->btree(), n);
+ } else {
+ CordRep* newrep = RemoveSuffixFrom(tree, n);
+ CordRep::Unref(tree);
+ tree = VerifyTree(newrep);
+ }
+ contents_.SetTreeOrEmpty(tree, scope);
+ }
+}
+
+// Work item for NewSubRange().
+struct SubRange {
+ SubRange(CordRep* a_node, size_t a_pos, size_t a_n)
+ : node(a_node), pos(a_pos), n(a_n) {}
+ CordRep* node; // nullptr means concat last 2 results.
+ size_t pos;
+ size_t n;
+};
+
+static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) {
+ y_absl::InlinedVector<CordRep*, kInlinedVectorSize> results;
+ y_absl::InlinedVector<SubRange, kInlinedVectorSize> todo;
+ todo.push_back(SubRange(node, pos, n));
+ do {
+ const SubRange& sr = todo.back();
+ node = sr.node;
+ pos = sr.pos;
+ n = sr.n;
+ todo.pop_back();
+
+ if (node == nullptr) {
+ assert(results.size() >= 2);
+ CordRep* right = results.back();
+ results.pop_back();
+ CordRep* left = results.back();
+ results.pop_back();
+ results.push_back(Concat(left, right));
+ } else if (pos == 0 && n == node->length) {
+ results.push_back(CordRep::Ref(node));
+ } else if (!node->IsConcat()) {
+ if (node->IsSubstring()) {
+ pos += node->substring()->start;
+ node = node->substring()->child;
+ }
+ results.push_back(NewSubstring(CordRep::Ref(node), pos, n));
+ } else if (pos + n <= node->concat()->left->length) {
+ todo.push_back(SubRange(node->concat()->left, pos, n));
+ } else if (pos >= node->concat()->left->length) {
+ pos -= node->concat()->left->length;
+ todo.push_back(SubRange(node->concat()->right, pos, n));
+ } else {
+ size_t left_n = node->concat()->left->length - pos;
+ todo.push_back(SubRange(nullptr, 0, 0)); // Concat()
+ todo.push_back(SubRange(node->concat()->right, 0, n - left_n));
+ todo.push_back(SubRange(node->concat()->left, pos, left_n));
+ }
+ } while (!todo.empty());
+ assert(results.size() == 1);
+ return results[0];
+}
+
+Cord Cord::Subcord(size_t pos, size_t new_size) const {
+ Cord sub_cord;
+ size_t length = size();
+ if (pos > length) pos = length;
+ if (new_size > length - pos) new_size = length - pos;
+ if (new_size == 0) return sub_cord;
+
+ CordRep* tree = contents_.tree();
+ if (tree == nullptr) {
+ // sub_cord is newly constructed, no need to re-zero-out the tail of
+ // contents_ memory.
+ sub_cord.contents_.set_data(contents_.data() + pos, new_size, false);
+ return sub_cord;
+ }
+
+ if (new_size <= InlineRep::kMaxInline) {
+ char* dest = sub_cord.contents_.data_.as_chars();
+ Cord::ChunkIterator it = chunk_begin();
+ it.AdvanceBytes(pos);
+ size_t remaining_size = new_size;
+ while (remaining_size > it->size()) {
+ cord_internal::SmallMemmove(dest, it->data(), it->size());
+ remaining_size -= it->size();
+ dest += it->size();
+ ++it;
+ }
+ cord_internal::SmallMemmove(dest, it->data(), remaining_size);
+ sub_cord.contents_.set_inline_size(new_size);
+ return sub_cord;
+ }
+
+ if (tree->IsBtree()) {
+ tree = tree->btree()->SubTree(pos, new_size);
+ } else {
+ tree = NewSubRange(tree, pos, new_size);
+ }
+ sub_cord.contents_.EmplaceTree(tree, contents_.data_,
+ CordzUpdateTracker::kSubCord);
+ return sub_cord;
+}
+
+// --------------------------------------------------------------------
+// Balancing
+
+class CordForest {
+ public:
+ explicit CordForest(size_t length)
+ : root_length_(length), trees_(kMinLengthSize, nullptr) {}
+
+ void Build(CordRep* cord_root) {
+ std::vector<CordRep*> pending = {cord_root};
+
+ while (!pending.empty()) {
+ CordRep* node = pending.back();
+ pending.pop_back();
+ CheckNode(node);
+ if (ABSL_PREDICT_FALSE(!node->IsConcat())) {
+ AddNode(node);
+ continue;
+ }
+
+ CordRepConcat* concat_node = node->concat();
+ if (concat_node->depth() >= kMinLengthSize ||
+ concat_node->length < min_length[concat_node->depth()]) {
+ pending.push_back(concat_node->right);
+ pending.push_back(concat_node->left);
+
+ if (concat_node->refcount.IsOne()) {
+ concat_node->left = concat_freelist_;
+ concat_freelist_ = concat_node;
+ } else {
+ CordRep::Ref(concat_node->right);
+ CordRep::Ref(concat_node->left);
+ CordRep::Unref(concat_node);
+ }
+ } else {
+ AddNode(node);
+ }
+ }
+ }
+
+ CordRep* ConcatNodes() {
+ CordRep* sum = nullptr;
+ for (auto* node : trees_) {
+ if (node == nullptr) continue;
+
+ sum = PrependNode(node, sum);
+ root_length_ -= node->length;
+ if (root_length_ == 0) break;
+ }
+ ABSL_INTERNAL_CHECK(sum != nullptr, "Failed to locate sum node");
+ return VerifyTree(sum);
+ }
+
+ private:
+ CordRep* AppendNode(CordRep* node, CordRep* sum) {
+ return (sum == nullptr) ? node : MakeConcat(sum, node);
+ }
+
+ CordRep* PrependNode(CordRep* node, CordRep* sum) {
+ return (sum == nullptr) ? node : MakeConcat(node, sum);
+ }
+
+ void AddNode(CordRep* node) {
+ CordRep* sum = nullptr;
+
+    // Collect everything that we will merge with node.
+ int i = 0;
+ for (; node->length > min_length[i + 1]; ++i) {
+ auto& tree_at_i = trees_[i];
+
+ if (tree_at_i == nullptr) continue;
+ sum = PrependNode(tree_at_i, sum);
+ tree_at_i = nullptr;
+ }
+
+ sum = AppendNode(node, sum);
+
+ // Insert sum into appropriate place in the forest
+ for (; sum->length >= min_length[i]; ++i) {
+ auto& tree_at_i = trees_[i];
+ if (tree_at_i == nullptr) continue;
+
+ sum = MakeConcat(tree_at_i, sum);
+ tree_at_i = nullptr;
+ }
+
+ // min_length[0] == 1, which means sum->length >= min_length[0]
+ assert(i > 0);
+ trees_[i - 1] = sum;
+ }
+
+  // Makes a concat node, trying to reuse existing CordRepConcat nodes we
+  // already collected in the concat_freelist_.
+ CordRep* MakeConcat(CordRep* left, CordRep* right) {
+ if (concat_freelist_ == nullptr) return RawConcat(left, right);
+
+ CordRepConcat* rep = concat_freelist_;
+ if (concat_freelist_->left == nullptr) {
+ concat_freelist_ = nullptr;
+ } else {
+ concat_freelist_ = concat_freelist_->left->concat();
+ }
+ SetConcatChildren(rep, left, right);
+
+ return rep;
+ }
+
+ static void CheckNode(CordRep* node) {
+ ABSL_INTERNAL_CHECK(node->length != 0u, "");
+ if (node->IsConcat()) {
+ ABSL_INTERNAL_CHECK(node->concat()->left != nullptr, "");
+ ABSL_INTERNAL_CHECK(node->concat()->right != nullptr, "");
+ ABSL_INTERNAL_CHECK(node->length == (node->concat()->left->length +
+ node->concat()->right->length),
+ "");
+ }
+ }
+
+ size_t root_length_;
+
+  // Use an inlined vector instead of a flat array to get bounds checking.
+ y_absl::InlinedVector<CordRep*, kInlinedVectorSize> trees_;
+
+ // List of concat nodes we can re-use for Cord balancing.
+ CordRepConcat* concat_freelist_ = nullptr;
+};
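+
+// Note: AddNode() above behaves much like carry propagation in positional
+// addition: an incoming tree first absorbs all smaller occupied slots, and
+// the merged sum then cascades upward until it reaches a slot whose
+// min_length bound it satisfies. This keeps the rebuilt tree shallow.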
+
+static CordRep* Rebalance(CordRep* node) {
+ VerifyTree(node);
+ assert(node->IsConcat());
+
+ if (node->length == 0) {
+ return nullptr;
+ }
+
+ CordForest forest(node->length);
+ forest.Build(node);
+ return forest.ConcatNodes();
+}
+
+// --------------------------------------------------------------------
+// Comparators
+
+namespace {
+
+int ClampResult(int memcmp_res) {
+ return static_cast<int>(memcmp_res > 0) - static_cast<int>(memcmp_res < 0);
+}
+
+int CompareChunks(y_absl::string_view* lhs, y_absl::string_view* rhs,
+ size_t* size_to_compare) {
+ size_t compared_size = std::min(lhs->size(), rhs->size());
+ assert(*size_to_compare >= compared_size);
+ *size_to_compare -= compared_size;
+
+ int memcmp_res = ::memcmp(lhs->data(), rhs->data(), compared_size);
+ if (memcmp_res != 0) return memcmp_res;
+
+ lhs->remove_prefix(compared_size);
+ rhs->remove_prefix(compared_size);
+
+ return 0;
+}
+
+// This overload set computes comparison results from memcmp results. This
+// interface is used inside GenericCompare below. Different implementations
+// are specialized for int and bool. For int we clamp the result to the set
+// {-1, 0, 1}. For bool we are just interested in whether "value == 0".
+template <typename ResultType>
+ResultType ComputeCompareResult(int memcmp_res) {
+ return ClampResult(memcmp_res);
+}
+template <>
+bool ComputeCompareResult<bool>(int memcmp_res) {
+ return memcmp_res == 0;
+}
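+
+// For instance (a sketch of the mapping above):
+//   ComputeCompareResult<int>(-37) == -1
+//   ComputeCompareResult<int>(0)   == 0
+//   ComputeCompareResult<bool>(0)  == true  // "equal"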
+
+} // namespace
+
+// Helper routine. Locates the first flat or external chunk of the Cord without
+// initializing the iterator, and returns a string_view referencing the data.
+inline y_absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
+ if (!is_tree()) {
+ return y_absl::string_view(data_.as_chars(), data_.inline_size());
+ }
+
+ CordRep* node = tree();
+ if (node->IsFlat()) {
+ return y_absl::string_view(node->flat()->Data(), node->length);
+ }
+
+ if (node->IsExternal()) {
+ return y_absl::string_view(node->external()->base, node->length);
+ }
+
+ if (node->IsBtree()) {
+ CordRepBtree* tree = node->btree();
+ int height = tree->height();
+ while (--height >= 0) {
+ tree = tree->Edge(CordRepBtree::kFront)->btree();
+ }
+ return tree->Data(tree->begin());
+ }
+
+ // Walk down the left branches until we hit a non-CONCAT node.
+ while (node->IsConcat()) {
+ node = node->concat()->left;
+ }
+
+ // Get the child node if we encounter a SUBSTRING.
+ size_t offset = 0;
+ size_t length = node->length;
+ assert(length != 0);
+
+ if (node->IsSubstring()) {
+ offset = node->substring()->start;
+ node = node->substring()->child;
+ }
+
+ if (node->IsFlat()) {
+ return y_absl::string_view(node->flat()->Data() + offset, length);
+ }
+
+ assert(node->IsExternal() && "Expect FLAT or EXTERNAL node here");
+
+ return y_absl::string_view(node->external()->base + offset, length);
+}
+
+inline int Cord::CompareSlowPath(y_absl::string_view rhs, size_t compared_size,
+ size_t size_to_compare) const {
+ auto advance = [](Cord::ChunkIterator* it, y_absl::string_view* chunk) {
+ if (!chunk->empty()) return true;
+ ++*it;
+ if (it->bytes_remaining_ == 0) return false;
+ *chunk = **it;
+ return true;
+ };
+
+ Cord::ChunkIterator lhs_it = chunk_begin();
+
+ // compared_size is inside first chunk.
+ y_absl::string_view lhs_chunk =
+ (lhs_it.bytes_remaining_ != 0) ? *lhs_it : y_absl::string_view();
+ assert(compared_size <= lhs_chunk.size());
+ assert(compared_size <= rhs.size());
+ lhs_chunk.remove_prefix(compared_size);
+ rhs.remove_prefix(compared_size);
+ size_to_compare -= compared_size; // skip already compared size.
+
+ while (advance(&lhs_it, &lhs_chunk) && !rhs.empty()) {
+ int comparison_result = CompareChunks(&lhs_chunk, &rhs, &size_to_compare);
+ if (comparison_result != 0) return comparison_result;
+ if (size_to_compare == 0) return 0;
+ }
+
+ return static_cast<int>(rhs.empty()) - static_cast<int>(lhs_chunk.empty());
+}
+
+inline int Cord::CompareSlowPath(const Cord& rhs, size_t compared_size,
+ size_t size_to_compare) const {
+ auto advance = [](Cord::ChunkIterator* it, y_absl::string_view* chunk) {
+ if (!chunk->empty()) return true;
+ ++*it;
+ if (it->bytes_remaining_ == 0) return false;
+ *chunk = **it;
+ return true;
+ };
+
+ Cord::ChunkIterator lhs_it = chunk_begin();
+ Cord::ChunkIterator rhs_it = rhs.chunk_begin();
+
+ // compared_size is inside both first chunks.
+ y_absl::string_view lhs_chunk =
+ (lhs_it.bytes_remaining_ != 0) ? *lhs_it : y_absl::string_view();
+ y_absl::string_view rhs_chunk =
+ (rhs_it.bytes_remaining_ != 0) ? *rhs_it : y_absl::string_view();
+ assert(compared_size <= lhs_chunk.size());
+ assert(compared_size <= rhs_chunk.size());
+ lhs_chunk.remove_prefix(compared_size);
+ rhs_chunk.remove_prefix(compared_size);
+ size_to_compare -= compared_size; // skip already compared size.
+
+ while (advance(&lhs_it, &lhs_chunk) && advance(&rhs_it, &rhs_chunk)) {
+ int memcmp_res = CompareChunks(&lhs_chunk, &rhs_chunk, &size_to_compare);
+ if (memcmp_res != 0) return memcmp_res;
+ if (size_to_compare == 0) return 0;
+ }
+
+ return static_cast<int>(rhs_chunk.empty()) -
+ static_cast<int>(lhs_chunk.empty());
+}
+
+inline y_absl::string_view Cord::GetFirstChunk(const Cord& c) {
+ return c.contents_.FindFlatStartPiece();
+}
+inline y_absl::string_view Cord::GetFirstChunk(y_absl::string_view sv) {
+ return sv;
+}
+
+// Compares up to 'size_to_compare' bytes of 'lhs' with 'rhs'. It is assumed
+// that 'size_to_compare' is greater than or equal to the size of the smaller
+// of the two first chunks.
+template <typename ResultType, typename RHS>
+ResultType GenericCompare(const Cord& lhs, const RHS& rhs,
+ size_t size_to_compare) {
+ y_absl::string_view lhs_chunk = Cord::GetFirstChunk(lhs);
+ y_absl::string_view rhs_chunk = Cord::GetFirstChunk(rhs);
+
+ size_t compared_size = std::min(lhs_chunk.size(), rhs_chunk.size());
+ assert(size_to_compare >= compared_size);
+ int memcmp_res = ::memcmp(lhs_chunk.data(), rhs_chunk.data(), compared_size);
+ if (compared_size == size_to_compare || memcmp_res != 0) {
+ return ComputeCompareResult<ResultType>(memcmp_res);
+ }
+
+ return ComputeCompareResult<ResultType>(
+ lhs.CompareSlowPath(rhs, compared_size, size_to_compare));
+}
+
+bool Cord::EqualsImpl(y_absl::string_view rhs, size_t size_to_compare) const {
+ return GenericCompare<bool>(*this, rhs, size_to_compare);
+}
+
+bool Cord::EqualsImpl(const Cord& rhs, size_t size_to_compare) const {
+ return GenericCompare<bool>(*this, rhs, size_to_compare);
+}
+
+template <typename RHS>
+inline int SharedCompareImpl(const Cord& lhs, const RHS& rhs) {
+ size_t lhs_size = lhs.size();
+ size_t rhs_size = rhs.size();
+ if (lhs_size == rhs_size) {
+ return GenericCompare<int>(lhs, rhs, lhs_size);
+ }
+ if (lhs_size < rhs_size) {
+ auto data_comp_res = GenericCompare<int>(lhs, rhs, lhs_size);
+ return data_comp_res == 0 ? -1 : data_comp_res;
+ }
+
+ auto data_comp_res = GenericCompare<int>(lhs, rhs, rhs_size);
+ return data_comp_res == 0 ? +1 : data_comp_res;
+}
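+
+// E.g. (illustrative): comparing "abc" against "abcd" first compares the
+// common 3-byte prefix; the data ties, and since lhs is shorter the overall
+// result is -1.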
+
+int Cord::Compare(y_absl::string_view rhs) const {
+ return SharedCompareImpl(*this, rhs);
+}
+
+int Cord::CompareImpl(const Cord& rhs) const {
+ return SharedCompareImpl(*this, rhs);
+}
+
+bool Cord::EndsWith(y_absl::string_view rhs) const {
+ size_t my_size = size();
+ size_t rhs_size = rhs.size();
+
+ if (my_size < rhs_size) return false;
+
+ Cord tmp(*this);
+ tmp.RemovePrefix(my_size - rhs_size);
+ return tmp.EqualsImpl(rhs, rhs_size);
+}
+
+bool Cord::EndsWith(const Cord& rhs) const {
+ size_t my_size = size();
+ size_t rhs_size = rhs.size();
+
+ if (my_size < rhs_size) return false;
+
+ Cord tmp(*this);
+ tmp.RemovePrefix(my_size - rhs_size);
+ return tmp.EqualsImpl(rhs, rhs_size);
+}
+
+// --------------------------------------------------------------------
+// Misc.
+
+Cord::operator TString() const {
+ TString s;
+ y_absl::CopyCordToString(*this, &s);
+ return s;
+}
+
+void CopyCordToString(const Cord& src, TString* dst) {
+ if (!src.contents_.is_tree()) {
+ src.contents_.CopyTo(dst);
+ } else {
+ y_absl::strings_internal::STLStringResizeUninitialized(dst, src.size());
+ src.CopyToArraySlowPath(&(*dst)[0]);
+ }
+}
+
+void Cord::CopyToArraySlowPath(char* dst) const {
+ assert(contents_.is_tree());
+ y_absl::string_view fragment;
+ if (GetFlatAux(contents_.tree(), &fragment)) {
+ memcpy(dst, fragment.data(), fragment.size());
+ return;
+ }
+ for (y_absl::string_view chunk : Chunks()) {
+ memcpy(dst, chunk.data(), chunk.size());
+ dst += chunk.size();
+ }
+}
+
+Cord::ChunkIterator& Cord::ChunkIterator::AdvanceStack() {
+ auto& stack_of_right_children = stack_of_right_children_;
+ if (stack_of_right_children.empty()) {
+ assert(!current_chunk_.empty()); // Called on invalid iterator.
+ // We have reached the end of the Cord.
+ return *this;
+ }
+
+ // Process the next node on the stack.
+ CordRep* node = stack_of_right_children.back();
+ stack_of_right_children.pop_back();
+
+ // Walk down the left branches until we hit a non-CONCAT node. Save the
+ // right children to the stack for subsequent traversal.
+ while (node->IsConcat()) {
+ stack_of_right_children.push_back(node->concat()->right);
+ node = node->concat()->left;
+ }
+
+ // Get the child node if we encounter a SUBSTRING.
+ size_t offset = 0;
+ size_t length = node->length;
+ if (node->IsSubstring()) {
+ offset = node->substring()->start;
+ node = node->substring()->child;
+ }
+
+ assert(node->IsExternal() || node->IsFlat());
+ assert(length != 0);
+ const char* data =
+ node->IsExternal() ? node->external()->base : node->flat()->Data();
+ current_chunk_ = y_absl::string_view(data + offset, length);
+ current_leaf_ = node;
+ return *this;
+}
+
+Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
+ ABSL_HARDENING_ASSERT(bytes_remaining_ >= n &&
+ "Attempted to iterate past `end()`");
+ Cord subcord;
+ auto constexpr method = CordzUpdateTracker::kCordReader;
+
+ if (n <= InlineRep::kMaxInline) {
+ // Range to read fits in inline data. Flatten it.
+ char* data = subcord.contents_.set_data(n);
+ while (n > current_chunk_.size()) {
+ memcpy(data, current_chunk_.data(), current_chunk_.size());
+ data += current_chunk_.size();
+ n -= current_chunk_.size();
+ ++*this;
+ }
+ memcpy(data, current_chunk_.data(), n);
+ if (n < current_chunk_.size()) {
+ RemoveChunkPrefix(n);
+ } else if (n > 0) {
+ ++*this;
+ }
+ return subcord;
+ }
+
+ if (btree_reader_) {
+ size_t chunk_size = current_chunk_.size();
+ if (n <= chunk_size && n <= kMaxBytesToCopy) {
+ subcord = Cord(current_chunk_.substr(0, n), method);
+ if (n < chunk_size) {
+ current_chunk_.remove_prefix(n);
+ } else {
+ current_chunk_ = btree_reader_.Next();
+ }
+ } else {
+ CordRep* rep;
+ current_chunk_ = btree_reader_.Read(n, chunk_size, rep);
+ subcord.contents_.EmplaceTree(rep, method);
+ }
+ bytes_remaining_ -= n;
+ return subcord;
+ }
+
+ auto& stack_of_right_children = stack_of_right_children_;
+ if (n < current_chunk_.size()) {
+ // Range to read is a proper subrange of the current chunk.
+ assert(current_leaf_ != nullptr);
+ CordRep* subnode = CordRep::Ref(current_leaf_);
+ const char* data = subnode->IsExternal() ? subnode->external()->base
+ : subnode->flat()->Data();
+ subnode = NewSubstring(subnode, current_chunk_.data() - data, n);
+ subcord.contents_.EmplaceTree(VerifyTree(subnode), method);
+ RemoveChunkPrefix(n);
+ return subcord;
+ }
+
+ // Range to read begins with a proper subrange of the current chunk.
+ assert(!current_chunk_.empty());
+ assert(current_leaf_ != nullptr);
+ CordRep* subnode = CordRep::Ref(current_leaf_);
+ if (current_chunk_.size() < subnode->length) {
+ const char* data = subnode->IsExternal() ? subnode->external()->base
+ : subnode->flat()->Data();
+ subnode = NewSubstring(subnode, current_chunk_.data() - data,
+ current_chunk_.size());
+ }
+ n -= current_chunk_.size();
+ bytes_remaining_ -= current_chunk_.size();
+
+ // Process the next node(s) on the stack, reading whole subtrees depending on
+ // their length and how many bytes we are advancing.
+ CordRep* node = nullptr;
+ while (!stack_of_right_children.empty()) {
+ node = stack_of_right_children.back();
+ stack_of_right_children.pop_back();
+ if (node->length > n) break;
+ // TODO(qrczak): This might unnecessarily recreate existing concat nodes.
+ // Avoiding that would need pretty complicated logic (instead of
+ // current_leaf, keep current_subtree_ which points to the highest node
+ // such that the current leaf can be found on the path of left children
+ // starting from current_subtree_; delay creating subnode while node is
+ // below current_subtree_; find the proper node along the path of left
+ // children starting from current_subtree_ if this loop exits while staying
+ // below current_subtree_; etc.; alternatively, push parents instead of
+ // right children on the stack).
+ subnode = Concat(subnode, CordRep::Ref(node));
+ n -= node->length;
+ bytes_remaining_ -= node->length;
+ node = nullptr;
+ }
+
+ if (node == nullptr) {
+ // We have reached the end of the Cord.
+ assert(bytes_remaining_ == 0);
+ subcord.contents_.EmplaceTree(VerifyTree(subnode), method);
+ return subcord;
+ }
+
+ // Walk down the appropriate branches until we hit a non-CONCAT node. Save the
+ // right children to the stack for subsequent traversal.
+ while (node->IsConcat()) {
+ if (node->concat()->left->length > n) {
+ // Push right, descend left.
+ stack_of_right_children.push_back(node->concat()->right);
+ node = node->concat()->left;
+ } else {
+ // Read left, descend right.
+ subnode = Concat(subnode, CordRep::Ref(node->concat()->left));
+ n -= node->concat()->left->length;
+ bytes_remaining_ -= node->concat()->left->length;
+ node = node->concat()->right;
+ }
+ }
+
+ // Get the child node if we encounter a SUBSTRING.
+ size_t offset = 0;
+ size_t length = node->length;
+ if (node->IsSubstring()) {
+ offset = node->substring()->start;
+ node = node->substring()->child;
+ }
+
+ // Range to read ends with a proper (possibly empty) subrange of the current
+ // chunk.
+ assert(node->IsExternal() || node->IsFlat());
+ assert(length > n);
+ if (n > 0) {
+ subnode = Concat(subnode, NewSubstring(CordRep::Ref(node), offset, n));
+ }
+ const char* data =
+ node->IsExternal() ? node->external()->base : node->flat()->Data();
+ current_chunk_ = y_absl::string_view(data + offset + n, length - n);
+ current_leaf_ = node;
+ bytes_remaining_ -= n;
+ subcord.contents_.EmplaceTree(VerifyTree(subnode), method);
+ return subcord;
+}
+
+void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) {
+ assert(bytes_remaining_ >= n && "Attempted to iterate past `end()`");
+ assert(n >= current_chunk_.size()); // This should only be called when
+ // iterating to a new node.
+
+ n -= current_chunk_.size();
+ bytes_remaining_ -= current_chunk_.size();
+
+ if (stack_of_right_children_.empty()) {
+ // We have reached the end of the Cord.
+ assert(bytes_remaining_ == 0);
+ return;
+ }
+
+ // Process the next node(s) on the stack, skipping whole subtrees depending on
+ // their length and how many bytes we are advancing.
+ CordRep* node = nullptr;
+ auto& stack_of_right_children = stack_of_right_children_;
+ while (!stack_of_right_children.empty()) {
+ node = stack_of_right_children.back();
+ stack_of_right_children.pop_back();
+ if (node->length > n) break;
+ n -= node->length;
+ bytes_remaining_ -= node->length;
+ node = nullptr;
+ }
+
+ if (node == nullptr) {
+ // We have reached the end of the Cord.
+ assert(bytes_remaining_ == 0);
+ return;
+ }
+
+ // Walk down the appropriate branches until we hit a non-CONCAT node. Save the
+ // right children to the stack for subsequent traversal.
+ while (node->IsConcat()) {
+ if (node->concat()->left->length > n) {
+ // Push right, descend left.
+ stack_of_right_children.push_back(node->concat()->right);
+ node = node->concat()->left;
+ } else {
+ // Skip left, descend right.
+ n -= node->concat()->left->length;
+ bytes_remaining_ -= node->concat()->left->length;
+ node = node->concat()->right;
+ }
+ }
+
+ // Get the child node if we encounter a SUBSTRING.
+ size_t offset = 0;
+ size_t length = node->length;
+ if (node->IsSubstring()) {
+ offset = node->substring()->start;
+ node = node->substring()->child;
+ }
+
+ assert(node->IsExternal() || node->IsFlat());
+ assert(length > n);
+ const char* data =
+ node->IsExternal() ? node->external()->base : node->flat()->Data();
+ current_chunk_ = y_absl::string_view(data + offset + n, length - n);
+ current_leaf_ = node;
+ bytes_remaining_ -= n;
+}
+
+char Cord::operator[](size_t i) const {
+ ABSL_HARDENING_ASSERT(i < size());
+ size_t offset = i;
+ const CordRep* rep = contents_.tree();
+ if (rep == nullptr) {
+ return contents_.data()[i];
+ }
+ while (true) {
+ assert(rep != nullptr);
+ assert(offset < rep->length);
+ if (rep->IsFlat()) {
+ // Get the "i"th character directly from the flat array.
+ return rep->flat()->Data()[offset];
+ } else if (rep->IsBtree()) {
+ return rep->btree()->GetCharacter(offset);
+ } else if (rep->IsExternal()) {
+ // Get the "i"th character from the external array.
+ return rep->external()->base[offset];
+ } else if (rep->IsConcat()) {
+ // Recursively branch to the side of the concatenation that the "i"th
+ // character is on.
+ size_t left_length = rep->concat()->left->length;
+ if (offset < left_length) {
+ rep = rep->concat()->left;
+ } else {
+ offset -= left_length;
+ rep = rep->concat()->right;
+ }
+ } else {
+      // This must be a substring node, so bypass it to get to the child.
+ assert(rep->IsSubstring());
+ offset += rep->substring()->start;
+ rep = rep->substring()->child;
+ }
+ }
+}
+
+y_absl::string_view Cord::FlattenSlowPath() {
+ assert(contents_.is_tree());
+ size_t total_size = size();
+ CordRep* new_rep;
+ char* new_buffer;
+
+ // Try to put the contents into a new flat rep. If they won't fit in the
+ // biggest possible flat node, use an external rep instead.
+ if (total_size <= kMaxFlatLength) {
+ new_rep = CordRepFlat::New(total_size);
+ new_rep->length = total_size;
+ new_buffer = new_rep->flat()->Data();
+ CopyToArraySlowPath(new_buffer);
+ } else {
+ new_buffer = std::allocator<char>().allocate(total_size);
+ CopyToArraySlowPath(new_buffer);
+ new_rep = y_absl::cord_internal::NewExternalRep(
+ y_absl::string_view(new_buffer, total_size), [](y_absl::string_view s) {
+ std::allocator<char>().deallocate(const_cast<char*>(s.data()),
+ s.size());
+ });
+ }
+ CordzUpdateScope scope(contents_.cordz_info(), CordzUpdateTracker::kFlatten);
+ CordRep::Unref(contents_.as_tree());
+ contents_.SetTree(new_rep, scope);
+ return y_absl::string_view(new_buffer, total_size);
+}
+
+/* static */ bool Cord::GetFlatAux(CordRep* rep, y_absl::string_view* fragment) {
+ assert(rep != nullptr);
+ if (rep->IsFlat()) {
+ *fragment = y_absl::string_view(rep->flat()->Data(), rep->length);
+ return true;
+ } else if (rep->IsExternal()) {
+ *fragment = y_absl::string_view(rep->external()->base, rep->length);
+ return true;
+ } else if (rep->IsBtree()) {
+ return rep->btree()->IsFlat(fragment);
+ } else if (rep->IsSubstring()) {
+ CordRep* child = rep->substring()->child;
+ if (child->IsFlat()) {
+ *fragment = y_absl::string_view(
+ child->flat()->Data() + rep->substring()->start, rep->length);
+ return true;
+ } else if (child->IsExternal()) {
+ *fragment = y_absl::string_view(
+ child->external()->base + rep->substring()->start, rep->length);
+ return true;
+ } else if (child->IsBtree()) {
+ return child->btree()->IsFlat(rep->substring()->start, rep->length,
+ fragment);
+ }
+ }
+ return false;
+}
+
+/* static */ void Cord::ForEachChunkAux(
+ y_absl::cord_internal::CordRep* rep,
+ y_absl::FunctionRef<void(y_absl::string_view)> callback) {
+ if (rep->IsBtree()) {
+ ChunkIterator it(rep), end;
+ while (it != end) {
+ callback(*it);
+ ++it;
+ }
+ return;
+ }
+
+ assert(rep != nullptr);
+ int stack_pos = 0;
+ constexpr int stack_max = 128;
+ // Stack of right branches for tree traversal
+ y_absl::cord_internal::CordRep* stack[stack_max];
+ y_absl::cord_internal::CordRep* current_node = rep;
+ while (true) {
+ if (current_node->IsConcat()) {
+ if (stack_pos == stack_max) {
+ // There's no more room on our stack array to add another right branch,
+ // and the idea is to avoid allocations, so call this function
+ // recursively to navigate this subtree further. (This is not something
+ // we expect to happen in practice).
+ ForEachChunkAux(current_node, callback);
+
+ // Pop the next right branch and iterate.
+ current_node = stack[--stack_pos];
+ continue;
+ } else {
+ // Save the right branch for later traversal and continue down the left
+ // branch.
+ stack[stack_pos++] = current_node->concat()->right;
+ current_node = current_node->concat()->left;
+ continue;
+ }
+ }
+ // This is a leaf node, so invoke our callback.
+ y_absl::string_view chunk;
+ bool success = GetFlatAux(current_node, &chunk);
+ assert(success);
+ if (success) {
+ callback(chunk);
+ }
+ if (stack_pos == 0) {
+ // end of traversal
+ return;
+ }
+ current_node = stack[--stack_pos];
+ }
+}
+
+static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
+ int indent) {
+ const int kIndentStep = 1;
+ y_absl::InlinedVector<CordRep*, kInlinedVectorSize> stack;
+ y_absl::InlinedVector<int, kInlinedVectorSize> indents;
+ for (;;) {
+ *os << std::setw(3) << rep->refcount.Get();
+ *os << " " << std::setw(7) << rep->length;
+ *os << " [";
+ if (include_data) *os << static_cast<void*>(rep);
+ *os << "]";
+ *os << " " << (IsRootBalanced(rep) ? 'b' : 'u');
+ *os << " " << std::setw(indent) << "";
+ if (rep->IsConcat()) {
+ *os << "CONCAT depth=" << Depth(rep) << "\n";
+ indent += kIndentStep;
+ indents.push_back(indent);
+ stack.push_back(rep->concat()->right);
+ rep = rep->concat()->left;
+ } else if (rep->IsSubstring()) {
+ *os << "SUBSTRING @ " << rep->substring()->start << "\n";
+ indent += kIndentStep;
+ rep = rep->substring()->child;
+    } else {  // Leaf or btree
+ if (rep->IsExternal()) {
+ *os << "EXTERNAL [";
+ if (include_data)
+ *os << y_absl::CEscape(TString(rep->external()->base, rep->length));
+ *os << "]\n";
+ } else if (rep->IsFlat()) {
+ *os << "FLAT cap=" << rep->flat()->Capacity() << " [";
+ if (include_data)
+ *os << y_absl::CEscape(TString(rep->flat()->Data(), rep->length));
+ *os << "]\n";
+ } else {
+ CordRepBtree::Dump(rep, /*label=*/ "", include_data, *os);
+ }
+ if (stack.empty()) break;
+ rep = stack.back();
+ stack.pop_back();
+ indent = indents.back();
+ indents.pop_back();
+ }
+ }
+ ABSL_INTERNAL_CHECK(indents.empty(), "");
+}
+
+static TString ReportError(CordRep* root, CordRep* node) {
+ std::ostringstream buf;
+ buf << "Error at node " << node << " in:";
+ DumpNode(root, true, &buf);
+ return TString(buf.str());
+}
+
+static bool VerifyNode(CordRep* root, CordRep* start_node,
+ bool full_validation) {
+ y_absl::InlinedVector<CordRep*, 2> worklist;
+ worklist.push_back(start_node);
+ do {
+ CordRep* node = worklist.back();
+ worklist.pop_back();
+
+ ABSL_INTERNAL_CHECK(node != nullptr, ReportError(root, node));
+ if (node != root) {
+ ABSL_INTERNAL_CHECK(node->length != 0, ReportError(root, node));
+ }
+
+ if (node->IsConcat()) {
+ ABSL_INTERNAL_CHECK(node->concat()->left != nullptr,
+ ReportError(root, node));
+ ABSL_INTERNAL_CHECK(node->concat()->right != nullptr,
+ ReportError(root, node));
+ ABSL_INTERNAL_CHECK((node->length == node->concat()->left->length +
+ node->concat()->right->length),
+ ReportError(root, node));
+ if (full_validation) {
+ worklist.push_back(node->concat()->right);
+ worklist.push_back(node->concat()->left);
+ }
+ } else if (node->IsFlat()) {
+ ABSL_INTERNAL_CHECK(node->length <= node->flat()->Capacity(),
+ ReportError(root, node));
+ } else if (node->IsExternal()) {
+ ABSL_INTERNAL_CHECK(node->external()->base != nullptr,
+ ReportError(root, node));
+ } else if (node->IsSubstring()) {
+ ABSL_INTERNAL_CHECK(
+ node->substring()->start < node->substring()->child->length,
+ ReportError(root, node));
+ ABSL_INTERNAL_CHECK(node->substring()->start + node->length <=
+ node->substring()->child->length,
+ ReportError(root, node));
+ }
+ } while (!worklist.empty());
+ return true;
+}
+
+// Traverses the tree and computes the total memory allocated.
+/* static */ size_t Cord::MemoryUsageAux(const CordRep* rep) {
+ size_t total_mem_usage = 0;
+
+ // Allow a quick exit for the common case that the root is a leaf.
+ if (RepMemoryUsageLeaf(rep, &total_mem_usage)) {
+ return total_mem_usage;
+ }
+
+ // Iterate over the tree. cur_node is never a leaf node and leaf nodes will
+ // never be appended to tree_stack. This reduces overhead from manipulating
+ // tree_stack.
+ y_absl::InlinedVector<const CordRep*, kInlinedVectorSize> tree_stack;
+ const CordRep* cur_node = rep;
+ while (true) {
+ const CordRep* next_node = nullptr;
+
+ if (cur_node->IsConcat()) {
+ total_mem_usage += sizeof(CordRepConcat);
+ const CordRep* left = cur_node->concat()->left;
+ if (!RepMemoryUsageLeaf(left, &total_mem_usage)) {
+ next_node = left;
+ }
+
+ const CordRep* right = cur_node->concat()->right;
+ if (!RepMemoryUsageLeaf(right, &total_mem_usage)) {
+ if (next_node) {
+ tree_stack.push_back(next_node);
+ }
+ next_node = right;
+ }
+ } else if (cur_node->IsBtree()) {
+ total_mem_usage += sizeof(CordRepBtree);
+ const CordRepBtree* node = cur_node->btree();
+ if (node->height() == 0) {
+ for (const CordRep* edge : node->Edges()) {
+ RepMemoryUsageDataEdge(edge, &total_mem_usage);
+ }
+ } else {
+ for (const CordRep* edge : node->Edges()) {
+ tree_stack.push_back(edge);
+ }
+ }
+ } else {
+ // Since cur_node is not a leaf or a concat node it must be a substring.
+ assert(cur_node->IsSubstring());
+ total_mem_usage += sizeof(CordRepSubstring);
+ next_node = cur_node->substring()->child;
+ if (RepMemoryUsageLeaf(next_node, &total_mem_usage)) {
+ next_node = nullptr;
+ }
+ }
+
+ if (!next_node) {
+ if (tree_stack.empty()) {
+ return total_mem_usage;
+ }
+ next_node = tree_stack.back();
+ tree_stack.pop_back();
+ }
+ cur_node = next_node;
+ }
+}
+
+std::ostream& operator<<(std::ostream& out, const Cord& cord) {
+ for (y_absl::string_view chunk : cord.Chunks()) {
+ out.write(chunk.data(), chunk.size());
+ }
+ return out;
+}
+
+namespace strings_internal {
+size_t CordTestAccess::FlatOverhead() { return cord_internal::kFlatOverhead; }
+size_t CordTestAccess::MaxFlatLength() { return cord_internal::kMaxFlatLength; }
+size_t CordTestAccess::FlatTagToLength(uint8_t tag) {
+ return cord_internal::TagToLength(tag);
+}
+uint8_t CordTestAccess::LengthToTag(size_t s) {
+ ABSL_INTERNAL_CHECK(s <= kMaxFlatLength, y_absl::StrCat("Invalid length ", s));
+ return cord_internal::AllocatedSizeToTag(s + cord_internal::kFlatOverhead);
+}
+size_t CordTestAccess::SizeofCordRepConcat() { return sizeof(CordRepConcat); }
+size_t CordTestAccess::SizeofCordRepExternal() {
+ return sizeof(CordRepExternal);
+}
+size_t CordTestAccess::SizeofCordRepSubstring() {
+ return sizeof(CordRepSubstring);
+}
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h
new file mode 100644
index 00000000000..62359e0cf8e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h
@@ -0,0 +1,1521 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: cord.h
+// -----------------------------------------------------------------------------
+//
+// This file defines the `y_absl::Cord` data structure and operations on that data
+// structure. A Cord is a string-like sequence of characters optimized for
+// specific use cases. Unlike a `TString`, which stores an array of
+// contiguous characters, Cord data is stored in a structure consisting of
+// separate, reference-counted "chunks." (Currently, this implementation is a
+// tree structure, though that implementation may change.)
+//
+// Because a Cord consists of these chunks, data can be added to or removed from
+// a Cord during its lifetime. Chunks may also be shared between Cords. Unlike a
+// `TString`, a Cord can therefore accommodate data that changes over its
+// lifetime, though it's not quite "mutable"; it can change only in the
+// attachment, detachment, or rearrangement of chunks of its constituent data.
+//
+// A Cord provides some benefit over `TString` under the following (albeit
+// narrow) circumstances:
+//
+// * Cord data is designed to grow and shrink over a Cord's lifetime. Cord
+// provides efficient insertions and deletions at the start and end of the
+// character sequences, avoiding copies in those cases. Static data should
+// generally be stored as strings.
+// * External memory consisting of string-like data can be directly added to
+// a Cord without requiring copies or allocations.
+// * Cord data may be shared and copied cheaply. Cord provides a copy-on-write
+// implementation and cheap sub-Cord operations. Copying a Cord is an O(1)
+// operation.
+//
+// As a consequence of the above, Cord data is generally large. Small data
+// should generally use strings, as construction of a Cord requires some
+// overhead. Small Cords (<= 15 bytes) are represented inline, but most small
+// Cords are expected to grow over their lifetimes.
+//
+// Note that because a Cord is made up of separate chunked data, random access
+// to character data within a Cord is slower than within a `TString`.
+//
+// Thread Safety
+//
+// Cord has the same thread-safety properties as many other types like
+// TString, std::vector<>, int, etc -- it is thread-compatible. In
+// particular, if threads do not call non-const methods, then it is safe to call
+// const methods without synchronization. Copying a Cord produces a new instance
+// that can be used concurrently with the original in arbitrary ways.
+
+#ifndef ABSL_STRINGS_CORD_H_
+#define ABSL_STRINGS_CORD_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <iosfwd>
+#include <iterator>
+#include <util/generic/string.h>
+#include <type_traits>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/endian.h"
+#include "y_absl/base/internal/per_thread_tls.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+#include "y_absl/container/inlined_vector.h"
+#include "y_absl/functional/function_ref.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_btree_reader.h"
+#include "y_absl/strings/internal/cord_rep_ring.h"
+#include "y_absl/strings/internal/cordz_functions.h"
+#include "y_absl/strings/internal/cordz_info.h"
+#include "y_absl/strings/internal/cordz_statistics.h"
+#include "y_absl/strings/internal/cordz_update_scope.h"
+#include "y_absl/strings/internal/cordz_update_tracker.h"
+#include "y_absl/strings/internal/resize_uninitialized.h"
+#include "y_absl/strings/internal/string_constant.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/types/optional.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+class Cord;
+class CordTestPeer;
+template <typename Releaser>
+Cord MakeCordFromExternal(y_absl::string_view, Releaser&&);
+void CopyCordToString(const Cord& src, TString* dst);
+
+// Cord
+//
+// A Cord is a sequence of characters, designed to be more efficient than a
+// `TString` in certain circumstances: namely, large string data that needs
+// to change over its lifetime or be shared, especially when such data is
+// shared across API boundaries.
+//
+// A Cord stores its character data in a structure that allows efficient prepend
+// and append operations. This makes a Cord useful for large string data sent
+// over the wire in a format that may need to be prepended or appended at some
+// point during the data exchange (e.g. HTTP, protocol buffers). For example, a
+// Cord is useful for storing an HTTP request and prepending an HTTP header to
+// such a request.
+//
+// Cords should not be used for storing general string data, however. They
+// require overhead to construct and are slower than strings for random access.
+//
+// The Cord API provides the following common API operations:
+//
+// * Create or assign Cords out of existing string data, memory, or other Cords
+// * Append and prepend data to an existing Cord
+// * Create new Sub-Cords from existing Cord data
+// * Swap Cord data and compare Cord equality
+// * Write out Cord data by constructing a `TString`
+//
+// Additionally, the API provides iterator utilities to iterate through Cord
+// data via chunks or character bytes.
+//
+class Cord {
+ private:
+ template <typename T>
+ using EnableIfString =
+ y_absl::enable_if_t<std::is_same<T, TString>::value, int>;
+
+ public:
+ // Cord::Cord() Constructors.
+
+ // Creates an empty Cord.
+ constexpr Cord() noexcept;
+
+ // Creates a Cord from an existing Cord. Cord is copyable and efficiently
+ // movable. The moved-from state is valid but unspecified.
+ Cord(const Cord& src);
+ Cord(Cord&& src) noexcept;
+ Cord& operator=(const Cord& x);
+ Cord& operator=(Cord&& x) noexcept;
+
+ // Creates a Cord from a `src` string. This constructor is marked explicit to
+ // prevent implicit Cord constructions from arguments convertible to an
+ // `y_absl::string_view`.
+ explicit Cord(y_absl::string_view src);
+ Cord& operator=(y_absl::string_view src);
+
+ // Creates a Cord from a `TString&&` rvalue. These constructors are
+ // templated to avoid ambiguities for types that are convertible to both
+ // `y_absl::string_view` and `TString`, such as `const char*`.
+ template <typename T, EnableIfString<T> = 0>
+ explicit Cord(T&& src);
+ template <typename T, EnableIfString<T> = 0>
+ Cord& operator=(T&& src);
+
+ // Cord::~Cord()
+ //
+ // Destructs the Cord.
+ ~Cord() {
+ if (contents_.is_tree()) DestroyCordSlow();
+ }
+
+ // MakeCordFromExternal()
+ //
+ // Creates a Cord that takes ownership of external string memory. The
+ // contents of `data` are not copied to the Cord; instead, the external
+ // memory is added to the Cord and reference-counted. This data may not be
+ // changed for the life of the Cord, though it may be prepended or appended
+ // to.
+ //
+ // `MakeCordFromExternal()` takes a callable "releaser" that is invoked when
+ // the reference count for `data` reaches zero. As noted above, this data must
+ // remain live until the releaser is invoked. The callable releaser also must:
+ //
+ // * be move constructible
+ // * support `void operator()(y_absl::string_view) const` or `void operator()`
+ //
+ // Example:
+ //
+ // Cord MakeCord(BlockPool* pool) {
+ // Block* block = pool->NewBlock();
+ // FillBlock(block);
+ // return y_absl::MakeCordFromExternal(
+ // block->ToStringView(),
+ // [pool, block](y_absl::string_view v) {
+ // pool->FreeBlock(block, v);
+ // });
+ // }
+ //
+ // WARNING: Because a Cord can be reference-counted, it's likely a bug if your
+ // releaser doesn't do anything. For example, consider the following:
+ //
+ // void Foo(const char* buffer, int len) {
+ // auto c = y_absl::MakeCordFromExternal(y_absl::string_view(buffer, len),
+ // [](y_absl::string_view) {});
+ //
+ // // BUG: If Bar() copies its cord for any reason, including keeping a
+ // // substring of it, the lifetime of buffer might be extended beyond
+ // // when Foo() returns.
+ // Bar(c);
+ // }
+ template <typename Releaser>
+ friend Cord MakeCordFromExternal(y_absl::string_view data, Releaser&& releaser);
+
+ // Cord::Clear()
+ //
+ // Releases the Cord data. Any nodes that share data with other Cords, if
+ // applicable, will have their reference counts reduced by 1.
+ void Clear();
+
+ // Cord::Append()
+ //
+ // Appends data to the Cord, which may come from another Cord or other string
+ // data.
+ void Append(const Cord& src);
+ void Append(Cord&& src);
+ void Append(y_absl::string_view src);
+ template <typename T, EnableIfString<T> = 0>
+ void Append(T&& src);
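+
+  // Example (an illustrative usage sketch):
+  //
+  //   y_absl::Cord cord("Hello ");
+  //   cord.Append(y_absl::Cord("World"));
+  //   // cord now contains "Hello World".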
+
+ // Cord::Prepend()
+ //
+ // Prepends data to the Cord, which may come from another Cord or other string
+ // data.
+ void Prepend(const Cord& src);
+ void Prepend(y_absl::string_view src);
+ template <typename T, EnableIfString<T> = 0>
+ void Prepend(T&& src);
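+
+  // Example (an illustrative usage sketch):
+  //
+  //   y_absl::Cord cord("World");
+  //   cord.Prepend("Hello ");
+  //   // cord now contains "Hello World".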
+
+  // Cord::RemovePrefix() / Cord::RemoveSuffix()
+  //
+  // Removes the first (RemovePrefix) or last (RemoveSuffix) `n` bytes of a
+  // Cord.
+ void RemovePrefix(size_t n);
+ void RemoveSuffix(size_t n);
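+
+  // Example (an illustrative usage sketch):
+  //
+  //   y_absl::Cord cord("Hello World");
+  //   cord.RemovePrefix(6);  // cord contains "World"
+  //   cord.RemoveSuffix(4);  // cord contains "W"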
+
+ // Cord::Subcord()
+ //
+  // Returns a new Cord representing the subrange [pos, pos + new_size) of
+  // *this. If pos >= size(), the result is empty. If
+  // (pos + new_size) >= size(), the result is the subrange [pos, size()).
+ Cord Subcord(size_t pos, size_t new_size) const;
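+
+  // Example (an illustrative usage sketch):
+  //
+  //   y_absl::Cord cord("Hello World");
+  //   y_absl::Cord sub = cord.Subcord(6, 5);  // sub contains "World"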
+
+ // Cord::swap()
+ //
+ // Swaps the contents of the Cord with `other`.
+ void swap(Cord& other) noexcept;
+
+ // swap()
+ //
+ // Swaps the contents of two Cords.
+ friend void swap(Cord& x, Cord& y) noexcept { x.swap(y); }
+
+ // Cord::size()
+ //
+ // Returns the size of the Cord.
+ size_t size() const;
+
+ // Cord::empty()
+ //
+  // Determines whether the given Cord is empty, returning `true` if so.
+ bool empty() const;
+
+ // Cord::EstimatedMemoryUsage()
+ //
+ // Returns the *approximate* number of bytes held in full or in part by this
+ // Cord (which may not remain the same between invocations). Note that Cords
+ // that share memory could each be "charged" independently for the same shared
+ // memory.
+ size_t EstimatedMemoryUsage() const;
+
+ // Cord::Compare()
+ //
+ // Compares 'this' Cord with rhs. This function and its relatives treat Cords
+ // as sequences of unsigned bytes. The comparison is a straightforward
+ // lexicographic comparison. `Cord::Compare()` returns values as follows:
+ //
+ // -1 'this' Cord is smaller
+ // 0 two Cords are equal
+ // 1 'this' Cord is larger
+ int Compare(y_absl::string_view rhs) const;
+ int Compare(const Cord& rhs) const;
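+
+  // Example (an illustrative sketch of the lexicographic comparison):
+  //
+  //   y_absl::Cord cord("abc");
+  //   assert(cord.Compare("abd") < 0);               // 'c' < 'd'
+  //   assert(cord.Compare(y_absl::Cord("abc")) == 0);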
+
+ // Cord::StartsWith()
+ //
+ // Determines whether the Cord starts with the passed string data `rhs`.
+ bool StartsWith(const Cord& rhs) const;
+ bool StartsWith(y_absl::string_view rhs) const;
+
+ // Cord::EndsWith()
+ //
+ // Determines whether the Cord ends with the passed string data `rhs`.
+ bool EndsWith(y_absl::string_view rhs) const;
+ bool EndsWith(const Cord& rhs) const;
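+
+  // Example (an illustrative usage sketch):
+  //
+  //   y_absl::Cord cord("Hello World");
+  //   assert(cord.StartsWith("Hello"));
+  //   assert(cord.EndsWith(y_absl::Cord("World")));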
+
+ // Cord::operator TString()
+ //
+  // Converts a Cord into a `TString`. This operator is marked explicit to
+ // prevent unintended Cord usage in functions that take a string.
+ explicit operator TString() const;
+
+ // CopyCordToString()
+ //
+ // Copies the contents of a `src` Cord into a `*dst` string.
+ //
+ // This function optimizes the case of reusing the destination string since it
+ // can reuse previously allocated capacity. However, this function does not
+ // guarantee that pointers previously returned by `dst->data()` remain valid
+ // even if `*dst` had enough capacity to hold `src`. If `*dst` is a new
+ // object, prefer to simply use the conversion operator to `TString`.
+ friend void CopyCordToString(const Cord& src, TString* dst);
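+
+  // Example (an illustrative sketch; `cords` and `Process` are hypothetical):
+  //
+  //   TString buffer;
+  //   for (const y_absl::Cord& cord : cords) {
+  //     y_absl::CopyCordToString(cord, &buffer);  // reuses buffer's capacity
+  //     Process(buffer);
+  //   }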
+
+ class CharIterator;
+
+ //----------------------------------------------------------------------------
+ // Cord::ChunkIterator
+ //----------------------------------------------------------------------------
+ //
+ // A `Cord::ChunkIterator` allows iteration over the constituent chunks of its
+  // Cord. Such iteration allows you to perform non-const operations on the data
+ // of a Cord without modifying it.
+ //
+ // Generally, you do not instantiate a `Cord::ChunkIterator` directly;
+ // instead, you create one implicitly through use of the `Cord::Chunks()`
+ // member function.
+ //
+ // The `Cord::ChunkIterator` has the following properties:
+ //
+ // * The iterator is invalidated after any non-const operation on the
+ // Cord object over which it iterates.
+ // * The `string_view` returned by dereferencing a valid, non-`end()`
+ // iterator is guaranteed to be non-empty.
+ // * Two `ChunkIterator` objects can be compared equal if and only if they
+ // remain valid and iterate over the same Cord.
+ // * The iterator in this case is a proxy iterator; the `string_view`
+ // returned by the iterator does not live inside the Cord, and its
+ // lifetime is limited to the lifetime of the iterator itself. To help
+ // prevent lifetime issues, `ChunkIterator::reference` is not a true
+ // reference type and is equivalent to `value_type`.
+ // * The iterator keeps state that can grow for Cords that contain many
+ // nodes and are imbalanced due to sharing. Prefer to pass this type by
+ // const reference instead of by value.
+ class ChunkIterator {
+ public:
+ using iterator_category = std::input_iterator_tag;
+ using value_type = y_absl::string_view;
+ using difference_type = ptrdiff_t;
+ using pointer = const value_type*;
+ using reference = value_type;
+
+ ChunkIterator() = default;
+
+ ChunkIterator& operator++();
+ ChunkIterator operator++(int);
+ bool operator==(const ChunkIterator& other) const;
+ bool operator!=(const ChunkIterator& other) const;
+ reference operator*() const;
+ pointer operator->() const;
+
+ friend class Cord;
+ friend class CharIterator;
+
+ private:
+ using CordRep = y_absl::cord_internal::CordRep;
+ using CordRepBtree = y_absl::cord_internal::CordRepBtree;
+ using CordRepBtreeReader = y_absl::cord_internal::CordRepBtreeReader;
+
+ // Stack of right children of concat nodes that we have to visit.
+ // Keep this at the end of the structure to avoid cache-thrashing.
+ // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
+ // the inlined vector size (47 exists for backward compatibility).
+ using Stack = y_absl::InlinedVector<y_absl::cord_internal::CordRep*, 47>;
+
+ // Constructs a `begin()` iterator from `tree`. `tree` must not be null.
+ explicit ChunkIterator(cord_internal::CordRep* tree);
+
+ // Constructs a `begin()` iterator from `cord`.
+ explicit ChunkIterator(const Cord* cord);
+
+ // Initializes this instance from a tree. Invoked by constructors.
+ void InitTree(cord_internal::CordRep* tree);
+
+ // Removes `n` bytes from `current_chunk_`. Expects `n` to be smaller than
+ // `current_chunk_.size()`.
+ void RemoveChunkPrefix(size_t n);
+ Cord AdvanceAndReadBytes(size_t n);
+ void AdvanceBytes(size_t n);
+
+ // Stack specific operator++
+ ChunkIterator& AdvanceStack();
+
+ // Btree specific operator++
+ ChunkIterator& AdvanceBtree();
+ void AdvanceBytesBtree(size_t n);
+
+ // Iterates `n` bytes, where `n` is expected to be greater than or equal to
+ // `current_chunk_.size()`.
+ void AdvanceBytesSlowPath(size_t n);
+
+ // A view into bytes of the current `CordRep`. It may only be a view to a
+ // suffix of bytes if this is being used by `CharIterator`.
+ y_absl::string_view current_chunk_;
+ // The current leaf, or `nullptr` if the iterator points to short data.
+ // If the current chunk is a substring node, current_leaf_ points to the
+ // underlying flat or external node.
+ y_absl::cord_internal::CordRep* current_leaf_ = nullptr;
+ // The number of bytes left in the `Cord` over which we are iterating.
+ size_t bytes_remaining_ = 0;
+
+ // Cord reader for cord btrees. Empty if not traversing a btree.
+ CordRepBtreeReader btree_reader_;
+
+ // See 'Stack' alias definition.
+ Stack stack_of_right_children_;
+ };
+
+ // Cord::ChunkIterator::chunk_begin()
+ //
+ // Returns an iterator to the first chunk of the `Cord`.
+ //
+ // Generally, prefer using `Cord::Chunks()` within a range-based for loop for
+ // iterating over the chunks of a Cord. This method may be useful for getting
+ // a `ChunkIterator` where range-based for-loops are not useful.
+ //
+ // Example:
+ //
+ // y_absl::Cord::ChunkIterator FindAsChunk(const y_absl::Cord& c,
+ // y_absl::string_view s) {
+ // return std::find(c.chunk_begin(), c.chunk_end(), s);
+ // }
+ ChunkIterator chunk_begin() const;
+
+  // Cord::ChunkIterator::chunk_end()
+ //
+ // Returns an iterator one increment past the last chunk of the `Cord`.
+ //
+ // Generally, prefer using `Cord::Chunks()` within a range-based for loop for
+ // iterating over the chunks of a Cord. This method may be useful for getting
+ // a `ChunkIterator` where range-based for-loops may not be available.
+ ChunkIterator chunk_end() const;
+
+ //----------------------------------------------------------------------------
+ // Cord::ChunkIterator::ChunkRange
+ //----------------------------------------------------------------------------
+ //
+ // `ChunkRange` is a helper class for iterating over the chunks of the `Cord`,
+ // producing an iterator which can be used within a range-based for loop.
+ // Construction of a `ChunkRange` will return an iterator pointing to the
+ // first chunk of the Cord. Generally, do not construct a `ChunkRange`
+ // directly; instead, prefer to use the `Cord::Chunks()` method.
+ //
+ // Implementation note: `ChunkRange` is simply a convenience wrapper over
+ // `Cord::chunk_begin()` and `Cord::chunk_end()`.
+ class ChunkRange {
+ public:
+    // Fulfill minimum C++ container requirements [container.requirements].
+    // These (partial) container type definitions allow ChunkRange to be used
+    // in various utilities expecting a subset of [container.requirements].
+    // For example, the below enables using `::testing::ElementsAre(...)`.
+ using value_type = y_absl::string_view;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using iterator = ChunkIterator;
+ using const_iterator = ChunkIterator;
+
+ explicit ChunkRange(const Cord* cord) : cord_(cord) {}
+
+ ChunkIterator begin() const;
+ ChunkIterator end() const;
+
+ private:
+ const Cord* cord_;
+ };
+
+ // Cord::Chunks()
+ //
+ // Returns a `Cord::ChunkIterator::ChunkRange` for iterating over the chunks
+ // of a `Cord` with a range-based for-loop. For most iteration tasks on a
+ // Cord, use `Cord::Chunks()` to retrieve this iterator.
+ //
+ // Example:
+ //
+ // void ProcessChunks(const Cord& cord) {
+ // for (y_absl::string_view chunk : cord.Chunks()) { ... }
+ // }
+ //
+ // Note that the ordinary caveats of temporary lifetime extension apply:
+ //
+ // void Process() {
+ // for (y_absl::string_view chunk : CordFactory().Chunks()) {
+ // // The temporary Cord returned by CordFactory has been destroyed!
+ // }
+ // }
+ ChunkRange Chunks() const;
+
+ //----------------------------------------------------------------------------
+ // Cord::CharIterator
+ //----------------------------------------------------------------------------
+ //
+ // A `Cord::CharIterator` allows iteration over the constituent characters of
+ // a `Cord`.
+ //
+ // Generally, you do not instantiate a `Cord::CharIterator` directly; instead,
+ // you create one implicitly through use of the `Cord::Chars()` member
+ // function.
+ //
+ // A `Cord::CharIterator` has the following properties:
+ //
+ // * The iterator is invalidated after any non-const operation on the
+ // Cord object over which it iterates.
+ // * Two `CharIterator` objects can be compared equal if and only if they
+ // remain valid and iterate over the same Cord.
+ // * The iterator keeps state that can grow for Cords that contain many
+ // nodes and are imbalanced due to sharing. Prefer to pass this type by
+ // const reference instead of by value.
+ // * This type cannot act as a forward iterator because a `Cord` can reuse
+ // sections of memory. This fact violates the requirement for forward
+ // iterators to compare equal if dereferencing them returns the same
+ // object.
+ class CharIterator {
+ public:
+ using iterator_category = std::input_iterator_tag;
+ using value_type = char;
+ using difference_type = ptrdiff_t;
+ using pointer = const char*;
+ using reference = const char&;
+
+ CharIterator() = default;
+
+ CharIterator& operator++();
+ CharIterator operator++(int);
+ bool operator==(const CharIterator& other) const;
+ bool operator!=(const CharIterator& other) const;
+ reference operator*() const;
+ pointer operator->() const;
+
+ friend Cord;
+
+ private:
+ explicit CharIterator(const Cord* cord) : chunk_iterator_(cord) {}
+
+ ChunkIterator chunk_iterator_;
+ };
+
+ // Cord::CharIterator::AdvanceAndRead()
+ //
+ // Advances the `Cord::CharIterator` by `n_bytes` and returns the bytes
+  // advanced as a separate `Cord`. `n_bytes` must be less than or equal to the
+  // number of bytes remaining within the Cord; otherwise, behavior is
+  // undefined. It is valid to pass `char_end()` and `0`.
+ static Cord AdvanceAndRead(CharIterator* it, size_t n_bytes);
+
+ // Cord::CharIterator::Advance()
+ //
+ // Advances the `Cord::CharIterator` by `n_bytes`. `n_bytes` must be less than
+ // or equal to the number of bytes remaining within the Cord; otherwise,
+ // behavior is undefined. It is valid to pass `char_end()` and `0`.
+ static void Advance(CharIterator* it, size_t n_bytes);
+
+ // Cord::CharIterator::ChunkRemaining()
+ //
+ // Returns the longest contiguous view starting at the iterator's position.
+ //
+ // `it` must be dereferenceable.
+ static y_absl::string_view ChunkRemaining(const CharIterator& it);
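+
+  // Example combining the three helpers above (an illustrative sketch;
+  // assumes `cord` holds more than six bytes):
+  //
+  //   y_absl::Cord::CharIterator it = cord.char_begin();
+  //   y_absl::Cord header = y_absl::Cord::AdvanceAndRead(&it, 4);
+  //   y_absl::Cord::Advance(&it, 2);
+  //   y_absl::string_view rest_of_chunk = y_absl::Cord::ChunkRemaining(it);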
+
+ // Cord::CharIterator::char_begin()
+ //
+ // Returns an iterator to the first character of the `Cord`.
+ //
+ // Generally, prefer using `Cord::Chars()` within a range-based for loop for
+ // iterating over the chunks of a Cord. This method may be useful for getting
+ // a `CharIterator` where range-based for-loops may not be available.
+ CharIterator char_begin() const;
+
+ // Cord::CharIterator::char_end()
+ //
+ // Returns an iterator to one past the last character of the `Cord`.
+ //
+ // Generally, prefer using `Cord::Chars()` within a range-based for loop for
+ // iterating over the chunks of a Cord. This method may be useful for getting
+ // a `CharIterator` where range-based for-loops are not useful.
+ CharIterator char_end() const;
+
+ // Cord::CharIterator::CharRange
+ //
+  // `CharRange` is a helper class for iterating over the characters of a
+  // `Cord`, producing an iterator which can be used within a range-based for
+  // loop. Construction of a `CharRange` will return an iterator pointing to
+  // the first character of the Cord. Generally, do not construct a `CharRange`
+  // directly; instead, prefer to use the `Cord::Chars()` method shown below.
+ //
+ // Implementation note: `CharRange` is simply a convenience wrapper over
+ // `Cord::char_begin()` and `Cord::char_end()`.
+ class CharRange {
+ public:
+    // Fulfill minimum C++ container requirements [container.requirements].
+    // These (partial) container type definitions allow CharRange to be used
+    // in various utilities expecting a subset of [container.requirements].
+    // For example, the below enables using `::testing::ElementsAre(...)`.
+ using value_type = char;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using iterator = CharIterator;
+ using const_iterator = CharIterator;
+
+ explicit CharRange(const Cord* cord) : cord_(cord) {}
+
+ CharIterator begin() const;
+ CharIterator end() const;
+
+ private:
+ const Cord* cord_;
+ };
+
+ // Cord::CharIterator::Chars()
+ //
+  // Returns a `Cord::CharIterator::CharRange` for iterating over the
+  // characters of a `Cord` with a range-based for-loop. For most
+  // character-based iteration tasks on a Cord, use `Cord::Chars()` to
+  // retrieve this iterator.
+ //
+ // Example:
+ //
+ // void ProcessCord(const Cord& cord) {
+ // for (char c : cord.Chars()) { ... }
+ // }
+ //
+ // Note that the ordinary caveats of temporary lifetime extension apply:
+ //
+ // void Process() {
+ // for (char c : CordFactory().Chars()) {
+ // // The temporary Cord returned by CordFactory has been destroyed!
+ // }
+ // }
+ CharRange Chars() const;
+
+ // Cord::operator[]
+ //
+ // Gets the "i"th character of the Cord and returns it, provided that
+ // 0 <= i < Cord.size().
+ //
+ // NOTE: This routine is reasonably efficient. It is roughly
+  // logarithmic in the number of chunks that make up the cord. Still,
+ // if you need to iterate over the contents of a cord, you should
+ // use a CharIterator/ChunkIterator rather than call operator[] or Get()
+ // repeatedly in a loop.
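+  //
+  // Example (an illustrative sketch):
+  //
+  //   y_absl::Cord c("abc");
+  //   assert(c[0] == 'a' && c[2] == 'c');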
+ char operator[](size_t i) const;
+
+ // Cord::TryFlat()
+ //
+ // If this cord's representation is a single flat array, returns a
+ // string_view referencing that array. Otherwise returns nullopt.
+ y_absl::optional<y_absl::string_view> TryFlat() const;
+
+ // Cord::Flatten()
+ //
+ // Flattens the cord into a single array and returns a view of the data.
+ //
+ // If the cord was already flat, the contents are not modified.
+ y_absl::string_view Flatten();
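+
+  // Example combining `TryFlat()` and `Flatten()` (an illustrative sketch;
+  // assumes a mutable `cord`):
+  //
+  //   y_absl::string_view flat;
+  //   if (y_absl::optional<y_absl::string_view> v = cord.TryFlat()) {
+  //     flat = *v;              // zero-copy fast path
+  //   } else {
+  //     flat = cord.Flatten();  // may allocate and copy once
+  //   }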
+
+ // Supports y_absl::Cord as a sink object for y_absl::Format().
+ friend void AbslFormatFlush(y_absl::Cord* cord, y_absl::string_view part) {
+ cord->Append(part);
+ }
+
+ template <typename H>
+ friend H AbslHashValue(H hash_state, const y_absl::Cord& c) {
+ y_absl::optional<y_absl::string_view> maybe_flat = c.TryFlat();
+ if (maybe_flat.has_value()) {
+ return H::combine(std::move(hash_state), *maybe_flat);
+ }
+ return c.HashFragmented(std::move(hash_state));
+ }
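+
+  // The hook above makes `Cord` usable as a hash key, e.g. (an illustrative
+  // sketch):
+  //
+  //   y_absl::flat_hash_set<y_absl::Cord> seen;
+  //   seen.insert(y_absl::Cord("key"));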
+
+ // Create a Cord with the contents of StringConstant<T>::value.
+ // No allocations will be done and no data will be copied.
+ // This is an INTERNAL API and subject to change or removal. This API can only
+ // be used by spelling y_absl::strings_internal::MakeStringConstant, which is
+ // also an internal API.
+ template <typename T>
+ explicit constexpr Cord(strings_internal::StringConstant<T>);
+
+ private:
+ using CordRep = y_absl::cord_internal::CordRep;
+ using CordRepFlat = y_absl::cord_internal::CordRepFlat;
+ using CordzInfo = cord_internal::CordzInfo;
+ using CordzUpdateScope = cord_internal::CordzUpdateScope;
+ using CordzUpdateTracker = cord_internal::CordzUpdateTracker;
+ using InlineData = cord_internal::InlineData;
+ using MethodIdentifier = CordzUpdateTracker::MethodIdentifier;
+
+ // Creates a cord instance with `method` representing the originating
+ // public API call causing the cord to be created.
+ explicit Cord(y_absl::string_view src, MethodIdentifier method);
+
+ friend class CordTestPeer;
+ friend bool operator==(const Cord& lhs, const Cord& rhs);
+ friend bool operator==(const Cord& lhs, y_absl::string_view rhs);
+
+ friend const CordzInfo* GetCordzInfoForTesting(const Cord& cord);
+
+ // Calls the provided function once for each cord chunk, in order. Unlike
+ // Chunks(), this API will not allocate memory.
+ void ForEachChunk(y_absl::FunctionRef<void(y_absl::string_view)>) const;
+
+ // Allocates new contiguous storage for the contents of the cord. This is
+ // called by Flatten() when the cord was not already flat.
+ y_absl::string_view FlattenSlowPath();
+
+ // Actual cord contents are hidden inside the following simple
+ // class so that we can isolate the bulk of cord.cc from changes
+ // to the representation.
+ //
+ // InlineRep holds either a tree pointer, or an array of kMaxInline bytes.
+ class InlineRep {
+ public:
+ static constexpr unsigned char kMaxInline = cord_internal::kMaxInline;
+ static_assert(kMaxInline >= sizeof(y_absl::cord_internal::CordRep*), "");
+
+ constexpr InlineRep() : data_() {}
+ explicit InlineRep(InlineData::DefaultInitType init) : data_(init) {}
+ InlineRep(const InlineRep& src);
+ InlineRep(InlineRep&& src);
+ InlineRep& operator=(const InlineRep& src);
+ InlineRep& operator=(InlineRep&& src) noexcept;
+
+ explicit constexpr InlineRep(cord_internal::InlineData data);
+
+ void Swap(InlineRep* rhs);
+ bool empty() const;
+ size_t size() const;
+ const char* data() const; // Returns nullptr if holding pointer
+ void set_data(const char* data, size_t n,
+ bool nullify_tail); // Discards pointer, if any
+ char* set_data(size_t n); // Write data to the result
+ // Returns nullptr if holding bytes
+ y_absl::cord_internal::CordRep* tree() const;
+ y_absl::cord_internal::CordRep* as_tree() const;
+ // Returns non-null iff was holding a pointer
+ y_absl::cord_internal::CordRep* clear();
+ // Converts to pointer if necessary.
+ void reduce_size(size_t n); // REQUIRES: holding data
+ void remove_prefix(size_t n); // REQUIRES: holding data
+ void AppendArray(y_absl::string_view src, MethodIdentifier method);
+ y_absl::string_view FindFlatStartPiece() const;
+
+    // Creates a CordRepFlat instance from the current inlined data with
+    // `extra` bytes of desired additional capacity.
+ CordRepFlat* MakeFlatWithExtraCapacity(size_t extra);
+
+ // Sets the tree value for this instance. `rep` must not be null.
+ // Requires the current instance to hold a tree, and a lock to be held on
+ // any CordzInfo referenced by this instance. The latter is enforced through
+ // the CordzUpdateScope argument. If the current instance is sampled, then
+ // the CordzInfo instance is updated to reference the new `rep` value.
+ void SetTree(CordRep* rep, const CordzUpdateScope& scope);
+
+ // Identical to SetTree(), except that `rep` is allowed to be null, in
+ // which case the current instance is reset to an empty value.
+ void SetTreeOrEmpty(CordRep* rep, const CordzUpdateScope& scope);
+
+ // Sets the tree value for this instance, and randomly samples this cord.
+ // This function disregards existing contents in `data_`, and should be
+ // called when a Cord is 'promoted' from an 'uninitialized' or 'inlined'
+ // value to a non-inlined (tree / ring) value.
+ void EmplaceTree(CordRep* rep, MethodIdentifier method);
+
+ // Identical to EmplaceTree, except that it copies the parent stack from
+ // the provided `parent` data if the parent is sampled.
+ void EmplaceTree(CordRep* rep, const InlineData& parent,
+ MethodIdentifier method);
+
+ // Commits the change of a newly created, or updated `rep` root value into
+ // this cord. `old_rep` indicates the old (inlined or tree) value of the
+ // cord, and determines if the commit invokes SetTree() or EmplaceTree().
+ void CommitTree(const CordRep* old_rep, CordRep* rep,
+ const CordzUpdateScope& scope, MethodIdentifier method);
+
+ void AppendTreeToInlined(CordRep* tree, MethodIdentifier method);
+ void AppendTreeToTree(CordRep* tree, MethodIdentifier method);
+ void AppendTree(CordRep* tree, MethodIdentifier method);
+ void PrependTreeToInlined(CordRep* tree, MethodIdentifier method);
+ void PrependTreeToTree(CordRep* tree, MethodIdentifier method);
+ void PrependTree(CordRep* tree, MethodIdentifier method);
+
+ template <bool has_length>
+ void GetAppendRegion(char** region, size_t* size, size_t length);
+
+ bool IsSame(const InlineRep& other) const {
+ return memcmp(&data_, &other.data_, sizeof(data_)) == 0;
+ }
+ int BitwiseCompare(const InlineRep& other) const {
+ uint64_t x, y;
+ // Use memcpy to avoid aliasing issues.
+ memcpy(&x, &data_, sizeof(x));
+ memcpy(&y, &other.data_, sizeof(y));
+ if (x == y) {
+ memcpy(&x, reinterpret_cast<const char*>(&data_) + 8, sizeof(x));
+ memcpy(&y, reinterpret_cast<const char*>(&other.data_) + 8, sizeof(y));
+ if (x == y) return 0;
+ }
+ return y_absl::big_endian::FromHost64(x) < y_absl::big_endian::FromHost64(y)
+ ? -1
+ : 1;
+ }
+ void CopyTo(TString* dst) const {
+ // memcpy is much faster when operating on a known size. On most supported
+ // platforms, the small string optimization is large enough that resizing
+ // to 15 bytes does not cause a memory allocation.
+ y_absl::strings_internal::STLStringResizeUninitialized(dst,
+ sizeof(data_) - 1);
+ memcpy(&(*dst)[0], &data_, sizeof(data_) - 1);
+ // erase is faster than resize because the logic for memory allocation is
+ // not needed.
+ dst->erase(inline_size());
+ }
+
+ // Copies the inline contents into `dst`. Assumes the cord is not empty.
+ void CopyToArray(char* dst) const;
+
+ bool is_tree() const { return data_.is_tree(); }
+
+ // Returns true if the Cord is being profiled by cordz.
+ bool is_profiled() const { return data_.is_tree() && data_.is_profiled(); }
+
+ // Returns the profiled CordzInfo, or nullptr if not sampled.
+ y_absl::cord_internal::CordzInfo* cordz_info() const {
+ return data_.cordz_info();
+ }
+
+ // Sets the profiled CordzInfo. `cordz_info` must not be null.
+ void set_cordz_info(cord_internal::CordzInfo* cordz_info) {
+ assert(cordz_info != nullptr);
+ data_.set_cordz_info(cordz_info);
+ }
+
+ // Resets the current cordz_info to null / empty.
+ void clear_cordz_info() { data_.clear_cordz_info(); }
+
+ private:
+ friend class Cord;
+
+ void AssignSlow(const InlineRep& src);
+ // Unrefs the tree and stops profiling.
+ void UnrefTree();
+
+ void ResetToEmpty() { data_ = {}; }
+
+ void set_inline_size(size_t size) { data_.set_inline_size(size); }
+ size_t inline_size() const { return data_.inline_size(); }
+
+ cord_internal::InlineData data_;
+ };
+ InlineRep contents_;
+
+ // Helper for MemoryUsage().
+ static size_t MemoryUsageAux(const y_absl::cord_internal::CordRep* rep);
+
+ // Helper for GetFlat() and TryFlat().
+ static bool GetFlatAux(y_absl::cord_internal::CordRep* rep,
+ y_absl::string_view* fragment);
+
+ // Helper for ForEachChunk().
+ static void ForEachChunkAux(
+ y_absl::cord_internal::CordRep* rep,
+ y_absl::FunctionRef<void(y_absl::string_view)> callback);
+
+ // The destructor for non-empty Cords.
+ void DestroyCordSlow();
+
+ // Out-of-line implementation of slower parts of logic.
+ void CopyToArraySlowPath(char* dst) const;
+ int CompareSlowPath(y_absl::string_view rhs, size_t compared_size,
+ size_t size_to_compare) const;
+ int CompareSlowPath(const Cord& rhs, size_t compared_size,
+ size_t size_to_compare) const;
+ bool EqualsImpl(y_absl::string_view rhs, size_t size_to_compare) const;
+ bool EqualsImpl(const Cord& rhs, size_t size_to_compare) const;
+ int CompareImpl(const Cord& rhs) const;
+
+ template <typename ResultType, typename RHS>
+ friend ResultType GenericCompare(const Cord& lhs, const RHS& rhs,
+ size_t size_to_compare);
+ static y_absl::string_view GetFirstChunk(const Cord& c);
+ static y_absl::string_view GetFirstChunk(y_absl::string_view sv);
+
+ // Returns a new reference to contents_.tree(), or steals an existing
+ // reference if called on an rvalue.
+ y_absl::cord_internal::CordRep* TakeRep() const&;
+ y_absl::cord_internal::CordRep* TakeRep() &&;
+
+ // Helper for Append().
+ template <typename C>
+ void AppendImpl(C&& src);
+
+ // Prepends the provided data to this instance. `method` contains the public
+ // API method for this action which is tracked for Cordz sampling purposes.
+ void PrependArray(y_absl::string_view src, MethodIdentifier method);
+
+ // Assigns the value in 'src' to this instance, 'stealing' its contents.
+ // Requires src.length() > kMaxBytesToCopy.
+ Cord& AssignLargeString(TString&& src);
+
+ // Helper for AbslHashValue().
+ template <typename H>
+ H HashFragmented(H hash_state) const {
+ typename H::AbslInternalPiecewiseCombiner combiner;
+ ForEachChunk([&combiner, &hash_state](y_absl::string_view chunk) {
+ hash_state = combiner.add_buffer(std::move(hash_state), chunk.data(),
+ chunk.size());
+ });
+ return H::combine(combiner.finalize(std::move(hash_state)), size());
+ }
+};
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// allow a Cord to be logged
+extern std::ostream& operator<<(std::ostream& out, const Cord& cord);
+
+// ------------------------------------------------------------------
+// Internal details follow. Clients should ignore.
+
+namespace cord_internal {
+
+// Fast implementation of memmove for up to 15 bytes. This implementation is
+// safe for overlapping regions. If nullify_tail is true, the destination is
+// padded with '\0' up to 16 bytes.
+inline void SmallMemmove(char* dst, const char* src, size_t n,
+ bool nullify_tail = false) {
+ if (n >= 8) {
+ assert(n <= 16);
+ uint64_t buf1;
+ uint64_t buf2;
+ memcpy(&buf1, src, 8);
+ memcpy(&buf2, src + n - 8, 8);
+ if (nullify_tail) {
+ memset(dst + 8, 0, 8);
+ }
+ memcpy(dst, &buf1, 8);
+ memcpy(dst + n - 8, &buf2, 8);
+ } else if (n >= 4) {
+ uint32_t buf1;
+ uint32_t buf2;
+ memcpy(&buf1, src, 4);
+ memcpy(&buf2, src + n - 4, 4);
+ if (nullify_tail) {
+ memset(dst + 4, 0, 4);
+ memset(dst + 8, 0, 8);
+ }
+ memcpy(dst, &buf1, 4);
+ memcpy(dst + n - 4, &buf2, 4);
+ } else {
+ if (n != 0) {
+ dst[0] = src[0];
+ dst[n / 2] = src[n / 2];
+ dst[n - 1] = src[n - 1];
+ }
+ if (nullify_tail) {
+ memset(dst + 8, 0, 8);
+ memset(dst + n, 0, 8);
+ }
+ }
+}
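+
+// For example (illustrative), n == 5 takes the `n >= 4` branch: bytes [0, 4)
+// and [1, 5) of `src` are both loaded into local words before either is
+// stored to `dst`, which is what makes overlapping regions safe.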
+
+// Does non-template-specific `CordRepExternal` initialization.
+// Expects `data` to be non-empty.
+void InitializeCordRepExternal(y_absl::string_view data, CordRepExternal* rep);
+
+// Creates a new `CordRep` that owns `data` and `releaser` and returns a pointer
+// to it, or `nullptr` if `data` was empty.
+template <typename Releaser>
+// NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
+CordRep* NewExternalRep(y_absl::string_view data, Releaser&& releaser) {
+ using ReleaserType = y_absl::decay_t<Releaser>;
+ if (data.empty()) {
+ // Never create empty external nodes.
+ InvokeReleaser(Rank0{}, ReleaserType(std::forward<Releaser>(releaser)),
+ data);
+ return nullptr;
+ }
+
+ CordRepExternal* rep = new CordRepExternalImpl<ReleaserType>(
+ std::forward<Releaser>(releaser), 0);
+ InitializeCordRepExternal(data, rep);
+ return rep;
+}
+
+// Overload for function reference types that dispatches using a function
+// pointer, because `alignof()` and `sizeof()` cannot be applied to a function
+// reference.
+// NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
+inline CordRep* NewExternalRep(y_absl::string_view data,
+ void (&releaser)(y_absl::string_view)) {
+ return NewExternalRep(data, &releaser);
+}
+
+} // namespace cord_internal
+
+template <typename Releaser>
+Cord MakeCordFromExternal(y_absl::string_view data, Releaser&& releaser) {
+ Cord cord;
+ if (auto* rep = ::y_absl::cord_internal::NewExternalRep(
+ data, std::forward<Releaser>(releaser))) {
+ cord.contents_.EmplaceTree(rep,
+ Cord::MethodIdentifier::kMakeCordFromExternal);
+ }
+ return cord;
+}
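+
+// Example use of `MakeCordFromExternal` (an illustrative sketch; `ReadFile`
+// is a hypothetical helper returning TString). The Cord references the
+// buffer without copying it; the releaser deletes the buffer once the last
+// Cord referencing it goes away.
+//
+//   auto* buffer = new TString(ReadFile("/some/path"));
+//   y_absl::Cord cord = y_absl::MakeCordFromExternal(
+//       *buffer, [buffer](y_absl::string_view) { delete buffer; });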
+
+constexpr Cord::InlineRep::InlineRep(cord_internal::InlineData data)
+ : data_(data) {}
+
+inline Cord::InlineRep::InlineRep(const Cord::InlineRep& src)
+ : data_(InlineData::kDefaultInit) {
+ if (CordRep* tree = src.tree()) {
+ EmplaceTree(CordRep::Ref(tree), src.data_,
+ CordzUpdateTracker::kConstructorCord);
+ } else {
+ data_ = src.data_;
+ }
+}
+
+inline Cord::InlineRep::InlineRep(Cord::InlineRep&& src) : data_(src.data_) {
+ src.ResetToEmpty();
+}
+
+inline Cord::InlineRep& Cord::InlineRep::operator=(const Cord::InlineRep& src) {
+ if (this == &src) {
+ return *this;
+ }
+ if (!is_tree() && !src.is_tree()) {
+ data_ = src.data_;
+ return *this;
+ }
+ AssignSlow(src);
+ return *this;
+}
+
+inline Cord::InlineRep& Cord::InlineRep::operator=(
+ Cord::InlineRep&& src) noexcept {
+ if (is_tree()) {
+ UnrefTree();
+ }
+ data_ = src.data_;
+ src.ResetToEmpty();
+ return *this;
+}
+
+inline void Cord::InlineRep::Swap(Cord::InlineRep* rhs) {
+ if (rhs == this) {
+ return;
+ }
+ std::swap(data_, rhs->data_);
+}
+
+inline const char* Cord::InlineRep::data() const {
+ return is_tree() ? nullptr : data_.as_chars();
+}
+
+inline y_absl::cord_internal::CordRep* Cord::InlineRep::as_tree() const {
+ assert(data_.is_tree());
+ return data_.as_tree();
+}
+
+inline y_absl::cord_internal::CordRep* Cord::InlineRep::tree() const {
+ if (is_tree()) {
+ return as_tree();
+ } else {
+ return nullptr;
+ }
+}
+
+inline bool Cord::InlineRep::empty() const { return data_.is_empty(); }
+
+inline size_t Cord::InlineRep::size() const {
+ return is_tree() ? as_tree()->length : inline_size();
+}
+
+inline cord_internal::CordRepFlat* Cord::InlineRep::MakeFlatWithExtraCapacity(
+ size_t extra) {
+ static_assert(cord_internal::kMinFlatLength >= sizeof(data_), "");
+ size_t len = data_.inline_size();
+ auto* result = CordRepFlat::New(len + extra);
+ result->length = len;
+ memcpy(result->Data(), data_.as_chars(), sizeof(data_));
+ return result;
+}
+
+inline void Cord::InlineRep::EmplaceTree(CordRep* rep,
+ MethodIdentifier method) {
+ assert(rep);
+ data_.make_tree(rep);
+ CordzInfo::MaybeTrackCord(data_, method);
+}
+
+inline void Cord::InlineRep::EmplaceTree(CordRep* rep, const InlineData& parent,
+ MethodIdentifier method) {
+ data_.make_tree(rep);
+ CordzInfo::MaybeTrackCord(data_, parent, method);
+}
+
+inline void Cord::InlineRep::SetTree(CordRep* rep,
+ const CordzUpdateScope& scope) {
+ assert(rep);
+ assert(data_.is_tree());
+ data_.set_tree(rep);
+ scope.SetCordRep(rep);
+}
+
+inline void Cord::InlineRep::SetTreeOrEmpty(CordRep* rep,
+ const CordzUpdateScope& scope) {
+ assert(data_.is_tree());
+ if (rep) {
+ data_.set_tree(rep);
+ } else {
+ data_ = {};
+ }
+ scope.SetCordRep(rep);
+}
+
+inline void Cord::InlineRep::CommitTree(const CordRep* old_rep, CordRep* rep,
+ const CordzUpdateScope& scope,
+ MethodIdentifier method) {
+ if (old_rep) {
+ SetTree(rep, scope);
+ } else {
+ EmplaceTree(rep, method);
+ }
+}
+
+inline y_absl::cord_internal::CordRep* Cord::InlineRep::clear() {
+ if (is_tree()) {
+ CordzInfo::MaybeUntrackCord(cordz_info());
+ }
+ y_absl::cord_internal::CordRep* result = tree();
+ ResetToEmpty();
+ return result;
+}
+
+inline void Cord::InlineRep::CopyToArray(char* dst) const {
+ assert(!is_tree());
+ size_t n = inline_size();
+ assert(n != 0);
+ cord_internal::SmallMemmove(dst, data_.as_chars(), n);
+}
+
+constexpr inline Cord::Cord() noexcept {}
+
+inline Cord::Cord(y_absl::string_view src)
+ : Cord(src, CordzUpdateTracker::kConstructorString) {}
+
+template <typename T>
+constexpr Cord::Cord(strings_internal::StringConstant<T>)
+ : contents_(strings_internal::StringConstant<T>::value.size() <=
+ cord_internal::kMaxInline
+ ? cord_internal::InlineData(
+ strings_internal::StringConstant<T>::value)
+ : cord_internal::InlineData(
+ &cord_internal::ConstInitExternalStorage<
+ strings_internal::StringConstant<T>>::value)) {}
+
+inline Cord& Cord::operator=(const Cord& x) {
+ contents_ = x.contents_;
+ return *this;
+}
+
+template <typename T, Cord::EnableIfString<T>>
+Cord& Cord::operator=(T&& src) {
+ if (src.size() <= cord_internal::kMaxBytesToCopy) {
+ return operator=(y_absl::string_view(src));
+ } else {
+ return AssignLargeString(std::forward<T>(src));
+ }
+}
+
+inline Cord::Cord(const Cord& src) : contents_(src.contents_) {}
+
+inline Cord::Cord(Cord&& src) noexcept : contents_(std::move(src.contents_)) {}
+
+inline void Cord::swap(Cord& other) noexcept {
+ contents_.Swap(&other.contents_);
+}
+
+inline Cord& Cord::operator=(Cord&& x) noexcept {
+ contents_ = std::move(x.contents_);
+ return *this;
+}
+
+extern template Cord::Cord(TString&& src);
+
+inline size_t Cord::size() const {
+  // Inlined data stores its size directly; tree reps store it in `length`.
+ return contents_.size();
+}
+
+inline bool Cord::empty() const { return contents_.empty(); }
+
+inline size_t Cord::EstimatedMemoryUsage() const {
+ size_t result = sizeof(Cord);
+ if (const y_absl::cord_internal::CordRep* rep = contents_.tree()) {
+ result += MemoryUsageAux(rep);
+ }
+ return result;
+}
+
+inline y_absl::optional<y_absl::string_view> Cord::TryFlat() const {
+ y_absl::cord_internal::CordRep* rep = contents_.tree();
+ if (rep == nullptr) {
+ return y_absl::string_view(contents_.data(), contents_.size());
+ }
+ y_absl::string_view fragment;
+ if (GetFlatAux(rep, &fragment)) {
+ return fragment;
+ }
+ return y_absl::nullopt;
+}
+
+inline y_absl::string_view Cord::Flatten() {
+ y_absl::cord_internal::CordRep* rep = contents_.tree();
+ if (rep == nullptr) {
+ return y_absl::string_view(contents_.data(), contents_.size());
+ } else {
+ y_absl::string_view already_flat_contents;
+ if (GetFlatAux(rep, &already_flat_contents)) {
+ return already_flat_contents;
+ }
+ }
+ return FlattenSlowPath();
+}
+
+inline void Cord::Append(y_absl::string_view src) {
+ contents_.AppendArray(src, CordzUpdateTracker::kAppendString);
+}
+
+inline void Cord::Prepend(y_absl::string_view src) {
+ PrependArray(src, CordzUpdateTracker::kPrependString);
+}
+
+extern template void Cord::Append(TString&& src);
+extern template void Cord::Prepend(TString&& src);
+
+inline int Cord::Compare(const Cord& rhs) const {
+ if (!contents_.is_tree() && !rhs.contents_.is_tree()) {
+ return contents_.BitwiseCompare(rhs.contents_);
+ }
+
+ return CompareImpl(rhs);
+}
+
+// Does 'this' cord start with `rhs`?
+inline bool Cord::StartsWith(const Cord& rhs) const {
+ if (contents_.IsSame(rhs.contents_)) return true;
+ size_t rhs_size = rhs.size();
+ if (size() < rhs_size) return false;
+ return EqualsImpl(rhs, rhs_size);
+}
+
+inline bool Cord::StartsWith(y_absl::string_view rhs) const {
+ size_t rhs_size = rhs.size();
+ if (size() < rhs_size) return false;
+ return EqualsImpl(rhs, rhs_size);
+}
+
+inline void Cord::ChunkIterator::InitTree(cord_internal::CordRep* tree) {
+ if (tree->tag == cord_internal::BTREE) {
+ current_chunk_ = btree_reader_.Init(tree->btree());
+ return;
+ }
+
+ stack_of_right_children_.push_back(tree);
+ operator++();
+}
+
+inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree)
+ : bytes_remaining_(tree->length) {
+ InitTree(tree);
+}
+
+inline Cord::ChunkIterator::ChunkIterator(const Cord* cord)
+ : bytes_remaining_(cord->size()) {
+ if (cord->contents_.is_tree()) {
+ InitTree(cord->contents_.as_tree());
+ } else {
+ current_chunk_ =
+ y_absl::string_view(cord->contents_.data(), bytes_remaining_);
+ }
+}
+
+inline Cord::ChunkIterator& Cord::ChunkIterator::AdvanceBtree() {
+ current_chunk_ = btree_reader_.Next();
+ return *this;
+}
+
+inline void Cord::ChunkIterator::AdvanceBytesBtree(size_t n) {
+ assert(n >= current_chunk_.size());
+ bytes_remaining_ -= n;
+ if (bytes_remaining_) {
+ if (n == current_chunk_.size()) {
+ current_chunk_ = btree_reader_.Next();
+ } else {
+ size_t offset = btree_reader_.length() - bytes_remaining_;
+ current_chunk_ = btree_reader_.Seek(offset);
+ }
+ } else {
+ current_chunk_ = {};
+ }
+}
+
+inline Cord::ChunkIterator& Cord::ChunkIterator::operator++() {
+ ABSL_HARDENING_ASSERT(bytes_remaining_ > 0 &&
+ "Attempted to iterate past `end()`");
+ assert(bytes_remaining_ >= current_chunk_.size());
+ bytes_remaining_ -= current_chunk_.size();
+ if (bytes_remaining_ > 0) {
+ return btree_reader_ ? AdvanceBtree() : AdvanceStack();
+ } else {
+ current_chunk_ = {};
+ }
+ return *this;
+}
+
+inline Cord::ChunkIterator Cord::ChunkIterator::operator++(int) {
+ ChunkIterator tmp(*this);
+ operator++();
+ return tmp;
+}
+
+inline bool Cord::ChunkIterator::operator==(const ChunkIterator& other) const {
+ return bytes_remaining_ == other.bytes_remaining_;
+}
+
+inline bool Cord::ChunkIterator::operator!=(const ChunkIterator& other) const {
+ return !(*this == other);
+}
+
+inline Cord::ChunkIterator::reference Cord::ChunkIterator::operator*() const {
+ ABSL_HARDENING_ASSERT(bytes_remaining_ != 0);
+ return current_chunk_;
+}
+
+inline Cord::ChunkIterator::pointer Cord::ChunkIterator::operator->() const {
+ ABSL_HARDENING_ASSERT(bytes_remaining_ != 0);
+ return &current_chunk_;
+}
+
+inline void Cord::ChunkIterator::RemoveChunkPrefix(size_t n) {
+ assert(n < current_chunk_.size());
+ current_chunk_.remove_prefix(n);
+ bytes_remaining_ -= n;
+}
+
+inline void Cord::ChunkIterator::AdvanceBytes(size_t n) {
+ assert(bytes_remaining_ >= n);
+ if (ABSL_PREDICT_TRUE(n < current_chunk_.size())) {
+ RemoveChunkPrefix(n);
+ } else if (n != 0) {
+ btree_reader_ ? AdvanceBytesBtree(n) : AdvanceBytesSlowPath(n);
+ }
+}
+
+inline Cord::ChunkIterator Cord::chunk_begin() const {
+ return ChunkIterator(this);
+}
+
+inline Cord::ChunkIterator Cord::chunk_end() const { return ChunkIterator(); }
+
+inline Cord::ChunkIterator Cord::ChunkRange::begin() const {
+ return cord_->chunk_begin();
+}
+
+inline Cord::ChunkIterator Cord::ChunkRange::end() const {
+ return cord_->chunk_end();
+}
+
+inline Cord::ChunkRange Cord::Chunks() const { return ChunkRange(this); }
+
+inline Cord::CharIterator& Cord::CharIterator::operator++() {
+ if (ABSL_PREDICT_TRUE(chunk_iterator_->size() > 1)) {
+ chunk_iterator_.RemoveChunkPrefix(1);
+ } else {
+ ++chunk_iterator_;
+ }
+ return *this;
+}
+
+inline Cord::CharIterator Cord::CharIterator::operator++(int) {
+ CharIterator tmp(*this);
+ operator++();
+ return tmp;
+}
+
+inline bool Cord::CharIterator::operator==(const CharIterator& other) const {
+ return chunk_iterator_ == other.chunk_iterator_;
+}
+
+inline bool Cord::CharIterator::operator!=(const CharIterator& other) const {
+ return !(*this == other);
+}
+
+inline Cord::CharIterator::reference Cord::CharIterator::operator*() const {
+ return *chunk_iterator_->data();
+}
+
+inline Cord::CharIterator::pointer Cord::CharIterator::operator->() const {
+ return chunk_iterator_->data();
+}
+
+inline Cord Cord::AdvanceAndRead(CharIterator* it, size_t n_bytes) {
+ assert(it != nullptr);
+ return it->chunk_iterator_.AdvanceAndReadBytes(n_bytes);
+}
+
+inline void Cord::Advance(CharIterator* it, size_t n_bytes) {
+ assert(it != nullptr);
+ it->chunk_iterator_.AdvanceBytes(n_bytes);
+}
+
+inline y_absl::string_view Cord::ChunkRemaining(const CharIterator& it) {
+ return *it.chunk_iterator_;
+}
+
+inline Cord::CharIterator Cord::char_begin() const {
+ return CharIterator(this);
+}
+
+inline Cord::CharIterator Cord::char_end() const { return CharIterator(); }
+
+inline Cord::CharIterator Cord::CharRange::begin() const {
+ return cord_->char_begin();
+}
+
+inline Cord::CharIterator Cord::CharRange::end() const {
+ return cord_->char_end();
+}
+
+inline Cord::CharRange Cord::Chars() const { return CharRange(this); }
+
+inline void Cord::ForEachChunk(
+ y_absl::FunctionRef<void(y_absl::string_view)> callback) const {
+ y_absl::cord_internal::CordRep* rep = contents_.tree();
+ if (rep == nullptr) {
+ callback(y_absl::string_view(contents_.data(), contents_.size()));
+ } else {
+ return ForEachChunkAux(rep, callback);
+ }
+}
+
+// Nonmember Cord-to-Cord relational operators.
+inline bool operator==(const Cord& lhs, const Cord& rhs) {
+ if (lhs.contents_.IsSame(rhs.contents_)) return true;
+ size_t rhs_size = rhs.size();
+ if (lhs.size() != rhs_size) return false;
+ return lhs.EqualsImpl(rhs, rhs_size);
+}
+
+inline bool operator!=(const Cord& x, const Cord& y) { return !(x == y); }
+inline bool operator<(const Cord& x, const Cord& y) { return x.Compare(y) < 0; }
+inline bool operator>(const Cord& x, const Cord& y) { return x.Compare(y) > 0; }
+inline bool operator<=(const Cord& x, const Cord& y) {
+ return x.Compare(y) <= 0;
+}
+inline bool operator>=(const Cord& x, const Cord& y) {
+ return x.Compare(y) >= 0;
+}
+
+// Nonmember Cord-to-y_absl::string_view relational operators.
+//
+// Due to implicit conversions, these also enable comparisons of Cord with
+// TString, ::string, and const char*.
+inline bool operator==(const Cord& lhs, y_absl::string_view rhs) {
+ size_t lhs_size = lhs.size();
+ size_t rhs_size = rhs.size();
+ if (lhs_size != rhs_size) return false;
+ return lhs.EqualsImpl(rhs, rhs_size);
+}
+
+inline bool operator==(y_absl::string_view x, const Cord& y) { return y == x; }
+inline bool operator!=(const Cord& x, y_absl::string_view y) { return !(x == y); }
+inline bool operator!=(y_absl::string_view x, const Cord& y) { return !(x == y); }
+inline bool operator<(const Cord& x, y_absl::string_view y) {
+ return x.Compare(y) < 0;
+}
+inline bool operator<(y_absl::string_view x, const Cord& y) {
+ return y.Compare(x) > 0;
+}
+inline bool operator>(const Cord& x, y_absl::string_view y) { return y < x; }
+inline bool operator>(y_absl::string_view x, const Cord& y) { return y < x; }
+inline bool operator<=(const Cord& x, y_absl::string_view y) { return !(y < x); }
+inline bool operator<=(y_absl::string_view x, const Cord& y) { return !(y < x); }
+inline bool operator>=(const Cord& x, y_absl::string_view y) { return !(x < y); }
+inline bool operator>=(y_absl::string_view x, const Cord& y) { return !(x < y); }
+
+// Some internals exposed to test code.
+namespace strings_internal {
+class CordTestAccess {
+ public:
+ static size_t FlatOverhead();
+ static size_t MaxFlatLength();
+ static size_t SizeofCordRepConcat();
+ static size_t SizeofCordRepExternal();
+ static size_t SizeofCordRepSubstring();
+ static size_t FlatTagToLength(uint8_t tag);
+ static uint8_t LengthToTag(size_t s);
+};
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_CORD_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord/ya.make
new file mode 100644
index 00000000000..8f3ec1f20fc
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord/ya.make
@@ -0,0 +1,58 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp-tstring/y_absl/algorithm
+ contrib/restricted/abseil-cpp-tstring/y_absl/container
+ contrib/restricted/abseil-cpp-tstring/y_absl/functional
+ contrib/restricted/abseil-cpp-tstring/y_absl/types
+ contrib/restricted/abseil-cpp-tstring/y_absl/utility
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/strings)
+
+SRCS(
+ cord.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_test_helpers.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_test_helpers.h
new file mode 100644
index 00000000000..8dd7c057519
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_test_helpers.h
@@ -0,0 +1,122 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_STRINGS_CORD_TEST_HELPERS_H_
+#define ABSL_STRINGS_CORD_TEST_HELPERS_H_
+
+#include <cstdint>
+#include <iostream>
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/cord.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Cord sizes relevant for testing
+enum class TestCordSize {
+ // An empty value
+ kEmpty = 0,
+
+ // An inlined string value
+ kInlined = cord_internal::kMaxInline / 2 + 1,
+
+ // 'Well known' SSO lengths (excluding terminating zero).
+ // libstdcxx has a maximum SSO of 15, libc++ has a maximum SSO of 22.
+ kStringSso1 = 15,
+ kStringSso2 = 22,
+
+ // A string value which is too large to fit in inlined data, but small enough
+ // such that Cord prefers copying the value if possible, i.e.: not stealing
+ // TString inputs, or referencing existing CordReps on Append, etc.
+ kSmall = cord_internal::kMaxBytesToCopy / 2 + 1,
+
+ // A string value large enough that Cord prefers to reference or steal from
+ // existing inputs rather than copying contents of the input.
+ kMedium = cord_internal::kMaxFlatLength / 2 + 1,
+
+  // A string value large enough to cause it to be stored in multiple flats.
+ kLarge = cord_internal::kMaxFlatLength * 4
+};
+
+// To string helper
+inline y_absl::string_view ToString(TestCordSize size) {
+ switch (size) {
+ case TestCordSize::kEmpty:
+ return "Empty";
+ case TestCordSize::kInlined:
+ return "Inlined";
+ case TestCordSize::kSmall:
+ return "Small";
+ case TestCordSize::kStringSso1:
+ return "StringSso1";
+ case TestCordSize::kStringSso2:
+ return "StringSso2";
+ case TestCordSize::kMedium:
+ return "Medium";
+ case TestCordSize::kLarge:
+ return "Large";
+ }
+ return "???";
+}
+
+// Returns the length matching the specified size
+inline size_t Length(TestCordSize size) { return static_cast<size_t>(size); }
+
+// Stream output helper
+inline std::ostream& operator<<(std::ostream& stream, TestCordSize size) {
+ return stream << ToString(size);
+}
+
+// Creates a multi-segment Cord from an iterable container of strings. The
+// resulting Cord is guaranteed to have one segment for every string in the
+// container. This allows code to be unit tested with multi-segment Cord
+// inputs.
+//
+// Example:
+//
+// y_absl::Cord c = y_absl::MakeFragmentedCord({"A ", "fragmented ", "Cord"});
+// EXPECT_FALSE(c.GetFlat(&unused));
+//
+// The mechanism by which this Cord is created is an implementation detail. Any
+// implementation that produces a multi-segment Cord may produce a flat Cord in
+// the future as new optimizations are added to the Cord class.
+// MakeFragmentedCord will, however, always be updated to return a multi-segment
+// Cord.
+template <typename Container>
+Cord MakeFragmentedCord(const Container& c) {
+ Cord result;
+ for (const auto& s : c) {
+ auto* external = new TString(s);
+ Cord tmp = y_absl::MakeCordFromExternal(
+ *external, [external](y_absl::string_view) { delete external; });
+ tmp.Prepend(result);
+ result = tmp;
+ }
+ return result;
+}
+
+inline Cord MakeFragmentedCord(std::initializer_list<y_absl::string_view> list) {
+ return MakeFragmentedCord<std::initializer_list<y_absl::string_view>>(list);
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_CORD_TEST_HELPERS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cordz_test_helpers.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cordz_test_helpers.h
new file mode 100644
index 00000000000..bbb952f0733
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cordz_test_helpers.h
@@ -0,0 +1,151 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_CORDZ_TEST_HELPERS_H_
+#define ABSL_STRINGS_CORDZ_TEST_HELPERS_H_
+
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/strings/cord.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cordz_info.h"
+#include "y_absl/strings/internal/cordz_sample_token.h"
+#include "y_absl/strings/internal/cordz_statistics.h"
+#include "y_absl/strings/internal/cordz_update_tracker.h"
+#include "y_absl/strings/str_cat.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Returns the CordzInfo for the cord, or nullptr if the cord is not sampled.
+inline const cord_internal::CordzInfo* GetCordzInfoForTesting(
+ const Cord& cord) {
+ if (!cord.contents_.is_tree()) return nullptr;
+ return cord.contents_.cordz_info();
+}
+
+// Returns true if the provided cordz_info is in the list of sampled cords.
+inline bool CordzInfoIsListed(const cord_internal::CordzInfo* cordz_info,
+ cord_internal::CordzSampleToken token = {}) {
+ for (const cord_internal::CordzInfo& info : token) {
+ if (cordz_info == &info) return true;
+ }
+ return false;
+}
+
+// Matcher on Cord that verifies all of:
+// - the cord is sampled
+// - the CordzInfo of the cord is listed / discoverable.
+// - the reported CordzStatistics match the cord's actual properties
+// - the cord has an (initial) UpdateTracker count of 1 for `method`
+MATCHER_P(HasValidCordzInfoOf, method, "CordzInfo matches cord") {
+ const cord_internal::CordzInfo* cord_info = GetCordzInfoForTesting(arg);
+ if (cord_info == nullptr) {
+ *result_listener << "cord is not sampled";
+ return false;
+ }
+ if (!CordzInfoIsListed(cord_info)) {
+ *result_listener << "cord is sampled, but not listed";
+ return false;
+ }
+ cord_internal::CordzStatistics stat = cord_info->GetCordzStatistics();
+ if (stat.size != arg.size()) {
+ *result_listener << "cordz size " << stat.size
+ << " does not match cord size " << arg.size();
+ return false;
+ }
+ if (stat.update_tracker.Value(method) != 1) {
+ *result_listener << "Expected method count 1 for " << method << ", found "
+ << stat.update_tracker.Value(method);
+ return false;
+ }
+ return true;
+}
+
+// Matcher on Cord that verifies that the cord is sampled and that the CordzInfo
+// update tracker has 'method' with a call count of 'n'
+MATCHER_P2(CordzMethodCountEq, method, n,
+ y_absl::StrCat("CordzInfo method count equals ", n)) {
+ const cord_internal::CordzInfo* cord_info = GetCordzInfoForTesting(arg);
+ if (cord_info == nullptr) {
+ *result_listener << "cord is not sampled";
+ return false;
+ }
+ cord_internal::CordzStatistics stat = cord_info->GetCordzStatistics();
+ if (stat.update_tracker.Value(method) != n) {
+ *result_listener << "Expected method count " << n << " for " << method
+ << ", found " << stat.update_tracker.Value(method);
+ return false;
+ }
+ return true;
+}
+
+// Cordz will only update with a new rate once the previously scheduled event
+// has fired. When we disable Cordz, a long delay takes place where we won't
+// consider profiling new Cords. CordzSamplingIntervalHelper will burn through
+// that interval and allow for testing that assumes that the average sampling
+// interval is a particular value.
+class CordzSamplingIntervalHelper {
+ public:
+ explicit CordzSamplingIntervalHelper(int32_t interval)
+ : orig_mean_interval_(y_absl::cord_internal::get_cordz_mean_interval()) {
+ y_absl::cord_internal::set_cordz_mean_interval(interval);
+ y_absl::cord_internal::cordz_set_next_sample_for_testing(interval);
+ }
+
+ ~CordzSamplingIntervalHelper() {
+ y_absl::cord_internal::set_cordz_mean_interval(orig_mean_interval_);
+ y_absl::cord_internal::cordz_set_next_sample_for_testing(orig_mean_interval_);
+ }
+
+ private:
+ int32_t orig_mean_interval_;
+};
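+
+// Example (an illustrative sketch): a mean interval of 1 samples every Cord,
+// so a test can make deterministic Cordz assertions inside the scope.
+//
+//   {
+//     CordzSamplingIntervalHelper sample_every(1);
+//     y_absl::Cord cord("hello");
+//     EXPECT_THAT(cord,
+//                 HasValidCordzInfoOf(
+//                     cord_internal::CordzUpdateTracker::kConstructorString));
+//   }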
+
+// Wrapper struct managing a small CordRep `rep`
+struct TestCordRep {
+ cord_internal::CordRepFlat* rep;
+
+ TestCordRep() {
+ rep = cord_internal::CordRepFlat::New(100);
+ rep->length = 100;
+ memset(rep->Data(), 1, 100);
+ }
+ ~TestCordRep() { cord_internal::CordRep::Unref(rep); }
+};
+
+// Wrapper struct managing a small CordRep `rep`, and
+// an InlineData `data` initialized with that CordRep.
+struct TestCordData {
+ TestCordRep rep;
+ cord_internal::InlineData data{rep.rep};
+};
+
+// Creates a Cord that is not sampled
+template <typename... Args>
+Cord UnsampledCord(Args... args) {
+ CordzSamplingIntervalHelper never(9999);
+ Cord cord(std::forward<Args>(args)...);
+ ABSL_ASSERT(GetCordzInfoForTesting(cord) == nullptr);
+ return cord;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_CORDZ_TEST_HELPERS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.cc
new file mode 100644
index 00000000000..8c82740608c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.cc
@@ -0,0 +1,949 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/escaping.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <util/generic/string.h>
+
+#include "y_absl/base/internal/endian.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/unaligned_access.h"
+#include "y_absl/strings/internal/char_map.h"
+#include "y_absl/strings/internal/escaping.h"
+#include "y_absl/strings/internal/resize_uninitialized.h"
+#include "y_absl/strings/internal/utf8.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/str_join.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+// This constant is used for the `leave_nulls_escaped` argument to
+// CUnescapeInternal().
+constexpr bool kUnescapeNulls = false;
+
+inline bool is_octal_digit(char c) { return ('0' <= c) && (c <= '7'); }
+
+inline int hex_digit_to_int(char c) {
+ static_assert('0' == 0x30 && 'A' == 0x41 && 'a' == 0x61,
+ "Character set must be ASCII.");
+ assert(y_absl::ascii_isxdigit(c));
+ int x = static_cast<unsigned char>(c);
+ if (x > '9') {
+ x += 9;
+ }
+ return x & 0xf;
+}
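+
+// For instance (illustrative), hex_digit_to_int('7') == 7 and
+// hex_digit_to_int('b') == hex_digit_to_int('B') == 11: adding 9 before
+// masking with 0xf maps both letter cases onto the range 10..15.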
+
+inline bool IsSurrogate(char32_t c, y_absl::string_view src, TString* error) {
+ if (c >= 0xD800 && c <= 0xDFFF) {
+ if (error) {
+ *error = y_absl::StrCat("invalid surrogate character (0xD800-DFFF): \\",
+ src);
+ }
+ return true;
+ }
+ return false;
+}
+
+// ----------------------------------------------------------------------
+// CUnescapeInternal()
+// Implements both CUnescape() and CUnescapeForNullTerminatedString().
+//
+// Unescapes C escape sequences and is the reverse of CEscape().
+//
+// If 'source' is valid, stores the unescaped string and its size in
+// 'dest' and 'dest_len' respectively, and returns true. Otherwise
+// returns false and optionally stores the error description in
+// 'error'. Set 'error' to nullptr to disable error reporting.
+//
+// 'dest' should point to a buffer that is at least as big as 'source'.
+// 'source' and 'dest' may be the same.
+//
+// NOTE: any changes to this function must also be reflected in the older
+// UnescapeCEscapeSequences().
+// ----------------------------------------------------------------------
+bool CUnescapeInternal(y_absl::string_view source, bool leave_nulls_escaped,
+ char* dest, ptrdiff_t* dest_len, TString* error) {
+ char* d = dest;
+ const char* p = source.data();
+ const char* end = p + source.size();
+ const char* last_byte = end - 1;
+
+ // Small optimization for case where source = dest and there's no escaping
+ while (p == d && p < end && *p != '\\') p++, d++;
+
+ while (p < end) {
+ if (*p != '\\') {
+ *d++ = *p++;
+ } else {
+ if (++p > last_byte) { // skip past the '\\'
+ if (error) *error = "String cannot end with \\";
+ return false;
+ }
+ switch (*p) {
+ case 'a': *d++ = '\a'; break;
+ case 'b': *d++ = '\b'; break;
+ case 'f': *d++ = '\f'; break;
+ case 'n': *d++ = '\n'; break;
+ case 'r': *d++ = '\r'; break;
+ case 't': *d++ = '\t'; break;
+ case 'v': *d++ = '\v'; break;
+ case '\\': *d++ = '\\'; break;
+ case '?': *d++ = '\?'; break; // \? Who knew?
+ case '\'': *d++ = '\''; break;
+ case '"': *d++ = '\"'; break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7': {
+ // octal digit: 1 to 3 digits
+ const char* octal_start = p;
+ unsigned int ch = *p - '0';
+ if (p < last_byte && is_octal_digit(p[1])) ch = ch * 8 + *++p - '0';
+ if (p < last_byte && is_octal_digit(p[1]))
+ ch = ch * 8 + *++p - '0'; // now points at last digit
+ if (ch > 0xff) {
+ if (error) {
+ *error = "Value of \\" +
+ TString(octal_start, p + 1 - octal_start) +
+ " exceeds 0xff";
+ }
+ return false;
+ }
+ if ((ch == 0) && leave_nulls_escaped) {
+ // Copy the escape sequence for the null character
+ const ptrdiff_t octal_size = p + 1 - octal_start;
+ *d++ = '\\';
+ memmove(d, octal_start, octal_size);
+ d += octal_size;
+ break;
+ }
+ *d++ = ch;
+ break;
+ }
+ case 'x':
+ case 'X': {
+ if (p >= last_byte) {
+ if (error) *error = "String cannot end with \\x";
+ return false;
+ } else if (!y_absl::ascii_isxdigit(p[1])) {
+ if (error) *error = "\\x cannot be followed by a non-hex digit";
+ return false;
+ }
+ unsigned int ch = 0;
+ const char* hex_start = p;
+ while (p < last_byte && y_absl::ascii_isxdigit(p[1]))
+ // Arbitrarily many hex digits
+ ch = (ch << 4) + hex_digit_to_int(*++p);
+ if (ch > 0xFF) {
+ if (error) {
+ *error = "Value of \\" +
+ TString(hex_start, p + 1 - hex_start) +
+ " exceeds 0xff";
+ }
+ return false;
+ }
+ if ((ch == 0) && leave_nulls_escaped) {
+ // Copy the escape sequence for the null character
+ const ptrdiff_t hex_size = p + 1 - hex_start;
+ *d++ = '\\';
+ memmove(d, hex_start, hex_size);
+ d += hex_size;
+ break;
+ }
+ *d++ = ch;
+ break;
+ }
+ case 'u': {
+ // \uhhhh => convert 4 hex digits to UTF-8
+ char32_t rune = 0;
+ const char* hex_start = p;
+ if (p + 4 >= end) {
+ if (error) {
+ *error = "\\u must be followed by 4 hex digits: \\" +
+ TString(hex_start, p + 1 - hex_start);
+ }
+ return false;
+ }
+ for (int i = 0; i < 4; ++i) {
+ // Look one char ahead.
+ if (y_absl::ascii_isxdigit(p[1])) {
+ rune = (rune << 4) + hex_digit_to_int(*++p); // Advance p.
+ } else {
+ if (error) {
+ *error = "\\u must be followed by 4 hex digits: \\" +
+ TString(hex_start, p + 1 - hex_start);
+ }
+ return false;
+ }
+ }
+ if ((rune == 0) && leave_nulls_escaped) {
+ // Copy the escape sequence for the null character
+ *d++ = '\\';
+ memmove(d, hex_start, 5); // u0000
+ d += 5;
+ break;
+ }
+ if (IsSurrogate(rune, y_absl::string_view(hex_start, 5), error)) {
+ return false;
+ }
+ d += strings_internal::EncodeUTF8Char(d, rune);
+ break;
+ }
+ case 'U': {
+ // \Uhhhhhhhh => convert 8 hex digits to UTF-8
+ char32_t rune = 0;
+ const char* hex_start = p;
+ if (p + 8 >= end) {
+ if (error) {
+ *error = "\\U must be followed by 8 hex digits: \\" +
+ TString(hex_start, p + 1 - hex_start);
+ }
+ return false;
+ }
+ for (int i = 0; i < 8; ++i) {
+ // Look one char ahead.
+ if (y_absl::ascii_isxdigit(p[1])) {
+ // Don't change rune until we're sure this
+ // is within the Unicode limit, but do advance p.
+ uint32_t newrune = (rune << 4) + hex_digit_to_int(*++p);
+ if (newrune > 0x10FFFF) {
+ if (error) {
+ *error = "Value of \\" +
+ TString(hex_start, p + 1 - hex_start) +
+ " exceeds Unicode limit (0x10FFFF)";
+ }
+ return false;
+ } else {
+ rune = newrune;
+ }
+ } else {
+ if (error) {
+ *error = "\\U must be followed by 8 hex digits: \\" +
+ TString(hex_start, p + 1 - hex_start);
+ }
+ return false;
+ }
+ }
+ if ((rune == 0) && leave_nulls_escaped) {
+ // Copy the escape sequence for the null character
+ *d++ = '\\';
+ memmove(d, hex_start, 9); // U00000000
+ d += 9;
+ break;
+ }
+ if (IsSurrogate(rune, y_absl::string_view(hex_start, 9), error)) {
+ return false;
+ }
+ d += strings_internal::EncodeUTF8Char(d, rune);
+ break;
+ }
+ default: {
+ if (error) *error = TString("Unknown escape sequence: \\") + *p;
+ return false;
+ }
+ }
+ p++; // read past letter we escaped
+ }
+ }
+ *dest_len = d - dest;
+ return true;
+}
+
+// ----------------------------------------------------------------------
+// CUnescapeInternal()
+//
+// Same as above but uses a TString for output. 'source' and 'dest'
+// may be the same.
+// ----------------------------------------------------------------------
+bool CUnescapeInternal(y_absl::string_view source, bool leave_nulls_escaped,
+ TString* dest, TString* error) {
+ strings_internal::STLStringResizeUninitialized(dest, source.size());
+
+ ptrdiff_t dest_size;
+ if (!CUnescapeInternal(source,
+ leave_nulls_escaped,
+ &(*dest)[0],
+ &dest_size,
+ error)) {
+ return false;
+ }
+ dest->erase(dest_size);
+ return true;
+}
+
+// ----------------------------------------------------------------------
+// CEscape()
+// CHexEscape()
+// Utf8SafeCEscape()
+// Utf8SafeCHexEscape()
+// Escapes 'src' using C-style escape sequences. This is useful for
+// preparing query flags. The 'Hex' version uses hexadecimal rather than
+// octal sequences. The 'Utf8Safe' version does not touch UTF-8 bytes.
+//
+// Escaped chars: \n, \r, \t, ", ', \, and !y_absl::ascii_isprint().
+// ----------------------------------------------------------------------
+TString CEscapeInternal(y_absl::string_view src, bool use_hex,
+ bool utf8_safe) {
+ TString dest;
+ bool last_hex_escape = false; // true if last output char was \xNN.
+
+ for (unsigned char c : src) {
+ bool is_hex_escape = false;
+ switch (c) {
+ case '\n': dest.append("\\" "n"); break;
+ case '\r': dest.append("\\" "r"); break;
+ case '\t': dest.append("\\" "t"); break;
+ case '\"': dest.append("\\" "\""); break;
+ case '\'': dest.append("\\" "'"); break;
+ case '\\': dest.append("\\" "\\"); break;
+ default:
+ // Note that if we emit \xNN and the src character after that is a hex
+ // digit then that digit must be escaped too to prevent it being
+ // interpreted as part of the character code by C.
+ if ((!utf8_safe || c < 0x80) &&
+ (!y_absl::ascii_isprint(c) ||
+ (last_hex_escape && y_absl::ascii_isxdigit(c)))) {
+ if (use_hex) {
+ dest.append("\\" "x");
+ dest.push_back(numbers_internal::kHexChar[c / 16]);
+ dest.push_back(numbers_internal::kHexChar[c % 16]);
+ is_hex_escape = true;
+ } else {
+ dest.append("\\");
+ dest.push_back(numbers_internal::kHexChar[c / 64]);
+ dest.push_back(numbers_internal::kHexChar[(c % 64) / 8]);
+ dest.push_back(numbers_internal::kHexChar[c % 8]);
+ }
+ } else {
+ dest.push_back(c);
+ break;
+ }
+ }
+ last_hex_escape = is_hex_escape;
+ }
+
+ return dest;
+}
+
+/* clang-format off */
+constexpr char c_escaped_len[256] = {
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 4, 4, 2, 4, 4, // \t, \n, \r
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // ", '
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // '0'..'9'
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 'A'..'O'
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, // 'P'..'Z', '\'
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 'a'..'o'
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, // 'p'..'z', DEL
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+};
+/* clang-format on */
+
+// Calculates the length of the C-style escaped version of 'src'.
+// Assumes that non-printable characters are escaped using octal sequences, and
+// that UTF-8 bytes are not handled specially.
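+//
+// For example, CEscapedLength("a\nb") is 4: 'a' and 'b' cost one byte each,
+// while '\n' escapes to the two-byte sequence "\\n".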
+inline size_t CEscapedLength(y_absl::string_view src) {
+ size_t escaped_len = 0;
+ for (unsigned char c : src) escaped_len += c_escaped_len[c];
+ return escaped_len;
+}
+
+void CEscapeAndAppendInternal(y_absl::string_view src, TString* dest) {
+ size_t escaped_len = CEscapedLength(src);
+ if (escaped_len == src.size()) {
+ dest->append(src.data(), src.size());
+ return;
+ }
+
+ size_t cur_dest_len = dest->size();
+ strings_internal::STLStringResizeUninitialized(dest,
+ cur_dest_len + escaped_len);
+ char* append_ptr = &(*dest)[cur_dest_len];
+
+ for (unsigned char c : src) {
+ int char_len = c_escaped_len[c];
+ if (char_len == 1) {
+ *append_ptr++ = c;
+ } else if (char_len == 2) {
+ switch (c) {
+ case '\n':
+ *append_ptr++ = '\\';
+ *append_ptr++ = 'n';
+ break;
+ case '\r':
+ *append_ptr++ = '\\';
+ *append_ptr++ = 'r';
+ break;
+ case '\t':
+ *append_ptr++ = '\\';
+ *append_ptr++ = 't';
+ break;
+ case '\"':
+ *append_ptr++ = '\\';
+ *append_ptr++ = '\"';
+ break;
+ case '\'':
+ *append_ptr++ = '\\';
+ *append_ptr++ = '\'';
+ break;
+ case '\\':
+ *append_ptr++ = '\\';
+ *append_ptr++ = '\\';
+ break;
+ }
+ } else {
+ *append_ptr++ = '\\';
+ *append_ptr++ = '0' + c / 64;
+ *append_ptr++ = '0' + (c % 64) / 8;
+ *append_ptr++ = '0' + c % 8;
+ }
+ }
+}
+
+bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest,
+ size_t szdest, const signed char* unbase64,
+ size_t* len) {
+ static const char kPad64Equals = '=';
+ static const char kPad64Dot = '.';
+
+ size_t destidx = 0;
+ int decode = 0;
+ int state = 0;
+ unsigned int ch = 0;
+ unsigned int temp = 0;
+
+ // If "char" is signed by default, using *src as an array index results in
+ // accessing negative array elements. Treat the input as a pointer to
+ // unsigned char to avoid this.
+ const unsigned char* src = reinterpret_cast<const unsigned char*>(src_param);
+
+ // The GET_INPUT macro gets the next input character, skipping
+ // over any whitespace, and stopping when we reach the end of the
+ // string or when we read any non-data character. The arguments are
+ // an arbitrary identifier (used as a label for goto) and the number
+ // of data bytes that must remain in the input to avoid aborting the
+ // loop.
+#define GET_INPUT(label, remain) \
+ label: \
+ --szsrc; \
+ ch = *src++; \
+ decode = unbase64[ch]; \
+ if (decode < 0) { \
+ if (y_absl::ascii_isspace(ch) && szsrc >= remain) goto label; \
+ state = 4 - remain; \
+ break; \
+ }
+
+ // if dest is null, we're just checking to see if it's legal input
+ // rather than producing output. (I suspect this could just be done
+ // with a regexp...). We duplicate the loop so this test can be
+ // outside it instead of in every iteration.
+
+ if (dest) {
+ // This loop consumes 4 input bytes and produces 3 output bytes
+ // per iteration. We can't know at the start that there is enough
+ // data left in the string for a full iteration, so the loop may
+ // break out in the middle; if so 'state' will be set to the
+ // number of input bytes read.
+
+ while (szsrc >= 4) {
+ // We'll start by optimistically assuming that the next four
+ // bytes of the string (src[0..3]) are four good data bytes
+ // (that is, no nulls, whitespace, padding chars, or illegal
+ // chars). We need to test src[0..2] for nulls individually
+ // before constructing temp to preserve the property that we
+ // never read past a null in the string (no matter how long
+ // szsrc claims the string is).
+
+ if (!src[0] || !src[1] || !src[2] ||
+ ((temp = ((unsigned(unbase64[src[0]]) << 18) |
+ (unsigned(unbase64[src[1]]) << 12) |
+ (unsigned(unbase64[src[2]]) << 6) |
+ (unsigned(unbase64[src[3]])))) &
+ 0x80000000)) {
+ // Iff any of those four characters was bad (null, illegal,
+ // whitespace, padding), then temp's high bit will be set
+ // (because unbase64[] is -1 for all bad characters).
+ //
+ // We'll back up and resort to the slower decoder, which knows
+ // how to handle those cases.
+
+ GET_INPUT(first, 4);
+ temp = decode;
+ GET_INPUT(second, 3);
+ temp = (temp << 6) | decode;
+ GET_INPUT(third, 2);
+ temp = (temp << 6) | decode;
+ GET_INPUT(fourth, 1);
+ temp = (temp << 6) | decode;
+ } else {
+ // We really did have four good data bytes, so advance four
+ // characters in the string.
+
+ szsrc -= 4;
+ src += 4;
+ }
+
+ // temp has 24 bits of input, so write that out as three bytes.
+
+ if (destidx + 3 > szdest) return false;
+ dest[destidx + 2] = temp;
+ temp >>= 8;
+ dest[destidx + 1] = temp;
+ temp >>= 8;
+ dest[destidx] = temp;
+ destidx += 3;
+ }
+ } else {
+ while (szsrc >= 4) {
+ if (!src[0] || !src[1] || !src[2] ||
+ ((temp = ((unsigned(unbase64[src[0]]) << 18) |
+ (unsigned(unbase64[src[1]]) << 12) |
+ (unsigned(unbase64[src[2]]) << 6) |
+ (unsigned(unbase64[src[3]])))) &
+ 0x80000000)) {
+ GET_INPUT(first_no_dest, 4);
+ GET_INPUT(second_no_dest, 3);
+ GET_INPUT(third_no_dest, 2);
+ GET_INPUT(fourth_no_dest, 1);
+ } else {
+ szsrc -= 4;
+ src += 4;
+ }
+ destidx += 3;
+ }
+ }
+
+#undef GET_INPUT
+
+ // if the loop terminated because we read a bad character, return
+ // now.
+ if (decode < 0 && ch != kPad64Equals && ch != kPad64Dot &&
+ !y_absl::ascii_isspace(ch))
+ return false;
+
+ if (ch == kPad64Equals || ch == kPad64Dot) {
+ // if we stopped by hitting an '=' or '.', un-read that character -- we'll
+ // look at it again when we count to check for the proper number of
+ // equals signs at the end.
+ ++szsrc;
+ --src;
+ } else {
+ // This loop consumes 1 input byte per iteration. It's used to
+ // clean up the 0-3 input bytes remaining when the first, faster
+ // loop finishes. 'temp' contains the data from 'state' input
+ // characters read by the first loop.
+ while (szsrc > 0) {
+ --szsrc;
+ ch = *src++;
+ decode = unbase64[ch];
+ if (decode < 0) {
+ if (y_absl::ascii_isspace(ch)) {
+ continue;
+ } else if (ch == kPad64Equals || ch == kPad64Dot) {
+ // back up one character; we'll read it again when we check
+ // for the correct number of pad characters at the end.
+ ++szsrc;
+ --src;
+ break;
+ } else {
+ return false;
+ }
+ }
+
+ // Each input character gives us six bits of output.
+ temp = (temp << 6) | decode;
+ ++state;
+ if (state == 4) {
+ // If we've accumulated 24 bits of output, write that out as
+ // three bytes.
+ if (dest) {
+ if (destidx + 3 > szdest) return false;
+ dest[destidx + 2] = temp;
+ temp >>= 8;
+ dest[destidx + 1] = temp;
+ temp >>= 8;
+ dest[destidx] = temp;
+ }
+ destidx += 3;
+ state = 0;
+ temp = 0;
+ }
+ }
+ }
+
+ // Process the leftover data contained in 'temp' at the end of the input.
+ int expected_equals = 0;
+ switch (state) {
+ case 0:
+ // Nothing left over; output is a multiple of 3 bytes.
+ break;
+
+ case 1:
+ // Bad input; we have 6 bits left over.
+ return false;
+
+ case 2:
+ // Produce one more output byte from the 12 input bits we have left.
+ if (dest) {
+ if (destidx + 1 > szdest) return false;
+ temp >>= 4;
+ dest[destidx] = temp;
+ }
+ ++destidx;
+ expected_equals = 2;
+ break;
+
+ case 3:
+ // Produce two more output bytes from the 18 input bits we have left.
+ if (dest) {
+ if (destidx + 2 > szdest) return false;
+ temp >>= 2;
+ dest[destidx + 1] = temp;
+ temp >>= 8;
+ dest[destidx] = temp;
+ }
+ destidx += 2;
+ expected_equals = 1;
+ break;
+
+ default:
+ // state should have no other values at this point.
+ ABSL_RAW_LOG(FATAL, "This can't happen; base64 decoder state = %d",
+ state);
+ }
+
+ // The remainder of the string should be all whitespace, mixed with
+ // exactly 0 equals signs, or exactly 'expected_equals' equals
+ // signs. (Always accepting 0 equals signs is an Abseil extension
+ // not covered in the RFC, as is accepting dot as the pad character.)
+
+ int equals = 0;
+ while (szsrc > 0) {
+ if (*src == kPad64Equals || *src == kPad64Dot)
+ ++equals;
+ else if (!y_absl::ascii_isspace(*src))
+ return false;
+ --szsrc;
+ ++src;
+ }
+
+ const bool ok = (equals == 0 || equals == expected_equals);
+ if (ok) *len = destidx;
+ return ok;
+}
+
+// The arrays below were generated by the following code
+// #include <stdio.h>
+// #include <string.h>
+// int main()
+// {
+// static const char Base64[] =
+// "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+// char* pos;
+// int idx, i, j;
+// printf(" ");
+// for (i = 0; i < 255; i += 8) {
+// for (j = i; j < i + 8; j++) {
+// pos = strchr(Base64, j);
+// if ((pos == nullptr) || (j == 0))
+// idx = -1;
+// else
+// idx = pos - Base64;
+// if (idx == -1)
+// printf(" %2d, ", idx);
+// else
+// printf(" %2d/*%c*/,", idx, j);
+// }
+// printf("\n ");
+// }
+// }
+//
+// where the value of "Base64[]" was replaced by one of the base-64 conversion
+// tables from the functions below.
+/* clang-format off */
+constexpr signed char kUnBase64[] = {
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 62/*+*/, -1, -1, -1, 63/*/ */,
+ 52/*0*/, 53/*1*/, 54/*2*/, 55/*3*/, 56/*4*/, 57/*5*/, 58/*6*/, 59/*7*/,
+ 60/*8*/, 61/*9*/, -1, -1, -1, -1, -1, -1,
+ -1, 0/*A*/, 1/*B*/, 2/*C*/, 3/*D*/, 4/*E*/, 5/*F*/, 6/*G*/,
+  7/*H*/, 8/*I*/, 9/*J*/, 10/*K*/, 11/*L*/, 12/*M*/, 13/*N*/, 14/*O*/,
+ 15/*P*/, 16/*Q*/, 17/*R*/, 18/*S*/, 19/*T*/, 20/*U*/, 21/*V*/, 22/*W*/,
+ 23/*X*/, 24/*Y*/, 25/*Z*/, -1, -1, -1, -1, -1,
+ -1, 26/*a*/, 27/*b*/, 28/*c*/, 29/*d*/, 30/*e*/, 31/*f*/, 32/*g*/,
+ 33/*h*/, 34/*i*/, 35/*j*/, 36/*k*/, 37/*l*/, 38/*m*/, 39/*n*/, 40/*o*/,
+ 41/*p*/, 42/*q*/, 43/*r*/, 44/*s*/, 45/*t*/, 46/*u*/, 47/*v*/, 48/*w*/,
+ 49/*x*/, 50/*y*/, 51/*z*/, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+constexpr signed char kUnWebSafeBase64[] = {
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 62/*-*/, -1, -1,
+ 52/*0*/, 53/*1*/, 54/*2*/, 55/*3*/, 56/*4*/, 57/*5*/, 58/*6*/, 59/*7*/,
+ 60/*8*/, 61/*9*/, -1, -1, -1, -1, -1, -1,
+ -1, 0/*A*/, 1/*B*/, 2/*C*/, 3/*D*/, 4/*E*/, 5/*F*/, 6/*G*/,
+  7/*H*/, 8/*I*/, 9/*J*/, 10/*K*/, 11/*L*/, 12/*M*/, 13/*N*/, 14/*O*/,
+ 15/*P*/, 16/*Q*/, 17/*R*/, 18/*S*/, 19/*T*/, 20/*U*/, 21/*V*/, 22/*W*/,
+ 23/*X*/, 24/*Y*/, 25/*Z*/, -1, -1, -1, -1, 63/*_*/,
+ -1, 26/*a*/, 27/*b*/, 28/*c*/, 29/*d*/, 30/*e*/, 31/*f*/, 32/*g*/,
+ 33/*h*/, 34/*i*/, 35/*j*/, 36/*k*/, 37/*l*/, 38/*m*/, 39/*n*/, 40/*o*/,
+ 41/*p*/, 42/*q*/, 43/*r*/, 44/*s*/, 45/*t*/, 46/*u*/, 47/*v*/, 48/*w*/,
+ 49/*x*/, 50/*y*/, 51/*z*/, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1
+};
+/* clang-format on */
+
+constexpr char kWebSafeBase64Chars[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+
+template <typename String>
+bool Base64UnescapeInternal(const char* src, size_t slen, String* dest,
+ const signed char* unbase64) {
+ // Determine the size of the output string. Base64 encodes every 3 bytes into
+  // 4 characters. Any leftover chars are added directly for good measure.
+ // This is documented in the base64 RFC: http://tools.ietf.org/html/rfc3548
+ const size_t dest_len = 3 * (slen / 4) + (slen % 4);
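+  // For example, a 4-character input such as "YWJj" gives dest_len == 3,
+  // while the 8-character, padded "YWJjZA==" gives dest_len == 6, trimmed
+  // down to the 4 decoded bytes by the erase below.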
+
+ strings_internal::STLStringResizeUninitialized(dest, dest_len);
+
+  // The destination buffer is the start of the string's storage, treated
+  // as a char*.
+ size_t len;
+ const bool ok =
+ Base64UnescapeInternal(src, slen, &(*dest)[0], dest_len, unbase64, &len);
+ if (!ok) {
+ dest->clear();
+ return false;
+ }
+
+ // could be shorter if there was padding
+ assert(len <= dest_len);
+ dest->erase(len);
+
+ return true;
+}
+
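+// Maps a hex digit to its 4-bit value; every non-hex character "leniently"
+// maps to 0, so callers must validate their input separately if malformed
+// hex matters.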
+/* clang-format off */
+constexpr char kHexValueLenient[256] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, // '0'..'9'
+ 0, 10, 11, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 'A'..'F'
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 10, 11, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 'a'..'f'
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+/* clang-format on */
+
+// This is a templated function so that T can be either a char*
+// or a string. This works because we use the [] operator to access
+// individual characters at a time.
+template <typename T>
+void HexStringToBytesInternal(const char* from, T to, ptrdiff_t num) {
+ for (int i = 0; i < num; i++) {
+ to[i] = (kHexValueLenient[from[i * 2] & 0xFF] << 4) +
+ (kHexValueLenient[from[i * 2 + 1] & 0xFF]);
+ }
+}
+
+// This is a templated function so that T can be either a char* or a
+// TString.
+template <typename T>
+void BytesToHexStringInternal(const unsigned char* src, T dest, ptrdiff_t num) {
+ auto dest_ptr = &dest[0];
+ for (auto src_ptr = src; src_ptr != (src + num); ++src_ptr, dest_ptr += 2) {
+ const char* hex_p = &numbers_internal::kHexTable[*src_ptr * 2];
+ std::copy(hex_p, hex_p + 2, dest_ptr);
+ }
+}
+
+} // namespace
+
+// ----------------------------------------------------------------------
+// CUnescape()
+//
+// See CUnescapeInternal() for implementation details.
+// ----------------------------------------------------------------------
+bool CUnescape(y_absl::string_view source, TString* dest,
+ TString* error) {
+ return CUnescapeInternal(source, kUnescapeNulls, dest, error);
+}
+
+TString CEscape(y_absl::string_view src) {
+ TString dest;
+ CEscapeAndAppendInternal(src, &dest);
+ return dest;
+}
+
+TString CHexEscape(y_absl::string_view src) {
+ return CEscapeInternal(src, true, false);
+}
+
+TString Utf8SafeCEscape(y_absl::string_view src) {
+ return CEscapeInternal(src, false, true);
+}
+
+TString Utf8SafeCHexEscape(y_absl::string_view src) {
+ return CEscapeInternal(src, true, true);
+}
+
+// ----------------------------------------------------------------------
+// Base64Unescape() - base64 decoder
+// Base64Escape() - base64 encoder
+// WebSafeBase64Unescape() - Google's variation of base64 decoder
+// WebSafeBase64Escape() - Google's variation of base64 encoder
+//
+// See http://tools.ietf.org/html/rfc2045 for the formal description; what
+// we care about is this:
+// Take the encoded stuff in groups of 4 characters and turn each
+// character into a code 0 to 63 thus:
+// A-Z map to 0 to 25
+// a-z map to 26 to 51
+// 0-9 map to 52 to 61
+// +(- for WebSafe) maps to 62
+// /(_ for WebSafe) maps to 63
+//   There will be four numbers, all less than 64, each representable as a
+//   6-digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
+// Arrange the 6 digit binary numbers into three bytes as such:
+// aaaaaabb bbbbcccc ccdddddd
+// Equals signs (one or two) are used at the end of the encoded block to
+// indicate that the text was not an integer multiple of three bytes long.
+// ----------------------------------------------------------------------
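+//
+// A quick round trip for illustration: Base64Escape("abc") regroups the
+// bits of 'a', 'b', 'c' into the 6-bit values 24, 22, 9, 35 and produces
+// "YWJj"; Base64Unescape("YWJj", &dest) restores the original "abc".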
+
+bool Base64Unescape(y_absl::string_view src, TString* dest) {
+ return Base64UnescapeInternal(src.data(), src.size(), dest, kUnBase64);
+}
+
+bool WebSafeBase64Unescape(y_absl::string_view src, TString* dest) {
+ return Base64UnescapeInternal(src.data(), src.size(), dest, kUnWebSafeBase64);
+}
+
+void Base64Escape(y_absl::string_view src, TString* dest) {
+ strings_internal::Base64EscapeInternal(
+ reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
+ true, strings_internal::kBase64Chars);
+}
+
+void WebSafeBase64Escape(y_absl::string_view src, TString* dest) {
+ strings_internal::Base64EscapeInternal(
+ reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
+ false, kWebSafeBase64Chars);
+}
+
+TString Base64Escape(y_absl::string_view src) {
+ TString dest;
+ strings_internal::Base64EscapeInternal(
+ reinterpret_cast<const unsigned char*>(src.data()), src.size(), &dest,
+ true, strings_internal::kBase64Chars);
+ return dest;
+}
+
+TString WebSafeBase64Escape(y_absl::string_view src) {
+ TString dest;
+ strings_internal::Base64EscapeInternal(
+ reinterpret_cast<const unsigned char*>(src.data()), src.size(), &dest,
+ false, kWebSafeBase64Chars);
+ return dest;
+}
+
+TString HexStringToBytes(y_absl::string_view from) {
+ TString result;
+ const auto num = from.size() / 2;
+ strings_internal::STLStringResizeUninitialized(&result, num);
+ y_absl::HexStringToBytesInternal<TString&>(from.data(), result, num);
+ return result;
+}
+
+TString BytesToHexString(y_absl::string_view from) {
+ TString result;
+ strings_internal::STLStringResizeUninitialized(&result, 2 * from.size());
+ y_absl::BytesToHexStringInternal<TString&>(
+ reinterpret_cast<const unsigned char*>(from.data()), result, from.size());
+ return result;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.h
new file mode 100644
index 00000000000..8868b87879b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.h
@@ -0,0 +1,164 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: escaping.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains string utilities involved in escaping and
+// unescaping strings in various ways.
+
+#ifndef ABSL_STRINGS_ESCAPING_H_
+#define ABSL_STRINGS_ESCAPING_H_
+
+#include <cstddef>
+#include <util/generic/string.h>
+#include <vector>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/strings/ascii.h"
+#include "y_absl/strings/str_join.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// CUnescape()
+//
+// Unescapes a `source` string and copies it into `dest`, rewriting C-style
+// escape sequences (https://en.cppreference.com/w/cpp/language/escape) into
+// their proper code point equivalents, returning `true` if successful.
+//
+// The following unescape sequences can be handled:
+//
+// * ASCII escape sequences ('\n','\r','\\', etc.) to their ASCII equivalents
+// * Octal escape sequences ('\nnn') to byte nnn. The unescaped value must
+// resolve to a single byte or an error will occur. E.g. values greater than
+// 0xff will produce an error.
+// * Hexadecimal escape sequences ('\xnn') to byte nn. While an arbitrary
+// number of following digits are allowed, the unescaped value must resolve
+// to a single byte or an error will occur. E.g. '\x0045' is equivalent to
+// '\x45', but '\x1234' will produce an error.
+//   * Unicode escape sequences ('\unnnn' for exactly four hex digits or
+//     '\Unnnnnnnn' for exactly eight hex digits), which will be encoded in
+//     UTF-8. (E.g., `\u2019` unescapes to the three bytes 0xE2, 0x80, and
+//     0x99.)
+//
+// If any errors are encountered, this function returns `false`, leaving the
+// `dest` output parameter in an unspecified state, and stores the first
+// encountered error in `error`. To disable error reporting, set `error` to
+// `nullptr` or use the overload with no error reporting below.
+//
+// Example:
+//
+// TString s = "foo\\rbar\\nbaz\\t";
+// TString unescaped_s;
+//   if (!y_absl::CUnescape(s, &unescaped_s)) {
+// ...
+// }
+// EXPECT_EQ(unescaped_s, "foo\rbar\nbaz\t");
+bool CUnescape(y_absl::string_view source, TString* dest, TString* error);
+
+// Overload of `CUnescape()` with no error reporting.
+inline bool CUnescape(y_absl::string_view source, TString* dest) {
+ return CUnescape(source, dest, nullptr);
+}
+
+// CEscape()
+//
+// Escapes a 'src' string using C-style escape sequences
+// (https://en.cppreference.com/w/cpp/language/escape), escaping other
+// non-printable/non-whitespace bytes as octal sequences (e.g. "\377").
+//
+// Example:
+//
+// TString s = "foo\rbar\tbaz\010\011\012\013\014\x0d\n";
+// TString escaped_s = y_absl::CEscape(s);
+// EXPECT_EQ(escaped_s, "foo\\rbar\\tbaz\\010\\t\\n\\013\\014\\r\\n");
+TString CEscape(y_absl::string_view src);
+
+// CHexEscape()
+//
+// Escapes a 'src' string using C-style escape sequences, escaping
+// other non-printable/non-whitespace bytes as hexadecimal sequences (e.g.
+// "\xFF").
+//
+// Example:
+//
+// TString s = "foo\rbar\tbaz\010\011\012\013\014\x0d\n";
+// TString escaped_s = y_absl::CHexEscape(s);
+// EXPECT_EQ(escaped_s, "foo\\rbar\\tbaz\\x08\\t\\n\\x0b\\x0c\\r\\n");
+TString CHexEscape(y_absl::string_view src);
+
+// Utf8SafeCEscape()
+//
+// Escapes a 'src' string using C-style escape sequences, escaping bytes as
+// octal sequences, and passing through UTF-8 characters without conversion.
+// I.e., when encountering any bytes with their high bit set, this function
+// will not escape those values, whether or not they are valid UTF-8.
+TString Utf8SafeCEscape(y_absl::string_view src);
+
+// Utf8SafeCHexEscape()
+//
+// Escapes a 'src' string using C-style escape sequences, escaping bytes as
+// hexadecimal sequences, and passing through UTF-8 characters without
+// conversion.
+TString Utf8SafeCHexEscape(y_absl::string_view src);
+
+// Base64Unescape()
+//
+// Converts a `src` string encoded in Base64 to its binary equivalent, writing
+// it to a `dest` buffer, returning `true` on success. If `src` contains
+// invalid characters, `dest` is cleared and `false` is returned.
+bool Base64Unescape(y_absl::string_view src, TString* dest);
+
+// WebSafeBase64Unescape()
+//
+// Converts a `src` string encoded in web-safe Base64 (using '-' instead of
+// '+' and '_' instead of '/') to its binary equivalent, writing it to a
+// `dest` buffer. If `src` contains invalid characters, `dest` is cleared
+// and `false` is returned.
+bool WebSafeBase64Unescape(y_absl::string_view src, TString* dest);
+
+// Base64Escape()
+//
+// Encodes a `src` string into a base64-encoded string, with padding characters.
+// This function conforms with RFC 4648 section 4 (base64).
+void Base64Escape(y_absl::string_view src, TString* dest);
+TString Base64Escape(y_absl::string_view src);
+
+// WebSafeBase64Escape()
+//
+// Encodes a `src` string into a base64-like string, using '-' instead of '+'
+// and '_' instead of '/', and without padding. This function conforms with RFC
+// 4648 section 5 (base64url).
+void WebSafeBase64Escape(y_absl::string_view src, TString* dest);
+TString WebSafeBase64Escape(y_absl::string_view src);
+
+// HexStringToBytes()
+//
+// Converts an ASCII hex string into bytes, returning binary data of length
+// `from.size()/2`.
+TString HexStringToBytes(y_absl::string_view from);
+
+// BytesToHexString()
+//
+// Converts binary data into an ASCII text string, returning a string of size
+// `2*from.size()`.
+TString BytesToHexString(y_absl::string_view from);
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_ESCAPING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal/ya.make
new file mode 100644
index 00000000000..42b7b6cd5e2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal/ya.make
@@ -0,0 +1,42 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal)
+
+SRCS(
+ cord_internal.cc
+ cord_rep_btree.cc
+ cord_rep_btree_navigator.cc
+ cord_rep_btree_reader.cc
+ cord_rep_consume.cc
+ cord_rep_ring.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal/ya.make
new file mode 100644
index 00000000000..4e57fc75f66
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal/ya.make
@@ -0,0 +1,35 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal)
+
+SRCS(
+ escaping.cc
+ ostringstream.cc
+ utf8.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/char_map.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/char_map.h
new file mode 100644
index 00000000000..25428e304c2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/char_map.h
@@ -0,0 +1,156 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Character Map Class
+//
+// A fast, bit-vector map for 8-bit unsigned characters.
+// This class is useful for non-character purposes as well.
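+//
+// A minimal usage sketch (illustrative only):
+//
+//   using y_absl::strings_internal::Charmap;
+//   constexpr Charmap kVowels = Charmap::FromString("aeiou");
+//   static_assert(kVowels.contains('e'), "e is a vowel");
+//   static_assert(!kVowels.contains('x'), "x is not a vowel");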
+
+#ifndef ABSL_STRINGS_INTERNAL_CHAR_MAP_H_
+#define ABSL_STRINGS_INTERNAL_CHAR_MAP_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+class Charmap {
+ public:
+ constexpr Charmap() : m_() {}
+
+  // Initializes with a given char*. Note that NUL is not treated as
+  // a terminator, but rather as a char whose bit gets set in the map.
+ Charmap(const char* str, int len) : m_() {
+ while (len--) SetChar(*str++);
+ }
+
+ // Initializes with a given char*. NUL is treated as a terminator
+ // and will not be in the charmap.
+ explicit Charmap(const char* str) : m_() {
+ while (*str) SetChar(*str++);
+ }
+
+ constexpr bool contains(unsigned char c) const {
+ return (m_[c / 64] >> (c % 64)) & 0x1;
+ }
+
+ // Returns true if and only if a character exists in both maps.
+ bool IntersectsWith(const Charmap& c) const {
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(m_); ++i) {
+ if ((m_[i] & c.m_[i]) != 0) return true;
+ }
+ return false;
+ }
+
+ bool IsZero() const {
+ for (uint64_t c : m_) {
+ if (c != 0) return false;
+ }
+ return true;
+ }
+
+ // Containing only a single specified char.
+ static constexpr Charmap Char(char x) {
+ return Charmap(CharMaskForWord(x, 0), CharMaskForWord(x, 1),
+ CharMaskForWord(x, 2), CharMaskForWord(x, 3));
+ }
+
+ // Containing all the chars in the C-string 's'.
+ // Note that this is expensively recursive because of the C++11 constexpr
+ // formulation. Use only in constexpr initializers.
+ static constexpr Charmap FromString(const char* s) {
+ return *s == 0 ? Charmap() : (Char(*s) | FromString(s + 1));
+ }
+
+ // Containing all the chars in the closed interval [lo,hi].
+ static constexpr Charmap Range(char lo, char hi) {
+ return Charmap(RangeForWord(lo, hi, 0), RangeForWord(lo, hi, 1),
+ RangeForWord(lo, hi, 2), RangeForWord(lo, hi, 3));
+ }
+
+ friend constexpr Charmap operator&(const Charmap& a, const Charmap& b) {
+ return Charmap(a.m_[0] & b.m_[0], a.m_[1] & b.m_[1], a.m_[2] & b.m_[2],
+ a.m_[3] & b.m_[3]);
+ }
+
+ friend constexpr Charmap operator|(const Charmap& a, const Charmap& b) {
+ return Charmap(a.m_[0] | b.m_[0], a.m_[1] | b.m_[1], a.m_[2] | b.m_[2],
+ a.m_[3] | b.m_[3]);
+ }
+
+ friend constexpr Charmap operator~(const Charmap& a) {
+ return Charmap(~a.m_[0], ~a.m_[1], ~a.m_[2], ~a.m_[3]);
+ }
+
+ private:
+ constexpr Charmap(uint64_t b0, uint64_t b1, uint64_t b2, uint64_t b3)
+ : m_{b0, b1, b2, b3} {}
+
+ static constexpr uint64_t RangeForWord(unsigned char lo, unsigned char hi,
+ uint64_t word) {
+ return OpenRangeFromZeroForWord(hi + 1, word) &
+ ~OpenRangeFromZeroForWord(lo, word);
+ }
+
+ // All the chars in the specified word of the range [0, upper).
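+  // For example, upper == 70 with word == 1 selects chars 64..69, i.e. the
+  // low six bits of that word: ~uint64_t{0} >> (64 - 70 % 64).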
+ static constexpr uint64_t OpenRangeFromZeroForWord(uint64_t upper,
+ uint64_t word) {
+ return (upper <= 64 * word)
+ ? 0
+ : (upper >= 64 * (word + 1))
+ ? ~static_cast<uint64_t>(0)
+ : (~static_cast<uint64_t>(0) >> (64 - upper % 64));
+ }
+
+ static constexpr uint64_t CharMaskForWord(unsigned char x, uint64_t word) {
+ return (x / 64 == word) ? (static_cast<uint64_t>(1) << (x % 64)) : 0;
+ }
+
+ private:
+ void SetChar(unsigned char c) {
+ m_[c / 64] |= static_cast<uint64_t>(1) << (c % 64);
+ }
+
+ uint64_t m_[4];
+};
+
+// Mirror the char-classifying predicates in <cctype>
+constexpr Charmap UpperCharmap() { return Charmap::Range('A', 'Z'); }
+constexpr Charmap LowerCharmap() { return Charmap::Range('a', 'z'); }
+constexpr Charmap DigitCharmap() { return Charmap::Range('0', '9'); }
+constexpr Charmap AlphaCharmap() { return LowerCharmap() | UpperCharmap(); }
+constexpr Charmap AlnumCharmap() { return DigitCharmap() | AlphaCharmap(); }
+constexpr Charmap XDigitCharmap() {
+ return DigitCharmap() | Charmap::Range('A', 'F') | Charmap::Range('a', 'f');
+}
+constexpr Charmap PrintCharmap() { return Charmap::Range(0x20, 0x7e); }
+constexpr Charmap SpaceCharmap() { return Charmap::FromString("\t\n\v\f\r "); }
+constexpr Charmap CntrlCharmap() {
+ return Charmap::Range(0, 0x7f) & ~PrintCharmap();
+}
+constexpr Charmap BlankCharmap() { return Charmap::FromString("\t "); }
+constexpr Charmap GraphCharmap() { return PrintCharmap() & ~SpaceCharmap(); }
+constexpr Charmap PunctCharmap() { return GraphCharmap() & ~AlnumCharmap(); }
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CHAR_MAP_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.cc
new file mode 100644
index 00000000000..72a4fa188b0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.cc
@@ -0,0 +1,359 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/charconv_bigint.h"
+
+#include <algorithm>
+#include <cassert>
+#include <util/generic/string.h>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+namespace {
+
+// Table containing some large powers of 5, for fast computation.
+
+// Constant step size for entries in the kLargePowersOfFive table. Each entry
+// is larger than the previous entry by a factor of 5**kLargePowerOfFiveStep
+// (or 5**27).
+//
+// In other words, the Nth entry in the table is 5**(27*N).
+//
+// 5**27 is the largest power of 5 that fits in 64 bits.
+constexpr int kLargePowerOfFiveStep = 27;
+
+// The largest legal index into the kLargePowersOfFive table.
+//
+// In other words, the largest precomputed power of 5 is 5**(27*20).
+constexpr int kLargestPowerOfFiveIndex = 20;
+
+// Table of powers of (5**27), up to (5**27)**20 == 5**540.
+//
+// Used to generate large powers of 5 while limiting the number of repeated
+// multiplications required.
+//
+// clang-format off
+const uint32_t kLargePowersOfFive[] = {
+// 5**27 (i=1), start=0, end=2
+ 0xfa10079dU, 0x6765c793U,
+// 5**54 (i=2), start=2, end=6
+ 0x97d9f649U, 0x6664242dU, 0x29939b14U, 0x29c30f10U,
+// 5**81 (i=3), start=6, end=12
+ 0xc4f809c5U, 0x7bf3f22aU, 0x67bdae34U, 0xad340517U, 0x369d1b5fU, 0x10de1593U,
+// 5**108 (i=4), start=12, end=20
+ 0x92b260d1U, 0x9efff7c7U, 0x81de0ec6U, 0xaeba5d56U, 0x410664a4U, 0x4f40737aU,
+ 0x20d3846fU, 0x06d00f73U,
+// 5**135 (i=5), start=20, end=30
+ 0xff1b172dU, 0x13a1d71cU, 0xefa07617U, 0x7f682d3dU, 0xff8c90c0U, 0x3f0131e7U,
+ 0x3fdcb9feU, 0x917b0177U, 0x16c407a7U, 0x02c06b9dU,
+// 5**162 (i=6), start=30, end=42
+ 0x960f7199U, 0x056667ecU, 0xe07aefd8U, 0x80f2b9ccU, 0x8273f5e3U, 0xeb9a214aU,
+ 0x40b38005U, 0x0e477ad4U, 0x277d08e6U, 0xfa28b11eU, 0xd3f7d784U, 0x011c835bU,
+// 5**189 (i=7), start=42, end=56
+ 0xf723d9d5U, 0x3282d3f3U, 0xe00857d1U, 0x69659d25U, 0x2cf117cfU, 0x24da6d07U,
+ 0x954d1417U, 0x3e5d8cedU, 0x7a8bb766U, 0xfd785ae6U, 0x645436d2U, 0x40c78b34U,
+ 0x94151217U, 0x0072e9f7U,
+// 5**216 (i=8), start=56, end=72
+ 0x2b416aa1U, 0x7893c5a7U, 0xe37dc6d4U, 0x2bad2beaU, 0xf0fc846cU, 0x7575ae4bU,
+ 0x62587b14U, 0x83b67a34U, 0x02110cdbU, 0xf7992f55U, 0x00deb022U, 0xa4a23becU,
+ 0x8af5c5cdU, 0xb85b654fU, 0x818df38bU, 0x002e69d2U,
+// 5**243 (i=9), start=72, end=90
+ 0x3518cbbdU, 0x20b0c15fU, 0x38756c2fU, 0xfb5dc3ddU, 0x22ad2d94U, 0xbf35a952U,
+ 0xa699192aU, 0x9a613326U, 0xad2a9cedU, 0xd7f48968U, 0xe87dfb54U, 0xc8f05db6U,
+ 0x5ef67531U, 0x31c1ab49U, 0xe202ac9fU, 0x9b2957b5U, 0xa143f6d3U, 0x0012bf07U,
+// 5**270 (i=10), start=90, end=110
+ 0x8b971de9U, 0x21aba2e1U, 0x63944362U, 0x57172336U, 0xd9544225U, 0xfb534166U,
+ 0x08c563eeU, 0x14640ee2U, 0x24e40d31U, 0x02b06537U, 0x03887f14U, 0x0285e533U,
+ 0xb744ef26U, 0x8be3a6c4U, 0x266979b4U, 0x6761ece2U, 0xd9cb39e4U, 0xe67de319U,
+ 0x0d39e796U, 0x00079250U,
+// 5**297 (i=11), start=110, end=132
+ 0x260eb6e5U, 0xf414a796U, 0xee1a7491U, 0xdb9368ebU, 0xf50c105bU, 0x59157750U,
+ 0x9ed2fb5cU, 0xf6e56d8bU, 0xeaee8d23U, 0x0f319f75U, 0x2aa134d6U, 0xac2908e9U,
+ 0xd4413298U, 0x02f02a55U, 0x989d5a7aU, 0x70dde184U, 0xba8040a7U, 0x03200981U,
+ 0xbe03b11cU, 0x3c1c2a18U, 0xd60427a1U, 0x00030ee0U,
+// 5**324 (i=12), start=132, end=156
+ 0xce566d71U, 0xf1c4aa25U, 0x4e93ca53U, 0xa72283d0U, 0x551a73eaU, 0x3d0538e2U,
+ 0x8da4303fU, 0x6a58de60U, 0x0e660221U, 0x49cf61a6U, 0x8d058fc1U, 0xb9d1a14cU,
+ 0x4bab157dU, 0xc85c6932U, 0x518c8b9eU, 0x9b92b8d0U, 0x0d8a0e21U, 0xbd855df9U,
+ 0xb3ea59a1U, 0x8da29289U, 0x4584d506U, 0x3752d80fU, 0xb72569c6U, 0x00013c33U,
+// 5**351 (i=13), start=156, end=182
+ 0x190f354dU, 0x83695cfeU, 0xe5a4d0c7U, 0xb60fb7e8U, 0xee5bbcc4U, 0xb922054cU,
+ 0xbb4f0d85U, 0x48394028U, 0x1d8957dbU, 0x0d7edb14U, 0x4ecc7587U, 0x505e9e02U,
+ 0x4c87f36bU, 0x99e66bd6U, 0x44b9ed35U, 0x753037d4U, 0xe5fe5f27U, 0x2742c203U,
+ 0x13b2ed2bU, 0xdc525d2cU, 0xe6fde59aU, 0x77ffb18fU, 0x13c5752cU, 0x08a84bccU,
+ 0x859a4940U, 0x00007fb6U,
+// 5**378 (i=14), start=182, end=210
+ 0x4f98cb39U, 0xa60edbbcU, 0x83b5872eU, 0xa501acffU, 0x9cc76f78U, 0xbadd4c73U,
+ 0x43e989faU, 0xca7acf80U, 0x2e0c824fU, 0xb19f4ffcU, 0x092fd81cU, 0xe4eb645bU,
+ 0xa1ff84c2U, 0x8a5a83baU, 0xa8a1fae9U, 0x1db43609U, 0xb0fed50bU, 0x0dd7d2bdU,
+ 0x7d7accd8U, 0x91fa640fU, 0x37dcc6c5U, 0x1c417fd5U, 0xe4d462adU, 0xe8a43399U,
+ 0x131bf9a5U, 0x8df54d29U, 0x36547dc1U, 0x00003395U,
+// 5**405 (i=15), start=210, end=240
+ 0x5bd330f5U, 0x77d21967U, 0x1ac481b7U, 0x6be2f7ceU, 0x7f4792a9U, 0xe84c2c52U,
+ 0x84592228U, 0x9dcaf829U, 0xdab44ce1U, 0x3d0c311bU, 0x532e297dU, 0x4704e8b4U,
+ 0x9cdc32beU, 0x41e64d9dU, 0x7717bea1U, 0xa824c00dU, 0x08f50b27U, 0x0f198d77U,
+ 0x49bbfdf0U, 0x025c6c69U, 0xd4e55cd3U, 0xf083602bU, 0xb9f0fecdU, 0xc0864aeaU,
+ 0x9cb98681U, 0xaaf620e9U, 0xacb6df30U, 0x4faafe66U, 0x8af13c3bU, 0x000014d5U,
+// 5**432 (i=16), start=240, end=272
+ 0x682bb941U, 0x89a9f297U, 0xcba75d7bU, 0x404217b1U, 0xb4e519e9U, 0xa1bc162bU,
+ 0xf7f5910aU, 0x98715af5U, 0x2ff53e57U, 0xe3ef118cU, 0x490c4543U, 0xbc9b1734U,
+ 0x2affbe4dU, 0x4cedcb4cU, 0xfb14e99eU, 0x35e34212U, 0xece39c24U, 0x07673ab3U,
+ 0xe73115ddU, 0xd15d38e7U, 0x093eed3bU, 0xf8e7eac5U, 0x78a8cc80U, 0x25227aacU,
+ 0x3f590551U, 0x413da1cbU, 0xdf643a55U, 0xab65ad44U, 0xd70b23d7U, 0xc672cd76U,
+ 0x3364ea62U, 0x0000086aU,
+// 5**459 (i=17), start=272, end=306
+ 0x22f163ddU, 0x23cf07acU, 0xbe2af6c2U, 0xf412f6f6U, 0xc3ff541eU, 0x6eeaf7deU,
+ 0xa47047e0U, 0x408cda92U, 0x0f0eeb08U, 0x56deba9dU, 0xcfc6b090U, 0x8bbbdf04U,
+ 0x3933cdb3U, 0x9e7bb67dU, 0x9f297035U, 0x38946244U, 0xee1d37bbU, 0xde898174U,
+ 0x63f3559dU, 0x705b72fbU, 0x138d27d9U, 0xf8603a78U, 0x735eec44U, 0xe30987d5U,
+ 0xc6d38070U, 0x9cfe548eU, 0x9ff01422U, 0x7c564aa8U, 0x91cc60baU, 0xcbc3565dU,
+ 0x7550a50bU, 0x6909aeadU, 0x13234c45U, 0x00000366U,
+// 5**486 (i=18), start=306, end=342
+ 0x17954989U, 0x3a7d7709U, 0x98042de5U, 0xa9011443U, 0x45e723c2U, 0x269ffd6fU,
+ 0x58852a46U, 0xaaa1042aU, 0x2eee8153U, 0xb2b6c39eU, 0xaf845b65U, 0xf6c365d7U,
+ 0xe4cffb2bU, 0xc840e90cU, 0xabea8abbU, 0x5c58f8d2U, 0x5c19fa3aU, 0x4670910aU,
+ 0x4449f21cU, 0xefa645b3U, 0xcc427decU, 0x083c3d73U, 0x467cb413U, 0x6fe10ae4U,
+ 0x3caffc72U, 0x9f8da55eU, 0x5e5c8ea7U, 0x490594bbU, 0xf0871b0bU, 0xdd89816cU,
+ 0x8e931df8U, 0xe85ce1c9U, 0xcca090a5U, 0x575fa16bU, 0x6b9f106cU, 0x0000015fU,
+// 5**513 (i=19), start=342, end=380
+ 0xee20d805U, 0x57bc3c07U, 0xcdea624eU, 0xd3f0f52dU, 0x9924b4f4U, 0xcf968640U,
+ 0x61d41962U, 0xe87fb464U, 0xeaaf51c7U, 0x564c8b60U, 0xccda4028U, 0x529428bbU,
+ 0x313a1fa8U, 0x96bd0f94U, 0x7a82ebaaU, 0xad99e7e9U, 0xf2668cd4U, 0xbe33a45eU,
+ 0xfd0db669U, 0x87ee369fU, 0xd3ec20edU, 0x9c4d7db7U, 0xdedcf0d8U, 0x7cd2ca64U,
+ 0xe25a6577U, 0x61003fd4U, 0xe56f54ccU, 0x10b7c748U, 0x40526e5eU, 0x7300ae87U,
+ 0x5c439261U, 0x2c0ff469U, 0xbf723f12U, 0xb2379b61U, 0xbf59b4f5U, 0xc91b1c3fU,
+ 0xf0046d27U, 0x0000008dU,
+// 5**540 (i=20), start=380, end=420
+ 0x525c9e11U, 0xf4e0eb41U, 0xebb2895dU, 0x5da512f9U, 0x7d9b29d4U, 0x452f4edcU,
+ 0x0b90bc37U, 0x341777cbU, 0x63d269afU, 0x1da77929U, 0x0a5c1826U, 0x77991898U,
+ 0x5aeddf86U, 0xf853a877U, 0x538c31ccU, 0xe84896daU, 0xb7a0010bU, 0x17ef4de5U,
+ 0xa52a2adeU, 0x029fd81cU, 0x987ce701U, 0x27fefd77U, 0xdb46c66fU, 0x5d301900U,
+ 0x496998c0U, 0xbb6598b9U, 0x5eebb607U, 0xe547354aU, 0xdf4a2f7eU, 0xf06c4955U,
+ 0x96242ffaU, 0x1775fb27U, 0xbecc58ceU, 0xebf2a53bU, 0x3eaad82aU, 0xf41137baU,
+ 0x573e6fbaU, 0xfb4866b8U, 0x54002148U, 0x00000039U,
+};
+// clang-format on
+
+// Returns a pointer to the big integer data for (5**27)**i. i must be
+// between 1 and 20, inclusive.
+const uint32_t* LargePowerOfFiveData(int i) {
+ return kLargePowersOfFive + i * (i - 1);
+}
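+// (The offset i * (i - 1) is the cumulative size of the preceding entries:
+// entry k occupies 2 * k words, so entry i begins at
+// 2 + 4 + ... + 2 * (i - 1) == i * (i - 1).)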
+
+// Returns the size of the big integer data for (5**27)**i, in words. i must be
+// between 1 and 20, inclusive.
+int LargePowerOfFiveSize(int i) { return 2 * i; }
+} // namespace
+
+ABSL_DLL const uint32_t kFiveToNth[14] = {
+ 1, 5, 25, 125, 625, 3125, 15625,
+ 78125, 390625, 1953125, 9765625, 48828125, 244140625, 1220703125,
+};
+
+ABSL_DLL const uint32_t kTenToNth[10] = {
+ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000,
+};
+
+template <int max_words>
+int BigUnsigned<max_words>::ReadFloatMantissa(const ParsedFloat& fp,
+ int significant_digits) {
+ SetToZero();
+ assert(fp.type == FloatType::kNumber);
+
+ if (fp.subrange_begin == nullptr) {
+ // We already exactly parsed the mantissa, so no more work is necessary.
+ words_[0] = fp.mantissa & 0xffffffffu;
+ words_[1] = fp.mantissa >> 32;
+ if (words_[1]) {
+ size_ = 2;
+ } else if (words_[0]) {
+ size_ = 1;
+ }
+ return fp.exponent;
+ }
+ int exponent_adjust =
+ ReadDigits(fp.subrange_begin, fp.subrange_end, significant_digits);
+ return fp.literal_exponent + exponent_adjust;
+}
+
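+// Illustrative walk-through of ReadDigits(): for the input "00123.4500",
+// the leading and trailing zeroes are stripped, the remaining digits
+// accumulate into the integer 12345, and the returned exponent adjustment
+// is -2, since the parsed value is 12345 * 10**-2 == 123.45.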
+template <int max_words>
+int BigUnsigned<max_words>::ReadDigits(const char* begin, const char* end,
+ int significant_digits) {
+ assert(significant_digits <= Digits10() + 1);
+ SetToZero();
+
+ bool after_decimal_point = false;
+ // Discard any leading zeroes before the decimal point
+ while (begin < end && *begin == '0') {
+ ++begin;
+ }
+ int dropped_digits = 0;
+ // Discard any trailing zeroes. These may or may not be after the decimal
+ // point.
+ while (begin < end && *std::prev(end) == '0') {
+ --end;
+ ++dropped_digits;
+ }
+ if (begin < end && *std::prev(end) == '.') {
+ // If the string ends in '.', either before or after dropping zeroes, then
+ // drop the decimal point and look for more digits to drop.
+ dropped_digits = 0;
+ --end;
+ while (begin < end && *std::prev(end) == '0') {
+ --end;
+ ++dropped_digits;
+ }
+ } else if (dropped_digits) {
+ // We dropped digits, and aren't sure if they're before or after the decimal
+ // point. Figure that out now.
+ const char* dp = std::find(begin, end, '.');
+ if (dp != end) {
+ // The dropped trailing digits were after the decimal point, so don't
+ // count them.
+ dropped_digits = 0;
+ }
+ }
+ // Any non-fraction digits we dropped need to be accounted for in our exponent
+ // adjustment.
+ int exponent_adjust = dropped_digits;
+
+ uint32_t queued = 0;
+ int digits_queued = 0;
+ for (; begin != end && significant_digits > 0; ++begin) {
+ if (*begin == '.') {
+ after_decimal_point = true;
+ continue;
+ }
+ if (after_decimal_point) {
+ // For each fractional digit we emit in our parsed integer, adjust our
+ // decimal exponent to compensate.
+ --exponent_adjust;
+ }
+ int digit = (*begin - '0');
+ --significant_digits;
+ if (significant_digits == 0 && std::next(begin) != end &&
+ (digit == 0 || digit == 5)) {
+ // If this is the very last significant digit, but insignificant digits
+ // remain, we know that the last of those remaining significant digits is
+ // nonzero. (If it wasn't, we would have stripped it before we got here.)
+ // So if this final digit is a 0 or 5, adjust it upward by 1.
+ //
+ // This adjustment is what allows incredibly large mantissas ending in
+ // 500000...000000000001 to correctly round up, rather than to nearest.
+ ++digit;
+ }
+ queued = 10 * queued + digit;
+ ++digits_queued;
+ if (digits_queued == kMaxSmallPowerOfTen) {
+ MultiplyBy(kTenToNth[kMaxSmallPowerOfTen]);
+ AddWithCarry(0, queued);
+ queued = digits_queued = 0;
+ }
+ }
+ // Encode any remaining digits.
+ if (digits_queued) {
+ MultiplyBy(kTenToNth[digits_queued]);
+ AddWithCarry(0, queued);
+ }
+
+ // If any insignificant digits remain, we will drop them. But if we have not
+ // yet read the decimal point, then we have to adjust the exponent to account
+ // for the dropped digits.
+ if (begin < end && !after_decimal_point) {
+ // This call to std::find will result in a pointer either to the decimal
+ // point, or to the end of our buffer if there was none.
+ //
+ // Either way, [begin, decimal_point) will contain the set of dropped digits
+ // that require an exponent adjustment.
+ const char* decimal_point = std::find(begin, end, '.');
+ exponent_adjust += (decimal_point - begin);
+ }
+ return exponent_adjust;
+}
+
+template <int max_words>
+/* static */ BigUnsigned<max_words> BigUnsigned<max_words>::FiveToTheNth(
+ int n) {
+ BigUnsigned answer(1u);
+
+ // Seed from the table of large powers, if possible.
+ bool first_pass = true;
+ while (n >= kLargePowerOfFiveStep) {
+ int big_power =
+ std::min(n / kLargePowerOfFiveStep, kLargestPowerOfFiveIndex);
+ if (first_pass) {
+ // just copy, rather than multiplying by 1
+ std::copy(
+ LargePowerOfFiveData(big_power),
+ LargePowerOfFiveData(big_power) + LargePowerOfFiveSize(big_power),
+ answer.words_);
+ answer.size_ = LargePowerOfFiveSize(big_power);
+ first_pass = false;
+ } else {
+ answer.MultiplyBy(LargePowerOfFiveSize(big_power),
+ LargePowerOfFiveData(big_power));
+ }
+ n -= kLargePowerOfFiveStep * big_power;
+ }
+ answer.MultiplyByFiveToTheNth(n);
+ return answer;
+}
+
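+// MultiplyStep computes one 32-bit column of the schoolbook product: it
+// sums words_[this_i] * other_words[other_i] over all pairs with
+// this_i + other_i == step, stores the low 32 bits at words_[step], and
+// pushes the carries upward via AddWithCarry.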
+template <int max_words>
+void BigUnsigned<max_words>::MultiplyStep(int original_size,
+ const uint32_t* other_words,
+ int other_size, int step) {
+ int this_i = std::min(original_size - 1, step);
+ int other_i = step - this_i;
+
+ uint64_t this_word = 0;
+ uint64_t carry = 0;
+ for (; this_i >= 0 && other_i < other_size; --this_i, ++other_i) {
+ uint64_t product = words_[this_i];
+ product *= other_words[other_i];
+ this_word += product;
+ carry += (this_word >> 32);
+ this_word &= 0xffffffff;
+ }
+ AddWithCarry(step + 1, carry);
+ words_[step] = this_word & 0xffffffff;
+ if (this_word > 0 && size_ <= step) {
+ size_ = step + 1;
+ }
+}
+
+template <int max_words>
+TString BigUnsigned<max_words>::ToString() const {
+ BigUnsigned<max_words> copy = *this;
+ TString result;
+ // Build result in reverse order
+ while (copy.size() > 0) {
+ int next_digit = copy.DivMod<10>();
+ result.push_back('0' + next_digit);
+ }
+ if (result.empty()) {
+ result.push_back('0');
+ }
+ std::reverse(result.begin(), result.vend());
+ return result;
+}
+
+template class BigUnsigned<4>;
+template class BigUnsigned<84>;
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.h
new file mode 100644
index 00000000000..a77aab14dd2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.h
@@ -0,0 +1,423 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CHARCONV_BIGINT_H_
+#define ABSL_STRINGS_INTERNAL_CHARCONV_BIGINT_H_
+
+#include <algorithm>
+#include <cstdint>
+#include <iostream>
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/ascii.h"
+#include "y_absl/strings/internal/charconv_parse.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+// The largest power to which 5 can be raised and still fit in a uint32_t.
+constexpr int kMaxSmallPowerOfFive = 13;
+// The largest power to which 10 can be raised and still fit in a uint32_t.
+constexpr int kMaxSmallPowerOfTen = 9;
+
+ABSL_DLL extern const uint32_t
+ kFiveToNth[kMaxSmallPowerOfFive + 1];
+ABSL_DLL extern const uint32_t kTenToNth[kMaxSmallPowerOfTen + 1];
+
+// Large, fixed-width unsigned integer.
+//
+// Exact rounding for decimal-to-binary floating point conversion requires very
+// large integer math, but a design goal of y_absl::from_chars is to avoid
+// allocating memory. The integer precision needed for decimal-to-binary
+// conversions is large but bounded, so a huge fixed-width integer class
+// suffices.
+//
+// This is an intentionally limited big integer class. Only needed operations
+// are implemented. All storage lives in an array data member, and all
+// arithmetic is done in-place, to avoid requiring separate storage for operand
+// and result.
+//
+// This is an internal class. Some methods live in the .cc file, and are
+// instantiated only for the values of max_words we need.
+template <int max_words>
+class BigUnsigned {
+ public:
+ static_assert(max_words == 4 || max_words == 84,
+ "unsupported max_words value");
+
+ BigUnsigned() : size_(0), words_{} {}
+ explicit constexpr BigUnsigned(uint64_t v)
+ : size_((v >> 32) ? 2 : v ? 1 : 0),
+ words_{static_cast<uint32_t>(v & 0xffffffffu),
+ static_cast<uint32_t>(v >> 32)} {}
+
+ // Constructs a BigUnsigned from the given string_view containing a decimal
+ // value. If the input string is not a decimal integer, constructs a 0
+ // instead.
+ explicit BigUnsigned(y_absl::string_view sv) : size_(0), words_{} {
+ // Check for valid input, returning a 0 otherwise. This is reasonable
+ // behavior only because this constructor is for unit tests.
+ if (std::find_if_not(sv.begin(), sv.end(), ascii_isdigit) != sv.end() ||
+ sv.empty()) {
+ return;
+ }
+ int exponent_adjust =
+ ReadDigits(sv.data(), sv.data() + sv.size(), Digits10() + 1);
+ if (exponent_adjust > 0) {
+ MultiplyByTenToTheNth(exponent_adjust);
+ }
+ }
+
+ // Loads the mantissa value of a previously-parsed float.
+ //
+ // Returns the associated decimal exponent. The value of the parsed float is
+ // exactly *this * 10**exponent.
+ int ReadFloatMantissa(const ParsedFloat& fp, int significant_digits);
+
+ // Returns the number of decimal digits of precision this type provides. All
+ // numbers with this many decimal digits or fewer are representable by this
+ // type.
+ //
+  // Analogous to std::numeric_limits<BigUnsigned>::digits10.
+ static constexpr int Digits10() {
+ // 9975007/1035508 is very slightly less than log10(2**32).
+ return static_cast<uint64_t>(max_words) * 9975007 / 1035508;
+ }
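+
+  // For example (illustrative): with max_words == 4 (128 bits) this yields
+  // 4 * 9975007 / 1035508 == 38, matching digits10 for a 128-bit unsigned
+  // integer; with max_words == 84 it yields 809.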
+
+ // Shifts left by the given number of bits.
+ void ShiftLeft(int count) {
+ if (count > 0) {
+ const int word_shift = count / 32;
+ if (word_shift >= max_words) {
+ SetToZero();
+ return;
+ }
+ size_ = (std::min)(size_ + word_shift, max_words);
+ count %= 32;
+ if (count == 0) {
+ std::copy_backward(words_, words_ + size_ - word_shift, words_ + size_);
+ } else {
+ for (int i = (std::min)(size_, max_words - 1); i > word_shift; --i) {
+ words_[i] = (words_[i - word_shift] << count) |
+ (words_[i - word_shift - 1] >> (32 - count));
+ }
+ words_[word_shift] = words_[0] << count;
+ // Grow size_ if necessary.
+ if (size_ < max_words && words_[size_]) {
+ ++size_;
+ }
+ }
+ std::fill(words_, words_ + word_shift, 0u);
+ }
+ }
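+
+  // For example (illustrative): if *this holds a single word x in words_[0],
+  // ShiftLeft(32) moves x to words_[1] (a pure word shift), while
+  // ShiftLeft(40) spreads x << 8 across words_[1] and words_[2].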
+
+ // Multiplies by v in-place.
+ void MultiplyBy(uint32_t v) {
+ if (size_ == 0 || v == 1) {
+ return;
+ }
+ if (v == 0) {
+ SetToZero();
+ return;
+ }
+ const uint64_t factor = v;
+ uint64_t window = 0;
+ for (int i = 0; i < size_; ++i) {
+ window += factor * words_[i];
+ words_[i] = window & 0xffffffff;
+ window >>= 32;
+ }
+ // If carry bits remain and there's space for them, grow size_.
+ if (window && size_ < max_words) {
+ words_[size_] = window & 0xffffffff;
+ ++size_;
+ }
+ }
+
+ void MultiplyBy(uint64_t v) {
+ uint32_t words[2];
+ words[0] = static_cast<uint32_t>(v);
+ words[1] = static_cast<uint32_t>(v >> 32);
+ if (words[1] == 0) {
+ MultiplyBy(words[0]);
+ } else {
+ MultiplyBy(2, words);
+ }
+ }
+
+ // Multiplies in place by 5 to the power of n. n must be non-negative.
+ void MultiplyByFiveToTheNth(int n) {
+ while (n >= kMaxSmallPowerOfFive) {
+ MultiplyBy(kFiveToNth[kMaxSmallPowerOfFive]);
+ n -= kMaxSmallPowerOfFive;
+ }
+ if (n > 0) {
+ MultiplyBy(kFiveToNth[n]);
+ }
+ }
+
+ // Multiplies in place by 10 to the power of n. n must be non-negative.
+ void MultiplyByTenToTheNth(int n) {
+ if (n > kMaxSmallPowerOfTen) {
+ // For large n, raise to a power of 5, then shift left by the same amount.
+ // (10**n == 5**n * 2**n.) This requires fewer multiplications overall.
+ MultiplyByFiveToTheNth(n);
+ ShiftLeft(n);
+ } else if (n > 0) {
+ // We can do this more quickly for very small N by using a single
+ // multiplication.
+ MultiplyBy(kTenToNth[n]);
+ }
+ }
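+
+  // For example (illustrative): MultiplyByTenToTheNth(12) decomposes into
+  // MultiplyByFiveToTheNth(12) followed by ShiftLeft(12), per the identity
+  // 10**12 == 5**12 * 2**12, whereas MultiplyByTenToTheNth(9) is the single
+  // multiplication MultiplyBy(kTenToNth[9]).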
+
+ // Returns the value of 5**n, for non-negative n. This implementation uses
+  // a lookup table, and is faster than seeding a BigUnsigned with 1 and calling
+ // MultiplyByFiveToTheNth().
+ static BigUnsigned FiveToTheNth(int n);
+
+ // Multiplies by another BigUnsigned, in-place.
+ template <int M>
+ void MultiplyBy(const BigUnsigned<M>& other) {
+ MultiplyBy(other.size(), other.words());
+ }
+
+ void SetToZero() {
+ std::fill(words_, words_ + size_, 0u);
+ size_ = 0;
+ }
+
+ // Returns the value of the nth word of this BigUnsigned. This is
+ // range-checked, and returns 0 on out-of-bounds accesses.
+ uint32_t GetWord(int index) const {
+ if (index < 0 || index >= size_) {
+ return 0;
+ }
+ return words_[index];
+ }
+
+ // Returns this integer as a decimal string. This is not used in the decimal-
+ // to-binary conversion; it is intended to aid in testing.
+ TString ToString() const;
+
+ int size() const { return size_; }
+ const uint32_t* words() const { return words_; }
+
+ private:
+  // Reads the number in [begin, end), possibly containing a decimal point,
+  // into this BigUnsigned.
+ //
+ // Callers are required to ensure [begin, end) contains a valid number, with
+ // one or more decimal digits and at most one decimal point. This routine
+ // will behave unpredictably if these preconditions are not met.
+ //
+ // Only the first `significant_digits` digits are read. Digits beyond this
+ // limit are "sticky": If the final significant digit is 0 or 5, and if any
+ // dropped digit is nonzero, then that final significant digit is adjusted up
+ // to 1 or 6. This adjustment allows for precise rounding.
+ //
+ // Returns `exponent_adjustment`, a power-of-ten exponent adjustment to
+ // account for the decimal point and for dropped significant digits. After
+ // this function returns,
+ // actual_value_of_parsed_string ~= *this * 10**exponent_adjustment.
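+  //
+  // For example (illustrative): reading "12350009" with significant_digits
+  // == 4 captures 1235; the dropped digits include a nonzero '9' and the
+  // final captured digit is a 5, so it is adjusted up to 6, yielding 1236
+  // with an exponent adjustment of +4.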
+ int ReadDigits(const char* begin, const char* end, int significant_digits);
+
+ // Performs a step of big integer multiplication. This computes the full
+ // (64-bit-wide) values that should be added at the given index (step), and
+ // adds to that location in-place.
+ //
+ // Because our math all occurs in place, we must multiply starting from the
+ // highest word working downward. (This is a bit more expensive due to the
+ // extra carries involved.)
+ //
+ // This must be called in steps, for each word to be calculated, starting from
+ // the high end and working down to 0. The first value of `step` should be
+ // `std::min(original_size + other.size_ - 2, max_words - 1)`.
+ // The reason for this expression is that multiplying the i'th word from one
+ // multiplicand and the j'th word of another multiplicand creates a
+ // two-word-wide value to be stored at the (i+j)'th element. The highest
+ // word indices we will access are `original_size - 1` from this object, and
+ // `other.size_ - 1` from our operand. Therefore,
+ // `original_size + other.size_ - 2` is the first step we should calculate,
+  // capped at an upper bound of max_words - 1.
+  //
+ // Working from high-to-low ensures that we do not overwrite the portions of
+ // the initial value of *this which are still needed for later steps.
+ //
+ // Once called with step == 0, *this contains the result of the
+ // multiplication.
+ //
+ // `original_size` is the size_ of *this before the first call to
+ // MultiplyStep(). `other_words` and `other_size` are the contents of our
+ // operand. `step` is the step to perform, as described above.
+ void MultiplyStep(int original_size, const uint32_t* other_words,
+ int other_size, int step);
+
+ void MultiplyBy(int other_size, const uint32_t* other_words) {
+ const int original_size = size_;
+ const int first_step =
+ (std::min)(original_size + other_size - 2, max_words - 1);
+ for (int step = first_step; step >= 0; --step) {
+ MultiplyStep(original_size, other_words, other_size, step);
+ }
+ }
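+
+  // For example (illustrative): multiplying two 2-word values with
+  // max_words == 4 gives first_step == min(2 + 2 - 2, 3) == 2, so
+  // MultiplyStep() runs for steps 2, 1, and 0, in that order.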
+
+ // Adds a 32-bit value to the index'th word, with carry.
+ void AddWithCarry(int index, uint32_t value) {
+ if (value) {
+ while (index < max_words && value > 0) {
+ words_[index] += value;
+ // carry if we overflowed in this word:
+ if (value > words_[index]) {
+ value = 1;
+ ++index;
+ } else {
+ value = 0;
+ }
+ }
+ size_ = (std::min)(max_words, (std::max)(index + 1, size_));
+ }
+ }
+
+ void AddWithCarry(int index, uint64_t value) {
+ if (value && index < max_words) {
+ uint32_t high = value >> 32;
+ uint32_t low = value & 0xffffffff;
+ words_[index] += low;
+ if (words_[index] < low) {
+ ++high;
+ if (high == 0) {
+ // Carry from the low word caused our high word to overflow.
+ // Short circuit here to do the right thing.
+ AddWithCarry(index + 2, static_cast<uint32_t>(1));
+ return;
+ }
+ }
+ if (high > 0) {
+ AddWithCarry(index + 1, high);
+ } else {
+ // Normally 32-bit AddWithCarry() sets size_, but since we don't call
+ // it when `high` is 0, do it ourselves here.
+ size_ = (std::min)(max_words, (std::max)(index + 1, size_));
+ }
+ }
+ }
+
+ // Divide this in place by a constant divisor. Returns the remainder of the
+ // division.
+ template <uint32_t divisor>
+ uint32_t DivMod() {
+ uint64_t accumulator = 0;
+ for (int i = size_ - 1; i >= 0; --i) {
+ accumulator <<= 32;
+ accumulator += words_[i];
+      // accumulator / divisor will never overflow a uint32_t in this loop
+ words_[i] = static_cast<uint32_t>(accumulator / divisor);
+ accumulator = accumulator % divisor;
+ }
+ while (size_ > 0 && words_[size_ - 1] == 0) {
+ --size_;
+ }
+ return static_cast<uint32_t>(accumulator);
+ }
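+
+  // For example (illustrative): if *this holds the value 1234, DivMod<10>()
+  // leaves *this holding 123 and returns 4; ToString() builds its output by
+  // repeating exactly this step.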
+
+ // The number of elements in words_ that may carry significant values.
+ // All elements beyond this point are 0.
+ //
+ // When size_ is 0, this BigUnsigned stores the value 0.
+  // When size_ is nonzero, it is *not* guaranteed that words_[size_ - 1] is
+ // nonzero. This can occur due to overflow truncation.
+ // In particular, x.size_ != y.size_ does *not* imply x != y.
+ int size_;
+ uint32_t words_[max_words];
+};
+
+// Compares two big integer instances.
+//
+// Returns -1 if lhs < rhs, 0 if lhs == rhs, and 1 if lhs > rhs.
+template <int N, int M>
+int Compare(const BigUnsigned<N>& lhs, const BigUnsigned<M>& rhs) {
+ int limit = (std::max)(lhs.size(), rhs.size());
+ for (int i = limit - 1; i >= 0; --i) {
+ const uint32_t lhs_word = lhs.GetWord(i);
+ const uint32_t rhs_word = rhs.GetWord(i);
+ if (lhs_word < rhs_word) {
+ return -1;
+ } else if (lhs_word > rhs_word) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+template <int N, int M>
+bool operator==(const BigUnsigned<N>& lhs, const BigUnsigned<M>& rhs) {
+ int limit = (std::max)(lhs.size(), rhs.size());
+ for (int i = 0; i < limit; ++i) {
+ if (lhs.GetWord(i) != rhs.GetWord(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <int N, int M>
+bool operator!=(const BigUnsigned<N>& lhs, const BigUnsigned<M>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <int N, int M>
+bool operator<(const BigUnsigned<N>& lhs, const BigUnsigned<M>& rhs) {
+ return Compare(lhs, rhs) == -1;
+}
+
+template <int N, int M>
+bool operator>(const BigUnsigned<N>& lhs, const BigUnsigned<M>& rhs) {
+ return rhs < lhs;
+}
+
+template <int N, int M>
+bool operator<=(const BigUnsigned<N>& lhs, const BigUnsigned<M>& rhs) {
+  return !(rhs < lhs);
+}
+
+template <int N, int M>
+bool operator>=(const BigUnsigned<N>& lhs, const BigUnsigned<M>& rhs) {
+  return !(lhs < rhs);
+}
+
+// Output operator for BigUnsigned, for testing purposes only.
+template <int N>
+std::ostream& operator<<(std::ostream& os, const BigUnsigned<N>& num) {
+ return os << num.ToString();
+}
+
+// Explicit instantiation declarations for the sizes of BigUnsigned that we
+// are using.
+//
+// For now, the choices of 4 and 84 are arbitrary; 4 is a small value that is
+// still bigger than an int128, and 84 is a large value we will want to use
+// in the from_chars implementation.
+//
+// Comments justifying the use of 84 belong in the from_chars implementation,
+// and will be added in a follow-up CL.
+extern template class BigUnsigned<4>;
+extern template class BigUnsigned<84>;
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CHARCONV_BIGINT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_parse.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_parse.cc
new file mode 100644
index 00000000000..f0f78eb68cf
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_parse.cc
@@ -0,0 +1,504 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/charconv_parse.h"
+#include "y_absl/strings/charconv.h"
+
+#include <cassert>
+#include <cstdint>
+#include <limits>
+
+#include "y_absl/strings/internal/memutil.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+// ParseFloat<10> will read the first 19 significant digits of the mantissa.
+// This number was chosen for multiple reasons.
+//
+// (a) First, for whatever integer type we choose to represent the mantissa, we
+// want to choose the largest possible number of decimal digits for that integer
+// type. We are using uint64_t, which can express any 19-digit unsigned
+// integer.
+//
+// (b) Second, we need to parse enough digits that the binary value of any
+// mantissa we capture has more bits of resolution than the mantissa
+// representation in the target float. Our algorithm requires at least 3 bits
+// of headway, but 19 decimal digits give a little more than that.
+//
+// The following static assertions verify the above comments:
+constexpr int kDecimalMantissaDigitsMax = 19;
+
+static_assert(std::numeric_limits<uint64_t>::digits10 ==
+ kDecimalMantissaDigitsMax,
+ "(a) above");
+
+// IEEE doubles, which we assume in Abseil, have 53 binary bits of mantissa.
+static_assert(std::numeric_limits<double>::is_iec559, "IEEE double assumed");
+static_assert(std::numeric_limits<double>::radix == 2, "IEEE double fact");
+static_assert(std::numeric_limits<double>::digits == 53, "IEEE double fact");
+
+// The lowest valued 19-digit decimal mantissa we can read still contains
+// sufficient information to reconstruct a binary mantissa.
+static_assert(1000000000000000000u > (uint64_t{1} << (53 + 3)), "(b) above");
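+// (Illustrative check: the smallest 19-digit mantissa, 10**18, exceeds
+// 2**56 ~= 7.2e16, so even it carries the required 53 + 3 bits.)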
+
+// ParseFloat<16> will read the first 15 significant digits of the mantissa.
+//
+// Because a base-16-to-base-2 conversion can be done exactly, we do not need
+// to maximize the number of scanned hex digits to improve our conversion. What
+// is required is to scan two more bits than the mantissa can represent, so that
+// we always round correctly.
+//
+// (One extra bit does not suffice to perform correct rounding, since a number
+// exactly halfway between two representable floats has unique rounding rules,
+// so we need to differentiate between a "halfway between" number and a "closer
+// to the larger value" number.)
+constexpr int kHexadecimalMantissaDigitsMax = 15;
+
+// The minimum number of significant bits that will be read from
+// kHexadecimalMantissaDigitsMax hex digits. We must subtract three, since
+// the most significant digit can be a "1", which contributes only a single
+// significant bit.
+constexpr int kGuaranteedHexadecimalMantissaBitPrecision =
+ 4 * kHexadecimalMantissaDigitsMax - 3;
+
+static_assert(kGuaranteedHexadecimalMantissaBitPrecision >
+ std::numeric_limits<double>::digits + 2,
+ "kHexadecimalMantissaDigitsMax too small");
+
+// We also impose a limit on the number of significant digits we will read from
+// an exponent, to avoid having to deal with integer overflow. We use 9 for
+// this purpose.
+//
+// If we read a 9 digit exponent, the end result of the conversion will
+// necessarily be infinity or zero, depending on the sign of the exponent.
+// Therefore we can just drop extra digits on the floor without any extra
+// logic.
+constexpr int kDecimalExponentDigitsMax = 9;
+static_assert(std::numeric_limits<int>::digits10 >= kDecimalExponentDigitsMax,
+ "int type too small");
+
+// To avoid incredibly large inputs causing integer overflow for our exponent,
+// we impose an arbitrary but very large limit on the number of significant
+// digits we will accept. The implementation refuses to match a string with
+// more consecutive significant mantissa digits than this.
+constexpr int kDecimalDigitLimit = 50000000;
+
+// Corresponding limit for hexadecimal digit inputs. This is one fourth the
+// amount of kDecimalDigitLimit, since each dropped hexadecimal digit requires
+// a binary exponent adjustment of 4.
+constexpr int kHexadecimalDigitLimit = kDecimalDigitLimit / 4;
+
+// The largest exponent we can read is 999999999 (per
+// kDecimalExponentDigitsMax), and the largest exponent adjustment we can get
+// from dropped mantissa digits is 2 * kDecimalDigitLimit, and the sum of these
+// comfortably fits in an integer.
+//
+// We count kDecimalDigitLimit twice because there are independent limits for
+// numbers before and after the decimal point. (In the case where there are no
+// significant digits before the decimal point, there are independent limits for
+// post-decimal-point leading zeroes and for significant digits.)
+static_assert(999999999 + 2 * kDecimalDigitLimit <
+ std::numeric_limits<int>::max(),
+ "int type too small");
+static_assert(999999999 + 2 * (4 * kHexadecimalDigitLimit) <
+ std::numeric_limits<int>::max(),
+ "int type too small");
+
+// Returns true if the provided bitfield allows parsing an exponent value
+// (e.g., "1.5e100").
+bool AllowExponent(chars_format flags) {
+ bool fixed = (flags & chars_format::fixed) == chars_format::fixed;
+ bool scientific =
+ (flags & chars_format::scientific) == chars_format::scientific;
+ return scientific || !fixed;
+}
+
+// Returns true if the provided bitfield requires an exponent value be present.
+bool RequireExponent(chars_format flags) {
+ bool fixed = (flags & chars_format::fixed) == chars_format::fixed;
+ bool scientific =
+ (flags & chars_format::scientific) == chars_format::scientific;
+ return scientific && !fixed;
+}
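+
+// Taken together (illustrative summary): chars_format::fixed alone disallows
+// an exponent, chars_format::scientific alone requires one, and setting both
+// bits (as chars_format::general does) leaves the exponent optional.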
+
+const int8_t kAsciiToInt[256] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1};
+
+// Returns true if `ch` is a digit in the given base
+template <int base>
+bool IsDigit(char ch);
+
+// Converts a valid `ch` to its digit value in the given base.
+template <int base>
+unsigned ToDigit(char ch);
+
+// Returns true if `ch` is the exponent delimiter for the given base.
+template <int base>
+bool IsExponentCharacter(char ch);
+
+// Returns the maximum number of significant digits we will read for a float
+// in the given base.
+template <int base>
+constexpr int MantissaDigitsMax();
+
+// Returns the largest consecutive run of digits we will accept when parsing a
+// number in the given base.
+template <int base>
+constexpr int DigitLimit();
+
+// Returns the amount the exponent must be adjusted by for each dropped digit.
+// (For decimal this is 1, since the digits are in base 10 and the exponent base
+// is also 10, but for hexadecimal this is 4, since the digits are base 16 but
+// the exponent base is 2.)
+template <int base>
+constexpr int DigitMagnitude();
+
+template <>
+bool IsDigit<10>(char ch) {
+ return ch >= '0' && ch <= '9';
+}
+template <>
+bool IsDigit<16>(char ch) {
+ return kAsciiToInt[static_cast<unsigned char>(ch)] >= 0;
+}
+
+template <>
+unsigned ToDigit<10>(char ch) {
+ return ch - '0';
+}
+template <>
+unsigned ToDigit<16>(char ch) {
+ return kAsciiToInt[static_cast<unsigned char>(ch)];
+}
+
+template <>
+bool IsExponentCharacter<10>(char ch) {
+ return ch == 'e' || ch == 'E';
+}
+
+template <>
+bool IsExponentCharacter<16>(char ch) {
+ return ch == 'p' || ch == 'P';
+}
+
+template <>
+constexpr int MantissaDigitsMax<10>() {
+ return kDecimalMantissaDigitsMax;
+}
+template <>
+constexpr int MantissaDigitsMax<16>() {
+ return kHexadecimalMantissaDigitsMax;
+}
+
+template <>
+constexpr int DigitLimit<10>() {
+ return kDecimalDigitLimit;
+}
+template <>
+constexpr int DigitLimit<16>() {
+ return kHexadecimalDigitLimit;
+}
+
+template <>
+constexpr int DigitMagnitude<10>() {
+ return 1;
+}
+template <>
+constexpr int DigitMagnitude<16>() {
+ return 4;
+}
+
+// Reads decimal digits from [begin, end) into *out. Returns the number of
+// digits consumed.
+//
+// After max_digits has been read, keeps consuming characters, but no longer
+// adjusts *out. If a nonzero digit is dropped this way, *dropped_nonzero_digit
+// is set; otherwise, it is left unmodified.
+//
+// If no digits are matched, returns 0 and leaves *out unchanged.
+//
+// ConsumeDigits does not protect against overflow on *out; max_digits must
+// be chosen with respect to type T to avoid the possibility of overflow.
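+//
+// For example (illustrative): ConsumeDigits<10>() over "0001234567" with
+// max_digits == 4 skips the three leading zeros, stores 1234 in *out, scans
+// the remaining "567" while setting *dropped_nonzero_digit, and returns 10,
+// the total number of characters consumed.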
+template <int base, typename T>
+int ConsumeDigits(const char* begin, const char* end, int max_digits, T* out,
+ bool* dropped_nonzero_digit) {
+ if (base == 10) {
+ assert(max_digits <= std::numeric_limits<T>::digits10);
+ } else if (base == 16) {
+ assert(max_digits * 4 <= std::numeric_limits<T>::digits);
+ }
+ const char* const original_begin = begin;
+
+ // Skip leading zeros, but only if *out is zero.
+ // They don't cause an overflow so we don't have to count them for
+ // `max_digits`.
+ while (!*out && end != begin && *begin == '0') ++begin;
+
+ T accumulator = *out;
+ const char* significant_digits_end =
+ (end - begin > max_digits) ? begin + max_digits : end;
+ while (begin < significant_digits_end && IsDigit<base>(*begin)) {
+ // Do not guard against *out overflow; max_digits was chosen to avoid this.
+ // Do assert against it, to detect problems in debug builds.
+ auto digit = static_cast<T>(ToDigit<base>(*begin));
+ assert(accumulator * base >= accumulator);
+ accumulator *= base;
+ assert(accumulator + digit >= accumulator);
+ accumulator += digit;
+ ++begin;
+ }
+ bool dropped_nonzero = false;
+ while (begin < end && IsDigit<base>(*begin)) {
+ dropped_nonzero = dropped_nonzero || (*begin != '0');
+ ++begin;
+ }
+ if (dropped_nonzero && dropped_nonzero_digit != nullptr) {
+ *dropped_nonzero_digit = true;
+ }
+ *out = accumulator;
+ return static_cast<int>(begin - original_begin);
+}
+
+// Returns true if `v` is one of the chars allowed inside parentheses following
+// a NaN.
+bool IsNanChar(char v) {
+ return (v == '_') || (v >= '0' && v <= '9') || (v >= 'a' && v <= 'z') ||
+ (v >= 'A' && v <= 'Z');
+}
+
+// Checks the range [begin, end) for a strtod()-formatted infinity or NaN. If
+// one is found, sets `out` appropriately and returns true.
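+//
+// For example (illustrative): "inf", "INFINITY", and "nan" all match; for
+// "nan(ind)" the subrange markers delimit "ind" and `end` points just past
+// the closing parenthesis.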
+bool ParseInfinityOrNan(const char* begin, const char* end,
+ strings_internal::ParsedFloat* out) {
+ if (end - begin < 3) {
+ return false;
+ }
+ switch (*begin) {
+ case 'i':
+ case 'I': {
+ // An infinity string consists of the characters "inf" or "infinity",
+ // case insensitive.
+ if (strings_internal::memcasecmp(begin + 1, "nf", 2) != 0) {
+ return false;
+ }
+ out->type = strings_internal::FloatType::kInfinity;
+ if (end - begin >= 8 &&
+ strings_internal::memcasecmp(begin + 3, "inity", 5) == 0) {
+ out->end = begin + 8;
+ } else {
+ out->end = begin + 3;
+ }
+ return true;
+ }
+ case 'n':
+ case 'N': {
+ // A NaN consists of the characters "nan", case insensitive, optionally
+ // followed by a parenthesized sequence of zero or more alphanumeric
+ // characters and/or underscores.
+ if (strings_internal::memcasecmp(begin + 1, "an", 2) != 0) {
+ return false;
+ }
+ out->type = strings_internal::FloatType::kNan;
+ out->end = begin + 3;
+ // NaN is allowed to be followed by a parenthesized string, consisting of
+ // only the characters [a-zA-Z0-9_]. Match that if it's present.
+ begin += 3;
+ if (begin < end && *begin == '(') {
+ const char* nan_begin = begin + 1;
+ while (nan_begin < end && IsNanChar(*nan_begin)) {
+ ++nan_begin;
+ }
+ if (nan_begin < end && *nan_begin == ')') {
+ // We found an extra NaN specifier range
+ out->subrange_begin = begin + 1;
+ out->subrange_end = nan_begin;
+ out->end = nan_begin + 1;
+ }
+ }
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+} // namespace
+
+namespace strings_internal {
+
+template <int base>
+strings_internal::ParsedFloat ParseFloat(const char* begin, const char* end,
+ chars_format format_flags) {
+ strings_internal::ParsedFloat result;
+
+ // Exit early if we're given an empty range.
+ if (begin == end) return result;
+
+ // Handle the infinity and NaN cases.
+ if (ParseInfinityOrNan(begin, end, &result)) {
+ return result;
+ }
+
+ const char* const mantissa_begin = begin;
+ while (begin < end && *begin == '0') {
+ ++begin; // skip leading zeros
+ }
+ uint64_t mantissa = 0;
+
+ int exponent_adjustment = 0;
+ bool mantissa_is_inexact = false;
+ int pre_decimal_digits = ConsumeDigits<base>(
+ begin, end, MantissaDigitsMax<base>(), &mantissa, &mantissa_is_inexact);
+ begin += pre_decimal_digits;
+ int digits_left;
+ if (pre_decimal_digits >= DigitLimit<base>()) {
+ // refuse to parse pathological inputs
+ return result;
+ } else if (pre_decimal_digits > MantissaDigitsMax<base>()) {
+ // We dropped some non-fraction digits on the floor. Adjust our exponent
+ // to compensate.
+ exponent_adjustment =
+ static_cast<int>(pre_decimal_digits - MantissaDigitsMax<base>());
+ digits_left = 0;
+ } else {
+ digits_left =
+ static_cast<int>(MantissaDigitsMax<base>() - pre_decimal_digits);
+ }
+ if (begin < end && *begin == '.') {
+ ++begin;
+ if (mantissa == 0) {
+ // If we haven't seen any nonzero digits yet, keep skipping zeros. We
+ // have to adjust the exponent to reflect the changed place value.
+ const char* begin_zeros = begin;
+ while (begin < end && *begin == '0') {
+ ++begin;
+ }
+ int zeros_skipped = static_cast<int>(begin - begin_zeros);
+ if (zeros_skipped >= DigitLimit<base>()) {
+ // refuse to parse pathological inputs
+ return result;
+ }
+ exponent_adjustment -= static_cast<int>(zeros_skipped);
+ }
+ int post_decimal_digits = ConsumeDigits<base>(
+ begin, end, digits_left, &mantissa, &mantissa_is_inexact);
+ begin += post_decimal_digits;
+
+ // Since `mantissa` is an integer, each significant digit we read after
+ // the decimal point requires an adjustment to the exponent. "1.23e0" will
+ // be stored as `mantissa` == 123 and `exponent` == -2 (that is,
+ // "123e-2").
+ if (post_decimal_digits >= DigitLimit<base>()) {
+ // refuse to parse pathological inputs
+ return result;
+ } else if (post_decimal_digits > digits_left) {
+ exponent_adjustment -= digits_left;
+ } else {
+ exponent_adjustment -= post_decimal_digits;
+ }
+ }
+ // If we've found no mantissa whatsoever, this isn't a number.
+ if (mantissa_begin == begin) {
+ return result;
+ }
+ // A bare "." doesn't count as a mantissa either.
+ if (begin - mantissa_begin == 1 && *mantissa_begin == '.') {
+ return result;
+ }
+
+ if (mantissa_is_inexact) {
+ // We dropped significant digits on the floor. Handle this appropriately.
+ if (base == 10) {
+ // If we truncated significant decimal digits, store the full range of the
+ // mantissa for future big integer math for exact rounding.
+ result.subrange_begin = mantissa_begin;
+ result.subrange_end = begin;
+ } else if (base == 16) {
+ // If we truncated hex digits, reflect this fact by setting the low
+ // ("sticky") bit. This allows for correct rounding in all cases.
+ mantissa |= 1;
+ }
+ }
+ result.mantissa = mantissa;
+
+ const char* const exponent_begin = begin;
+ result.literal_exponent = 0;
+ bool found_exponent = false;
+ if (AllowExponent(format_flags) && begin < end &&
+ IsExponentCharacter<base>(*begin)) {
+ bool negative_exponent = false;
+ ++begin;
+ if (begin < end && *begin == '-') {
+ negative_exponent = true;
+ ++begin;
+ } else if (begin < end && *begin == '+') {
+ ++begin;
+ }
+ const char* const exponent_digits_begin = begin;
+ // Exponent is always expressed in decimal, even for hexadecimal floats.
+ begin += ConsumeDigits<10>(begin, end, kDecimalExponentDigitsMax,
+ &result.literal_exponent, nullptr);
+ if (begin == exponent_digits_begin) {
+ // there were no digits where we expected an exponent. We failed to read
+ // an exponent and should not consume the 'e' after all. Rewind 'begin'.
+ found_exponent = false;
+ begin = exponent_begin;
+ } else {
+ found_exponent = true;
+ if (negative_exponent) {
+ result.literal_exponent = -result.literal_exponent;
+ }
+ }
+ }
+
+ if (!found_exponent && RequireExponent(format_flags)) {
+ // Provided flags required an exponent, but none was found. This results
+ // in a failure to scan.
+ return result;
+ }
+
+ // Success!
+ result.type = strings_internal::FloatType::kNumber;
+ if (result.mantissa > 0) {
+ result.exponent = result.literal_exponent +
+ (DigitMagnitude<base>() * exponent_adjustment);
+ } else {
+ result.exponent = 0;
+ }
+ result.end = begin;
+ return result;
+}
+
+template ParsedFloat ParseFloat<10>(const char* begin, const char* end,
+ chars_format format_flags);
+template ParsedFloat ParseFloat<16>(const char* begin, const char* end,
+ chars_format format_flags);
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_parse.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_parse.h
new file mode 100644
index 00000000000..3f942cd4cb0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_parse.h
@@ -0,0 +1,99 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CHARCONV_PARSE_H_
+#define ABSL_STRINGS_INTERNAL_CHARCONV_PARSE_H_
+
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/charconv.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+// Enum indicating whether a parsed float is a number or special value.
+enum class FloatType { kNumber, kInfinity, kNan };
+
+// The decomposed parts of a parsed `float` or `double`.
+struct ParsedFloat {
+ // Representation of the parsed mantissa, with the decimal point adjusted to
+ // make it an integer.
+ //
+ // During decimal scanning, this contains 19 significant digits worth of
+ // mantissa value. If digits beyond this point are found, they
+ // are truncated, and if any of these dropped digits are nonzero, then
+ // `mantissa` is inexact, and the full mantissa is stored in [subrange_begin,
+ // subrange_end).
+ //
+ // During hexadecimal scanning, this contains 15 significant hex digits worth
+ // of mantissa value. Digits beyond this point are sticky -- they are
+ // truncated, but if any dropped digits are nonzero, the low bit of mantissa
+ // will be set. (This allows for precise rounding, and avoids the need
+ // to store the full mantissa in [subrange_begin, subrange_end).)
+ uint64_t mantissa = 0;
+
+  // Floating point exponent. This reflects any decimal point adjustments and
+ // any truncated digits from the mantissa. The absolute value of the parsed
+ // number is represented by mantissa * (base ** exponent), where base==10 for
+ // decimal floats, and base==2 for hexadecimal floats.
+ int exponent = 0;
+
+ // The literal exponent value scanned from the input, or 0 if none was
+ // present. This does not reflect any adjustments applied to mantissa.
+ int literal_exponent = 0;
+
+ // The type of number scanned.
+ FloatType type = FloatType::kNumber;
+
+ // When non-null, [subrange_begin, subrange_end) marks a range of characters
+ // that require further processing. The meaning is dependent on float type.
+ // If type == kNumber and this is set, this is a "wide input": the input
+ // mantissa contained more than 19 digits. The range contains the full
+  // mantissa, which, together with `literal_exponent`, must be examined to
+  // find the best floating point match.
+ // If type == kNan and this is set, the range marks the contents of a
+ // matched parenthesized character region after the NaN.
+ const char* subrange_begin = nullptr;
+ const char* subrange_end = nullptr;
+
+ // One-past-the-end of the successfully parsed region, or nullptr if no
+ // matching pattern was found.
+ const char* end = nullptr;
+};
+
+// Read the floating point number in the provided range, and populate
+// ParsedFloat accordingly.
+//
+// format_flags is a bitmask value specifying what patterns this API will match.
+// `scientific` and `fixed` are honored per std::from_chars rules
+// ([utility.from.chars], C++17): if exactly one of these bits is set, then an
+// exponent is required, or disallowed, respectively.
+//
+// Template parameter `base` must be either 10 or 16. For base 16, a "0x" is
+// *not* consumed. The `hex` bit from format_flags is ignored by ParseFloat.
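+//
+// For example (illustrative): ParseFloat<10>("1.5e10", ...) produces
+// mantissa == 15 with a decimal-point adjustment of -1 and
+// literal_exponent == 10, so exponent == 10 - 1 == 9; the parsed value is
+// 15 * 10**9 == 1.5e10.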
+template <int base>
+ParsedFloat ParseFloat(const char* begin, const char* end,
+ y_absl::chars_format format_flags);
+
+extern template ParsedFloat ParseFloat<10>(const char* begin, const char* end,
+ y_absl::chars_format format_flags);
+extern template ParsedFloat ParseFloat<16>(const char* begin, const char* end,
+ y_absl::chars_format format_flags);
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+#endif // ABSL_STRINGS_INTERNAL_CHARCONV_PARSE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc
new file mode 100644
index 00000000000..6fc39985d80
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc
@@ -0,0 +1,89 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "y_absl/strings/internal/cord_internal.h"
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
+#include "y_absl/container/inlined_vector.h"
+#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+#include "y_absl/strings/internal/cord_rep_ring.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+ABSL_CONST_INIT std::atomic<bool> cord_btree_enabled(kCordEnableBtreeDefault);
+ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(
+ kCordEnableRingBufferDefault);
+ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled(
+ kCordShallowSubcordsDefault);
+ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
+
+void CordRep::Destroy(CordRep* rep) {
+ assert(rep != nullptr);
+
+ y_absl::InlinedVector<CordRep*, Constants::kInlinedVectorSize> pending;
+ while (true) {
+ assert(!rep->refcount.IsImmortal());
+ if (rep->tag == CONCAT) {
+ CordRepConcat* rep_concat = rep->concat();
+ CordRep* right = rep_concat->right;
+ if (!right->refcount.Decrement()) {
+ pending.push_back(right);
+ }
+ CordRep* left = rep_concat->left;
+ delete rep_concat;
+ rep = nullptr;
+ if (!left->refcount.Decrement()) {
+ rep = left;
+ continue;
+ }
+ } else if (rep->tag == BTREE) {
+ CordRepBtree::Destroy(rep->btree());
+ rep = nullptr;
+ } else if (rep->tag == RING) {
+ CordRepRing::Destroy(rep->ring());
+ rep = nullptr;
+ } else if (rep->tag == EXTERNAL) {
+ CordRepExternal::Delete(rep);
+ rep = nullptr;
+ } else if (rep->tag == SUBSTRING) {
+ CordRepSubstring* rep_substring = rep->substring();
+ CordRep* child = rep_substring->child;
+ delete rep_substring;
+ rep = nullptr;
+ if (!child->refcount.Decrement()) {
+ rep = child;
+ continue;
+ }
+ } else {
+ CordRepFlat::Delete(rep);
+ rep = nullptr;
+ }
+
+ if (!pending.empty()) {
+ rep = pending.back();
+ pending.pop_back();
+ } else {
+ break;
+ }
+ }
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h
new file mode 100644
index 00000000000..82f5ac7b818
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h
@@ -0,0 +1,620 @@
+// Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
+#define ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
+
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/endian.h"
+#include "y_absl/base/internal/invoke.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/container/internal/compressed_tuple.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+class CordzInfo;
+
+// Default feature enable states for cord ring buffers
+enum CordFeatureDefaults {
+ kCordEnableBtreeDefault = true,
+ kCordEnableRingBufferDefault = false,
+ kCordShallowSubcordsDefault = false
+};
+
+extern std::atomic<bool> cord_btree_enabled;
+extern std::atomic<bool> cord_ring_buffer_enabled;
+extern std::atomic<bool> shallow_subcords_enabled;
+
+// `cord_btree_exhaustive_validation` can be set to force exhaustive validation
+// in debug assertions, and code that calls `IsValid()` explicitly. By default,
+// assertions should be relatively cheap and AssertValid() can easily lead to
+// O(n^2) complexity as recursive / full tree validation is O(n).
+extern std::atomic<bool> cord_btree_exhaustive_validation;
+
+inline void enable_cord_btree(bool enable) {
+ cord_btree_enabled.store(enable, std::memory_order_relaxed);
+}
+
+inline void enable_cord_ring_buffer(bool enable) {
+ cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
+}
+
+inline void enable_shallow_subcords(bool enable) {
+ shallow_subcords_enabled.store(enable, std::memory_order_relaxed);
+}
+
+enum Constants {
+ // The inlined size to use with y_absl::InlinedVector.
+ //
+ // Note: The InlinedVectors in this file (and in cord.h) do not need to use
+ // the same value for their inlined size. The fact that they do is historical.
+ // It may be desirable for each to use a different inlined size optimized for
+ // that InlinedVector's usage.
+ //
+ // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
+ // the inlined vector size (47 exists for backward compatibility).
+ kInlinedVectorSize = 47,
+
+ // Prefer copying blocks of at most this size, otherwise reference count.
+ kMaxBytesToCopy = 511
+};
+
+// Compact class for tracking the reference count and state flags for CordRep
+// instances. Data is stored in an atomic int32_t for compactness and speed.
+class RefcountAndFlags {
+ public:
+ constexpr RefcountAndFlags() : count_{kRefIncrement} {}
+ struct Immortal {};
+ explicit constexpr RefcountAndFlags(Immortal) : count_(kImmortalFlag) {}
+ struct WithCrc {};
+ explicit constexpr RefcountAndFlags(WithCrc)
+ : count_(kCrcFlag | kRefIncrement) {}
+
+ // Increments the reference count. Imposes no memory ordering.
+ inline void Increment() {
+ count_.fetch_add(kRefIncrement, std::memory_order_relaxed);
+ }
+
+ // Asserts that the current refcount is greater than 0. If the refcount is
+ // greater than 1, decrements the reference count.
+ //
+ // Returns false if there are no references outstanding; true otherwise.
+ // Inserts barriers to ensure that state written before this method returns
+ // false will be visible to a thread that just observed this method returning
+ // false. Always returns false when the immortal bit is set.
+ inline bool Decrement() {
+ int32_t refcount = count_.load(std::memory_order_acquire) & kRefcountMask;
+ assert(refcount > 0 || refcount & kImmortalFlag);
+ return refcount != kRefIncrement &&
+ (count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
+ kRefcountMask) != kRefIncrement;
+ }
+
+ // Same as Decrement but expect that refcount is greater than 1.
+ inline bool DecrementExpectHighRefcount() {
+ int32_t refcount =
+ count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
+ kRefcountMask;
+ assert(refcount > 0 || refcount & kImmortalFlag);
+ return refcount != kRefIncrement;
+ }
+
+ // Returns the current reference count using acquire semantics.
+ inline int32_t Get() const {
+ return count_.load(std::memory_order_acquire) >> kNumFlags;
+ }
+
+ // Returns true if the referenced object carries a CRC value.
+ bool HasCrc() const {
+ return (count_.load(std::memory_order_relaxed) & kCrcFlag) != 0;
+ }
+
+ // Returns true iff the atomic integer is 1 and this node does not store
+ // a CRC. When both these conditions are met, the current thread owns
+ // the reference and no other thread shares it, so its contents may be
+ // safely mutated.
+ //
+ // If the referenced item is shared, carries a CRC, or is immortal,
+ // it should not be modified in-place, and this function returns false.
+ //
+ // This call performs the memory barrier needed for the owning thread
+ // to act on the object, so that if it returns true, it may safely
+ // assume exclusive access to the object.
+ inline bool IsMutable() {
+ return (count_.load(std::memory_order_acquire)) == kRefIncrement;
+ }
+
+ // Returns whether the atomic integer is 1. Similar to IsMutable(),
+ // but does not check for a stored CRC. (An unshared node with a CRC is not
+ // mutable, because changing its data would invalidate the CRC.)
+ //
+ // When this returns true, there are no other references, and data sinks
+ // may safely adopt the children of the CordRep.
+ inline bool IsOne() {
+ return (count_.load(std::memory_order_acquire) & kRefcountMask) ==
+ kRefIncrement;
+ }
+
+ bool IsImmortal() const {
+ return (count_.load(std::memory_order_relaxed) & kImmortalFlag) != 0;
+ }
+
+ private:
+ // We reserve the bottom bits for flags.
+  // kImmortalFlag indicates that this entity should never be collected; it is
+  // used for the StringConstant constructor to avoid collecting immutable
+  // constant cords.
+  // kCrcFlag indicates that the CordRep carries a CRC value.
+ enum {
+ kNumFlags = 2,
+
+ kImmortalFlag = 0x1,
+ kCrcFlag = 0x2,
+ kRefIncrement = (1 << kNumFlags),
+
+ // Bitmask to use when checking refcount by equality. This masks out
+ // all flags except kImmortalFlag, which is part of the refcount for
+ // purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
+ // if the immortal bit is set.)
+ kRefcountMask = ~kCrcFlag,
+ };
+
+ std::atomic<int32_t> count_;
+};
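+
+// For example (illustrative): with kNumFlags == 2 a plain refcount of 3 is
+// stored as count_ == (3 << 2) == 12, and the same refcount with the CRC flag
+// set is (3 << 2) | kCrcFlag == 14; kRefcountMask strips kCrcFlag so the CRC
+// bit never perturbs refcount comparisons.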
+
+// The overhead of a vtable is too much for Cord, so we roll our own subclasses
+// using only a single byte to differentiate classes from each other - the "tag"
+// byte. Define the subclasses first so we can provide downcasting helper
+// functions in the base class.
+
+struct CordRepConcat;
+struct CordRepExternal;
+struct CordRepFlat;
+struct CordRepSubstring;
+class CordRepRing;
+class CordRepBtree;
+
+// Various representations that we allow
+enum CordRepKind {
+ CONCAT = 0,
+ SUBSTRING = 1,
+ BTREE = 2,
+ RING = 3,
+ EXTERNAL = 4,
+
+ // We have different tags for different sized flat arrays,
+ // starting with FLAT, and limited to MAX_FLAT_TAG. The 225 value is based on
+ // the current 'size to tag' encoding of 8 / 32 bytes. If a new tag is needed
+ // in the future, then 'FLAT' and 'MAX_FLAT_TAG' should be adjusted as well
+  // as the Tag <---> Size logic so that FLAT still represents the minimum flat
+ // allocation size. (32 bytes as of now).
+ FLAT = 5,
+ MAX_FLAT_TAG = 225
+};
+
+// There are various locations where we want to check if some rep is a 'plain'
+// data edge, i.e. an external or flat rep. By having FLAT == EXTERNAL + 1, we
+// can perform this check in a single branch as 'tag >= EXTERNAL'
+// Likewise, we have some locations where we check for 'ring or external/flat',
+// so we align RING next to EXTERNAL as well.
+// Note that we can leave this optimization to the compiler. The compiler will
+// DTRT when it sees a condition like `tag == EXTERNAL || tag >= FLAT`.
+static_assert(RING == BTREE + 1, "BTREE and RING not consecutive");
+static_assert(EXTERNAL == RING + 1, "RING and EXTERNAL not consecutive");
+static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive");
+
+struct CordRep {
+ CordRep() = default;
+ constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l)
+ : length(l), refcount(immortal), tag(EXTERNAL), storage{} {}
+
+ // The following three fields have to be less than 32 bytes since
+ // that is the smallest supported flat node size.
+ size_t length;
+ RefcountAndFlags refcount;
+ // If tag < FLAT, it represents CordRepKind and indicates the type of node.
+ // Otherwise, the node type is CordRepFlat and the tag is the encoded size.
+ uint8_t tag;
+
+  // `storage` serves two main purposes:
+  // - the starting point for FlatCordRep.Data() [flexible-array-member]
+  // - 3 bytes of additional storage for use by derived classes.
+  // The latter is used by CordRepConcat and CordRepBtree. CordRepConcat stores
+  // a 'depth' value in storage[0], and the (future) CordRepBtree class stores
+  // `height`, `begin` and `end` in the 3 entries. Otherwise we would need to
+  // allocate room for these in the derived class, as not all compilers reuse
+  // padding space from the base class (clang and gcc do, MSVC does not, etc.).
+ uint8_t storage[3];
+
+ // Returns true if this instance's tag matches the requested type.
+ constexpr bool IsRing() const { return tag == RING; }
+ constexpr bool IsConcat() const { return tag == CONCAT; }
+ constexpr bool IsSubstring() const { return tag == SUBSTRING; }
+ constexpr bool IsExternal() const { return tag == EXTERNAL; }
+ constexpr bool IsFlat() const { return tag >= FLAT; }
+ constexpr bool IsBtree() const { return tag == BTREE; }
+
+ inline CordRepRing* ring();
+ inline const CordRepRing* ring() const;
+ inline CordRepConcat* concat();
+ inline const CordRepConcat* concat() const;
+ inline CordRepSubstring* substring();
+ inline const CordRepSubstring* substring() const;
+ inline CordRepExternal* external();
+ inline const CordRepExternal* external() const;
+ inline CordRepFlat* flat();
+ inline const CordRepFlat* flat() const;
+ inline CordRepBtree* btree();
+ inline const CordRepBtree* btree() const;
+
+ // --------------------------------------------------------------------
+ // Memory management
+
+ // Destroys the provided `rep`.
+ static void Destroy(CordRep* rep);
+
+ // Increments the reference count of `rep`.
+ // Requires `rep` to be a non-null pointer value.
+ static inline CordRep* Ref(CordRep* rep);
+
+ // Decrements the reference count of `rep`. Destroys rep if count reaches
+ // zero. Requires `rep` to be a non-null pointer value.
+ static inline void Unref(CordRep* rep);
+};
+
+struct CordRepConcat : public CordRep {
+ CordRep* left;
+ CordRep* right;
+
+ uint8_t depth() const { return storage[0]; }
+ void set_depth(uint8_t depth) { storage[0] = depth; }
+};
+
+struct CordRepSubstring : public CordRep {
+ size_t start; // Starting offset of substring in child
+ CordRep* child;
+};
+
+// Type for function pointer that will invoke the releaser function and also
+// delete the `CordRepExternalImpl` corresponding to the passed in
+// `CordRepExternal`.
+using ExternalReleaserInvoker = void (*)(CordRepExternal*);
+
+// External CordReps are allocated together with a type erased releaser. The
+// releaser is stored in the memory directly following the CordRepExternal.
+struct CordRepExternal : public CordRep {
+ CordRepExternal() = default;
+ explicit constexpr CordRepExternal(y_absl::string_view str)
+ : CordRep(RefcountAndFlags::Immortal{}, str.size()),
+ base(str.data()),
+ releaser_invoker(nullptr) {}
+
+ const char* base;
+ // Pointer to function that knows how to call and destroy the releaser.
+ ExternalReleaserInvoker releaser_invoker;
+
+ // Deletes (releases) the external rep.
+ // Requires rep != nullptr and rep->IsExternal()
+ static void Delete(CordRep* rep);
+};
+
+struct Rank1 {};
+struct Rank0 : Rank1 {};
+
+template <typename Releaser, typename = ::y_absl::base_internal::invoke_result_t<
+ Releaser, y_absl::string_view>>
+void InvokeReleaser(Rank0, Releaser&& releaser, y_absl::string_view data) {
+ ::y_absl::base_internal::invoke(std::forward<Releaser>(releaser), data);
+}
+
+template <typename Releaser,
+ typename = ::y_absl::base_internal::invoke_result_t<Releaser>>
+void InvokeReleaser(Rank1, Releaser&& releaser, y_absl::string_view) {
+ ::y_absl::base_internal::invoke(std::forward<Releaser>(releaser));
+}
+
+// We use CompressedTuple so that we can benefit from EBCO.
+template <typename Releaser>
+struct CordRepExternalImpl
+ : public CordRepExternal,
+ public ::y_absl::container_internal::CompressedTuple<Releaser> {
+ // The extra int arg is so that we can avoid interfering with copy/move
+ // constructors while still benefitting from perfect forwarding.
+ template <typename T>
+ CordRepExternalImpl(T&& releaser, int)
+ : CordRepExternalImpl::CompressedTuple(std::forward<T>(releaser)) {
+ this->releaser_invoker = &Release;
+ }
+
+ ~CordRepExternalImpl() {
+ InvokeReleaser(Rank0{}, std::move(this->template get<0>()),
+ y_absl::string_view(base, length));
+ }
+
+ static void Release(CordRepExternal* rep) {
+ delete static_cast<CordRepExternalImpl*>(rep);
+ }
+};
+
+inline void CordRepExternal::Delete(CordRep* rep) {
+ assert(rep != nullptr && rep->IsExternal());
+ auto* rep_external = static_cast<CordRepExternal*>(rep);
+ assert(rep_external->releaser_invoker != nullptr);
+ rep_external->releaser_invoker(rep_external);
+}
+
+template <typename Str>
+struct ConstInitExternalStorage {
+ ABSL_CONST_INIT static CordRepExternal value;
+};
+
+template <typename Str>
+CordRepExternal ConstInitExternalStorage<Str>::value(Str::value);
+
+enum {
+ kMaxInline = 15,
+};
+
+constexpr char GetOrNull(y_absl::string_view data, size_t pos) {
+ return pos < data.size() ? data[pos] : '\0';
+}
+
+// We store cordz_info as a 64-bit pointer value in big endian format. This
+// guarantees that the least significant byte of cordz_info matches the last
+// byte of the inline data representation in as_chars_, which holds the inlined
+// size or the 'is_tree' bit.
+using cordz_info_t = int64_t;
+
+// Assert that the `cordz_info` pointer value perfectly overlaps the last half
+// of `as_chars_` and can hold a pointer value.
+static_assert(sizeof(cordz_info_t) * 2 == kMaxInline + 1, "");
+static_assert(sizeof(cordz_info_t) >= sizeof(intptr_t), "");
+
+// BigEndianByte() creates a big endian representation of 'value', i.e.: a big
+// endian value where the last byte in the host's representation holds 'value',
+// with all other bytes being 0.
+static constexpr cordz_info_t BigEndianByte(unsigned char value) {
+#if defined(ABSL_IS_BIG_ENDIAN)
+ return value;
+#else
+ return static_cast<cordz_info_t>(value) << ((sizeof(cordz_info_t) - 1) * 8);
+#endif
+}
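+
+// For example (illustrative): on a little-endian host BigEndianByte(1)
+// returns 0x0100000000000000, whose in-memory representation places the 0x01
+// byte last; on a big-endian host it simply returns 1.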
+
+class InlineData {
+ public:
+ // DefaultInitType forces the use of the default initialization constructor.
+ enum DefaultInitType { kDefaultInit };
+
+  // kNullCordzInfo holds the big endian representation of intptr_t(1).
+  // This is the 'null' / initial value of 'cordz_info'. The null value
+  // is specifically big endian 1 because, with 64-bit pointers, the last
+  // byte of cordz_info overlaps with the last byte holding the tag.
+ static constexpr cordz_info_t kNullCordzInfo = BigEndianByte(1);
+
+ constexpr InlineData() : as_chars_{0} {}
+ explicit InlineData(DefaultInitType) {}
+ explicit constexpr InlineData(CordRep* rep) : as_tree_(rep) {}
+ explicit constexpr InlineData(y_absl::string_view chars)
+ : as_chars_{
+ GetOrNull(chars, 0), GetOrNull(chars, 1),
+ GetOrNull(chars, 2), GetOrNull(chars, 3),
+ GetOrNull(chars, 4), GetOrNull(chars, 5),
+ GetOrNull(chars, 6), GetOrNull(chars, 7),
+ GetOrNull(chars, 8), GetOrNull(chars, 9),
+ GetOrNull(chars, 10), GetOrNull(chars, 11),
+ GetOrNull(chars, 12), GetOrNull(chars, 13),
+ GetOrNull(chars, 14), static_cast<char>((chars.size() << 1))} {}
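+
+  // For example (illustrative): InlineData("hi") stores {'h', 'i', 0, ...}
+  // with final byte 2 << 1 == 4; the clear low bit of that byte marks the
+  // value as inlined data rather than a tree.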
+
+ // Returns true if the current instance is empty.
+ // The 'empty value' is an inlined data value of zero length.
+ bool is_empty() const { return tag() == 0; }
+
+ // Returns true if the current instance holds a tree value.
+ bool is_tree() const { return (tag() & 1) != 0; }
+
+ // Returns true if the current instance holds a cordz_info value.
+ // Requires the current instance to hold a tree value.
+ bool is_profiled() const {
+ assert(is_tree());
+ return as_tree_.cordz_info != kNullCordzInfo;
+ }
+
+ // Returns true if either of the provided instances hold a cordz_info value.
+ // This method is more efficient than the equivalent `data1.is_profiled() ||
+ // data2.is_profiled()`. Requires both arguments to hold a tree.
+ static bool is_either_profiled(const InlineData& data1,
+ const InlineData& data2) {
+ assert(data1.is_tree() && data2.is_tree());
+ return (data1.as_tree_.cordz_info | data2.as_tree_.cordz_info) !=
+ kNullCordzInfo;
+ }
+
+ // Returns the cordz_info sampling instance for this instance, or nullptr
+ // if the current instance is not sampled and does not have CordzInfo data.
+ // Requires the current instance to hold a tree value.
+ CordzInfo* cordz_info() const {
+ assert(is_tree());
+ intptr_t info =
+ static_cast<intptr_t>(y_absl::big_endian::ToHost64(as_tree_.cordz_info));
+ assert(info & 1);
+ return reinterpret_cast<CordzInfo*>(info - 1);
+ }
+
+ // Sets the current cordz_info sampling instance for this instance, or nullptr
+ // if the current instance is not sampled and does not have CordzInfo data.
+ // Requires the current instance to hold a tree value.
+ void set_cordz_info(CordzInfo* cordz_info) {
+ assert(is_tree());
+ intptr_t info = reinterpret_cast<intptr_t>(cordz_info) | 1;
+ as_tree_.cordz_info = y_absl::big_endian::FromHost64(info);
+ }
+
+ // Resets the current cordz_info to null / empty.
+ void clear_cordz_info() {
+ assert(is_tree());
+ as_tree_.cordz_info = kNullCordzInfo;
+ }
+
+ // Returns a read only pointer to the character data inside this instance.
+ // Requires the current instance to hold inline data.
+ const char* as_chars() const {
+ assert(!is_tree());
+ return as_chars_;
+ }
+
+ // Returns a mutable pointer to the character data inside this instance.
+ // Should be used for 'write only' operations setting an inlined value.
+ // Applications can set the value of inlined data either before or after
+ // setting the inlined size, i.e., both of the below are valid:
+ //
+ // // Set inlined data and inline size
+ // memcpy(data_.as_chars(), data, size);
+ // data_.set_inline_size(size);
+ //
+ // // Set inlined size and inline data
+ // data_.set_inline_size(size);
+ // memcpy(data_.as_chars(), data, size);
+ //
+ // It's an error to read from the returned pointer without a preceding write
+ // if the current instance does not hold inline data, i.e.: is_tree() == true.
+ char* as_chars() { return as_chars_; }
+
+ // Returns the tree value of this instance.
+ // Requires the current instance to hold a tree value.
+ CordRep* as_tree() const {
+ assert(is_tree());
+ return as_tree_.rep;
+ }
+
+ // Initializes this instance to hold the tree value `rep`,
+ // setting the cordz_info to null, i.e.: 'not profiled'.
+ void make_tree(CordRep* rep) {
+ as_tree_.rep = rep;
+ as_tree_.cordz_info = kNullCordzInfo;
+ }
+
+ // Sets the tree value of this instance to `rep`.
+ // Requires the current instance to already hold a tree value.
+ // Does not affect the value of cordz_info.
+ void set_tree(CordRep* rep) {
+ assert(is_tree());
+ as_tree_.rep = rep;
+ }
+
+ // Returns the size of the inlined character data inside this instance.
+ // Requires the current instance to hold inline data.
+ size_t inline_size() const {
+ assert(!is_tree());
+ return tag() >> 1;
+ }
+
+ // Sets the size of the inlined character data inside this instance.
+ // Requires `size` to be <= kMaxInline.
+ // See the documentation on 'as_chars()' for more information and examples.
+ void set_inline_size(size_t size) {
+ ABSL_ASSERT(size <= kMaxInline);
+ tag() = static_cast<char>(size << 1);
+ }
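+
+ // Illustrative example (not part of the original comments): storing a
+ // 5 byte value inline.
+ //
+ //   InlineData data;
+ //   memcpy(data.as_chars(), "hello", 5);
+ //   data.set_inline_size(5);          // tag byte becomes (5 << 1) == 10
+ //   assert(!data.is_tree());          // low bit 0 => inline data
+ //   assert(data.inline_size() == 5);  // tag() >> 1 == 5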
+
+ private:
+ // See cordz_info_t for details on the forced alignment and size of `cordz_info`.
+ struct AsTree {
+ explicit constexpr AsTree(y_absl::cord_internal::CordRep* tree)
+ : rep(tree), cordz_info(kNullCordzInfo) {}
+ // This union uses up extra space so that whether rep is 32 or 64 bits,
+ // cordz_info will still start at the eighth byte, and the last
+ // byte of cordz_info will still be the last byte of InlineData.
+ union {
+ y_absl::cord_internal::CordRep* rep;
+ cordz_info_t unused_aligner;
+ };
+ cordz_info_t cordz_info;
+ };
+
+ char& tag() { return reinterpret_cast<char*>(this)[kMaxInline]; }
+ char tag() const { return reinterpret_cast<const char*>(this)[kMaxInline]; }
+
+ // If the data has length <= kMaxInline, we store it in `as_chars_`, and
+ // store the size, shifted left by one bit, in the last char of
+ // `as_chars_`. Else we store it in a tree and store a pointer to that tree
+ // in `as_tree_.rep`, with the tree tag stored in the last byte (see `tag()`).
+ union {
+ char as_chars_[kMaxInline + 1];
+ AsTree as_tree_;
+ };
+};
+
+static_assert(sizeof(InlineData) == kMaxInline + 1, "");
+
+inline CordRepConcat* CordRep::concat() {
+ assert(IsConcat());
+ return static_cast<CordRepConcat*>(this);
+}
+
+inline const CordRepConcat* CordRep::concat() const {
+ assert(IsConcat());
+ return static_cast<const CordRepConcat*>(this);
+}
+
+inline CordRepSubstring* CordRep::substring() {
+ assert(IsSubstring());
+ return static_cast<CordRepSubstring*>(this);
+}
+
+inline const CordRepSubstring* CordRep::substring() const {
+ assert(IsSubstring());
+ return static_cast<const CordRepSubstring*>(this);
+}
+
+inline CordRepExternal* CordRep::external() {
+ assert(IsExternal());
+ return static_cast<CordRepExternal*>(this);
+}
+
+inline const CordRepExternal* CordRep::external() const {
+ assert(IsExternal());
+ return static_cast<const CordRepExternal*>(this);
+}
+
+inline CordRep* CordRep::Ref(CordRep* rep) {
+ assert(rep != nullptr);
+ rep->refcount.Increment();
+ return rep;
+}
+
+inline void CordRep::Unref(CordRep* rep) {
+ assert(rep != nullptr);
+ // Expect refcount to be 0. Avoiding the cost of an atomic decrement should
+ // typically outweigh the cost of an extra branch checking for ref == 1.
+ if (ABSL_PREDICT_FALSE(!rep->refcount.DecrementExpectHighRefcount())) {
+ Destroy(rep);
+ }
+}
+
+} // namespace cord_internal
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+#endif // ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc
new file mode 100644
index 00000000000..93121c99584
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc
@@ -0,0 +1,1128 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/cord_rep_btree.h"
+
+#include <cassert>
+#include <cstdint>
+#include <iostream>
+#include <util/generic/string.h>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_consume.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+constexpr size_t CordRepBtree::kMaxCapacity; // NOLINT: needed for C++ < C++17
+
+namespace {
+
+using NodeStack = CordRepBtree * [CordRepBtree::kMaxDepth];
+using EdgeType = CordRepBtree::EdgeType;
+using OpResult = CordRepBtree::OpResult;
+using CopyResult = CordRepBtree::CopyResult;
+
+constexpr auto kFront = CordRepBtree::kFront;
+constexpr auto kBack = CordRepBtree::kBack;
+
+inline bool exhaustive_validation() {
+ return cord_btree_exhaustive_validation.load(std::memory_order_relaxed);
+}
+
+// Implementation of the various 'Dump' functions.
+// Prints the entire tree structure of 'rep'. External callers should
+// not specify 'depth' and should leave it at its default (0) value.
+// Rep may be a CordRepBtree tree, or a SUBSTRING / EXTERNAL / FLAT node.
+void DumpAll(const CordRep* rep, bool include_contents, std::ostream& stream,
+ int depth = 0) {
+ // Allow for full height trees + substring -> flat / external nodes.
+ assert(depth <= CordRepBtree::kMaxDepth + 2);
+ TString sharing = const_cast<CordRep*>(rep)->refcount.IsOne()
+ ? TString("Private")
+ : y_absl::StrCat("Shared(", rep->refcount.Get(), ")");
+ TString sptr = y_absl::StrCat("0x", y_absl::Hex(rep));
+
+ // Dumps the data contents of `rep` if `include_contents` is true.
+ // Always emits a new line character.
+ auto maybe_dump_data = [&stream, include_contents](const CordRep* r) {
+ if (include_contents) {
+ // Allow for up to 60 wide display of content data, which with some
+ // indentation and prefix / labels keeps us within roughly 80-100 wide.
+ constexpr size_t kMaxDataLength = 60;
+ stream << ", data = \""
+ << CordRepBtree::EdgeData(r).substr(0, kMaxDataLength)
+ << (r->length > kMaxDataLength ? "\"..." : "\"");
+ }
+ stream << '\n';
+ };
+
+ // For each level, we print the 'shared/private' state and the rep pointer,
+ // indented by two spaces per recursive depth.
+ stream << TString(depth * 2, ' ') << sharing << " (" << sptr << ") ";
+
+ if (rep->IsBtree()) {
+ const CordRepBtree* node = rep->btree();
+ TString label =
+ node->height() ? y_absl::StrCat("Node(", node->height(), ")") : "Leaf";
+ stream << label << ", len = " << node->length
+ << ", begin = " << node->begin() << ", end = " << node->end()
+ << "\n";
+ for (CordRep* edge : node->Edges()) {
+ DumpAll(edge, include_contents, stream, depth + 1);
+ }
+ } else if (rep->tag == SUBSTRING) {
+ const CordRepSubstring* substring = rep->substring();
+ stream << "Substring, len = " << rep->length
+ << ", start = " << substring->start;
+ maybe_dump_data(rep);
+ DumpAll(substring->child, include_contents, stream, depth + 1);
+ } else if (rep->tag >= FLAT) {
+ stream << "Flat, len = " << rep->length
+ << ", cap = " << rep->flat()->Capacity();
+ maybe_dump_data(rep);
+ } else if (rep->tag == EXTERNAL) {
+ stream << "Extn, len = " << rep->length;
+ maybe_dump_data(rep);
+ }
+}
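+
+// Illustrative example output (hypothetical pointers, capacities and
+// refcounts; not part of the original comments) for a leaf holding a
+// private and a shared flat:
+//
+//   Private (0x...) Leaf, len = 11, begin = 0, end = 2
+//     Private (0x...) Flat, len = 5, cap = ...
+//     Shared(2) (0x...) Flat, len = 6, cap = ...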
+
+// TODO(b/192061034): add 'bytes to copy' logic to avoid large slop when
+// taking a substring of small data out of large reps, and for the general
+// efficiency of 'always copy small data'. Consider making this a cord rep
+// internal library function.
+CordRepSubstring* CreateSubstring(CordRep* rep, size_t offset, size_t n) {
+ assert(n != 0);
+ assert(offset + n <= rep->length);
+ assert(offset != 0 || n != rep->length);
+
+ if (rep->tag == SUBSTRING) {
+ CordRepSubstring* substring = rep->substring();
+ offset += substring->start;
+ rep = CordRep::Ref(substring->child);
+ CordRep::Unref(substring);
+ }
+ CordRepSubstring* substring = new CordRepSubstring();
+ substring->length = n;
+ substring->tag = SUBSTRING;
+ substring->start = offset;
+ substring->child = rep;
+ return substring;
+}
+
+// TODO(b/192061034): consider making this a cord rep library function.
+inline CordRep* MakeSubstring(CordRep* rep, size_t offset, size_t n) {
+ if (n == rep->length) return rep;
+ if (n == 0) return CordRep::Unref(rep), nullptr;
+ return CreateSubstring(rep, offset, n);
+}
+
+// TODO(b/192061034): consider making this a cord rep library function.
+inline CordRep* MakeSubstring(CordRep* rep, size_t offset) {
+ if (offset == 0) return rep;
+ return CreateSubstring(rep, offset, rep->length - offset);
+}
+
+// Resizes `edge` to the provided `length`. Adopts a reference on `edge`.
+// This method directly returns `edge` if `length` equals `edge->length`.
+// If `is_mutable` is set to true, this function may return `edge` with
+// `edge->length` set to the new length depending on the type and size of
+// `edge`. Otherwise, this function returns a new CordRepSubstring value.
+// Requires `length > 0 && length <= edge->length`.
+CordRep* ResizeEdge(CordRep* edge, size_t length, bool is_mutable) {
+ assert(length > 0);
+ assert(length <= edge->length);
+ assert(CordRepBtree::IsDataEdge(edge));
+ if (length >= edge->length) return edge;
+
+ if (is_mutable && (edge->tag >= FLAT || edge->tag == SUBSTRING)) {
+ edge->length = length;
+ return edge;
+ }
+
+ return CreateSubstring(edge, 0, length);
+}
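+
+// Illustrative example (not part of the original comments): given a FLAT of
+// length 10, ResizeEdge(flat, 4, /*is_mutable=*/true) truncates it in place
+// by setting flat->length = 4, while is_mutable == false yields a new
+// SUBSTRING {start = 0, length = 4} wrapping the flat.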
+
+template <EdgeType edge_type>
+inline y_absl::string_view Consume(y_absl::string_view s, size_t n) {
+ return edge_type == kBack ? s.substr(n) : s.substr(0, s.size() - n);
+}
+
+template <EdgeType edge_type>
+inline y_absl::string_view Consume(char* dst, y_absl::string_view s, size_t n) {
+ if (edge_type == kBack) {
+ memcpy(dst, s.data(), n);
+ return s.substr(n);
+ } else {
+ const size_t offset = s.size() - n;
+ memcpy(dst, s.data() + offset, n);
+ return s.substr(0, offset);
+ }
+}
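+
+// Worked example (illustrative only): for s = "abcdef" and n = 2, the kBack
+// variants consume the front of `s`: Consume<kBack>(dst, s, 2) copies "ab"
+// into `dst` and returns "cdef". The kFront variants consume the back:
+// Consume<kFront>(dst, s, 2) copies "ef" and returns "abcd".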
+
+// Known issue / optimization weirdness: the store associated with the
+// decrement introduces traffic between cpus (even if the result of that
+// traffic does nothing), so checking `IsOne()` first and skipping the
+// decrement entirely makes this faster than a single call to
+// refcount.Decrement() checking the zero refcount condition.
+template <typename R, typename Fn>
+inline void FastUnref(R* r, Fn&& fn) {
+ if (r->refcount.IsOne()) {
+ fn(r);
+ } else if (!r->refcount.DecrementExpectHighRefcount()) {
+ fn(r);
+ }
+}
+
+// Deletes a leaf node data edge. Requires `rep` to be an EXTERNAL or FLAT
+// node, or a SUBSTRING of an EXTERNAL or FLAT node.
+void DeleteLeafEdge(CordRep* rep) {
+ for (;;) {
+ if (rep->tag >= FLAT) {
+ CordRepFlat::Delete(rep->flat());
+ return;
+ }
+ if (rep->tag == EXTERNAL) {
+ CordRepExternal::Delete(rep->external());
+ return;
+ }
+ assert(rep->tag == SUBSTRING);
+ CordRepSubstring* substring = rep->substring();
+ rep = substring->child;
+ assert(rep->tag == EXTERNAL || rep->tag >= FLAT);
+ delete substring;
+ if (rep->refcount.Decrement()) return;
+ }
+}
+
+// StackOperations contains the logic to build a left-most or right-most stack
+// (leg) down to the leaf level of a btree, and 'unwind' / 'Finalize' methods to
+// propagate node changes up the stack.
+template <EdgeType edge_type>
+struct StackOperations {
+ // Returns true if the node at 'depth' is mutable, i.e. has a refcount
+ // of one, carries no CRC, and all of its parent nodes have a refcount of one.
+ inline bool owned(int depth) const { return depth < share_depth; }
+
+ // Returns the node at 'depth'.
+ inline CordRepBtree* node(int depth) const { return stack[depth]; }
+
+ // Builds a stack `depth` levels deep starting at `tree`, recording which
+ // nodes are private in the form of the 'share depth' at which nodes
+ // become shared.
+ inline CordRepBtree* BuildStack(CordRepBtree* tree, int depth) {
+ assert(depth <= tree->height());
+ int current_depth = 0;
+ while (current_depth < depth && tree->refcount.IsMutable()) {
+ stack[current_depth++] = tree;
+ tree = tree->Edge(edge_type)->btree();
+ }
+ share_depth = current_depth + (tree->refcount.IsMutable() ? 1 : 0);
+ while (current_depth < depth) {
+ stack[current_depth++] = tree;
+ tree = tree->Edge(edge_type)->btree();
+ }
+ return tree;
+ }
+
+ // Builds a stack with the invariant that all nodes are privately owned /
+ // not shared and carry no CRC data. This is used in iterative updates where
+ // a previous propagation guaranteed all nodes have this property.
+ inline void BuildOwnedStack(CordRepBtree* tree, int height) {
+ assert(height <= CordRepBtree::kMaxHeight);
+ int depth = 0;
+ while (depth < height) {
+ assert(tree->refcount.IsMutable());
+ stack[depth++] = tree;
+ tree = tree->Edge(edge_type)->btree();
+ }
+ assert(tree->refcount.IsMutable());
+ share_depth = depth + 1;
+ }
+
+ // Processes the final 'top level' result action for the tree.
+ // See the 'Action' enum for the various action implications.
+ static inline CordRepBtree* Finalize(CordRepBtree* tree, OpResult result) {
+ switch (result.action) {
+ case CordRepBtree::kPopped:
+ tree = edge_type == kBack ? CordRepBtree::New(tree, result.tree)
+ : CordRepBtree::New(result.tree, tree);
+ if (ABSL_PREDICT_FALSE(tree->height() > CordRepBtree::kMaxHeight)) {
+ tree = CordRepBtree::Rebuild(tree);
+ ABSL_RAW_CHECK(tree->height() <= CordRepBtree::kMaxHeight,
+ "Max height exceeded");
+ }
+ return tree;
+ case CordRepBtree::kCopied:
+ CordRep::Unref(tree);
+ ABSL_FALLTHROUGH_INTENDED;
+ case CordRepBtree::kSelf:
+ return result.tree;
+ }
+ ABSL_INTERNAL_UNREACHABLE;
+ return result.tree;
+ }
+
+ // Propagate the action result in 'result' up into all nodes of the stack
+ // starting at depth 'depth'. 'length' contains the extra length of data that
+ // was added at the lowest level, and is updated into all nodes of the stack.
+ // See the 'Action' enum for the various action implications.
+ // If 'propagate' is true, then any copied node values are updated into the
+ // stack, which is used for iterative processing on the same stack.
+ template <bool propagate = false>
+ inline CordRepBtree* Unwind(CordRepBtree* tree, int depth, size_t length,
+ OpResult result) {
+ // TODO(mvels): revisit the below code to check if 3 loops with 3
+ // (incremental) conditions is faster than 1 loop with a switch.
+ // Benchmarking and perf recordings indicate the loop with switch is
+ // fastest, likely because of indirect jumps on the tight case values and
+ // dense branches. But it's worth considering 3 loops, as the `action`
+ // transitions are mono directional. E.g.:
+ // while (action == kPopped) {
+ // ...
+ // }
+ // while (action == kCopied) {
+ // ...
+ // }
+ // ...
+ // We also found that an "if () do {}" loop here seems faster, possibly
+ // because it allows the branch predictor more granular heuristics on
+ // 'single leaf' (`depth` == 0) and 'single depth' (`depth` == 1) cases
+ // which appear to be the most common use cases.
+ if (depth != 0) {
+ do {
+ CordRepBtree* node = stack[--depth];
+ const bool owned = depth < share_depth;
+ switch (result.action) {
+ case CordRepBtree::kPopped:
+ assert(!propagate);
+ result = node->AddEdge<edge_type>(owned, result.tree, length);
+ break;
+ case CordRepBtree::kCopied:
+ result = node->SetEdge<edge_type>(owned, result.tree, length);
+ if (propagate) stack[depth] = result.tree;
+ break;
+ case CordRepBtree::kSelf:
+ node->length += length;
+ while (depth > 0) {
+ node = stack[--depth];
+ node->length += length;
+ }
+ return node;
+ }
+ } while (depth > 0);
+ }
+ return Finalize(tree, result);
+ }
+
+ // Invokes `Unwind` with `propagate=true` to update the stack node values.
+ inline CordRepBtree* Propagate(CordRepBtree* tree, int depth, size_t length,
+ OpResult result) {
+ return Unwind</*propagate=*/true>(tree, depth, length, result);
+ }
+
+ // `share_depth` contains the depth at which the nodes in the stack cannot
+ // be mutated. I.e., if the topmost level is shared (i.e.:
+ // `!refcount.IsMutable()`), then `share_depth` is 0. If the 2nd node
+ // is shared (and implicitly all nodes below that), then `share_depth` is 1,
+ // etc. A `share_depth` greater than the depth of the stack indicates that
+ // none of the nodes in the stack are shared.
+ int share_depth;
+
+ NodeStack stack;
+};
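+
+// Worked example (illustrative only) of `share_depth`: building a stack of
+// depth 2 where the root and its child are private but the grandchild is
+// shared yields share_depth == 2, so owned(0) and owned(1) are true while
+// owned(2) is false.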
+
+} // namespace
+
+void CordRepBtree::Dump(const CordRep* rep, y_absl::string_view label,
+ bool include_contents, std::ostream& stream) {
+ stream << "===================================\n";
+ if (!label.empty()) {
+ stream << label << '\n';
+ stream << "-----------------------------------\n";
+ }
+ if (rep) {
+ DumpAll(rep, include_contents, stream);
+ } else {
+ stream << "NULL\n";
+ }
+}
+
+void CordRepBtree::Dump(const CordRep* rep, y_absl::string_view label,
+ std::ostream& stream) {
+ Dump(rep, label, false, stream);
+}
+
+void CordRepBtree::Dump(const CordRep* rep, std::ostream& stream) {
+ Dump(rep, y_absl::string_view(), false, stream);
+}
+
+void CordRepBtree::DestroyLeaf(CordRepBtree* tree, size_t begin, size_t end) {
+ for (CordRep* edge : tree->Edges(begin, end)) {
+ FastUnref(edge, DeleteLeafEdge);
+ }
+ Delete(tree);
+}
+
+void CordRepBtree::DestroyNonLeaf(CordRepBtree* tree, size_t begin,
+ size_t end) {
+ for (CordRep* edge : tree->Edges(begin, end)) {
+ FastUnref(edge->btree(), Destroy);
+ }
+ Delete(tree);
+}
+
+bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) {
+#define NODE_CHECK_VALID(x) \
+ if (!(x)) { \
+ ABSL_RAW_LOG(ERROR, "CordRepBtree::CheckValid() FAILED: %s", #x); \
+ return false; \
+ }
+#define NODE_CHECK_EQ(x, y) \
+ if ((x) != (y)) { \
+ ABSL_RAW_LOG(ERROR, \
+ "CordRepBtree::CheckValid() FAILED: %s != %s (%s vs %s)", #x, \
+ #y, y_absl::StrCat(x).c_str(), y_absl::StrCat(y).c_str()); \
+ return false; \
+ }
+
+ NODE_CHECK_VALID(tree != nullptr);
+ NODE_CHECK_VALID(tree->IsBtree());
+ NODE_CHECK_VALID(tree->height() <= kMaxHeight);
+ NODE_CHECK_VALID(tree->begin() < tree->capacity());
+ NODE_CHECK_VALID(tree->end() <= tree->capacity());
+ NODE_CHECK_VALID(tree->begin() <= tree->end());
+ size_t child_length = 0;
+ for (CordRep* edge : tree->Edges()) {
+ NODE_CHECK_VALID(edge != nullptr);
+ if (tree->height() > 0) {
+ NODE_CHECK_VALID(edge->IsBtree());
+ NODE_CHECK_VALID(edge->btree()->height() == tree->height() - 1);
+ } else {
+ NODE_CHECK_VALID(IsDataEdge(edge));
+ }
+ child_length += edge->length;
+ }
+ NODE_CHECK_EQ(child_length, tree->length);
+ if ((!shallow || exhaustive_validation()) && tree->height() > 0) {
+ for (CordRep* edge : tree->Edges()) {
+ if (!IsValid(edge->btree(), shallow)) return false;
+ }
+ }
+ return true;
+
+#undef NODE_CHECK_VALID
+#undef NODE_CHECK_EQ
+}
+
+#ifndef NDEBUG
+
+CordRepBtree* CordRepBtree::AssertValid(CordRepBtree* tree, bool shallow) {
+ if (!IsValid(tree, shallow)) {
+ Dump(tree, "CordRepBtree validation failed:", false, std::cout);
+ ABSL_RAW_LOG(FATAL, "CordRepBtree::CheckValid() FAILED");
+ }
+ return tree;
+}
+
+const CordRepBtree* CordRepBtree::AssertValid(const CordRepBtree* tree,
+ bool shallow) {
+ if (!IsValid(tree, shallow)) {
+ Dump(tree, "CordRepBtree validation failed:", false, std::cout);
+ ABSL_RAW_LOG(FATAL, "CordRepBtree::CheckValid() FAILED");
+ }
+ return tree;
+}
+
+#endif // NDEBUG
+
+template <EdgeType edge_type>
+inline OpResult CordRepBtree::AddEdge(bool owned, CordRep* edge, size_t delta) {
+ if (size() >= kMaxCapacity) return {New(edge), kPopped};
+ OpResult result = ToOpResult(owned);
+ result.tree->Add<edge_type>(edge);
+ result.tree->length += delta;
+ return result;
+}
+
+template <EdgeType edge_type>
+OpResult CordRepBtree::SetEdge(bool owned, CordRep* edge, size_t delta) {
+ OpResult result;
+ const size_t idx = index(edge_type);
+ if (owned) {
+ result = {this, kSelf};
+ CordRep::Unref(edges_[idx]);
+ } else {
+ // Create a copy containing all unchanged edges. Unchanged edges are the
+ // open interval [begin, back) or [begin + 1, end) depending on `edge_type`.
+ // We conveniently cover both cases using a constexpr `shift` of 0 or 1,
+ // as `end == back + 1`.
+ result = {CopyRaw(), kCopied};
+ constexpr int shift = edge_type == kFront ? 1 : 0;
+ for (CordRep* r : Edges(begin() + shift, back() + shift)) {
+ CordRep::Ref(r);
+ }
+ }
+ result.tree->edges_[idx] = edge;
+ result.tree->length += delta;
+ return result;
+}
+
+template <EdgeType edge_type>
+CordRepBtree* CordRepBtree::AddCordRep(CordRepBtree* tree, CordRep* rep) {
+ const int depth = tree->height();
+ const size_t length = rep->length;
+ StackOperations<edge_type> ops;
+ CordRepBtree* leaf = ops.BuildStack(tree, depth);
+ const OpResult result =
+ leaf->AddEdge<edge_type>(ops.owned(depth), rep, length);
+ return ops.Unwind(tree, depth, length, result);
+}
+
+template <>
+CordRepBtree* CordRepBtree::NewLeaf<kBack>(y_absl::string_view data,
+ size_t extra) {
+ CordRepBtree* leaf = CordRepBtree::New(0);
+ size_t length = 0;
+ size_t end = 0;
+ const size_t cap = leaf->capacity();
+ while (!data.empty() && end != cap) {
+ auto* flat = CordRepFlat::New(data.length() + extra);
+ flat->length = (std::min)(data.length(), flat->Capacity());
+ length += flat->length;
+ leaf->edges_[end++] = flat;
+ data = Consume<kBack>(flat->Data(), data, flat->length);
+ }
+ leaf->length = length;
+ leaf->set_end(end);
+ return leaf;
+}
+
+template <>
+CordRepBtree* CordRepBtree::NewLeaf<kFront>(y_absl::string_view data,
+ size_t extra) {
+ CordRepBtree* leaf = CordRepBtree::New(0);
+ size_t length = 0;
+ size_t begin = leaf->capacity();
+ leaf->set_end(leaf->capacity());
+ while (!data.empty() && begin != 0) {
+ auto* flat = CordRepFlat::New(data.length() + extra);
+ flat->length = (std::min)(data.length(), flat->Capacity());
+ length += flat->length;
+ leaf->edges_[--begin] = flat;
+ data = Consume<kFront>(flat->Data(), data, flat->length);
+ }
+ leaf->length = length;
+ leaf->set_begin(begin);
+ return leaf;
+}
+
+template <>
+y_absl::string_view CordRepBtree::AddData<kBack>(y_absl::string_view data,
+ size_t extra) {
+ assert(!data.empty());
+ assert(size() < capacity());
+ AlignBegin();
+ const size_t cap = capacity();
+ do {
+ CordRepFlat* flat = CordRepFlat::New(data.length() + extra);
+ const size_t n = (std::min)(data.length(), flat->Capacity());
+ flat->length = n;
+ edges_[fetch_add_end(1)] = flat;
+ data = Consume<kBack>(flat->Data(), data, n);
+ } while (!data.empty() && end() != cap);
+ return data;
+}
+
+template <>
+y_absl::string_view CordRepBtree::AddData<kFront>(y_absl::string_view data,
+ size_t extra) {
+ assert(!data.empty());
+ assert(size() < capacity());
+ AlignEnd();
+ do {
+ CordRepFlat* flat = CordRepFlat::New(data.length() + extra);
+ const size_t n = (std::min)(data.length(), flat->Capacity());
+ flat->length = n;
+ edges_[sub_fetch_begin(1)] = flat;
+ data = Consume<kFront>(flat->Data(), data, n);
+ } while (!data.empty() && begin() != 0);
+ return data;
+}
+
+template <EdgeType edge_type>
+CordRepBtree* CordRepBtree::AddData(CordRepBtree* tree, y_absl::string_view data,
+ size_t extra) {
+ if (ABSL_PREDICT_FALSE(data.empty())) return tree;
+
+ const size_t original_data_size = data.size();
+ int depth = tree->height();
+ StackOperations<edge_type> ops;
+ CordRepBtree* leaf = ops.BuildStack(tree, depth);
+
+ // If there is capacity in the last edge, append as much data
+ // as possible into this last edge.
+ if (leaf->size() < leaf->capacity()) {
+ OpResult result = leaf->ToOpResult(ops.owned(depth));
+ data = result.tree->AddData<edge_type>(data, extra);
+ if (data.empty()) {
+ result.tree->length += original_data_size;
+ return ops.Unwind(tree, depth, original_data_size, result);
+ }
+
+ // We added some data into this leaf, but not all. Propagate the added
+ // length to the topmost node, and rebuild the stack with any newly copied
+ // or updated nodes. From this point on, the path (leg) from the topmost
+ // node to the right-most node towards the leaf node is privately owned.
+ size_t delta = original_data_size - data.size();
+ assert(delta > 0);
+ result.tree->length += delta;
+ tree = ops.Propagate(tree, depth, delta, result);
+ ops.share_depth = depth + 1;
+ }
+
+ // We were unable to append all data into the existing right-most leaf node.
+ // This means all remaining data must be put into (a) new leaf node(s) which
+ // we append to the tree. To make this efficient, we iteratively build full
+ // leaf nodes from `data` until the created leaf contains all remaining data.
+ // We utilize the `Unwind` method to merge the created leaf into the first
+ // level towards root that has capacity. On each iteration with remaining
+ // data, we rebuild the stack in the knowledge that right-most nodes are
+ // privately owned after the first `Unwind` completes.
+ for (;;) {
+ OpResult result = {CordRepBtree::NewLeaf<edge_type>(data, extra), kPopped};
+ if (result.tree->length == data.size()) {
+ return ops.Unwind(tree, depth, result.tree->length, result);
+ }
+ data = Consume<edge_type>(data, result.tree->length);
+ tree = ops.Unwind(tree, depth, result.tree->length, result);
+ depth = tree->height();
+ ops.BuildOwnedStack(tree, depth);
+ }
+}
+
+template <EdgeType edge_type>
+CordRepBtree* CordRepBtree::Merge(CordRepBtree* dst, CordRepBtree* src) {
+ assert(dst->height() >= src->height());
+
+ // Capture source length as we may consume / destroy `src`.
+ const size_t length = src->length;
+
+ // We attempt to merge `src` at its corresponding height in `dst`.
+ const int depth = dst->height() - src->height();
+ StackOperations<edge_type> ops;
+ CordRepBtree* merge_node = ops.BuildStack(dst, depth);
+
+ // If there is enough space in `merge_node` for all edges from `src`, add all
+ // edges to this node, making a fresh copy as needed if not privately owned.
+ // If `merge_node` does not have capacity for `src`, we rely on `Unwind` and
+ // `Finalize` to merge `src` into the first level towards `root` where there
+ // is capacity for another edge, or create a new top level node.
+ OpResult result;
+ if (merge_node->size() + src->size() <= kMaxCapacity) {
+ result = merge_node->ToOpResult(ops.owned(depth));
+ result.tree->Add<edge_type>(src->Edges());
+ result.tree->length += src->length;
+ if (src->refcount.IsOne()) {
+ Delete(src);
+ } else {
+ for (CordRep* edge : src->Edges()) CordRep::Ref(edge);
+ CordRepBtree::Unref(src);
+ }
+ } else {
+ result = {src, kPopped};
+ }
+
+ // Unless we merged at the top level (i.e.: src and dst are equal height),
+ // unwind the result towards the top level, and finalize the result.
+ if (depth) {
+ return ops.Unwind(dst, depth, length, result);
+ }
+ return ops.Finalize(dst, result);
+}
+
+CopyResult CordRepBtree::CopySuffix(size_t offset) {
+ assert(offset < this->length);
+
+ // As long as `offset` starts inside the last edge, we can 'drop' the current
+ // depth. For the most extreme example: if offset references the last data
+ // edge in the tree, there is only a single edge / path from the top of the
+ // tree to that last edge, so we can drop all the nodes except that edge.
+ // The fast path check for this is `back->length >= length - offset`.
+ int height = this->height();
+ CordRepBtree* node = this;
+ size_t len = node->length - offset;
+ CordRep* back = node->Edge(kBack);
+ while (back->length >= len) {
+ offset = back->length - len;
+ if (--height < 0) {
+ return {MakeSubstring(CordRep::Ref(back), offset), height};
+ }
+ node = back->btree();
+ back = node->Edge(kBack);
+ }
+ if (offset == 0) return {CordRep::Ref(node), height};
+
+ // Offset does not point into the last edge, so we span at least two edges.
+ // Find the index of offset with `IndexBeyond` which provides us the edge
+ // 'beyond' the offset if offset is not a clean starting point of an edge.
+ Position pos = node->IndexBeyond(offset);
+ CordRepBtree* sub = node->CopyToEndFrom(pos.index, len);
+ const CopyResult result = {sub, height};
+
+ // `pos.n` contains a non-zero value if the offset is not an exact starting
+ // point of an edge. In this case, `pos.n` contains the 'trailing' amount of
+ // bytes of the edge preceding the one at `pos.index`. We need to iteratively
+ // adjust the preceding edge with the 'broken' offset until we have a clean
+ // start of an edge.
+ while (pos.n != 0) {
+ assert(pos.index >= 1);
+ const size_t begin = pos.index - 1;
+ sub->set_begin(begin);
+ CordRep* const edge = node->Edge(begin);
+
+ len = pos.n;
+ offset = edge->length - len;
+
+ if (--height < 0) {
+ sub->edges_[begin] = MakeSubstring(CordRep::Ref(edge), offset, len);
+ return result;
+ }
+
+ node = edge->btree();
+ pos = node->IndexBeyond(offset);
+
+ CordRepBtree* nsub = node->CopyToEndFrom(pos.index, len);
+ sub->edges_[begin] = nsub;
+ sub = nsub;
+ }
+ sub->set_begin(pos.index);
+ return result;
+}
+
+CopyResult CordRepBtree::CopyPrefix(size_t n, bool allow_folding) {
+ assert(n > 0);
+ assert(n <= this->length);
+
+ // As long as `n` does not exceed the length of the first edge, we can 'drop'
+ // the current depth. For the most extreme example: if we'd copy a 1 byte
+ // prefix from a tree, there is only a single edge / path from the top of the
+ // tree to the single data edge containing this byte, so we can drop all the
+ // nodes except the data node.
+ int height = this->height();
+ CordRepBtree* node = this;
+ CordRep* front = node->Edge(kFront);
+ if (allow_folding) {
+ while (front->length >= n) {
+ if (--height < 0) return {MakeSubstring(CordRep::Ref(front), 0, n), -1};
+ node = front->btree();
+ front = node->Edge(kFront);
+ }
+ }
+ if (node->length == n) return {CordRep::Ref(node), height};
+
+ // `n` spans at least two nodes, find the end point of the span.
+ Position pos = node->IndexOf(n);
+
+ // Create a partial copy of the node up to `pos.index`, with a defined length
+ // of `n`. Any 'partial last edge' is added further below as needed.
+ CordRepBtree* sub = node->CopyBeginTo(pos.index, n);
+ const CopyResult result = {sub, height};
+
+ // `pos.n` contains the 'offset inside the edge for IndexOf(n)'. As long as
+ // this is not zero, we don't have a 'clean cut', so we need to make a
+ // (partial) copy of that last edge, and repeat this until pos.n is zero.
+ while (pos.n != 0) {
+ size_t end = pos.index;
+ n = pos.n;
+
+ CordRep* edge = node->Edge(pos.index);
+ if (--height < 0) {
+ sub->edges_[end++] = MakeSubstring(CordRep::Ref(edge), 0, n);
+ sub->set_end(end);
+ AssertValid(result.edge->btree());
+ return result;
+ }
+
+ node = edge->btree();
+ pos = node->IndexOf(n);
+ CordRepBtree* nsub = node->CopyBeginTo(pos.index, n);
+ sub->edges_[end++] = nsub;
+ sub->set_end(end);
+ sub = nsub;
+ }
+ sub->set_end(pos.index);
+ AssertValid(result.edge->btree());
+ return result;
+}
+
+CordRep* CordRepBtree::ExtractFront(CordRepBtree* tree) {
+ CordRep* front = tree->Edge(tree->begin());
+ if (tree->refcount.IsMutable()) {
+ Unref(tree->Edges(tree->begin() + 1, tree->end()));
+ CordRepBtree::Delete(tree);
+ } else {
+ CordRep::Ref(front);
+ CordRep::Unref(tree);
+ }
+ return front;
+}
+
+CordRepBtree* CordRepBtree::ConsumeBeginTo(CordRepBtree* tree, size_t end,
+ size_t new_length) {
+ assert(end <= tree->end());
+ if (tree->refcount.IsMutable()) {
+ Unref(tree->Edges(end, tree->end()));
+ tree->set_end(end);
+ tree->length = new_length;
+ } else {
+ CordRepBtree* old = tree;
+ tree = tree->CopyBeginTo(end, new_length);
+ CordRep::Unref(old);
+ }
+ return tree;
+}
+
+CordRep* CordRepBtree::RemoveSuffix(CordRepBtree* tree, size_t n) {
+ // Check input and deal with trivial cases 'Remove all/none'
+ assert(tree != nullptr);
+ assert(n <= tree->length);
+ const size_t len = tree->length;
+ if (ABSL_PREDICT_FALSE(n == 0)) {
+ return tree;
+ }
+ if (ABSL_PREDICT_FALSE(n >= len)) {
+ CordRepBtree::Unref(tree);
+ return nullptr;
+ }
+
+ size_t length = len - n;
+ int height = tree->height();
+ bool is_mutable = tree->refcount.IsMutable();
+
+ // Extract all top nodes which are reduced to size = 1
+ Position pos = tree->IndexOfLength(length);
+ while (pos.index == tree->begin()) {
+ CordRep* edge = ExtractFront(tree);
+ is_mutable &= edge->refcount.IsMutable();
+ if (height-- == 0) return ResizeEdge(edge, length, is_mutable);
+ tree = edge->btree();
+ pos = tree->IndexOfLength(length);
+ }
+
+ // Repeat the following sequence traversing down the tree:
+ // - Crop the top node to the 'last remaining edge' adjusting length.
+ // - Set the length for down edges to the partial length in that last edge.
+ // - Repeat this until the last edge is 'included in full'
+ // - If we hit the data edge level, resize and return the last data edge
+ CordRepBtree* top = tree = ConsumeBeginTo(tree, pos.index + 1, length);
+ CordRep* edge = tree->Edge(pos.index);
+ length = pos.n;
+ while (length != edge->length) {
+ // ConsumeBeginTo guarantees `tree` is a clean, privately owned copy.
+ assert(tree->refcount.IsMutable());
+ const bool edge_is_mutable = edge->refcount.IsMutable();
+
+ if (height-- == 0) {
+ tree->edges_[pos.index] = ResizeEdge(edge, length, edge_is_mutable);
+ return AssertValid(top);
+ }
+
+ if (!edge_is_mutable) {
+ // We can't 'in place' remove any suffixes down this edge.
+ // Replace this edge with a prefix copy instead.
+ tree->edges_[pos.index] = edge->btree()->CopyPrefix(length, false).edge;
+ CordRep::Unref(edge);
+ return AssertValid(top);
+ }
+
+ // Move down one level, rinse repeat.
+ tree = edge->btree();
+ pos = tree->IndexOfLength(length);
+ tree = ConsumeBeginTo(edge->btree(), pos.index + 1, length);
+ edge = tree->Edge(pos.index);
+ length = pos.n;
+ }
+
+ return AssertValid(top);
+}
+
+CordRep* CordRepBtree::SubTree(size_t offset, size_t n) {
+ assert(n <= this->length);
+ assert(offset <= this->length - n);
+ if (ABSL_PREDICT_FALSE(n == 0)) return nullptr;
+
+ CordRepBtree* node = this;
+ int height = node->height();
+ Position front = node->IndexOf(offset);
+ CordRep* left = node->edges_[front.index];
+ while (front.n + n <= left->length) {
+ if (--height < 0) return MakeSubstring(CordRep::Ref(left), front.n, n);
+ node = left->btree();
+ front = node->IndexOf(front.n);
+ left = node->edges_[front.index];
+ }
+
+ const Position back = node->IndexBefore(front, n);
+ CordRep* const right = node->edges_[back.index];
+ assert(back.index > front.index);
+
+ // Get partial suffix and prefix entries.
+ CopyResult prefix;
+ CopyResult suffix;
+ if (height > 0) {
+ // Copy prefix and suffix of the boundary nodes.
+ prefix = left->btree()->CopySuffix(front.n);
+ suffix = right->btree()->CopyPrefix(back.n);
+
+ // If there is an edge between the prefix and suffix edges, then the tree
+ // must remain at its previous (full) height. If we have no edges between
+ // prefix and suffix edges, then the tree must be as high as either the
+ // suffix or prefix edges (which are collapsed to their minimum heights).
+ if (front.index + 1 == back.index) {
+ height = (std::max)(prefix.height, suffix.height) + 1;
+ }
+
+ // Raise prefix and suffixes to the new tree height.
+ for (int h = prefix.height + 1; h < height; ++h) {
+ prefix.edge = CordRepBtree::New(prefix.edge);
+ }
+ for (int h = suffix.height + 1; h < height; ++h) {
+ suffix.edge = CordRepBtree::New(suffix.edge);
+ }
+ } else {
+ // Leaf node, simply take substrings for prefix and suffix.
+ prefix = CopyResult{MakeSubstring(CordRep::Ref(left), front.n), -1};
+ suffix = CopyResult{MakeSubstring(CordRep::Ref(right), 0, back.n), -1};
+ }
+
+ // Compose resulting tree.
+ CordRepBtree* sub = CordRepBtree::New(height);
+ size_t end = 0;
+ sub->edges_[end++] = prefix.edge;
+ for (CordRep* r : node->Edges(front.index + 1, back.index)) {
+ sub->edges_[end++] = CordRep::Ref(r);
+ }
+ sub->edges_[end++] = suffix.edge;
+ sub->set_end(end);
+ sub->length = n;
+ return AssertValid(sub);
+}
+
+CordRepBtree* CordRepBtree::MergeTrees(CordRepBtree* left,
+ CordRepBtree* right) {
+ return left->height() >= right->height() ? Merge<kBack>(left, right)
+ : Merge<kFront>(right, left);
+}
+
+bool CordRepBtree::IsFlat(y_absl::string_view* fragment) const {
+ if (height() == 0 && size() == 1) {
+ if (fragment) *fragment = Data(begin());
+ return true;
+ }
+ return false;
+}
+
+bool CordRepBtree::IsFlat(size_t offset, const size_t n,
+ y_absl::string_view* fragment) const {
+ assert(n <= this->length);
+ assert(offset <= this->length - n);
+ if (ABSL_PREDICT_FALSE(n == 0)) return false;
+ int height = this->height();
+ const CordRepBtree* node = this;
+ for (;;) {
+ const Position front = node->IndexOf(offset);
+ const CordRep* edge = node->Edge(front.index);
+ if (edge->length < front.n + n) return false;
+ if (--height < 0) {
+ if (fragment) *fragment = EdgeData(edge).substr(front.n, n);
+ return true;
+ }
+ offset = front.n;
+ node = node->Edge(front.index)->btree();
+ }
+}
+
+char CordRepBtree::GetCharacter(size_t offset) const {
+ assert(offset < length);
+ const CordRepBtree* node = this;
+ int height = node->height();
+ for (;;) {
+ Position front = node->IndexOf(offset);
+ if (--height < 0) return node->Data(front.index)[front.n];
+ offset = front.n;
+ node = node->Edge(front.index)->btree();
+ }
+}
+
+Span<char> CordRepBtree::GetAppendBufferSlow(size_t size) {
+ // The inlined version in `GetAppendBuffer()` deals with all heights <= 3.
+ assert(height() >= 4);
+ assert(refcount.IsMutable());
+
+ // Build a stack of nodes we may potentially need to update if we find a
+ // non-shared FLAT with capacity at the leaf level.
+ const int depth = height();
+ CordRepBtree* node = this;
+ CordRepBtree* stack[kMaxDepth];
+ for (int i = 0; i < depth; ++i) {
+ node = node->Edge(kBack)->btree();
+ if (!node->refcount.IsMutable()) return {};
+ stack[i] = node;
+ }
+
+ // Must be a privately owned, mutable flat.
+ CordRep* const edge = node->Edge(kBack);
+ if (!edge->refcount.IsMutable() || edge->tag < FLAT) return {};
+
+ // Must have capacity.
+ const size_t avail = edge->flat()->Capacity() - edge->length;
+ if (avail == 0) return {};
+
+ // Build span on remaining capacity.
+ size_t delta = (std::min)(size, avail);
+ Span<char> span = {edge->flat()->Data() + edge->length, delta};
+ edge->length += delta;
+ this->length += delta;
+ for (int i = 0; i < depth; ++i) {
+ stack[i]->length += delta;
+ }
+ return span;
+}
+
+CordRepBtree* CordRepBtree::CreateSlow(CordRep* rep) {
+ if (rep->IsBtree()) return rep->btree();
+
+ CordRepBtree* node = nullptr;
+ auto consume = [&node](CordRep* r, size_t offset, size_t length) {
+ r = MakeSubstring(r, offset, length);
+ if (node == nullptr) {
+ node = New(r);
+ } else {
+ node = CordRepBtree::AddCordRep<kBack>(node, r);
+ }
+ };
+ Consume(rep, consume);
+ return node;
+}
+
+CordRepBtree* CordRepBtree::AppendSlow(CordRepBtree* tree, CordRep* rep) {
+ if (ABSL_PREDICT_TRUE(rep->IsBtree())) {
+ return MergeTrees(tree, rep->btree());
+ }
+ auto consume = [&tree](CordRep* r, size_t offset, size_t length) {
+ r = MakeSubstring(r, offset, length);
+ tree = CordRepBtree::AddCordRep<kBack>(tree, r);
+ };
+ Consume(rep, consume);
+ return tree;
+}
+
+CordRepBtree* CordRepBtree::PrependSlow(CordRepBtree* tree, CordRep* rep) {
+ if (ABSL_PREDICT_TRUE(rep->IsBtree())) {
+ return MergeTrees(rep->btree(), tree);
+ }
+ auto consume = [&tree](CordRep* r, size_t offset, size_t length) {
+ r = MakeSubstring(r, offset, length);
+ tree = CordRepBtree::AddCordRep<kFront>(tree, r);
+ };
+ ReverseConsume(rep, consume);
+ return tree;
+}
+
+CordRepBtree* CordRepBtree::Append(CordRepBtree* tree, y_absl::string_view data,
+ size_t extra) {
+ return CordRepBtree::AddData<kBack>(tree, data, extra);
+}
+
+CordRepBtree* CordRepBtree::Prepend(CordRepBtree* tree, y_absl::string_view data,
+ size_t extra) {
+ return CordRepBtree::AddData<kFront>(tree, data, extra);
+}
+
+template CordRepBtree* CordRepBtree::AddCordRep<kFront>(CordRepBtree* tree,
+ CordRep* rep);
+template CordRepBtree* CordRepBtree::AddCordRep<kBack>(CordRepBtree* tree,
+ CordRep* rep);
+template CordRepBtree* CordRepBtree::AddData<kFront>(CordRepBtree* tree,
+ y_absl::string_view data,
+ size_t extra);
+template CordRepBtree* CordRepBtree::AddData<kBack>(CordRepBtree* tree,
+ y_absl::string_view data,
+ size_t extra);
+
+void CordRepBtree::Rebuild(CordRepBtree** stack, CordRepBtree* tree,
+ bool consume) {
+ bool owned = consume && tree->refcount.IsOne();
+ if (tree->height() == 0) {
+ for (CordRep* edge : tree->Edges()) {
+ if (!owned) edge = CordRep::Ref(edge);
+ size_t height = 0;
+ size_t length = edge->length;
+ CordRepBtree* node = stack[0];
+ OpResult result = node->AddEdge<kBack>(true, edge, length);
+ while (result.action == CordRepBtree::kPopped) {
+ stack[height] = result.tree;
+ if (stack[++height] == nullptr) {
+ result.action = CordRepBtree::kSelf;
+ stack[height] = CordRepBtree::New(node, result.tree);
+ } else {
+ node = stack[height];
+ result = node->AddEdge<kBack>(true, result.tree, length);
+ }
+ }
+ while (stack[++height] != nullptr) {
+ stack[height]->length += length;
+ }
+ }
+ } else {
+ for (CordRep* rep : tree->Edges()) {
+ Rebuild(stack, rep->btree(), owned);
+ }
+ }
+ if (consume) {
+ if (owned) {
+ CordRepBtree::Delete(tree);
+ } else {
+ CordRepBtree::Unref(tree);
+ }
+ }
+}
+
+CordRepBtree* CordRepBtree::Rebuild(CordRepBtree* tree) {
+ // Set up initial stack with empty leaf node.
+ CordRepBtree* node = CordRepBtree::New();
+ CordRepBtree* stack[CordRepBtree::kMaxDepth + 1] = {node};
+
+ // Recursively build the tree, consuming the input tree.
+ Rebuild(stack, tree, /* consume reference */ true);
+
+ // Return the topmost node.
+ for (CordRepBtree* parent : stack) {
+ if (parent == nullptr) return node;
+ node = parent;
+ }
+
+ // Unreachable
+ assert(false);
+ return nullptr;
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h
new file mode 100644
index 00000000000..3ad8097cc84
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h
@@ -0,0 +1,939 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_H_
+
+#include <cassert>
+#include <cstdint>
+#include <iosfwd>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/types/span.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+class CordRepBtreeNavigator;
+
+// CordRepBtree is, as the name implies, a btree implementation of a CordRep
+// tree. Data is stored at the leaf level only; non leaf nodes contain down
+// pointers only. Allowed types of data edges are FLAT, EXTERNAL and
+// SUBSTRINGs of FLAT or EXTERNAL nodes. The implementation allows data to be
+// added to either end of the tree only; it does not provide any 'insert'
+// logic. This has the benefit that we can expect good fill ratios: all nodes
+// except the outer 'legs' will have 100% fill ratios for trees built using
+// Append/Prepend methods. Merged trees will typically have a fill ratio well
+// above 50%, as in a similar fashion one side of the merged tree will
+// typically have a 100% fill ratio, and the 'open' end will average 50%. All
+// operations are O(log(n)) or better, and the tree never needs balancing.
+//
+// All methods accepting a CordRep* or CordRepBtree* adopt a reference on that
+// input unless explicitly stated otherwise. All functions returning a CordRep*
+// or CordRepBtree* instance transfer a reference back to the caller.
+// Simplified, callers both 'donate' and 'consume' a reference count on each
+// call, simplifying the API. An example of building a tree:
+//
+// CordRepBtree* tree = CordRepBtree::Create(MakeFlat("Hello"));
+// tree = CordRepBtree::Append(tree, MakeFlat("world"));
+//
+// In the above example, all inputs are consumed, making each call reference
+// count neutral with respect to `tree`. The returned `tree` value can be
+// different from the input if the input is shared with other threads, or if
+// the tree grows in height, but callers typically never have to concern
+// themselves with that and can trust that all methods do the right thing at
+// all times.
+class CordRepBtree : public CordRep {
+ public:
+ // EdgeType identifies `front` and `back` enum values.
+ // Various implementations in CordRepBtree such as `Add` and `Edge` are
+ // generic and templated on operating on either of the boundary edges.
+ // For more information on the possible edges contained in a CordRepBtree
+ // instance see the documentation for `edges_`.
+ enum class EdgeType { kFront, kBack };
+
+ // Convenience constants into `EdgeType`
+ static constexpr EdgeType kFront = EdgeType::kFront;
+ static constexpr EdgeType kBack = EdgeType::kBack;
+
+ // Maximum number of edges: based on experiments and performance data, we
+ // can pick suitable values resulting in optimally cacheline-aligned sizes.
+ // The preferred values are based on 64-bit systems where we aim to align
+ // this class onto 64 bytes, i.e.: 6 = 64 bytes, 14 = 128 bytes, etc.
+ // TODO(b/192061034): experiment with alternative sizes.
+ static constexpr size_t kMaxCapacity = 6;
+
+ // Reasonable maximum height of the btree. We can expect a fill ratio of at
+ // least 50%: trees are always expanded at the front or back. Concatenating
+ // trees will then typically fold at the topmost node, where the lower nodes
+ // are at least at capacity on one side of joined inputs. Even at a lower
+ // fill rate of 4 edges per node, we have capacity for ~16 million leaf
+ // nodes (4^12 ≈ 16.7 million).
+ // We will fail / abort if an application ever exceeds this height, which
+ // should be extremely rare (near impossible) and be an indication of an
+ // application error: we do not assume it reasonable for any application to
+ // operate correctly with such monster trees.
+ // Another compelling reason for the number `12` is that any contextual stack
+ // required for navigation or insertion requires 12 words and 12 bytes, which
+ // fits inside 2 cache lines with some room to spare, and is reasonable as a
+ // local stack variable compared to Cord's current near 400 bytes stack use.
+ // The maximum `height` value of a node is then `kMaxDepth - 1` as node height
+ // values start with a value of 0 for leaf nodes.
+ static constexpr int kMaxDepth = 12;
+ static constexpr int kMaxHeight = kMaxDepth - 1;
+
+ // `Action` defines the action for unwinding changes done at the btree's leaf
+ // level that need to be propagated up to the parent node(s). Each operation
+ // on a node has an effect / action defined as follows:
+ // - kSelf
+ // The operation (add / update, etc) was performed directly on the node as
+ // the node is private to the current thread (i.e.: not shared directly or
+ // indirectly through a refcount > 1). Changes can be propagated directly to
+ // all parent nodes as all parent nodes are also then private to the current
+ // thread.
+ // - kCopied
+ // The operation (add / update, etc) was performed on a copy of the original
+ // node, as the node is (potentially) directly or indirectly shared with
+ // other threads. Changes need to be propagated into the parent nodes where
+ // the old down pointer must be unreffed and replaced with this new copy.
+ // Such changes to parent nodes may themselves require a copy if the parent
+ // node is also shared. A kCopied action can propagate all the way to the
+ // top node where we then must unref the `tree` input provided by the
+ // caller, and return the new copy.
+ // - kPopped
+ // The operation (typically add) could not be satisfied due to insufficient
+ // capacity in the targeted node, and a new 'leg' was created that needs to
+ // be added into the parent node. For example, adding a FLAT inside a leaf
+ // node that is at capacity will create a new leaf node containing that
+ // FLAT, that needs to be 'popped' up the btree. Such 'pop' actions can
+ // cascade up the tree if parent nodes are also at capacity. A 'Popped'
+ // action propagating all the way to the top of the tree will result in
+ // the tree becoming one level higher than the current tree through a final
+ // `CordRepBtree::New(tree, popped)` call, resulting in a new top node
+ // referencing the old tree and the new (fully popped upwards) 'leg'.
+ enum Action { kSelf, kCopied, kPopped };
+
+ // Result of an operation on a node. See the `Action` enum for details.
+ struct OpResult {
+ CordRepBtree* tree;
+ Action action;
+ };
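+
+ // Worked example (illustrative only): `AddEdge` on a leaf that is at
+ // capacity returns {New(edge), kPopped}. When unwinding, the parent either
+ // absorbs the popped leaf ({parent, kSelf} if private, {copy, kCopied} if
+ // shared), or pops again if it too is at capacity, cascading towards the
+ // root.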
+
+ // Return value of the CopyPrefix and CopySuffix methods which can
+ // return a node or data edge at any height inside the tree.
+ // A height of 0 defines the lowest (leaf) node, a height of -1 identifies
+ // `edge` as being a plain data node: EXTERNAL / FLAT or SUBSTRING thereof.
+ struct CopyResult {
+ CordRep* edge;
+ int height;
+ };
+
+ // Logical position inside a node:
+ // - index: index of the edge.
+ // - n: size or offset value depending on context.
+ struct Position {
+ size_t index;
+ size_t n;
+ };
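+
+ // Worked example (illustrative only, assuming begin() == 0): in a leaf
+ // with edges of lengths {5, 8}, the position of logical offset 7 is
+ // {index = 1, n = 2}, i.e., 2 bytes into the second edge.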
+
+ // Creates a btree from the given input. Adopts a ref of `rep`.
+ // If the input `rep` is itself a btree, i.e., `IsBtree()`, then this
+ // function immediately returns `rep->btree()`. If the input is a valid data
+ // edge (see IsDataEdge()), then a new leaf node is returned containing `rep`
+ // as the sole data edge. Else, the input is assumed to be a (legacy) concat
+ // tree, and the input is consumed and transformed into a btree().
+ static CordRepBtree* Create(CordRep* rep);
+
+ // Destroys the provided tree. Should only be called by cord internal API's,
+ // typically after a ref_count.Decrement() on the last reference count.
+ static void Destroy(CordRepBtree* tree);
+
+ // Use CordRep::Unref() as we overload for y_absl::Span<CordRep* const>.
+ using CordRep::Unref;
+
+ // Unrefs all edges in `edges`, whose refcounts are assumed to be 'likely one'.
+ static void Unref(y_absl::Span<CordRep* const> edges);
+
+ // Appends / Prepends an existing CordRep instance to this tree.
+ // The below methods accept three types of input:
+ // 1) `rep` is a data node (see `IsDataEdge` for valid data edges).
+ // `rep` is appended or prepended to this tree 'as is'.
+ // 2) `rep` is a BTREE.
+ // `rep` is merged into `tree` respecting the Append/Prepend order.
+ // 3) `rep` is some other (legacy) type.
+ // `rep` is converted in place and added to `tree`.
+ // Requires `tree` and `rep` to be not null.
+ static CordRepBtree* Append(CordRepBtree* tree, CordRep* rep);
+ static CordRepBtree* Prepend(CordRepBtree* tree, CordRep* rep);
+
+ // Append/Prepend the data in `data` to this tree.
+ // The `extra` parameter defines how much extra capacity should be allocated
+ // for any additional FLAT being allocated. This is an optimization hint from
+ // the caller. For example, a caller may need to add 2 string_views of data
+ // "abc" and "defghi" which are not consecutive. The caller can in this case
+ // invoke `AddData(tree, "abc", 6)`, and any newly added flat is allocated
+ // where possible with at least 6 bytes of extra capacity beyond `length`.
+ // This helps avoid data getting fragmented over multiple flats.
+ // There is no limit on the size of `data`. If `data` cannot be stored inside
+ // a single flat, then the function will iteratively add flats until all data
+ // has been consumed and appended or prepended to the tree.
+ static CordRepBtree* Append(CordRepBtree* tree, string_view data,
+ size_t extra = 0);
+ static CordRepBtree* Prepend(CordRepBtree* tree, string_view data,
+ size_t extra = 0);
+
+ // Returns a new tree, containing `n` bytes of data from this instance
+ // starting at offset `offset`. Where possible, the returned tree shares
+ // (re-uses) data edges and nodes with this instance to minimize the
+ // combined memory footprint of both trees.
+ // Requires `offset + n <= length`. Returns `nullptr` if `n` is zero.
+ CordRep* SubTree(size_t offset, size_t n);
+
+ // Removes `n` trailing bytes from `tree`, and returns the resulting tree
+ // or data edge. Returns `tree` if n is zero, and nullptr if n == length.
+ // This function is logically identical to:
+ // result = tree->SubTree(0, tree->length - n);
+ // Unref(tree);
+ // return result;
+ // However, the actual implementation will as much as possible perform 'in
+ // place' modifications on the tree on all nodes and edges that are mutable.
+ // For example, in a fully privately owned tree with the last edge being a
+ // flat of length 12, RemoveSuffix(1) will simply set the length of that data
+ // edge to 11, and reduce the length of all nodes on the edge path by 1.
+ static CordRep* RemoveSuffix(CordRepBtree* tree, size_t n);
+
+ // Returns the character at the given offset.
+ char GetCharacter(size_t offset) const;
+
+ // Returns true if this node holds a single data edge, and if so, sets
+ // `fragment` to reference the contained data. `fragment` is an optional
+ // output parameter and allowed to be null.
+ bool IsFlat(y_absl::string_view* fragment) const;
+
+ // Returns true if the data of `n` bytes starting at offset `offset`
+ // is contained in a single data edge, and if so, sets fragment to reference
+ // the contained data. `fragment` is an optional output parameter and allowed
+ // to be null.
+ bool IsFlat(size_t offset, size_t n, y_absl::string_view* fragment) const;
+
+ // Returns a span (mutable range of bytes) of up to `size` bytes into the
+ // last FLAT data edge inside this tree under the following conditions:
+ // - none of the nodes down into the FLAT node are shared.
+ // - the last data edge in this tree is a non-shared FLAT.
+ // - the referenced FLAT has additional capacity available.
+ // If all these conditions are met, a non-empty span is returned, and the
+ // length of the flat node and involved tree nodes have been increased by
+ // `span.length()`. The caller is responsible for immediately assigning
+ // values to all uninitialized data referenced by the returned span.
+ // Requires `this->refcount.IsMutable()`: this function forces the
+ // caller to do this fast path check on the top level node, as this is the
+ // most commonly shared node of a cord tree.
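+ //
+ // Illustrative usage sketch (hypothetical `src` and `n`; not part of the
+ // original documentation):
+ //
+ //   if (tree->refcount.IsMutable()) {
+ //     y_absl::Span<char> span = tree->GetAppendBuffer(n);
+ //     memcpy(span.data(), src, span.size());  // must fill the entire span
+ //     src += span.size();
+ //     n -= span.size();
+ //   }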
+ Span<char> GetAppendBuffer(size_t size);
+
+ // Returns the `height` of the tree. The height of a tree is limited to
+ // kMaxHeight. `height` is implemented as an `int` as in some places we
+ // use negative (-1) values for 'data edges'.
+ int height() const { return static_cast<int>(storage[0]); }
+
+ // Properties: begin, back, end, front/back boundary indexes.
+ size_t begin() const { return static_cast<size_t>(storage[1]); }
+ size_t back() const { return static_cast<size_t>(storage[2]) - 1; }
+ size_t end() const { return static_cast<size_t>(storage[2]); }
+ size_t index(EdgeType edge) const {
+ return edge == kFront ? begin() : back();
+ }
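+
+ // Worked example (illustrative only): a node whose edges occupy slots
+ // [2, 5) has begin() == 2, back() == 4, end() == 5, and size() == 3.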
+
+ // Properties: size and capacity.
+ // `capacity` contains the current capacity of this instance, where
+ // `kMaxCapacity` contains the maximum capacity of a btree node.
+ // For now, `capacity` and `kMaxCapacity` return the same value, but this may
+ // change in the future if we see benefit in dynamically sizing 'small' nodes
+ // to 'large' nodes for large data trees.
+ size_t size() const { return end() - begin(); }
+ size_t capacity() const { return kMaxCapacity; }
+
+ // Edge access
+ inline CordRep* Edge(size_t index) const;
+ inline CordRep* Edge(EdgeType edge_type) const;
+ inline y_absl::Span<CordRep* const> Edges() const;
+ inline y_absl::Span<CordRep* const> Edges(size_t begin, size_t end) const;
+
+ // Returns reference to the data edge at `index`.
+  // Requires this instance to be a leaf node, and `index` to be a valid index.
+ inline y_absl::string_view Data(size_t index) const;
+
+  // Returns a pointer to the data of data edge `r`. Requires `IsDataEdge(r)`.
+  static const char* EdgeDataPtr(const CordRep* r);
+
+  // Returns the data of data edge `r` as a string_view. Requires
+  // `IsDataEdge(r)`.
+  static y_absl::string_view EdgeData(const CordRep* r);
+
+ // Returns true if the provided rep is a FLAT, EXTERNAL or a SUBSTRING node
+ // holding a FLAT or EXTERNAL child rep.
+ static bool IsDataEdge(const CordRep* rep);
+
+ // Diagnostics: returns true if `tree` is valid and internally consistent.
+ // If `shallow` is false, then the provided top level node and all child nodes
+ // below it are recursively checked. If `shallow` is true, only the provided
+ // node in `tree` and the cumulative length, type and height of the direct
+ // child nodes of `tree` are checked. The value of `shallow` is ignored if the
+ // internal `cord_btree_exhaustive_validation` diagnostics variable is true,
+  // in which case the performed validation works as if `shallow` were false.
+ // This function is intended for debugging and testing purposes only.
+ static bool IsValid(const CordRepBtree* tree, bool shallow = false);
+
+ // Diagnostics: asserts that the provided tree is valid.
+ // `AssertValid()` performs a shallow validation by default. `shallow` can be
+ // set to false in which case an exhaustive validation is performed. This
+ // function is implemented in terms of calling `IsValid()` and asserting the
+ // return value to be true. See `IsValid()` for more information.
+ // This function is intended for debugging and testing purposes only.
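+  // For example, a minimal sketch validating a tree after a mutation (a no-op
+  // in NDEBUG builds; `tree` and `rep` are assumed to be valid):
+  //
+  //   tree = CordRepBtree::AssertValid(CordRepBtree::Append(tree, rep));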
+ static CordRepBtree* AssertValid(CordRepBtree* tree, bool shallow = true);
+ static const CordRepBtree* AssertValid(const CordRepBtree* tree,
+ bool shallow = true);
+
+ // Diagnostics: dump the contents of this tree to `stream`.
+ // This function is intended for debugging and testing purposes only.
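+  // For example, a minimal sketch (assuming `rep` is a valid CordRep*):
+  //
+  //   CordRepBtree::Dump(rep, "after append", /*include_contents=*/true,
+  //                      std::cerr);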
+ static void Dump(const CordRep* rep, std::ostream& stream);
+ static void Dump(const CordRep* rep, y_absl::string_view label,
+ std::ostream& stream);
+ static void Dump(const CordRep* rep, y_absl::string_view label,
+ bool include_contents, std::ostream& stream);
+
+  // Adds the edge `edge` to this node if possible. `owned` indicates whether
+  // the current node is privately owned, i.e., not potentially shared with
+  // other threads. Returns:
+ // - {kSelf, <this>}
+ // The edge was directly added to this node.
+ // - {kCopied, <node>}
+ // The edge was added to a copy of this node.
+ // - {kPopped, New(edge, height())}
+ // A new leg with the edge was created as this node has no extra capacity.
+ template <EdgeType edge_type>
+ inline OpResult AddEdge(bool owned, CordRep* edge, size_t delta);
+
+ // Replaces the front or back edge with the provided new edge. Returns:
+ // - {kSelf, <this>}
+ // The edge was directly set in this node. The old edge is unreffed.
+ // - {kCopied, <node>}
+ // A copy of this node was created with the new edge value.
+ // In both cases, the function adopts a reference on `edge`.
+ template <EdgeType edge_type>
+ OpResult SetEdge(bool owned, CordRep* edge, size_t delta);
+
+ // Creates a new empty node at the specified height.
+ static CordRepBtree* New(int height = 0);
+
+ // Creates a new node containing `rep`, with the height being computed
+ // automatically based on the type of `rep`.
+ static CordRepBtree* New(CordRep* rep);
+
+ // Creates a new node containing both `front` and `back` at height
+ // `front.height() + 1`. Requires `back.height() == front.height()`.
+ static CordRepBtree* New(CordRepBtree* front, CordRepBtree* back);
+
+ // Creates a fully balanced tree from the provided tree by rebuilding a new
+ // tree from all data edges in the input. This function is automatically
+ // invoked internally when the tree exceeds the maximum height.
+ static CordRepBtree* Rebuild(CordRepBtree* tree);
+
+ private:
+ CordRepBtree() = default;
+ ~CordRepBtree() = default;
+
+ // Initializes the main properties `tag`, `begin`, `end`, `height`.
+ inline void InitInstance(int height, size_t begin = 0, size_t end = 0);
+
+ // Direct property access begin / end
+ void set_begin(size_t begin) { storage[1] = static_cast<uint8_t>(begin); }
+ void set_end(size_t end) { storage[2] = static_cast<uint8_t>(end); }
+
+  // Decreases the value of `begin` by `n`, and returns the new value. Notice
+  // that this returns the new value, unlike atomic::fetch_add which returns
+  // the old value: the new value is what is needed to prepend edges at
+  // 'begin - 1'.
+ size_t sub_fetch_begin(size_t n) {
+ storage[1] -= static_cast<uint8_t>(n);
+ return storage[1];
+ }
+
+ // Increases the value of `end` by `n`, and returns the previous value. This
+ // function is typically used to append edges at 'end'.
+ size_t fetch_add_end(size_t n) {
+ const uint8_t current = storage[2];
+ storage[2] = static_cast<uint8_t>(current + n);
+ return current;
+ }
+
+ // Returns the index of the last edge starting on, or before `offset`, with
+ // `n` containing the relative offset of `offset` inside that edge.
+ // Requires `offset` < length.
+ Position IndexOf(size_t offset) const;
+
+ // Returns the index of the last edge starting before `offset`, with `n`
+ // containing the relative offset of `offset` inside that edge.
+ // This function is useful to find the edges for some span of bytes ending at
+ // `offset` (i.e., `n` bytes). For example:
+ //
+ // Position pos = IndexBefore(n)
+ // edges = Edges(begin(), pos.index) // All full edges (may be empty)
+ // last = Sub(Edge(pos.index), 0, pos.n) // Last partial edge (may be empty)
+ //
+ // Requires 0 < `offset` <= length.
+ Position IndexBefore(size_t offset) const;
+
+  // Returns the index of the edge ending at (or on) length `n`, and the
+  // number of bytes inside that edge up to `n`. For example, if we have a
+  // node with 2 edges, one 10 and one 20 bytes long, then IndexOfLength(27)
+ // will return {1, 17}, and IndexOfLength(10) will return {0, 10}.
+ Position IndexOfLength(size_t n) const;
+
+ // Identical to the above function except starting from the position `front`.
+ // This function is equivalent to `IndexBefore(front.n + offset)`, with
+ // the difference that this function is optimized to start at `front.index`.
+ Position IndexBefore(Position front, size_t offset) const;
+
+ // Returns the index of the edge directly beyond the edge containing offset
+ // `offset`, with `n` containing the distance of that edge from `offset`.
+ // This function is useful for iteratively finding suffix nodes and remaining
+ // partial bytes in left-most suffix nodes as for example in CopySuffix.
+ // Requires `offset` < length.
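+  // For example, given a leaf node with `begin() == 0` and two edges of 10
+  // and 20 bytes, `IndexBeyond(5)` returns `{1, 5}` (edge 1 starts 5 bytes
+  // beyond offset 5), and `IndexBeyond(10)` returns `{1, 0}`.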
+ Position IndexBeyond(size_t offset) const;
+
+ // Destruction
+ static void DestroyLeaf(CordRepBtree* tree, size_t begin, size_t end);
+ static void DestroyNonLeaf(CordRepBtree* tree, size_t begin, size_t end);
+ static void DestroyTree(CordRepBtree* tree, size_t begin, size_t end);
+ static void Delete(CordRepBtree* tree) { delete tree; }
+
+ // Creates a new leaf node containing as much data as possible from `data`.
+ // The data is added either forwards or reversed depending on `edge_type`.
+ // Callers must check the length of the returned node to determine if all data
+ // was copied or not.
+ // See the `Append/Prepend` function for the meaning and purpose of `extra`.
+ template <EdgeType edge_type>
+ static CordRepBtree* NewLeaf(y_absl::string_view data, size_t extra);
+
+ // Creates a raw copy of this Btree node, copying all properties, but
+ // without adding any references to existing edges.
+ CordRepBtree* CopyRaw() const;
+
+ // Creates a full copy of this Btree node, adding a reference on all edges.
+ CordRepBtree* Copy() const;
+
+  // Creates a partial copy of this Btree node, copying all edges up to `end`,
+  // adding a reference on each copied edge, and setting the length of the
+  // newly created copy to `new_length`.
+ CordRepBtree* CopyBeginTo(size_t end, size_t new_length) const;
+
+ // Returns a tree containing the edges [tree->begin(), end) and length
+ // of `new_length`. This method consumes a reference on the provided
+ // tree, and logically performs the following operation:
+ // result = tree->CopyBeginTo(end, new_length);
+ // CordRep::Unref(tree);
+ // return result;
+ static CordRepBtree* ConsumeBeginTo(CordRepBtree* tree, size_t end,
+ size_t new_length);
+
+  // Creates a partial copy of this Btree node, copying all edges starting at
+  // `begin`, adding a reference on each copied edge, and setting the length
+  // of the newly created copy to `new_length`.
+ CordRepBtree* CopyToEndFrom(size_t begin, size_t new_length) const;
+
+ // Extracts and returns the front edge from the provided tree.
+ // This method consumes a reference on the provided tree, and logically
+ // performs the following operation:
+ // edge = CordRep::Ref(tree->Edge(kFront));
+ // CordRep::Unref(tree);
+ // return edge;
+ static CordRep* ExtractFront(CordRepBtree* tree);
+
+ // Returns a tree containing the result of appending `right` to `left`.
+ static CordRepBtree* MergeTrees(CordRepBtree* left, CordRepBtree* right);
+
+  // Fallback functions for `Create()`, `Append()` and `Prepend()` which
+  // deal with legacy / non-conforming input, i.e., CONCAT trees.
+ static CordRepBtree* CreateSlow(CordRep* rep);
+ static CordRepBtree* AppendSlow(CordRepBtree*, CordRep* rep);
+ static CordRepBtree* PrependSlow(CordRepBtree*, CordRep* rep);
+
+  // Recursively rebuilds `tree` into `stack`. If `consume` is set to true, the
+ // function will consume a reference on `tree`. `stack` is a null terminated
+ // array containing the new tree's state, with the current leaf node at
+ // stack[0], and parent nodes above that, or null for 'top of tree'.
+ static void Rebuild(CordRepBtree** stack, CordRepBtree* tree, bool consume);
+
+ // Aligns existing edges to start at index 0, to allow for a new edge to be
+ // added to the back of the current edges.
+ inline void AlignBegin();
+
+ // Aligns existing edges to end at `capacity`, to allow for a new edge to be
+ // added in front of the current edges.
+ inline void AlignEnd();
+
+ // Adds the provided edge to this node.
+ // Requires this node to have capacity for the edge. Realigns / moves
+ // existing edges as needed to prepend or append the new edge.
+ template <EdgeType edge_type>
+ inline void Add(CordRep* rep);
+
+ // Adds the provided edges to this node.
+ // Requires this node to have capacity for the edges. Realigns / moves
+ // existing edges as needed to prepend or append the new edges.
+ template <EdgeType edge_type>
+ inline void Add(y_absl::Span<CordRep* const>);
+
+ // Adds data from `data` to this node until either all data has been consumed,
+ // or there is no more capacity for additional flat nodes inside this node.
+ // Requires the current node to be a leaf node, data to be non empty, and the
+ // current node to have capacity for at least one more data edge.
+  // Returns any remaining data from `data` that was not added, which is,
+  // depending on the edge type (front / back), either the remaining prefix or
+  // suffix of the input.
+ // See the `Append/Prepend` function for the meaning and purpose of `extra`.
+ template <EdgeType edge_type>
+ y_absl::string_view AddData(y_absl::string_view data, size_t extra);
+
+ // Replace the front or back edge with the provided value.
+ // Adopts a reference on `edge` and unrefs the old edge.
+ template <EdgeType edge_type>
+ inline void SetEdge(CordRep* edge);
+
+ // Returns a partial copy of the current tree containing the first `n` bytes
+ // of data. `CopyResult` contains both the resulting edge and its height. The
+ // resulting tree may be less high than the current tree, or even be a single
+ // matching data edge if `allow_folding` is set to true.
+  // For example, if `n == 1`, then the result will be the single data edge,
+  // and height will be set to -1 (one below the owning leaf node). If
+  // `n == 0`, this function returns null. Requires `n <= length`.
+ CopyResult CopyPrefix(size_t n, bool allow_folding = true);
+
+ // Returns a partial copy of the current tree containing all data starting
+ // after `offset`. `CopyResult` contains both the resulting edge and its
+ // height. The resulting tree may be less high than the current tree, or even
+ // be a single matching data edge. For example, if `n == length - 1`, then the
+ // result will be a single data edge, and height will be set to -1 (one below
+ // the owning leaf node).
+  // Requires `offset < length`.
+ CopyResult CopySuffix(size_t offset);
+
+  // Returns an OpResult value of {this, kSelf} or {Copy(), kCopied}
+ // depending on the value of `owned`.
+ inline OpResult ToOpResult(bool owned);
+
+ // Adds `rep` to the specified tree, returning the modified tree.
+ template <EdgeType edge_type>
+ static CordRepBtree* AddCordRep(CordRepBtree* tree, CordRep* rep);
+
+ // Adds `data` to the specified tree, returning the modified tree.
+ // See the `Append/Prepend` function for the meaning and purpose of `extra`.
+ template <EdgeType edge_type>
+ static CordRepBtree* AddData(CordRepBtree* tree, y_absl::string_view data,
+ size_t extra = 0);
+
+ // Merges `src` into `dst` with `src` being added either before (kFront) or
+ // after (kBack) `dst`. Requires the height of `dst` to be greater than or
+ // equal to the height of `src`.
+ template <EdgeType edge_type>
+ static CordRepBtree* Merge(CordRepBtree* dst, CordRepBtree* src);
+
+ // Fallback version of GetAppendBuffer for large trees: GetAppendBuffer()
+ // implements an inlined version for trees of limited height (3 levels),
+ // GetAppendBufferSlow implements the logic for large trees.
+ Span<char> GetAppendBufferSlow(size_t size);
+
+ // `edges_` contains all edges starting from this instance.
+  // These are explicitly `child` edges only; a cord btree (or any cord tree,
+  // for that matter) does not store `parent` pointers anywhere: multiple trees /
+ // parents can reference the same shared child edge. The type of these edges
+ // depends on the height of the node. `Leaf nodes` (height == 0) contain `data
+ // edges` (external or flat nodes, or sub-strings thereof). All other nodes
+ // (height > 0) contain pointers to BTREE nodes with a height of `height - 1`.
+ CordRep* edges_[kMaxCapacity];
+
+ friend class CordRepBtreeTestPeer;
+ friend class CordRepBtreeNavigator;
+};
+
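+// A minimal usage sketch. `edge1` and `edge2` are assumed to be valid data
+// edges (e.g. FLAT or EXTERNAL nodes); `Create` and `Append` adopt references
+// on their arguments and return the (possibly new) tree:
+//
+//   CordRepBtree* tree = CordRepBtree::Create(edge1);
+//   tree = CordRepBtree::Append(tree, edge2);
+//   assert(tree->length == edge1->length + edge2->length);
+//   CordRep::Unref(tree);
+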
+inline CordRepBtree* CordRep::btree() {
+ assert(IsBtree());
+ return static_cast<CordRepBtree*>(this);
+}
+
+inline const CordRepBtree* CordRep::btree() const {
+ assert(IsBtree());
+ return static_cast<const CordRepBtree*>(this);
+}
+
+inline void CordRepBtree::InitInstance(int height, size_t begin, size_t end) {
+ tag = BTREE;
+ storage[0] = static_cast<uint8_t>(height);
+ storage[1] = static_cast<uint8_t>(begin);
+ storage[2] = static_cast<uint8_t>(end);
+}
+
+inline CordRep* CordRepBtree::Edge(size_t index) const {
+ assert(index >= begin());
+ assert(index < end());
+ return edges_[index];
+}
+
+inline CordRep* CordRepBtree::Edge(EdgeType edge_type) const {
+ return edges_[edge_type == kFront ? begin() : back()];
+}
+
+inline y_absl::Span<CordRep* const> CordRepBtree::Edges() const {
+ return {edges_ + begin(), size()};
+}
+
+inline y_absl::Span<CordRep* const> CordRepBtree::Edges(size_t begin,
+ size_t end) const {
+ assert(begin <= end);
+ assert(begin >= this->begin());
+ assert(end <= this->end());
+ return {edges_ + begin, static_cast<size_t>(end - begin)};
+}
+
+inline const char* CordRepBtree::EdgeDataPtr(const CordRep* r) {
+ assert(IsDataEdge(r));
+ size_t offset = 0;
+ if (r->tag == SUBSTRING) {
+ offset = r->substring()->start;
+ r = r->substring()->child;
+ }
+ return (r->tag >= FLAT ? r->flat()->Data() : r->external()->base) + offset;
+}
+
+inline y_absl::string_view CordRepBtree::EdgeData(const CordRep* r) {
+ return y_absl::string_view(EdgeDataPtr(r), r->length);
+}
+
+inline y_absl::string_view CordRepBtree::Data(size_t index) const {
+ assert(height() == 0);
+ return EdgeData(Edge(index));
+}
+
+inline bool CordRepBtree::IsDataEdge(const CordRep* rep) {
+  // The fast path is that `rep` is an EXTERNAL or FLAT node, making the below
+  // if a single, well predicted branch. We then repeat the FLAT or EXTERNAL
+  // check in the slow path after the SUBSTRING check to optimize for the
+  // hot path.
+ if (rep->tag == EXTERNAL || rep->tag >= FLAT) return true;
+ if (rep->tag == SUBSTRING) rep = rep->substring()->child;
+ return rep->tag == EXTERNAL || rep->tag >= FLAT;
+}
+
+inline CordRepBtree* CordRepBtree::New(int height) {
+ CordRepBtree* tree = new CordRepBtree;
+ tree->length = 0;
+ tree->InitInstance(height);
+ return tree;
+}
+
+inline CordRepBtree* CordRepBtree::New(CordRep* rep) {
+ CordRepBtree* tree = new CordRepBtree;
+ int height = rep->IsBtree() ? rep->btree()->height() + 1 : 0;
+ tree->length = rep->length;
+ tree->InitInstance(height, /*begin=*/0, /*end=*/1);
+ tree->edges_[0] = rep;
+ return tree;
+}
+
+inline CordRepBtree* CordRepBtree::New(CordRepBtree* front,
+ CordRepBtree* back) {
+ assert(front->height() == back->height());
+ CordRepBtree* tree = new CordRepBtree;
+ tree->length = front->length + back->length;
+ tree->InitInstance(front->height() + 1, /*begin=*/0, /*end=*/2);
+ tree->edges_[0] = front;
+ tree->edges_[1] = back;
+ return tree;
+}
+
+inline void CordRepBtree::DestroyTree(CordRepBtree* tree, size_t begin,
+ size_t end) {
+ if (tree->height() == 0) {
+ DestroyLeaf(tree, begin, end);
+ } else {
+ DestroyNonLeaf(tree, begin, end);
+ }
+}
+
+inline void CordRepBtree::Destroy(CordRepBtree* tree) {
+ DestroyTree(tree, tree->begin(), tree->end());
+}
+
+inline void CordRepBtree::Unref(y_absl::Span<CordRep* const> edges) {
+ for (CordRep* edge : edges) {
+ if (ABSL_PREDICT_FALSE(!edge->refcount.Decrement())) {
+ CordRep::Destroy(edge);
+ }
+ }
+}
+
+inline CordRepBtree* CordRepBtree::CopyRaw() const {
+ auto* tree = static_cast<CordRepBtree*>(::operator new(sizeof(CordRepBtree)));
+ memcpy(static_cast<void*>(tree), this, sizeof(CordRepBtree));
+ new (&tree->refcount) RefcountAndFlags;
+ return tree;
+}
+
+inline CordRepBtree* CordRepBtree::Copy() const {
+ CordRepBtree* tree = CopyRaw();
+ for (CordRep* rep : Edges()) CordRep::Ref(rep);
+ return tree;
+}
+
+inline CordRepBtree* CordRepBtree::CopyToEndFrom(size_t begin,
+ size_t new_length) const {
+ assert(begin >= this->begin());
+ assert(begin <= this->end());
+ CordRepBtree* tree = CopyRaw();
+ tree->length = new_length;
+ tree->set_begin(begin);
+ for (CordRep* edge : tree->Edges()) CordRep::Ref(edge);
+ return tree;
+}
+
+inline CordRepBtree* CordRepBtree::CopyBeginTo(size_t end,
+ size_t new_length) const {
+ assert(end <= capacity());
+ assert(end >= this->begin());
+ CordRepBtree* tree = CopyRaw();
+ tree->length = new_length;
+ tree->set_end(end);
+ for (CordRep* edge : tree->Edges()) CordRep::Ref(edge);
+ return tree;
+}
+
+inline void CordRepBtree::AlignBegin() {
+  // The below code itself does not need to be fast as typically we have
+  // mono-directional append/prepend calls, and `begin` / `end` are typically
+  // adjusted no more than once. But we want to avoid potential register
+  // clobber effects that make the compiler emit register save/store/spills,
+  // and to minimize the size of the code.
+ const size_t delta = begin();
+ if (ABSL_PREDICT_FALSE(delta != 0)) {
+ const size_t new_end = end() - delta;
+ set_begin(0);
+ set_end(new_end);
+ // TODO(mvels): we can write this using 2 loads / 2 stores depending on
+ // total size for the kMaxCapacity = 6 case. I.e., we can branch (switch) on
+ // size, and then do overlapping load/store of up to 4 pointers (inlined as
+ // XMM, YMM or ZMM load/store) and up to 2 pointers (XMM / YMM), which is a)
+ // compact and b) not clobbering any registers.
+ ABSL_INTERNAL_ASSUME(new_end <= kMaxCapacity);
+#ifdef __clang__
+#pragma unroll 1
+#endif
+ for (size_t i = 0; i < new_end; ++i) {
+ edges_[i] = edges_[i + delta];
+ }
+ }
+}
+
+inline void CordRepBtree::AlignEnd() {
+ // See comments in `AlignBegin` for motivation on the hand-rolled for loops.
+ const size_t delta = capacity() - end();
+ if (delta != 0) {
+ const size_t new_begin = begin() + delta;
+ const size_t new_end = end() + delta;
+ set_begin(new_begin);
+ set_end(new_end);
+ ABSL_INTERNAL_ASSUME(new_end <= kMaxCapacity);
+#ifdef __clang__
+#pragma unroll 1
+#endif
+ for (size_t i = new_end - 1; i >= new_begin; --i) {
+ edges_[i] = edges_[i - delta];
+ }
+ }
+}
+
+template <>
+inline void CordRepBtree::Add<CordRepBtree::kBack>(CordRep* rep) {
+ AlignBegin();
+ edges_[fetch_add_end(1)] = rep;
+}
+
+template <>
+inline void CordRepBtree::Add<CordRepBtree::kBack>(
+ y_absl::Span<CordRep* const> edges) {
+ AlignBegin();
+ size_t new_end = end();
+ for (CordRep* edge : edges) edges_[new_end++] = edge;
+ set_end(new_end);
+}
+
+template <>
+inline void CordRepBtree::Add<CordRepBtree::kFront>(CordRep* rep) {
+ AlignEnd();
+ edges_[sub_fetch_begin(1)] = rep;
+}
+
+template <>
+inline void CordRepBtree::Add<CordRepBtree::kFront>(
+ y_absl::Span<CordRep* const> edges) {
+ AlignEnd();
+ size_t new_begin = begin() - edges.size();
+ set_begin(new_begin);
+ for (CordRep* edge : edges) edges_[new_begin++] = edge;
+}
+
+template <CordRepBtree::EdgeType edge_type>
+inline void CordRepBtree::SetEdge(CordRep* edge) {
+ const int idx = edge_type == kFront ? begin() : back();
+ CordRep::Unref(edges_[idx]);
+ edges_[idx] = edge;
+}
+
+inline CordRepBtree::OpResult CordRepBtree::ToOpResult(bool owned) {
+ return owned ? OpResult{this, kSelf} : OpResult{Copy(), kCopied};
+}
+
+inline CordRepBtree::Position CordRepBtree::IndexOf(size_t offset) const {
+ assert(offset < length);
+ size_t index = begin();
+ while (offset >= edges_[index]->length) offset -= edges_[index++]->length;
+ return {index, offset};
+}
+
+inline CordRepBtree::Position CordRepBtree::IndexBefore(size_t offset) const {
+ assert(offset > 0);
+ assert(offset <= length);
+ size_t index = begin();
+ while (offset > edges_[index]->length) offset -= edges_[index++]->length;
+ return {index, offset};
+}
+
+inline CordRepBtree::Position CordRepBtree::IndexBefore(Position front,
+ size_t offset) const {
+ size_t index = front.index;
+ offset = offset + front.n;
+ while (offset > edges_[index]->length) offset -= edges_[index++]->length;
+ return {index, offset};
+}
+
+inline CordRepBtree::Position CordRepBtree::IndexOfLength(size_t n) const {
+ assert(n <= length);
+ size_t index = back();
+ size_t strip = length - n;
+ while (strip >= edges_[index]->length) strip -= edges_[index--]->length;
+ return {index, edges_[index]->length - strip};
+}
+
+inline CordRepBtree::Position CordRepBtree::IndexBeyond(
+ const size_t offset) const {
+  // We need to find the edge whose `starting offset` is at or beyond (>=)
+  // `offset`.
+ // For this we can't use the `offset -= length` logic of IndexOf. Instead, we
+ // track the offset of the `current edge` in `off`, which we increase as we
+ // iterate over the edges until we find the matching edge.
+ size_t off = 0;
+ size_t index = begin();
+ while (offset > off) off += edges_[index++]->length;
+ return {index, off - offset};
+}
+
+inline CordRepBtree* CordRepBtree::Create(CordRep* rep) {
+ if (IsDataEdge(rep)) return New(rep);
+ return CreateSlow(rep);
+}
+
+inline Span<char> CordRepBtree::GetAppendBuffer(size_t size) {
+ assert(refcount.IsMutable());
+ CordRepBtree* tree = this;
+ const int height = this->height();
+ CordRepBtree* n1 = tree;
+ CordRepBtree* n2 = tree;
+ CordRepBtree* n3 = tree;
+ switch (height) {
+ case 3:
+ tree = tree->Edge(kBack)->btree();
+ if (!tree->refcount.IsMutable()) return {};
+ n2 = tree;
+ ABSL_FALLTHROUGH_INTENDED;
+ case 2:
+ tree = tree->Edge(kBack)->btree();
+ if (!tree->refcount.IsMutable()) return {};
+ n1 = tree;
+ ABSL_FALLTHROUGH_INTENDED;
+ case 1:
+ tree = tree->Edge(kBack)->btree();
+ if (!tree->refcount.IsMutable()) return {};
+ ABSL_FALLTHROUGH_INTENDED;
+ case 0:
+ CordRep* edge = tree->Edge(kBack);
+ if (!edge->refcount.IsMutable()) return {};
+ if (edge->tag < FLAT) return {};
+ size_t avail = edge->flat()->Capacity() - edge->length;
+ if (avail == 0) return {};
+ size_t delta = (std::min)(size, avail);
+ Span<char> span = {edge->flat()->Data() + edge->length, delta};
+ edge->length += delta;
+ switch (height) {
+ case 3:
+ n3->length += delta;
+ ABSL_FALLTHROUGH_INTENDED;
+ case 2:
+ n2->length += delta;
+ ABSL_FALLTHROUGH_INTENDED;
+ case 1:
+ n1->length += delta;
+ ABSL_FALLTHROUGH_INTENDED;
+ case 0:
+ tree->length += delta;
+ return span;
+ }
+ break;
+ }
+ return GetAppendBufferSlow(size);
+}
+
+extern template CordRepBtree* CordRepBtree::AddCordRep<CordRepBtree::kBack>(
+ CordRepBtree* tree, CordRep* rep);
+
+extern template CordRepBtree* CordRepBtree::AddCordRep<CordRepBtree::kFront>(
+ CordRepBtree* tree, CordRep* rep);
+
+inline CordRepBtree* CordRepBtree::Append(CordRepBtree* tree, CordRep* rep) {
+ if (ABSL_PREDICT_TRUE(IsDataEdge(rep))) {
+ return CordRepBtree::AddCordRep<kBack>(tree, rep);
+ }
+ return AppendSlow(tree, rep);
+}
+
+inline CordRepBtree* CordRepBtree::Prepend(CordRepBtree* tree, CordRep* rep) {
+ if (ABSL_PREDICT_TRUE(IsDataEdge(rep))) {
+ return CordRepBtree::AddCordRep<kFront>(tree, rep);
+ }
+ return PrependSlow(tree, rep);
+}
+
+#ifdef NDEBUG
+
+inline CordRepBtree* CordRepBtree::AssertValid(CordRepBtree* tree,
+ bool /* shallow */) {
+ return tree;
+}
+
+inline const CordRepBtree* CordRepBtree::AssertValid(const CordRepBtree* tree,
+ bool /* shallow */) {
+ return tree;
+}
+
+#endif
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.cc
new file mode 100644
index 00000000000..6dae7bcd3e3
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.cc
@@ -0,0 +1,185 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/cord_rep_btree_navigator.h"
+
+#include <cassert>
+
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_btree.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+using ReadResult = CordRepBtreeNavigator::ReadResult;
+
+namespace {
+
+// Returns a `CordRepSubstring` from `rep` starting at `offset` of size `n`.
+// If `rep` is already a `CordRepSubstring` instance, an adjusted instance is
+// created based on the old offset and new offset.
+// Adopts a reference on `rep`. `rep` must be a valid data edge. Returns
+// nullptr if `n == 0`, `rep` if `n == rep->length`.
+// Requires `offset < rep->length` and `offset + n <= rep->length`.
+// TODO(192061034): move to utility library in internal and optimize for small
+// substrings of larger reps.
+inline CordRep* Substring(CordRep* rep, size_t offset, size_t n) {
+ assert(n <= rep->length);
+ assert(offset < rep->length);
+ assert(offset <= rep->length - n);
+ assert(CordRepBtree::IsDataEdge(rep));
+
+ if (n == 0) return nullptr;
+ if (n == rep->length) return CordRep::Ref(rep);
+
+ if (rep->tag == SUBSTRING) {
+ offset += rep->substring()->start;
+ rep = rep->substring()->child;
+ }
+
+ CordRepSubstring* substring = new CordRepSubstring();
+ substring->length = n;
+ substring->tag = SUBSTRING;
+ substring->start = offset;
+ substring->child = CordRep::Ref(rep);
+ return substring;
+}
+
+inline CordRep* Substring(CordRep* rep, size_t offset) {
+ return Substring(rep, offset, rep->length - offset);
+}
+
+} // namespace
+
+CordRepBtreeNavigator::Position CordRepBtreeNavigator::Skip(size_t n) {
+ int height = 0;
+ size_t index = index_[0];
+ CordRepBtree* node = node_[0];
+ CordRep* edge = node->Edge(index);
+
+ // Overall logic: Find an edge of at least the length we need to skip.
+ // We consume all edges which are smaller (i.e., must be 100% skipped).
+ // If we exhausted all edges on the current level, we move one level
+ // up the tree, and repeat until we either find the edge, or until we hit
+ // the top of the tree meaning the skip exceeds tree->length.
+ while (n >= edge->length) {
+ n -= edge->length;
+ while (++index == node->end()) {
+ if (++height > height_) return {nullptr, n};
+ node = node_[height];
+ index = index_[height];
+ }
+ edge = node->Edge(index);
+ }
+
+ // If we moved up the tree, descend down to the leaf level, consuming all
+ // edges that must be skipped.
+ while (height > 0) {
+ node = edge->btree();
+ index_[height] = index;
+ node_[--height] = node;
+ index = node->begin();
+ edge = node->Edge(index);
+ while (n >= edge->length) {
+ n -= edge->length;
+ ++index;
+ assert(index != node->end());
+ edge = node->Edge(index);
+ }
+ }
+ index_[0] = index;
+ return {edge, n};
+}
+
+ReadResult CordRepBtreeNavigator::Read(size_t edge_offset, size_t n) {
+ int height = 0;
+ size_t length = edge_offset + n;
+ size_t index = index_[0];
+ CordRepBtree* node = node_[0];
+ CordRep* edge = node->Edge(index);
+ assert(edge_offset < edge->length);
+
+ if (length < edge->length) {
+ return {Substring(edge, edge_offset, n), length};
+ }
+
+ // Similar to 'Skip', we consume all edges that are inside the 'length' of
+ // data that needs to be read. If we exhaust the current level, we move one
+ // level up the tree and repeat until we hit the final edge that must be
+ // (partially) read. We consume all edges into `subtree`.
+ CordRepBtree* subtree = CordRepBtree::New(Substring(edge, edge_offset));
+ size_t subtree_end = 1;
+ do {
+ length -= edge->length;
+ while (++index == node->end()) {
+ index_[height] = index;
+ if (++height > height_) {
+ subtree->set_end(subtree_end);
+ if (length == 0) return {subtree, 0};
+ CordRep::Unref(subtree);
+ return {nullptr, length};
+ }
+ if (length != 0) {
+ subtree->set_end(subtree_end);
+ subtree = CordRepBtree::New(subtree);
+ subtree_end = 1;
+ }
+ node = node_[height];
+ index = index_[height];
+ }
+ edge = node->Edge(index);
+ if (length >= edge->length) {
+ subtree->length += edge->length;
+ subtree->edges_[subtree_end++] = CordRep::Ref(edge);
+ }
+ } while (length >= edge->length);
+ CordRepBtree* tree = subtree;
+ subtree->length += length;
+
+ // If we moved up the tree, descend down to the leaf level, consuming all
+ // edges that must be read, adding 'down' nodes to `subtree`.
+ while (height > 0) {
+ node = edge->btree();
+ index_[height] = index;
+ node_[--height] = node;
+ index = node->begin();
+ edge = node->Edge(index);
+
+ if (length != 0) {
+ CordRepBtree* right = CordRepBtree::New(height);
+ right->length = length;
+ subtree->edges_[subtree_end++] = right;
+ subtree->set_end(subtree_end);
+ subtree = right;
+ subtree_end = 0;
+ while (length >= edge->length) {
+ subtree->edges_[subtree_end++] = CordRep::Ref(edge);
+ length -= edge->length;
+ edge = node->Edge(++index);
+ }
+ }
+ }
+ // Add any (partial) edge still remaining at the leaf level.
+ if (length != 0) {
+ subtree->edges_[subtree_end++] = Substring(edge, 0, length);
+ }
+ subtree->set_end(subtree_end);
+ index_[0] = index;
+ return {tree, length};
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.h
new file mode 100644
index 00000000000..40c58e3b3ce
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.h
@@ -0,0 +1,265 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_NAVIGATOR_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_NAVIGATOR_H_
+
+#include <cassert>
+#include <iostream>
+
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_btree.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordRepBtreeNavigator is a bi-directional navigator allowing callers to
+// navigate all the (leaf) data edges in a CordRepBtree instance.
+//
+// A CordRepBtreeNavigator instance is by default empty. Callers initialize a
+// navigator instance by calling one of `InitFirst()`, `InitLast()` or
+// `InitOffset()`, which establishes a current position. Callers can then
+// navigate using the `Next`, `Previous`, `Skip` and `Seek` methods.
+//
+// The navigator instance does not take or adopt a reference on the provided
+// `tree` on any of the initialization calls. Callers are responsible for
+// guaranteeing the lifecycle of the provided tree. A navigator instance can
+// be reset to the empty state by calling `Reset`.
+//
+// A navigator only keeps positional state on the 'current data edge'; it
+// explicitly does not keep any 'offset' state. The class does accept and
+// return offsets in the `Read()`, `Skip()` and `Seek()` methods as these would
+// otherwise put a big burden on callers. Callers are expected to maintain
+// (returned) offset info if they require such granular state.
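+//
+// Example: a minimal sketch iterating over all data edges of a non-empty
+// `tree`, where `UseData` is a placeholder for caller code:
+//
+//   CordRepBtreeNavigator nav;
+//   for (CordRep* edge = nav.InitFirst(tree); edge != nullptr;
+//        edge = nav.Next()) {
+//     UseData(CordRepBtree::EdgeData(edge));
+//   }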
+class CordRepBtreeNavigator {
+ public:
+ // The logical position as returned by the Seek() and Skip() functions.
+ // Returns the current leaf edge for the desired seek or skip position and
+ // the offset of that position inside that edge.
+ struct Position {
+ CordRep* edge;
+ size_t offset;
+ };
+
+ // The read result as returned by the Read() function.
+ // `tree` contains the resulting tree which is identical to the result
+ // of calling CordRepBtree::SubTree(...) on the tree being navigated.
+ // `n` contains the number of bytes used from the last navigated to
+ // edge of the tree.
+ struct ReadResult {
+ CordRep* tree;
+ size_t n;
+ };
+
+ // Returns true if this instance is not empty.
+ explicit operator bool() const;
+
+ // Returns the tree for this instance or nullptr if empty.
+ CordRepBtree* btree() const;
+
+ // Returns the data edge of the current position.
+ // Requires this instance to not be empty.
+ CordRep* Current() const;
+
+ // Resets this navigator to `tree`, returning the first data edge in the tree.
+ CordRep* InitFirst(CordRepBtree* tree);
+
+ // Resets this navigator to `tree`, returning the last data edge in the tree.
+ CordRep* InitLast(CordRepBtree* tree);
+
+ // Resets this navigator to `tree` returning the data edge at position
+ // `offset` and the relative offset of `offset` into that data edge.
+ // Returns `Position.edge = nullptr` if the provided offset is greater
+ // than or equal to the length of the tree, in which case the state of
+ // the navigator instance remains unchanged.
+ Position InitOffset(CordRepBtree* tree, size_t offset);
+
+ // Navigates to the next data edge.
+ // Returns the next data edge or nullptr if there is no next data edge, in
+ // which case the current position remains unchanged.
+ CordRep* Next();
+
+ // Navigates to the previous data edge.
+ // Returns the previous data edge or nullptr if there is no previous data
+ // edge, in which case the current position remains unchanged.
+ CordRep* Previous();
+
+ // Navigates to the data edge at position `offset`. Returns the navigated to
+ // data edge in `Position.edge` and the relative offset of `offset` into that
+ // data edge in `Position.offset`. Returns `Position.edge = nullptr` if the
+  // provided offset is greater than or equal to the tree's length.
+ Position Seek(size_t offset);
+
+ // Reads `n` bytes of data starting at offset `edge_offset` of the current
+ // data edge, and returns the result in `ReadResult.tree`. `ReadResult.n`
+  // contains the 'bytes used' from the last / current data edge in the tree.
+ // This allows users that mix regular navigation (using string views) and
+ // 'read into cord' navigation to keep track of the current state, and which
+ // bytes have been consumed from a navigator.
+ // This function returns `ReadResult.tree = nullptr` if the requested length
+ // exceeds the length of the tree starting at the current data edge.
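+  // For example, a minimal sketch reading `n` bytes starting at the current
+  // data edge (assuming the navigator is positioned on a data edge and `n`
+  // does not exceed the remaining length of the tree):
+  //
+  //   ReadResult result = navigator.Read(0, n);
+  //   // `result.tree` holds the read data; the caller owns this reference.
+  //   CordRep::Unref(result.tree);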
+ ReadResult Read(size_t edge_offset, size_t n);
+
+ // Skips `n` bytes forward from the current data edge, returning the navigated
+ // to data edge in `Position.edge` and `Position.offset` containing the offset
+ // inside that data edge. Note that the state of the navigator is left
+ // unchanged if `n` is smaller than the length of the current data edge.
+ Position Skip(size_t n);
+
+ // Resets this instance to the default / empty state.
+ void Reset();
+
+ private:
+ // Slow path for Next() if Next() reached the end of a leaf node. Backtracks
+ // up the stack until it finds a node that has a 'next' position available,
+ // and then does a 'front dive' towards the next leaf node.
+ CordRep* NextUp();
+
+ // Slow path for Previous() if Previous() reached the beginning of a leaf
+ // node. Backtracks up the stack until it finds a node that has a 'previous'
+ // position available, and then does a 'back dive' towards the previous leaf
+ // node.
+ CordRep* PreviousUp();
+
+ // Generic implementation of InitFirst() and InitLast().
+ template <CordRepBtree::EdgeType edge_type>
+ CordRep* Init(CordRepBtree* tree);
+
+ // `height_` contains the height of the current tree, or -1 if empty.
+ int height_ = -1;
+
+ // `index_` and `node_` contain the navigation state as the 'path' to the
+ // current data edge which is at `node_[0]->Edge(index_[0])`. The contents
+ // of these are undefined until the instance is initialized (`height_ >= 0`).
+ uint8_t index_[CordRepBtree::kMaxHeight];
+ CordRepBtree* node_[CordRepBtree::kMaxHeight];
+};
+
+// Returns true if this instance is not empty.
+inline CordRepBtreeNavigator::operator bool() const { return height_ >= 0; }
+
+inline CordRepBtree* CordRepBtreeNavigator::btree() const {
+ return height_ >= 0 ? node_[height_] : nullptr;
+}
+
+inline CordRep* CordRepBtreeNavigator::Current() const {
+ assert(height_ >= 0);
+ return node_[0]->Edge(index_[0]);
+}
+
+inline void CordRepBtreeNavigator::Reset() { height_ = -1; }
+
+inline CordRep* CordRepBtreeNavigator::InitFirst(CordRepBtree* tree) {
+ return Init<CordRepBtree::kFront>(tree);
+}
+
+inline CordRep* CordRepBtreeNavigator::InitLast(CordRepBtree* tree) {
+ return Init<CordRepBtree::kBack>(tree);
+}
+
+template <CordRepBtree::EdgeType edge_type>
+inline CordRep* CordRepBtreeNavigator::Init(CordRepBtree* tree) {
+ assert(tree != nullptr);
+ assert(tree->size() > 0);
+ int height = height_ = tree->height();
+ size_t index = tree->index(edge_type);
+ node_[height] = tree;
+ index_[height] = static_cast<uint8_t>(index);
+ while (--height >= 0) {
+ tree = tree->Edge(index)->btree();
+ node_[height] = tree;
+ index = tree->index(edge_type);
+ index_[height] = static_cast<uint8_t>(index);
+ }
+ return node_[0]->Edge(index);
+}
+
+inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::Seek(
+ size_t offset) {
+ assert(btree() != nullptr);
+ int height = height_;
+ CordRepBtree* edge = node_[height];
+ if (ABSL_PREDICT_FALSE(offset >= edge->length)) return {nullptr, 0};
+ CordRepBtree::Position index = edge->IndexOf(offset);
+ index_[height] = static_cast<uint8_t>(index.index);
+ while (--height >= 0) {
+ edge = edge->Edge(index.index)->btree();
+ node_[height] = edge;
+ index = edge->IndexOf(index.n);
+ index_[height] = static_cast<uint8_t>(index.index);
+ }
+ return {edge->Edge(index.index), index.n};
+}
+
+inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::InitOffset(
+ CordRepBtree* tree, size_t offset) {
+ assert(tree != nullptr);
+ if (ABSL_PREDICT_FALSE(offset >= tree->length)) return {nullptr, 0};
+ height_ = tree->height();
+ node_[height_] = tree;
+ return Seek(offset);
+}
+
+inline CordRep* CordRepBtreeNavigator::Next() {
+ CordRepBtree* edge = node_[0];
+ return index_[0] == edge->back() ? NextUp() : edge->Edge(++index_[0]);
+}
+
+inline CordRep* CordRepBtreeNavigator::Previous() {
+ CordRepBtree* edge = node_[0];
+ return index_[0] == edge->begin() ? PreviousUp() : edge->Edge(--index_[0]);
+}
+
+inline CordRep* CordRepBtreeNavigator::NextUp() {
+ assert(index_[0] == node_[0]->back());
+ CordRepBtree* edge;
+ size_t index;
+ int height = 0;
+ do {
+ if (++height > height_) return nullptr;
+ edge = node_[height];
+ index = index_[height] + 1;
+ } while (index == edge->end());
+ index_[height] = static_cast<uint8_t>(index);
+ do {
+ node_[--height] = edge = edge->Edge(index)->btree();
+ index_[height] = static_cast<uint8_t>(index = edge->begin());
+ } while (height > 0);
+ return edge->Edge(index);
+}
+
+inline CordRep* CordRepBtreeNavigator::PreviousUp() {
+ assert(index_[0] == node_[0]->begin());
+ CordRepBtree* edge;
+ size_t index;
+ int height = 0;
+ do {
+ if (++height > height_) return nullptr;
+ edge = node_[height];
+ index = index_[height];
+ } while (index == edge->begin());
+ index_[height] = static_cast<uint8_t>(--index);
+ do {
+ node_[--height] = edge = edge->Edge(index)->btree();
+ index_[height] = static_cast<uint8_t>(index = edge->back());
+ } while (height > 0);
+ return edge->Edge(index);
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_NAVIGATOR_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.cc
new file mode 100644
index 00000000000..0bc9dba2e68
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.cc
@@ -0,0 +1,68 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/cord_rep_btree_reader.h"
+
+#include <cassert>
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_btree_navigator.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+y_absl::string_view CordRepBtreeReader::Read(size_t n, size_t chunk_size,
+ CordRep*& tree) {
+ assert(chunk_size <= navigator_.Current()->length);
+
+  // If chunk_size is non-zero, we need to start inside the last returned edge.
+ // Else we start reading at the next data edge of the tree.
+ CordRep* edge = chunk_size ? navigator_.Current() : navigator_.Next();
+ const size_t offset = chunk_size ? edge->length - chunk_size : 0;
+
+ // Read the sub tree and verify we got what we wanted.
+ ReadResult result = navigator_.Read(offset, n);
+ tree = result.tree;
+
+ // If the data returned in `tree` was covered entirely by `chunk_size`, i.e.,
+ // read from the 'previous' edge, we did not consume any additional data, and
+ // can directly return the substring into the current data edge as the next
+ // chunk. We can easily establish from the above code that `navigator_.Next()`
+ // has not been called as that requires `chunk_size` to be zero.
+ if (n < chunk_size) return CordRepBtree::EdgeData(edge).substr(result.n);
+
+ // The amount of data taken from the last edge is `chunk_size` and `result.n`
+ // contains the offset into the current edge trailing the read data (which can
+ // be 0). As the call to `navigator_.Read()` could have consumed all remaining
+ // data, calling `navigator_.Current()` is not safe before checking if we
+ // already consumed all remaining data.
+ const size_t consumed_by_read = n - chunk_size - result.n;
+ if (consumed_by_read >= remaining_) {
+ remaining_ = 0;
+ return {};
+ }
+
+ // We did not read all data, return remaining data from current edge.
+ edge = navigator_.Current();
+ remaining_ -= consumed_by_read + edge->length;
+ return CordRepBtree::EdgeData(edge).substr(result.n);
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.h
new file mode 100644
index 00000000000..00b2261f713
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.h
@@ -0,0 +1,211 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_
+
+#include <cassert>
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_btree_navigator.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordRepBtreeReader implements logic to iterate over cord btrees.
+// References to the underlying data are returned as y_absl::string_view values.
+// The most typical use case is a forward only iteration over tree data.
+// The class also provides `Skip()`, `Seek()` and `Read()` methods similar to
+// CordRepBtreeNavigator that allow more advanced navigation.
+//
+// Example: iterate over all data inside a cord btree:
+//
+// CordRepBtreeReader reader;
+//   for (string_view sv = reader.Init(tree); !sv.empty(); sv = reader.Next()) {
+// DoSomethingWithDataIn(sv);
+// }
+//
+// All navigation methods always return the next 'chunk' of data. The class
+// assumes that all data is directly 'consumed' by the caller. For example:
+// invoking `Skip()` will skip the desired number of bytes, and directly
+// read and return the next chunk of data directly after the skipped bytes.
+//
+// Example: iterate over all data inside a btree skipping the first 100 bytes:
+//
+// CordRepBtreeReader reader;
+// y_absl::string_view sv = reader.Init(tree);
+// if (sv.length() > 100) {
+//     sv.remove_prefix(100);
+// } else {
+// sv = reader.Skip(100 - sv.length());
+// }
+// while (!sv.empty()) {
+// DoSomethingWithDataIn(sv);
+//     sv = reader.Next();
+// }
+//
+// It is important to notice that `remaining` is based on the end position of
+// the last data edge returned to the caller, not the cumulative data returned
+// to the caller, which can be less in cases of skipping or seeking over data.
+//
+// For example, consider a cord btree with five data edges: "abc", "def", "ghi",
+// "jkl" and "mno":
+//
+// y_absl::string_view sv;
+// CordRepBtreeReader reader;
+//
+// sv = reader.Init(tree); // sv = "abc", remaining = 12
+// sv = reader.Skip(4); // sv = "hi", remaining = 6
+// sv = reader.Skip(2); // sv = "l", remaining = 3
+// sv = reader.Next(); // sv = "mno", remaining = 0
+// sv = reader.Seek(1); // sv = "bc", remaining = 12
+//
+class CordRepBtreeReader {
+ public:
+ using ReadResult = CordRepBtreeNavigator::ReadResult;
+ using Position = CordRepBtreeNavigator::Position;
+
+ // Returns true if this instance is not empty.
+ explicit operator bool() const { return navigator_.btree() != nullptr; }
+
+ // Returns the tree referenced by this instance or nullptr if empty.
+ CordRepBtree* btree() const { return navigator_.btree(); }
+
+ // Returns the current data edge inside the referenced btree.
+ // Requires that the current instance is not empty.
+ CordRep* node() const { return navigator_.Current(); }
+
+ // Returns the length of the referenced tree.
+ // Requires that the current instance is not empty.
+ size_t length() const;
+
+ // Returns the number of remaining bytes available for iteration, which is the
+ // number of bytes directly following the end of the last chunk returned.
+ // This value will be zero if we iterated over the last edge in the bound
+ // tree, in which case any call to Next() or Skip() will return an empty
+ // string_view reflecting the EOF state.
+ // Note that a call to `Seek()` resets `remaining` to a value based on the
+ // end position of the chunk returned by that call.
+ size_t remaining() const { return remaining_; }
+
+ // Resets this instance to an empty value.
+ void Reset() { navigator_.Reset(); }
+
+ // Initializes this instance with `tree`. `tree` must not be null.
+ // Returns a reference to the first data edge of the provided tree.
+ y_absl::string_view Init(CordRepBtree* tree);
+
+ // Navigates to and returns the next data edge of the referenced tree.
+ // Returns an empty string_view if an attempt is made to read beyond the end
+ // of the tree, i.e.: if `remaining()` is zero indicating an EOF condition.
+ // Requires that the current instance is not empty.
+ y_absl::string_view Next();
+
+ // Skips the provided amount of bytes and returns a reference to the data
+ // directly following the skipped bytes.
+ y_absl::string_view Skip(size_t skip);
+
+ // Reads `n` bytes into `tree`.
+ // If `chunk_size` is zero, starts reading at the next data edge. If
+  // `chunk_size` is non-zero, the read starts at the last `chunk_size` bytes
+  // of the last returned data edge. Effectively, this means that the read
+  // starts at offset `length() - remaining() - chunk_size`.
+ // Requires that `chunk_size` is less than or equal to the length of the
+ // last returned data edge. The purpose of `chunk_size` is to simplify code
+ // partially consuming a returned chunk and wanting to include the remaining
+ // bytes in the Read call. For example, the below code will read 1000 bytes of
+ // data into a cord tree if the first chunk starts with "big:":
+ //
+ // CordRepBtreeReader reader;
+ // y_absl::string_view sv = reader.Init(tree);
+ // if (y_absl::StartsWith(sv, "big:")) {
+  //     CordRep* rep;
+  //     sv = reader.Read(1000, sv.size() - 4 /* "big:" */, rep);
+ // }
+ //
+ // This method will return an empty string view if all remaining data was
+  // read. If `n` exceeds the amount of remaining data, this function will
+ // return an empty string view and `tree` will be set to nullptr.
+  // In both cases, `remaining()` will be set to zero.
+ y_absl::string_view Read(size_t n, size_t chunk_size, CordRep*& tree);
+
+ // Navigates to the chunk at offset `offset`.
+ // Returns a reference into the navigated to chunk, adjusted for the relative
+ // position of `offset` into that chunk. For example, calling `Seek(13)` on a
+ // cord tree containing 2 chunks of 10 and 20 bytes respectively will return
+ // a string view into the second chunk starting at offset 3 with a size of 17.
+ // Returns an empty string view if `offset` is equal to or greater than the
+ // length of the referenced tree.
+ y_absl::string_view Seek(size_t offset);
+
+ private:
+ size_t remaining_ = 0;
+ CordRepBtreeNavigator navigator_;
+};
+
+inline size_t CordRepBtreeReader::length() const {
+ assert(btree() != nullptr);
+ return btree()->length;
+}
+
+inline y_absl::string_view CordRepBtreeReader::Init(CordRepBtree* tree) {
+ assert(tree != nullptr);
+ const CordRep* edge = navigator_.InitFirst(tree);
+ remaining_ = tree->length - edge->length;
+ return CordRepBtree::EdgeData(edge);
+}
+
+inline y_absl::string_view CordRepBtreeReader::Next() {
+ if (remaining_ == 0) return {};
+ const CordRep* edge = navigator_.Next();
+ assert(edge != nullptr);
+ remaining_ -= edge->length;
+ return CordRepBtree::EdgeData(edge);
+}
+
+inline y_absl::string_view CordRepBtreeReader::Skip(size_t skip) {
+ // As we are always positioned on the last 'consumed' edge, we
+ // need to skip the current edge as well as `skip`.
+ const size_t edge_length = navigator_.Current()->length;
+ CordRepBtreeNavigator::Position pos = navigator_.Skip(skip + edge_length);
+ if (ABSL_PREDICT_FALSE(pos.edge == nullptr)) {
+ remaining_ = 0;
+ return {};
+ }
+  // The combined length of all edges skipped before `pos.edge` is
+  // `skip - pos.offset`, all of which is 'consumed', as is the current edge.
+ remaining_ -= skip - pos.offset + pos.edge->length;
+ return CordRepBtree::EdgeData(pos.edge).substr(pos.offset);
+}
+
+inline y_absl::string_view CordRepBtreeReader::Seek(size_t offset) {
+ const CordRepBtreeNavigator::Position pos = navigator_.Seek(offset);
+ if (ABSL_PREDICT_FALSE(pos.edge == nullptr)) {
+ remaining_ = 0;
+ return {};
+ }
+ y_absl::string_view chunk = CordRepBtree::EdgeData(pos.edge).substr(pos.offset);
+ remaining_ = length() - offset - chunk.length();
+ return chunk;
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc
new file mode 100644
index 00000000000..ffc0179e52f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc
@@ -0,0 +1,129 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/cord_rep_consume.h"
+
+#include <array>
+#include <utility>
+
+#include "y_absl/container/inlined_vector.h"
+#include "y_absl/functional/function_ref.h"
+#include "y_absl/strings/internal/cord_internal.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+namespace {
+
+// Unrefs the provided `substring`, and returns `substring->child`.
+// Adds or assumes a reference on `substring->child`.
+CordRep* ClipSubstring(CordRepSubstring* substring) {
+ CordRep* child = substring->child;
+ if (substring->refcount.IsOne()) {
+ delete substring;
+ } else {
+ CordRep::Ref(child);
+ CordRep::Unref(substring);
+ }
+ return child;
+}
+
+// Unrefs the provided `concat`, and returns `{concat->left, concat->right}`.
+// Adds or assumes a reference on `concat->left` and `concat->right`.
+// Returns an array of 2 elements containing the left and right nodes.
+std::array<CordRep*, 2> ClipConcat(CordRepConcat* concat) {
+ std::array<CordRep*, 2> result{concat->left, concat->right};
+ if (concat->refcount.IsOne()) {
+ delete concat;
+ } else {
+ CordRep::Ref(result[0]);
+ CordRep::Ref(result[1]);
+ CordRep::Unref(concat);
+ }
+ return result;
+}
+
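+// Implementation note (editorial): the CONCAT tree is unrolled iteratively
+// with an explicit stack rather than recursion. At each CONCAT node we clip to
+// the two children, defer one side onto the stack, and descend into the other;
+// `forward` selects which side is deferred, yielding forward or reverse
+// emission order. SUBSTRING nodes merely contribute to the running `offset`.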
+void Consume(bool forward, CordRep* rep, ConsumeFn consume_fn) {
+ size_t offset = 0;
+ size_t length = rep->length;
+ struct Entry {
+ CordRep* rep;
+ size_t offset;
+ size_t length;
+ };
+ y_absl::InlinedVector<Entry, 40> stack;
+
+ for (;;) {
+ if (rep->tag == CONCAT) {
+ std::array<CordRep*, 2> res = ClipConcat(rep->concat());
+ CordRep* left = res[0];
+ CordRep* right = res[1];
+
+ if (left->length <= offset) {
+ // Don't need left node
+ offset -= left->length;
+ CordRep::Unref(left);
+ rep = right;
+ continue;
+ }
+
+ size_t length_left = left->length - offset;
+ if (length_left >= length) {
+ // Don't need right node
+ CordRep::Unref(right);
+ rep = left;
+ continue;
+ }
+
+ // Need both nodes
+ size_t length_right = length - length_left;
+ if (forward) {
+ stack.push_back({right, 0, length_right});
+ rep = left;
+ length = length_left;
+ } else {
+ stack.push_back({left, offset, length_left});
+ rep = right;
+ offset = 0;
+ length = length_right;
+ }
+ } else if (rep->tag == SUBSTRING) {
+ offset += rep->substring()->start;
+ rep = ClipSubstring(rep->substring());
+ } else {
+ consume_fn(rep, offset, length);
+ if (stack.empty()) return;
+
+ rep = stack.back().rep;
+ offset = stack.back().offset;
+ length = stack.back().length;
+ stack.pop_back();
+ }
+ }
+}
+
+} // namespace
+
+void Consume(CordRep* rep, ConsumeFn consume_fn) {
+ return Consume(true, rep, std::move(consume_fn));
+}
+
+void ReverseConsume(CordRep* rep, ConsumeFn consume_fn) {
+ return Consume(false, rep, std::move(consume_fn));
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.h
new file mode 100644
index 00000000000..7f6e5584f45
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.h
@@ -0,0 +1,50 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_CONSUME_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_CONSUME_H_
+
+#include <functional>
+
+#include "y_absl/functional/function_ref.h"
+#include "y_absl/strings/internal/cord_internal.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Functor for the Consume() and ReverseConsume() functions:
+// void ConsumeFunc(CordRep* rep, size_t offset, size_t length);
+// See the Consume() and ReverseConsume() function comments for documentation.
+using ConsumeFn = FunctionRef<void(CordRep*, size_t, size_t)>;
+
+// Consume() and ReverseConsume() consume CONCAT-based trees and invoke the
+// provided functor with the contained nodes in the proper forward or reverse
+// order, which is used to convert CONCAT trees into other tree or cord data.
+// All CONCAT and SUBSTRING nodes are processed internally. The `offset`
+// parameter of the functor is non-zero for any nodes below SUBSTRING nodes.
+// It's up to the caller to form these back into SUBSTRING nodes or otherwise
+// store offset / prefix information. These functions are intended to be used
+// only for migration / transitional code where, due to factors such as ODR
+// violations, we cannot 100% guarantee that all code respects 'new format'
+// settings and flags, so we need to be able to parse old data on the fly until
+// all old code is deprecated / no longer the default format.
+void Consume(CordRep* rep, ConsumeFn consume_fn);
+void ReverseConsume(CordRep* rep, ConsumeFn consume_fn);
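+
+// Usage sketch (editorial; `tree` is a hypothetical CONCAT-based CordRep*):
+//   Consume(tree, [](CordRep* rep, size_t offset, size_t length) {
+//     // `rep` is a leaf (FLAT or EXTERNAL); its logical content is the
+//     // `length` bytes starting at `offset`. The callee assumes the
+//     // reference passed in with `rep`.
+//   });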
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_CONSUME_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h
new file mode 100644
index 00000000000..976613031c2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h
@@ -0,0 +1,146 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+#include "y_absl/strings/internal/cord_internal.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Note: all constants below are never ODR-used and are internal to cord; we
+// define them as static constexpr to avoid 'in struct' definition and usage
+// clutter.
+
+// Largest and smallest flat node lengths we are willing to allocate.
+// The flat allocation size is stored in the tag, which currently can encode
+// sizes up to 4K as a multiple of either 8 or 32 bytes.
+// If we allow for larger sizes, we need to change this to 8/64, 16/128, etc.
+// kMinFlatSize is bounded by tag needing to be at least FLAT * 8 bytes, and
+// ideally a 'nice' size aligning with allocation and cacheline sizes like 32.
+// kMaxFlatSize is bounded by the size resulting in a computed tag no greater
+// than MAX_FLAT_TAG. MAX_FLAT_TAG provides for additional 'high' tag values.
+static constexpr size_t kFlatOverhead = offsetof(CordRep, storage);
+static constexpr size_t kMinFlatSize = 32;
+static constexpr size_t kMaxFlatSize = 4096;
+static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead;
+static constexpr size_t kMinFlatLength = kMinFlatSize - kFlatOverhead;
+
+constexpr uint8_t AllocatedSizeToTagUnchecked(size_t size) {
+ return static_cast<uint8_t>((size <= 1024) ? size / 8 + 1
+ : 129 + size / 32 - 1024 / 32);
+}
+
+static_assert(kMinFlatSize / 8 + 1 >= FLAT, "");
+static_assert(AllocatedSizeToTagUnchecked(kMaxFlatSize) <= MAX_FLAT_TAG, "");
+
+// Helper functions for rounded div, and rounding to exact sizes.
+constexpr size_t DivUp(size_t n, size_t m) { return (n + m - 1) / m; }
+constexpr size_t RoundUp(size_t n, size_t m) { return DivUp(n, m) * m; }
+
+// Returns the size to the nearest equal or larger value that can be
+// expressed exactly as a tag value.
+inline size_t RoundUpForTag(size_t size) {
+ return RoundUp(size, (size <= 1024) ? 8 : 32);
+}
+
+// Converts the allocated size to a tag, rounding down if the size
+// does not exactly match a 'tag expressible' size value. The result is
+// undefined if the size exceeds the maximum size that can be encoded in
+// a tag, i.e., if size is larger than TagToAllocatedSize(<max tag>).
+inline uint8_t AllocatedSizeToTag(size_t size) {
+ const uint8_t tag = AllocatedSizeToTagUnchecked(size);
+ assert(tag <= MAX_FLAT_TAG);
+ return tag;
+}
+
+// Converts the provided tag to the corresponding allocated size
+constexpr size_t TagToAllocatedSize(uint8_t tag) {
+ return (tag <= 129) ? ((tag - 1) * 8) : (1024 + (tag - 129) * 32);
+}
+
+// Converts the provided tag to the corresponding available data length
+constexpr size_t TagToLength(uint8_t tag) {
+ return TagToAllocatedSize(tag) - kFlatOverhead;
+}
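+
+// Worked round-trip examples of the tag encoding (editorial):
+//   AllocatedSizeToTag(32)   == 32 / 8 + 1   == 5;   TagToAllocatedSize(5)   == 32
+//   AllocatedSizeToTag(1024) == 1024 / 8 + 1 == 129; TagToAllocatedSize(129) == 1024
+//   AllocatedSizeToTag(4096) == 129 + 4096 / 32 - 1024 / 32 == 225;
+//   TagToAllocatedSize(225)  == 1024 + (225 - 129) * 32     == 4096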
+
+// Enforce that kMaxFlatSize maps to a well-known exact tag value.
+static_assert(TagToAllocatedSize(225) == kMaxFlatSize, "Bad tag logic");
+
+struct CordRepFlat : public CordRep {
+ // Creates a new flat node.
+ static CordRepFlat* New(size_t len) {
+ if (len <= kMinFlatLength) {
+ len = kMinFlatLength;
+ } else if (len > kMaxFlatLength) {
+ len = kMaxFlatLength;
+ }
+
+ // Round size up so it matches a size we can exactly express in a tag.
+ const size_t size = RoundUpForTag(len + kFlatOverhead);
+ void* const raw_rep = ::operator new(size);
+ CordRepFlat* rep = new (raw_rep) CordRepFlat();
+ rep->tag = AllocatedSizeToTag(size);
+ return rep;
+ }
+
+ // Deletes a CordRepFlat instance created previously through a call to New().
+ // Flat CordReps are allocated and constructed with raw ::operator new and
+ // placement new, and must be destructed and deallocated accordingly.
+  static void Delete(CordRep* rep) {
+ assert(rep->tag >= FLAT && rep->tag <= MAX_FLAT_TAG);
+
+#if defined(__cpp_sized_deallocation)
+ size_t size = TagToAllocatedSize(rep->tag);
+ rep->~CordRep();
+ ::operator delete(rep, size);
+#else
+ rep->~CordRep();
+ ::operator delete(rep);
+#endif
+ }
+
+ // Returns a pointer to the data inside this flat rep.
+ char* Data() { return reinterpret_cast<char*>(storage); }
+ const char* Data() const { return reinterpret_cast<const char*>(storage); }
+
+ // Returns the maximum capacity (payload size) of this instance.
+ size_t Capacity() const { return TagToLength(tag); }
+
+ // Returns the allocated size (payload + overhead) of this instance.
+ size_t AllocatedSize() const { return TagToAllocatedSize(tag); }
+};
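+
+// Lifecycle sketch (editorial; `src` is a hypothetical source buffer):
+//   CordRepFlat* rep = CordRepFlat::New(100);  // Capacity() >= 100
+//   memcpy(rep->Data(), src, 100);
+//   rep->length = 100;                         // length <= Capacity()
+//   ...
+//   CordRepFlat::Delete(rep);                  // never plain `delete rep`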
+
+// Now that CordRepFlat is defined, we can define CordRep's helper casts:
+inline CordRepFlat* CordRep::flat() {
+ assert(tag >= FLAT && tag <= MAX_FLAT_TAG);
+ return reinterpret_cast<CordRepFlat*>(this);
+}
+
+inline const CordRepFlat* CordRep::flat() const {
+ assert(tag >= FLAT && tag <= MAX_FLAT_TAG);
+ return reinterpret_cast<const CordRepFlat*>(this);
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.cc
new file mode 100644
index 00000000000..06c7e75bd8c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.cc
@@ -0,0 +1,771 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "y_absl/strings/internal/cord_rep_ring.h"
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iostream>
+#include <limits>
+#include <memory>
+#include <util/generic/string.h>
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/throw_delegate.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/container/inlined_vector.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_consume.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+namespace {
+
+using index_type = CordRepRing::index_type;
+
+enum class Direction { kForward, kReversed };
+
+inline bool IsFlatOrExternal(CordRep* rep) {
+ return rep->IsFlat() || rep->IsExternal();
+}
+
+// Verifies that n + extra <= kMaxCapacity: throws std::length_error otherwise.
+inline void CheckCapacity(size_t n, size_t extra) {
+ if (ABSL_PREDICT_FALSE(extra > CordRepRing::kMaxCapacity - n)) {
+ base_internal::ThrowStdLengthError("Maximum capacity exceeded");
+ }
+}
+
+// Creates a flat from the provided string data, allocating up to `extra`
+// capacity in the returned flat depending on kMaxFlatLength limitations.
+// Requires `n` to be less than or equal to `kMaxFlatLength`.
+CordRepFlat* CreateFlat(const char* s, size_t n, size_t extra = 0) { // NOLINT
+ assert(n <= kMaxFlatLength);
+ auto* rep = CordRepFlat::New(n + extra);
+ rep->length = n;
+ memcpy(rep->Data(), s, n);
+ return rep;
+}
+
+// Unrefs the entries in `[head, tail)`.
+// Requires all entries to be a FLAT or EXTERNAL node.
+void UnrefEntries(const CordRepRing* rep, index_type head, index_type tail) {
+ rep->ForEach(head, tail, [rep](index_type ix) {
+ CordRep* child = rep->entry_child(ix);
+ if (!child->refcount.Decrement()) {
+ if (child->tag >= FLAT) {
+ CordRepFlat::Delete(child->flat());
+ } else {
+ CordRepExternal::Delete(child->external());
+ }
+ }
+ });
+}
+
+} // namespace
+
+std::ostream& operator<<(std::ostream& s, const CordRepRing& rep) {
+  // Note: 'pos' values are defined as size_t (for overflow reasons), but that
+  // prints really awkwardly for small prepended values such as -5. ssize_t is
+  // not portable (POSIX only), so we cast to ptrdiff_t for signed values.
+ s << " CordRepRing(" << &rep << ", length = " << rep.length
+ << ", head = " << rep.head_ << ", tail = " << rep.tail_
+ << ", cap = " << rep.capacity_ << ", rc = " << rep.refcount.Get()
+ << ", begin_pos_ = " << static_cast<ptrdiff_t>(rep.begin_pos_) << ") {\n";
+ CordRepRing::index_type head = rep.head();
+ do {
+ CordRep* child = rep.entry_child(head);
+ s << " entry[" << head << "] length = " << rep.entry_length(head)
+ << ", child " << child << ", clen = " << child->length
+ << ", tag = " << static_cast<int>(child->tag)
+ << ", rc = " << child->refcount.Get()
+ << ", offset = " << rep.entry_data_offset(head)
+ << ", end_pos = " << static_cast<ptrdiff_t>(rep.entry_end_pos(head))
+ << "\n";
+ head = rep.advance(head);
+ } while (head != rep.tail());
+ return s << "}\n";
+}
+
+void CordRepRing::AddDataOffset(index_type index, size_t n) {
+ entry_data_offset()[index] += static_cast<offset_type>(n);
+}
+
+void CordRepRing::SubLength(index_type index, size_t n) {
+ entry_end_pos()[index] -= n;
+}
+
+class CordRepRing::Filler {
+ public:
+ Filler(CordRepRing* rep, index_type pos) : rep_(rep), head_(pos), pos_(pos) {}
+
+ index_type head() const { return head_; }
+ index_type pos() const { return pos_; }
+
+ void Add(CordRep* child, size_t offset, pos_type end_pos) {
+ rep_->entry_end_pos()[pos_] = end_pos;
+ rep_->entry_child()[pos_] = child;
+ rep_->entry_data_offset()[pos_] = static_cast<offset_type>(offset);
+ pos_ = rep_->advance(pos_);
+ }
+
+ private:
+ CordRepRing* rep_;
+ index_type head_;
+ index_type pos_;
+};
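+
+// Typical Filler usage (editorial sketch): construct at the write position,
+// call Add() once per new entry, then commit by storing the advanced position:
+//   Filler filler(rep, rep->tail_);
+//   filler.Add(child, offset, end_pos);
+//   rep->tail_ = filler.pos();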
+
+constexpr size_t CordRepRing::kMaxCapacity; // NOLINT: needed for c++11
+
+bool CordRepRing::IsValid(std::ostream& output) const {
+ if (capacity_ == 0) {
+ output << "capacity == 0";
+ return false;
+ }
+
+ if (head_ >= capacity_ || tail_ >= capacity_) {
+    output << "head " << head_ << " and/or tail " << tail_
+           << " exceed capacity " << capacity_;
+ return false;
+ }
+
+ const index_type back = retreat(tail_);
+ size_t pos_length = Distance(begin_pos_, entry_end_pos(back));
+ if (pos_length != length) {
+ output << "length " << length << " does not match positional length "
+ << pos_length << " from begin_pos " << begin_pos_ << " and entry["
+ << back << "].end_pos " << entry_end_pos(back);
+ return false;
+ }
+
+ index_type head = head_;
+ pos_type begin_pos = begin_pos_;
+ do {
+ pos_type end_pos = entry_end_pos(head);
+ size_t entry_length = Distance(begin_pos, end_pos);
+ if (entry_length == 0) {
+ output << "entry[" << head << "] has an invalid length " << entry_length
+ << " from begin_pos " << begin_pos << " and end_pos " << end_pos;
+ return false;
+ }
+
+ CordRep* child = entry_child(head);
+ if (child == nullptr) {
+ output << "entry[" << head << "].child == nullptr";
+ return false;
+ }
+ if (child->tag < FLAT && child->tag != EXTERNAL) {
+ output << "entry[" << head << "].child has an invalid tag "
+ << static_cast<int>(child->tag);
+ return false;
+ }
+
+ size_t offset = entry_data_offset(head);
+ if (offset >= child->length || entry_length > child->length - offset) {
+ output << "entry[" << head << "] has offset " << offset
+ << " and entry length " << entry_length
+ << " which are outside of the child's length of " << child->length;
+ return false;
+ }
+
+ begin_pos = end_pos;
+ head = advance(head);
+ } while (head != tail_);
+
+ return true;
+}
+
+#ifdef EXTRA_CORD_RING_VALIDATION
+CordRepRing* CordRepRing::Validate(CordRepRing* rep, const char* file,
+ int line) {
+ if (!rep->IsValid(std::cerr)) {
+ std::cerr << "\nERROR: CordRepRing corrupted";
+ if (line) std::cerr << " at line " << line;
+ if (file) std::cerr << " in file " << file;
+ std::cerr << "\nContent = " << *rep;
+ abort();
+ }
+ return rep;
+}
+#endif // EXTRA_CORD_RING_VALIDATION
+
+CordRepRing* CordRepRing::New(size_t capacity, size_t extra) {
+ CheckCapacity(capacity, extra);
+
+ size_t size = AllocSize(capacity += extra);
+ void* mem = ::operator new(size);
+ auto* rep = new (mem) CordRepRing(static_cast<index_type>(capacity));
+ rep->tag = RING;
+ rep->capacity_ = static_cast<index_type>(capacity);
+ rep->begin_pos_ = 0;
+ return rep;
+}
+
+void CordRepRing::SetCapacityForTesting(size_t capacity) {
+ // Adjust for the changed layout
+ assert(capacity <= capacity_);
+ assert(head() == 0 || head() < tail());
+ memmove(Layout::Partial(capacity).Pointer<1>(data_) + head(),
+ Layout::Partial(capacity_).Pointer<1>(data_) + head(),
+ entries() * sizeof(Layout::ElementType<1>));
+ memmove(Layout::Partial(capacity, capacity).Pointer<2>(data_) + head(),
+ Layout::Partial(capacity_, capacity_).Pointer<2>(data_) + head(),
+ entries() * sizeof(Layout::ElementType<2>));
+ capacity_ = static_cast<index_type>(capacity);
+}
+
+void CordRepRing::Delete(CordRepRing* rep) {
+ assert(rep != nullptr && rep->IsRing());
+#if defined(__cpp_sized_deallocation)
+ size_t size = AllocSize(rep->capacity_);
+ rep->~CordRepRing();
+ ::operator delete(rep, size);
+#else
+ rep->~CordRepRing();
+ ::operator delete(rep);
+#endif
+}
+
+void CordRepRing::Destroy(CordRepRing* rep) {
+ UnrefEntries(rep, rep->head(), rep->tail());
+ Delete(rep);
+}
+
+template <bool ref>
+void CordRepRing::Fill(const CordRepRing* src, index_type head,
+ index_type tail) {
+ this->length = src->length;
+ head_ = 0;
+ tail_ = advance(0, src->entries(head, tail));
+ begin_pos_ = src->begin_pos_;
+
+ // TODO(mvels): there may be opportunities here for large buffers.
+ auto* dst_pos = entry_end_pos();
+ auto* dst_child = entry_child();
+ auto* dst_offset = entry_data_offset();
+ src->ForEach(head, tail, [&](index_type index) {
+ *dst_pos++ = src->entry_end_pos(index);
+ CordRep* child = src->entry_child(index);
+ *dst_child++ = ref ? CordRep::Ref(child) : child;
+ *dst_offset++ = src->entry_data_offset(index);
+ });
+}
+
+CordRepRing* CordRepRing::Copy(CordRepRing* rep, index_type head,
+ index_type tail, size_t extra) {
+ CordRepRing* newrep = CordRepRing::New(rep->entries(head, tail), extra);
+ newrep->Fill<true>(rep, head, tail);
+ CordRep::Unref(rep);
+ return newrep;
+}
+
+CordRepRing* CordRepRing::Mutable(CordRepRing* rep, size_t extra) {
+ // Get current number of entries, and check for max capacity.
+ size_t entries = rep->entries();
+
+ if (!rep->refcount.IsMutable()) {
+ return Copy(rep, rep->head(), rep->tail(), extra);
+ } else if (entries + extra > rep->capacity()) {
+ const size_t min_grow = rep->capacity() + rep->capacity() / 2;
+ const size_t min_extra = (std::max)(extra, min_grow - entries);
+ CordRepRing* newrep = CordRepRing::New(entries, min_extra);
+ newrep->Fill<false>(rep, rep->head(), rep->tail());
+ CordRepRing::Delete(rep);
+ return newrep;
+ } else {
+ return rep;
+ }
+}
+
+Span<char> CordRepRing::GetAppendBuffer(size_t size) {
+ assert(refcount.IsMutable());
+ index_type back = retreat(tail_);
+ CordRep* child = entry_child(back);
+ if (child->tag >= FLAT && child->refcount.IsMutable()) {
+ size_t capacity = child->flat()->Capacity();
+ pos_type end_pos = entry_end_pos(back);
+ size_t data_offset = entry_data_offset(back);
+ size_t entry_length = Distance(entry_begin_pos(back), end_pos);
+ size_t used = data_offset + entry_length;
+ if (size_t n = (std::min)(capacity - used, size)) {
+ child->length = data_offset + entry_length + n;
+ entry_end_pos()[back] = end_pos + n;
+ this->length += n;
+ return {child->flat()->Data() + used, n};
+ }
+ }
+ return {nullptr, 0};
+}
+
+Span<char> CordRepRing::GetPrependBuffer(size_t size) {
+ assert(refcount.IsMutable());
+ CordRep* child = entry_child(head_);
+ size_t data_offset = entry_data_offset(head_);
+ if (data_offset && child->refcount.IsMutable() && child->tag >= FLAT) {
+ size_t n = (std::min)(data_offset, size);
+ this->length += n;
+ begin_pos_ -= n;
+ data_offset -= n;
+ entry_data_offset()[head_] = static_cast<offset_type>(data_offset);
+ return {child->flat()->Data() + data_offset, n};
+ }
+ return {nullptr, 0};
+}
+
+CordRepRing* CordRepRing::CreateFromLeaf(CordRep* child, size_t offset,
+ size_t len, size_t extra) {
+ CordRepRing* rep = CordRepRing::New(1, extra);
+ rep->head_ = 0;
+ rep->tail_ = rep->advance(0);
+ rep->length = len;
+ rep->entry_end_pos()[0] = len;
+ rep->entry_child()[0] = child;
+ rep->entry_data_offset()[0] = static_cast<offset_type>(offset);
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::CreateSlow(CordRep* child, size_t extra) {
+ CordRepRing* rep = nullptr;
+ Consume(child, [&](CordRep* child_arg, size_t offset, size_t len) {
+ if (IsFlatOrExternal(child_arg)) {
+ rep = rep ? AppendLeaf(rep, child_arg, offset, len)
+ : CreateFromLeaf(child_arg, offset, len, extra);
+ } else if (rep) {
+ rep = AddRing<AddMode::kAppend>(rep, child_arg->ring(), offset, len);
+ } else if (offset == 0 && child_arg->length == len) {
+ rep = Mutable(child_arg->ring(), extra);
+ } else {
+ rep = SubRing(child_arg->ring(), offset, len, extra);
+ }
+ });
+ return Validate(rep, nullptr, __LINE__);
+}
+
+CordRepRing* CordRepRing::Create(CordRep* child, size_t extra) {
+ size_t length = child->length;
+ if (IsFlatOrExternal(child)) {
+ return CreateFromLeaf(child, 0, length, extra);
+ }
+ if (child->IsRing()) {
+ return Mutable(child->ring(), extra);
+ }
+ return CreateSlow(child, extra);
+}
+
+template <CordRepRing::AddMode mode>
+CordRepRing* CordRepRing::AddRing(CordRepRing* rep, CordRepRing* ring,
+ size_t offset, size_t len) {
+ assert(offset < ring->length);
+ constexpr bool append = mode == AddMode::kAppend;
+ Position head = ring->Find(offset);
+ Position tail = ring->FindTail(head.index, offset + len);
+ const index_type entries = ring->entries(head.index, tail.index);
+
+ rep = Mutable(rep, entries);
+
+ // The delta for making ring[head].end_pos into 'len - offset'
+ const pos_type delta_length =
+ (append ? rep->begin_pos_ + rep->length : rep->begin_pos_ - len) -
+ ring->entry_begin_pos(head.index) - head.offset;
+
+ // Start filling at `tail`, or `entries` before `head`
+ Filler filler(rep, append ? rep->tail_ : rep->retreat(rep->head_, entries));
+
+ if (ring->refcount.IsOne()) {
+    // Copy entries from the source, stealing the refs and adjusting the end
+    // positions; committing the filler is a no-op in this case.
+ ring->ForEach(head.index, tail.index, [&](index_type ix) {
+ filler.Add(ring->entry_child(ix), ring->entry_data_offset(ix),
+ ring->entry_end_pos(ix) + delta_length);
+ });
+
+ // Unref entries we did not copy over, and delete source.
+ if (head.index != ring->head_) UnrefEntries(ring, ring->head_, head.index);
+ if (tail.index != ring->tail_) UnrefEntries(ring, tail.index, ring->tail_);
+ CordRepRing::Delete(ring);
+ } else {
+ ring->ForEach(head.index, tail.index, [&](index_type ix) {
+ CordRep* child = ring->entry_child(ix);
+ filler.Add(child, ring->entry_data_offset(ix),
+ ring->entry_end_pos(ix) + delta_length);
+ CordRep::Ref(child);
+ });
+ CordRepRing::Unref(ring);
+ }
+
+ if (head.offset) {
+ // Increase offset of first 'source' entry appended or prepended.
+ // This is always the entry in `filler.head()`
+ rep->AddDataOffset(filler.head(), head.offset);
+ }
+
+ if (tail.offset) {
+ // Reduce length of last 'source' entry appended or prepended.
+ // This is always the entry tailed by `filler.pos()`
+ rep->SubLength(rep->retreat(filler.pos()), tail.offset);
+ }
+
+ // Commit changes
+ rep->length += len;
+ if (append) {
+ rep->tail_ = filler.pos();
+ } else {
+ rep->head_ = filler.head();
+ rep->begin_pos_ -= len;
+ }
+
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::AppendSlow(CordRepRing* rep, CordRep* child) {
+ Consume(child, [&rep](CordRep* child_arg, size_t offset, size_t len) {
+ if (child_arg->IsRing()) {
+ rep = AddRing<AddMode::kAppend>(rep, child_arg->ring(), offset, len);
+ } else {
+ rep = AppendLeaf(rep, child_arg, offset, len);
+ }
+ });
+ return rep;
+}
+
+CordRepRing* CordRepRing::AppendLeaf(CordRepRing* rep, CordRep* child,
+ size_t offset, size_t len) {
+ rep = Mutable(rep, 1);
+ index_type back = rep->tail_;
+ const pos_type begin_pos = rep->begin_pos_ + rep->length;
+ rep->tail_ = rep->advance(rep->tail_);
+ rep->length += len;
+ rep->entry_end_pos()[back] = begin_pos + len;
+ rep->entry_child()[back] = child;
+ rep->entry_data_offset()[back] = static_cast<offset_type>(offset);
+ return Validate(rep, nullptr, __LINE__);
+}
+
+CordRepRing* CordRepRing::Append(CordRepRing* rep, CordRep* child) {
+ size_t length = child->length;
+ if (IsFlatOrExternal(child)) {
+ return AppendLeaf(rep, child, 0, length);
+ }
+ if (child->IsRing()) {
+ return AddRing<AddMode::kAppend>(rep, child->ring(), 0, length);
+ }
+ return AppendSlow(rep, child);
+}
+
+CordRepRing* CordRepRing::PrependSlow(CordRepRing* rep, CordRep* child) {
+ ReverseConsume(child, [&](CordRep* child_arg, size_t offset, size_t len) {
+ if (IsFlatOrExternal(child_arg)) {
+ rep = PrependLeaf(rep, child_arg, offset, len);
+ } else {
+ rep = AddRing<AddMode::kPrepend>(rep, child_arg->ring(), offset, len);
+ }
+ });
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::PrependLeaf(CordRepRing* rep, CordRep* child,
+ size_t offset, size_t len) {
+ rep = Mutable(rep, 1);
+ index_type head = rep->retreat(rep->head_);
+ pos_type end_pos = rep->begin_pos_;
+ rep->head_ = head;
+ rep->length += len;
+ rep->begin_pos_ -= len;
+ rep->entry_end_pos()[head] = end_pos;
+ rep->entry_child()[head] = child;
+ rep->entry_data_offset()[head] = static_cast<offset_type>(offset);
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::Prepend(CordRepRing* rep, CordRep* child) {
+ size_t length = child->length;
+ if (IsFlatOrExternal(child)) {
+ return PrependLeaf(rep, child, 0, length);
+ }
+ if (child->IsRing()) {
+ return AddRing<AddMode::kPrepend>(rep, child->ring(), 0, length);
+ }
+ return PrependSlow(rep, child);
+}
+
+CordRepRing* CordRepRing::Append(CordRepRing* rep, y_absl::string_view data,
+ size_t extra) {
+ if (rep->refcount.IsMutable()) {
+ Span<char> avail = rep->GetAppendBuffer(data.length());
+ if (!avail.empty()) {
+ memcpy(avail.data(), data.data(), avail.length());
+ data.remove_prefix(avail.length());
+ }
+ }
+ if (data.empty()) return Validate(rep);
+
+ const size_t flats = (data.length() - 1) / kMaxFlatLength + 1;
+ rep = Mutable(rep, flats);
+
+ Filler filler(rep, rep->tail_);
+ pos_type pos = rep->begin_pos_ + rep->length;
+
+ while (data.length() >= kMaxFlatLength) {
+ auto* flat = CreateFlat(data.data(), kMaxFlatLength);
+ filler.Add(flat, 0, pos += kMaxFlatLength);
+ data.remove_prefix(kMaxFlatLength);
+ }
+
+ if (data.length()) {
+ auto* flat = CreateFlat(data.data(), data.length(), extra);
+ filler.Add(flat, 0, pos += data.length());
+ }
+
+ rep->length = pos - rep->begin_pos_;
+ rep->tail_ = filler.pos();
+
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::Prepend(CordRepRing* rep, y_absl::string_view data,
+ size_t extra) {
+ if (rep->refcount.IsMutable()) {
+ Span<char> avail = rep->GetPrependBuffer(data.length());
+ if (!avail.empty()) {
+ const char* tail = data.data() + data.length() - avail.length();
+ memcpy(avail.data(), tail, avail.length());
+ data.remove_suffix(avail.length());
+ }
+ }
+ if (data.empty()) return rep;
+
+ const size_t flats = (data.length() - 1) / kMaxFlatLength + 1;
+ rep = Mutable(rep, flats);
+ pos_type pos = rep->begin_pos_;
+ Filler filler(rep, rep->retreat(rep->head_, static_cast<index_type>(flats)));
+
+ size_t first_size = data.size() - (flats - 1) * kMaxFlatLength;
+ CordRepFlat* flat = CordRepFlat::New(first_size + extra);
+ flat->length = first_size + extra;
+ memcpy(flat->Data() + extra, data.data(), first_size);
+ data.remove_prefix(first_size);
+ filler.Add(flat, extra, pos);
+ pos -= first_size;
+
+ while (!data.empty()) {
+ assert(data.size() >= kMaxFlatLength);
+ flat = CreateFlat(data.data(), kMaxFlatLength);
+ filler.Add(flat, 0, pos);
+ pos -= kMaxFlatLength;
+ data.remove_prefix(kMaxFlatLength);
+ }
+
+ rep->head_ = filler.head();
+ rep->length += rep->begin_pos_ - pos;
+ rep->begin_pos_ = pos;
+
+ return Validate(rep);
+}
+
+// 32 entries is 32 * sizeof(pos_type) = 4 cache lines on x86
+static constexpr index_type kBinarySearchThreshold = 32;
+static constexpr index_type kBinarySearchEndCount = 8;
+
+template <bool wrap>
+CordRepRing::index_type CordRepRing::FindBinary(index_type head,
+ index_type tail,
+ size_t offset) const {
+ index_type count = tail + (wrap ? capacity_ : 0) - head;
+ do {
+ count = (count - 1) / 2;
+ assert(count < entries(head, tail_));
+ index_type mid = wrap ? advance(head, count) : head + count;
+ index_type after_mid = wrap ? advance(mid) : mid + 1;
+ bool larger = (offset >= entry_end_offset(mid));
+ head = larger ? after_mid : head;
+ tail = larger ? tail : mid;
+ assert(head != tail);
+ } while (ABSL_PREDICT_TRUE(count > kBinarySearchEndCount));
+ return head;
+}
+
+CordRepRing::Position CordRepRing::FindSlow(index_type head,
+ size_t offset) const {
+ index_type tail = tail_;
+
+  // Binary search until the range is small enough for a linear search.
+  // Optimize for branchless / non-wrapping ops.
+ if (tail > head) {
+ index_type count = tail - head;
+ if (count > kBinarySearchThreshold) {
+ head = FindBinary<false>(head, tail, offset);
+ }
+ } else {
+ index_type count = capacity_ + tail - head;
+ if (count > kBinarySearchThreshold) {
+ head = FindBinary<true>(head, tail, offset);
+ }
+ }
+
+ pos_type pos = entry_begin_pos(head);
+ pos_type end_pos = entry_end_pos(head);
+ while (offset >= Distance(begin_pos_, end_pos)) {
+ head = advance(head);
+ pos = end_pos;
+ end_pos = entry_end_pos(head);
+ }
+
+ return {head, offset - Distance(begin_pos_, pos)};
+}
+
+CordRepRing::Position CordRepRing::FindTailSlow(index_type head,
+ size_t offset) const {
+ index_type tail = tail_;
+ const size_t tail_offset = offset - 1;
+
+  // Binary search until the range is small enough for a linear search.
+  // Optimize for branchless / non-wrapping ops.
+ if (tail > head) {
+ index_type count = tail - head;
+ if (count > kBinarySearchThreshold) {
+ head = FindBinary<false>(head, tail, tail_offset);
+ }
+ } else {
+ index_type count = capacity_ + tail - head;
+ if (count > kBinarySearchThreshold) {
+ head = FindBinary<true>(head, tail, tail_offset);
+ }
+ }
+
+ size_t end_offset = entry_end_offset(head);
+ while (tail_offset >= end_offset) {
+ head = advance(head);
+ end_offset = entry_end_offset(head);
+ }
+
+ return {advance(head), end_offset - offset};
+}
+
+char CordRepRing::GetCharacter(size_t offset) const {
+ assert(offset < length);
+
+ Position pos = Find(offset);
+ size_t data_offset = entry_data_offset(pos.index) + pos.offset;
+ return GetRepData(entry_child(pos.index))[data_offset];
+}
+
+CordRepRing* CordRepRing::SubRing(CordRepRing* rep, size_t offset,
+ size_t len, size_t extra) {
+ assert(offset <= rep->length);
+ assert(offset <= rep->length - len);
+
+ if (len == 0) {
+ CordRep::Unref(rep);
+ return nullptr;
+ }
+
+ // Find position of first byte
+ Position head = rep->Find(offset);
+ Position tail = rep->FindTail(head.index, offset + len);
+ const size_t new_entries = rep->entries(head.index, tail.index);
+
+ if (rep->refcount.IsMutable() && extra <= (rep->capacity() - new_entries)) {
+    // We adopt a privately owned rep, and no extra entries are needed.
+ if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
+ if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
+ rep->head_ = head.index;
+ rep->tail_ = tail.index;
+ } else {
+ // Copy subset to new rep
+ rep = Copy(rep, head.index, tail.index, extra);
+ head.index = rep->head_;
+ tail.index = rep->tail_;
+ }
+
+ // Adjust begin_pos and length
+ rep->length = len;
+ rep->begin_pos_ += offset;
+
+ // Adjust head and tail blocks
+ if (head.offset) {
+ rep->AddDataOffset(head.index, head.offset);
+ }
+ if (tail.offset) {
+ rep->SubLength(rep->retreat(tail.index), tail.offset);
+ }
+
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::RemovePrefix(CordRepRing* rep, size_t len,
+ size_t extra) {
+ assert(len <= rep->length);
+ if (len == rep->length) {
+ CordRep::Unref(rep);
+ return nullptr;
+ }
+
+ Position head = rep->Find(len);
+ if (rep->refcount.IsMutable()) {
+ if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
+ rep->head_ = head.index;
+ } else {
+ rep = Copy(rep, head.index, rep->tail_, extra);
+ head.index = rep->head_;
+ }
+
+ // Adjust begin_pos and length
+ rep->length -= len;
+ rep->begin_pos_ += len;
+
+ // Adjust head block
+ if (head.offset) {
+ rep->AddDataOffset(head.index, head.offset);
+ }
+
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::RemoveSuffix(CordRepRing* rep, size_t len,
+ size_t extra) {
+ assert(len <= rep->length);
+
+ if (len == rep->length) {
+ CordRep::Unref(rep);
+ return nullptr;
+ }
+
+ Position tail = rep->FindTail(rep->length - len);
+ if (rep->refcount.IsMutable()) {
+    // We adopt a privately owned rep; scrub the entries we drop.
+ if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
+ rep->tail_ = tail.index;
+ } else {
+ // Copy subset to new rep
+ rep = Copy(rep, rep->head_, tail.index, extra);
+ tail.index = rep->tail_;
+ }
+
+ // Adjust length
+ rep->length -= len;
+
+ // Adjust tail block
+ if (tail.offset) {
+ rep->SubLength(rep->retreat(tail.index), tail.offset);
+ }
+
+ return Validate(rep);
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.h
new file mode 100644
index 00000000000..5f9784d8daa
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.h
@@ -0,0 +1,607 @@
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iosfwd>
+#include <limits>
+#include <memory>
+
+#include "y_absl/container/internal/layout.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// All operations modifying a ring buffer are implemented as static methods
+// requiring a CordRepRing instance with a reference adopted by the method.
+//
+// The methods return the modified ring buffer, which may be equal to the input
+// if the input was not shared and has enough capacity to accommodate any newly
+// added node(s). Otherwise, a copy of the input rep with the new node(s) added
+// is returned.
+//
+// Any modification of a non-shared ring buffer with enough capacity thus
+// requires a minimum of atomic operations. Callers should, where possible,
+// provide reasonable `extra` hints for both anticipated extra `flat` byte
+// space and anticipated extra nodes required for complex operations.
+//
+// Example of code creating a ring buffer, adding some data to it,
+// and discarding the buffer when done:
+//
+// void FunWithRings() {
+// // Create ring with 3 flats
+// CordRep* flat = CreateFlat("Hello");
+// CordRepRing* ring = CordRepRing::Create(flat, 2);
+// ring = CordRepRing::Append(ring, CreateFlat(" "));
+// ring = CordRepRing::Append(ring, CreateFlat("world"));
+// DoSomethingWithRing(ring);
+// CordRep::Unref(ring);
+// }
+//
+// Example of code copying an existing ring buffer and modifying it:
+//
+// void MoreFunWithRings(CordRepRing* src) {
+// CordRepRing* ring = CordRep::Ref(src)->ring();
+// ring = CordRepRing::Append(ring, CreateFlat("Hello"));
+// ring = CordRepRing::Append(ring, CreateFlat(" "));
+// ring = CordRepRing::Append(ring, CreateFlat("world"));
+// DoSomethingWithRing(ring);
+// CordRep::Unref(ring);
+// }
+//
+class CordRepRing : public CordRep {
+ public:
+ // `pos_type` represents a 'logical position'. A CordRepRing instance has a
+ // `begin_pos` (default 0), and each node inside the buffer will have an
+ // `end_pos` which is the `end_pos` of the previous node (or `begin_pos`) plus
+ // this node's length. The purpose is to allow for a binary search on this
+ // position, while allowing O(1) prepend and append operations.
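+  // Worked example (editorial): with `begin_pos` 10 and entry lengths {5, 3},
+  // the entry end positions are {15, 18}; Distance(10, 18) == 8 equals the
+  // total length, and logical offset 6 falls in the second entry at relative
+  // offset 1.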
+ using pos_type = size_t;
+
+ // `index_type` is the type for the `head`, `tail` and `capacity` indexes.
+ // Ring buffers are limited to having no more than four billion entries.
+ using index_type = uint32_t;
+
+ // `offset_type` is the type for the data offset inside a child rep's data.
+ using offset_type = uint32_t;
+
+ // Position holds the node index and relative offset into the node for
+ // some physical offset in the contained data as returned by the Find()
+ // and FindTail() methods.
+ struct Position {
+ index_type index;
+ size_t offset;
+ };
+
+ // The maximum # of child nodes that can be hosted inside a CordRepRing.
+ static constexpr size_t kMaxCapacity = (std::numeric_limits<uint32_t>::max)();
+
+  // CordRepRing cannot be default constructed, moved, copied, or assigned.
+ CordRepRing() = delete;
+ CordRepRing(const CordRepRing&) = delete;
+ CordRepRing& operator=(const CordRepRing&) = delete;
+
+ // Returns true if this instance is valid, false if some or all of the
+ // invariants are broken. Intended for debug purposes only.
+ // `output` receives an explanation of the broken invariants.
+ bool IsValid(std::ostream& output) const;
+
+  // Returns the size in bytes for a CordRepRing with `capacity` entries.
+ static constexpr size_t AllocSize(size_t capacity);
+
+ // Returns the distance in bytes from `pos` to `end_pos`.
+ static constexpr size_t Distance(pos_type pos, pos_type end_pos);
+
+  // Creates a new ring buffer from the provided `child`. Adopts a reference
+  // on `child`. The returned ring buffer has a capacity of at least
+  // `extra + 1`.
+ static CordRepRing* Create(CordRep* child, size_t extra = 0);
+
+ // `head`, `tail` and `capacity` indexes defining the ring buffer boundaries.
+ index_type head() const { return head_; }
+ index_type tail() const { return tail_; }
+ index_type capacity() const { return capacity_; }
+
+ // Returns the number of entries in this instance.
+ index_type entries() const { return entries(head_, tail_); }
+
+ // Returns the logical begin position of this instance.
+ pos_type begin_pos() const { return begin_pos_; }
+
+ // Returns the number of entries for a given head-tail range.
+ // Requires `head` and `tail` values to be less than `capacity()`.
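+  // E.g., with capacity() == 8 (editorial example): entries(2, 5) == 3, and
+  // the wrapped range entries(6, 2) == 4, covering indexes 6, 7, 0 and 1.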
+ index_type entries(index_type head, index_type tail) const {
+ assert(head < capacity_ && tail < capacity_);
+ return tail - head + ((tail > head) ? 0 : capacity_);
+ }
+
+ // Returns the logical end position of entry `index`.
+ pos_type const& entry_end_pos(index_type index) const {
+ assert(IsValidIndex(index));
+ return Layout::Partial().Pointer<0>(data_)[index];
+ }
+
+ // Returns the child pointer of entry `index`.
+ CordRep* const& entry_child(index_type index) const {
+ assert(IsValidIndex(index));
+ return Layout::Partial(capacity()).Pointer<1>(data_)[index];
+ }
+
+ // Returns the data offset of entry `index`
+ offset_type const& entry_data_offset(index_type index) const {
+ assert(IsValidIndex(index));
+ return Layout::Partial(capacity(), capacity()).Pointer<2>(data_)[index];
+ }
+
+ // Appends the provided child node to the `rep` instance.
+ // Adopts a reference from `rep` and `child` which may not be null.
+ // If the provided child is a FLAT or EXTERNAL node, or a SUBSTRING node
+  // containing a FLAT or EXTERNAL node, then the flat or external node is
+  // added 'as is', with an offset added for the SUBSTRING case.
+ // If the provided child is a RING or CONCAT tree, or a SUBSTRING of a RING or
+ // CONCAT tree, then all child nodes not excluded by any start offset or
+ // length values are added recursively.
+ static CordRepRing* Append(CordRepRing* rep, CordRep* child);
+
+ // Appends the provided string data to the `rep` instance.
+ // This function will attempt to utilize any remaining capacity in the last
+ // node of the input if that node is not shared (directly or indirectly), and
+ // of type FLAT. Remaining data will be added as one or more FLAT nodes.
+ // Any last node added to the ring buffer will be allocated with up to
+ // `extra` bytes of capacity for (anticipated) subsequent append actions.
+ static CordRepRing* Append(CordRepRing* rep, string_view data,
+ size_t extra = 0);
+
+ // Prepends the provided child node to the `rep` instance.
+ // Adopts a reference from `rep` and `child` which may not be null.
+ // If the provided child is a FLAT or EXTERNAL node, or a SUBSTRING node
+  // containing a FLAT or EXTERNAL node, then the flat or external node is
+  // prepended 'as is', with an optional offset added for the SUBSTRING case.
+ // If the provided child is a RING or CONCAT tree, or a SUBSTRING of a RING
+ // or CONCAT tree, then all child nodes not excluded by any start offset or
+ // length values are added recursively.
+ static CordRepRing* Prepend(CordRepRing* rep, CordRep* child);
+
+ // Prepends the provided string data to the `rep` instance.
+ // This function will attempt to utilize any remaining capacity in the first
+ // node of the input if that node is not shared (directly or indirectly), and
+ // of type FLAT. Remaining data will be added as one or more FLAT nodes.
+  // Any first node prepended to the ring buffer will be allocated with up to
+ // `extra` bytes of capacity for (anticipated) subsequent prepend actions.
+ static CordRepRing* Prepend(CordRepRing* rep, string_view data,
+ size_t extra = 0);
+
+ // Returns a span referencing potentially unused capacity in the last node.
+ // The returned span may be empty if no such capacity is available, or if the
+ // current instance is shared. Else, a span of size `n <= size` is returned.
+  // If non-empty, the ring buffer is adjusted to the new length, with the newly
+ // added capacity left uninitialized. Callers should assign a value to the
+ // entire span before any other operations on this instance.
+ Span<char> GetAppendBuffer(size_t size);
+
+ // Returns a span referencing potentially unused capacity in the first node.
+ // This function is identical to GetAppendBuffer except that it returns a span
+ // referencing up to `size` capacity directly before the existing data.
+ Span<char> GetPrependBuffer(size_t size);
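+
+  // Append buffer usage sketch (editorial; assumes a privately owned `rep`
+  // and pending `y_absl::string_view data`):
+  //   Span<char> avail = rep->GetAppendBuffer(data.size());
+  //   memcpy(avail.data(), data.data(), avail.size());
+  //   data.remove_prefix(avail.size());  // any remainder needs new flats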
+
+ // Returns a cord ring buffer containing `len` bytes of data starting at
+ // `offset`. If the input is not shared, this function will remove all head
+ // and tail child nodes outside of the requested range, and adjust the new
+ // head and tail nodes as required. If the input is shared, this function
+ // returns a new instance sharing some or all of the nodes from the input.
+ static CordRepRing* SubRing(CordRepRing* r, size_t offset, size_t len,
+ size_t extra = 0);
+
+  // Returns a cord ring buffer with the last `len` bytes removed.
+  // If the input is not shared, this function will remove all tail child nodes
+  // fully inside the last `len` bytes, and adjust the new tail as required.
+ // If the input is shared, this function returns a new instance sharing some
+ // or all of the nodes from the input.
+ static CordRepRing* RemoveSuffix(CordRepRing* r, size_t len,
+ size_t extra = 0);
+
+  // Returns a cord ring buffer with the first `len` bytes removed.
+  // If the input is not shared, this function will remove all head child nodes
+  // fully inside the first `len` bytes, and adjust the new head as required.
+ // If the input is shared, this function returns a new instance sharing some
+ // or all of the nodes from the input.
+ static CordRepRing* RemovePrefix(CordRepRing* r, size_t len,
+ size_t extra = 0);
+
+ // Returns the character at `offset`. Requires that `offset < length`.
+ char GetCharacter(size_t offset) const;
+
+ // Returns true if this instance manages a single contiguous buffer, in which
+ // case the (optional) output parameter `fragment` is set. Otherwise, the
+ // function returns false, and `fragment` is left unchanged.
+ bool IsFlat(y_absl::string_view* fragment) const;
+
+ // Returns true if the data starting at `offset` with length `len` is
+ // managed by this instance inside a single contiguous buffer, in which case
+ // the (optional) output parameter `fragment` is set to the contiguous memory
+  // starting at offset `offset` with length `len`. Otherwise, the function
+ // returns false, and `fragment` is left unchanged.
+ bool IsFlat(size_t offset, size_t len, y_absl::string_view* fragment) const;
+
+ // Testing only: set capacity to requested capacity.
+ void SetCapacityForTesting(size_t capacity);
+
+ // Returns the CordRep data pointer for the provided CordRep.
+ // Requires that the provided `rep` is either a FLAT or EXTERNAL CordRep.
+ static const char* GetLeafData(const CordRep* rep);
+
+ // Returns the CordRep data pointer for the provided CordRep.
+ // Requires that `rep` is either a FLAT, EXTERNAL, or SUBSTRING CordRep.
+ static const char* GetRepData(const CordRep* rep);
+
+ // Advances the provided position, wrapping around capacity as needed.
+ // Requires `index` < capacity()
+ inline index_type advance(index_type index) const;
+
+  // Advances the provided position by `n`, wrapping around capacity as needed.
+  // Requires `index` < capacity() and `n` <= capacity().
+ inline index_type advance(index_type index, index_type n) const;
+
+ // Retreats the provided position, wrapping around 0 as needed.
+ // Requires `index` < capacity()
+ inline index_type retreat(index_type index) const;
+
+  // Retreats the provided position by `n`, wrapping around 0 as needed.
+  // Requires `index` < capacity() and `n` <= capacity().
+ inline index_type retreat(index_type index, index_type n) const;
+
+ // Returns the logical begin position of entry `index`
+ pos_type const& entry_begin_pos(index_type index) const {
+ return (index == head_) ? begin_pos_ : entry_end_pos(retreat(index));
+ }
+
+ // Returns the physical start offset of entry `index`
+ size_t entry_start_offset(index_type index) const {
+ return Distance(begin_pos_, entry_begin_pos(index));
+ }
+
+ // Returns the physical end offset of entry `index`
+ size_t entry_end_offset(index_type index) const {
+ return Distance(begin_pos_, entry_end_pos(index));
+ }
+
+ // Returns the data length for entry `index`
+ size_t entry_length(index_type index) const {
+ return Distance(entry_begin_pos(index), entry_end_pos(index));
+ }
+
+ // Returns the data for entry `index`
+ y_absl::string_view entry_data(index_type index) const;
+
+  // Returns the position for `offset` as a `Position{index, offset}` pair:
+  // `index` holds the index of the entry at the specified offset and the
+  // position's `offset` holds the relative offset inside that entry.
+  // Requires `offset` < length.
+  //
+  // For example, we can implement GetCharacter(offset) as:
+  //   char GetCharacter(size_t offset) {
+  //     Position pos = this->Find(offset);
+  //     return this->entry_data(pos.index)[pos.offset];
+  //   }
+ inline Position Find(size_t offset) const;
+
+ // Find starting at `head`
+ inline Position Find(index_type head, size_t offset) const;
+
+  // Returns the tail position for `offset` as {tail index, suffix}.
+  // `tail index` holds the index of the entry holding the offset directly
+  // before `offset`, advanced by one. `suffix` holds the relative offset from
+  // that offset in the entry to the end of the entry.
+  // For example, FindTail(length) will return {tail(), 0}, and
+  // FindTail(length - 5) will return {retreat(tail), 5} provided the preceding
+  // entry contains at least 5 bytes of data.
+  // Requires offset >= 1 && offset <= length.
+  //
+  // This function is very useful in functions that need to clip the end of
+  // some ring buffer, such as RemoveSuffix().
+  // For example, we could implement RemoveSuffix for non-shared instances as:
+  //   void RemoveSuffix(size_t n) {
+  //     Position pos = FindTail(length - n);
+  //     UnrefEntries(pos.index, this->tail_);
+  //     this->tail_ = pos.index;
+  //     entry(retreat(pos.index)).end_pos -= pos.offset;
+  //   }
+ inline Position FindTail(size_t offset) const;
+
+ // Find tail starting at `head`
+ inline Position FindTail(index_type head, size_t offset) const;
+
+  // Invokes f(index_type index) for each entry inside the range [head, tail)
+ template <typename F>
+ void ForEach(index_type head, index_type tail, F&& f) const {
+ index_type n1 = (tail > head) ? tail : capacity_;
+ for (index_type i = head; i < n1; ++i) f(i);
+ if (tail <= head) {
+ for (index_type i = 0; i < tail; ++i) f(i);
+ }
+ }
+
+ // Invokes f(index_type index) for each entry inside this instance.
+ template <typename F>
+ void ForEach(F&& f) const {
+ ForEach(head_, tail_, std::forward<F>(f));
+ }
+
+  // Dumps this instance's data to stream `s` in human-readable format,
+  // excluding the actual data content itself. Intended for debug purposes only.
+ friend std::ostream& operator<<(std::ostream& s, const CordRepRing& rep);
+
+ private:
+ enum class AddMode { kAppend, kPrepend };
+
+ using Layout = container_internal::Layout<pos_type, CordRep*, offset_type>;
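+  // (Editorial note) `Layout` describes three parallel arrays packed into one
+  // allocation: entry_end_pos[capacity], entry_child[capacity] and
+  // entry_data_offset[capacity]; see the entry_*() accessors.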
+
+ class Filler;
+ class Transaction;
+ class CreateTransaction;
+
+ static constexpr size_t kLayoutAlignment = Layout::Partial().Alignment();
+
+ // Creates a new CordRepRing.
+ explicit CordRepRing(index_type capacity) : capacity_(capacity) {}
+
+ // Returns true if `index` is a valid index into this instance.
+ bool IsValidIndex(index_type index) const;
+
+ // Debug use only: validates the provided CordRepRing invariants.
+ // Verification of all CordRepRing methods can be enabled by defining
+ // EXTRA_CORD_RING_VALIDATION, i.e.: `--copts=-DEXTRA_CORD_RING_VALIDATION`
+ // Verification is VERY expensive, so only do it for debugging purposes.
+ static CordRepRing* Validate(CordRepRing* rep, const char* file = nullptr,
+ int line = 0);
+
+  // Allocates a CordRepRing large enough to hold `capacity + extra` entries.
+  // The returned capacity may be larger if the allocated memory allows for it.
+  // The maximum capacity of a CordRepRing is capped at kMaxCapacity.
+  // Throws `std::length_error` if `capacity + extra` exceeds kMaxCapacity.
+ static CordRepRing* New(size_t capacity, size_t extra);
+
+ // Deallocates (but does not destroy) the provided ring buffer.
+ static void Delete(CordRepRing* rep);
+
+ // Destroys the provided ring buffer, decrementing the reference count of all
+  // contained child CordReps. The provided `rep` should have a ref count of
+  // one (pre-decrement destroy call observing `refcount.IsOne()`) or zero
+  // (post-decrement destroy call observing `!refcount.Decrement()`).
+ static void Destroy(CordRepRing* rep);
+
+  // Returns a mutable pointer to the logical end position array.
+ pos_type* entry_end_pos() {
+ return Layout::Partial().Pointer<0>(data_);
+ }
+
+  // Returns a mutable pointer to the child pointer array.
+ CordRep** entry_child() {
+ return Layout::Partial(capacity()).Pointer<1>(data_);
+ }
+
+  // Returns a mutable pointer to the data offset array.
+ offset_type* entry_data_offset() {
+ return Layout::Partial(capacity(), capacity()).Pointer<2>(data_);
+ }
+
+  // Find implementations for the non-fast-path 0 / length cases.
+ Position FindSlow(index_type head, size_t offset) const;
+ Position FindTailSlow(index_type head, size_t offset) const;
+
+ // Finds the index of the first node that is inside a reasonable distance
+ // of the node at `offset` from which we can continue with a linear search.
+ template <bool wrap>
+ index_type FindBinary(index_type head, index_type tail, size_t offset) const;
+
+ // Fills the current (initialized) instance from the provided source, copying
+ // entries [head, tail). Adds a reference to copied entries if `ref` is true.
+ template <bool ref>
+ void Fill(const CordRepRing* src, index_type head, index_type tail);
+
+  // Creates a copy of `rep`, copying all entries [head, tail), allocating room
+ // for `extra` entries. Adds a reference on all copied entries.
+ static CordRepRing* Copy(CordRepRing* rep, index_type head, index_type tail,
+ size_t extra = 0);
+
+  // Returns a mutable CordRepRing from `rep` with room for at least
+ // `extra` additional nodes. Adopts a reference count from `rep`.
+ // This function will return `rep` if, and only if:
+ // - rep.entries + extra <= rep.capacity
+ // - rep.refcount == 1
+ // Otherwise, this function will create a new copy of `rep` with additional
+ // capacity to satisfy `extra` extra nodes, and unref the old `rep` instance.
+ //
+  // If a new CordRepRing cannot be allocated, or the new capacity would exceed
+  // the maximum capacity, then the input is still consumed, and an exception
+  // is thrown.
+ static CordRepRing* Mutable(CordRepRing* rep, size_t extra);
+
+ // Slow path for Append(CordRepRing* rep, CordRep* child). This function is
+ // exercised if the provided `child` in Append() is not a leaf node, i.e., a
+ // ring buffer or old (concat) cord tree.
+ static CordRepRing* AppendSlow(CordRepRing* rep, CordRep* child);
+
+ // Appends the provided leaf node. Requires `child` to be FLAT or EXTERNAL.
+ static CordRepRing* AppendLeaf(CordRepRing* rep, CordRep* child,
+ size_t offset, size_t length);
+
+ // Prepends the provided leaf node. Requires `child` to be FLAT or EXTERNAL.
+ static CordRepRing* PrependLeaf(CordRepRing* rep, CordRep* child,
+ size_t offset, size_t length);
+
+ // Slow path for Prepend(CordRepRing* rep, CordRep* child). This function is
+ // exercised if the provided `child` in Prepend() is not a leaf node, i.e., a
+ // ring buffer or old (concat) cord tree.
+ static CordRepRing* PrependSlow(CordRepRing* rep, CordRep* child);
+
+ // Slow path for Create(CordRep* child, size_t extra). This function is
+  // exercised if the provided `child` in Create() is neither a leaf node nor
+  // a ring buffer, e.g., an old (concat) cord tree.
+ static CordRepRing* CreateSlow(CordRep* child, size_t extra);
+
+ // Creates a new ring buffer from the provided `child` leaf node. Requires
+  // `child` to be FLAT or EXTERNAL.
+  // The returned ring buffer has a capacity of at least `1 + extra`.
+ static CordRepRing* CreateFromLeaf(CordRep* child, size_t offset,
+ size_t length, size_t extra);
+
+  // Appends or prepends (depending on AddMode) the ring buffer in `ring` to
+ // `rep` starting at `offset` with length `len`.
+ template <AddMode mode>
+ static CordRepRing* AddRing(CordRepRing* rep, CordRepRing* ring,
+ size_t offset, size_t len);
+
+ // Increases the data offset for entry `index` by `n`.
+ void AddDataOffset(index_type index, size_t n);
+
+  // Decreases the length for entry `index` by `n`.
+ void SubLength(index_type index, size_t n);
+
+ index_type head_;
+ index_type tail_;
+ index_type capacity_;
+ pos_type begin_pos_;
+
+ alignas(kLayoutAlignment) char data_[kLayoutAlignment];
+
+ friend struct CordRep;
+};
+
+constexpr size_t CordRepRing::AllocSize(size_t capacity) {
+ return sizeof(CordRepRing) - sizeof(data_) +
+ Layout(capacity, capacity, capacity).AllocSize();
+}
+
+inline constexpr size_t CordRepRing::Distance(pos_type pos, pos_type end_pos) {
+ return (end_pos - pos);
+}
+
+inline const char* CordRepRing::GetLeafData(const CordRep* rep) {
+ return rep->tag != EXTERNAL ? rep->flat()->Data() : rep->external()->base;
+}
+
+inline const char* CordRepRing::GetRepData(const CordRep* rep) {
+ if (rep->tag >= FLAT) return rep->flat()->Data();
+ if (rep->tag == EXTERNAL) return rep->external()->base;
+ return GetLeafData(rep->substring()->child) + rep->substring()->start;
+}
+
+inline CordRepRing::index_type CordRepRing::advance(index_type index) const {
+ assert(index < capacity_);
+ return ++index == capacity_ ? 0 : index;
+}
+
+inline CordRepRing::index_type CordRepRing::advance(index_type index,
+ index_type n) const {
+ assert(index < capacity_ && n <= capacity_);
+ return (index += n) >= capacity_ ? index - capacity_ : index;
+}
+
+inline CordRepRing::index_type CordRepRing::retreat(index_type index) const {
+ assert(index < capacity_);
+ return (index > 0 ? index : capacity_) - 1;
+}
+
+inline CordRepRing::index_type CordRepRing::retreat(index_type index,
+ index_type n) const {
+ assert(index < capacity_ && n <= capacity_);
+ return index >= n ? index - n : capacity_ - n + index;
+}
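+
+// Illustration (not part of the original header): with `capacity_` == 4 the
+// index helpers above wrap as follows:
+//   advance(3)    == 0  // 3 + 1 wraps past the end of the buffer
+//   advance(2, 3) == 1  // (2 + 3) - 4
+//   retreat(0)    == 3  // stepping back from index 0 wraps to the last slot
+//   retreat(1, 3) == 2  // 4 - 3 + 1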
+
+inline y_absl::string_view CordRepRing::entry_data(index_type index) const {
+ size_t data_offset = entry_data_offset(index);
+ return {GetRepData(entry_child(index)) + data_offset, entry_length(index)};
+}
+
+inline bool CordRepRing::IsValidIndex(index_type index) const {
+ if (index >= capacity_) return false;
+ return (tail_ > head_) ? (index >= head_ && index < tail_)
+ : (index >= head_ || index < tail_);
+}
+
+#ifndef EXTRA_CORD_RING_VALIDATION
+inline CordRepRing* CordRepRing::Validate(CordRepRing* rep,
+ const char* /*file*/, int /*line*/) {
+ return rep;
+}
+#endif
+
+inline CordRepRing::Position CordRepRing::Find(size_t offset) const {
+ assert(offset < length);
+ return (offset == 0) ? Position{head_, 0} : FindSlow(head_, offset);
+}
+
+inline CordRepRing::Position CordRepRing::Find(index_type head,
+ size_t offset) const {
+ assert(offset < length);
+ assert(IsValidIndex(head) && offset >= entry_start_offset(head));
+ return (offset == 0) ? Position{head_, 0} : FindSlow(head, offset);
+}
+
+inline CordRepRing::Position CordRepRing::FindTail(size_t offset) const {
+ assert(offset > 0 && offset <= length);
+ return (offset == length) ? Position{tail_, 0} : FindTailSlow(head_, offset);
+}
+
+inline CordRepRing::Position CordRepRing::FindTail(index_type head,
+ size_t offset) const {
+ assert(offset > 0 && offset <= length);
+ assert(IsValidIndex(head) && offset >= entry_start_offset(head) + 1);
+ return (offset == length) ? Position{tail_, 0} : FindTailSlow(head, offset);
+}
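+
+// Illustrative example (not part of the original header): for a ring holding
+// two chunks of 10 and 20 bytes, Find(13) yields a Position referencing the
+// second chunk with a relative offset of 3, i.e., absolute offset 13 falls
+// 3 bytes into the second chunk.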
+
+// Now that CordRepRing is defined, we can define CordRep's helper casts:
+inline CordRepRing* CordRep::ring() {
+ assert(IsRing());
+ return static_cast<CordRepRing*>(this);
+}
+
+inline const CordRepRing* CordRep::ring() const {
+ assert(IsRing());
+ return static_cast<const CordRepRing*>(this);
+}
+
+inline bool CordRepRing::IsFlat(y_absl::string_view* fragment) const {
+ if (entries() == 1) {
+ if (fragment) *fragment = entry_data(head());
+ return true;
+ }
+ return false;
+}
+
+inline bool CordRepRing::IsFlat(size_t offset, size_t len,
+ y_absl::string_view* fragment) const {
+ const Position pos = Find(offset);
+ const y_absl::string_view data = entry_data(pos.index);
+ if (data.length() >= len && data.length() - len >= pos.offset) {
+ if (fragment) *fragment = data.substr(pos.offset, len);
+ return true;
+ }
+ return false;
+}
+
+std::ostream& operator<<(std::ostream& s, const CordRepRing& rep);
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring_reader.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring_reader.h
new file mode 100644
index 00000000000..3f64d04faed
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring_reader.h
@@ -0,0 +1,118 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_ring.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordRepRingReader provides basic navigation over CordRepRing data.
+class CordRepRingReader {
+ public:
+ // Returns true if this instance is not empty.
+ explicit operator bool() const { return ring_ != nullptr; }
+
+ // Returns the ring buffer reference for this instance, or nullptr if empty.
+ CordRepRing* ring() const { return ring_; }
+
+ // Returns the current node index inside the ring buffer for this instance.
+ // The returned value is undefined if this instance is empty.
+ CordRepRing::index_type index() const { return index_; }
+
+ // Returns the current node inside the ring buffer for this instance.
+ // The returned value is undefined if this instance is empty.
+ CordRep* node() const { return ring_->entry_child(index_); }
+
+ // Returns the length of the referenced ring buffer.
+ // Requires the current instance to be non empty.
+ size_t length() const {
+ assert(ring_);
+ return ring_->length;
+ }
+
+ // Returns the end offset of the last navigated-to chunk, which represents the
+ // total bytes 'consumed' relative to the start of the ring. The returned
+ // value is never zero. For example, initializing a reader with a ring buffer
+ // with a first chunk of 19 bytes will return consumed() = 19.
+ // Requires the current instance to be non empty.
+ size_t consumed() const {
+ assert(ring_);
+ return ring_->entry_end_offset(index_);
+ }
+
+ // Returns the number of bytes remaining beyond the last navigated-to chunk.
+ // Requires the current instance to be non empty.
+ size_t remaining() const {
+ assert(ring_);
+ return length() - consumed();
+ }
+
+ // Resets this instance to an empty value.
+ void Reset() { ring_ = nullptr; }
+
+ // Resets this instance to the start of `ring`. `ring` must not be null.
+ // Returns a reference into the first chunk of the provided ring.
+ y_absl::string_view Reset(CordRepRing* ring) {
+ assert(ring);
+ ring_ = ring;
+ index_ = ring_->head();
+ return ring_->entry_data(index_);
+ }
+
+ // Navigates to the next chunk inside the reference ring buffer.
+ // Returns a reference into the navigated-to chunk.
+ // Requires remaining() to be non zero.
+ y_absl::string_view Next() {
+ assert(remaining());
+ index_ = ring_->advance(index_);
+ return ring_->entry_data(index_);
+ }
+
+ // Navigates to the chunk at offset `offset`.
+ // Returns a reference into the navigated-to chunk, adjusted for the relative
+ // position of `offset` into that chunk. For example, calling Seek(13) on a
+ // ring buffer containing 2 chunks of 10 and 20 bytes respectively will return
+ // a string view into the second chunk starting at offset 3 with a size of 17.
+ // Requires `offset` to be less than `length()`.
+ y_absl::string_view Seek(size_t offset) {
+ assert(offset < length());
+ size_t current = ring_->entry_end_offset(index_);
+ CordRepRing::index_type hint = (offset >= current) ? index_ : ring_->head();
+ const CordRepRing::Position head = ring_->Find(hint, offset);
+ index_ = head.index;
+ auto data = ring_->entry_data(head.index);
+ data.remove_prefix(head.offset);
+ return data;
+ }
+
+ private:
+ CordRepRing* ring_ = nullptr;
+ CordRepRing::index_type index_;
+};
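+
+// Illustrative usage sketch (hypothetical `ring` and `Process()`, not part of
+// the original header): walk all chunks of a ring buffer front to back.
+//
+//   CordRepRingReader reader;
+//   for (y_absl::string_view chunk = reader.Reset(ring);;) {
+//     Process(chunk);
+//     if (reader.remaining() == 0) break;
+//     chunk = reader.Next();
+//   }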
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_test_util.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_test_util.h
new file mode 100644
index 00000000000..98dcc0d6498
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_test_util.h
@@ -0,0 +1,220 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_TEST_UTIL_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_TEST_UTIL_H_
+
+#include <algorithm>
+#include <cassert>
+#include <memory>
+#include <random>
+#include <util/generic/string.h>
+#include <vector>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cordrep_testing {
+
+inline cord_internal::CordRepSubstring* MakeSubstring(
+ size_t start, size_t len, cord_internal::CordRep* rep) {
+ auto* sub = new cord_internal::CordRepSubstring;
+ sub->tag = cord_internal::SUBSTRING;
+ sub->start = start;
+ // A `len` of zero creates a substring spanning from `start` to the end of `rep`.
+ sub->length = len == 0 ? rep->length - start : len;
+ sub->child = rep;
+ return sub;
+}
+
+inline cord_internal::CordRepConcat* MakeConcat(cord_internal::CordRep* left,
+ cord_internal::CordRep* right,
+ int depth = 0) {
+ auto* concat = new cord_internal::CordRepConcat;
+ concat->tag = cord_internal::CONCAT;
+ concat->length = left->length + right->length;
+ concat->left = left;
+ concat->right = right;
+ concat->set_depth(depth);
+ return concat;
+}
+
+inline cord_internal::CordRepFlat* MakeFlat(y_absl::string_view value) {
+ assert(value.length() <= cord_internal::kMaxFlatLength);
+ auto* flat = cord_internal::CordRepFlat::New(value.length());
+ flat->length = value.length();
+ memcpy(flat->Data(), value.data(), value.length());
+ return flat;
+}
+
+// Creates an external node for testing
+inline cord_internal::CordRepExternal* MakeExternal(y_absl::string_view s) {
+ struct Rep : public cord_internal::CordRepExternal {
+ TString s;
+ explicit Rep(y_absl::string_view sv) : s(sv) {
+ this->tag = cord_internal::EXTERNAL;
+ this->base = s.data();
+ this->length = s.length();
+ this->releaser_invoker = [](cord_internal::CordRepExternal* self) {
+ delete static_cast<Rep*>(self);
+ };
+ }
+ };
+ return new Rep(s);
+}
+
+inline TString CreateRandomString(size_t n) {
+ y_absl::string_view data =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789~!@#$%^&*()_+=-<>?:\"{}[]|";
+ std::minstd_rand rnd;
+ std::uniform_int_distribution<size_t> dist(0, data.size() - 1);
+ TString s(n, ' ');
+ for (size_t i = 0; i < n; ++i) {
+ s[i] = data[dist(rnd)];
+ }
+ return s;
+}
+
+// Creates a vector of flats from the provided string, chopping it up into
+// flats of at most `chunk_size` characters, resulting in
+// `ceil(data.size() / chunk_size)` flats in total.
+inline std::vector<cord_internal::CordRep*> CreateFlatsFromString(
+ y_absl::string_view data, size_t chunk_size) {
+ assert(chunk_size > 0);
+ std::vector<cord_internal::CordRep*> flats;
+ // Clamp removal to the remaining size so the final partial chunk is safe.
+ for (y_absl::string_view s = data; !s.empty();
+ s.remove_prefix(std::min(chunk_size, s.size()))) {
+ flats.push_back(MakeFlat(s.substr(0, chunk_size)));
+ }
+ return flats;
+}
+
+inline cord_internal::CordRepBtree* CordRepBtreeFromFlats(
+ y_absl::Span<cord_internal::CordRep* const> flats) {
+ assert(!flats.empty());
+ auto* node = cord_internal::CordRepBtree::Create(flats[0]);
+ for (size_t i = 1; i < flats.size(); ++i) {
+ node = cord_internal::CordRepBtree::Append(node, flats[i]);
+ }
+ return node;
+}
+
+template <typename Fn>
+inline void CordVisitReps(cord_internal::CordRep* rep, Fn&& fn) {
+ fn(rep);
+ while (rep->tag == cord_internal::SUBSTRING) {
+ rep = rep->substring()->child;
+ fn(rep);
+ }
+ if (rep->tag == cord_internal::BTREE) {
+ for (cord_internal::CordRep* edge : rep->btree()->Edges()) {
+ CordVisitReps(edge, fn);
+ }
+ } else if (rep->tag == cord_internal::CONCAT) {
+ CordVisitReps(rep->concat()->left, fn);
+ CordVisitReps(rep->concat()->right, fn);
+ }
+}
+
+template <typename Predicate>
+inline std::vector<cord_internal::CordRep*> CordCollectRepsIf(
+ Predicate&& predicate, cord_internal::CordRep* rep) {
+ std::vector<cord_internal::CordRep*> reps;
+ CordVisitReps(rep, [&reps, &predicate](cord_internal::CordRep* rep) {
+ if (predicate(rep)) reps.push_back(rep);
+ });
+ return reps;
+}
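+
+// Illustrative example (not part of the original header): collect all flat
+// reps in a tree (flat reps are those with `tag >= FLAT`):
+//
+//   auto is_flat = [](cord_internal::CordRep* r) {
+//     return r->tag >= cord_internal::FLAT;
+//   };
+//   std::vector<cord_internal::CordRep*> flats =
+//       CordCollectRepsIf(is_flat, rep);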
+
+inline std::vector<cord_internal::CordRep*> CordCollectReps(
+ cord_internal::CordRep* rep) {
+ std::vector<cord_internal::CordRep*> reps;
+ auto fn = [&reps](cord_internal::CordRep* rep) { reps.push_back(rep); };
+ CordVisitReps(rep, fn);
+ return reps;
+}
+
+inline void CordToString(cord_internal::CordRep* rep, TString& s) {
+ size_t offset = 0;
+ size_t length = rep->length;
+ while (rep->tag == cord_internal::SUBSTRING) {
+ offset += rep->substring()->start;
+ rep = rep->substring()->child;
+ }
+ if (rep->tag == cord_internal::BTREE) {
+ for (cord_internal::CordRep* edge : rep->btree()->Edges()) {
+ CordToString(edge, s);
+ }
+ } else if (rep->tag >= cord_internal::FLAT) {
+ s.append(rep->flat()->Data() + offset, length);
+ } else if (rep->tag == cord_internal::EXTERNAL) {
+ s.append(rep->external()->base + offset, length);
+ } else {
+ ABSL_RAW_LOG(FATAL, "Unsupported tag %d", rep->tag);
+ }
+}
+
+inline TString CordToString(cord_internal::CordRep* rep) {
+ TString s;
+ s.reserve(rep->length);
+ CordToString(rep, s);
+ return s;
+}
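+
+// Illustrative round-trip sketch using the helpers above (not part of the
+// original header):
+//
+//   TString data = CreateRandomString(400);
+//   std::vector<cord_internal::CordRep*> flats =
+//       CreateFlatsFromString(data, 100);
+//   cord_internal::CordRepBtree* tree = CordRepBtreeFromFlats(flats);
+//   assert(CordToString(tree) == data);
+//   cord_internal::CordRep::Unref(tree);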
+
+// RAII Helper class to automatically unref reps on destruction.
+class AutoUnref {
+ public:
+ ~AutoUnref() {
+ for (CordRep* rep : unrefs_) CordRep::Unref(rep);
+ }
+
+ // Adds `rep` to the list of reps to be unreffed at destruction.
+ template <typename CordRepType>
+ CordRepType* Add(CordRepType* rep) {
+ unrefs_.push_back(rep);
+ return rep;
+ }
+
+ // Increments the reference count of `rep` by one, and adds it to
+ // the list of reps to be unreffed at destruction.
+ template <typename CordRepType>
+ CordRepType* Ref(CordRepType* rep) {
+ unrefs_.push_back(CordRep::Ref(rep));
+ return rep;
+ }
+
+ // Increments the reference count of `rep` by one if `condition` is true,
+ // and adds it to the list of reps to be unreffed at destruction.
+ template <typename CordRepType>
+ CordRepType* RefIf(bool condition, CordRepType* rep) {
+ if (condition) unrefs_.push_back(CordRep::Ref(rep));
+ return rep;
+ }
+
+ private:
+ using CordRep = y_absl::cord_internal::CordRep;
+
+ std::vector<CordRep*> unrefs_;
+};
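+
+// Illustrative usage (not part of the original header): reps registered with
+// AutoUnref are unreffed when the instance goes out of scope.
+//
+//   {
+//     AutoUnref cleanup;
+//     cord_internal::CordRepFlat* flat = cleanup.Add(MakeFlat("abc"));
+//     // use `flat`; no explicit Unref needed
+//   }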
+
+} // namespace cordrep_testing
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_TEST_UTIL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions.cc
new file mode 100644
index 00000000000..e9936f22fe5
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions.cc
@@ -0,0 +1,96 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/cordz_functions.h"
+
+#include <atomic>
+#include <cmath>
+#include <limits>
+#include <random>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/profiling/internal/exponential_biased.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+// The average interval until the next sample. A value of 0 disables profiling
+// while a value of 1 will profile all Cords.
+std::atomic<int> g_cordz_mean_interval(50000);
+
+} // namespace
+
+#ifdef ABSL_INTERNAL_CORDZ_ENABLED
+
+// Special negative 'not initialized' per thread value for cordz_next_sample.
+static constexpr int64_t kInitCordzNextSample = -1;
+
+ABSL_CONST_INIT thread_local int64_t cordz_next_sample = kInitCordzNextSample;
+
+// kIntervalIfDisabled is the number of profile-eligible events that need to
+// occur before the code will re-check whether cordz is still disabled.
+constexpr int64_t kIntervalIfDisabled = 1 << 16;
+
+ABSL_ATTRIBUTE_NOINLINE bool cordz_should_profile_slow() {
+ thread_local y_absl::profiling_internal::ExponentialBiased
+ exponential_biased_generator;
+ int32_t mean_interval = get_cordz_mean_interval();
+
+ // Check if we disabled profiling. If so, set the next sample to a "large"
+ // number to minimize the overhead of the should_profile codepath.
+ if (mean_interval <= 0) {
+ cordz_next_sample = kIntervalIfDisabled;
+ return false;
+ }
+
+ // Check if we're always sampling.
+ if (mean_interval == 1) {
+ cordz_next_sample = 1;
+ return true;
+ }
+
+ if (cordz_next_sample <= 0) {
+ // On the first check for the current thread, generate the initial stride
+ // and re-run cordz_should_profile() against it; otherwise the current
+ // stride has expired and this event is sampled.
+ const bool initialized = cordz_next_sample != kInitCordzNextSample;
+ cordz_next_sample = exponential_biased_generator.GetStride(mean_interval);
+ return initialized || cordz_should_profile();
+ }
+
+ --cordz_next_sample;
+ return false;
+}
+
+void cordz_set_next_sample_for_testing(int64_t next_sample) {
+ cordz_next_sample = next_sample;
+}
+
+#endif // ABSL_INTERNAL_CORDZ_ENABLED
+
+int32_t get_cordz_mean_interval() {
+ return g_cordz_mean_interval.load(std::memory_order_acquire);
+}
+
+void set_cordz_mean_interval(int32_t mean_interval) {
+ g_cordz_mean_interval.store(mean_interval, std::memory_order_release);
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions.h
new file mode 100644
index 00000000000..802efaa976f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions.h
@@ -0,0 +1,85 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_CORDZ_FUNCTIONS_H_
+#define ABSL_STRINGS_CORDZ_FUNCTIONS_H_
+
+#include <stdint.h>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/optimization.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Returns the current sample rate. This represents the average interval
+// between samples.
+int32_t get_cordz_mean_interval();
+
+// Sets the sample rate with the average interval between samples.
+void set_cordz_mean_interval(int32_t mean_interval);
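+
+// Illustrative usage (not part of the original header): a mean interval of 0
+// disables sampling entirely and 1 samples every Cord; e.g.
+// set_cordz_mean_interval(1000) samples roughly one in every 1000
+// profile-eligible Cords on average.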
+
+// Enable cordz unless any of the following applies:
+// - no thread local support
+// - MSVC build
+// - Android build
+// - Apple build
+// - DLL build
+// MSVC's static atomics are dynamically initialized in debug mode, which breaks
+// sampling.
+#if defined(ABSL_HAVE_THREAD_LOCAL) && !defined(_MSC_VER) && \
+ !defined(ABSL_BUILD_DLL) && !defined(ABSL_CONSUME_DLL) && \
+ !defined(__ANDROID__) && !defined(__APPLE__)
+#define ABSL_INTERNAL_CORDZ_ENABLED 1
+#endif
+
+#ifdef ABSL_INTERNAL_CORDZ_ENABLED
+
+// cordz_next_sample is the number of events until the next sample event. If
+// the value is 1 or less, the code will check on the next event if cordz is
+// enabled, and if so, will sample the Cord. cordz is only enabled when we can
+// use thread locals.
+ABSL_CONST_INIT extern thread_local int64_t cordz_next_sample;
+
+// Determines if the next sample should be profiled. If it is, the thread local
+// `cordz_next_sample` is set to the interval until the sample after it.
+bool cordz_should_profile_slow();
+
+// Returns true if the next cord should be sampled.
+inline bool cordz_should_profile() {
+ if (ABSL_PREDICT_TRUE(cordz_next_sample > 1)) {
+ cordz_next_sample--;
+ return false;
+ }
+ return cordz_should_profile_slow();
+}
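+
+// Illustrative behavior sketch (not part of the original header): with the
+// default mean interval of 50000, roughly one in 50000 profile-eligible events
+// returns true; every other call just decrements the thread local counter, so
+// the fast path is a single load, compare and store.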
+
+// Sets the interval until the next sample (for testing only)
+void cordz_set_next_sample_for_testing(int64_t next_sample);
+
+#else // ABSL_INTERNAL_CORDZ_ENABLED
+
+inline bool cordz_should_profile() { return false; }
+inline void cordz_set_next_sample_for_testing(int64_t) {}
+
+#endif // ABSL_INTERNAL_CORDZ_ENABLED
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_CORDZ_FUNCTIONS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions/ya.make
new file mode 100644
index 00000000000..06e99346da3
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions/ya.make
@@ -0,0 +1,32 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal)
+
+SRCS(
+ cordz_functions.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.cc
new file mode 100644
index 00000000000..707c6d2a9be
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.cc
@@ -0,0 +1,139 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "y_absl/strings/internal/cordz_handle.h"
+
+#include <atomic>
+
+#include "y_absl/base/internal/raw_logging.h" // For ABSL_RAW_CHECK
+#include "y_absl/base/internal/spinlock.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+using ::y_absl::base_internal::SpinLockHolder;
+
+ABSL_CONST_INIT CordzHandle::Queue CordzHandle::global_queue_(y_absl::kConstInit);
+
+CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
+ if (is_snapshot) {
+ SpinLockHolder lock(&queue_->mutex);
+ CordzHandle* dq_tail = queue_->dq_tail.load(std::memory_order_acquire);
+ if (dq_tail != nullptr) {
+ dq_prev_ = dq_tail;
+ dq_tail->dq_next_ = this;
+ }
+ queue_->dq_tail.store(this, std::memory_order_release);
+ }
+}
+
+CordzHandle::~CordzHandle() {
+ ODRCheck();
+ if (is_snapshot_) {
+ std::vector<CordzHandle*> to_delete;
+ {
+ SpinLockHolder lock(&queue_->mutex);
+ CordzHandle* next = dq_next_;
+ if (dq_prev_ == nullptr) {
+ // We were head of the queue, delete every CordzHandle until we reach
+ // either the end of the list, or a snapshot handle.
+ while (next && !next->is_snapshot_) {
+ to_delete.push_back(next);
+ next = next->dq_next_;
+ }
+ } else {
+ // Another CordzHandle precedes this one in the queue; don't delete anything.
+ dq_prev_->dq_next_ = next;
+ }
+ if (next) {
+ next->dq_prev_ = dq_prev_;
+ } else {
+ queue_->dq_tail.store(dq_prev_, std::memory_order_release);
+ }
+ }
+ for (CordzHandle* handle : to_delete) {
+ delete handle;
+ }
+ }
+}
+
+bool CordzHandle::SafeToDelete() const {
+ return is_snapshot_ || queue_->IsEmpty();
+}
+
+void CordzHandle::Delete(CordzHandle* handle) {
+ assert(handle);
+ if (handle) {
+ handle->ODRCheck();
+ Queue* const queue = handle->queue_;
+ if (!handle->SafeToDelete()) {
+ SpinLockHolder lock(&queue->mutex);
+ CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
+ if (dq_tail != nullptr) {
+ handle->dq_prev_ = dq_tail;
+ dq_tail->dq_next_ = handle;
+ queue->dq_tail.store(handle, std::memory_order_release);
+ return;
+ }
+ }
+ delete handle;
+ }
+}
+
+std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
+ std::vector<const CordzHandle*> handles;
+ SpinLockHolder lock(&global_queue_.mutex);
+ CordzHandle* dq_tail = global_queue_.dq_tail.load(std::memory_order_acquire);
+ for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
+ handles.push_back(p);
+ }
+ return handles;
+}
+
+bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
+ const CordzHandle* handle) const {
+ ODRCheck();
+ if (!is_snapshot_) return false;
+ if (handle == nullptr) return true;
+ if (handle->is_snapshot_) return false;
+ bool snapshot_found = false;
+ SpinLockHolder lock(&queue_->mutex);
+ for (const CordzHandle* p = queue_->dq_tail; p; p = p->dq_prev_) {
+ if (p == handle) return !snapshot_found;
+ if (p == this) snapshot_found = true;
+ }
+ ABSL_ASSERT(snapshot_found); // Assert that 'this' is in delete queue.
+ return true;
+}
+
+std::vector<const CordzHandle*>
+CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
+ ODRCheck();
+ std::vector<const CordzHandle*> handles;
+ if (!is_snapshot()) {
+ return handles;
+ }
+
+ SpinLockHolder lock(&queue_->mutex);
+ for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
+ if (!p->is_snapshot()) {
+ handles.push_back(p);
+ }
+ }
+ return handles;
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.h
new file mode 100644
index 00000000000..f181bc7d6b7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.h
@@ -0,0 +1,131 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_CORDZ_HANDLE_H_
+#define ABSL_STRINGS_CORDZ_HANDLE_H_
+
+#include <atomic>
+#include <vector>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/spinlock.h"
+#include "y_absl/synchronization/mutex.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// This base class allows multiple types of object (CordzInfo and
+// CordzSampleToken) to exist simultaneously on the delete queue (pointed to by
+// global_queue_.dq_tail and traversed using dq_prev_ and dq_next_). The
+// delete queue guarantees that once a profiler creates a CordzSampleToken and
+// has gained visibility into a CordzInfo object, that CordzInfo object will not
+// be deleted prematurely. This allows the profiler to inspect all CordzInfo
+// objects that are alive without needing to hold a global lock.
+class CordzHandle {
+ public:
+ CordzHandle() : CordzHandle(false) {}
+
+ bool is_snapshot() const { return is_snapshot_; }
+
+ // Returns true if this instance is safe to be deleted because it is either a
+ // snapshot, which is always safe to delete, or not included in the global
+ // delete queue and thus not included in any snapshot.
+ // Callers are responsible for making sure this instance can not be newly
+ // discovered by other threads. For example, CordzInfo instances first de-list
+ // themselves from the global CordzInfo list before determining if they are
+ // safe to be deleted directly.
+ // If SafeToDelete returns false, callers MUST use the Delete() method to
+ // safely queue CordzHandle instances for deletion.
+ bool SafeToDelete() const;
+
+ // Deletes the provided instance, or puts it on the delete queue to be deleted
+ // once there are no more sample tokens (snapshot) instances potentially
+ // referencing the instance. `handle` should not be null.
+ static void Delete(CordzHandle* handle);
+
+ // Returns the current entries in the delete queue in LIFO order.
+ static std::vector<const CordzHandle*> DiagnosticsGetDeleteQueue();
+
+ // Returns true if the provided handle is nullptr or guarded by this handle.
+ // Since the CordzSnapshot token is itself a CordzHandle, this method will
+ // allow tests to check if that token is keeping an arbitrary CordzHandle
+ // alive.
+ bool DiagnosticsHandleIsSafeToInspect(const CordzHandle* handle) const;
+
+ // Returns the current entries in the delete queue, in LIFO order, that are
+ // protected by this. CordzHandle objects are only placed on the delete queue
+ // after CordzHandle::Delete is called with them as an argument. Only
+ // CordzHandle objects that are not also CordzSnapshot objects will be
+ // included in the return vector. For each of the handles in the return
+ // vector, the earliest that their memory can be freed is when this
+ // CordzSnapshot object is deleted.
+ std::vector<const CordzHandle*> DiagnosticsGetSafeToInspectDeletedHandles();
+
+ protected:
+ explicit CordzHandle(bool is_snapshot);
+ virtual ~CordzHandle();
+
+ private:
+ // Global queue data. CordzHandle stores a pointer to the global queue
+ // instance to harden against ODR violations.
+ struct Queue {
+ constexpr explicit Queue(y_absl::ConstInitType)
+ : mutex(y_absl::kConstInit,
+ y_absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) {}
+
+ y_absl::base_internal::SpinLock mutex;
+ std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};
+
+ // Returns true if this delete queue is empty. This method does not acquire
+ // the lock, but does a 'load acquire' observation on the delete queue tail.
+ // It is used inside Delete() to check for the presence of a delete queue
+ // without holding the lock. The assumption is that the caller is in the
+ // state of 'being deleted', and can not be newly discovered by a concurrent
+ // 'being constructed' snapshot instance. Practically, this means that any
+ // such discovery (`find`, `first` or `next`, etc.) must have proper 'happens
+ // before / after' semantics and atomic fences.
+ bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ return dq_tail.load(std::memory_order_acquire) == nullptr;
+ }
+ };
+
+ void ODRCheck() const {
+#ifndef NDEBUG
+ ABSL_RAW_CHECK(queue_ == &global_queue_, "ODR violation in Cord");
+#endif
+ }
+
+ ABSL_CONST_INIT static Queue global_queue_;
+ Queue* const queue_ = &global_queue_;
+ const bool is_snapshot_;
+
+ // dq_prev_ and dq_next_ require the global queue mutex to be held.
+ // Unfortunately we can't use thread annotations such that the thread safety
+ // analysis understands that queue_ and global_queue_ are one and the same.
+ CordzHandle* dq_prev_ = nullptr;
+ CordzHandle* dq_next_ = nullptr;
+};
+
+class CordzSnapshot : public CordzHandle {
+ public:
+ CordzSnapshot() : CordzHandle(true) {}
+};
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_CORDZ_HANDLE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle/ya.make
new file mode 100644
index 00000000000..e1812171391
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle/ya.make
@@ -0,0 +1,47 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal)
+
+SRCS(
+ cordz_handle.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc
new file mode 100644
index 00000000000..e3849a0b498
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc
@@ -0,0 +1,445 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/cordz_info.h"
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/spinlock.h"
+#include "y_absl/container/inlined_vector.h"
+#include "y_absl/debugging/stacktrace.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_ring.h"
+#include "y_absl/strings/internal/cordz_handle.h"
+#include "y_absl/strings/internal/cordz_statistics.h"
+#include "y_absl/strings/internal/cordz_update_tracker.h"
+#include "y_absl/synchronization/mutex.h"
+#include "y_absl/types/span.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+using ::y_absl::base_internal::SpinLockHolder;
+
+constexpr int CordzInfo::kMaxStackDepth;
+
+ABSL_CONST_INIT CordzInfo::List CordzInfo::global_list_{y_absl::kConstInit};
+
+namespace {
+
+// CordRepAnalyzer performs the analysis of a cord.
+//
+// It computes absolute node counts and total memory usage, and an 'estimated
+// fair share memory usage' statistic.
+// Conceptually, it divides the 'memory usage' at each location in the 'cord
+// graph' by the cumulative reference count of that location. The cumulative
+// reference count is the factored total of all edges leading into that node.
+//
+// The top level node is treated specially: we assume the current thread
+// (typically the CordzHandler) holds a reference purely to perform a safe
+// analysis, and is not part of the application. So we subtract 1 from the
+// reference count of the top node to compute the 'application fair share',
+// excluding the reference of the current thread.
+//
+// An example of fair sharing, and why we multiply reference counts:
+// Assume we have 2 CordReps, both being a Substring referencing a Flat:
+// CordSubstring A (refcount = 5) --> child Flat C (refcount = 2)
+// CordSubstring B (refcount = 9) --> child Flat C (refcount = 2)
+//
+// Flat C has 2 incoming edges from the 2 substrings (refcount = 2) and is not
+// referenced directly anywhere else. Translated into a 'fair share', we then
+// attribute 50% of the memory (memory / refcount = 2) to each incoming edge.
+// Rep A has a refcount of 5, so we attribute each incoming edge 1 / 5th of the
+// memory cost below it, i.e.: the fair share of Rep A of the memory used by C
+// is then 'memory C / (refcount C * refcount A) + (memory A / refcount A)'.
+// It is also easy to see how all incoming edges add up to 100%.
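+//
+// As a worked numeric example (illustrative values, not from the original
+// comment): if Flat C occupies 100 bytes and Substring A occupies 40 bytes,
+// the fair share attributed to A is 100 / (2 * 5) + 40 / 5 = 18 bytes.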
+class CordRepAnalyzer {
+ public:
+ // Creates an analyzer instance binding to `statistics`.
+ explicit CordRepAnalyzer(CordzStatistics& statistics)
+ : statistics_(statistics) {}
+
+ // Analyzes the memory statistics and node counts for the provided `rep`, and
+ // adds the results to `statistics`. Note that node counts and memory sizes
+ // are not initialized, computed values are added to any existing values.
+ void AnalyzeCordRep(const CordRep* rep) {
+ // Process all linear nodes.
+ // As per the class comments, use refcount - 1 on the top level node, as the
+ // top level node is assumed to be referenced only for analysis purposes.
+ size_t refcount = rep->refcount.Get();
+ RepRef repref{rep, (refcount > 1) ? refcount - 1 : 1};
+
+ // Process all top level linear nodes (substrings and flats).
+ repref = CountLinearReps(repref, memory_usage_);
+
+ if (repref.rep != nullptr) {
+ if (repref.rep->tag == RING) {
+ AnalyzeRing(repref);
+ } else if (repref.rep->tag == BTREE) {
+ AnalyzeBtree(repref);
+ } else if (repref.rep->tag == CONCAT) {
+ AnalyzeConcat(repref);
+ } else {
+ // We should have either a concat, btree, or ring node if not null.
+ assert(false);
+ }
+ }
+
+ // Add the computed values to the output statistics.
+ statistics_.estimated_memory_usage += memory_usage_.total;
+ statistics_.estimated_fair_share_memory_usage +=
+ static_cast<size_t>(memory_usage_.fair_share);
+ }
+
+ private:
+ // RepRef identifies a CordRep* inside the Cord tree with its cumulative
+ // refcount including itself. For example, a tree consisting of a substring
+ // with a refcount of 3 and a child flat with a refcount of 4 will have RepRef
+ // refcounts of 3 and 12 respectively.
+ struct RepRef {
+ const CordRep* rep;
+ size_t refcount;
+
+ // Returns a 'child' RepRef which contains the cumulative reference count of
+ // this instance multiplied by the child's reference count.
+ RepRef Child(const CordRep* child) const {
+ return RepRef{child, refcount * child->refcount.Get()};
+ }
+ };
+
+ // Memory usage values
+ struct MemoryUsage {
+ size_t total = 0;
+ double fair_share = 0.0;
+
+ // Adds `size` memory usage to this instance, with a cumulative (recursive)
+ // reference count of `refcount`.
+ void Add(size_t size, size_t refcount) {
+ total += size;
+ fair_share += static_cast<double>(size) / refcount;
+ }
+ };
+
+ // Returns `repref` if `repref.rep` is not null and a CONCAT type.
+ // Asserts that `repref.rep` is a concat node or null.
+ static RepRef AssertConcat(RepRef repref) {
+ const CordRep* rep = repref.rep;
+ assert(rep == nullptr || rep->tag == CONCAT);
+ return (rep != nullptr && rep->tag == CONCAT) ? repref : RepRef{nullptr, 0};
+ }
+
+ // Counts a flat of the provided allocated size.
+ void CountFlat(size_t size) {
+ statistics_.node_count++;
+ statistics_.node_counts.flat++;
+ if (size <= 64) {
+ statistics_.node_counts.flat_64++;
+ } else if (size <= 128) {
+ statistics_.node_counts.flat_128++;
+ } else if (size <= 256) {
+ statistics_.node_counts.flat_256++;
+ } else if (size <= 512) {
+ statistics_.node_counts.flat_512++;
+ } else if (size <= 1024) {
+ statistics_.node_counts.flat_1k++;
+ }
+ }
+
+ // Processes 'linear' reps (substring, flat, external) not requiring iteration
+ // or recursion. Returns RepRef{nullptr} if all reps were processed, else
+ // returns the top-most non-linear concat, btree or ring cordrep.
+ // Node counts are updated into `statistics_`, memory usage is updated into
+ // `memory_usage`, which typically references `memory_usage_` except for ring
+ // buffers where we count children unrounded.
+ RepRef CountLinearReps(RepRef rep, MemoryUsage& memory_usage) {
+ // Consume all substrings
+ while (rep.rep->tag == SUBSTRING) {
+ statistics_.node_count++;
+ statistics_.node_counts.substring++;
+ memory_usage.Add(sizeof(CordRepSubstring), rep.refcount);
+ rep = rep.Child(rep.rep->substring()->child);
+ }
+
+ // Consume possible FLAT
+ if (rep.rep->tag >= FLAT) {
+ size_t size = rep.rep->flat()->AllocatedSize();
+ CountFlat(size);
+ memory_usage.Add(size, rep.refcount);
+ return RepRef{nullptr, 0};
+ }
+
+ // Consume possible external
+ if (rep.rep->tag == EXTERNAL) {
+ statistics_.node_count++;
+ statistics_.node_counts.external++;
+ size_t size = rep.rep->length + sizeof(CordRepExternalImpl<intptr_t>);
+ memory_usage.Add(size, rep.refcount);
+ return RepRef{nullptr, 0};
+ }
+
+ return rep;
+ }
+
+ // Analyzes the provided concat node in a flattened recursive way.
+ void AnalyzeConcat(RepRef rep) {
+ y_absl::InlinedVector<RepRef, 47> pending;
+
+ while (rep.rep != nullptr) {
+ const CordRepConcat* concat = rep.rep->concat();
+ RepRef left = rep.Child(concat->left);
+ RepRef right = rep.Child(concat->right);
+
+ statistics_.node_count++;
+ statistics_.node_counts.concat++;
+ memory_usage_.Add(sizeof(CordRepConcat), rep.refcount);
+
+ right = AssertConcat(CountLinearReps(right, memory_usage_));
+ rep = AssertConcat(CountLinearReps(left, memory_usage_));
+ if (rep.rep != nullptr) {
+ if (right.rep != nullptr) {
+ pending.push_back(right);
+ }
+ } else if (right.rep != nullptr) {
+ rep = right;
+ } else if (!pending.empty()) {
+ rep = pending.back();
+ pending.pop_back();
+ }
+ }
+ }
+
+ // Analyzes the provided ring.
+ void AnalyzeRing(RepRef rep) {
+ statistics_.node_count++;
+ statistics_.node_counts.ring++;
+ const CordRepRing* ring = rep.rep->ring();
+ memory_usage_.Add(CordRepRing::AllocSize(ring->capacity()), rep.refcount);
+ ring->ForEach([&](CordRepRing::index_type pos) {
+ CountLinearReps(rep.Child(ring->entry_child(pos)), memory_usage_);
+ });
+ }
+
+ // Analyzes the provided btree.
+ void AnalyzeBtree(RepRef rep) {
+ statistics_.node_count++;
+ statistics_.node_counts.btree++;
+ memory_usage_.Add(sizeof(CordRepBtree), rep.refcount);
+ const CordRepBtree* tree = rep.rep->btree();
+ if (tree->height() > 0) {
+ for (CordRep* edge : tree->Edges()) {
+ AnalyzeBtree(rep.Child(edge));
+ }
+ } else {
+ for (CordRep* edge : tree->Edges()) {
+ CountLinearReps(rep.Child(edge), memory_usage_);
+ }
+ }
+ }
+
+ CordzStatistics& statistics_;
+ MemoryUsage memory_usage_;
+};
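+
+// Illustrative usage sketch (mirrors GetCordzStatistics() further below; not
+// part of the original source):
+//
+//   CordzStatistics stats;
+//   CordRepAnalyzer analyzer(stats);
+//   analyzer.AnalyzeCordRep(rep);  // `rep` must be kept alive by the caller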
+
+} // namespace
+
+CordzInfo* CordzInfo::Head(const CordzSnapshot& snapshot) {
+ ABSL_ASSERT(snapshot.is_snapshot());
+
+ // We can do an 'unsafe' load of 'head', as we are guaranteed that the
+ // instance it points to is kept alive by the provided CordzSnapshot, so we
+ // can simply return the current value using an acquire load.
+ // We do enforce in DEBUG builds that the 'head' value is present in the
+ // delete queue: ODR violations may lead to 'snapshot' and 'global_list_'
+ // being in different libraries / modules.
+ CordzInfo* head = global_list_.head.load(std::memory_order_acquire);
+ ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(head));
+ return head;
+}
+
+CordzInfo* CordzInfo::Next(const CordzSnapshot& snapshot) const {
+ ABSL_ASSERT(snapshot.is_snapshot());
+
+ // Similar to the 'Head()' function, we do not need a mutex here.
+ CordzInfo* next = ci_next_.load(std::memory_order_acquire);
+ ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(this));
+ ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(next));
+ return next;
+}
+
+void CordzInfo::TrackCord(InlineData& cord, MethodIdentifier method) {
+ assert(cord.is_tree());
+ assert(!cord.is_profiled());
+ CordzInfo* cordz_info = new CordzInfo(cord.as_tree(), nullptr, method);
+ cord.set_cordz_info(cordz_info);
+ cordz_info->Track();
+}
+
+void CordzInfo::TrackCord(InlineData& cord, const InlineData& src,
+ MethodIdentifier method) {
+ assert(cord.is_tree());
+ assert(src.is_tree());
+
+ // Unsample the current cord, as it is being replaced with `src`, so any
+ // method history is no longer relevant.
+ CordzInfo* cordz_info = cord.cordz_info();
+ if (cordz_info != nullptr) cordz_info->Untrack();
+
+ // Start new cord sample
+ cordz_info = new CordzInfo(cord.as_tree(), src.cordz_info(), method);
+ cord.set_cordz_info(cordz_info);
+ cordz_info->Track();
+}
+
+void CordzInfo::MaybeTrackCordImpl(InlineData& cord, const InlineData& src,
+ MethodIdentifier method) {
+ if (src.is_profiled()) {
+ TrackCord(cord, src, method);
+ } else if (cord.is_profiled()) {
+ cord.cordz_info()->Untrack();
+ cord.clear_cordz_info();
+ }
+}
+
+CordzInfo::MethodIdentifier CordzInfo::GetParentMethod(const CordzInfo* src) {
+ if (src == nullptr) return MethodIdentifier::kUnknown;
+ return src->parent_method_ != MethodIdentifier::kUnknown ? src->parent_method_
+ : src->method_;
+}
+
+int CordzInfo::FillParentStack(const CordzInfo* src, void** stack) {
+ assert(stack);
+ if (src == nullptr) return 0;
+ if (src->parent_stack_depth_) {
+ memcpy(stack, src->parent_stack_, src->parent_stack_depth_ * sizeof(void*));
+ return src->parent_stack_depth_;
+ }
+ memcpy(stack, src->stack_, src->stack_depth_ * sizeof(void*));
+ return src->stack_depth_;
+}
+
+CordzInfo::CordzInfo(CordRep* rep, const CordzInfo* src,
+ MethodIdentifier method)
+ : rep_(rep),
+ stack_depth_(y_absl::GetStackTrace(stack_, /*max_depth=*/kMaxStackDepth,
+ /*skip_count=*/1)),
+ parent_stack_depth_(FillParentStack(src, parent_stack_)),
+ method_(method),
+ parent_method_(GetParentMethod(src)),
+ create_time_(y_absl::Now()) {
+ update_tracker_.LossyAdd(method);
+ if (src) {
+ // Copy parent counters.
+ update_tracker_.LossyAdd(src->update_tracker_);
+ }
+}
+
+CordzInfo::~CordzInfo() {
+ // `rep_` is potentially kept alive if CordzInfo is included
+ // in a collection snapshot (which should be rare).
+ if (ABSL_PREDICT_FALSE(rep_)) {
+ CordRep::Unref(rep_);
+ }
+}
+
+void CordzInfo::Track() {
+ SpinLockHolder l(&list_->mutex);
+
+ CordzInfo* const head = list_->head.load(std::memory_order_acquire);
+ if (head != nullptr) {
+ head->ci_prev_.store(this, std::memory_order_release);
+ }
+ ci_next_.store(head, std::memory_order_release);
+ list_->head.store(this, std::memory_order_release);
+}
+
+void CordzInfo::Untrack() {
+ ODRCheck();
+ {
+ SpinLockHolder l(&list_->mutex);
+
+ CordzInfo* const head = list_->head.load(std::memory_order_acquire);
+ CordzInfo* const next = ci_next_.load(std::memory_order_acquire);
+ CordzInfo* const prev = ci_prev_.load(std::memory_order_acquire);
+
+ if (next) {
+ ABSL_ASSERT(next->ci_prev_.load(std::memory_order_acquire) == this);
+ next->ci_prev_.store(prev, std::memory_order_release);
+ }
+ if (prev) {
+ ABSL_ASSERT(head != this);
+ ABSL_ASSERT(prev->ci_next_.load(std::memory_order_acquire) == this);
+ prev->ci_next_.store(next, std::memory_order_release);
+ } else {
+ ABSL_ASSERT(head == this);
+ list_->head.store(next, std::memory_order_release);
+ }
+ }
+
+ // We can no longer be discovered: perform a fast path check if we are not
+ // listed on any delete queue, so we can directly delete this instance.
+ if (SafeToDelete()) {
+ UnsafeSetCordRep(nullptr);
+ delete this;
+ return;
+ }
+
+ // We are likely part of a snapshot, extend the life of the CordRep
+ {
+ y_absl::MutexLock lock(&mutex_);
+ if (rep_) CordRep::Ref(rep_);
+ }
+ CordzHandle::Delete(this);
+}
+
+void CordzInfo::Lock(MethodIdentifier method)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mutex_) {
+ mutex_.Lock();
+ update_tracker_.LossyAdd(method);
+ assert(rep_);
+}
+
+void CordzInfo::Unlock() ABSL_UNLOCK_FUNCTION(mutex_) {
+ bool tracked = rep_ != nullptr;
+ mutex_.Unlock();
+ if (!tracked) {
+ Untrack();
+ }
+}
+
+y_absl::Span<void* const> CordzInfo::GetStack() const {
+ return y_absl::MakeConstSpan(stack_, stack_depth_);
+}
+
+y_absl::Span<void* const> CordzInfo::GetParentStack() const {
+ return y_absl::MakeConstSpan(parent_stack_, parent_stack_depth_);
+}
+
+CordzStatistics CordzInfo::GetCordzStatistics() const {
+ CordzStatistics stats;
+ stats.method = method_;
+ stats.parent_method = parent_method_;
+ stats.update_tracker = update_tracker_;
+ if (CordRep* rep = RefCordRep()) {
+ stats.size = rep->length;
+ CordRepAnalyzer analyzer(stats);
+ analyzer.AnalyzeCordRep(rep);
+ CordRep::Unref(rep);
+ }
+ return stats;
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.h
new file mode 100644
index 00000000000..e24214d2595
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.h
@@ -0,0 +1,298 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_CORDZ_INFO_H_
+#define ABSL_STRINGS_CORDZ_INFO_H_
+
+#include <atomic>
+#include <cstdint>
+#include <functional>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/spinlock.h"
+#include "y_absl/base/thread_annotations.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cordz_functions.h"
+#include "y_absl/strings/internal/cordz_handle.h"
+#include "y_absl/strings/internal/cordz_statistics.h"
+#include "y_absl/strings/internal/cordz_update_tracker.h"
+#include "y_absl/synchronization/mutex.h"
+#include "y_absl/types/span.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordzInfo tracks a profiled Cord. Each of these objects can be in two places.
+// If a Cord is alive, the CordzInfo will be in the global_cordz_infos map, and
+// can also be retrieved via the linked list starting with
+// global_cordz_infos_head and continued via the cordz_info_next() method. When
+// a Cord has reached the end of its lifespan, the CordzInfo object will be
+// migrated out of the global_cordz_infos list and the global_cordz_infos_map,
+// and will either be deleted or appended to the global_delete_queue. If it is
+// placed on the global_delete_queue, the CordzInfo object will be cleaned in
+// the destructor of a CordzSampleToken object.
+class ABSL_LOCKABLE CordzInfo : public CordzHandle {
+ public:
+ using MethodIdentifier = CordzUpdateTracker::MethodIdentifier;
+
+ // TrackCord creates a CordzInfo instance which tracks important metrics of
+ // a sampled cord, and stores the created CordzInfo instance into `cord`. All
+ // CordzInfo instances are placed in a global list which is used to discover
+ // and snapshot all actively tracked cords. Callers are responsible for
+ // calling UntrackCord() before the tracked Cord instance is deleted, or to
+ // stop tracking the sampled Cord. Callers are also responsible for guarding
+ // changes to the 'tree' value of a Cord (InlineData.tree) through the Lock()
+ // and Unlock() calls. Any change resulting in a new tree value for the cord
+ // requires a call to SetCordRep() before the old tree has been unreffed
+ // and/or deleted. `method` identifies the Cord public API method initiating
+ // the cord to be sampled.
+ // Requires `cord` to hold a tree, and `cord.cordz_info()` to be null.
+ static void TrackCord(InlineData& cord, MethodIdentifier method);
+
+ // Identical to TrackCord(), except that this function fills the
+ // `parent_stack` and `parent_method` properties of the returned CordzInfo
+ // instance from the provided `src` instance if `src` is sampled.
+ // This function should be used for sampling 'copy constructed' and 'copy
+ // assigned' cords. This function allows `cord` to be already sampled, in
+ // which case the CordzInfo will be newly created from `src`.
+ static void TrackCord(InlineData& cord, const InlineData& src,
+ MethodIdentifier method);
+
+ // Maybe sample the cord identified by 'cord' for method 'method'.
+ // Uses `cordz_should_profile` to randomly pick cords to be sampled, and if
+ // so, invokes `TrackCord` to start sampling `cord`.
+ static void MaybeTrackCord(InlineData& cord, MethodIdentifier method);
+
+ // Maybe sample the cord identified by 'cord' for method 'method'.
+ // `src` identifies a 'parent' cord which is assigned to `cord`, typically the
+ // input cord for a copy constructor, or an assign method such as `operator=`
+ // `cord` will be sampled if (and only if) `src` is sampled.
+ // If `cord` is currently being sampled and `src` is not being sampled, then
+ // this function will stop sampling the cord and reset the cord's cordz_info.
+ //
+ // Previously this function defined that `cord` will be sampled if either
+ // `src` is sampled, or if `cord` is randomly picked for sampling. However,
+ // this can cause issues, as there may be paths where some cord is assigned an
+ // indirect copy of it's own value. As such a 'string of copies' would then
+ // remain sampled (`src.is_profiled`), then assigning such a cord back to
+ // 'itself' creates a cycle where the cord will converge to 'always sampled`.
+ //
+ // For example:
+ //
+ // Cord x;
+ // for (...) {
+ // // Copy ctor --> y.is_profiled := x.is_profiled | random(...)
+ // Cord y = x;
+ // ...
+ // // Assign x = y --> x.is_profiled = y.is_profiled | random(...)
+ // // ==> x.is_profiled |= random(...)
+ // // ==> x converges to 'always profiled'
+ // x = y;
+ // }
+ static void MaybeTrackCord(InlineData& cord, const InlineData& src,
+ MethodIdentifier method);
+
+ // Stops tracking changes for a sampled cord, and deletes the provided info.
+ // This function must be called before the sampled cord instance is deleted,
+ // and before the root cordrep of the sampled cord is unreffed.
+ // This function may extend the lifetime of the cordrep in cases where the
+ // CordzInfo instance is being held by a concurrent collection thread.
+ void Untrack();
+
+  // Invokes `Untrack()` on `info` if `info` is not null.
+ static void MaybeUntrackCord(CordzInfo* info);
+
+ CordzInfo() = delete;
+ CordzInfo(const CordzInfo&) = delete;
+ CordzInfo& operator=(const CordzInfo&) = delete;
+
+ // Retrieves the oldest existing CordzInfo.
+ static CordzInfo* Head(const CordzSnapshot& snapshot)
+ ABSL_NO_THREAD_SAFETY_ANALYSIS;
+
+ // Retrieves the next oldest existing CordzInfo older than 'this' instance.
+ CordzInfo* Next(const CordzSnapshot& snapshot) const
+ ABSL_NO_THREAD_SAFETY_ANALYSIS;
+
+ // Locks this instance for the update identified by `method`.
+ // Increases the count for `method` in `update_tracker`.
+ void Lock(MethodIdentifier method) ABSL_EXCLUSIVE_LOCK_FUNCTION(mutex_);
+
+ // Unlocks this instance. If the contained `rep` has been set to null
+ // indicating the Cord has been cleared or is otherwise no longer sampled,
+ // then this method will delete this CordzInfo instance.
+ void Unlock() ABSL_UNLOCK_FUNCTION(mutex_);
+
+ // Asserts that this CordzInfo instance is locked.
+ void AssertHeld() ABSL_ASSERT_EXCLUSIVE_LOCK(mutex_);
+
+  // Updates the `rep` property of this instance. This method is invoked by
+ // Cord logic each time the root node of a sampled Cord changes, and before
+ // the old root reference count is deleted. This guarantees that collection
+ // code can always safely take a reference on the tracked cord.
+ // Requires a lock to be held through the `Lock()` method.
+ // TODO(b/117940323): annotate with ABSL_EXCLUSIVE_LOCKS_REQUIRED once all
+ // Cord code is in a state where this can be proven true by the compiler.
+ void SetCordRep(CordRep* rep);
+
+ // Returns the current `rep` property of this instance with a reference
+ // added, or null if this instance represents a cord that has since been
+ // deleted or untracked.
+ CordRep* RefCordRep() const ABSL_LOCKS_EXCLUDED(mutex_);
+
+ // Returns the current value of `rep_` for testing purposes only.
+ CordRep* GetCordRepForTesting() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ return rep_;
+ }
+
+ // Sets the current value of `rep_` for testing purposes only.
+ void SetCordRepForTesting(CordRep* rep) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ rep_ = rep;
+ }
+
+ // Returns the stack trace for where the cord was first sampled. Cords are
+ // potentially sampled when they promote from an inlined cord to a tree or
+ // ring representation, which is not necessarily the location where the cord
+ // was first created. Some cords are created as inlined cords, and only as
+  // data is added do they become a non-inlined cord. However, the location
+  // typically represents reasonably well where the cord is 'created'.
+ y_absl::Span<void* const> GetStack() const;
+
+ // Returns the stack trace for a sampled cord's 'parent stack trace'. This
+ // value may be set if the cord is sampled (promoted) after being created
+ // from, or being assigned the value of an existing (sampled) cord.
+ y_absl::Span<void* const> GetParentStack() const;
+
+ // Retrieves the CordzStatistics associated with this Cord. The statistics
+ // are only updated when a Cord goes through a mutation, such as an Append
+ // or RemovePrefix.
+ CordzStatistics GetCordzStatistics() const;
+
+ private:
+ using SpinLock = y_absl::base_internal::SpinLock;
+ using SpinLockHolder = ::y_absl::base_internal::SpinLockHolder;
+
+ // Global cordz info list. CordzInfo stores a pointer to the global list
+ // instance to harden against ODR violations.
+ struct List {
+ constexpr explicit List(y_absl::ConstInitType)
+ : mutex(y_absl::kConstInit,
+ y_absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) {}
+
+ SpinLock mutex;
+ std::atomic<CordzInfo*> head ABSL_GUARDED_BY(mutex){nullptr};
+ };
+
+ static constexpr int kMaxStackDepth = 64;
+
+ explicit CordzInfo(CordRep* rep, const CordzInfo* src,
+ MethodIdentifier method);
+ ~CordzInfo() override;
+
+ // Sets `rep_` without holding a lock.
+ void UnsafeSetCordRep(CordRep* rep) ABSL_NO_THREAD_SAFETY_ANALYSIS;
+
+ void Track();
+
+ // Returns the parent method from `src`, which is either `parent_method_` or
+ // `method_` depending on `parent_method_` being kUnknown.
+ // Returns kUnknown if `src` is null.
+ static MethodIdentifier GetParentMethod(const CordzInfo* src);
+
+ // Fills the provided stack from `src`, copying either `parent_stack_` or
+ // `stack_` depending on `parent_stack_` being empty, returning the size of
+ // the parent stack.
+ // Returns 0 if `src` is null.
+ static int FillParentStack(const CordzInfo* src, void** stack);
+
+ void ODRCheck() const {
+#ifndef NDEBUG
+ ABSL_RAW_CHECK(list_ == &global_list_, "ODR violation in Cord");
+#endif
+ }
+
+ // Non-inlined implementation of `MaybeTrackCord`, which is executed if
+ // either `src` is sampled or `cord` is sampled, and either untracks or
+ // tracks `cord` as documented per `MaybeTrackCord`.
+ static void MaybeTrackCordImpl(InlineData& cord, const InlineData& src,
+ MethodIdentifier method);
+
+ ABSL_CONST_INIT static List global_list_;
+ List* const list_ = &global_list_;
+
+ // ci_prev_ and ci_next_ require the global list mutex to be held.
+ // Unfortunately we can't use thread annotations such that the thread safety
+ // analysis understands that list_ and global_list_ are one and the same.
+ std::atomic<CordzInfo*> ci_prev_{nullptr};
+ std::atomic<CordzInfo*> ci_next_{nullptr};
+
+ mutable y_absl::Mutex mutex_;
+ CordRep* rep_ ABSL_GUARDED_BY(mutex_);
+
+ void* stack_[kMaxStackDepth];
+ void* parent_stack_[kMaxStackDepth];
+ const int stack_depth_;
+ const int parent_stack_depth_;
+ const MethodIdentifier method_;
+ const MethodIdentifier parent_method_;
+ CordzUpdateTracker update_tracker_;
+ const y_absl::Time create_time_;
+};
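+
+// Example (an illustrative sketch, not a prescribed call sequence; `cord` is
+// an InlineData holding a tree and `new_rep` is its hypothetical new root):
+//
+//   CordzInfo::MaybeTrackCord(cord, CordzInfo::MethodIdentifier::kAppendCord);
+//   if (CordzInfo* info = cord.cordz_info()) {
+//     info->Lock(CordzInfo::MethodIdentifier::kAppendCord);
+//     info->SetCordRep(new_rep);  // must precede unreffing the old tree
+//     info->Unlock();
+//   }
+//   CordzInfo::MaybeUntrackCord(cord.cordz_info());  // before `cord` is deleted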
+
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord(
+ InlineData& cord, MethodIdentifier method) {
+ if (ABSL_PREDICT_FALSE(cordz_should_profile())) {
+ TrackCord(cord, method);
+ }
+}
+
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord(
+ InlineData& cord, const InlineData& src, MethodIdentifier method) {
+ if (ABSL_PREDICT_FALSE(InlineData::is_either_profiled(cord, src))) {
+ MaybeTrackCordImpl(cord, src, method);
+ }
+}
+
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeUntrackCord(
+ CordzInfo* info) {
+ if (ABSL_PREDICT_FALSE(info)) {
+ info->Untrack();
+ }
+}
+
+inline void CordzInfo::AssertHeld() ABSL_ASSERT_EXCLUSIVE_LOCK(mutex_) {
+#ifndef NDEBUG
+ mutex_.AssertHeld();
+#endif
+}
+
+inline void CordzInfo::SetCordRep(CordRep* rep) {
+ AssertHeld();
+ rep_ = rep;
+}
+
+inline void CordzInfo::UnsafeSetCordRep(CordRep* rep) { rep_ = rep; }
+
+inline CordRep* CordzInfo::RefCordRep() const ABSL_LOCKS_EXCLUDED(mutex_) {
+ MutexLock lock(&mutex_);
+ return rep_ ? CordRep::Ref(rep_) : nullptr;
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_CORDZ_INFO_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info/ya.make
new file mode 100644
index 00000000000..930eaa8b05b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info/ya.make
@@ -0,0 +1,51 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal)
+
+SRCS(
+ cordz_info.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.cc
new file mode 100644
index 00000000000..f29678adb8e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.cc
@@ -0,0 +1,64 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/cordz_sample_token.h"
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/internal/cordz_handle.h"
+#include "y_absl/strings/internal/cordz_info.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+CordzSampleToken::Iterator& CordzSampleToken::Iterator::operator++() {
+ if (current_) {
+ current_ = current_->Next(*token_);
+ }
+ return *this;
+}
+
+CordzSampleToken::Iterator CordzSampleToken::Iterator::operator++(int) {
+ Iterator it(*this);
+ operator++();
+ return it;
+}
+
+bool operator==(const CordzSampleToken::Iterator& lhs,
+ const CordzSampleToken::Iterator& rhs) {
+ return lhs.current_ == rhs.current_ &&
+ (lhs.current_ == nullptr || lhs.token_ == rhs.token_);
+}
+
+bool operator!=(const CordzSampleToken::Iterator& lhs,
+ const CordzSampleToken::Iterator& rhs) {
+ return !(lhs == rhs);
+}
+
+CordzSampleToken::Iterator::reference CordzSampleToken::Iterator::operator*()
+ const {
+ return *current_;
+}
+
+CordzSampleToken::Iterator::pointer CordzSampleToken::Iterator::operator->()
+ const {
+ return current_;
+}
+
+CordzSampleToken::Iterator::Iterator(const CordzSampleToken* token)
+ : token_(token), current_(CordzInfo::Head(*token)) {}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.h
new file mode 100644
index 00000000000..85bed6dae8f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.h
@@ -0,0 +1,97 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/internal/cordz_handle.h"
+#include "y_absl/strings/internal/cordz_info.h"
+
+#ifndef ABSL_STRINGS_CORDZ_SAMPLE_TOKEN_H_
+#define ABSL_STRINGS_CORDZ_SAMPLE_TOKEN_H_
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// The existence of a CordzSampleToken guarantees that a reader can traverse the
+// global_cordz_infos_head linked-list without needing to hold a mutex. When a
+// CordzSampleToken exists, all CordzInfo objects that would be destroyed are
+// instead appended to a deletion queue. When the CordzSampleToken is destroyed,
+// it will also clean up any of these CordzInfo objects.
+//
+// E.g., ST are CordzSampleToken objects and CH are CordzHandle objects.
+// ST1 <- CH1 <- CH2 <- ST2 <- CH3 <- global_delete_queue_tail
+//
+// This list tracks that CH1 and CH2 were created after ST1, so the thread
+// holding ST1 might have a reference to CH1, CH2, ST2, and CH3. However, ST2 was
+// created later, so the thread holding the ST2 token cannot have a reference to
+// ST1, CH1, or CH2. If ST1 is cleaned up first, that thread will delete ST1,
+// CH1, and CH2. If instead ST2 is cleaned up first, that thread will only
+// delete ST2.
+//
+// If ST1 is cleaned up first, the new list will be:
+// ST2 <- CH3 <- global_delete_queue_tail
+//
+// If ST2 is cleaned up first, the new list will be:
+// ST1 <- CH1 <- CH2 <- CH3 <- global_delete_queue_tail
+//
+// All new CordzHandle objects are appended to the list, so if a new thread
+// comes along before either ST1 or ST2 are cleaned up, the new list will be:
+// ST1 <- CH1 <- CH2 <- ST2 <- CH3 <- ST3 <- global_delete_queue_tail
+//
+// A thread must hold the global_delete_queue_mu mutex whenever it's altering
+// this list.
+//
+// It is safe for a thread that holds a CordzSampleToken to read
+// global_cordz_infos at any time since the objects it is able to retrieve will
+// not be deleted while the CordzSampleToken exists.
+class CordzSampleToken : public CordzSnapshot {
+ public:
+ class Iterator {
+ public:
+ using iterator_category = std::input_iterator_tag;
+ using value_type = const CordzInfo&;
+ using difference_type = ptrdiff_t;
+ using pointer = const CordzInfo*;
+ using reference = value_type;
+
+ Iterator() = default;
+
+ Iterator& operator++();
+ Iterator operator++(int);
+ friend bool operator==(const Iterator& lhs, const Iterator& rhs);
+ friend bool operator!=(const Iterator& lhs, const Iterator& rhs);
+ reference operator*() const;
+ pointer operator->() const;
+
+ private:
+ friend class CordzSampleToken;
+ explicit Iterator(const CordzSampleToken* token);
+
+ const CordzSampleToken* token_ = nullptr;
+ pointer current_ = nullptr;
+ };
+
+ CordzSampleToken() = default;
+ CordzSampleToken(const CordzSampleToken&) = delete;
+ CordzSampleToken& operator=(const CordzSampleToken&) = delete;
+
+ Iterator begin() { return Iterator(this); }
+ Iterator end() { return Iterator(); }
+};
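+
+// Example (an illustrative sketch; the loop body is hypothetical):
+//
+//   CordzSampleToken token;
+//   for (const CordzInfo& info : token) {
+//     CordzStatistics stats = info.GetCordzStatistics();
+//     // ... aggregate or report `stats` ...
+//   }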
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_CORDZ_SAMPLE_TOKEN_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token/ya.make
new file mode 100644
index 00000000000..4d46274f4e6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token/ya.make
@@ -0,0 +1,52 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal)
+
+SRCS(
+ cordz_sample_token.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_statistics.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_statistics.h
new file mode 100644
index 00000000000..34e7c34bd83
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_statistics.h
@@ -0,0 +1,87 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORDZ_STATISTICS_H_
+#define ABSL_STRINGS_INTERNAL_CORDZ_STATISTICS_H_
+
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/internal/cordz_update_tracker.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordzStatistics captures some meta information about a Cord's shape.
+struct CordzStatistics {
+ using MethodIdentifier = CordzUpdateTracker::MethodIdentifier;
+
+ // Node counts information
+ struct NodeCounts {
+ size_t flat = 0; // #flats
+ size_t flat_64 = 0; // #flats up to 64 bytes
+ size_t flat_128 = 0; // #flats up to 128 bytes
+ size_t flat_256 = 0; // #flats up to 256 bytes
+ size_t flat_512 = 0; // #flats up to 512 bytes
+ size_t flat_1k = 0; // #flats up to 1K bytes
+ size_t external = 0; // #external reps
+ size_t substring = 0; // #substring reps
+ size_t concat = 0; // #concat reps
+ size_t ring = 0; // #ring buffer reps
+ size_t btree = 0; // #btree reps
+ };
+
+ // The size of the cord in bytes. This matches the result of Cord::size().
+ int64_t size = 0;
+
+ // The estimated memory used by the sampled cord. This value matches the
+ // value as reported by Cord::EstimatedMemoryUsage().
+ // A value of 0 implies the property has not been recorded.
+ int64_t estimated_memory_usage = 0;
+
+ // The effective memory used by the sampled cord, inversely weighted by the
+ // effective indegree of each allocated node. This is a representation of the
+ // fair share of memory usage that should be attributed to the sampled cord.
+ // This value is more useful for cases where one or more nodes are referenced
+ // by multiple Cord instances, and for cases where a Cord includes the same
+ // node multiple times (either directly or indirectly).
+ // A value of 0 implies the property has not been recorded.
+ int64_t estimated_fair_share_memory_usage = 0;
+
+ // The total number of nodes referenced by this cord.
+ // For ring buffer Cords, this includes the 'ring buffer' node.
+ // For btree Cords, this includes all 'CordRepBtree' tree nodes as well as all
+ // the substring, flat and external nodes referenced by the tree.
+ // A value of 0 implies the property has not been recorded.
+ int64_t node_count = 0;
+
+ // Detailed node counts per type
+ NodeCounts node_counts;
+
+ // The cord method responsible for sampling the cord.
+ MethodIdentifier method = MethodIdentifier::kUnknown;
+
+ // The cord method responsible for sampling the parent cord if applicable.
+ MethodIdentifier parent_method = MethodIdentifier::kUnknown;
+
+ // Update tracker tracking invocation count per cord method.
+ CordzUpdateTracker update_tracker;
+};
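+
+// Example (an illustrative sketch; `info` names a sampled cord's CordzInfo):
+//
+//   CordzStatistics stats = info->GetCordzStatistics();
+//   int64_t flats = stats.node_counts.flat;
+//   int64_t appends =
+//       stats.update_tracker.Value(CordzUpdateTracker::kAppendCord);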
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORDZ_STATISTICS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_scope.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_scope.h
new file mode 100644
index 00000000000..66e0e8f51b0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_scope.h
@@ -0,0 +1,71 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_
+#define ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/base/thread_annotations.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cordz_info.h"
+#include "y_absl/strings/internal/cordz_update_tracker.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordzUpdateScope scopes an update to the provided CordzInfo.
+// The class invokes `info->Lock(method)` and `info->Unlock()` to guard
+// cordrep updates. This class does nothing if `info` is null.
+// See also the `Lock`, `Unlock` and `SetCordRep` methods in `CordzInfo`.
+class ABSL_SCOPED_LOCKABLE CordzUpdateScope {
+ public:
+ CordzUpdateScope(CordzInfo* info, CordzUpdateTracker::MethodIdentifier method)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(info)
+ : info_(info) {
+ if (ABSL_PREDICT_FALSE(info_)) {
+ info->Lock(method);
+ }
+ }
+
+ // CordzUpdateScope can not be copied or assigned to.
+ CordzUpdateScope(CordzUpdateScope&& rhs) = delete;
+ CordzUpdateScope(const CordzUpdateScope&) = delete;
+ CordzUpdateScope& operator=(CordzUpdateScope&& rhs) = delete;
+ CordzUpdateScope& operator=(const CordzUpdateScope&) = delete;
+
+ ~CordzUpdateScope() ABSL_UNLOCK_FUNCTION() {
+ if (ABSL_PREDICT_FALSE(info_)) {
+ info_->Unlock();
+ }
+ }
+
+ void SetCordRep(CordRep* rep) const {
+ if (ABSL_PREDICT_FALSE(info_)) {
+ info_->SetCordRep(rep);
+ }
+ }
+
+ CordzInfo* info() const { return info_; }
+
+ private:
+ CordzInfo* info_;
+};
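+
+// Example (an illustrative sketch; `info` and `new_rep` are hypothetical):
+//
+//   CordzUpdateScope scope(info, CordzUpdateTracker::kAppendCord);
+//   scope.SetCordRep(new_rep);
+//   // `scope` calls `info->Unlock()` on destruction; every call above is a
+//   // no-op when `info` is null.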
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_tracker.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_tracker.h
new file mode 100644
index 00000000000..48a449b4bf6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_tracker.h
@@ -0,0 +1,121 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_
+#define ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordzUpdateTracker tracks counters for Cord update methods.
+//
+// The purpose of CordzUpdateTracker is to track the number of calls to methods
+// updating Cord data for sampled cords. The class internally uses 'lossy'
+// atomic operations: Cord is thread-compatible, so there is no need to
+// synchronize updates. However, Cordz collection threads may call `Value()` at
+// any point, so the class needs to provide thread-safe access.
+//
+// This class is thread-safe. But as per the comments above, all non-const
+// methods should be used single-threaded only: updates are thread-safe but
+// lossy.
+class CordzUpdateTracker {
+ public:
+ // Tracked update methods.
+ enum MethodIdentifier {
+ kUnknown,
+ kAppendBuffer,
+ kAppendCord,
+ kAppendExternalMemory,
+ kAppendString,
+ kAssignCord,
+ kAssignString,
+ kClear,
+ kConstructorCord,
+ kConstructorString,
+ kCordReader,
+ kFlatten,
+ kGetAppendRegion,
+ kMakeCordFromExternal,
+ kMoveAppendCord,
+ kMoveAssignCord,
+ kMovePrependCord,
+ kPrependBuffer,
+ kPrependCord,
+ kPrependString,
+ kRemovePrefix,
+ kRemoveSuffix,
+ kSubCord,
+
+ // kNumMethods defines the number of entries: must be the last entry.
+ kNumMethods,
+ };
+
+ // Constructs a new instance. All counters are zero-initialized.
+ constexpr CordzUpdateTracker() noexcept : values_{} {}
+
+ // Copy constructs a new instance.
+ CordzUpdateTracker(const CordzUpdateTracker& rhs) noexcept { *this = rhs; }
+
+ // Assigns the provided value to this instance.
+ CordzUpdateTracker& operator=(const CordzUpdateTracker& rhs) noexcept {
+ for (int i = 0; i < kNumMethods; ++i) {
+ values_[i].store(rhs.values_[i].load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+ }
+ return *this;
+ }
+
+ // Returns the value for the specified method.
+ int64_t Value(MethodIdentifier method) const {
+ return values_[method].load(std::memory_order_relaxed);
+ }
+
+  // Increases the value for the specified method by `n`.
+ void LossyAdd(MethodIdentifier method, int64_t n = 1) {
+ auto& value = values_[method];
+ value.store(value.load(std::memory_order_relaxed) + n,
+ std::memory_order_relaxed);
+ }
+
+  // Adds all the values from `src` to this instance.
+ void LossyAdd(const CordzUpdateTracker& src) {
+ for (int i = 0; i < kNumMethods; ++i) {
+ MethodIdentifier method = static_cast<MethodIdentifier>(i);
+ if (int64_t value = src.Value(method)) {
+ LossyAdd(method, value);
+ }
+ }
+ }
+
+ private:
+  // Until C++20, std::atomic is not constexpr default-constructible, so we
+  // need a wrapper to make this class constexpr constructible.
+ class Counter : public std::atomic<int64_t> {
+ public:
+ constexpr Counter() noexcept : std::atomic<int64_t>(0) {}
+ };
+
+ Counter values_[kNumMethods];
+};
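+
+// Example (an illustrative sketch; values assume a freshly constructed
+// tracker):
+//
+//   CordzUpdateTracker tracker;
+//   tracker.LossyAdd(CordzUpdateTracker::kAppendString);
+//   tracker.LossyAdd(CordzUpdateTracker::kAppendString, 2);
+//   int64_t n = tracker.Value(CordzUpdateTracker::kAppendString);  // n == 3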
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc
new file mode 100644
index 00000000000..01b8974983d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc
@@ -0,0 +1,180 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/escaping.h"
+
+#include "y_absl/base/internal/endian.h"
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+const char kBase64Chars[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
+ // Base64 encodes three bytes of input at a time. If the input is not
+ // divisible by three, we pad as appropriate.
+ //
+ // (from https://tools.ietf.org/html/rfc3548)
+ // Special processing is performed if fewer than 24 bits are available
+ // at the end of the data being encoded. A full encoding quantum is
+ // always completed at the end of a quantity. When fewer than 24 input
+ // bits are available in an input group, zero bits are added (on the
+ // right) to form an integral number of 6-bit groups. Padding at the
+ // end of the data is performed using the '=' character. Since all base
+ // 64 input is an integral number of octets, only the following cases
+ // can arise:
+
+ // Base64 encodes each three bytes of input into four bytes of output.
+ size_t len = (input_len / 3) * 4;
+
+ if (input_len % 3 == 0) {
+ // (from https://tools.ietf.org/html/rfc3548)
+ // (1) the final quantum of encoding input is an integral multiple of 24
+ // bits; here, the final unit of encoded output will be an integral
+ // multiple of 4 characters with no "=" padding,
+ } else if (input_len % 3 == 1) {
+ // (from https://tools.ietf.org/html/rfc3548)
+ // (2) the final quantum of encoding input is exactly 8 bits; here, the
+ // final unit of encoded output will be two characters followed by two
+ // "=" padding characters, or
+ len += 2;
+ if (do_padding) {
+ len += 2;
+ }
+ } else { // (input_len % 3 == 2)
+ // (from https://tools.ietf.org/html/rfc3548)
+ // (3) the final quantum of encoding input is exactly 16 bits; here, the
+ // final unit of encoded output will be three characters followed by one
+ // "=" padding character.
+ len += 3;
+ if (do_padding) {
+ len += 1;
+ }
+ }
+
+ assert(len >= input_len); // make sure we didn't overflow
+ return len;
+}
+
+size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
+ size_t szdest, const char* base64,
+ bool do_padding) {
+ static const char kPad64 = '=';
+
+ if (szsrc * 4 > szdest * 3) return 0;
+
+ char* cur_dest = dest;
+ const unsigned char* cur_src = src;
+
+ char* const limit_dest = dest + szdest;
+ const unsigned char* const limit_src = src + szsrc;
+
+ // Three bytes of data encodes to four characters of cyphertext.
+ // So we can pump through three-byte chunks atomically.
+ if (szsrc >= 3) { // "limit_src - 3" is UB if szsrc < 3.
+ while (cur_src < limit_src - 3) { // While we have >= 32 bits.
+ uint32_t in = y_absl::big_endian::Load32(cur_src) >> 8;
+
+ cur_dest[0] = base64[in >> 18];
+ in &= 0x3FFFF;
+ cur_dest[1] = base64[in >> 12];
+ in &= 0xFFF;
+ cur_dest[2] = base64[in >> 6];
+ in &= 0x3F;
+ cur_dest[3] = base64[in];
+
+ cur_dest += 4;
+ cur_src += 3;
+ }
+ }
+ // To save time, we didn't update szdest or szsrc in the loop. So do it now.
+ szdest = limit_dest - cur_dest;
+ szsrc = limit_src - cur_src;
+
+  // Now deal with the tail (<= 3 bytes).
+ switch (szsrc) {
+ case 0:
+ // Nothing left; nothing more to do.
+ break;
+ case 1: {
+ // One byte left: this encodes to two characters, and (optionally)
+ // two pad characters to round out the four-character cypherblock.
+ if (szdest < 2) return 0;
+ uint32_t in = cur_src[0];
+ cur_dest[0] = base64[in >> 2];
+ in &= 0x3;
+ cur_dest[1] = base64[in << 4];
+ cur_dest += 2;
+ szdest -= 2;
+ if (do_padding) {
+ if (szdest < 2) return 0;
+ cur_dest[0] = kPad64;
+ cur_dest[1] = kPad64;
+ cur_dest += 2;
+ szdest -= 2;
+ }
+ break;
+ }
+ case 2: {
+ // Two bytes left: this encodes to three characters, and (optionally)
+ // one pad character to round out the four-character cypherblock.
+ if (szdest < 3) return 0;
+ uint32_t in = y_absl::big_endian::Load16(cur_src);
+ cur_dest[0] = base64[in >> 10];
+ in &= 0x3FF;
+ cur_dest[1] = base64[in >> 4];
+ in &= 0x00F;
+ cur_dest[2] = base64[in << 2];
+ cur_dest += 3;
+ szdest -= 3;
+ if (do_padding) {
+ if (szdest < 1) return 0;
+ cur_dest[0] = kPad64;
+ cur_dest += 1;
+ szdest -= 1;
+ }
+ break;
+ }
+ case 3: {
+ // Three bytes left: same as in the big loop above. We can't do this in
+ // the loop because the loop above always reads 4 bytes, and the fourth
+ // byte is past the end of the input.
+ if (szdest < 4) return 0;
+ uint32_t in = (cur_src[0] << 16) + y_absl::big_endian::Load16(cur_src + 1);
+ cur_dest[0] = base64[in >> 18];
+ in &= 0x3FFFF;
+ cur_dest[1] = base64[in >> 12];
+ in &= 0xFFF;
+ cur_dest[2] = base64[in >> 6];
+ in &= 0x3F;
+ cur_dest[3] = base64[in];
+ cur_dest += 4;
+ szdest -= 4;
+ break;
+ }
+ default:
+ // Should not be reached: blocks of 4 bytes are handled
+ // in the while loop before this switch statement.
+ ABSL_RAW_LOG(FATAL, "Logic problem? szsrc = %zu", szsrc);
+ break;
+ }
+ return (cur_dest - dest);
+}
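+
+// Example (an illustrative sketch; the buffer size follows
+// CalculateBase64EscapedLenInternal(4, true) == 8):
+//
+//   const unsigned char src[] = {'a', 'b', 'c', 'd'};
+//   char buf[8];
+//   size_t n = Base64EscapeInternal(src, 4, buf, sizeof(buf), kBase64Chars,
+//                                   /*do_padding=*/true);
+//   // n == 8 and buf holds "YWJjZA==".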
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.h
new file mode 100644
index 00000000000..d62fc0fbcba
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.h
@@ -0,0 +1,58 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_ESCAPING_H_
+#define ABSL_STRINGS_INTERNAL_ESCAPING_H_
+
+#include <cassert>
+
+#include "y_absl/strings/internal/resize_uninitialized.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+ABSL_CONST_INIT extern const char kBase64Chars[];
+
+// Calculates how long a string will be when it is base64 encoded given its
+// length and whether or not the result should be padded.
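+//
+// For example: an input of 4 bytes encodes one full 3-byte group (4 chars)
+// plus one leftover byte (2 chars), so the result is 6 without padding and
+// 8 with padding.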
+size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding);
+
+// Base64-encodes `src` using the alphabet provided in `base64` and writes the
+// result to `dest`. If `do_padding` is true, `dest` is padded with '=' chars
+// until its length is a multiple of 4. Returns the length of `dest`.
+size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
+ size_t szdest, const char* base64, bool do_padding);
+
+// Base64-encodes `src` using the alphabet provided in `base64` and writes the
+// result to `dest`. If `do_padding` is true, `dest` is padded with '=' chars
+// until its length is a multiple of 4.
+template <typename String>
+void Base64EscapeInternal(const unsigned char* src, size_t szsrc, String* dest,
+ bool do_padding, const char* base64_chars) {
+ const size_t calc_escaped_size =
+ CalculateBase64EscapedLenInternal(szsrc, do_padding);
+ STLStringResizeUninitialized(dest, calc_escaped_size);
+
+ const size_t escaped_len = Base64EscapeInternal(
+ src, szsrc, &(*dest)[0], dest->size(), base64_chars, do_padding);
+ assert(calc_escaped_size == escaped_len);
+ dest->erase(escaped_len);
+}
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_ESCAPING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping_test_common.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping_test_common.h
new file mode 100644
index 00000000000..f1451272251
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping_test_common.h
@@ -0,0 +1,133 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This test contains common things needed by both escaping_test.cc and
+// escaping_benchmark.cc.
+
+#ifndef ABSL_STRINGS_INTERNAL_ESCAPING_TEST_COMMON_H_
+#define ABSL_STRINGS_INTERNAL_ESCAPING_TEST_COMMON_H_
+
+#include <array>
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+struct base64_testcase {
+ y_absl::string_view plaintext;
+ y_absl::string_view cyphertext;
+};
+
+inline const std::array<base64_testcase, 5>& base64_strings() {
+ static const std::array<base64_testcase, 5> testcase{{
+ // Some google quotes
+ // Cyphertext created with "uuencode (GNU sharutils) 4.6.3"
+ // (Note that we're testing the websafe encoding, though, so if
+ // you add messages, be sure to run "tr -- '+/' '-_'" on the output)
+ { "I was always good at math and science, and I never realized "
+ "that was unusual or somehow undesirable. So one of the things "
+ "I care a lot about is helping to remove that stigma, "
+ "to show girls that you can be feminine, you can like the things "
+ "that girls like, but you can also be really good at technology. "
+ "You can be really good at building things."
+ " - Marissa Meyer, Newsweek, 2010-12-22" "\n",
+
+ "SSB3YXMgYWx3YXlzIGdvb2QgYXQgbWF0aCBhbmQgc2NpZW5jZSwgYW5kIEkg"
+ "bmV2ZXIgcmVhbGl6ZWQgdGhhdCB3YXMgdW51c3VhbCBvciBzb21laG93IHVu"
+ "ZGVzaXJhYmxlLiBTbyBvbmUgb2YgdGhlIHRoaW5ncyBJIGNhcmUgYSBsb3Qg"
+ "YWJvdXQgaXMgaGVscGluZyB0byByZW1vdmUgdGhhdCBzdGlnbWEsIHRvIHNo"
+ "b3cgZ2lybHMgdGhhdCB5b3UgY2FuIGJlIGZlbWluaW5lLCB5b3UgY2FuIGxp"
+ "a2UgdGhlIHRoaW5ncyB0aGF0IGdpcmxzIGxpa2UsIGJ1dCB5b3UgY2FuIGFs"
+ "c28gYmUgcmVhbGx5IGdvb2QgYXQgdGVjaG5vbG9neS4gWW91IGNhbiBiZSBy"
+ "ZWFsbHkgZ29vZCBhdCBidWlsZGluZyB0aGluZ3MuIC0gTWFyaXNzYSBNZXll"
+ "ciwgTmV3c3dlZWssIDIwMTAtMTItMjIK" },
+
+ { "Typical first year for a new cluster: "
+ "~0.5 overheating "
+ "~1 PDU failure "
+ "~1 rack-move "
+ "~1 network rewiring "
+ "~20 rack failures "
+ "~5 racks go wonky "
+ "~8 network maintenances "
+ "~12 router reloads "
+ "~3 router failures "
+ "~dozens of minor 30-second blips for dns "
+ "~1000 individual machine failures "
+ "~thousands of hard drive failures "
+ "slow disks, bad memory, misconfigured machines, flaky machines, etc."
+ " - Jeff Dean, The Joys of Real Hardware" "\n",
+
+ "VHlwaWNhbCBmaXJzdCB5ZWFyIGZvciBhIG5ldyBjbHVzdGVyOiB-MC41IG92"
+ "ZXJoZWF0aW5nIH4xIFBEVSBmYWlsdXJlIH4xIHJhY2stbW92ZSB-MSBuZXR3"
+ "b3JrIHJld2lyaW5nIH4yMCByYWNrIGZhaWx1cmVzIH41IHJhY2tzIGdvIHdv"
+ "bmt5IH44IG5ldHdvcmsgbWFpbnRlbmFuY2VzIH4xMiByb3V0ZXIgcmVsb2Fk"
+ "cyB-MyByb3V0ZXIgZmFpbHVyZXMgfmRvemVucyBvZiBtaW5vciAzMC1zZWNv"
+ "bmQgYmxpcHMgZm9yIGRucyB-MTAwMCBpbmRpdmlkdWFsIG1hY2hpbmUgZmFp"
+ "bHVyZXMgfnRob3VzYW5kcyBvZiBoYXJkIGRyaXZlIGZhaWx1cmVzIHNsb3cg"
+ "ZGlza3MsIGJhZCBtZW1vcnksIG1pc2NvbmZpZ3VyZWQgbWFjaGluZXMsIGZs"
+ "YWt5IG1hY2hpbmVzLCBldGMuIC0gSmVmZiBEZWFuLCBUaGUgSm95cyBvZiBS"
+ "ZWFsIEhhcmR3YXJlCg" },
+
+ { "I'm the head of the webspam team at Google. "
+ "That means that if you type your name into Google and get porn back, "
+ "it's my fault. Unless you're a porn star, in which case porn is a "
+ "completely reasonable response."
+ " - Matt Cutts, Google Plus" "\n",
+
+ "SSdtIHRoZSBoZWFkIG9mIHRoZSB3ZWJzcGFtIHRlYW0gYXQgR29vZ2xlLiAg"
+ "VGhhdCBtZWFucyB0aGF0IGlmIHlvdSB0eXBlIHlvdXIgbmFtZSBpbnRvIEdv"
+ "b2dsZSBhbmQgZ2V0IHBvcm4gYmFjaywgaXQncyBteSBmYXVsdC4gVW5sZXNz"
+ "IHlvdSdyZSBhIHBvcm4gc3RhciwgaW4gd2hpY2ggY2FzZSBwb3JuIGlzIGEg"
+ "Y29tcGxldGVseSByZWFzb25hYmxlIHJlc3BvbnNlLiAtIE1hdHQgQ3V0dHMs"
+ "IEdvb2dsZSBQbHVzCg" },
+
+ { "It will still be a long time before machines approach human "
+ "intelligence. "
+ "But luckily, machines don't actually have to be intelligent; "
+ "they just have to fake it. Access to a wealth of information, "
+ "combined with a rudimentary decision-making capacity, "
+ "can often be almost as useful. Of course, the results are better yet "
+ "when coupled with intelligence. A reference librarian with access to "
+ "a good search engine is a formidable tool."
+ " - Craig Silverstein, Siemens Pictures of the Future, Spring 2004"
+ "\n",
+
+ "SXQgd2lsbCBzdGlsbCBiZSBhIGxvbmcgdGltZSBiZWZvcmUgbWFjaGluZXMg"
+ "YXBwcm9hY2ggaHVtYW4gaW50ZWxsaWdlbmNlLiBCdXQgbHVja2lseSwgbWFj"
+ "aGluZXMgZG9uJ3QgYWN0dWFsbHkgaGF2ZSB0byBiZSBpbnRlbGxpZ2VudDsg"
+ "dGhleSBqdXN0IGhhdmUgdG8gZmFrZSBpdC4gQWNjZXNzIHRvIGEgd2VhbHRo"
+ "IG9mIGluZm9ybWF0aW9uLCBjb21iaW5lZCB3aXRoIGEgcnVkaW1lbnRhcnkg"
+ "ZGVjaXNpb24tbWFraW5nIGNhcGFjaXR5LCBjYW4gb2Z0ZW4gYmUgYWxtb3N0"
+ "IGFzIHVzZWZ1bC4gT2YgY291cnNlLCB0aGUgcmVzdWx0cyBhcmUgYmV0dGVy"
+ "IHlldCB3aGVuIGNvdXBsZWQgd2l0aCBpbnRlbGxpZ2VuY2UuIEEgcmVmZXJl"
+ "bmNlIGxpYnJhcmlhbiB3aXRoIGFjY2VzcyB0byBhIGdvb2Qgc2VhcmNoIGVu"
+ "Z2luZSBpcyBhIGZvcm1pZGFibGUgdG9vbC4gLSBDcmFpZyBTaWx2ZXJzdGVp"
+ "biwgU2llbWVucyBQaWN0dXJlcyBvZiB0aGUgRnV0dXJlLCBTcHJpbmcgMjAw"
+ "NAo" },
+
+ // Degenerate edge case
+ { "",
+ "" },
+ }};
+
+ return testcase;
+}
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_ESCAPING_TEST_COMMON_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.cc
new file mode 100644
index 00000000000..0ba6574fdb6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.cc
@@ -0,0 +1,112 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/memutil.h"
+
+#include <cstdlib>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+int memcasecmp(const char* s1, const char* s2, size_t len) {
+ const unsigned char* us1 = reinterpret_cast<const unsigned char*>(s1);
+ const unsigned char* us2 = reinterpret_cast<const unsigned char*>(s2);
+
+ for (size_t i = 0; i < len; i++) {
+ const int diff =
+ int{static_cast<unsigned char>(y_absl::ascii_tolower(us1[i]))} -
+ int{static_cast<unsigned char>(y_absl::ascii_tolower(us2[i]))};
+ if (diff != 0) return diff;
+ }
+ return 0;
+}
+
+char* memdup(const char* s, size_t slen) {
+ void* copy;
+ if ((copy = malloc(slen)) == nullptr) return nullptr;
+ memcpy(copy, s, slen);
+ return reinterpret_cast<char*>(copy);
+}
+
+char* memrchr(const char* s, int c, size_t slen) {
+ for (const char* e = s + slen - 1; e >= s; e--) {
+ if (*e == c) return const_cast<char*>(e);
+ }
+ return nullptr;
+}
+
+size_t memspn(const char* s, size_t slen, const char* accept) {
+ const char* p = s;
+ const char* spanp;
+ char c, sc;
+
+cont:
+ c = *p++;
+ if (slen-- == 0) return p - 1 - s;
+ for (spanp = accept; (sc = *spanp++) != '\0';)
+ if (sc == c) goto cont;
+ return p - 1 - s;
+}
+
+size_t memcspn(const char* s, size_t slen, const char* reject) {
+ const char* p = s;
+ const char* spanp;
+ char c, sc;
+
+ while (slen-- != 0) {
+ c = *p++;
+ for (spanp = reject; (sc = *spanp++) != '\0';)
+ if (sc == c) return p - 1 - s;
+ }
+ return p - s;
+}
+
+char* mempbrk(const char* s, size_t slen, const char* accept) {
+ const char* scanp;
+ int sc;
+
+ for (; slen; ++s, --slen) {
+ for (scanp = accept; (sc = *scanp++) != '\0';)
+ if (sc == *s) return const_cast<char*>(s);
+ }
+ return nullptr;
+}
+
+// This is significantly faster for case-sensitive matches with very
+// few possible matches. See unit test for benchmarks.
+const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
+ size_t neelen) {
+ if (0 == neelen) {
+ return phaystack; // even if haylen is 0
+ }
+ if (haylen < neelen) return nullptr;
+
+ const char* match;
+ const char* hayend = phaystack + haylen - neelen + 1;
+ // A static cast is used here to work around the fact that memchr returns
+ // a void* on Posix-compliant systems and const void* on Windows.
+ while ((match = static_cast<const char*>(
+ memchr(phaystack, pneedle[0], hayend - phaystack)))) {
+ if (memcmp(match, pneedle, neelen) == 0)
+ return match;
+ else
+ phaystack = match + 1;
+ }
+ return nullptr;
+}
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.h
new file mode 100644
index 00000000000..ee442fe25f7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.h
@@ -0,0 +1,148 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// These routines provide mem versions of standard C string routines,
+// such as strpbrk. They function exactly the same as the str versions,
+// so if you wonder what they are, replace the word "mem" by
+// "str" and check out the man page. I could return void*, as the
+// strutil.h mem*() routines tend to do, but I return char* instead
+// since this is by far the most common way these functions are called.
+//
+// The difference between the mem and str versions is the mem version
+// takes a pointer and a length, rather than a '\0'-terminated string.
+// The memcase* routines defined here assume the locale is "C"
+// (they use y_absl::ascii_tolower instead of tolower).
+//
+// These routines are based on the BSD library.
+//
+// Here's a list of routines from string.h, and their mem analogues.
+// Functions in lowercase are defined in string.h; those in UPPERCASE
+// are defined here:
+//
+// strlen --
+// strcat strncat MEMCAT
+// strcpy strncpy memcpy
+// -- memccpy (very cool function, btw)
+// -- memmove
+// -- memset
+// strcmp strncmp memcmp
+// strcasecmp strncasecmp MEMCASECMP
+// strchr memchr
+// strcoll --
+// strxfrm --
+// strdup strndup MEMDUP
+// strrchr MEMRCHR
+// strspn MEMSPN
+// strcspn MEMCSPN
+// strpbrk MEMPBRK
+// strstr MEMSTR MEMMEM
+// (g)strcasestr MEMCASESTR MEMCASEMEM
+// strtok --
+// strprefix MEMPREFIX (strprefix is from strutil.h)
+// strcaseprefix MEMCASEPREFIX (strcaseprefix is from strutil.h)
+// strsuffix MEMSUFFIX (strsuffix is from strutil.h)
+// strcasesuffix MEMCASESUFFIX (strcasesuffix is from strutil.h)
+// -- MEMIS
+// -- MEMCASEIS
+// strcount MEMCOUNT (strcount is from strutil.h)
+
+#ifndef ABSL_STRINGS_INTERNAL_MEMUTIL_H_
+#define ABSL_STRINGS_INTERNAL_MEMUTIL_H_
+
+#include <cstddef>
+#include <cstring>
+
+#include "y_absl/base/port.h" // disable some warnings on Windows
+#include "y_absl/strings/ascii.h" // for y_absl::ascii_tolower
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+inline char* memcat(char* dest, size_t destlen, const char* src,
+ size_t srclen) {
+ return reinterpret_cast<char*>(memcpy(dest + destlen, src, srclen));
+}
+
+int memcasecmp(const char* s1, const char* s2, size_t len);
+char* memdup(const char* s, size_t slen);
+char* memrchr(const char* s, int c, size_t slen);
+size_t memspn(const char* s, size_t slen, const char* accept);
+size_t memcspn(const char* s, size_t slen, const char* reject);
+char* mempbrk(const char* s, size_t slen, const char* accept);
+
+// This is for internal use only. Don't call this directly.
+template <bool case_sensitive>
+const char* int_memmatch(const char* haystack, size_t haylen,
+ const char* needle, size_t neelen) {
+ if (0 == neelen) {
+ return haystack; // even if haylen is 0
+ }
+ const char* hayend = haystack + haylen;
+ const char* needlestart = needle;
+ const char* needleend = needlestart + neelen;
+
+ for (; haystack < hayend; ++haystack) {
+ char hay = case_sensitive
+ ? *haystack
+ : y_absl::ascii_tolower(static_cast<unsigned char>(*haystack));
+ char nee = case_sensitive
+ ? *needle
+ : y_absl::ascii_tolower(static_cast<unsigned char>(*needle));
+ if (hay == nee) {
+ if (++needle == needleend) {
+ return haystack + 1 - neelen;
+ }
+ } else if (needle != needlestart) {
+ // must back up haystack in case a prefix matched (find "aab" in "aaab")
+ haystack -= needle - needlestart; // for loop will advance one more
+ needle = needlestart;
+ }
+ }
+ return nullptr;
+}
+
+// These are the guys you can call directly.
+inline const char* memstr(const char* phaystack, size_t haylen,
+ const char* pneedle) {
+ return int_memmatch<true>(phaystack, haylen, pneedle, strlen(pneedle));
+}
+
+inline const char* memcasestr(const char* phaystack, size_t haylen,
+ const char* pneedle) {
+ return int_memmatch<false>(phaystack, haylen, pneedle, strlen(pneedle));
+}
+
+inline const char* memmem(const char* phaystack, size_t haylen,
+ const char* pneedle, size_t needlelen) {
+ return int_memmatch<true>(phaystack, haylen, pneedle, needlelen);
+}
+
+inline const char* memcasemem(const char* phaystack, size_t haylen,
+ const char* pneedle, size_t needlelen) {
+ return int_memmatch<false>(phaystack, haylen, pneedle, needlelen);
+}
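+
+// Example (an illustrative sketch; `kText` is hypothetical):
+//
+//   constexpr char kText[] = "Hello, World";
+//   const char* p = memcasestr(kText, sizeof(kText) - 1, "world");
+//   // `p` now points at the 'W' in "World"; it would be nullptr on no match.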
+
+// This is significantly faster for case-sensitive matches with very
+// few possible matches. See unit test for benchmarks.
+const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
+ size_t neelen);
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_MEMUTIL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/numbers_test_common.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/numbers_test_common.h
new file mode 100644
index 00000000000..12aec3ac110
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/numbers_test_common.h
@@ -0,0 +1,184 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file contains common things needed by numbers_test.cc,
+// numbers_legacy_test.cc and numbers_benchmark.cc.
+
+#ifndef ABSL_STRINGS_INTERNAL_NUMBERS_TEST_COMMON_H_
+#define ABSL_STRINGS_INTERNAL_NUMBERS_TEST_COMMON_H_
+
+#include <array>
+#include <cstdint>
+#include <limits>
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+template <typename IntType>
+inline bool Itoa(IntType value, int base, TString* destination) {
+ destination->clear();
+ if (base <= 1 || base > 36) {
+ return false;
+ }
+
+ if (value == 0) {
+ destination->push_back('0');
+ return true;
+ }
+
+ bool negative = value < 0;
+ while (value != 0) {
+ const IntType next_value = value / base;
+ // Can't use std::abs here because of problems when IntType is unsigned.
+ int remainder =
+ static_cast<int>(value > next_value * base ? value - next_value * base
+ : next_value * base - value);
+ char c = remainder < 10 ? '0' + remainder : 'A' + remainder - 10;
+ destination->insert(0, 1, c);
+ value = next_value;
+ }
+
+ if (negative) {
+ destination->insert(0, 1, '-');
+ }
+ return true;
+}
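+
+// For example (an illustrative sketch): Itoa(255, 16, &s) sets `s` to "FF"
+// and returns true; Itoa(-6, 2, &s) sets `s` to "-110".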
+
+struct uint32_test_case {
+ const char* str;
+ bool expect_ok;
+ int base; // base to pass to the conversion function
+ uint32_t expected;
+};
+
+inline const std::array<uint32_test_case, 27>& strtouint32_test_cases() {
+ static const std::array<uint32_test_case, 27> test_cases{{
+ {"0xffffffff", true, 16, (std::numeric_limits<uint32_t>::max)()},
+ {"0x34234324", true, 16, 0x34234324},
+ {"34234324", true, 16, 0x34234324},
+ {"0", true, 16, 0},
+ {" \t\n 0xffffffff", true, 16, (std::numeric_limits<uint32_t>::max)()},
+ {" \f\v 46", true, 10, 46}, // must accept weird whitespace
+ {" \t\n 72717222", true, 8, 072717222},
+ {" \t\n 072717222", true, 8, 072717222},
+ {" \t\n 072717228", false, 8, 07271722},
+ {"0", true, 0, 0},
+
+ // Base-10 version.
+ {"34234324", true, 0, 34234324},
+ {"4294967295", true, 0, (std::numeric_limits<uint32_t>::max)()},
+ {"34234324 \n\t", true, 10, 34234324},
+
+ // Unusual base
+ {"0", true, 3, 0},
+ {"2", true, 3, 2},
+ {"11", true, 3, 4},
+
+ // Invalid uints.
+ {"", false, 0, 0},
+ {" ", false, 0, 0},
+ {"abc", false, 0, 0}, // would be valid hex, but prefix is missing
+ {"34234324a", false, 0, 34234324},
+ {"34234.3", false, 0, 34234},
+ {"-1", false, 0, 0},
+ {" -123", false, 0, 0},
+ {" \t\n -123", false, 0, 0},
+
+ // Out of bounds.
+ {"4294967296", false, 0, (std::numeric_limits<uint32_t>::max)()},
+ {"0x100000000", false, 0, (std::numeric_limits<uint32_t>::max)()},
+ {nullptr, false, 0, 0},
+ }};
+ return test_cases;
+}
+
+struct uint64_test_case {
+ const char* str;
+ bool expect_ok;
+ int base;
+ uint64_t expected;
+};
+
+inline const std::array<uint64_test_case, 34>& strtouint64_test_cases() {
+ static const std::array<uint64_test_case, 34> test_cases{{
+ {"0x3423432448783446", true, 16, int64_t{0x3423432448783446}},
+ {"3423432448783446", true, 16, int64_t{0x3423432448783446}},
+
+ {"0", true, 16, 0},
+ {"000", true, 0, 0},
+ {"0", true, 0, 0},
+ {" \t\n 0xffffffffffffffff", true, 16,
+ (std::numeric_limits<uint64_t>::max)()},
+
+ {"012345670123456701234", true, 8, int64_t{012345670123456701234}},
+ {"12345670123456701234", true, 8, int64_t{012345670123456701234}},
+
+ {"12845670123456701234", false, 8, 0},
+
+ // Base-10 version.
+ {"34234324487834466", true, 0, int64_t{34234324487834466}},
+
+ {" \t\n 18446744073709551615", true, 0,
+ (std::numeric_limits<uint64_t>::max)()},
+
+ {"34234324487834466 \n\t ", true, 0, int64_t{34234324487834466}},
+
+ {" \f\v 46", true, 10, 46}, // must accept weird whitespace
+
+ // Unusual base
+ {"0", true, 3, 0},
+ {"2", true, 3, 2},
+ {"11", true, 3, 4},
+
+ {"0", true, 0, 0},
+
+ // Invalid uints.
+ {"", false, 0, 0},
+ {" ", false, 0, 0},
+ {"abc", false, 0, 0},
+ {"34234324487834466a", false, 0, 0},
+ {"34234487834466.3", false, 0, 0},
+ {"-1", false, 0, 0},
+ {" -123", false, 0, 0},
+ {" \t\n -123", false, 0, 0},
+
+ // Out of bounds.
+ {"18446744073709551616", false, 10, 0},
+ {"18446744073709551616", false, 0, 0},
+ {"0x10000000000000000", false, 16,
+ (std::numeric_limits<uint64_t>::max)()},
+ {"0X10000000000000000", false, 16,
+ (std::numeric_limits<uint64_t>::max)()}, // 0X versus 0x.
+ {"0x10000000000000000", false, 0, (std::numeric_limits<uint64_t>::max)()},
+ {"0X10000000000000000", false, 0,
+ (std::numeric_limits<uint64_t>::max)()}, // 0X versus 0x.
+
+ {"0x1234", true, 16, 0x1234},
+
+ // Base-10 string version.
+ {"1234", true, 0, 1234},
+ {nullptr, false, 0, 0},
+ }};
+ return test_cases;
+}
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_NUMBERS_TEST_COMMON_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.cc
new file mode 100644
index 00000000000..ba18857d833
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.cc
@@ -0,0 +1,36 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/ostringstream.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+OStringStream::Buf::int_type OStringStream::overflow(int c) {
+ assert(s_);
+ if (!Buf::traits_type::eq_int_type(c, Buf::traits_type::eof()))
+ s_->push_back(static_cast<char>(c));
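+  // overflow() may return any value other than eof() to signal success, so
+  // the particular value returned below is never inspected by callers.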
+ return 1;
+}
+
+std::streamsize OStringStream::xsputn(const char* s, std::streamsize n) {
+ assert(s_);
+ s_->append(s, n);
+ return n;
+}
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.h
new file mode 100644
index 00000000000..d00cef9c239
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.h
@@ -0,0 +1,89 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_OSTRINGSTREAM_H_
+#define ABSL_STRINGS_INTERNAL_OSTRINGSTREAM_H_
+
+#include <cassert>
+#include <ostream>
+#include <streambuf>
+#include <util/generic/string.h>
+
+#include "y_absl/base/port.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+// The same as std::ostringstream but appends to a user-specified TString,
+// and is faster. It is ~70% faster to create, ~50% faster to write to, and
+// completely free to extract the result TString.
+//
+// TString s;
+// OStringStream strm(&s);
+// strm << 42 << ' ' << 3.14; // appends to `s`
+//
+// The stream object doesn't have to be named. Starting from C++11, operator<<
+// works with rvalues of std::ostream.
+//
+// TString s;
+// OStringStream(&s) << 42 << ' ' << 3.14; // appends to `s`
+//
+// OStringStream is faster to create than std::ostringstream but it's still
+// relatively slow. Avoid creating multiple streams where a single stream will
+// do.
+//
+// Creates unnecessary instances of OStringStream: slow.
+//
+// TString s;
+// OStringStream(&s) << 42;
+// OStringStream(&s) << ' ';
+// OStringStream(&s) << 3.14;
+//
+// Creates a single instance of OStringStream and reuses it: fast.
+//
+// TString s;
+// OStringStream strm(&s);
+// strm << 42;
+// strm << ' ';
+// strm << 3.14;
+//
+// Note: flush() has no effect. No reason to call it.
+class OStringStream : private std::basic_streambuf<char>, public std::ostream {
+ public:
+ // The argument can be null, in which case you'll need to call str(p) with a
+ // non-null argument before you can write to the stream.
+ //
+ // The destructor of OStringStream doesn't use the TString. It's OK to
+ // destroy the TString before the stream.
+ explicit OStringStream(TString* s) : std::ostream(this), s_(s) {}
+
+ TString* str() { return s_; }
+ const TString* str() const { return s_; }
+ void str(TString* s) { s_ = s; }
+
+ private:
+ using Buf = std::basic_streambuf<char>;
+
+ Buf::int_type overflow(int c) override;
+ std::streamsize xsputn(const char* s, std::streamsize n) override;
+
+ TString* s_;
+};
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_OSTRINGSTREAM_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/pow10_helper.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/pow10_helper.h
new file mode 100644
index 00000000000..e4d41d7e4ef
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/pow10_helper.h
@@ -0,0 +1,40 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This test helper library contains a table of powers of 10, to guarantee
+// precise values are computed across the full range of doubles. We can't rely
+// on the pow() function, because not all standard libraries ship a version
+// that is precise.
+#ifndef ABSL_STRINGS_INTERNAL_POW10_HELPER_H_
+#define ABSL_STRINGS_INTERNAL_POW10_HELPER_H_
+
+#include <vector>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+// Computes the precise value of 10^exp. (I.e. the nearest representable
+// double to the exact value, rounding to nearest-even in the (single) case of
+// being exactly halfway between.)
+double Pow10(int exp);
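+//
+// For example, Pow10(0) returns exactly 1.0 and Pow10(2) returns exactly
+// 100.0; for exponents whose exact power is not representable, the result is
+// the correctly rounded nearest double.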
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_POW10_HELPER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/resize_uninitialized.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/resize_uninitialized.h
new file mode 100644
index 00000000000..14860bb237b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/resize_uninitialized.h
@@ -0,0 +1,119 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_STRINGS_INTERNAL_RESIZE_UNINITIALIZED_H_
+#define ABSL_STRINGS_INTERNAL_RESIZE_UNINITIALIZED_H_
+
+#include <algorithm>
+#include <util/generic/string.h>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/port.h"
+#include "y_absl/meta/type_traits.h" // for void_t
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+// In this type trait, we look for a __resize_default_init member function and
+// use it if available; otherwise, we fall back to resize(). We provide
+// HasMember to indicate whether __resize_default_init is present.
+template <typename string_type, typename = void>
+struct ResizeUninitializedTraits {
+ using HasMember = std::false_type;
+ static void Resize(string_type* s, size_t new_size) { s->resize(new_size); }
+};
+
+// __resize_default_init is provided by libc++ >= 8.0
+template <typename string_type>
+struct ResizeUninitializedTraits<
+ string_type, y_absl::void_t<decltype(std::declval<string_type&>()
+ .__resize_default_init(237))> > {
+ using HasMember = std::true_type;
+ static void Resize(string_type* s, size_t new_size) {
+ s->__resize_default_init(new_size);
+ }
+};
+
+// Returns true if the TString implementation supports a resize where
+// the new characters added to the TString are left untouched.
+//
+// (A better name might be "STLStringSupportsUninitializedResize", alluding to
+// the previous function.)
+template <typename string_type>
+inline constexpr bool STLStringSupportsNontrashingResize(string_type*) {
+ return ResizeUninitializedTraits<string_type>::HasMember::value;
+}
+
+// Like str->resize(new_size), except any new characters added to "*str" as a
+// result of resizing may be left uninitialized, rather than being filled with
+// '0' bytes. Typically used when code is then going to overwrite the backing
+// store of the TString with known data.
+template <typename string_type, typename = void>
+inline void STLStringResizeUninitialized(string_type* s, size_t new_size) {
+ ResizeUninitializedTraits<string_type>::Resize(s, new_size);
+}
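+
+// A minimal usage sketch (`src` and `src_len` are hypothetical, for
+// illustration only):
+//
+//   TString s;
+//   STLStringResizeUninitialized(&s, src_len);
+//   memcpy(&s[0], src, src_len);  // overwrite every uninitialized byte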
+
+// Used to ensure exponential growth so that the amortized complexity of
+// increasing the string size by a small amount is O(1), in contrast to
+// O(str->size()) in the case of precise growth.
+template <typename string_type>
+void STLStringReserveAmortized(string_type* s, size_t new_size) {
+ const size_t cap = s->capacity();
+ if (new_size > cap) {
+ // Make sure to always grow by at least a factor of 2x.
+ s->reserve((std::max)(new_size, 2 * cap));
+ }
+}
+
+// In this type trait, we look for an __append_default_init member function and
+// use it if available; otherwise, we fall back to append().
+template <typename string_type, typename = void>
+struct AppendUninitializedTraits {
+ static void Append(string_type* s, size_t n) {
+ s->append(n, typename string_type::value_type());
+ }
+};
+
+template <typename string_type>
+struct AppendUninitializedTraits<
+ string_type, y_absl::void_t<decltype(std::declval<string_type&>()
+ .__append_default_init(237))> > {
+ static void Append(string_type* s, size_t n) {
+ s->__append_default_init(n);
+ }
+};
+
+// Like STLStringResizeUninitialized(str, new_size), except guaranteed to use
+// exponential growth so that the amortized complexity of increasing the string
+// size by a small amount is O(1), in contrast to O(str->size()) in the case of
+// precise growth.
+template <typename string_type>
+void STLStringResizeUninitializedAmortized(string_type* s, size_t new_size) {
+ const size_t size = s->size();
+ if (new_size > size) {
+ AppendUninitializedTraits<string_type>::Append(s, new_size - size);
+ } else {
+ s->erase(new_size);
+ }
+}
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_RESIZE_UNINITIALIZED_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/stl_type_traits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/stl_type_traits.h
new file mode 100644
index 00000000000..db8d4635d03
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/stl_type_traits.h
@@ -0,0 +1,248 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// This file provides the IsStrictlyBaseOfAndConvertibleToSTLContainer type
+// trait metafunction to assist in working with the _GLIBCXX_DEBUG debug
+// wrappers of STL containers.
+//
+// DO NOT INCLUDE THIS FILE DIRECTLY. Use this file by including
+// y_absl/strings/str_split.h.
+//
+// IWYU pragma: private, include "y_absl/strings/str_split.h"
+
+#ifndef ABSL_STRINGS_INTERNAL_STL_TYPE_TRAITS_H_
+#define ABSL_STRINGS_INTERNAL_STL_TYPE_TRAITS_H_
+
+#include <array>
+#include <bitset>
+#include <deque>
+#include <forward_list>
+#include <list>
+#include <map>
+#include <set>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+template <typename C, template <typename...> class T>
+struct IsSpecializationImpl : std::false_type {};
+template <template <typename...> class T, typename... Args>
+struct IsSpecializationImpl<T<Args...>, T> : std::true_type {};
+template <typename C, template <typename...> class T>
+using IsSpecialization = IsSpecializationImpl<y_absl::decay_t<C>, T>;
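+
+// For example, IsSpecialization<std::vector<int>, std::vector>::value is true,
+// while IsSpecialization<int, std::vector>::value is false.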
+
+template <typename C>
+struct IsArrayImpl : std::false_type {};
+template <template <typename, size_t> class A, typename T, size_t N>
+struct IsArrayImpl<A<T, N>> : std::is_same<A<T, N>, std::array<T, N>> {};
+template <typename C>
+using IsArray = IsArrayImpl<y_absl::decay_t<C>>;
+
+template <typename C>
+struct IsBitsetImpl : std::false_type {};
+template <template <size_t> class B, size_t N>
+struct IsBitsetImpl<B<N>> : std::is_same<B<N>, std::bitset<N>> {};
+template <typename C>
+using IsBitset = IsBitsetImpl<y_absl::decay_t<C>>;
+
+template <typename C>
+struct IsSTLContainer
+ : y_absl::disjunction<
+ IsArray<C>, IsBitset<C>, IsSpecialization<C, std::deque>,
+ IsSpecialization<C, std::forward_list>,
+ IsSpecialization<C, std::list>, IsSpecialization<C, std::map>,
+ IsSpecialization<C, std::multimap>, IsSpecialization<C, std::set>,
+ IsSpecialization<C, std::multiset>,
+ IsSpecialization<C, std::unordered_map>,
+ IsSpecialization<C, std::unordered_multimap>,
+ IsSpecialization<C, std::unordered_set>,
+ IsSpecialization<C, std::unordered_multiset>,
+ IsSpecialization<C, std::vector>> {};
+
+template <typename C, template <typename...> class T, typename = void>
+struct IsBaseOfSpecializationImpl : std::false_type {};
+// IsBaseOfSpecializationImpl needs multiple partial specializations to SFINAE
+// on the existence of container-dependent types and plug them into the STL
+// template.
+template <typename C, template <typename, typename> class T>
+struct IsBaseOfSpecializationImpl<
+ C, T, y_absl::void_t<typename C::value_type, typename C::allocator_type>>
+ : std::is_base_of<C,
+ T<typename C::value_type, typename C::allocator_type>> {};
+template <typename C, template <typename, typename, typename> class T>
+struct IsBaseOfSpecializationImpl<
+ C, T,
+ y_absl::void_t<typename C::key_type, typename C::key_compare,
+ typename C::allocator_type>>
+ : std::is_base_of<C, T<typename C::key_type, typename C::key_compare,
+ typename C::allocator_type>> {};
+template <typename C, template <typename, typename, typename, typename> class T>
+struct IsBaseOfSpecializationImpl<
+ C, T,
+ y_absl::void_t<typename C::key_type, typename C::mapped_type,
+ typename C::key_compare, typename C::allocator_type>>
+ : std::is_base_of<C,
+ T<typename C::key_type, typename C::mapped_type,
+ typename C::key_compare, typename C::allocator_type>> {
+};
+template <typename C, template <typename, typename, typename, typename> class T>
+struct IsBaseOfSpecializationImpl<
+ C, T,
+ y_absl::void_t<typename C::key_type, typename C::hasher,
+ typename C::key_equal, typename C::allocator_type>>
+ : std::is_base_of<C, T<typename C::key_type, typename C::hasher,
+ typename C::key_equal, typename C::allocator_type>> {
+};
+template <typename C,
+ template <typename, typename, typename, typename, typename> class T>
+struct IsBaseOfSpecializationImpl<
+ C, T,
+ y_absl::void_t<typename C::key_type, typename C::mapped_type,
+ typename C::hasher, typename C::key_equal,
+ typename C::allocator_type>>
+ : std::is_base_of<C, T<typename C::key_type, typename C::mapped_type,
+ typename C::hasher, typename C::key_equal,
+ typename C::allocator_type>> {};
+template <typename C, template <typename...> class T>
+using IsBaseOfSpecialization = IsBaseOfSpecializationImpl<y_absl::decay_t<C>, T>;
+
+template <typename C>
+struct IsBaseOfArrayImpl : std::false_type {};
+template <template <typename, size_t> class A, typename T, size_t N>
+struct IsBaseOfArrayImpl<A<T, N>> : std::is_base_of<A<T, N>, std::array<T, N>> {
+};
+template <typename C>
+using IsBaseOfArray = IsBaseOfArrayImpl<y_absl::decay_t<C>>;
+
+template <typename C>
+struct IsBaseOfBitsetImpl : std::false_type {};
+template <template <size_t> class B, size_t N>
+struct IsBaseOfBitsetImpl<B<N>> : std::is_base_of<B<N>, std::bitset<N>> {};
+template <typename C>
+using IsBaseOfBitset = IsBaseOfBitsetImpl<y_absl::decay_t<C>>;
+
+template <typename C>
+struct IsBaseOfSTLContainer
+ : y_absl::disjunction<IsBaseOfArray<C>, IsBaseOfBitset<C>,
+ IsBaseOfSpecialization<C, std::deque>,
+ IsBaseOfSpecialization<C, std::forward_list>,
+ IsBaseOfSpecialization<C, std::list>,
+ IsBaseOfSpecialization<C, std::map>,
+ IsBaseOfSpecialization<C, std::multimap>,
+ IsBaseOfSpecialization<C, std::set>,
+ IsBaseOfSpecialization<C, std::multiset>,
+ IsBaseOfSpecialization<C, std::unordered_map>,
+ IsBaseOfSpecialization<C, std::unordered_multimap>,
+ IsBaseOfSpecialization<C, std::unordered_set>,
+ IsBaseOfSpecialization<C, std::unordered_multiset>,
+ IsBaseOfSpecialization<C, std::vector>> {};
+
+template <typename C, template <typename...> class T, typename = void>
+struct IsConvertibleToSpecializationImpl : std::false_type {};
+// IsConvertibleToSpecializationImpl needs multiple partial specializations to
+// SFINAE on the existence of container-dependent types and plug them into the
+// STL template.
+template <typename C, template <typename, typename> class T>
+struct IsConvertibleToSpecializationImpl<
+ C, T, y_absl::void_t<typename C::value_type, typename C::allocator_type>>
+ : std::is_convertible<
+ C, T<typename C::value_type, typename C::allocator_type>> {};
+template <typename C, template <typename, typename, typename> class T>
+struct IsConvertibleToSpecializationImpl<
+ C, T,
+ y_absl::void_t<typename C::key_type, typename C::key_compare,
+ typename C::allocator_type>>
+ : std::is_convertible<C, T<typename C::key_type, typename C::key_compare,
+ typename C::allocator_type>> {};
+template <typename C, template <typename, typename, typename, typename> class T>
+struct IsConvertibleToSpecializationImpl<
+ C, T,
+ y_absl::void_t<typename C::key_type, typename C::mapped_type,
+ typename C::key_compare, typename C::allocator_type>>
+ : std::is_convertible<
+ C, T<typename C::key_type, typename C::mapped_type,
+ typename C::key_compare, typename C::allocator_type>> {};
+template <typename C, template <typename, typename, typename, typename> class T>
+struct IsConvertibleToSpecializationImpl<
+ C, T,
+ y_absl::void_t<typename C::key_type, typename C::hasher,
+ typename C::key_equal, typename C::allocator_type>>
+ : std::is_convertible<
+ C, T<typename C::key_type, typename C::hasher, typename C::key_equal,
+ typename C::allocator_type>> {};
+template <typename C,
+ template <typename, typename, typename, typename, typename> class T>
+struct IsConvertibleToSpecializationImpl<
+ C, T,
+ y_absl::void_t<typename C::key_type, typename C::mapped_type,
+ typename C::hasher, typename C::key_equal,
+ typename C::allocator_type>>
+ : std::is_convertible<C, T<typename C::key_type, typename C::mapped_type,
+ typename C::hasher, typename C::key_equal,
+ typename C::allocator_type>> {};
+template <typename C, template <typename...> class T>
+using IsConvertibleToSpecialization =
+ IsConvertibleToSpecializationImpl<y_absl::decay_t<C>, T>;
+
+template <typename C>
+struct IsConvertibleToArrayImpl : std::false_type {};
+template <template <typename, size_t> class A, typename T, size_t N>
+struct IsConvertibleToArrayImpl<A<T, N>>
+ : std::is_convertible<A<T, N>, std::array<T, N>> {};
+template <typename C>
+using IsConvertibleToArray = IsConvertibleToArrayImpl<y_absl::decay_t<C>>;
+
+template <typename C>
+struct IsConvertibleToBitsetImpl : std::false_type {};
+template <template <size_t> class B, size_t N>
+struct IsConvertibleToBitsetImpl<B<N>>
+ : std::is_convertible<B<N>, std::bitset<N>> {};
+template <typename C>
+using IsConvertibleToBitset = IsConvertibleToBitsetImpl<y_absl::decay_t<C>>;
+
+template <typename C>
+struct IsConvertibleToSTLContainer
+ : y_absl::disjunction<
+ IsConvertibleToArray<C>, IsConvertibleToBitset<C>,
+ IsConvertibleToSpecialization<C, std::deque>,
+ IsConvertibleToSpecialization<C, std::forward_list>,
+ IsConvertibleToSpecialization<C, std::list>,
+ IsConvertibleToSpecialization<C, std::map>,
+ IsConvertibleToSpecialization<C, std::multimap>,
+ IsConvertibleToSpecialization<C, std::set>,
+ IsConvertibleToSpecialization<C, std::multiset>,
+ IsConvertibleToSpecialization<C, std::unordered_map>,
+ IsConvertibleToSpecialization<C, std::unordered_multimap>,
+ IsConvertibleToSpecialization<C, std::unordered_set>,
+ IsConvertibleToSpecialization<C, std::unordered_multiset>,
+ IsConvertibleToSpecialization<C, std::vector>> {};
+
+template <typename C>
+struct IsStrictlyBaseOfAndConvertibleToSTLContainer
+ : y_absl::conjunction<y_absl::negation<IsSTLContainer<C>>,
+ IsBaseOfSTLContainer<C>,
+ IsConvertibleToSTLContainer<C>> {};
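+
+// In other words, the trait is true exactly for a type C that is not itself
+// one of the STL containers above, but is a strict base of one and is
+// convertible to it -- the shape of the underlying containers wrapped by the
+// _GLIBCXX_DEBUG mode.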
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+#endif // ABSL_STRINGS_INTERNAL_STL_TYPE_TRAITS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..33d60b3d2b8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/.yandex_meta/licenses.list.txt
@@ -0,0 +1,20 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2020 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc
new file mode 100644
index 00000000000..8d5c3b61ac3
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc
@@ -0,0 +1,488 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//
+// POSIX spec:
+// http://pubs.opengroup.org/onlinepubs/009695399/functions/fprintf.html
+//
+#include "y_absl/strings/internal/str_format/arg.h"
+
+#include <cassert>
+#include <cerrno>
+#include <cstdlib>
+#include <util/generic/string.h>
+#include <type_traits>
+
+#include "y_absl/base/port.h"
+#include "y_absl/strings/internal/str_format/float_conversion.h"
+#include "y_absl/strings/numbers.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+namespace {
+
+// Reduce *capacity by s.size(), clipped to a 0 minimum.
+void ReducePadding(string_view s, size_t *capacity) {
+ *capacity = Excess(s.size(), *capacity);
+}
+
+// Reduce *capacity by n, clipped to a 0 minimum.
+void ReducePadding(size_t n, size_t *capacity) {
+ *capacity = Excess(n, *capacity);
+}
+
+template <typename T>
+struct MakeUnsigned : std::make_unsigned<T> {};
+template <>
+struct MakeUnsigned<y_absl::int128> {
+ using type = y_absl::uint128;
+};
+template <>
+struct MakeUnsigned<y_absl::uint128> {
+ using type = y_absl::uint128;
+};
+
+template <typename T>
+struct IsSigned : std::is_signed<T> {};
+template <>
+struct IsSigned<y_absl::int128> : std::true_type {};
+template <>
+struct IsSigned<y_absl::uint128> : std::false_type {};
+
+// Integral digit printer.
+// Call exactly one of the PrintAs* routines once after construction.
+// Use with_neg_and_zero/without_neg_or_zero/is_negative to access the results.
+class IntDigits {
+ public:
+ // Print the unsigned integer as octal.
+ // Supports unsigned integral types and uint128.
+ template <typename T>
+ void PrintAsOct(T v) {
+ static_assert(!IsSigned<T>::value, "");
+ char *p = storage_ + sizeof(storage_);
+ do {
+ *--p = static_cast<char>('0' + (static_cast<size_t>(v) & 7));
+ v >>= 3;
+ } while (v);
+ start_ = p;
+ size_ = storage_ + sizeof(storage_) - p;
+ }
+
+ // Print the signed or unsigned integer as decimal.
+ // Supports all integral types.
+ template <typename T>
+ void PrintAsDec(T v) {
+ static_assert(std::is_integral<T>::value, "");
+ start_ = storage_;
+ size_ = numbers_internal::FastIntToBuffer(v, storage_) - storage_;
+ }
+
+ void PrintAsDec(int128 v) {
+ auto u = static_cast<uint128>(v);
+ bool add_neg = false;
+ if (v < 0) {
+ add_neg = true;
+ u = uint128{} - u;
+ }
+ PrintAsDec(u, add_neg);
+ }
+
+ void PrintAsDec(uint128 v, bool add_neg = false) {
+ // This function can be sped up if needed. We can call FastIntToBuffer
+ // twice, or fix FastIntToBuffer to support uint128.
+ char *p = storage_ + sizeof(storage_);
+ do {
+ p -= 2;
+ numbers_internal::PutTwoDigits(static_cast<size_t>(v % 100), p);
+ v /= 100;
+ } while (v);
+ if (p[0] == '0') {
+      // We printed one too many digits.
+ ++p;
+ }
+ if (add_neg) {
+ *--p = '-';
+ }
+ size_ = storage_ + sizeof(storage_) - p;
+ start_ = p;
+ }
+
+ // Print the unsigned integer as hex using lowercase.
+ // Supports unsigned integral types and uint128.
+ template <typename T>
+ void PrintAsHexLower(T v) {
+ static_assert(!IsSigned<T>::value, "");
+ char *p = storage_ + sizeof(storage_);
+
+ do {
+ p -= 2;
+ constexpr const char* table = numbers_internal::kHexTable;
+ std::memcpy(p, table + 2 * (static_cast<size_t>(v) & 0xFF), 2);
+ if (sizeof(T) == 1) break;
+ v >>= 8;
+ } while (v);
+ if (p[0] == '0') {
+      // We printed one too many hexits.
+ ++p;
+ }
+ start_ = p;
+ size_ = storage_ + sizeof(storage_) - p;
+ }
+
+ // Print the unsigned integer as hex using uppercase.
+ // Supports unsigned integral types and uint128.
+ template <typename T>
+ void PrintAsHexUpper(T v) {
+ static_assert(!IsSigned<T>::value, "");
+ char *p = storage_ + sizeof(storage_);
+
+ // kHexTable is only lowercase, so do it manually for uppercase.
+ do {
+ *--p = "0123456789ABCDEF"[static_cast<size_t>(v) & 15];
+ v >>= 4;
+ } while (v);
+ start_ = p;
+ size_ = storage_ + sizeof(storage_) - p;
+ }
+
+ // The printed value including the '-' sign if available.
+  // For inputs of value `0`, this will return "0".
+ string_view with_neg_and_zero() const { return {start_, size_}; }
+
+ // The printed value not including the '-' sign.
+ // For inputs of value `0`, this will return "".
+ string_view without_neg_or_zero() const {
+ static_assert('-' < '0', "The check below verifies both.");
+ size_t advance = start_[0] <= '0' ? 1 : 0;
+ return {start_ + advance, size_ - advance};
+ }
+
+ bool is_negative() const { return start_[0] == '-'; }
+
+ private:
+ const char *start_;
+ size_t size_;
+ // Max size: 128 bit value as octal -> 43 digits, plus sign char
+ char storage_[128 / 3 + 1 + 1];
+};
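+
+// For illustration, after
+//
+//   IntDigits d;
+//   d.PrintAsDec(-42);
+//
+// d.with_neg_and_zero() == "-42", d.without_neg_or_zero() == "42", and
+// d.is_negative() is true.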
+
+// Note: 'o' conversions do not have a base indicator; instead, the '#' flag
+// is specified to modify the precision for 'o' conversions.
+string_view BaseIndicator(const IntDigits &as_digits,
+ const FormatConversionSpecImpl conv) {
+ // always show 0x for %p.
+ bool alt = conv.has_alt_flag() ||
+ conv.conversion_char() == FormatConversionCharInternal::p;
+ bool hex = (conv.conversion_char() == FormatConversionCharInternal::x ||
+ conv.conversion_char() == FormatConversionCharInternal::X ||
+ conv.conversion_char() == FormatConversionCharInternal::p);
+ // From the POSIX description of '#' flag:
+ // "For x or X conversion specifiers, a non-zero result shall have
+ // 0x (or 0X) prefixed to it."
+ if (alt && hex && !as_digits.without_neg_or_zero().empty()) {
+ return conv.conversion_char() == FormatConversionCharInternal::X ? "0X"
+ : "0x";
+ }
+ return {};
+}
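+
+// For illustration: with the '#' flag, 255 as hex is printed "0xff" (or "0XFF"
+// for 'X'), while 0 gets no prefix, matching printf("%#x", ...).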
+
+string_view SignColumn(bool neg, const FormatConversionSpecImpl conv) {
+ if (conv.conversion_char() == FormatConversionCharInternal::d ||
+ conv.conversion_char() == FormatConversionCharInternal::i) {
+ if (neg) return "-";
+ if (conv.has_show_pos_flag()) return "+";
+ if (conv.has_sign_col_flag()) return " ";
+ }
+ return {};
+}
+
+bool ConvertCharImpl(unsigned char v, const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ size_t fill = 0;
+ if (conv.width() >= 0) fill = conv.width();
+ ReducePadding(1, &fill);
+ if (!conv.has_left_flag()) sink->Append(fill, ' ');
+ sink->Append(1, v);
+ if (conv.has_left_flag()) sink->Append(fill, ' ');
+ return true;
+}
+
+bool ConvertIntImplInnerSlow(const IntDigits &as_digits,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ // Print as a sequence of Substrings:
+ // [left_spaces][sign][base_indicator][zeroes][formatted][right_spaces]
+ size_t fill = 0;
+ if (conv.width() >= 0) fill = conv.width();
+
+ string_view formatted = as_digits.without_neg_or_zero();
+ ReducePadding(formatted, &fill);
+
+ string_view sign = SignColumn(as_digits.is_negative(), conv);
+ ReducePadding(sign, &fill);
+
+ string_view base_indicator = BaseIndicator(as_digits, conv);
+ ReducePadding(base_indicator, &fill);
+
+ int precision = conv.precision();
+ bool precision_specified = precision >= 0;
+ if (!precision_specified)
+ precision = 1;
+
+ if (conv.has_alt_flag() &&
+ conv.conversion_char() == FormatConversionCharInternal::o) {
+ // From POSIX description of the '#' (alt) flag:
+ // "For o conversion, it increases the precision (if necessary) to
+ // force the first digit of the result to be zero."
+ if (formatted.empty() || *formatted.begin() != '0') {
+ int needed = static_cast<int>(formatted.size()) + 1;
+ precision = std::max(precision, needed);
+ }
+ }
+
+ size_t num_zeroes = Excess(formatted.size(), precision);
+ ReducePadding(num_zeroes, &fill);
+
+ size_t num_left_spaces = !conv.has_left_flag() ? fill : 0;
+ size_t num_right_spaces = conv.has_left_flag() ? fill : 0;
+
+ // From POSIX description of the '0' (zero) flag:
+ // "For d, i, o, u, x, and X conversion specifiers, if a precision
+ // is specified, the '0' flag is ignored."
+ if (!precision_specified && conv.has_zero_flag()) {
+ num_zeroes += num_left_spaces;
+ num_left_spaces = 0;
+ }
+
+ sink->Append(num_left_spaces, ' ');
+ sink->Append(sign);
+ sink->Append(base_indicator);
+ sink->Append(num_zeroes, '0');
+ sink->Append(formatted);
+ sink->Append(num_right_spaces, ' ');
+ return true;
+}
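+
+// For illustration: "%08d" applied to 42 produces the segments
+// [zeroes="000000"][formatted="42"], i.e. "00000042", and "%#o" applied to 8
+// produces "010" because the alt flag bumps the precision to force a leading
+// zero.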
+
+template <typename T>
+bool ConvertIntArg(T v, const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ using U = typename MakeUnsigned<T>::type;
+ IntDigits as_digits;
+
+  // This odd casting is due to a bug in -Wswitch behavior in gcc 4.9, which
+  // causes it to complain about a switch/case type mismatch, even though both
+  // are FormatConversionChar.  Likely this is because at this point
+ // FormatConversionChar is declared, but not defined.
+ switch (static_cast<uint8_t>(conv.conversion_char())) {
+ case static_cast<uint8_t>(FormatConversionCharInternal::c):
+ return ConvertCharImpl(static_cast<unsigned char>(v), conv, sink);
+
+ case static_cast<uint8_t>(FormatConversionCharInternal::o):
+ as_digits.PrintAsOct(static_cast<U>(v));
+ break;
+
+ case static_cast<uint8_t>(FormatConversionCharInternal::x):
+ as_digits.PrintAsHexLower(static_cast<U>(v));
+ break;
+ case static_cast<uint8_t>(FormatConversionCharInternal::X):
+ as_digits.PrintAsHexUpper(static_cast<U>(v));
+ break;
+
+ case static_cast<uint8_t>(FormatConversionCharInternal::u):
+ as_digits.PrintAsDec(static_cast<U>(v));
+ break;
+
+ case static_cast<uint8_t>(FormatConversionCharInternal::d):
+ case static_cast<uint8_t>(FormatConversionCharInternal::i):
+ as_digits.PrintAsDec(v);
+ break;
+
+ case static_cast<uint8_t>(FormatConversionCharInternal::a):
+ case static_cast<uint8_t>(FormatConversionCharInternal::e):
+ case static_cast<uint8_t>(FormatConversionCharInternal::f):
+ case static_cast<uint8_t>(FormatConversionCharInternal::g):
+ case static_cast<uint8_t>(FormatConversionCharInternal::A):
+ case static_cast<uint8_t>(FormatConversionCharInternal::E):
+ case static_cast<uint8_t>(FormatConversionCharInternal::F):
+ case static_cast<uint8_t>(FormatConversionCharInternal::G):
+ return ConvertFloatImpl(static_cast<double>(v), conv, sink);
+
+ default:
+ ABSL_INTERNAL_ASSUME(false);
+ }
+
+ if (conv.is_basic()) {
+ sink->Append(as_digits.with_neg_and_zero());
+ return true;
+ }
+ return ConvertIntImplInnerSlow(as_digits, conv, sink);
+}
+
+template <typename T>
+bool ConvertFloatArg(T v, const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return FormatConversionCharIsFloat(conv.conversion_char()) &&
+ ConvertFloatImpl(v, conv, sink);
+}
+
+inline bool ConvertStringArg(string_view v, const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ if (conv.is_basic()) {
+ sink->Append(v);
+ return true;
+ }
+ return sink->PutPaddedString(v, conv.width(), conv.precision(),
+ conv.has_left_flag());
+}
+
+} // namespace
+
+// ==================== Strings ====================
+StringConvertResult FormatConvertImpl(const TString &v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertStringArg(v, conv, sink)};
+}
+
+StringConvertResult FormatConvertImpl(string_view v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertStringArg(v, conv, sink)};
+}
+
+ArgConvertResult<FormatConversionCharSetUnion(
+ FormatConversionCharSetInternal::s, FormatConversionCharSetInternal::p)>
+FormatConvertImpl(const char *v, const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ if (conv.conversion_char() == FormatConversionCharInternal::p)
+ return {FormatConvertImpl(VoidPtr(v), conv, sink).value};
+ size_t len;
+ if (v == nullptr) {
+ len = 0;
+ } else if (conv.precision() < 0) {
+ len = std::strlen(v);
+ } else {
+    // If precision is set, we look for the NUL-terminator within the valid range.
+ len = std::find(v, v + conv.precision(), '\0') - v;
+ }
+ return {ConvertStringArg(string_view(v, len), conv, sink)};
+}
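+
+// For illustration: "%.3s" applied to "hello" prints "hel"; with a precision
+// set, the argument only needs to be readable for that many bytes and need
+// not be NUL-terminated.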
+
+// ==================== Raw pointers ====================
+ArgConvertResult<FormatConversionCharSetInternal::p> FormatConvertImpl(
+ VoidPtr v, const FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
+ if (!v.value) {
+ sink->Append("(nil)");
+ return {true};
+ }
+ IntDigits as_digits;
+ as_digits.PrintAsHexLower(v.value);
+ return {ConvertIntImplInnerSlow(as_digits, conv, sink)};
+}
+
+// ==================== Floats ====================
+FloatingConvertResult FormatConvertImpl(float v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertFloatArg(v, conv, sink)};
+}
+FloatingConvertResult FormatConvertImpl(double v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertFloatArg(v, conv, sink)};
+}
+FloatingConvertResult FormatConvertImpl(long double v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertFloatArg(v, conv, sink)};
+}
+
+// ==================== Chars ====================
+IntegralConvertResult FormatConvertImpl(char v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(signed char v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(unsigned char v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+
+// ==================== Ints ====================
+IntegralConvertResult FormatConvertImpl(short v, // NOLINT
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(unsigned short v, // NOLINT
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(int v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(unsigned v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(long v, // NOLINT
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(unsigned long v, // NOLINT
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(long long v, // NOLINT
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(unsigned long long v, // NOLINT
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(y_absl::int128 v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+IntegralConvertResult FormatConvertImpl(y_absl::uint128 v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
+ return {ConvertIntArg(v, conv, sink)};
+}
+
+ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_();
+
+
+
+} // namespace str_format_internal
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h
new file mode 100644
index 00000000000..59b7bcc7274
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h
@@ -0,0 +1,528 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_ARG_H_
+#define ABSL_STRINGS_INTERNAL_STR_FORMAT_ARG_H_
+
+#include <string.h>
+#include <wchar.h>
+
+#include <cstdio>
+#include <iomanip>
+#include <limits>
+#include <memory>
+#include <sstream>
+#include <util/generic/string.h>
+#include <util/stream/str.h>
+#include <type_traits>
+
+#include "y_absl/base/port.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/numeric/int128.h"
+#include "y_absl/strings/internal/str_format/extension.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+class Cord;
+class FormatCountCapture;
+class FormatSink;
+
+template <y_absl::FormatConversionCharSet C>
+struct FormatConvertResult;
+class FormatConversionSpec;
+
+namespace str_format_internal {
+
+template <typename T, typename = void>
+struct HasUserDefinedConvert : std::false_type {};
+
+template <typename T>
+struct HasUserDefinedConvert<T, void_t<decltype(AbslFormatConvert(
+ std::declval<const T&>(),
+ std::declval<const FormatConversionSpec&>(),
+ std::declval<FormatSink*>()))>>
+ : std::true_type {};
+
+void AbslFormatConvert(); // Stops the lexical name lookup
+template <typename T>
+auto FormatConvertImpl(const T& v, FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink)
+ -> decltype(AbslFormatConvert(v,
+ std::declval<const FormatConversionSpec&>(),
+ std::declval<FormatSink*>())) {
+ using FormatConversionSpecT =
+ y_absl::enable_if_t<sizeof(const T& (*)()) != 0, FormatConversionSpec>;
+ using FormatSinkT =
+ y_absl::enable_if_t<sizeof(const T& (*)()) != 0, FormatSink>;
+ auto fcs = conv.Wrap<FormatConversionSpecT>();
+ auto fs = sink->Wrap<FormatSinkT>();
+ return AbslFormatConvert(v, fcs, &fs);
+}
+
+template <typename T>
+class StreamedWrapper;
+
+// If 'v' can be converted (in the printf sense) according to 'conv',
+// then convert it, append the result to `sink`, and return `true`.
+// Otherwise, fail and return `false`.
+
+// AbslFormatConvert(v, conv, sink) is intended to be found by ADL on 'v'
+// as an extension mechanism. These FormatConvertImpl functions are the default
+// implementations.
+// The ADL search is augmented via the 'Sink*' parameter, which also
+// serves as a disambiguator to reject possible unintended 'AbslFormatConvert'
+// functions in the namespaces associated with 'v'.
+
+// Raw pointers.
+struct VoidPtr {
+ VoidPtr() = default;
+ template <typename T,
+ decltype(reinterpret_cast<uintptr_t>(std::declval<T*>())) = 0>
+ VoidPtr(T* ptr) // NOLINT
+ : value(ptr ? reinterpret_cast<uintptr_t>(ptr) : 0) {}
+ uintptr_t value;
+};
+
+template <FormatConversionCharSet C>
+struct ArgConvertResult {
+ bool value;
+};
+
+template <FormatConversionCharSet C>
+constexpr FormatConversionCharSet ExtractCharSet(FormatConvertResult<C>) {
+ return C;
+}
+
+template <FormatConversionCharSet C>
+constexpr FormatConversionCharSet ExtractCharSet(ArgConvertResult<C>) {
+ return C;
+}
+
+using StringConvertResult =
+ ArgConvertResult<FormatConversionCharSetInternal::s>;
+ArgConvertResult<FormatConversionCharSetInternal::p> FormatConvertImpl(
+ VoidPtr v, FormatConversionSpecImpl conv, FormatSinkImpl* sink);
+
+// Strings.
+StringConvertResult FormatConvertImpl(const TString& v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+StringConvertResult FormatConvertImpl(string_view v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+#if defined(ABSL_HAVE_STD_STRING_VIEW) && !defined(ABSL_USES_STD_STRING_VIEW)
+inline StringConvertResult FormatConvertImpl(std::string_view v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink) {
+ return FormatConvertImpl(y_absl::string_view(v.data(), v.size()), conv, sink);
+}
+#endif // ABSL_HAVE_STD_STRING_VIEW && !ABSL_USES_STD_STRING_VIEW
+
+ArgConvertResult<FormatConversionCharSetUnion(
+ FormatConversionCharSetInternal::s, FormatConversionCharSetInternal::p)>
+FormatConvertImpl(const char* v, const FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+
+template <class AbslCord, typename std::enable_if<std::is_same<
+ AbslCord, y_absl::Cord>::value>::type* = nullptr>
+StringConvertResult FormatConvertImpl(const AbslCord& value,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink) {
+ bool is_left = conv.has_left_flag();
+ size_t space_remaining = 0;
+
+ int width = conv.width();
+ if (width >= 0) space_remaining = width;
+
+ size_t to_write = value.size();
+
+ int precision = conv.precision();
+ if (precision >= 0)
+ to_write = (std::min)(to_write, static_cast<size_t>(precision));
+
+ space_remaining = Excess(to_write, space_remaining);
+
+ if (space_remaining > 0 && !is_left) sink->Append(space_remaining, ' ');
+
+ for (string_view piece : value.Chunks()) {
+ if (piece.size() > to_write) {
+ piece.remove_suffix(piece.size() - to_write);
+ to_write = 0;
+ } else {
+ to_write -= piece.size();
+ }
+ sink->Append(piece);
+ if (to_write == 0) {
+ break;
+ }
+ }
+
+ if (space_remaining > 0 && is_left) sink->Append(space_remaining, ' ');
+ return {true};
+}
+
+using IntegralConvertResult = ArgConvertResult<FormatConversionCharSetUnion(
+ FormatConversionCharSetInternal::c,
+ FormatConversionCharSetInternal::kNumeric,
+ FormatConversionCharSetInternal::kStar)>;
+using FloatingConvertResult =
+ ArgConvertResult<FormatConversionCharSetInternal::kFloating>;
+
+// Floats.
+FloatingConvertResult FormatConvertImpl(float v, FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+FloatingConvertResult FormatConvertImpl(double v, FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+FloatingConvertResult FormatConvertImpl(long double v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+
+// Chars.
+IntegralConvertResult FormatConvertImpl(char v, FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(signed char v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(unsigned char v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+
+// Ints.
+IntegralConvertResult FormatConvertImpl(short v, // NOLINT
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(unsigned short v, // NOLINT
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(int v, FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(unsigned v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(long v, // NOLINT
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(unsigned long v, // NOLINT
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(long long v, // NOLINT
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(unsigned long long v, // NOLINT
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(int128 v, FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(uint128 v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+template <typename T, enable_if_t<std::is_same<T, bool>::value, int> = 0>
+IntegralConvertResult FormatConvertImpl(T v, FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink) {
+ return FormatConvertImpl(static_cast<int>(v), conv, sink);
+}
+
+// We provide this function to help the checker, but it is never defined.
+// FormatArgImpl will use the underlying Convert functions instead.
+template <typename T>
+typename std::enable_if<std::is_enum<T>::value &&
+ !HasUserDefinedConvert<T>::value,
+ IntegralConvertResult>::type
+FormatConvertImpl(T v, FormatConversionSpecImpl conv, FormatSinkImpl* sink);
+
+template <typename T>
+StringConvertResult FormatConvertImpl(const StreamedWrapper<T>& v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* out) {
+ TString buf;
+ TStringOutput oss(buf);
+ oss << v.v_;
+ if (!buf) return {false};
+ return str_format_internal::FormatConvertImpl(buf, conv, out);
+}
+
+// Use templates and dependent types to delay evaluation of the function
+// until after FormatCountCapture is fully defined.
+struct FormatCountCaptureHelper {
+ template <class T = int>
+ static ArgConvertResult<FormatConversionCharSetInternal::n> ConvertHelper(
+ const FormatCountCapture& v, FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink) {
+ const y_absl::enable_if_t<sizeof(T) != 0, FormatCountCapture>& v2 = v;
+
+ if (conv.conversion_char() !=
+ str_format_internal::FormatConversionCharInternal::n) {
+ return {false};
+ }
+ *v2.p_ = static_cast<int>(sink->size());
+ return {true};
+ }
+};
+
+template <class T = int>
+ArgConvertResult<FormatConversionCharSetInternal::n> FormatConvertImpl(
+ const FormatCountCapture& v, FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink) {
+ return FormatCountCaptureHelper::ConvertHelper(v, conv, sink);
+}
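+
+// A usage sketch (assuming the public StrFormat/FormatCountCapture spelling):
+//
+//   int n = 0;
+//   y_absl::StrFormat("%s%n", "hello", y_absl::FormatCountCapture(&n));
+//   // n == 5: the number of bytes emitted before the %n.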
+
+// Helper friend struct to hide implementation details from the public API of
+// FormatArgImpl.
+struct FormatArgImplFriend {
+ template <typename Arg>
+ static bool ToInt(Arg arg, int* out) {
+ // A value initialized FormatConversionSpecImpl has a `none` conv, which
+ // tells the dispatcher to run the `int` conversion.
+ return arg.dispatcher_(arg.data_, {}, out);
+ }
+
+ template <typename Arg>
+ static bool Convert(Arg arg, FormatConversionSpecImpl conv,
+ FormatSinkImpl* out) {
+ return arg.dispatcher_(arg.data_, conv, out);
+ }
+
+ template <typename Arg>
+ static typename Arg::Dispatcher GetVTablePtrForTest(Arg arg) {
+ return arg.dispatcher_;
+ }
+};
+
+template <typename Arg>
+constexpr FormatConversionCharSet ArgumentToConv() {
+ return y_absl::str_format_internal::ExtractCharSet(
+ decltype(str_format_internal::FormatConvertImpl(
+ std::declval<const Arg&>(),
+ std::declval<const FormatConversionSpecImpl&>(),
+ std::declval<FormatSinkImpl*>())){});
+}
+
+// A type-erased handle to a format argument.
+class FormatArgImpl {
+ private:
+ enum { kInlinedSpace = 8 };
+
+ using VoidPtr = str_format_internal::VoidPtr;
+
+ union Data {
+ const void* ptr;
+ const volatile void* volatile_ptr;
+ char buf[kInlinedSpace];
+ };
+
+ using Dispatcher = bool (*)(Data, FormatConversionSpecImpl, void* out);
+
+ template <typename T>
+ struct store_by_value
+ : std::integral_constant<bool, (sizeof(T) <= kInlinedSpace) &&
+ (std::is_integral<T>::value ||
+ std::is_floating_point<T>::value ||
+ std::is_pointer<T>::value ||
+ std::is_same<VoidPtr, T>::value)> {};
+
+ enum StoragePolicy { ByPointer, ByVolatilePointer, ByValue };
+ template <typename T>
+ struct storage_policy
+ : std::integral_constant<StoragePolicy,
+ (std::is_volatile<T>::value
+ ? ByVolatilePointer
+ : (store_by_value<T>::value ? ByValue
+ : ByPointer))> {
+ };
+
+  // To reduce the number of vtables we will decay values beforehand.
+ // Anything with a user-defined Convert will get its own vtable.
+ // For everything else:
+ // - Decay char* and char arrays into `const char*`
+ // - Decay any other pointer to `const void*`
+ // - Decay all enums to their underlying type.
+ // - Decay function pointers to void*.
+ template <typename T, typename = void>
+ struct DecayType {
+ static constexpr bool kHasUserDefined =
+ str_format_internal::HasUserDefinedConvert<T>::value;
+ using type = typename std::conditional<
+ !kHasUserDefined && std::is_convertible<T, const char*>::value,
+ const char*,
+ typename std::conditional<!kHasUserDefined &&
+ std::is_convertible<T, VoidPtr>::value,
+ VoidPtr, const T&>::type>::type;
+ };
+ template <typename T>
+ struct DecayType<T,
+ typename std::enable_if<
+ !str_format_internal::HasUserDefinedConvert<T>::value &&
+ std::is_enum<T>::value>::type> {
+ using type = typename std::underlying_type<T>::type;
+ };
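+
+  // For illustration (`MyEnum` is hypothetical): FormatArgImpl(MyEnum::kValue)
+  // stores the enum's underlying integer, FormatArgImpl("literal") stores a
+  // const char*, and other objects are referenced by pointer unless they
+  // satisfy store_by_value above.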
+
+ public:
+ template <typename T>
+ explicit FormatArgImpl(const T& value) {
+ using D = typename DecayType<T>::type;
+ static_assert(
+ std::is_same<D, const T&>::value || storage_policy<D>::value == ByValue,
+ "Decayed types must be stored by value");
+ Init(static_cast<D>(value));
+ }
+
+ private:
+ friend struct str_format_internal::FormatArgImplFriend;
+ template <typename T, StoragePolicy = storage_policy<T>::value>
+ struct Manager;
+
+ template <typename T>
+ struct Manager<T, ByPointer> {
+ static Data SetValue(const T& value) {
+ Data data;
+ data.ptr = std::addressof(value);
+ return data;
+ }
+
+ static const T& Value(Data arg) { return *static_cast<const T*>(arg.ptr); }
+ };
+
+ template <typename T>
+ struct Manager<T, ByVolatilePointer> {
+ static Data SetValue(const T& value) {
+ Data data;
+ data.volatile_ptr = &value;
+ return data;
+ }
+
+ static const T& Value(Data arg) {
+ return *static_cast<const T*>(arg.volatile_ptr);
+ }
+ };
+
+ template <typename T>
+ struct Manager<T, ByValue> {
+ static Data SetValue(const T& value) {
+ Data data;
+ memcpy(data.buf, &value, sizeof(value));
+ return data;
+ }
+
+ static T Value(Data arg) {
+ T value;
+ memcpy(&value, arg.buf, sizeof(T));
+ return value;
+ }
+ };
+
+ template <typename T>
+ void Init(const T& value) {
+ data_ = Manager<T>::SetValue(value);
+ dispatcher_ = &Dispatch<T>;
+ }
+
+ template <typename T>
+ static int ToIntVal(const T& val) {
+ using CommonType = typename std::conditional<std::is_signed<T>::value,
+ int64_t, uint64_t>::type;
+ if (static_cast<CommonType>(val) >
+ static_cast<CommonType>((std::numeric_limits<int>::max)())) {
+ return (std::numeric_limits<int>::max)();
+ } else if (std::is_signed<T>::value &&
+ static_cast<CommonType>(val) <
+ static_cast<CommonType>((std::numeric_limits<int>::min)())) {
+ return (std::numeric_limits<int>::min)();
+ }
+ return static_cast<int>(val);
+ }
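+
+  // e.g. ToIntVal(int64_t{1} << 40) saturates to the int maximum and
+  // ToIntVal(-(int64_t{1} << 40)) saturates to the int minimum.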
+
+ template <typename T>
+ static bool ToInt(Data arg, int* out, std::true_type /* is_integral */,
+ std::false_type) {
+ *out = ToIntVal(Manager<T>::Value(arg));
+ return true;
+ }
+
+ template <typename T>
+ static bool ToInt(Data arg, int* out, std::false_type,
+ std::true_type /* is_enum */) {
+ *out = ToIntVal(static_cast<typename std::underlying_type<T>::type>(
+ Manager<T>::Value(arg)));
+ return true;
+ }
+
+ template <typename T>
+ static bool ToInt(Data, int*, std::false_type, std::false_type) {
+ return false;
+ }
+
+ template <typename T>
+ static bool Dispatch(Data arg, FormatConversionSpecImpl spec, void* out) {
+ // A `none` conv indicates that we want the `int` conversion.
+ if (ABSL_PREDICT_FALSE(spec.conversion_char() ==
+ FormatConversionCharInternal::kNone)) {
+ return ToInt<T>(arg, static_cast<int*>(out), std::is_integral<T>(),
+ std::is_enum<T>());
+ }
+ if (ABSL_PREDICT_FALSE(!Contains(ArgumentToConv<T>(),
+ spec.conversion_char()))) {
+ return false;
+ }
+ return str_format_internal::FormatConvertImpl(
+ Manager<T>::Value(arg), spec,
+ static_cast<FormatSinkImpl*>(out))
+ .value;
+ }
+
+ Data data_;
+ Dispatcher dispatcher_;
+};
+
+#define ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(T, E) \
+ E template bool FormatArgImpl::Dispatch<T>(Data, FormatConversionSpecImpl, \
+ void*)
+
+#define ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_(...) \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(str_format_internal::VoidPtr, \
+ __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(bool, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(char, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(signed char, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(unsigned char, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(short, __VA_ARGS__); /* NOLINT */ \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(unsigned short, /* NOLINT */ \
+ __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(int, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(unsigned int, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(long, __VA_ARGS__); /* NOLINT */ \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(unsigned long, /* NOLINT */ \
+ __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(long long, /* NOLINT */ \
+ __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(unsigned long long, /* NOLINT */ \
+ __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(int128, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(uint128, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(float, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(double, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(long double, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(const char*, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(TString, __VA_ARGS__); \
+ ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(string_view, __VA_ARGS__)
+
+ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_(extern);
+
+
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_ARG_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.cc
new file mode 100644
index 00000000000..211ce25dea0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.cc
@@ -0,0 +1,258 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/str_format/bind.h"
+
+#include <cerrno>
+#include <limits>
+#include <sstream>
+#include <util/generic/string.h>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+
+namespace {
+
+inline bool BindFromPosition(int position, int* value,
+ y_absl::Span<const FormatArgImpl> pack) {
+ assert(position > 0);
+ if (static_cast<size_t>(position) > pack.size()) {
+ return false;
+ }
+ // -1 because positions are 1-based
+ return FormatArgImplFriend::ToInt(pack[position - 1], value);
+}
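+
+// e.g. (illustrative) for the positional spec "%1$*2$d", the width comes from
+// argument position 2, so BindFromPosition(2, &width, pack) reads pack[1].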
+
+class ArgContext {
+ public:
+ explicit ArgContext(y_absl::Span<const FormatArgImpl> pack) : pack_(pack) {}
+
+ // Fill 'bound' with the results of applying the context's argument pack
+ // to the specified 'unbound'. We synthesize a BoundConversion by
+  // lining up an UnboundConversion with a user argument. We also
+ // resolve any '*' specifiers for width and precision, so after
+ // this call, 'bound' has all the information it needs to be formatted.
+ // Returns false on failure.
+ bool Bind(const UnboundConversion* unbound, BoundConversion* bound);
+
+ private:
+ y_absl::Span<const FormatArgImpl> pack_;
+};
+
+inline bool ArgContext::Bind(const UnboundConversion* unbound,
+ BoundConversion* bound) {
+ const FormatArgImpl* arg = nullptr;
+ int arg_position = unbound->arg_position;
+ if (static_cast<size_t>(arg_position - 1) >= pack_.size()) return false;
+ arg = &pack_[arg_position - 1]; // 1-based
+
+ if (unbound->flags != Flags::kBasic) {
+ int width = unbound->width.value();
+ bool force_left = false;
+ if (unbound->width.is_from_arg()) {
+ if (!BindFromPosition(unbound->width.get_from_arg(), &width, pack_))
+ return false;
+ if (width < 0) {
+ // "A negative field width is taken as a '-' flag followed by a
+ // positive field width."
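+        // e.g. (illustrative) a width argument of -5 behaves like "%-5d".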
+ force_left = true;
+ // Make sure we don't overflow the width when negating it.
+ width = -std::max(width, -std::numeric_limits<int>::max());
+ }
+ }
+
+ int precision = unbound->precision.value();
+ if (unbound->precision.is_from_arg()) {
+ if (!BindFromPosition(unbound->precision.get_from_arg(), &precision,
+ pack_))
+ return false;
+ }
+
+ FormatConversionSpecImplFriend::SetWidth(width, bound);
+ FormatConversionSpecImplFriend::SetPrecision(precision, bound);
+
+ if (force_left) {
+ FormatConversionSpecImplFriend::SetFlags(unbound->flags | Flags::kLeft,
+ bound);
+ } else {
+ FormatConversionSpecImplFriend::SetFlags(unbound->flags, bound);
+ }
+ } else {
+ FormatConversionSpecImplFriend::SetFlags(unbound->flags, bound);
+ FormatConversionSpecImplFriend::SetWidth(-1, bound);
+ FormatConversionSpecImplFriend::SetPrecision(-1, bound);
+ }
+ FormatConversionSpecImplFriend::SetConversionChar(unbound->conv, bound);
+ bound->set_arg(arg);
+ return true;
+}
+
+template <typename Converter>
+class ConverterConsumer {
+ public:
+ ConverterConsumer(Converter converter, y_absl::Span<const FormatArgImpl> pack)
+ : converter_(converter), arg_context_(pack) {}
+
+ bool Append(string_view s) {
+ converter_.Append(s);
+ return true;
+ }
+ bool ConvertOne(const UnboundConversion& conv, string_view conv_string) {
+ BoundConversion bound;
+ if (!arg_context_.Bind(&conv, &bound)) return false;
+ return converter_.ConvertOne(bound, conv_string);
+ }
+
+ private:
+ Converter converter_;
+ ArgContext arg_context_;
+};
+
+template <typename Converter>
+bool ConvertAll(const UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args, Converter converter) {
+ if (format.has_parsed_conversion()) {
+ return format.parsed_conversion()->ProcessFormat(
+ ConverterConsumer<Converter>(converter, args));
+ } else {
+ return ParseFormatString(format.str(),
+ ConverterConsumer<Converter>(converter, args));
+ }
+}
+
+class DefaultConverter {
+ public:
+ explicit DefaultConverter(FormatSinkImpl* sink) : sink_(sink) {}
+
+ void Append(string_view s) const { sink_->Append(s); }
+
+ bool ConvertOne(const BoundConversion& bound, string_view /*conv*/) const {
+ return FormatArgImplFriend::Convert(*bound.arg(), bound, sink_);
+ }
+
+ private:
+ FormatSinkImpl* sink_;
+};
+
+class SummarizingConverter {
+ public:
+ explicit SummarizingConverter(FormatSinkImpl* sink) : sink_(sink) {}
+
+ void Append(string_view s) const { sink_->Append(s); }
+
+ bool ConvertOne(const BoundConversion& bound, string_view /*conv*/) const {
+ UntypedFormatSpecImpl spec("%d");
+
+ std::ostringstream ss;
+ ss << "{" << Streamable(spec, {*bound.arg()}) << ":"
+ << FormatConversionSpecImplFriend::FlagsToString(bound);
+ if (bound.width() >= 0) ss << bound.width();
+ if (bound.precision() >= 0) ss << "." << bound.precision();
+ ss << bound.conversion_char() << "}";
+ Append(ss.str());
+ return true;
+ }
+
+ private:
+ FormatSinkImpl* sink_;
+};
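+
+// e.g. (illustrative) summarizing "%5d" bound to the argument 12 appends
+// "{12:5d}": the value rendered via "%d", then flags (none here), the width,
+// an optional precision, and the conversion char.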
+
+} // namespace
+
+bool BindWithPack(const UnboundConversion* props,
+ y_absl::Span<const FormatArgImpl> pack,
+ BoundConversion* bound) {
+ return ArgContext(pack).Bind(props, bound);
+}
+
+TString Summarize(const UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args) {
+ typedef SummarizingConverter Converter;
+ TString out;
+ {
+    // Inner block to destroy the sink before returning `out`, ensuring a
+    // final flush.
+ FormatSinkImpl sink(&out);
+ if (!ConvertAll(format, args, Converter(&sink))) {
+ return "";
+ }
+ }
+ return out;
+}
+
+bool FormatUntyped(FormatRawSinkImpl raw_sink,
+ const UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args) {
+ FormatSinkImpl sink(raw_sink);
+ using Converter = DefaultConverter;
+ return ConvertAll(format, args, Converter(&sink));
+}
+
+std::ostream& Streamable::Print(std::ostream& os) const {
+ if (!FormatUntyped(&os, format_, args_)) os.setstate(std::ios::failbit);
+ return os;
+}
+
+TString& AppendPack(TString* out, const UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args) {
+ size_t orig = out->size();
+ if (ABSL_PREDICT_FALSE(!FormatUntyped(out, format, args))) {
+ out->erase(orig);
+ }
+ return *out;
+}
+
+TString FormatPack(const UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args) {
+ TString out;
+ if (ABSL_PREDICT_FALSE(!FormatUntyped(&out, format, args))) {
+ out.clear();
+ }
+ return out;
+}
+
+int FprintF(std::FILE* output, const UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args) {
+ FILERawSink sink(output);
+ if (!FormatUntyped(&sink, format, args)) {
+ errno = EINVAL;
+ return -1;
+ }
+ if (sink.error()) {
+ errno = sink.error();
+ return -1;
+ }
+ if (sink.count() > static_cast<size_t>(std::numeric_limits<int>::max())) {
+ errno = EFBIG;
+ return -1;
+ }
+ return static_cast<int>(sink.count());
+}
+
+int SnprintF(char* output, size_t size, const UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args) {
+ BufferRawSink sink(output, size ? size - 1 : 0);
+ if (!FormatUntyped(&sink, format, args)) {
+ errno = EINVAL;
+ return -1;
+ }
+ size_t total = sink.total_written();
+ if (size) output[std::min(total, size - 1)] = 0;
+ return static_cast<int>(total);
+}
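+
+// Illustrative semantics, mirroring snprintf: formatting "abcdef" with
+// size == 4 writes "abc" plus a terminating NUL and returns 6, the
+// untruncated length.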
+
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h
new file mode 100644
index 00000000000..3966610710a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h
@@ -0,0 +1,217 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_BIND_H_
+#define ABSL_STRINGS_INTERNAL_STR_FORMAT_BIND_H_
+
+#include <array>
+#include <cstdio>
+#include <sstream>
+#include <util/generic/string.h>
+
+#include "y_absl/base/port.h"
+#include "y_absl/strings/internal/str_format/arg.h"
+#include "y_absl/strings/internal/str_format/checker.h"
+#include "y_absl/strings/internal/str_format/parser.h"
+#include "y_absl/types/span.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+class UntypedFormatSpec;
+
+namespace str_format_internal {
+
+class BoundConversion : public FormatConversionSpecImpl {
+ public:
+ const FormatArgImpl* arg() const { return arg_; }
+ void set_arg(const FormatArgImpl* a) { arg_ = a; }
+
+ private:
+ const FormatArgImpl* arg_;
+};
+
+// This is the type-erased class that the implementation uses.
+class UntypedFormatSpecImpl {
+ public:
+ UntypedFormatSpecImpl() = delete;
+
+ explicit UntypedFormatSpecImpl(string_view s)
+ : data_(s.data()), size_(s.size()) {}
+ explicit UntypedFormatSpecImpl(
+ const str_format_internal::ParsedFormatBase* pc)
+ : data_(pc), size_(~size_t{}) {}
+
+ bool has_parsed_conversion() const { return size_ == ~size_t{}; }
+
+ string_view str() const {
+ assert(!has_parsed_conversion());
+ return string_view(static_cast<const char*>(data_), size_);
+ }
+ const str_format_internal::ParsedFormatBase* parsed_conversion() const {
+ assert(has_parsed_conversion());
+ return static_cast<const str_format_internal::ParsedFormatBase*>(data_);
+ }
+
+ template <typename T>
+ static const UntypedFormatSpecImpl& Extract(const T& s) {
+ return s.spec_;
+ }
+
+ private:
+ const void* data_;
+ size_t size_;
+};
+
+template <typename T, FormatConversionCharSet...>
+struct MakeDependent {
+ using type = T;
+};
+
+// Implicitly convertible from `const char*`, `string_view`, and the
+// `ExtendedParsedFormat` type. This abstraction allows all format functions to
+// operate on any of them without providing too many overloads.
+template <FormatConversionCharSet... Args>
+class FormatSpecTemplate
+ : public MakeDependent<UntypedFormatSpec, Args...>::type {
+ using Base = typename MakeDependent<UntypedFormatSpec, Args...>::type;
+
+ public:
+#ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
+
+ // Honeypot overload for when the string is not constexpr.
+ // We use the 'unavailable' attribute to give a better compiler error than
+ // just 'method is deleted'.
+ FormatSpecTemplate(...) // NOLINT
+ __attribute__((unavailable("Format string is not constexpr.")));
+
+ // Honeypot overload for when the format is constexpr and invalid.
+ // We use the 'unavailable' attribute to give a better compiler error than
+ // just 'method is deleted'.
+ // To avoid checking the format twice, we just check that the format is
+ // constexpr. If it is valid, then the overload below will kick in.
+ // We add the template here to make this overload have lower priority.
+ template <typename = void>
+ FormatSpecTemplate(const char* s) // NOLINT
+ __attribute__((
+ enable_if(str_format_internal::EnsureConstexpr(s), "constexpr trap"),
+ unavailable(
+ "Format specified does not match the arguments passed.")));
+
+ template <typename T = void>
+ FormatSpecTemplate(string_view s) // NOLINT
+ __attribute__((enable_if(str_format_internal::EnsureConstexpr(s),
+ "constexpr trap"))) {
+ static_assert(sizeof(T*) == 0,
+ "Format specified does not match the arguments passed.");
+ }
+
+ // Good format overload.
+ FormatSpecTemplate(const char* s) // NOLINT
+ __attribute__((enable_if(ValidFormatImpl<Args...>(s), "bad format trap")))
+ : Base(s) {}
+
+ FormatSpecTemplate(string_view s) // NOLINT
+ __attribute__((enable_if(ValidFormatImpl<Args...>(s), "bad format trap")))
+ : Base(s) {}
+
+#else // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
+
+ FormatSpecTemplate(const char* s) : Base(s) {} // NOLINT
+ FormatSpecTemplate(string_view s) : Base(s) {} // NOLINT
+
+#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
+
+ template <
+ FormatConversionCharSet... C,
+ typename = typename std::enable_if<sizeof...(C) == sizeof...(Args)>::type,
+ typename = typename std::enable_if<AllOf(Contains(Args,
+ C)...)>::type>
+ FormatSpecTemplate(const ExtendedParsedFormat<C...>& pc) // NOLINT
+ : Base(&pc) {}
+};
+
+class Streamable {
+ public:
+ Streamable(const UntypedFormatSpecImpl& format,
+ y_absl::Span<const FormatArgImpl> args)
+ : format_(format) {
+ if (args.size() <= ABSL_ARRAYSIZE(few_args_)) {
+ for (size_t i = 0; i < args.size(); ++i) {
+ few_args_[i] = args[i];
+ }
+ args_ = y_absl::MakeSpan(few_args_, args.size());
+ } else {
+ many_args_.assign(args.begin(), args.end());
+ args_ = many_args_;
+ }
+ }
+
+ std::ostream& Print(std::ostream& os) const;
+
+ friend std::ostream& operator<<(std::ostream& os, const Streamable& l) {
+ return l.Print(os);
+ }
+
+ private:
+ const UntypedFormatSpecImpl& format_;
+ y_absl::Span<const FormatArgImpl> args_;
+ // if args_.size() is 4 or less:
+ FormatArgImpl few_args_[4] = {FormatArgImpl(0), FormatArgImpl(0),
+ FormatArgImpl(0), FormatArgImpl(0)};
+ // if args_.size() is more than 4:
+ std::vector<FormatArgImpl> many_args_;
+};
+
+// for testing
+TString Summarize(UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args);
+bool BindWithPack(const UnboundConversion* props,
+ y_absl::Span<const FormatArgImpl> pack, BoundConversion* bound);
+
+bool FormatUntyped(FormatRawSinkImpl raw_sink,
+ UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args);
+
+TString& AppendPack(TString* out, UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args);
+
+TString FormatPack(const UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args);
+
+int FprintF(std::FILE* output, UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args);
+int SnprintF(char* output, size_t size, UntypedFormatSpecImpl format,
+ y_absl::Span<const FormatArgImpl> args);
+
+// Returned by Streamed(v). Converts via '%s' to the TString created
+// by std::ostream << v.
+template <typename T>
+class StreamedWrapper {
+ public:
+ explicit StreamedWrapper(const T& v) : v_(v) { }
+
+ private:
+ template <typename S>
+ friend ArgConvertResult<FormatConversionCharSetInternal::s> FormatConvertImpl(
+ const StreamedWrapper<S>& v, FormatConversionSpecImpl conv,
+ FormatSinkImpl* out);
+ const T& v_;
+};
+
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_BIND_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/checker.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/checker.h
new file mode 100644
index 00000000000..7c530d25075
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/checker.h
@@ -0,0 +1,333 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_CHECKER_H_
+#define ABSL_STRINGS_INTERNAL_STR_FORMAT_CHECKER_H_
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/strings/internal/str_format/arg.h"
+#include "y_absl/strings/internal/str_format/extension.h"
+
+// Compile time check support for entry points.
+
+#ifndef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
+#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__)
+#define ABSL_INTERNAL_ENABLE_FORMAT_CHECKER 1
+#endif // ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__)
+#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+
+constexpr bool AllOf() { return true; }
+
+template <typename... T>
+constexpr bool AllOf(bool b, T... t) {
+ return b && AllOf(t...);
+}
+
+#ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
+
+constexpr bool ContainsChar(const char* chars, char c) {
+ return *chars == c || (*chars && ContainsChar(chars + 1, c));
+}
+
+// A constexpr compatible list of Convs.
+struct ConvList {
+ const FormatConversionCharSet* array;
+ int count;
+
+  // We do the bound check here to avoid having to do it in the callers.
+ // Returning an empty FormatConversionCharSet has the same effect as
+ // short circuiting because it will never match any conversion.
+ constexpr FormatConversionCharSet operator[](int i) const {
+ return i < count ? array[i] : FormatConversionCharSet{};
+ }
+
+ constexpr ConvList without_front() const {
+ return count != 0 ? ConvList{array + 1, count - 1} : *this;
+ }
+};
+
+template <size_t count>
+struct ConvListT {
+ // Make sure the array has size > 0.
+ FormatConversionCharSet list[count ? count : 1];
+};
+
+constexpr char GetChar(string_view str, size_t index) {
+ return index < str.size() ? str[index] : char{};
+}
+
+constexpr string_view ConsumeFront(string_view str, size_t len = 1) {
+ return len <= str.size() ? string_view(str.data() + len, str.size() - len)
+ : string_view();
+}
+
+constexpr string_view ConsumeAnyOf(string_view format, const char* chars) {
+ return ContainsChar(chars, GetChar(format, 0))
+ ? ConsumeAnyOf(ConsumeFront(format), chars)
+ : format;
+}
+
+constexpr bool IsDigit(char c) { return c >= '0' && c <= '9'; }
+
+// Helper class for the ParseDigits function.
+// It encapsulates the two return values we need there.
+struct Integer {
+ string_view format;
+ int value;
+
+ // If the next character is a '$', consume it.
+ // Otherwise, make `this` an invalid positional argument.
+ constexpr Integer ConsumePositionalDollar() const {
+ return GetChar(format, 0) == '$' ? Integer{ConsumeFront(format), value}
+ : Integer{format, 0};
+ }
+};
+
+constexpr Integer ParseDigits(string_view format, int value = 0) {
+ return IsDigit(GetChar(format, 0))
+ ? ParseDigits(ConsumeFront(format),
+ 10 * value + GetChar(format, 0) - '0')
+ : Integer{format, value};
+}
+
+// Parse digits for a positional argument.
+// The parsing also consumes the '$'.
+constexpr Integer ParsePositional(string_view format) {
+ return ParseDigits(format).ConsumePositionalDollar();
+}
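+
+// e.g. (illustrative) ParseDigits("12d") yields {format: "d", value: 12}, and
+// ParsePositional("2$d") yields {format: "d", value: 2}.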
+
+// Parses a single conversion specifier.
+// See ConvParser::Run() for post conditions.
+class ConvParser {
+ constexpr ConvParser SetFormat(string_view format) const {
+ return ConvParser(format, args_, error_, arg_position_, is_positional_);
+ }
+
+ constexpr ConvParser SetArgs(ConvList args) const {
+ return ConvParser(format_, args, error_, arg_position_, is_positional_);
+ }
+
+ constexpr ConvParser SetError(bool error) const {
+ return ConvParser(format_, args_, error_ || error, arg_position_,
+ is_positional_);
+ }
+
+ constexpr ConvParser SetArgPosition(int arg_position) const {
+ return ConvParser(format_, args_, error_, arg_position, is_positional_);
+ }
+
+ // Consumes the next arg and verifies that it matches `conv`.
+ // `error_` is set if there is no next arg or if it doesn't match `conv`.
+ constexpr ConvParser ConsumeNextArg(char conv) const {
+ return SetArgs(args_.without_front()).SetError(!Contains(args_[0], conv));
+ }
+
+ // Verify that positional argument `i.value` matches `conv`.
+ // `error_` is set if `i.value` is not a valid argument or if it doesn't
+ // match.
+ constexpr ConvParser VerifyPositional(Integer i, char conv) const {
+ return SetFormat(i.format).SetError(!Contains(args_[i.value - 1], conv));
+ }
+
+ // Parse the position of the arg and store it in `arg_position_`.
+ constexpr ConvParser ParseArgPosition(Integer arg) const {
+ return SetFormat(arg.format).SetArgPosition(arg.value);
+ }
+
+ // Consume the flags.
+ constexpr ConvParser ParseFlags() const {
+ return SetFormat(ConsumeAnyOf(format_, "-+ #0"));
+ }
+
+ // Consume the width.
+ // If it is '*', we verify that it matches `args_`. `error_` is set if it
+ // doesn't match.
+ constexpr ConvParser ParseWidth() const {
+ return IsDigit(GetChar(format_, 0))
+ ? SetFormat(ParseDigits(format_).format)
+ : GetChar(format_, 0) == '*'
+ ? is_positional_
+ ? VerifyPositional(
+ ParsePositional(ConsumeFront(format_)), '*')
+ : SetFormat(ConsumeFront(format_))
+ .ConsumeNextArg('*')
+ : *this;
+ }
+
+ // Consume the precision.
+ // If it is '*', we verify that it matches `args_`. `error_` is set if it
+ // doesn't match.
+ constexpr ConvParser ParsePrecision() const {
+ return GetChar(format_, 0) != '.'
+ ? *this
+ : GetChar(format_, 1) == '*'
+ ? is_positional_
+ ? VerifyPositional(
+ ParsePositional(ConsumeFront(format_, 2)), '*')
+ : SetFormat(ConsumeFront(format_, 2))
+ .ConsumeNextArg('*')
+ : SetFormat(ParseDigits(ConsumeFront(format_)).format);
+ }
+
+ // Consume the length characters.
+ constexpr ConvParser ParseLength() const {
+ return SetFormat(ConsumeAnyOf(format_, "lLhjztq"));
+ }
+
+ // Consume the conversion character and verify that it matches `args_`.
+ // `error_` is set if it doesn't match.
+ constexpr ConvParser ParseConversion() const {
+ return is_positional_
+ ? VerifyPositional({ConsumeFront(format_), arg_position_},
+ GetChar(format_, 0))
+ : ConsumeNextArg(GetChar(format_, 0))
+ .SetFormat(ConsumeFront(format_));
+ }
+
+ constexpr ConvParser(string_view format, ConvList args, bool error,
+ int arg_position, bool is_positional)
+ : format_(format),
+ args_(args),
+ error_(error),
+ arg_position_(arg_position),
+ is_positional_(is_positional) {}
+
+ public:
+ constexpr ConvParser(string_view format, ConvList args, bool is_positional)
+ : format_(format),
+ args_(args),
+ error_(false),
+ arg_position_(0),
+ is_positional_(is_positional) {}
+
+ // Consume the whole conversion specifier.
+ // `format()` will be set to the character after the conversion character.
+ // `error()` will be set if any of the arguments do not match.
+ constexpr ConvParser Run() const {
+ return (is_positional_ ? ParseArgPosition(ParsePositional(format_)) : *this)
+ .ParseFlags()
+ .ParseWidth()
+ .ParsePrecision()
+ .ParseLength()
+ .ParseConversion();
+ }
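+
+  // e.g. (illustrative) with format_ == "-10.5f" in non-positional mode,
+  // Run() consumes the '-' flag, the width "10", the precision ".5", no
+  // length modifiers, and finally checks 'f' against the next entry in args_.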
+
+ constexpr string_view format() const { return format_; }
+ constexpr ConvList args() const { return args_; }
+ constexpr bool error() const { return error_; }
+ constexpr bool is_positional() const { return is_positional_; }
+
+ private:
+ string_view format_;
+ // Current list of arguments. If we are not in positional mode we will consume
+ // from the front.
+ ConvList args_;
+ bool error_;
+ // Holds the argument position of the conversion character, if we are in
+ // positional mode. Otherwise, it is unspecified.
+ int arg_position_;
+ // Whether we are in positional mode.
+ // It changes the behavior of '*' and where to find the converted argument.
+ bool is_positional_;
+};
+
+// Parses a whole format expression.
+// See FormatParser::Run().
+class FormatParser {
+ static constexpr bool FoundPercent(string_view format) {
+ return format.empty() ||
+ (GetChar(format, 0) == '%' && GetChar(format, 1) != '%');
+ }
+
+ // We use an inner function to increase the recursion limit.
+ // The inner function consumes up to `limit` characters on every run.
+ // This increases the limit from 512 to ~512*limit.
+ static constexpr string_view ConsumeNonPercentInner(string_view format,
+ int limit = 20) {
+ return FoundPercent(format) || !limit
+ ? format
+ : ConsumeNonPercentInner(
+ ConsumeFront(format, GetChar(format, 0) == '%' &&
+ GetChar(format, 1) == '%'
+ ? 2
+ : 1),
+ limit - 1);
+ }
+
+ // Consume characters until the next conversion spec %.
+ // It skips %%.
+ static constexpr string_view ConsumeNonPercent(string_view format) {
+ return FoundPercent(format)
+ ? format
+ : ConsumeNonPercent(ConsumeNonPercentInner(format));
+ }
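+
+  // e.g. (illustrative) ConsumeNonPercent("ab%%cd%d") skips "ab", the escaped
+  // "%%", and "cd", returning "%d" where the next conversion begins.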
+
+ static constexpr bool IsPositional(string_view format) {
+ return IsDigit(GetChar(format, 0)) ? IsPositional(ConsumeFront(format))
+ : GetChar(format, 0) == '$';
+ }
+
+ constexpr bool RunImpl(bool is_positional) const {
+ // In non-positional mode we require all arguments to be consumed.
+ // In positional mode just reaching the end of the format without errors is
+ // enough.
+ return (format_.empty() && (is_positional || args_.count == 0)) ||
+ (!format_.empty() &&
+ ValidateArg(
+ ConvParser(ConsumeFront(format_), args_, is_positional).Run()));
+ }
+
+ constexpr bool ValidateArg(ConvParser conv) const {
+ return !conv.error() && FormatParser(conv.format(), conv.args())
+ .RunImpl(conv.is_positional());
+ }
+
+ public:
+ constexpr FormatParser(string_view format, ConvList args)
+ : format_(ConsumeNonPercent(format)), args_(args) {}
+
+ // Runs the parser for `format` and `args`.
+ // It verifies that the format is valid and that all conversion specifiers
+ // match the arguments passed.
+  // In non-positional mode it also verifies that all arguments are consumed.
+ constexpr bool Run() const {
+ return RunImpl(!format_.empty() && IsPositional(ConsumeFront(format_)));
+ }
+
+ private:
+ string_view format_;
+ // Current list of arguments.
+ // If we are not in positional mode we will consume from the front and will
+ // have to be empty in the end.
+ ConvList args_;
+};
+
+template <FormatConversionCharSet... C>
+constexpr bool ValidFormatImpl(string_view format) {
+ return FormatParser(format,
+ {ConvListT<sizeof...(C)>{{C...}}.list, sizeof...(C)})
+ .Run();
+}
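+
+// e.g. (illustrative) checking "%d" against one argument whose char set
+// contains 'd' succeeds, while "%d %d" against that single argument fails
+// because the second conversion has no argument left to consume.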
+
+#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
+
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_CHECKER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.cc
new file mode 100644
index 00000000000..f2a4169ae7c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.cc
@@ -0,0 +1,75 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/str_format/extension.h"
+
+#include <errno.h>
+#include <algorithm>
+#include <util/generic/string.h>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+
+TString FlagsToString(Flags v) {
+ TString s;
+ s.append(FlagsContains(v, Flags::kLeft) ? "-" : "");
+ s.append(FlagsContains(v, Flags::kShowPos) ? "+" : "");
+ s.append(FlagsContains(v, Flags::kSignCol) ? " " : "");
+ s.append(FlagsContains(v, Flags::kAlt) ? "#" : "");
+ s.append(FlagsContains(v, Flags::kZero) ? "0" : "");
+ return s;
+}
+
+#define ABSL_INTERNAL_X_VAL(id) \
+ constexpr y_absl::FormatConversionChar FormatConversionCharInternal::id;
+ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, )
+#undef ABSL_INTERNAL_X_VAL
+// NOLINTNEXTLINE(readability-redundant-declaration)
+constexpr y_absl::FormatConversionChar FormatConversionCharInternal::kNone;
+
+#define ABSL_INTERNAL_CHAR_SET_CASE(c) \
+ constexpr FormatConversionCharSet FormatConversionCharSetInternal::c;
+ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_CHAR_SET_CASE, )
+#undef ABSL_INTERNAL_CHAR_SET_CASE
+
+// NOLINTNEXTLINE(readability-redundant-declaration)
+constexpr FormatConversionCharSet FormatConversionCharSetInternal::kStar;
+// NOLINTNEXTLINE(readability-redundant-declaration)
+constexpr FormatConversionCharSet FormatConversionCharSetInternal::kIntegral;
+// NOLINTNEXTLINE(readability-redundant-declaration)
+constexpr FormatConversionCharSet FormatConversionCharSetInternal::kFloating;
+// NOLINTNEXTLINE(readability-redundant-declaration)
+constexpr FormatConversionCharSet FormatConversionCharSetInternal::kNumeric;
+// NOLINTNEXTLINE(readability-redundant-declaration)
+constexpr FormatConversionCharSet FormatConversionCharSetInternal::kPointer;
+
+bool FormatSinkImpl::PutPaddedString(string_view value, int width,
+ int precision, bool left) {
+ size_t space_remaining = 0;
+ if (width >= 0) space_remaining = width;
+ size_t n = value.size();
+ if (precision >= 0) n = std::min(n, static_cast<size_t>(precision));
+ string_view shown(value.data(), n);
+ space_remaining = Excess(shown.size(), space_remaining);
+ if (!left) Append(space_remaining, ' ');
+ Append(shown);
+ if (left) Append(space_remaining, ' ');
+ return true;
+}
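+
+// e.g. (illustrative) PutPaddedString("hello", /*width=*/8, /*precision=*/3,
+// /*left=*/false) appends "     hel": precision clips the value to "hel" and
+// the remaining five columns become leading spaces.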
+
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h
new file mode 100644
index 00000000000..e5de5cb6a1a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h
@@ -0,0 +1,445 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_EXTENSION_H_
+#define ABSL_STRINGS_INTERNAL_STR_FORMAT_EXTENSION_H_
+
+#include <limits.h>
+
+#include <cstddef>
+#include <cstring>
+#include <ostream>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/port.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/internal/str_format/output.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+enum class FormatConversionChar : uint8_t;
+enum class FormatConversionCharSet : uint64_t;
+
+namespace str_format_internal {
+
+class FormatRawSinkImpl {
+ public:
+  // Implicitly convert from any type that provides the `InvokeFlush` hook
+  // detected by the expression below.
+ template <typename T, decltype(str_format_internal::InvokeFlush(
+ std::declval<T*>(), string_view()))* = nullptr>
+ FormatRawSinkImpl(T* raw) // NOLINT
+ : sink_(raw), write_(&FormatRawSinkImpl::Flush<T>) {}
+
+ void Write(string_view s) { write_(sink_, s); }
+
+ template <typename T>
+ static FormatRawSinkImpl Extract(T s) {
+ return s.sink_;
+ }
+
+ private:
+ template <typename T>
+ static void Flush(void* r, string_view s) {
+ str_format_internal::InvokeFlush(static_cast<T*>(r), s);
+ }
+
+ void* sink_;
+ void (*write_)(void*, string_view);
+};
+
+// An abstraction to which conversions write their string data.
+class FormatSinkImpl {
+ public:
+ explicit FormatSinkImpl(FormatRawSinkImpl raw) : raw_(raw) {}
+
+ ~FormatSinkImpl() { Flush(); }
+
+ void Flush() {
+ raw_.Write(string_view(buf_, pos_ - buf_));
+ pos_ = buf_;
+ }
+
+ void Append(size_t n, char c) {
+ if (n == 0) return;
+ size_ += n;
+ auto raw_append = [&](size_t count) {
+ memset(pos_, c, count);
+ pos_ += count;
+ };
+ while (n > Avail()) {
+ n -= Avail();
+ if (Avail() > 0) {
+ raw_append(Avail());
+ }
+ Flush();
+ }
+ raw_append(n);
+ }
+
+ void Append(string_view v) {
+ size_t n = v.size();
+ if (n == 0) return;
+ size_ += n;
+ if (n >= Avail()) {
+ Flush();
+ raw_.Write(v);
+ return;
+ }
+ memcpy(pos_, v.data(), n);
+ pos_ += n;
+ }
+
+ size_t size() const { return size_; }
+
+ // Put 'v' to 'sink' with specified width, precision, and left flag.
+ bool PutPaddedString(string_view v, int width, int precision, bool left);
+
+ template <typename T>
+ T Wrap() {
+ return T(this);
+ }
+
+ template <typename T>
+ static FormatSinkImpl* Extract(T* s) {
+ return s->sink_;
+ }
+
+ private:
+ size_t Avail() const { return buf_ + sizeof(buf_) - pos_; }
+
+ FormatRawSinkImpl raw_;
+ size_t size_ = 0;
+ char* pos_ = buf_;
+ char buf_[1024];
+};
+
+enum class Flags : uint8_t {
+ kBasic = 0,
+ kLeft = 1 << 0,
+ kShowPos = 1 << 1,
+ kSignCol = 1 << 2,
+ kAlt = 1 << 3,
+ kZero = 1 << 4,
+ // This is not a real flag. It just exists to turn off kBasic when no other
+ // flags are set. This is for when width/precision are specified.
+ kNonBasic = 1 << 5,
+};
+
+constexpr Flags operator|(Flags a, Flags b) {
+ return static_cast<Flags>(static_cast<uint8_t>(a) | static_cast<uint8_t>(b));
+}
+
+constexpr bool FlagsContains(Flags haystack, Flags needle) {
+ return (static_cast<uint8_t>(haystack) & static_cast<uint8_t>(needle)) ==
+ static_cast<uint8_t>(needle);
+}
+
+TString FlagsToString(Flags v);
+
+inline std::ostream& operator<<(std::ostream& os, Flags v) {
+ return os << FlagsToString(v);
+}
+
+// clang-format off
+#define ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(X_VAL, X_SEP) \
+ /* text */ \
+ X_VAL(c) X_SEP X_VAL(s) X_SEP \
+ /* ints */ \
+ X_VAL(d) X_SEP X_VAL(i) X_SEP X_VAL(o) X_SEP \
+ X_VAL(u) X_SEP X_VAL(x) X_SEP X_VAL(X) X_SEP \
+ /* floats */ \
+ X_VAL(f) X_SEP X_VAL(F) X_SEP X_VAL(e) X_SEP X_VAL(E) X_SEP \
+ X_VAL(g) X_SEP X_VAL(G) X_SEP X_VAL(a) X_SEP X_VAL(A) X_SEP \
+ /* misc */ \
+ X_VAL(n) X_SEP X_VAL(p)
+// clang-format on
+
+// This type should not be referenced, it exists only to provide labels
+// internally that match the values declared in FormatConversionChar in
+// str_format.h. This is meant to allow internal libraries to use the same
+// declared interface type as the public interface
+// (y_absl::StrFormatConversionChar) while keeping the definition in a public
+// header.
+// Internal libraries should use the form
+// `FormatConversionCharInternal::c`, `FormatConversionCharInternal::kNone` for
+// comparisons. Use in switch statements is not recommended due to a bug in how
+// gcc 4.9 -Wswitch handles declared but undefined enums.
+struct FormatConversionCharInternal {
+ FormatConversionCharInternal() = delete;
+
+ private:
+ // clang-format off
+ enum class Enum : uint8_t {
+ c, s, // text
+ d, i, o, u, x, X, // int
+ f, F, e, E, g, G, a, A, // float
+ n, p, // misc
+ kNone
+ };
+ // clang-format on
+ public:
+#define ABSL_INTERNAL_X_VAL(id) \
+ static constexpr FormatConversionChar id = \
+ static_cast<FormatConversionChar>(Enum::id);
+ ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, )
+#undef ABSL_INTERNAL_X_VAL
+ static constexpr FormatConversionChar kNone =
+ static_cast<FormatConversionChar>(Enum::kNone);
+};
+
+inline FormatConversionChar FormatConversionCharFromChar(char c) {
+ switch (c) {
+#define ABSL_INTERNAL_X_VAL(id) \
+ case #id[0]: \
+ return FormatConversionCharInternal::id;
+ ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, )
+#undef ABSL_INTERNAL_X_VAL
+ }
+ return FormatConversionCharInternal::kNone;
+}
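+
+// e.g. (illustrative) FormatConversionCharFromChar('d') returns
+// FormatConversionCharInternal::d; any character outside the table maps to
+// kNone.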
+
+inline bool FormatConversionCharIsUpper(FormatConversionChar c) {
+ if (c == FormatConversionCharInternal::X ||
+ c == FormatConversionCharInternal::F ||
+ c == FormatConversionCharInternal::E ||
+ c == FormatConversionCharInternal::G ||
+ c == FormatConversionCharInternal::A) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+inline bool FormatConversionCharIsFloat(FormatConversionChar c) {
+ if (c == FormatConversionCharInternal::a ||
+ c == FormatConversionCharInternal::e ||
+ c == FormatConversionCharInternal::f ||
+ c == FormatConversionCharInternal::g ||
+ c == FormatConversionCharInternal::A ||
+ c == FormatConversionCharInternal::E ||
+ c == FormatConversionCharInternal::F ||
+ c == FormatConversionCharInternal::G) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+inline char FormatConversionCharToChar(FormatConversionChar c) {
+ if (c == FormatConversionCharInternal::kNone) {
+ return '\0';
+
+#define ABSL_INTERNAL_X_VAL(e) \
+ } else if (c == FormatConversionCharInternal::e) { \
+ return #e[0];
+#define ABSL_INTERNAL_X_SEP
+ ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL,
+ ABSL_INTERNAL_X_SEP)
+ } else {
+ return '\0';
+ }
+
+#undef ABSL_INTERNAL_X_VAL
+#undef ABSL_INTERNAL_X_SEP
+}
+
+// The associated char.
+inline std::ostream& operator<<(std::ostream& os, FormatConversionChar v) {
+ char c = FormatConversionCharToChar(v);
+ if (!c) c = '?';
+ return os << c;
+}
+
+struct FormatConversionSpecImplFriend;
+
+class FormatConversionSpecImpl {
+ public:
+  // Width and precision are not specified, no flags are set.
+ bool is_basic() const { return flags_ == Flags::kBasic; }
+ bool has_left_flag() const { return FlagsContains(flags_, Flags::kLeft); }
+ bool has_show_pos_flag() const {
+ return FlagsContains(flags_, Flags::kShowPos);
+ }
+ bool has_sign_col_flag() const {
+ return FlagsContains(flags_, Flags::kSignCol);
+ }
+ bool has_alt_flag() const { return FlagsContains(flags_, Flags::kAlt); }
+ bool has_zero_flag() const { return FlagsContains(flags_, Flags::kZero); }
+
+ FormatConversionChar conversion_char() const {
+    // Keep this field first in the struct. It generates better code when
+    // accessing it while ConversionSpec is passed by value in registers.
+ static_assert(offsetof(FormatConversionSpecImpl, conv_) == 0, "");
+ return conv_;
+ }
+
+  // Returns the specified width. If width is unspecified, it returns a negative
+ // value.
+ int width() const { return width_; }
+  // Returns the specified precision. If precision is unspecified, it returns a
+ // negative value.
+ int precision() const { return precision_; }
+
+ template <typename T>
+ T Wrap() {
+ return T(*this);
+ }
+
+ private:
+ friend struct str_format_internal::FormatConversionSpecImplFriend;
+ FormatConversionChar conv_ = FormatConversionCharInternal::kNone;
+ Flags flags_;
+ int width_;
+ int precision_;
+};
+
+struct FormatConversionSpecImplFriend final {
+ static void SetFlags(Flags f, FormatConversionSpecImpl* conv) {
+ conv->flags_ = f;
+ }
+ static void SetConversionChar(FormatConversionChar c,
+ FormatConversionSpecImpl* conv) {
+ conv->conv_ = c;
+ }
+ static void SetWidth(int w, FormatConversionSpecImpl* conv) {
+ conv->width_ = w;
+ }
+ static void SetPrecision(int p, FormatConversionSpecImpl* conv) {
+ conv->precision_ = p;
+ }
+ static TString FlagsToString(const FormatConversionSpecImpl& spec) {
+ return str_format_internal::FlagsToString(spec.flags_);
+ }
+};
+
+// Type safe OR operation (a variadic union, also used by operator| below).
+// We need this for two reasons:
+// 1. operator| on enums makes them decay to integers and the result is an
+// integer. We need the result to stay as an enum.
+// 2. We use "enum class" which would not work even if we accepted the decay.
+constexpr FormatConversionCharSet FormatConversionCharSetUnion(
+ FormatConversionCharSet a) {
+ return a;
+}
+
+template <typename... CharSet>
+constexpr FormatConversionCharSet FormatConversionCharSetUnion(
+ FormatConversionCharSet a, CharSet... rest) {
+ return static_cast<FormatConversionCharSet>(
+ static_cast<uint64_t>(a) |
+ static_cast<uint64_t>(FormatConversionCharSetUnion(rest...)));
+}
+
+constexpr uint64_t FormatConversionCharToConvInt(FormatConversionChar c) {
+ return uint64_t{1} << (1 + static_cast<uint8_t>(c));
+}
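+
+// Illustrative bit layout: bit 0 is reserved for the '*' specifier (see the
+// char overload below), so e.g. 'd' (Enum value 2) occupies bit 3, i.e.
+// uint64_t{8}.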
+
+constexpr uint64_t FormatConversionCharToConvInt(char conv) {
+ return
+#define ABSL_INTERNAL_CHAR_SET_CASE(c) \
+ conv == #c[0] \
+ ? FormatConversionCharToConvInt(FormatConversionCharInternal::c) \
+ :
+ ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_CHAR_SET_CASE, )
+#undef ABSL_INTERNAL_CHAR_SET_CASE
+ conv == '*'
+ ? 1
+ : 0;
+}
+
+constexpr FormatConversionCharSet FormatConversionCharToConvValue(char conv) {
+ return static_cast<FormatConversionCharSet>(
+ FormatConversionCharToConvInt(conv));
+}
+
+struct FormatConversionCharSetInternal {
+#define ABSL_INTERNAL_CHAR_SET_CASE(c) \
+ static constexpr FormatConversionCharSet c = \
+ FormatConversionCharToConvValue(#c[0]);
+ ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_CHAR_SET_CASE, )
+#undef ABSL_INTERNAL_CHAR_SET_CASE
+
+ // Used for width/precision '*' specification.
+ static constexpr FormatConversionCharSet kStar =
+ FormatConversionCharToConvValue('*');
+
+ static constexpr FormatConversionCharSet kIntegral =
+ FormatConversionCharSetUnion(d, i, u, o, x, X);
+ static constexpr FormatConversionCharSet kFloating =
+ FormatConversionCharSetUnion(a, e, f, g, A, E, F, G);
+ static constexpr FormatConversionCharSet kNumeric =
+ FormatConversionCharSetUnion(kIntegral, kFloating);
+ static constexpr FormatConversionCharSet kPointer = p;
+};
+
+// Type safe OR operator.
+// We need this for two reasons:
+// 1. operator| on enums makes them decay to integers and the result is an
+// integer. We need the result to stay as an enum.
+// 2. We use "enum class" which would not work even if we accepted the decay.
+constexpr FormatConversionCharSet operator|(FormatConversionCharSet a,
+ FormatConversionCharSet b) {
+ return FormatConversionCharSetUnion(a, b);
+}
+
+// Overloaded conversion functions to support y_absl::ParsedFormat.
+// Get a conversion with a single character in it.
+constexpr FormatConversionCharSet ToFormatConversionCharSet(char c) {
+ return static_cast<FormatConversionCharSet>(
+ FormatConversionCharToConvValue(c));
+}
+
+// Get a conversion with a single character in it.
+constexpr FormatConversionCharSet ToFormatConversionCharSet(
+ FormatConversionCharSet c) {
+ return c;
+}
+
+template <typename T>
+void ToFormatConversionCharSet(T) = delete;
+
+// Checks whether `c` exists in `set`.
+constexpr bool Contains(FormatConversionCharSet set, char c) {
+ return (static_cast<uint64_t>(set) &
+ static_cast<uint64_t>(FormatConversionCharToConvValue(c))) != 0;
+}
+
+// Checks whether all the characters in `c` are contained in `set`.
+constexpr bool Contains(FormatConversionCharSet set,
+ FormatConversionCharSet c) {
+ return (static_cast<uint64_t>(set) & static_cast<uint64_t>(c)) ==
+ static_cast<uint64_t>(c);
+}
+
+// Checks whether all the characters in `c` are contained in `set`.
+constexpr bool Contains(FormatConversionCharSet set, FormatConversionChar c) {
+ return (static_cast<uint64_t>(set) & FormatConversionCharToConvInt(c)) != 0;
+}
+
+// Return capacity - used, clipped to a minimum of 0.
+inline size_t Excess(size_t used, size_t capacity) {
+ return used < capacity ? capacity - used : 0;
+}
+
+} // namespace str_format_internal
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_EXTENSION_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.cc
new file mode 100644
index 00000000000..c49062538d1
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.cc
@@ -0,0 +1,1423 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/str_format/float_conversion.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <limits>
+#include <util/generic/string.h>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/functional/function_ref.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/numeric/bits.h"
+#include "y_absl/numeric/int128.h"
+#include "y_absl/numeric/internal/representation.h"
+#include "y_absl/strings/numbers.h"
+#include "y_absl/types/optional.h"
+#include "y_absl/types/span.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+
+namespace {
+
+using ::y_absl::numeric_internal::IsDoubleDouble;
+
+// The code below wants to avoid heap allocations.
+// To do so it needs to allocate memory on the stack.
+// `StackArray` will allocate memory on the stack in the form of a uint32_t
+// array and call the provided callback with said memory.
+// It will allocate memory in increments of 512 bytes. We could allocate the
+// largest needed unconditionally, but that is more than we need in most of
+// cases. This way we use less stack in the common cases.
+class StackArray {
+ using Func = y_absl::FunctionRef<void(y_absl::Span<uint32_t>)>;
+ static constexpr size_t kStep = 512 / sizeof(uint32_t);
+ // 5 steps is 2560 bytes, which is enough to hold a long double with the
+ // largest/smallest exponents.
+ // The operations below will static_assert their particular maximum.
+ static constexpr size_t kNumSteps = 5;
+
+ // We do not want this function to be inlined.
+ // Otherwise the caller will allocate the stack space unnecessarily for all
+ // the variants even though it only calls one.
+ template <size_t steps>
+ ABSL_ATTRIBUTE_NOINLINE static void RunWithCapacityImpl(Func f) {
+ uint32_t values[steps * kStep]{};
+ f(y_absl::MakeSpan(values));
+ }
+
+ public:
+ static constexpr size_t kMaxCapacity = kStep * kNumSteps;
+
+ static void RunWithCapacity(size_t capacity, Func f) {
+ assert(capacity <= kMaxCapacity);
+ const size_t step = (capacity + kStep - 1) / kStep;
+ assert(step <= kNumSteps);
+ switch (step) {
+ case 1:
+ return RunWithCapacityImpl<1>(f);
+ case 2:
+ return RunWithCapacityImpl<2>(f);
+ case 3:
+ return RunWithCapacityImpl<3>(f);
+ case 4:
+ return RunWithCapacityImpl<4>(f);
+ case 5:
+ return RunWithCapacityImpl<5>(f);
+ }
+
+ assert(false && "Invalid capacity");
+ }
+};
+
+// Calculates `10 * (*v) + carry` and stores the result in `*v` and returns
+// the carry.
+template <typename Int>
+inline Int MultiplyBy10WithCarry(Int *v, Int carry) {
+ using BiggerInt = y_absl::conditional_t<sizeof(Int) == 4, uint64_t, uint128>;
+ BiggerInt tmp = 10 * static_cast<BiggerInt>(*v) + carry;
+ *v = static_cast<Int>(tmp);
+ return static_cast<Int>(tmp >> (sizeof(Int) * 8));
+}
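+
+// e.g. (illustrative) for a 32-bit chunk with *v == 0xFFFFFFFF and carry == 7,
+// tmp == 42949672957, so *v becomes 0xFFFFFFFD and the returned carry is 9.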
+
+// Calculates `(2^64 * carry + *v) / 10`.
+// Stores the quotient in `*v` and returns the remainder.
+// Requires: `0 <= carry <= 9`
+inline uint64_t DivideBy10WithCarry(uint64_t *v, uint64_t carry) {
+ constexpr uint64_t divisor = 10;
+ // 2^64 / divisor = chunk_quotient + chunk_remainder / divisor
+ constexpr uint64_t chunk_quotient = (uint64_t{1} << 63) / (divisor / 2);
+ constexpr uint64_t chunk_remainder = uint64_t{} - chunk_quotient * divisor;
+
+ const uint64_t mod = *v % divisor;
+ const uint64_t next_carry = chunk_remainder * carry + mod;
+ *v = *v / divisor + carry * chunk_quotient + next_carry / divisor;
+ return next_carry % divisor;
+}
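+
+// Illustrative check of the constants above: 2^64 == 10 * chunk_quotient + 6,
+// so for carry <= 9,
+//   (2^64 * carry + *v) / 10
+//       == carry * chunk_quotient + *v / 10 + (6 * carry + *v % 10) / 10,
+// and next_carry == 6 * carry + *v % 10 <= 63 never overflows.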
+
+using MaxFloatType =
+ typename std::conditional<IsDoubleDouble(), double, long double>::type;
+
+// Generates the decimal representation for an integer of the form `v * 2^exp`,
+// where `v` and `exp` are both positive integers.
+// It generates the digits from the left (i.e. the most significant digit first)
+// to allow for direct printing into the sink.
+//
+// Requires `0 <= exp` and `exp <= numeric_limits<MaxFloatType>::max_exponent`.
+class BinaryToDecimal {
+ static constexpr int ChunksNeeded(int exp) {
+ // We will left shift a uint128 by `exp` bits, so we need `128+exp` total
+ // bits. Round up to 32.
+ // See constructor for details about adding `10%` to the value.
+ return (128 + exp + 31) / 32 * 11 / 10;
+ }
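+
+  // e.g. (illustrative) on x86, where long double has max_exponent == 16384,
+  // ChunksNeeded(16384) == (16543 / 32) * 11 / 10 == 567 chunks (2268 bytes),
+  // within StackArray's 5 * 512-byte budget.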
+
+ public:
+ // Run the conversion for `v * 2^exp` and call `f(binary_to_decimal)`.
+ // This function will allocate enough stack space to perform the conversion.
+ static void RunConversion(uint128 v, int exp,
+ y_absl::FunctionRef<void(BinaryToDecimal)> f) {
+ assert(exp > 0);
+ assert(exp <= std::numeric_limits<MaxFloatType>::max_exponent);
+ static_assert(
+ static_cast<int>(StackArray::kMaxCapacity) >=
+ ChunksNeeded(std::numeric_limits<MaxFloatType>::max_exponent),
+ "");
+
+ StackArray::RunWithCapacity(
+ ChunksNeeded(exp),
+ [=](y_absl::Span<uint32_t> input) { f(BinaryToDecimal(input, v, exp)); });
+ }
+
+ int TotalDigits() const {
+ return static_cast<int>((decimal_end_ - decimal_start_) * kDigitsPerChunk +
+ CurrentDigits().size());
+ }
+
+ // See the current block of digits.
+ y_absl::string_view CurrentDigits() const {
+ return y_absl::string_view(digits_ + kDigitsPerChunk - size_, size_);
+ }
+
+ // Advance the current view of digits.
+ // Returns `false` when no more digits are available.
+ bool AdvanceDigits() {
+ if (decimal_start_ >= decimal_end_) return false;
+
+ uint32_t w = data_[decimal_start_++];
+ for (size_ = 0; size_ < kDigitsPerChunk; w /= 10) {
+ digits_[kDigitsPerChunk - ++size_] = w % 10 + '0';
+ }
+ return true;
+ }
+
+ private:
+ BinaryToDecimal(y_absl::Span<uint32_t> data, uint128 v, int exp) : data_(data) {
+ // We need to print the digits directly into the sink object without
+ // buffering them all first. To do this we need two things:
+ // - to know the total number of digits to do padding when necessary
+ // - to generate the decimal digits from the left.
+ //
+ // In order to do this, we do a two pass conversion.
+ // On the first pass we convert the binary representation of the value into
+ // a decimal representation in which each uint32_t chunk holds up to 9
+ // decimal digits. In the second pass we take each decimal-holding-uint32_t
+ // value and generate the ascii decimal digits into `digits_`.
+ //
+ // The binary and decimal representations actually share the same memory
+ // region. As we go converting the chunks from binary to decimal we free
+ // them up and reuse them for the decimal representation. One caveat is that
+ // the decimal representation is around 7% less efficient in space than the
+ // binary one. We allocate an extra 10% memory to account for this. See
+ // ChunksNeeded for this calculation.
+ int chunk_index = exp / 32;
+ decimal_start_ = decimal_end_ = ChunksNeeded(exp);
+ const int offset = exp % 32;
+ // Left shift v by exp bits.
+ data_[chunk_index] = static_cast<uint32_t>(v << offset);
+ for (v >>= (32 - offset); v; v >>= 32)
+ data_[++chunk_index] = static_cast<uint32_t>(v);
+
+ while (chunk_index >= 0) {
+ // While we have more than one chunk available, go in steps of 1e9.
+ // `data_[chunk_index]` holds the highest non-zero binary chunk, so keep
+ // the variable updated.
+ uint32_t carry = 0;
+ for (int i = chunk_index; i >= 0; --i) {
+ uint64_t tmp = uint64_t{data_[i]} + (uint64_t{carry} << 32);
+ data_[i] = static_cast<uint32_t>(tmp / uint64_t{1000000000});
+ carry = static_cast<uint32_t>(tmp % uint64_t{1000000000});
+ }
+
+ // If the highest chunk is now empty, remove it from view.
+ if (data_[chunk_index] == 0) --chunk_index;
+
+ --decimal_start_;
+ assert(decimal_start_ != chunk_index);
+ data_[decimal_start_] = carry;
+ }
+
+ // Fill the first set of digits. The first chunk might not be complete, so
+    // it is handled differently.
+ for (uint32_t first = data_[decimal_start_++]; first != 0; first /= 10) {
+ digits_[kDigitsPerChunk - ++size_] = first % 10 + '0';
+ }
+ }
+
+ private:
+ static constexpr int kDigitsPerChunk = 9;
+
+ int decimal_start_;
+ int decimal_end_;
+
+ char digits_[kDigitsPerChunk];
+ int size_ = 0;
+
+ y_absl::Span<uint32_t> data_;
+};
+
+// Converts a value of the form `x * 2^-exp` into a sequence of decimal digits.
+// Requires `-exp < 0` and
+// `-exp >= limits<MaxFloatType>::min_exponent - limits<MaxFloatType>::digits`.
+class FractionalDigitGenerator {
+ public:
+ // Run the conversion for `v * 2^exp` and call `f(generator)`.
+ // This function will allocate enough stack space to perform the conversion.
+ static void RunConversion(
+ uint128 v, int exp, y_absl::FunctionRef<void(FractionalDigitGenerator)> f) {
+ using Limits = std::numeric_limits<MaxFloatType>;
+ assert(-exp < 0);
+ assert(-exp >= Limits::min_exponent - 128);
+ static_assert(StackArray::kMaxCapacity >=
+ (Limits::digits + 128 - Limits::min_exponent + 31) / 32,
+ "");
+ StackArray::RunWithCapacity((Limits::digits + exp + 31) / 32,
+ [=](y_absl::Span<uint32_t> input) {
+ f(FractionalDigitGenerator(input, v, exp));
+ });
+ }
+
+ // Returns true if there are any more non-zero digits left.
+ bool HasMoreDigits() const { return next_digit_ != 0 || chunk_index_ >= 0; }
+
+ // Returns true if the remainder digits are greater than 5000...
+ bool IsGreaterThanHalf() const {
+ return next_digit_ > 5 || (next_digit_ == 5 && chunk_index_ >= 0);
+ }
+ // Returns true if the remainder digits are exactly 5000...
+ bool IsExactlyHalf() const { return next_digit_ == 5 && chunk_index_ < 0; }
+
+ struct Digits {
+ int digit_before_nine;
+ int num_nines;
+ };
+
+ // Get the next set of digits.
+  // They are composed of a non-9 digit followed by a run of zero or more 9s.
+ Digits GetDigits() {
+ Digits digits{next_digit_, 0};
+
+ next_digit_ = GetOneDigit();
+ while (next_digit_ == 9) {
+ ++digits.num_nines;
+ next_digit_ = GetOneDigit();
+ }
+
+ return digits;
+ }
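+
+  // e.g. (illustrative) if the digit stream is 1, 9, 9, 2, ..., the first
+  // GetDigits() call returns {1, 2} (a 1 followed by two 9s) and leaves
+  // next_digit_ == 2 for the following call.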
+
+ private:
+ // Return the next digit.
+ int GetOneDigit() {
+ if (chunk_index_ < 0) return 0;
+
+ uint32_t carry = 0;
+ for (int i = chunk_index_; i >= 0; --i) {
+ carry = MultiplyBy10WithCarry(&data_[i], carry);
+ }
+ // If the lowest chunk is now empty, remove it from view.
+ if (data_[chunk_index_] == 0) --chunk_index_;
+ return carry;
+ }
+
+ FractionalDigitGenerator(y_absl::Span<uint32_t> data, uint128 v, int exp)
+ : chunk_index_(exp / 32), data_(data) {
+ const int offset = exp % 32;
+ // Right shift `v` by `exp` bits.
+ data_[chunk_index_] = static_cast<uint32_t>(v << (32 - offset));
+ v >>= offset;
+ // Make sure we don't overflow the data. We already calculated that
+ // non-zero bits fit, so we might not have space for leading zero bits.
+ for (int pos = chunk_index_; v; v >>= 32)
+ data_[--pos] = static_cast<uint32_t>(v);
+
+ // Fill next_digit_, as GetDigits expects it to be populated always.
+ next_digit_ = GetOneDigit();
+ }
+
+ int next_digit_;
+ int chunk_index_;
+ y_absl::Span<uint32_t> data_;
+};
+
+// Count the number of leading zero bits.
+int LeadingZeros(uint64_t v) { return countl_zero(v); }
+int LeadingZeros(uint128 v) {
+ auto high = static_cast<uint64_t>(v >> 64);
+ auto low = static_cast<uint64_t>(v);
+ return high != 0 ? countl_zero(high) : 64 + countl_zero(low);
+}
+
+// Round up the text digits starting at `p`.
+// The buffer must have an extra digit that is known to not need rounding.
+// This is done below by having an extra '0' digit on the left.
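+// For example, rounding "0.49" at the last digit gives "0.50", while "0.99"
+// carries through the dot and the extra digit to give "1.00".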
+void RoundUp(char *p) {
+ while (*p == '9' || *p == '.') {
+ if (*p == '9') *p = '0';
+ --p;
+ }
+ ++*p;
+}
+
+// Check the previous digit and round up or down to follow the round-to-even
+// policy.
+void RoundToEven(char *p) {
+ if (*p == '.') --p;
+ if (*p % 2 == 1) RoundUp(p);
+}
+
+// Simple integral decimal digit printing for values that fit in 64-bits.
+// Returns the pointer to the last written digit.
+char *PrintIntegralDigitsFromRightFast(uint64_t v, char *p) {
+ do {
+ *--p = DivideBy10WithCarry(&v, 0) + '0';
+ } while (v != 0);
+ return p;
+}
+
+// Simple integral decimal digit printing for values that fit in 128-bits.
+// Returns the pointer to the last written digit.
+char *PrintIntegralDigitsFromRightFast(uint128 v, char *p) {
+ auto high = static_cast<uint64_t>(v >> 64);
+ auto low = static_cast<uint64_t>(v);
+
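+  // Schoolbook long division by 10 over the two 64-bit halves: the remainder
+  // from dividing `high` is carried into the division of `low`.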
+ while (high != 0) {
+ uint64_t carry = DivideBy10WithCarry(&high, 0);
+ carry = DivideBy10WithCarry(&low, carry);
+ *--p = carry + '0';
+ }
+ return PrintIntegralDigitsFromRightFast(low, p);
+}
+
+// Simple fractional decimal digit printing for values that fit in 64 bits
+// after shifting.
+// Performs rounding if necessary to fit within `precision`.
+// Returns the pointer to one after the last character written.
+char *PrintFractionalDigitsFast(uint64_t v, char *start, int exp,
+ int precision) {
+ char *p = start;
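+  // Shift the fractional bits to the top of the word: `v` then represents a
+  // binary fixed-point fraction in [0, 1), and each multiply-by-10 yields the
+  // next decimal digit as the carry.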
+ v <<= (64 - exp);
+ while (precision > 0) {
+ if (!v) return p;
+ *p++ = MultiplyBy10WithCarry(&v, uint64_t{0}) + '0';
+ --precision;
+ }
+
+ // We need to round.
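+  // In this fixed-point representation, 0x8000000000000000 (2^63) is exactly
+  // one half.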
+ if (v < 0x8000000000000000) {
+ // We round down, so nothing to do.
+ } else if (v > 0x8000000000000000) {
+ // We round up.
+ RoundUp(p - 1);
+ } else {
+ RoundToEven(p - 1);
+ }
+
+ assert(precision == 0);
+ // Precision can only be zero here.
+ return p;
+}
+
+// Simple fractional decimal digit printing for values that fit in 128 bits
+// after shifting.
+// Performs rounding if necessary to fit within `precision`.
+// Returns the pointer to one after the last character written.
+char *PrintFractionalDigitsFast(uint128 v, char *start, int exp,
+ int precision) {
+ char *p = start;
+ v <<= (128 - exp);
+ auto high = static_cast<uint64_t>(v >> 64);
+ auto low = static_cast<uint64_t>(v);
+
+ // While we have digits to print and `low` is not empty, do the long
+ // multiplication.
+ while (precision > 0 && low != 0) {
+ uint64_t carry = MultiplyBy10WithCarry(&low, uint64_t{0});
+ carry = MultiplyBy10WithCarry(&high, carry);
+
+ *p++ = carry + '0';
+ --precision;
+ }
+
+ // Now `low` is empty, so use a faster approach for the rest of the digits.
+ // This block is pretty much the same as the main loop for the 64-bit case
+ // above.
+ while (precision > 0) {
+ if (!high) return p;
+ *p++ = MultiplyBy10WithCarry(&high, uint64_t{0}) + '0';
+ --precision;
+ }
+
+ // We need to round.
+ if (high < 0x8000000000000000) {
+ // We round down, so nothing to do.
+ } else if (high > 0x8000000000000000 || low != 0) {
+ // We round up.
+ RoundUp(p - 1);
+ } else {
+ RoundToEven(p - 1);
+ }
+
+ assert(precision == 0);
+ // Precision can only be zero here.
+ return p;
+}
+
+struct FormatState {
+ char sign_char;
+ int precision;
+ const FormatConversionSpecImpl &conv;
+ FormatSinkImpl *sink;
+
+ // In `alt` mode (flag #) we keep the `.` even if there are no fractional
+ // digits. In non-alt mode, we strip it.
+ bool ShouldPrintDot() const { return precision != 0 || conv.has_alt_flag(); }
+};
+
+struct Padding {
+ int left_spaces;
+ int zeros;
+ int right_spaces;
+};
+
+Padding ExtraWidthToPadding(size_t total_size, const FormatState &state) {
+ if (state.conv.width() < 0 ||
+ static_cast<size_t>(state.conv.width()) <= total_size) {
+ return {0, 0, 0};
+ }
+ int missing_chars = state.conv.width() - total_size;
+ if (state.conv.has_left_flag()) {
+ return {0, 0, missing_chars};
+ } else if (state.conv.has_zero_flag()) {
+ return {0, missing_chars, 0};
+ } else {
+ return {missing_chars, 0, 0};
+ }
+}
+
+void FinalPrint(const FormatState &state, y_absl::string_view data,
+ int padding_offset, int trailing_zeros,
+ y_absl::string_view data_postfix) {
+ if (state.conv.width() < 0) {
+ // No width specified. Fast-path.
+ if (state.sign_char != '\0') state.sink->Append(1, state.sign_char);
+ state.sink->Append(data);
+ state.sink->Append(trailing_zeros, '0');
+ state.sink->Append(data_postfix);
+ return;
+ }
+
+ auto padding = ExtraWidthToPadding((state.sign_char != '\0' ? 1 : 0) +
+ data.size() + data_postfix.size() +
+ static_cast<size_t>(trailing_zeros),
+ state);
+
+ state.sink->Append(padding.left_spaces, ' ');
+ if (state.sign_char != '\0') state.sink->Append(1, state.sign_char);
+ // Padding in general needs to be inserted somewhere in the middle of `data`.
+ state.sink->Append(data.substr(0, padding_offset));
+ state.sink->Append(padding.zeros, '0');
+ state.sink->Append(data.substr(padding_offset));
+ state.sink->Append(trailing_zeros, '0');
+ state.sink->Append(data_postfix);
+ state.sink->Append(padding.right_spaces, ' ');
+}
+
+// Fastpath %f formatter for when the shifted value fits in a simple integral
+// type.
+// Prints `v*2^exp` with the options from `state`.
+template <typename Int>
+void FormatFFast(Int v, int exp, const FormatState &state) {
+ constexpr int input_bits = sizeof(Int) * 8;
+
+ static constexpr size_t integral_size =
+ /* in case we need to round up an extra digit */ 1 +
+ /* decimal digits for uint128 */ 40 + 1;
+ char buffer[integral_size + /* . */ 1 + /* max digits uint128 */ 128];
+ buffer[integral_size] = '.';
+ char *const integral_digits_end = buffer + integral_size;
+ char *integral_digits_start;
+ char *const fractional_digits_start = buffer + integral_size + 1;
+ char *fractional_digits_end = fractional_digits_start;
+
+ if (exp >= 0) {
+ const int total_bits = input_bits - LeadingZeros(v) + exp;
+ integral_digits_start =
+ total_bits <= 64
+ ? PrintIntegralDigitsFromRightFast(static_cast<uint64_t>(v) << exp,
+ integral_digits_end)
+ : PrintIntegralDigitsFromRightFast(static_cast<uint128>(v) << exp,
+ integral_digits_end);
+ } else {
+ exp = -exp;
+
+ integral_digits_start = PrintIntegralDigitsFromRightFast(
+ exp < input_bits ? v >> exp : 0, integral_digits_end);
+ // PrintFractionalDigits may pull a carried 1 all the way up through the
+ // integral portion.
+ integral_digits_start[-1] = '0';
+
+ fractional_digits_end =
+ exp <= 64 ? PrintFractionalDigitsFast(v, fractional_digits_start, exp,
+ state.precision)
+ : PrintFractionalDigitsFast(static_cast<uint128>(v),
+ fractional_digits_start, exp,
+ state.precision);
+ // There was a carry, so include the first digit too.
+ if (integral_digits_start[-1] != '0') --integral_digits_start;
+ }
+
+ size_t size = fractional_digits_end - integral_digits_start;
+
+ // In `alt` mode (flag #) we keep the `.` even if there are no fractional
+ // digits. In non-alt mode, we strip it.
+ if (!state.ShouldPrintDot()) --size;
+ FinalPrint(state, y_absl::string_view(integral_digits_start, size),
+ /*padding_offset=*/0,
+ static_cast<int>(state.precision - (fractional_digits_end -
+ fractional_digits_start)),
+ /*data_postfix=*/"");
+}
+
+// Slow %f formatter for when the shifted value does not fit in a uint128, and
+// `exp > 0`.
+// Prints `v*2^exp` with the options from `state`.
+// This one is guaranteed to not have fractional digits, so we don't have to
+// worry about anything after the `.`.
+void FormatFPositiveExpSlow(uint128 v, int exp, const FormatState &state) {
+ BinaryToDecimal::RunConversion(v, exp, [&](BinaryToDecimal btd) {
+ const size_t total_digits =
+ btd.TotalDigits() +
+ (state.ShouldPrintDot() ? static_cast<size_t>(state.precision) + 1 : 0);
+
+ const auto padding = ExtraWidthToPadding(
+ total_digits + (state.sign_char != '\0' ? 1 : 0), state);
+
+ state.sink->Append(padding.left_spaces, ' ');
+ if (state.sign_char != '\0') state.sink->Append(1, state.sign_char);
+ state.sink->Append(padding.zeros, '0');
+
+ do {
+ state.sink->Append(btd.CurrentDigits());
+ } while (btd.AdvanceDigits());
+
+ if (state.ShouldPrintDot()) state.sink->Append(1, '.');
+ state.sink->Append(state.precision, '0');
+ state.sink->Append(padding.right_spaces, ' ');
+ });
+}
+
+// Slow %f formatter for when the shifted value does not fit in a uint128, and
+// `exp < 0`.
+// Prints `v*2^exp` with the options from `state`.
+// This one is guaranteed to be < 1.0, so we don't have to worry about integral
+// digits.
+void FormatFNegativeExpSlow(uint128 v, int exp, const FormatState &state) {
+ const size_t total_digits =
+ /* 0 */ 1 +
+ (state.ShouldPrintDot() ? static_cast<size_t>(state.precision) + 1 : 0);
+ auto padding =
+ ExtraWidthToPadding(total_digits + (state.sign_char ? 1 : 0), state);
+ padding.zeros += 1;
+ state.sink->Append(padding.left_spaces, ' ');
+ if (state.sign_char != '\0') state.sink->Append(1, state.sign_char);
+ state.sink->Append(padding.zeros, '0');
+
+ if (state.ShouldPrintDot()) state.sink->Append(1, '.');
+
+ // Print digits
+ int digits_to_go = state.precision;
+
+ FractionalDigitGenerator::RunConversion(
+ v, exp, [&](FractionalDigitGenerator digit_gen) {
+ // There are no digits to print here.
+ if (state.precision == 0) return;
+
+ // We go one digit at a time, while keeping track of runs of nines.
+ // The runs of nines are used to perform rounding when necessary.
+
+ while (digits_to_go > 0 && digit_gen.HasMoreDigits()) {
+ auto digits = digit_gen.GetDigits();
+
+ // Now we have a digit and a run of nines.
+ // See if we can print them all.
+ if (digits.num_nines + 1 < digits_to_go) {
+ // We don't have to round yet, so print them.
+ state.sink->Append(1, digits.digit_before_nine + '0');
+ state.sink->Append(digits.num_nines, '9');
+ digits_to_go -= digits.num_nines + 1;
+
+ } else {
+          // We can't print all the nines; see where we have to truncate.
+
+ bool round_up = false;
+ if (digits.num_nines + 1 > digits_to_go) {
+ // We round up at a nine. No need to print them.
+ round_up = true;
+ } else {
+            // We can fit all the nines, but we truncate just after them.
+ if (digit_gen.IsGreaterThanHalf()) {
+ round_up = true;
+ } else if (digit_gen.IsExactlyHalf()) {
+ // Round to even
+ round_up =
+ digits.num_nines != 0 || digits.digit_before_nine % 2 == 1;
+ }
+ }
+
+ if (round_up) {
+ state.sink->Append(1, digits.digit_before_nine + '1');
+ --digits_to_go;
+ // The rest will be zeros.
+ } else {
+ state.sink->Append(1, digits.digit_before_nine + '0');
+ state.sink->Append(digits_to_go - 1, '9');
+ digits_to_go = 0;
+ }
+ return;
+ }
+ }
+ });
+
+ state.sink->Append(digits_to_go, '0');
+ state.sink->Append(padding.right_spaces, ' ');
+}
+
+template <typename Int>
+void FormatF(Int mantissa, int exp, const FormatState &state) {
+ if (exp >= 0) {
+ const int total_bits = sizeof(Int) * 8 - LeadingZeros(mantissa) + exp;
+
+    // Fall back to the slow stack-based approach if we can't do it in a
+    // 64- or 128-bit state.
+ if (ABSL_PREDICT_FALSE(total_bits > 128)) {
+ return FormatFPositiveExpSlow(mantissa, exp, state);
+ }
+ } else {
+    // Fall back to the slow stack-based approach if we can't do it in a
+    // 64- or 128-bit state.
+ if (ABSL_PREDICT_FALSE(exp < -128)) {
+ return FormatFNegativeExpSlow(mantissa, -exp, state);
+ }
+ }
+ return FormatFFast(mantissa, exp, state);
+}
+
+// Grab the group of four bits (nibble) from `n`. E.g., nibble 1 corresponds to
+// bits 4-7.
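+// For example, GetNibble(0xAB, 1) == 0xA.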
+template <typename Int>
+uint8_t GetNibble(Int n, int nibble_index) {
+ constexpr Int mask_low_nibble = Int{0xf};
+ int shift = nibble_index * 4;
+ n &= mask_low_nibble << shift;
+ return static_cast<uint8_t>((n >> shift) & 0xf);
+}
+
+// Add one to the given nibble, applying carry to higher nibbles. Returns true
+// if overflow, false otherwise.
+template <typename Int>
+bool IncrementNibble(int nibble_index, Int *n) {
+ constexpr int kShift = sizeof(Int) * 8 - 1;
+ constexpr int kNumNibbles = sizeof(Int) * 8 / 4;
+ Int before = *n >> kShift;
+  // Here we essentially want to take the number 1, move it into the requested
+  // nibble, and add it to *n to effectively increment the nibble. However,
+  // ASan will complain if we try to shift the 1 beyond the limits of the Int,
+  // i.e., if `nibble_index` is out of range. We therefore check for this, and
+  // if we are out of range we just add 0, which leaves *n unchanged; that
+  // seems like the reasonable thing to do in that case.
+ *n += ((nibble_index >= kNumNibbles) ? 0 : (Int{1} << (nibble_index * 4)));
+ Int after = *n >> kShift;
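+  // The Int overflowed iff its top bit flipped from 1 to 0; an increment
+  // aimed entirely above the Int also counts as an overflow into the leading
+  // digit.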
+ return (before && !after) || (nibble_index >= kNumNibbles);
+}
+
+// Return a mask with 1's in the given nibble and all lower nibbles.
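+// E.g., MaskUpToNibbleInclusive<uint32_t>(1) == 0xFF.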
+template <typename Int>
+Int MaskUpToNibbleInclusive(int nibble_index) {
+ constexpr int kNumNibbles = sizeof(Int) * 8 / 4;
+ static const Int ones = ~Int{0};
+ return ones >> std::max(0, 4 * (kNumNibbles - nibble_index - 1));
+}
+
+// Return a mask with 1's below the given nibble.
+template <typename Int>
+Int MaskUpToNibbleExclusive(int nibble_index) {
+ return nibble_index <= 0 ? 0 : MaskUpToNibbleInclusive<Int>(nibble_index - 1);
+}
+
+template <typename Int>
+Int MoveToNibble(uint8_t nibble, int nibble_index) {
+ return Int{nibble} << (4 * nibble_index);
+}
+
+// Given mantissa size, find optimal # of mantissa bits to put in initial digit.
+//
+// In the hex representation we keep a single hex digit to the left of the dot.
+// In theory, how many bits of the mantissa go into that hex digit is
+// arbitrary; in practice, it is best chosen based on the size of the
+// mantissa. E.g., for a `double`, there are 53
+// mantissa bits, so that means that we should put 1 bit to the left of the dot,
+// thereby leaving 52 bits to the right, which is evenly divisible by four and
+// thus all fractional digits represent actual precision. For a `long double`,
+// on the other hand, there are 64 bits of mantissa, thus we can use all four
+// bits for the initial hex digit and still have a number left over (60) that is
+// a multiple of four. Once again, the goal is to have all fractional digits
+// represent real precision.
+template <typename Float>
+constexpr int HexFloatLeadingDigitSizeInBits() {
+ return std::numeric_limits<Float>::digits % 4 > 0
+ ? std::numeric_limits<Float>::digits % 4
+ : 4;
+}
+
+// This function captures the rounding behavior of glibc for hex float
+// representations. E.g. when rounding 0x1.ab800000 to a precision of .2
+// ("%.2a") glibc will round up because it rounds toward the even number (since
+// 0xb is an odd number, it will round up to 0xc). However, when rounding at a
+// point that is not followed by 800000..., it disregards the parity and rounds
+// up if > 8 and rounds down if < 8.
+template <typename Int>
+bool HexFloatNeedsRoundUp(Int mantissa, int final_nibble_displayed,
+ uint8_t leading) {
+  // If the last nibble (hex digit) to be displayed is the lowest one in the
+  // mantissa, then we don't have any further nibbles to inform
+ // rounding, so don't round.
+ if (final_nibble_displayed <= 0) {
+ return false;
+ }
+ int rounding_nibble_idx = final_nibble_displayed - 1;
+ constexpr int kTotalNibbles = sizeof(Int) * 8 / 4;
+ assert(final_nibble_displayed <= kTotalNibbles);
+ Int mantissa_up_to_rounding_nibble_inclusive =
+ mantissa & MaskUpToNibbleInclusive<Int>(rounding_nibble_idx);
+ Int eight = MoveToNibble<Int>(8, rounding_nibble_idx);
+ if (mantissa_up_to_rounding_nibble_inclusive != eight) {
+ return mantissa_up_to_rounding_nibble_inclusive > eight;
+ }
+ // Nibble in question == 8.
+ uint8_t round_if_odd = (final_nibble_displayed == kTotalNibbles)
+ ? leading
+ : GetNibble(mantissa, final_nibble_displayed);
+ return round_if_odd % 2 == 1;
+}
+
+// Stores values associated with a Float type needed by the FormatA
+// implementation in order to avoid templatizing that function by the Float
+// type.
+struct HexFloatTypeParams {
+ template <typename Float>
+ explicit HexFloatTypeParams(Float)
+ : min_exponent(std::numeric_limits<Float>::min_exponent - 1),
+ leading_digit_size_bits(HexFloatLeadingDigitSizeInBits<Float>()) {
+ assert(leading_digit_size_bits >= 1 && leading_digit_size_bits <= 4);
+ }
+
+ int min_exponent;
+ int leading_digit_size_bits;
+};
+
+// Hex Float Rounding. First check if we need to round; if so, we do that by
+// manipulating (incrementing) the mantissa. That way we can later print the
+// mantissa digits by iterating through them in the same way regardless of
+// whether a rounding happened.
+template <typename Int>
+void FormatARound(bool precision_specified, const FormatState &state,
+ uint8_t *leading, Int *mantissa, int *exp) {
+ constexpr int kTotalNibbles = sizeof(Int) * 8 / 4;
+ // Index of the last nibble that we could display given precision.
+ int final_nibble_displayed =
+ precision_specified ? std::max(0, (kTotalNibbles - state.precision)) : 0;
+ if (HexFloatNeedsRoundUp(*mantissa, final_nibble_displayed, *leading)) {
+ // Need to round up.
+ bool overflow = IncrementNibble(final_nibble_displayed, mantissa);
+ *leading += (overflow ? 1 : 0);
+ if (ABSL_PREDICT_FALSE(*leading > 15)) {
+ // We have overflowed the leading digit. This would mean that we would
+ // need two hex digits to the left of the dot, which is not allowed. So
+      // adjust the mantissa and exponent so that the result is always
+      // 0x1.0pXXX.
+ *leading = 1;
+ *mantissa = 0;
+ *exp += 4;
+ }
+ }
+ // Now that we have handled a possible round-up we can go ahead and zero out
+ // all the nibbles of the mantissa that we won't need.
+ if (precision_specified) {
+ *mantissa &= ~MaskUpToNibbleExclusive<Int>(final_nibble_displayed);
+ }
+}
+
+template <typename Int>
+void FormatANormalize(const HexFloatTypeParams float_traits, uint8_t *leading,
+ Int *mantissa, int *exp) {
+ constexpr int kIntBits = sizeof(Int) * 8;
+ static const Int kHighIntBit = Int{1} << (kIntBits - 1);
+ const int kLeadDigitBitsCount = float_traits.leading_digit_size_bits;
+ // Normalize mantissa so that highest bit set is in MSB position, unless we
+ // get interrupted by the exponent threshold.
+ while (*mantissa && !(*mantissa & kHighIntBit)) {
+ if (ABSL_PREDICT_FALSE(*exp - 1 < float_traits.min_exponent)) {
+ *mantissa >>= (float_traits.min_exponent - *exp);
+ *exp = float_traits.min_exponent;
+ return;
+ }
+ *mantissa <<= 1;
+ --*exp;
+ }
+ // Extract bits for leading digit then shift them away leaving the
+ // fractional part.
+ *leading =
+ static_cast<uint8_t>(*mantissa >> (kIntBits - kLeadDigitBitsCount));
+ *exp -= (*mantissa != 0) ? kLeadDigitBitsCount : *exp;
+ *mantissa <<= kLeadDigitBitsCount;
+}
+
+template <typename Int>
+void FormatA(const HexFloatTypeParams float_traits, Int mantissa, int exp,
+ bool uppercase, const FormatState &state) {
+ // Int properties.
+ constexpr int kIntBits = sizeof(Int) * 8;
+ constexpr int kTotalNibbles = sizeof(Int) * 8 / 4;
+ // Did the user specify a precision explicitly?
+ const bool precision_specified = state.conv.precision() >= 0;
+
+ // ========== Normalize/Denormalize ==========
+ exp += kIntBits; // make all digits fractional digits.
+  // This holds the (up to four) bits of the leading digit, i.e., the '1' in
+  // the number 0x1.e6fp+2. It's always > 0 unless the number is zero or
+  // denormal.
+ uint8_t leading = 0;
+ FormatANormalize(float_traits, &leading, &mantissa, &exp);
+
+ // =============== Rounding ==================
+ // Check if we need to round; if so, then we do that by manipulating
+ // (incrementing) the mantissa before beginning to print characters.
+ FormatARound(precision_specified, state, &leading, &mantissa, &exp);
+
+ // ============= Format Result ===============
+  // This buffer holds the "0x1.ab1de3" portion of "0x1.ab1de3p+2". Compute
+  // the size using long double, which is the largest of the float types.
+ constexpr size_t kBufSizeForHexFloatRepr =
+ 2 // 0x
+ + std::numeric_limits<MaxFloatType>::digits / 4 // number of hex digits
+ + 1 // round up
+ + 1; // "." (dot)
+ char digits_buffer[kBufSizeForHexFloatRepr];
+ char *digits_iter = digits_buffer;
+ const char *const digits =
+ static_cast<const char *>("0123456789ABCDEF0123456789abcdef") +
+ (uppercase ? 0 : 16);
+
+ // =============== Hex Prefix ================
+ *digits_iter++ = '0';
+ *digits_iter++ = uppercase ? 'X' : 'x';
+
+ // ========== Non-Fractional Digit ===========
+ *digits_iter++ = digits[leading];
+
+ // ================== Dot ====================
+ // There are three reasons we might need a dot. Keep in mind that, at this
+ // point, the mantissa holds only the fractional part.
+ if ((precision_specified && state.precision > 0) ||
+ (!precision_specified && mantissa > 0) || state.conv.has_alt_flag()) {
+ *digits_iter++ = '.';
+ }
+
+ // ============ Fractional Digits ============
+ int digits_emitted = 0;
+ while (mantissa > 0) {
+ *digits_iter++ = digits[GetNibble(mantissa, kTotalNibbles - 1)];
+ mantissa <<= 4;
+ ++digits_emitted;
+ }
+ int trailing_zeros =
+ precision_specified ? state.precision - digits_emitted : 0;
+ assert(trailing_zeros >= 0);
+ auto digits_result = string_view(digits_buffer, digits_iter - digits_buffer);
+
+ // =============== Exponent ==================
+ constexpr size_t kBufSizeForExpDecRepr =
+      numbers_internal::kFastToBufferSize // required for FastIntToBuffer
+ + 1 // 'p' or 'P'
+ + 1; // '+' or '-'
+ char exp_buffer[kBufSizeForExpDecRepr];
+ exp_buffer[0] = uppercase ? 'P' : 'p';
+ exp_buffer[1] = exp >= 0 ? '+' : '-';
+ numbers_internal::FastIntToBuffer(exp < 0 ? -exp : exp, exp_buffer + 2);
+
+ // ============ Assemble Result ==============
+ FinalPrint(state, //
+ digits_result, // 0xN.NNN...
+ 2, // offset in `data` to start padding if needed.
+ trailing_zeros, // num remaining mantissa padding zeros
+ exp_buffer); // exponent
+}
+
+char *CopyStringTo(y_absl::string_view v, char *out) {
+ std::memcpy(out, v.data(), v.size());
+ return out + v.size();
+}
+
+template <typename Float>
+bool FallbackToSnprintf(const Float v, const FormatConversionSpecImpl &conv,
+ FormatSinkImpl *sink) {
+ int w = conv.width() >= 0 ? conv.width() : 0;
+ int p = conv.precision() >= 0 ? conv.precision() : -1;
+ char fmt[32];
+ {
+ char *fp = fmt;
+ *fp++ = '%';
+ fp = CopyStringTo(FormatConversionSpecImplFriend::FlagsToString(conv), fp);
+ fp = CopyStringTo("*.*", fp);
+ if (std::is_same<long double, Float>()) {
+ *fp++ = 'L';
+ }
+ *fp++ = FormatConversionCharToChar(conv.conversion_char());
+ *fp = 0;
+ assert(fp < fmt + sizeof(fmt));
+ }
+ TString space(512, '\0');
+ y_absl::string_view result;
+ while (true) {
+ int n = snprintf(&space[0], space.size(), fmt, w, p, v);
+ if (n < 0) return false;
+ if (static_cast<size_t>(n) < space.size()) {
+ result = y_absl::string_view(space.data(), n);
+ break;
+ }
+ space.resize(n + 1);
+ }
+ sink->Append(result);
+ return true;
+}
+
+// 128 bits in decimal: ceil(128 * log10(2)) = 39,
+// i.e. std::numeric_limits<__uint128_t>::digits10 + 1.
+constexpr int kMaxFixedPrecision = 39;
+
+constexpr int kBufferLength = /*sign*/ 1 +
+ /*integer*/ kMaxFixedPrecision +
+ /*point*/ 1 +
+ /*fraction*/ kMaxFixedPrecision +
+ /*exponent e+123*/ 5;
+
+struct Buffer {
+ void push_front(char c) {
+ assert(begin > data);
+ *--begin = c;
+ }
+ void push_back(char c) {
+ assert(end < data + sizeof(data));
+ *end++ = c;
+ }
+ void pop_back() {
+ assert(begin < end);
+ --end;
+ }
+
+ char &back() {
+ assert(begin < end);
+ return end[-1];
+ }
+
+ char last_digit() const { return end[-1] == '.' ? end[-2] : end[-1]; }
+
+ int size() const { return static_cast<int>(end - begin); }
+
+ char data[kBufferLength];
+ char *begin;
+ char *end;
+};
+
+enum class FormatStyle { Fixed, Precision };
+
+// If the value is Inf or Nan, print it and return true.
+// Otherwise, return false.
+template <typename Float>
+bool ConvertNonNumericFloats(char sign_char, Float v,
+ const FormatConversionSpecImpl &conv,
+ FormatSinkImpl *sink) {
+ char text[4], *ptr = text;
+ if (sign_char != '\0') *ptr++ = sign_char;
+ if (std::isnan(v)) {
+ ptr = std::copy_n(
+ FormatConversionCharIsUpper(conv.conversion_char()) ? "NAN" : "nan", 3,
+ ptr);
+ } else if (std::isinf(v)) {
+ ptr = std::copy_n(
+ FormatConversionCharIsUpper(conv.conversion_char()) ? "INF" : "inf", 3,
+ ptr);
+ } else {
+ return false;
+ }
+
+ return sink->PutPaddedString(string_view(text, ptr - text), conv.width(), -1,
+ conv.has_left_flag());
+}
+
+// Round up the last digit of the value.
+// It will carry over and potentially overflow. 'exp' will be adjusted in that
+// case.
+template <FormatStyle mode>
+void RoundUp(Buffer *buffer, int *exp) {
+ char *p = &buffer->back();
+ while (p >= buffer->begin && (*p == '9' || *p == '.')) {
+ if (*p == '9') *p = '0';
+ --p;
+ }
+
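+  // The carry propagated past the leftmost digit; in Precision mode, e.g.,
+  // "9.99" becomes "1.00" and the exponent is bumped by one.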
+ if (p < buffer->begin) {
+ *p = '1';
+ buffer->begin = p;
+ if (mode == FormatStyle::Precision) {
+ std::swap(p[1], p[2]); // move the .
+ ++*exp;
+ buffer->pop_back();
+ }
+ } else {
+ ++*p;
+ }
+}
+
+void PrintExponent(int exp, char e, Buffer *out) {
+ out->push_back(e);
+ if (exp < 0) {
+ out->push_back('-');
+ exp = -exp;
+ } else {
+ out->push_back('+');
+ }
+ // Exponent digits.
+ if (exp > 99) {
+ out->push_back(exp / 100 + '0');
+ out->push_back(exp / 10 % 10 + '0');
+ out->push_back(exp % 10 + '0');
+ } else {
+ out->push_back(exp / 10 + '0');
+ out->push_back(exp % 10 + '0');
+ }
+}
+
+template <typename Float, typename Int>
+constexpr bool CanFitMantissa() {
+ return
+#if defined(__clang__) && !defined(__SSE3__)
+ // Workaround for clang bug: https://bugs.llvm.org/show_bug.cgi?id=38289
+ // Casting from long double to uint64_t is miscompiled and drops bits.
+ (!std::is_same<Float, long double>::value ||
+ !std::is_same<Int, uint64_t>::value) &&
+#endif
+ std::numeric_limits<Float>::digits <= std::numeric_limits<Int>::digits;
+}
+
+template <typename Float>
+struct Decomposed {
+ using MantissaType =
+ y_absl::conditional_t<std::is_same<long double, Float>::value, uint128,
+ uint64_t>;
+ static_assert(std::numeric_limits<Float>::digits <= sizeof(MantissaType) * 8,
+ "");
+ MantissaType mantissa;
+ int exponent;
+};
+
+// Decompose the floating-point value into an integer mantissa and an exponent.
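+// E.g., for `double`, Decompose(1.0) yields mantissa 2^52 and exponent -52,
+// since 1.0 == 2^52 * 2^-52.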
+template <typename Float>
+Decomposed<Float> Decompose(Float v) {
+ int exp;
+ Float m = std::frexp(v, &exp);
+ m = std::ldexp(m, std::numeric_limits<Float>::digits);
+ exp -= std::numeric_limits<Float>::digits;
+
+ return {static_cast<typename Decomposed<Float>::MantissaType>(m), exp};
+}
+
+// Print 'digits' as decimal.
+// In Fixed mode, we add a '.' at the end.
+// In Precision mode, we add a '.' after the first digit.
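+// E.g., digits == 1234 yields "1234." in Fixed mode and "1.234" in Precision
+// mode.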
+template <FormatStyle mode, typename Int>
+int PrintIntegralDigits(Int digits, Buffer *out) {
+ int printed = 0;
+ if (digits) {
+ for (; digits; digits /= 10) out->push_front(digits % 10 + '0');
+ printed = out->size();
+ if (mode == FormatStyle::Precision) {
+ out->push_front(*out->begin);
+ out->begin[1] = '.';
+ } else {
+ out->push_back('.');
+ }
+ } else if (mode == FormatStyle::Fixed) {
+ out->push_front('0');
+ out->push_back('.');
+ printed = 1;
+ }
+ return printed;
+}
+
+// Back out 'extra_digits' digits and round up if necessary.
+bool RemoveExtraPrecision(int extra_digits, bool has_leftover_value,
+ Buffer *out, int *exp_out) {
+ if (extra_digits <= 0) return false;
+
+ // Back out the extra digits
+ out->end -= extra_digits;
+
+ bool needs_to_round_up = [&] {
+ // We look at the digit just past the end.
+ // There must be 'extra_digits' extra valid digits after end.
+ if (*out->end > '5') return true;
+ if (*out->end < '5') return false;
+ if (has_leftover_value || std::any_of(out->end + 1, out->end + extra_digits,
+ [](char c) { return c != '0'; }))
+ return true;
+
+ // Ends in ...50*, round to even.
+ return out->last_digit() % 2 == 1;
+ }();
+
+ if (needs_to_round_up) {
+ RoundUp<FormatStyle::Precision>(out, exp_out);
+ }
+ return true;
+}
+
+// Print the value into the buffer.
+// This will not include the exponent, which will be returned in 'exp_out' for
+// Precision mode.
+template <typename Int, typename Float, FormatStyle mode>
+bool FloatToBufferImpl(Int int_mantissa, int exp, int precision, Buffer *out,
+ int *exp_out) {
+ assert((CanFitMantissa<Float, Int>()));
+
+ const int int_bits = std::numeric_limits<Int>::digits;
+
+  // In precision mode, we start printing one char to the right because the
+  // output will also include the '.'.
+  // In fixed mode, we append the dot on the right afterwards.
+ out->begin = out->end =
+ out->data + 1 + kMaxFixedPrecision + (mode == FormatStyle::Precision);
+
+ if (exp >= 0) {
+ if (std::numeric_limits<Float>::digits + exp > int_bits) {
+ // The value will overflow the Int
+ return false;
+ }
+ int digits_printed = PrintIntegralDigits<mode>(int_mantissa << exp, out);
+ int digits_to_zero_pad = precision;
+ if (mode == FormatStyle::Precision) {
+ *exp_out = digits_printed - 1;
+ digits_to_zero_pad -= digits_printed - 1;
+ if (RemoveExtraPrecision(-digits_to_zero_pad, false, out, exp_out)) {
+ return true;
+ }
+ }
+ for (; digits_to_zero_pad-- > 0;) out->push_back('0');
+ return true;
+ }
+
+ exp = -exp;
+ // We need at least 4 empty bits for the next decimal digit.
+ // We will multiply by 10.
+ if (exp > int_bits - 4) return false;
+
+ const Int mask = (Int{1} << exp) - 1;
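+  // `mask` selects the fractional bits: everything above bit `exp` is the
+  // integral part of the fixed-point value.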
+
+ // Print the integral part first.
+ int digits_printed = PrintIntegralDigits<mode>(int_mantissa >> exp, out);
+ int_mantissa &= mask;
+
+ int fractional_count = precision;
+ if (mode == FormatStyle::Precision) {
+ if (digits_printed == 0) {
+ // Find the first non-zero digit, when in Precision mode.
+ *exp_out = 0;
+ if (int_mantissa) {
+ while (int_mantissa <= mask) {
+ int_mantissa *= 10;
+ --*exp_out;
+ }
+ }
+ out->push_front(static_cast<char>(int_mantissa >> exp) + '0');
+ out->push_back('.');
+ int_mantissa &= mask;
+ } else {
+ // We already have a digit, and a '.'
+ *exp_out = digits_printed - 1;
+ fractional_count -= *exp_out;
+ if (RemoveExtraPrecision(-fractional_count, int_mantissa != 0, out,
+ exp_out)) {
+ // If we had enough digits, return right away.
+ // The code below will try to round again otherwise.
+ return true;
+ }
+ }
+ }
+
+ auto get_next_digit = [&] {
+ int_mantissa *= 10;
+ int digit = static_cast<int>(int_mantissa >> exp);
+ int_mantissa &= mask;
+ return digit;
+ };
+
+ // Print fractional_count more digits, if available.
+ for (; fractional_count > 0; --fractional_count) {
+ out->push_back(get_next_digit() + '0');
+ }
+
+ int next_digit = get_next_digit();
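+  // Round half to even: round up when the remainder exceeds one half, or is
+  // exactly one half with a nonzero tail or an odd last digit.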
+ if (next_digit > 5 ||
+ (next_digit == 5 && (int_mantissa || out->last_digit() % 2 == 1))) {
+ RoundUp<mode>(out, exp_out);
+ }
+
+ return true;
+}
+
+template <FormatStyle mode, typename Float>
+bool FloatToBuffer(Decomposed<Float> decomposed, int precision, Buffer *out,
+ int *exp) {
+ if (precision > kMaxFixedPrecision) return false;
+
+ // Try with uint64_t.
+ if (CanFitMantissa<Float, std::uint64_t>() &&
+ FloatToBufferImpl<std::uint64_t, Float, mode>(
+ static_cast<std::uint64_t>(decomposed.mantissa),
+ static_cast<std::uint64_t>(decomposed.exponent), precision, out, exp))
+ return true;
+
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ // If that is not enough, try with __uint128_t.
+ return CanFitMantissa<Float, __uint128_t>() &&
+ FloatToBufferImpl<__uint128_t, Float, mode>(
+ static_cast<__uint128_t>(decomposed.mantissa),
+ static_cast<__uint128_t>(decomposed.exponent), precision, out,
+ exp);
+#endif
+ return false;
+}
+
+void WriteBufferToSink(char sign_char, y_absl::string_view str,
+ const FormatConversionSpecImpl &conv,
+ FormatSinkImpl *sink) {
+ int left_spaces = 0, zeros = 0, right_spaces = 0;
+ int missing_chars =
+ conv.width() >= 0 ? std::max(conv.width() - static_cast<int>(str.size()) -
+ static_cast<int>(sign_char != 0),
+ 0)
+ : 0;
+ if (conv.has_left_flag()) {
+ right_spaces = missing_chars;
+ } else if (conv.has_zero_flag()) {
+ zeros = missing_chars;
+ } else {
+ left_spaces = missing_chars;
+ }
+
+ sink->Append(left_spaces, ' ');
+ if (sign_char != '\0') sink->Append(1, sign_char);
+ sink->Append(zeros, '0');
+ sink->Append(str);
+ sink->Append(right_spaces, ' ');
+}
+
+template <typename Float>
+bool FloatToSink(const Float v, const FormatConversionSpecImpl &conv,
+ FormatSinkImpl *sink) {
+ // Print the sign or the sign column.
+ Float abs_v = v;
+ char sign_char = 0;
+ if (std::signbit(abs_v)) {
+ sign_char = '-';
+ abs_v = -abs_v;
+ } else if (conv.has_show_pos_flag()) {
+ sign_char = '+';
+ } else if (conv.has_sign_col_flag()) {
+ sign_char = ' ';
+ }
+
+ // Print nan/inf.
+ if (ConvertNonNumericFloats(sign_char, abs_v, conv, sink)) {
+ return true;
+ }
+
+ int precision = conv.precision() < 0 ? 6 : conv.precision();
+
+ int exp = 0;
+
+ auto decomposed = Decompose(abs_v);
+
+ Buffer buffer;
+
+ FormatConversionChar c = conv.conversion_char();
+
+ if (c == FormatConversionCharInternal::f ||
+ c == FormatConversionCharInternal::F) {
+ FormatF(decomposed.mantissa, decomposed.exponent,
+ {sign_char, precision, conv, sink});
+ return true;
+ } else if (c == FormatConversionCharInternal::e ||
+ c == FormatConversionCharInternal::E) {
+ if (!FloatToBuffer<FormatStyle::Precision>(decomposed, precision, &buffer,
+ &exp)) {
+ return FallbackToSnprintf(v, conv, sink);
+ }
+ if (!conv.has_alt_flag() && buffer.back() == '.') buffer.pop_back();
+ PrintExponent(
+ exp, FormatConversionCharIsUpper(conv.conversion_char()) ? 'E' : 'e',
+ &buffer);
+ } else if (c == FormatConversionCharInternal::g ||
+ c == FormatConversionCharInternal::G) {
+ precision = std::max(0, precision - 1);
+ if (!FloatToBuffer<FormatStyle::Precision>(decomposed, precision, &buffer,
+ &exp)) {
+ return FallbackToSnprintf(v, conv, sink);
+ }
+ if (precision + 1 > exp && exp >= -4) {
+ if (exp < 0) {
+ // Have 1.23456, needs 0.00123456
+ // Move the first digit
+ buffer.begin[1] = *buffer.begin;
+ // Add some zeros
+ for (; exp < -1; ++exp) *buffer.begin-- = '0';
+ *buffer.begin-- = '.';
+ *buffer.begin = '0';
+ } else if (exp > 0) {
+ // Have 1.23456, needs 1234.56
+ // Move the '.' exp positions to the right.
+ std::rotate(buffer.begin + 1, buffer.begin + 2, buffer.begin + exp + 2);
+ }
+ exp = 0;
+ }
+ if (!conv.has_alt_flag()) {
+ while (buffer.back() == '0') buffer.pop_back();
+ if (buffer.back() == '.') buffer.pop_back();
+ }
+ if (exp) {
+ PrintExponent(
+ exp, FormatConversionCharIsUpper(conv.conversion_char()) ? 'E' : 'e',
+ &buffer);
+ }
+ } else if (c == FormatConversionCharInternal::a ||
+ c == FormatConversionCharInternal::A) {
+ bool uppercase = (c == FormatConversionCharInternal::A);
+ FormatA(HexFloatTypeParams(Float{}), decomposed.mantissa,
+ decomposed.exponent, uppercase, {sign_char, precision, conv, sink});
+ return true;
+ } else {
+ return false;
+ }
+
+ WriteBufferToSink(sign_char,
+ y_absl::string_view(buffer.begin, buffer.end - buffer.begin),
+ conv, sink);
+
+ return true;
+}
+
+} // namespace
+
+bool ConvertFloatImpl(long double v, const FormatConversionSpecImpl &conv,
+ FormatSinkImpl *sink) {
+ if (IsDoubleDouble()) {
+ // This is the `double-double` representation of `long double`. We do not
+    // handle it natively. Fall back to snprintf.
+ return FallbackToSnprintf(v, conv, sink);
+ }
+
+ return FloatToSink(v, conv, sink);
+}
+
+bool ConvertFloatImpl(float v, const FormatConversionSpecImpl &conv,
+ FormatSinkImpl *sink) {
+ return FloatToSink(static_cast<double>(v), conv, sink);
+}
+
+bool ConvertFloatImpl(double v, const FormatConversionSpecImpl &conv,
+ FormatSinkImpl *sink) {
+ return FloatToSink(v, conv, sink);
+}
+
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.h
new file mode 100644
index 00000000000..d93a4157568
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.h
@@ -0,0 +1,37 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_FLOAT_CONVERSION_H_
+#define ABSL_STRINGS_INTERNAL_STR_FORMAT_FLOAT_CONVERSION_H_
+
+#include "y_absl/strings/internal/str_format/extension.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+
+bool ConvertFloatImpl(float v, const FormatConversionSpecImpl &conv,
+ FormatSinkImpl *sink);
+
+bool ConvertFloatImpl(double v, const FormatConversionSpecImpl &conv,
+ FormatSinkImpl *sink);
+
+bool ConvertFloatImpl(long double v, const FormatConversionSpecImpl &conv,
+ FormatSinkImpl *sink);
+
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_FLOAT_CONVERSION_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.cc
new file mode 100644
index 00000000000..ade3f67ef21
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.cc
@@ -0,0 +1,72 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/str_format/output.h"
+
+#include <errno.h>
+#include <cstring>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+
+namespace {
+struct ClearErrnoGuard {
+ ClearErrnoGuard() : old_value(errno) { errno = 0; }
+ ~ClearErrnoGuard() {
+ if (!errno) errno = old_value;
+ }
+ int old_value;
+};
+} // namespace
+
+void BufferRawSink::Write(string_view v) {
+ size_t to_write = std::min(v.size(), size_);
+ std::memcpy(buffer_, v.data(), to_write);
+ buffer_ += to_write;
+ size_ -= to_write;
+ total_written_ += v.size();
+}
+
+void FILERawSink::Write(string_view v) {
+ while (!v.empty() && !error_) {
+ // Reset errno to zero in case the libc implementation doesn't set errno
+ // when a failure occurs.
+ ClearErrnoGuard guard;
+
+ if (size_t result = std::fwrite(v.data(), 1, v.size(), output_)) {
+ // Some progress was made.
+ count_ += result;
+ v.remove_prefix(result);
+ } else {
+ if (errno == EINTR) {
+ continue;
+ } else if (errno) {
+ error_ = errno;
+ } else if (std::ferror(output_)) {
+        // Non-POSIX-compliant libc implementations may not set errno, so we
+        // have to check the stream's error indicator.
+ error_ = EBADF;
+ } else {
+ // We're likely on a non-POSIX system that encountered EINTR but had no
+ // way of reporting it.
+ continue;
+ }
+ }
+ }
+}
+
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.h
new file mode 100644
index 00000000000..8fc46fbafa8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.h
@@ -0,0 +1,96 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Output extension hooks for the Format library.
+// `internal::InvokeFlush` calls the appropriate flush function for the
+// specified output argument.
+// `BufferRawSink` is a simple output sink for a char buffer. Used by SnprintF.
+// `FILERawSink` is a std::FILE* based sink. Used by PrintF and FprintF.
+
+#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_OUTPUT_H_
+#define ABSL_STRINGS_INTERNAL_STR_FORMAT_OUTPUT_H_
+
+#include <cstdio>
+#include <ostream>
+#include <util/generic/string.h>
+
+#include "y_absl/base/port.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+
+// RawSink implementation that writes into a char* buffer.
+// It will not overflow the buffer, but will keep the total count of chars
+// that would have been written.
+class BufferRawSink {
+ public:
+ BufferRawSink(char* buffer, size_t size) : buffer_(buffer), size_(size) {}
+
+ size_t total_written() const { return total_written_; }
+ void Write(string_view v);
+
+ private:
+ char* buffer_;
+ size_t size_;
+ size_t total_written_ = 0;
+};
+
+// RawSink implementation that writes into a FILE*.
+// It keeps track of the total number of bytes written and any error encountered
+// during the writes.
+class FILERawSink {
+ public:
+ explicit FILERawSink(std::FILE* output) : output_(output) {}
+
+ void Write(string_view v);
+
+ size_t count() const { return count_; }
+ int error() const { return error_; }
+
+ private:
+ std::FILE* output_;
+ int error_ = 0;
+ size_t count_ = 0;
+};
+
+// Provide RawSink integration with common types from the STL.
+inline void AbslFormatFlush(TString* out, string_view s) {
+ out->append(s.data(), s.size());
+}
+inline void AbslFormatFlush(std::ostream* out, string_view s) {
+ out->write(s.data(), s.size());
+}
+
+inline void AbslFormatFlush(FILERawSink* sink, string_view v) {
+ sink->Write(v);
+}
+
+inline void AbslFormatFlush(BufferRawSink* sink, string_view v) {
+ sink->Write(v);
+}
+
+// This is an SFINAE helper to get a better compiler error message when the
+// type is not supported.
+template <typename T>
+auto InvokeFlush(T* out, string_view s) -> decltype(AbslFormatFlush(out, s)) {
+ AbslFormatFlush(out, s);
+}
+
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_OUTPUT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.cc
new file mode 100644
index 00000000000..af07e32fe57
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.cc
@@ -0,0 +1,339 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/str_format/parser.h"
+
+#include <assert.h>
+#include <string.h>
+#include <wchar.h>
+#include <cctype>
+#include <cstdint>
+
+#include <algorithm>
+#include <initializer_list>
+#include <limits>
+#include <ostream>
+#include <util/generic/string.h>
+#include <unordered_set>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+
+using CC = FormatConversionCharInternal;
+using LM = LengthMod;
+
+// Abbreviations to fit in the table below.
+constexpr auto f_sign = Flags::kSignCol;
+constexpr auto f_alt = Flags::kAlt;
+constexpr auto f_pos = Flags::kShowPos;
+constexpr auto f_left = Flags::kLeft;
+constexpr auto f_zero = Flags::kZero;
+
+ABSL_CONST_INIT const ConvTag kTags[256] = {
+ {}, {}, {}, {}, {}, {}, {}, {}, // 00-07
+ {}, {}, {}, {}, {}, {}, {}, {}, // 08-0f
+ {}, {}, {}, {}, {}, {}, {}, {}, // 10-17
+ {}, {}, {}, {}, {}, {}, {}, {}, // 18-1f
+ f_sign, {}, {}, f_alt, {}, {}, {}, {}, // !"#$%&'
+ {}, {}, {}, f_pos, {}, f_left, {}, {}, // ()*+,-./
+ f_zero, {}, {}, {}, {}, {}, {}, {}, // 01234567
+ {}, {}, {}, {}, {}, {}, {}, {}, // 89:;<=>?
+ {}, CC::A, {}, {}, {}, CC::E, CC::F, CC::G, // @ABCDEFG
+ {}, {}, {}, {}, LM::L, {}, {}, {}, // HIJKLMNO
+ {}, {}, {}, {}, {}, {}, {}, {}, // PQRSTUVW
+ CC::X, {}, {}, {}, {}, {}, {}, {}, // XYZ[\]^_
+ {}, CC::a, {}, CC::c, CC::d, CC::e, CC::f, CC::g, // `abcdefg
+ LM::h, CC::i, LM::j, {}, LM::l, {}, CC::n, CC::o, // hijklmno
+ CC::p, LM::q, {}, CC::s, LM::t, CC::u, {}, {}, // pqrstuvw
+ CC::x, {}, LM::z, {}, {}, {}, {}, {}, // xyz{|}!
+ {}, {}, {}, {}, {}, {}, {}, {}, // 80-87
+ {}, {}, {}, {}, {}, {}, {}, {}, // 88-8f
+ {}, {}, {}, {}, {}, {}, {}, {}, // 90-97
+ {}, {}, {}, {}, {}, {}, {}, {}, // 98-9f
+ {}, {}, {}, {}, {}, {}, {}, {}, // a0-a7
+ {}, {}, {}, {}, {}, {}, {}, {}, // a8-af
+ {}, {}, {}, {}, {}, {}, {}, {}, // b0-b7
+ {}, {}, {}, {}, {}, {}, {}, {}, // b8-bf
+ {}, {}, {}, {}, {}, {}, {}, {}, // c0-c7
+ {}, {}, {}, {}, {}, {}, {}, {}, // c8-cf
+ {}, {}, {}, {}, {}, {}, {}, {}, // d0-d7
+ {}, {}, {}, {}, {}, {}, {}, {}, // d8-df
+ {}, {}, {}, {}, {}, {}, {}, {}, // e0-e7
+ {}, {}, {}, {}, {}, {}, {}, {}, // e8-ef
+ {}, {}, {}, {}, {}, {}, {}, {}, // f0-f7
+ {}, {}, {}, {}, {}, {}, {}, {}, // f8-ff
+};
+
+namespace {
+
+bool CheckFastPathSetting(const UnboundConversion& conv) {
+ bool width_precision_needed =
+ conv.width.value() >= 0 || conv.precision.value() >= 0;
+ if (width_precision_needed && conv.flags == Flags::kBasic) {
+ fprintf(stderr,
+ "basic=%d left=%d show_pos=%d sign_col=%d alt=%d zero=%d "
+ "width=%d precision=%d\n",
+ conv.flags == Flags::kBasic ? 1 : 0,
+ FlagsContains(conv.flags, Flags::kLeft) ? 1 : 0,
+ FlagsContains(conv.flags, Flags::kShowPos) ? 1 : 0,
+ FlagsContains(conv.flags, Flags::kSignCol) ? 1 : 0,
+ FlagsContains(conv.flags, Flags::kAlt) ? 1 : 0,
+ FlagsContains(conv.flags, Flags::kZero) ? 1 : 0, conv.width.value(),
+ conv.precision.value());
+ return false;
+ }
+ return true;
+}
+
+template <bool is_positional>
+const char *ConsumeConversion(const char *pos, const char *const end,
+ UnboundConversion *conv, int *next_arg) {
+ const char* const original_pos = pos;
+ char c;
+ // Read the next char into `c` and update `pos`. Returns false if there are
+ // no more chars to read.
+#define ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR() \
+ do { \
+ if (ABSL_PREDICT_FALSE(pos == end)) return nullptr; \
+ c = *pos++; \
+ } while (0)
+
+ const auto parse_digits = [&] {
+ int digits = c - '0';
+    // We do not want to overflow `digits`, so we consume at most digits10
+    // digits. If there are more digits, parsing will fail later on when the
+    // digit doesn't match the expected characters.
+ int num_digits = std::numeric_limits<int>::digits10;
+ for (;;) {
+ if (ABSL_PREDICT_FALSE(pos == end)) break;
+ c = *pos++;
+ if (!std::isdigit(c)) break;
+ --num_digits;
+ if (ABSL_PREDICT_FALSE(!num_digits)) break;
+ digits = 10 * digits + c - '0';
+ }
+ return digits;
+ };
+
+ if (is_positional) {
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+ if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) return nullptr;
+ conv->arg_position = parse_digits();
+ assert(conv->arg_position > 0);
+ if (ABSL_PREDICT_FALSE(c != '$')) return nullptr;
+ }
+
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+
+ // We should start with the basic flag on.
+ assert(conv->flags == Flags::kBasic);
+
+  // Any non-alpha character makes this conversion not basic.
+  // This includes flags (-+ #0), width (1-9, *), or precision (.).
+ // All conversion characters and length modifiers are alpha characters.
+ if (c < 'A') {
+ while (c <= '0') {
+ auto tag = GetTagForChar(c);
+ if (tag.is_flags()) {
+ conv->flags = conv->flags | tag.as_flags();
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+ } else {
+ break;
+ }
+ }
+
+ if (c <= '9') {
+ if (c >= '0') {
+ int maybe_width = parse_digits();
+ if (!is_positional && c == '$') {
+ if (ABSL_PREDICT_FALSE(*next_arg != 0)) return nullptr;
+ // Positional conversion.
+ *next_arg = -1;
+ return ConsumeConversion<true>(original_pos, end, conv, next_arg);
+ }
+ conv->flags = conv->flags | Flags::kNonBasic;
+ conv->width.set_value(maybe_width);
+ } else if (c == '*') {
+ conv->flags = conv->flags | Flags::kNonBasic;
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+ if (is_positional) {
+ if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) return nullptr;
+ conv->width.set_from_arg(parse_digits());
+ if (ABSL_PREDICT_FALSE(c != '$')) return nullptr;
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+ } else {
+ conv->width.set_from_arg(++*next_arg);
+ }
+ }
+ }
+
+ if (c == '.') {
+ conv->flags = conv->flags | Flags::kNonBasic;
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+ if (std::isdigit(c)) {
+ conv->precision.set_value(parse_digits());
+ } else if (c == '*') {
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+ if (is_positional) {
+ if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) return nullptr;
+ conv->precision.set_from_arg(parse_digits());
+ if (c != '$') return nullptr;
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+ } else {
+ conv->precision.set_from_arg(++*next_arg);
+ }
+ } else {
+ conv->precision.set_value(0);
+ }
+ }
+ }
+
+ auto tag = GetTagForChar(c);
+
+ if (ABSL_PREDICT_FALSE(!tag.is_conv())) {
+ if (ABSL_PREDICT_FALSE(!tag.is_length())) return nullptr;
+
+ // It is a length modifier.
+ using str_format_internal::LengthMod;
+ LengthMod length_mod = tag.as_length();
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+ if (c == 'h' && length_mod == LengthMod::h) {
+ conv->length_mod = LengthMod::hh;
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+ } else if (c == 'l' && length_mod == LengthMod::l) {
+ conv->length_mod = LengthMod::ll;
+ ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+ } else {
+ conv->length_mod = length_mod;
+ }
+ tag = GetTagForChar(c);
+ if (ABSL_PREDICT_FALSE(!tag.is_conv())) return nullptr;
+ }
+
+ assert(CheckFastPathSetting(*conv));
+ (void)(&CheckFastPathSetting);
+
+ conv->conv = tag.as_conv();
+ if (!is_positional) conv->arg_position = ++*next_arg;
+ return pos;
+}
+
+} // namespace
+
+TString LengthModToString(LengthMod v) {
+ switch (v) {
+ case LengthMod::h:
+ return "h";
+ case LengthMod::hh:
+ return "hh";
+ case LengthMod::l:
+ return "l";
+ case LengthMod::ll:
+ return "ll";
+ case LengthMod::L:
+ return "L";
+ case LengthMod::j:
+ return "j";
+ case LengthMod::z:
+ return "z";
+ case LengthMod::t:
+ return "t";
+ case LengthMod::q:
+ return "q";
+ case LengthMod::none:
+ return "";
+ }
+ return "";
+}
+
+const char *ConsumeUnboundConversion(const char *p, const char *end,
+ UnboundConversion *conv, int *next_arg) {
+ if (*next_arg < 0) return ConsumeConversion<true>(p, end, conv, next_arg);
+ return ConsumeConversion<false>(p, end, conv, next_arg);
+}
+
+struct ParsedFormatBase::ParsedFormatConsumer {
+ explicit ParsedFormatConsumer(ParsedFormatBase *parsedformat)
+ : parsed(parsedformat), data_pos(parsedformat->data_.get()) {}
+
+ bool Append(string_view s) {
+ if (s.empty()) return true;
+
+ size_t text_end = AppendText(s);
+
+ if (!parsed->items_.empty() && !parsed->items_.back().is_conversion) {
+ // Let's extend the existing text run.
+ parsed->items_.back().text_end = text_end;
+ } else {
+ // Let's make a new text run.
+ parsed->items_.push_back({false, text_end, {}});
+ }
+ return true;
+ }
+
+ bool ConvertOne(const UnboundConversion &conv, string_view s) {
+ size_t text_end = AppendText(s);
+ parsed->items_.push_back({true, text_end, conv});
+ return true;
+ }
+
+ size_t AppendText(string_view s) {
+ memcpy(data_pos, s.data(), s.size());
+ data_pos += s.size();
+ return static_cast<size_t>(data_pos - parsed->data_.get());
+ }
+
+ ParsedFormatBase *parsed;
+ char* data_pos;
+};
+
+ParsedFormatBase::ParsedFormatBase(
+ string_view format, bool allow_ignored,
+ std::initializer_list<FormatConversionCharSet> convs)
+ : data_(format.empty() ? nullptr : new char[format.size()]) {
+ has_error_ = !ParseFormatString(format, ParsedFormatConsumer(this)) ||
+ !MatchesConversions(allow_ignored, convs);
+}
+
+bool ParsedFormatBase::MatchesConversions(
+ bool allow_ignored,
+ std::initializer_list<FormatConversionCharSet> convs) const {
+ std::unordered_set<int> used;
+ auto add_if_valid_conv = [&](int pos, char c) {
+ if (static_cast<size_t>(pos) > convs.size() ||
+ !Contains(convs.begin()[pos - 1], c))
+ return false;
+ used.insert(pos);
+ return true;
+ };
+ for (const ConversionItem &item : items_) {
+ if (!item.is_conversion) continue;
+ auto &conv = item.conv;
+ if (conv.precision.is_from_arg() &&
+ !add_if_valid_conv(conv.precision.get_from_arg(), '*'))
+ return false;
+ if (conv.width.is_from_arg() &&
+ !add_if_valid_conv(conv.width.get_from_arg(), '*'))
+ return false;
+ if (!add_if_valid_conv(conv.arg_position,
+ FormatConversionCharToChar(conv.conv)))
+ return false;
+ }
+ return used.size() == convs.size() || allow_ignored;
+}
+
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.h
new file mode 100644
index 00000000000..ba614bb8b4a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.h
@@ -0,0 +1,357 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_PARSER_H_
+#define ABSL_STRINGS_INTERNAL_STR_FORMAT_PARSER_H_
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include <cassert>
+#include <cstdint>
+#include <initializer_list>
+#include <iosfwd>
+#include <iterator>
+#include <memory>
+#include <util/generic/string.h>
+#include <vector>
+
+#include "y_absl/strings/internal/str_format/checker.h"
+#include "y_absl/strings/internal/str_format/extension.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace str_format_internal {
+
+enum class LengthMod : std::uint8_t { h, hh, l, ll, L, j, z, t, q, none };
+
+TString LengthModToString(LengthMod v);
+
+// The analyzed properties of a single specified conversion.
+struct UnboundConversion {
+ UnboundConversion() {}
+
+ class InputValue {
+ public:
+ void set_value(int value) {
+ assert(value >= 0);
+ value_ = value;
+ }
+ int value() const { return value_; }
+
+ // Marks the value as "from arg". aka the '*' format.
+ // Requires `value >= 1`.
+ // When set, is_from_arg() return true and get_from_arg() returns the
+ // original value.
+ // `value()`'s return value is unspecfied in this state.
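+    // For example, set_from_arg(3) stores -4, so is_from_arg() is true and
+    // get_from_arg() recovers 3.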
+ void set_from_arg(int value) {
+ assert(value > 0);
+ value_ = -value - 1;
+ }
+ bool is_from_arg() const { return value_ < -1; }
+ int get_from_arg() const {
+ assert(is_from_arg());
+ return -value_ - 1;
+ }
+
+ private:
+ int value_ = -1;
+ };
+
+ // No need to initialize. It will always be set in the parser.
+ int arg_position;
+
+ InputValue width;
+ InputValue precision;
+
+ Flags flags = Flags::kBasic;
+ LengthMod length_mod = LengthMod::none;
+ FormatConversionChar conv = FormatConversionCharInternal::kNone;
+};
+
+// Consume conversion spec prefix (not including '%') of [p, end) if valid.
+// Examples of valid specs would be e.g.: "s", "d", "-12.6f".
+// If valid, it returns the first character following the conversion spec,
+// and the spec part is broken down and returned in 'conv'.
+// If invalid, returns nullptr.
+const char* ConsumeUnboundConversion(const char* p, const char* end,
+ UnboundConversion* conv, int* next_arg);
+
+// Helper tag class for the table below.
+// It allows fast `char -> ConversionChar/LengthMod/Flags` checking and
+// conversions.
+class ConvTag {
+ public:
+ constexpr ConvTag(FormatConversionChar conversion_char) // NOLINT
+ : tag_(static_cast<uint8_t>(conversion_char)) {}
+ constexpr ConvTag(LengthMod length_mod) // NOLINT
+ : tag_(0x80 | static_cast<uint8_t>(length_mod)) {}
+ constexpr ConvTag(Flags flags) // NOLINT
+ : tag_(0xc0 | static_cast<uint8_t>(flags)) {}
+ constexpr ConvTag() : tag_(0xFF) {}
+
+ bool is_conv() const { return (tag_ & 0x80) == 0; }
+ bool is_length() const { return (tag_ & 0xC0) == 0x80; }
+ bool is_flags() const { return (tag_ & 0xE0) == 0xC0; }
+
+ FormatConversionChar as_conv() const {
+ assert(is_conv());
+ assert(!is_length());
+ assert(!is_flags());
+ return static_cast<FormatConversionChar>(tag_);
+ }
+ LengthMod as_length() const {
+ assert(!is_conv());
+ assert(is_length());
+ assert(!is_flags());
+ return static_cast<LengthMod>(tag_ & 0x3F);
+ }
+ Flags as_flags() const {
+ assert(!is_conv());
+ assert(!is_length());
+ assert(is_flags());
+ return static_cast<Flags>(tag_ & 0x1F);
+ }
+
+ private:
+ uint8_t tag_;
+};
+
+extern const ConvTag kTags[256];
+// Keep a single table for all the conversion chars and length modifiers.
+inline ConvTag GetTagForChar(char c) {
+ return kTags[static_cast<unsigned char>(c)];
+}
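+
+// For example, single-character dispatch through the tag table might look
+// like this (illustrative):
+//   ConvTag tag = GetTagForChar('d');
+//   if (tag.is_conv()) {
+//     FormatConversionChar cc = tag.as_conv();  // the 'd' conversion
+//   } else if (tag.is_length()) {
+//     LengthMod lm = tag.as_length();           // e.g. for 'h' or 'l'
+//   } else if (tag.is_flags()) {
+//     Flags f = tag.as_flags();                 // e.g. for '-' or '#'
+//   }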
+
+// Parse the format string provided in 'src' and pass the identified items into
+// 'consumer'.
+// Text runs will be passed by calling
+// Consumer::Append(string_view);
+// ConversionItems will be passed by calling
+// Consumer::ConvertOne(UnboundConversion, string_view);
+// In the case of ConvertOne, the string_view that is passed is the
+// portion of the format string corresponding to the conversion, not including
+// the leading %. On success, it returns true. On failure, it stops and returns
+// false.
+template <typename Consumer>
+bool ParseFormatString(string_view src, Consumer consumer) {
+ int next_arg = 0;
+ const char* p = src.data();
+ const char* const end = p + src.size();
+ while (p != end) {
+ const char* percent = static_cast<const char*>(memchr(p, '%', end - p));
+ if (!percent) {
+ // We found the last substring.
+ return consumer.Append(string_view(p, end - p));
+ }
+ // We found a percent, so push the text run then process the percent.
+ if (ABSL_PREDICT_FALSE(!consumer.Append(string_view(p, percent - p)))) {
+ return false;
+ }
+ if (ABSL_PREDICT_FALSE(percent + 1 >= end)) return false;
+
+ auto tag = GetTagForChar(percent[1]);
+ if (tag.is_conv()) {
+ if (ABSL_PREDICT_FALSE(next_arg < 0)) {
+ // This indicates an error in the format string.
+ // The only way to get `next_arg < 0` here is to have a positional
+ // argument first which sets next_arg to -1 and then a non-positional
+ // argument.
+ return false;
+ }
+ p = percent + 2;
+
+ // Keep this case separate from the one below.
+ // ConvertOne is more efficient when the compiler can see that the `basic`
+ // flag is set.
+ UnboundConversion conv;
+ conv.conv = tag.as_conv();
+ conv.arg_position = ++next_arg;
+ if (ABSL_PREDICT_FALSE(
+ !consumer.ConvertOne(conv, string_view(percent + 1, 1)))) {
+ return false;
+ }
+ } else if (percent[1] != '%') {
+ UnboundConversion conv;
+ p = ConsumeUnboundConversion(percent + 1, end, &conv, &next_arg);
+ if (ABSL_PREDICT_FALSE(p == nullptr)) return false;
+ if (ABSL_PREDICT_FALSE(!consumer.ConvertOne(
+ conv, string_view(percent + 1, p - (percent + 1))))) {
+ return false;
+ }
+ } else {
+ if (ABSL_PREDICT_FALSE(!consumer.Append("%"))) return false;
+ p = percent + 2;
+ continue;
+ }
+ }
+ return true;
+}
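+
+// A minimal Consumer sketch (hypothetical, for illustration only): it counts
+// the conversions in a format string and accepts all text runs.
+//   struct CountingConsumer {
+//     int* conversions;
+//     bool Append(string_view) { return true; }
+//     bool ConvertOne(const UnboundConversion&, string_view) {
+//       ++*conversions;
+//       return true;
+//     }
+//   };
+//   int n = 0;
+//   ParseFormatString("%d and %s", CountingConsumer{&n});  // n == 2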
+
+// Always returns true, or fails to compile in a constexpr context if s does not
+// point to a constexpr char array.
+constexpr bool EnsureConstexpr(string_view s) {
+ return s.empty() || s[0] == s[0];
+}
+
+class ParsedFormatBase {
+ public:
+ explicit ParsedFormatBase(
+ string_view format, bool allow_ignored,
+ std::initializer_list<FormatConversionCharSet> convs);
+
+ ParsedFormatBase(const ParsedFormatBase& other) { *this = other; }
+
+ ParsedFormatBase(ParsedFormatBase&& other) { *this = std::move(other); }
+
+ ParsedFormatBase& operator=(const ParsedFormatBase& other) {
+ if (this == &other) return *this;
+ has_error_ = other.has_error_;
+ items_ = other.items_;
+ size_t text_size = items_.empty() ? 0 : items_.back().text_end;
+ data_.reset(new char[text_size]);
+ memcpy(data_.get(), other.data_.get(), text_size);
+ return *this;
+ }
+
+ ParsedFormatBase& operator=(ParsedFormatBase&& other) {
+ if (this == &other) return *this;
+ has_error_ = other.has_error_;
+ data_ = std::move(other.data_);
+ items_ = std::move(other.items_);
+ // Reset the vector to make sure the invariants hold.
+ other.items_.clear();
+ return *this;
+ }
+
+ template <typename Consumer>
+ bool ProcessFormat(Consumer consumer) const {
+ const char* const base = data_.get();
+ string_view text(base, 0);
+ for (const auto& item : items_) {
+ const char* const end = text.data() + text.size();
+ text = string_view(end, (base + item.text_end) - end);
+ if (item.is_conversion) {
+ if (!consumer.ConvertOne(item.conv, text)) return false;
+ } else {
+ if (!consumer.Append(text)) return false;
+ }
+ }
+ return !has_error_;
+ }
+
+ bool has_error() const { return has_error_; }
+
+ private:
+ // Returns whether the conversions match and if !allow_ignored it verifies
+ // that all conversions are used by the format.
+ bool MatchesConversions(
+ bool allow_ignored,
+ std::initializer_list<FormatConversionCharSet> convs) const;
+
+ struct ParsedFormatConsumer;
+
+ struct ConversionItem {
+ bool is_conversion;
+ // Points to the past-the-end location of this element in the data_ array.
+ size_t text_end;
+ UnboundConversion conv;
+ };
+
+ bool has_error_;
+ std::unique_ptr<char[]> data_;
+ std::vector<ConversionItem> items_;
+};
+
+
+// A value type representing a preparsed format. These can be created, copied
+// around, and reused to speed up formatting loops.
+// The user must specify through the template arguments the conversion
+// characters used in the format. This will be checked at compile time.
+//
+// This class uses Conv enum values to specify each argument.
+// This allows for more flexibility as you can specify multiple possible
+// conversion characters for each argument.
+// ParsedFormat<char...> is a simplified alias for when the user only
+// needs to specify a single conversion character for each argument.
+//
+// Example:
+// // Extended format supports multiple characters per argument:
+// using MyFormat = ExtendedParsedFormat<Conv::d | Conv::x>;
+// MyFormat GetFormat(bool use_hex) {
+// if (use_hex) return MyFormat("foo %x bar");
+// return MyFormat("foo %d bar");
+// }
+// // 'format' can be used with any value that supports 'd' and 'x',
+// // like `int`.
+// auto format = GetFormat(use_hex);
+// value = StringF(format, i);
+//
+// This class also supports runtime format checking with the ::New() and
+// ::NewAllowIgnored() factory functions.
+// This is the only API that allows the user to pass a runtime specified format
+// string. These factory functions will return NULL if the format does not match
+// the conversions requested by the user.
+template <FormatConversionCharSet... C>
+class ExtendedParsedFormat : public str_format_internal::ParsedFormatBase {
+ public:
+ explicit ExtendedParsedFormat(string_view format)
+#ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
+ __attribute__((
+ enable_if(str_format_internal::EnsureConstexpr(format),
+ "Format string is not constexpr."),
+ enable_if(str_format_internal::ValidFormatImpl<C...>(format),
+ "Format specified does not match the template arguments.")))
+#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
+ : ExtendedParsedFormat(format, false) {
+ }
+
+ // ExtendedParsedFormat factory function.
+ // The user still has to specify the conversion characters, but they will not
+  // be checked at compile time. Instead, they will be checked at runtime.
+ // This delays the checking to runtime, but allows the user to pass
+ // dynamically sourced formats.
+ // It returns NULL if the format does not match the conversion characters.
+ // The user is responsible for checking the return value before using it.
+ //
+ // The 'New' variant will check that all the specified arguments are being
+ // consumed by the format and return NULL if any argument is being ignored.
+ // The 'NewAllowIgnored' variant will not verify this and will allow formats
+ // that ignore arguments.
+ static std::unique_ptr<ExtendedParsedFormat> New(string_view format) {
+ return New(format, false);
+ }
+ static std::unique_ptr<ExtendedParsedFormat> NewAllowIgnored(
+ string_view format) {
+ return New(format, true);
+ }
+
+ private:
+ static std::unique_ptr<ExtendedParsedFormat> New(string_view format,
+ bool allow_ignored) {
+ std::unique_ptr<ExtendedParsedFormat> conv(
+ new ExtendedParsedFormat(format, allow_ignored));
+ if (conv->has_error()) return nullptr;
+ return conv;
+ }
+
+ ExtendedParsedFormat(string_view s, bool allow_ignored)
+ : ParsedFormatBase(s, allow_ignored, {C...}) {}
+};
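+
+// For illustration, runtime checking with a dynamically sourced format might
+// look like this (assuming `spec` arrives at runtime):
+//   auto format =
+//       ExtendedParsedFormat<FormatConversionCharSet::d>::New(spec);
+//   if (format == nullptr) { /* reject the invalid format */ }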
+} // namespace str_format_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_PARSER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/ya.make
new file mode 100644
index 00000000000..ff8069cd0f0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/ya.make
@@ -0,0 +1,40 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ arg.cc
+ bind.cc
+ extension.cc
+ float_conversion.cc
+ output.cc
+ parser.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_join_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_join_internal.h
new file mode 100644
index 00000000000..0a220fa33da
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_join_internal.h
@@ -0,0 +1,314 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// This file declares INTERNAL parts of the Join API that are inlined/templated
+// or otherwise need to be available at compile time. The main abstractions
+// defined in this file are:
+//
+// - A handful of default Formatters
+// - JoinAlgorithm() overloads
+// - JoinRange() overloads
+// - JoinTuple()
+//
+// DO NOT INCLUDE THIS FILE DIRECTLY. Use this file by including
+// y_absl/strings/str_join.h
+//
+// IWYU pragma: private, include "y_absl/strings/str_join.h"
+
+#ifndef ABSL_STRINGS_INTERNAL_STR_JOIN_INTERNAL_H_
+#define ABSL_STRINGS_INTERNAL_STR_JOIN_INTERNAL_H_
+
+#include <cstring>
+#include <iterator>
+#include <memory>
+#include <util/generic/string.h>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/strings/internal/ostringstream.h"
+#include "y_absl/strings/internal/resize_uninitialized.h"
+#include "y_absl/strings/str_cat.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+//
+// Formatter objects
+//
+// The following are implementation classes for standard Formatter objects. The
+// factory functions that users will call to create and use these formatters are
+// defined and documented in strings/join.h.
+//
+
+// The default formatter. Converts alpha-numeric types to strings.
+struct AlphaNumFormatterImpl {
+  // This template is needed to support passing in a dereferenced
+  // vector<bool>::iterator.
+ template <typename T>
+ void operator()(TString* out, const T& t) const {
+ StrAppend(out, AlphaNum(t));
+ }
+
+ void operator()(TString* out, const AlphaNum& t) const {
+ StrAppend(out, t);
+ }
+};
+
+// A type that's used to overload the JoinAlgorithm() function (defined below)
+// for ranges that do not require additional formatting (e.g., a range of
+// strings).
+struct NoFormatter : public AlphaNumFormatterImpl {};
+
+// Formats types to strings using the << operator.
+class StreamFormatterImpl {
+ public:
+  // The method isn't const because it mutates state. Making it const would
+  // render StreamFormatterImpl thread-hostile.
+ template <typename T>
+ void operator()(TString* out, const T& t) {
+ // The stream is created lazily to avoid paying the relatively high cost
+ // of its construction when joining an empty range.
+ if (strm_) {
+ strm_->clear(); // clear the bad, fail and eof bits in case they were set
+ strm_->str(out);
+ } else {
+ strm_.reset(new strings_internal::OStringStream(out));
+ }
+ *strm_ << t;
+ }
+
+ private:
+ std::unique_ptr<strings_internal::OStringStream> strm_;
+};
+
+// Formats a std::pair<>. The 'first' member is formatted using f1_ and the
+// 'second' member is formatted using f2_. sep_ is the separator.
+template <typename F1, typename F2>
+class PairFormatterImpl {
+ public:
+ PairFormatterImpl(F1 f1, y_absl::string_view sep, F2 f2)
+ : f1_(std::move(f1)), sep_(sep), f2_(std::move(f2)) {}
+
+ template <typename T>
+ void operator()(TString* out, const T& p) {
+ f1_(out, p.first);
+ out->append(sep_);
+ f2_(out, p.second);
+ }
+
+ template <typename T>
+ void operator()(TString* out, const T& p) const {
+ f1_(out, p.first);
+ out->append(sep_);
+ f2_(out, p.second);
+ }
+
+ private:
+ F1 f1_;
+ TString sep_;
+ F2 f2_;
+};
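+
+// For example, joining a map through the PairFormatter() factory documented
+// in str_join.h produces "key=value" items (illustrative):
+//   std::map<TString, int> m = {{"a", 1}, {"b", 2}};
+//   // StrJoin(m, ",", PairFormatter("=")) == "a=1,b=2"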
+
+// Wraps another formatter and dereferences the argument to operator() then
+// passes the dereferenced argument to the wrapped formatter. This can be
+// useful, for example, to join a std::vector<int*>.
+template <typename Formatter>
+class DereferenceFormatterImpl {
+ public:
+ DereferenceFormatterImpl() : f_() {}
+ explicit DereferenceFormatterImpl(Formatter&& f)
+ : f_(std::forward<Formatter>(f)) {}
+
+ template <typename T>
+ void operator()(TString* out, const T& t) {
+ f_(out, *t);
+ }
+
+ template <typename T>
+ void operator()(TString* out, const T& t) const {
+ f_(out, *t);
+ }
+
+ private:
+ Formatter f_;
+};
+
+// DefaultFormatter<T> is a traits class that selects a default Formatter to use
+// for the given type T. The ::Type member names the Formatter to use. This is
+// used by the strings::Join() functions that do NOT take a Formatter argument,
+// in which case a default Formatter must be chosen.
+//
+// AlphaNumFormatterImpl is the default in the base template, followed by
+// specializations for other types.
+template <typename ValueType>
+struct DefaultFormatter {
+ typedef AlphaNumFormatterImpl Type;
+};
+template <>
+struct DefaultFormatter<const char*> {
+ typedef AlphaNumFormatterImpl Type;
+};
+template <>
+struct DefaultFormatter<char*> {
+ typedef AlphaNumFormatterImpl Type;
+};
+template <>
+struct DefaultFormatter<TString> {
+ typedef NoFormatter Type;
+};
+template <>
+struct DefaultFormatter<y_absl::string_view> {
+ typedef NoFormatter Type;
+};
+template <typename ValueType>
+struct DefaultFormatter<ValueType*> {
+ typedef DereferenceFormatterImpl<typename DefaultFormatter<ValueType>::Type>
+ Type;
+};
+
+template <typename ValueType>
+struct DefaultFormatter<std::unique_ptr<ValueType>>
+ : public DefaultFormatter<ValueType*> {};
+
+//
+// JoinAlgorithm() functions
+//
+
+// The main joining algorithm. This simply joins the elements in the given
+// iterator range, each separated by the given separator, into an output string,
+// and formats each element using the provided Formatter object.
+template <typename Iterator, typename Formatter>
+TString JoinAlgorithm(Iterator start, Iterator end, y_absl::string_view s,
+ Formatter&& f) {
+ TString result;
+ y_absl::string_view sep("");
+ for (Iterator it = start; it != end; ++it) {
+ result.append(sep.data(), sep.size());
+ f(&result, *it);
+ sep = s;
+ }
+ return result;
+}
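+
+// For illustration: the separator starts out empty and becomes `s` after the
+// first element, so no leading separator is emitted.
+//   std::vector<int> v = {1, 2, 3};
+//   TString joined = JoinAlgorithm(v.begin(), v.end(), "-",
+//                                  AlphaNumFormatterImpl());  // "1-2-3"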
+
+// A joining algorithm that's optimized for a forward iterator range of
+// string-like objects that do not need any additional formatting. This is to
+// optimize the common case of joining, say, a std::vector<string> or a
+// std::vector<y_absl::string_view>.
+//
+// This is an overload of the previous JoinAlgorithm() function. Here the
+// Formatter argument is of type NoFormatter. Since NoFormatter is an internal
+// type, this overload is only invoked when strings::Join() is called with a
+// range of string-like objects (e.g., TString, y_absl::string_view), and an
+// explicit Formatter argument is NOT specified.
+//
+// The optimization is that the needed space will be reserved in the output
+// string to avoid the need to resize while appending. To do this, the iterator
+// range will be traversed twice: once to calculate the total needed size, and
+// then again to copy the elements and delimiters to the output string.
+template <typename Iterator,
+ typename = typename std::enable_if<std::is_convertible<
+ typename std::iterator_traits<Iterator>::iterator_category,
+ std::forward_iterator_tag>::value>::type>
+TString JoinAlgorithm(Iterator start, Iterator end, y_absl::string_view s,
+ NoFormatter) {
+ TString result;
+ if (start != end) {
+      // First pass: sum the total output size.
+ size_t result_size = start->size();
+ for (Iterator it = start; ++it != end;) {
+ result_size += s.size();
+ result_size += it->size();
+ }
+
+ if (result_size > 0) {
+ STLStringResizeUninitialized(&result, result_size);
+
+      // Second pass: copy the elements and separators into place.
+ char* result_buf = &*result.begin();
+ memcpy(result_buf, start->data(), start->size());
+ result_buf += start->size();
+ for (Iterator it = start; ++it != end;) {
+ memcpy(result_buf, s.data(), s.size());
+ result_buf += s.size();
+ memcpy(result_buf, it->data(), it->size());
+ result_buf += it->size();
+ }
+ }
+ }
+
+ return result;
+}
+
+// JoinTupleLoop implements a loop over the elements of a std::tuple, which
+// are heterogeneous. The primary template matches the tuple interior case. It
+// continues the iteration after appending a separator (for nonzero indices)
+// and formatting an element of the tuple. The specialization for the I=N case
+// matches the end-of-tuple, and terminates the iteration.
+template <size_t I, size_t N>
+struct JoinTupleLoop {
+ template <typename Tup, typename Formatter>
+ void operator()(TString* out, const Tup& tup, y_absl::string_view sep,
+ Formatter&& fmt) {
+ if (I > 0) out->append(sep.data(), sep.size());
+ fmt(out, std::get<I>(tup));
+ JoinTupleLoop<I + 1, N>()(out, tup, sep, fmt);
+ }
+};
+template <size_t N>
+struct JoinTupleLoop<N, N> {
+ template <typename Tup, typename Formatter>
+ void operator()(TString*, const Tup&, y_absl::string_view, Formatter&&) {}
+};
+
+template <typename... T, typename Formatter>
+TString JoinAlgorithm(const std::tuple<T...>& tup, y_absl::string_view sep,
+ Formatter&& fmt) {
+ TString result;
+ JoinTupleLoop<0, sizeof...(T)>()(&result, tup, sep, fmt);
+ return result;
+}
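+
+// For example, JoinAlgorithm(std::make_tuple(1, "a"), ", ", fmt) unrolls at
+// compile time to roughly:
+//   fmt(&result, std::get<0>(tup));
+//   result.append(", ");
+//   fmt(&result, std::get<1>(tup));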
+
+template <typename Iterator>
+TString JoinRange(Iterator first, Iterator last,
+ y_absl::string_view separator) {
+ // No formatter was explicitly given, so a default must be chosen.
+ typedef typename std::iterator_traits<Iterator>::value_type ValueType;
+ typedef typename DefaultFormatter<ValueType>::Type Formatter;
+ return JoinAlgorithm(first, last, separator, Formatter());
+}
+
+template <typename Range, typename Formatter>
+TString JoinRange(const Range& range, y_absl::string_view separator,
+ Formatter&& fmt) {
+ using std::begin;
+ using std::end;
+ return JoinAlgorithm(begin(range), end(range), separator, fmt);
+}
+
+template <typename Range>
+TString JoinRange(const Range& range, y_absl::string_view separator) {
+ using std::begin;
+ using std::end;
+ return JoinRange(begin(range), end(range), separator);
+}
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_STR_JOIN_INTERNAL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_split_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_split_internal.h
new file mode 100644
index 00000000000..237864c0ede
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_split_internal.h
@@ -0,0 +1,430 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// This file declares INTERNAL parts of the Split API that are inline/templated
+// or otherwise need to be available at compile time. The main abstractions
+// defined in here are
+//
+// - ConvertibleToStringView
+// - SplitIterator<>
+// - Splitter<>
+//
+// DO NOT INCLUDE THIS FILE DIRECTLY. Use this file by including
+// y_absl/strings/str_split.h.
+//
+// IWYU pragma: private, include "y_absl/strings/str_split.h"
+
+#ifndef ABSL_STRINGS_INTERNAL_STR_SPLIT_INTERNAL_H_
+#define ABSL_STRINGS_INTERNAL_STR_SPLIT_INTERNAL_H_
+
+#include <array>
+#include <initializer_list>
+#include <iterator>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/string_view.h"
+
+#ifdef _GLIBCXX_DEBUG
+#include "y_absl/strings/internal/stl_type_traits.h"
+#endif // _GLIBCXX_DEBUG
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+// This class is implicitly constructible from everything that y_absl::string_view
+// is implicitly constructible from, except for rvalue strings. This means it
+// can be used as a function parameter in places where passing a temporary
+// string might cause memory lifetime issues.
+class ConvertibleToStringView {
+ public:
+ ConvertibleToStringView(const char* s) // NOLINT(runtime/explicit)
+ : value_(s) {}
+ ConvertibleToStringView(char* s) : value_(s) {} // NOLINT(runtime/explicit)
+ ConvertibleToStringView(y_absl::string_view s) // NOLINT(runtime/explicit)
+ : value_(s) {}
+ ConvertibleToStringView(const TString& s) // NOLINT(runtime/explicit)
+ : value_(s) {}
+
+ // Disable conversion from rvalue strings.
+ ConvertibleToStringView(TString&& s) = delete;
+ ConvertibleToStringView(const TString&& s) = delete;
+
+ y_absl::string_view value() const { return value_; }
+
+ private:
+ y_absl::string_view value_;
+};
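+
+// For illustration, the deleted rvalue overloads turn the dangling case into a
+// compile-time error (`Use` is a hypothetical function):
+//   y_absl::string_view Use(ConvertibleToStringView c) { return c.value(); }
+//   TString s = "x";
+//   Use(s);                // OK: the lvalue outlives the call.
+//   // Use(TString("x"));  // ill-formed: rvalue TString is rejected.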
+
+// An iterator that enumerates the parts of a string from a Splitter. The text
+// to be split, the Delimiter, and the Predicate are all taken from the given
+// Splitter object. Iterators may only be compared if they refer to the same
+// Splitter instance.
+//
+// This class is NOT part of the public splitting API.
+template <typename Splitter>
+class SplitIterator {
+ public:
+ using iterator_category = std::input_iterator_tag;
+ using value_type = y_absl::string_view;
+ using difference_type = ptrdiff_t;
+ using pointer = const value_type*;
+ using reference = const value_type&;
+
+ enum State { kInitState, kLastState, kEndState };
+ SplitIterator(State state, const Splitter* splitter)
+ : pos_(0),
+ state_(state),
+ splitter_(splitter),
+ delimiter_(splitter->delimiter()),
+ predicate_(splitter->predicate()) {
+ // Hack to maintain backward compatibility. This one block makes it so an
+ // empty y_absl::string_view whose .data() happens to be nullptr behaves
+ // *differently* from an otherwise empty y_absl::string_view whose .data() is
+ // not nullptr. This is an undesirable difference in general, but this
+ // behavior is maintained to avoid breaking existing code that happens to
+ // depend on this old behavior/bug. Perhaps it will be fixed one day. The
+ // difference in behavior is as follows:
+ // Split(y_absl::string_view(""), '-'); // {""}
+ // Split(y_absl::string_view(), '-'); // {}
+ if (splitter_->text().data() == nullptr) {
+ state_ = kEndState;
+ pos_ = splitter_->text().size();
+ return;
+ }
+
+ if (state_ == kEndState) {
+ pos_ = splitter_->text().size();
+ } else {
+ ++(*this);
+ }
+ }
+
+ bool at_end() const { return state_ == kEndState; }
+
+ reference operator*() const { return curr_; }
+ pointer operator->() const { return &curr_; }
+
+ SplitIterator& operator++() {
+ do {
+ if (state_ == kLastState) {
+ state_ = kEndState;
+ return *this;
+ }
+ const y_absl::string_view text = splitter_->text();
+ const y_absl::string_view d = delimiter_.Find(text, pos_);
+ if (d.data() == text.data() + text.size()) state_ = kLastState;
+ curr_ = text.substr(pos_, d.data() - (text.data() + pos_));
+ pos_ += curr_.size() + d.size();
+ } while (!predicate_(curr_));
+ return *this;
+ }
+
+ SplitIterator operator++(int) {
+ SplitIterator old(*this);
+ ++(*this);
+ return old;
+ }
+
+ friend bool operator==(const SplitIterator& a, const SplitIterator& b) {
+ return a.state_ == b.state_ && a.pos_ == b.pos_;
+ }
+
+ friend bool operator!=(const SplitIterator& a, const SplitIterator& b) {
+ return !(a == b);
+ }
+
+ private:
+ size_t pos_;
+ State state_;
+ y_absl::string_view curr_;
+ const Splitter* splitter_;
+ typename Splitter::DelimiterType delimiter_;
+ typename Splitter::PredicateType predicate_;
+};
+
+// HasMappedType<T>::value is true iff there exists a type T::mapped_type.
+template <typename T, typename = void>
+struct HasMappedType : std::false_type {};
+template <typename T>
+struct HasMappedType<T, y_absl::void_t<typename T::mapped_type>>
+ : std::true_type {};
+
+// HasValueType<T>::value is true iff there exists a type T::value_type.
+template <typename T, typename = void>
+struct HasValueType : std::false_type {};
+template <typename T>
+struct HasValueType<T, y_absl::void_t<typename T::value_type>> : std::true_type {
+};
+
+// HasConstIterator<T>::value is true iff there exists a type T::const_iterator.
+template <typename T, typename = void>
+struct HasConstIterator : std::false_type {};
+template <typename T>
+struct HasConstIterator<T, y_absl::void_t<typename T::const_iterator>>
+ : std::true_type {};
+
+// HasEmplace<T>::value is true iff there exists a method T::emplace().
+template <typename T, typename = void>
+struct HasEmplace : std::false_type {};
+template <typename T>
+struct HasEmplace<T, y_absl::void_t<decltype(std::declval<T>().emplace())>>
+ : std::true_type {};
+
+// IsInitializerList<T>::value is true iff T is an std::initializer_list. More
+// details below in Splitter<> where this is used.
+std::false_type IsInitializerListDispatch(...); // default: No
+template <typename T>
+std::true_type IsInitializerListDispatch(std::initializer_list<T>*);
+template <typename T>
+struct IsInitializerList
+ : decltype(IsInitializerListDispatch(static_cast<T*>(nullptr))) {};
+
+// SplitterIsConvertibleTo<C>::value is true iff the specified condition is
+// true for type 'C'.
+//
+// It restricts conversion to container-like types (by testing for the presence
+// of a const_iterator member type) and also disables conversion to
+// std::initializer_list (which also has a const_iterator). Otherwise, code
+// compiled in C++11 will get an error due to ambiguous conversion paths (in
+// C++11 std::vector<T>::operator= is overloaded to take either a std::vector<T>
+// or a std::initializer_list<T>).
+
+template <typename C, bool has_value_type, bool has_mapped_type>
+struct SplitterIsConvertibleToImpl : std::false_type {};
+
+template <typename C>
+struct SplitterIsConvertibleToImpl<C, true, false>
+ : std::is_constructible<typename C::value_type, y_absl::string_view> {};
+
+template <typename C>
+struct SplitterIsConvertibleToImpl<C, true, true>
+ : y_absl::conjunction<
+ std::is_constructible<typename C::key_type, y_absl::string_view>,
+ std::is_constructible<typename C::mapped_type, y_absl::string_view>> {};
+
+template <typename C>
+struct SplitterIsConvertibleTo
+ : SplitterIsConvertibleToImpl<
+ C,
+#ifdef _GLIBCXX_DEBUG
+ !IsStrictlyBaseOfAndConvertibleToSTLContainer<C>::value &&
+#endif // _GLIBCXX_DEBUG
+ !IsInitializerList<
+ typename std::remove_reference<C>::type>::value &&
+ HasValueType<C>::value && HasConstIterator<C>::value,
+ HasMappedType<C>::value> {
+};
+
+// This class implements the range that is returned by y_absl::StrSplit(). This
+// class has templated conversion operators that allow it to be implicitly
+// converted to a variety of types that the caller may have specified on the
+// left-hand side of an assignment.
+//
+// The main interface for interacting with this class is through its implicit
+// conversion operators. However, this class may also be used like a container
+// in that it has .begin() and .end() member functions. It may also be used
+// within a range-for loop.
+//
+// Output containers can be collections of any type that is constructible from
+// a y_absl::string_view.
+//
+// A Predicate functor may be supplied. This predicate will be used to filter
+// the split strings: only strings for which the predicate returns true will be
+// kept. A Predicate object is any unary functor that takes a
+// y_absl::string_view and returns bool.
+//
+// The StringType parameter can be either string_view or string, depending on
+// whether the Splitter refers to a string stored elsewhere, or if the string
+// resides inside the Splitter itself.
+template <typename Delimiter, typename Predicate, typename StringType>
+class Splitter {
+ public:
+ using DelimiterType = Delimiter;
+ using PredicateType = Predicate;
+ using const_iterator = strings_internal::SplitIterator<Splitter>;
+ using value_type = typename std::iterator_traits<const_iterator>::value_type;
+
+ Splitter(StringType input_text, Delimiter d, Predicate p)
+ : text_(std::move(input_text)),
+ delimiter_(std::move(d)),
+ predicate_(std::move(p)) {}
+
+ y_absl::string_view text() const { return text_; }
+ const Delimiter& delimiter() const { return delimiter_; }
+ const Predicate& predicate() const { return predicate_; }
+
+ // Range functions that iterate the split substrings as y_absl::string_view
+ // objects. These methods enable a Splitter to be used in a range-based for
+ // loop.
+ const_iterator begin() const { return {const_iterator::kInitState, this}; }
+ const_iterator end() const { return {const_iterator::kEndState, this}; }
+
+ // An implicit conversion operator that is restricted to only those containers
+ // that the splitter is convertible to.
+ template <typename Container,
+ typename = typename std::enable_if<
+ SplitterIsConvertibleTo<Container>::value>::type>
+ operator Container() const { // NOLINT(runtime/explicit)
+ return ConvertToContainer<Container, typename Container::value_type,
+ HasMappedType<Container>::value>()(*this);
+ }
+
+ // Returns a pair with its .first and .second members set to the first two
+ // strings returned by the begin() iterator. Either/both of .first and .second
+ // will be constructed with empty strings if the iterator doesn't have a
+ // corresponding value.
+ template <typename First, typename Second>
+ operator std::pair<First, Second>() const { // NOLINT(runtime/explicit)
+ y_absl::string_view first, second;
+ auto it = begin();
+ if (it != end()) {
+ first = *it;
+ if (++it != end()) {
+ second = *it;
+ }
+ }
+ return {First(first), Second(second)};
+ }
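+
+  // For example (illustrative, using the public y_absl::StrSplit()):
+  //   std::pair<TString, TString> p = y_absl::StrSplit("a,b,c", ',');
+  //   // p.first == "a", p.second == "b"; the remaining "c" is dropped.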
+
+ private:
+ // ConvertToContainer is a functor converting a Splitter to the requested
+ // Container of ValueType. It is specialized below to optimize splitting to
+ // certain combinations of Container and ValueType.
+ //
+ // This base template handles the generic case of storing the split results in
+ // the requested non-map-like container and converting the split substrings to
+ // the requested type.
+ template <typename Container, typename ValueType, bool is_map = false>
+ struct ConvertToContainer {
+ Container operator()(const Splitter& splitter) const {
+ Container c;
+ auto it = std::inserter(c, c.end());
+ for (const auto& sp : splitter) {
+ *it++ = ValueType(sp);
+ }
+ return c;
+ }
+ };
+
+ // Partial specialization for a std::vector<y_absl::string_view>.
+ //
+ // Optimized for the common case of splitting to a
+ // std::vector<y_absl::string_view>. In this case we first split the results to
+ // a small array of y_absl::string_view on the stack, to reduce reallocations.
+ template <typename A>
+ struct ConvertToContainer<std::vector<y_absl::string_view, A>,
+ y_absl::string_view, false> {
+ std::vector<y_absl::string_view, A> operator()(
+ const Splitter& splitter) const {
+ struct raw_view {
+ const char* data;
+ size_t size;
+ operator y_absl::string_view() const { // NOLINT(runtime/explicit)
+ return {data, size};
+ }
+ };
+ std::vector<y_absl::string_view, A> v;
+ std::array<raw_view, 16> ar;
+ for (auto it = splitter.begin(); !it.at_end();) {
+ size_t index = 0;
+ do {
+ ar[index].data = it->data();
+ ar[index].size = it->size();
+ ++it;
+ } while (++index != ar.size() && !it.at_end());
+ v.insert(v.end(), ar.begin(), ar.begin() + index);
+ }
+ return v;
+ }
+ };
+
+ // Partial specialization for a std::vector<TString>.
+ //
+ // Optimized for the common case of splitting to a std::vector<TString>.
+ // In this case we first split the results to a std::vector<y_absl::string_view>
+ // so the returned std::vector<TString> can have space reserved to avoid
+ // TString moves.
+ template <typename A>
+ struct ConvertToContainer<std::vector<TString, A>, TString, false> {
+ std::vector<TString, A> operator()(const Splitter& splitter) const {
+ const std::vector<y_absl::string_view> v = splitter;
+ return std::vector<TString, A>(v.begin(), v.end());
+ }
+ };
+
+ // Partial specialization for containers of pairs (e.g., maps).
+ //
+  // The algorithm is to insert a new pair into the map for each even-numbered
+  // item, using the even-numbered item as the key and a default-constructed
+  // value. Each odd-numbered item is then assigned to the last pair's value.
+ template <typename Container, typename First, typename Second>
+ struct ConvertToContainer<Container, std::pair<const First, Second>, true> {
+ using iterator = typename Container::iterator;
+
+ Container operator()(const Splitter& splitter) const {
+ Container m;
+ iterator it;
+ bool insert = true;
+ for (const y_absl::string_view sv : splitter) {
+ if (insert) {
+ it = InsertOrEmplace(&m, sv);
+ } else {
+ it->second = Second(sv);
+ }
+ insert = !insert;
+ }
+ return m;
+ }
+
+ // Inserts the key and an empty value into the map, returning an iterator to
+ // the inserted item. We use emplace() if available, otherwise insert().
+ template <typename M>
+ static y_absl::enable_if_t<HasEmplace<M>::value, iterator> InsertOrEmplace(
+ M* m, y_absl::string_view key) {
+      // Use piecewise_construct to support old versions of gcc in which the
+      // pair constructor can't otherwise construct a string from a string_view.
+ return ToIter(m->emplace(std::piecewise_construct, std::make_tuple(key),
+ std::tuple<>()));
+ }
+ template <typename M>
+ static y_absl::enable_if_t<!HasEmplace<M>::value, iterator> InsertOrEmplace(
+ M* m, y_absl::string_view key) {
+ return ToIter(m->insert(std::make_pair(First(key), Second(""))));
+ }
+
+ static iterator ToIter(std::pair<iterator, bool> pair) {
+ return pair.first;
+ }
+ static iterator ToIter(iterator iter) { return iter; }
+ };
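+
+  // For illustration: splitting "a,1,b,2" by ',' into a map alternates keys
+  // and values; an odd trailing item becomes a key with an empty value.
+  //   std::map<TString, TString> m = y_absl::StrSplit("a,1,b,2", ',');
+  //   // m == {{"a", "1"}, {"b", "2"}}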
+
+ StringType text_;
+ Delimiter delimiter_;
+ Predicate predicate_;
+};
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_STR_SPLIT_INTERNAL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/string_constant.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/string_constant.h
new file mode 100644
index 00000000000..b18e821b49d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/string_constant.h
@@ -0,0 +1,64 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
+#define ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
+
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+// StringConstant<T> represents a compile time string constant.
+// It can be accessed via its `y_absl::string_view value` static member.
+// It is guaranteed that the `string_view` returned has constant `.data()`,
+// constant `.size()` and constant `value[i]` for all `0 <= i < .size()`.
+//
+// The `T` is an opaque type. It is guaranteed that different string constants
+// will have different values of `T`. This allows users to associate the string
+// constant with other static state at compile time.
+//
+// Instances should be made using the `MakeStringConstant()` factory function
+// below.
+template <typename T>
+struct StringConstant {
+ static constexpr y_absl::string_view value = T{}();
+ constexpr y_absl::string_view operator()() const { return value; }
+
+  // Check to be sure `value` points to constant data.
+ // Otherwise, it can't be constant evaluated.
+ static_assert(value.empty() || 2 * value[0] != 1,
+ "The input string_view must point to constant data.");
+};
+
+template <typename T>
+constexpr y_absl::string_view StringConstant<T>::value; // NOLINT
+
+// Factory function for `StringConstant` instances.
+// It supports callables that have a constexpr default constructor and a
+// constexpr operator().
+// It must return an `y_absl::string_view` or `const char*` pointing to constant
+// data. This is validated at compile time.
+template <typename T>
+constexpr StringConstant<T> MakeStringConstant(T) {
+ return {};
+}
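+
+// For illustration (`HelloGetter` is a hypothetical callable):
+//   struct HelloGetter {
+//     constexpr y_absl::string_view operator()() const { return "hello"; }
+//   };
+//   constexpr y_absl::string_view v = MakeStringConstant(HelloGetter{}).value;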
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.cc
new file mode 100644
index 00000000000..06b1cae79de
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.cc
@@ -0,0 +1,53 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// UTF8 utilities, implemented to reduce dependencies.
+
+#include "y_absl/strings/internal/utf8.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) {
+ if (utf8_char <= 0x7F) {
+ *buffer = static_cast<char>(utf8_char);
+ return 1;
+ } else if (utf8_char <= 0x7FF) {
+ buffer[1] = 0x80 | (utf8_char & 0x3F);
+ utf8_char >>= 6;
+ buffer[0] = 0xC0 | utf8_char;
+ return 2;
+ } else if (utf8_char <= 0xFFFF) {
+ buffer[2] = 0x80 | (utf8_char & 0x3F);
+ utf8_char >>= 6;
+ buffer[1] = 0x80 | (utf8_char & 0x3F);
+ utf8_char >>= 6;
+ buffer[0] = 0xE0 | utf8_char;
+ return 3;
+ } else {
+ buffer[3] = 0x80 | (utf8_char & 0x3F);
+ utf8_char >>= 6;
+ buffer[2] = 0x80 | (utf8_char & 0x3F);
+ utf8_char >>= 6;
+ buffer[1] = 0x80 | (utf8_char & 0x3F);
+ utf8_char >>= 6;
+ buffer[0] = 0xF0 | utf8_char;
+ return 4;
+ }
+}
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.h
new file mode 100644
index 00000000000..1b2d6abd510
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.h
@@ -0,0 +1,50 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// UTF8 utilities, implemented to reduce dependencies.
+
+#ifndef ABSL_STRINGS_INTERNAL_UTF8_H_
+#define ABSL_STRINGS_INTERNAL_UTF8_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+// For Unicode code points 0 through 0x10FFFF, EncodeUTF8Char writes
+// out the UTF-8 encoding into buffer, and returns the number of chars
+// it wrote.
+//
+// As described in https://tools.ietf.org/html/rfc3629#section-3 , the encodings
+// are:
+// 00 - 7F : 0xxxxxxx
+// 80 - 7FF : 110xxxxx 10xxxxxx
+// 800 - FFFF : 1110xxxx 10xxxxxx 10xxxxxx
+// 10000 - 10FFFF : 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+//
+// Values greater than 0x10FFFF are not supported and may or may not write
+// characters into buffer; however, no more than kMaxEncodedUTF8Size bytes
+// will ever be written, regardless of the value of utf8_char.
+enum { kMaxEncodedUTF8Size = 4 };
+size_t EncodeUTF8Char(char *buffer, char32_t utf8_char);
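+
+// For example, encoding U+00E9 writes two bytes (illustrative):
+//   char buf[kMaxEncodedUTF8Size];
+//   size_t n = EncodeUTF8Char(buf, 0x00E9);  // n == 2; buf holds 0xC3, 0xA9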
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_INTERNAL_UTF8_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.cc
new file mode 100644
index 00000000000..3197bdf4320
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.cc
@@ -0,0 +1,43 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/match.h"
+
+#include "y_absl/strings/internal/memutil.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+bool EqualsIgnoreCase(y_absl::string_view piece1,
+ y_absl::string_view piece2) noexcept {
+ return (piece1.size() == piece2.size() &&
+ 0 == y_absl::strings_internal::memcasecmp(piece1.data(), piece2.data(),
+ piece1.size()));
+ // memcasecmp uses y_absl::ascii_tolower().
+}
+
+bool StartsWithIgnoreCase(y_absl::string_view text,
+ y_absl::string_view prefix) noexcept {
+ return (text.size() >= prefix.size()) &&
+ EqualsIgnoreCase(text.substr(0, prefix.size()), prefix);
+}
+
+bool EndsWithIgnoreCase(y_absl::string_view text,
+ y_absl::string_view suffix) noexcept {
+ return (text.size() >= suffix.size()) &&
+ EqualsIgnoreCase(text.substr(text.size() - suffix.size()), suffix);
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.h
new file mode 100644
index 00000000000..4709abc93fb
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.h
@@ -0,0 +1,100 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: match.h
+// -----------------------------------------------------------------------------
+//
+// This file contains simple utilities for performing string matching checks.
+// All of these function parameters are specified as `y_absl::string_view`,
+// meaning that these functions can accept `TString`, `y_absl::string_view` or
+// NUL-terminated C-style strings.
+//
+// Examples:
+// TString s = "foo";
+// y_absl::string_view sv = "f";
+// assert(y_absl::StrContains(s, sv));
+//
+// Note: The order of parameters in these functions is designed to mimic the
+// order an equivalent member function would exhibit;
+// e.g. `s.Contains(x)` ==> `y_absl::StrContains(s, x)`.
+#ifndef ABSL_STRINGS_MATCH_H_
+#define ABSL_STRINGS_MATCH_H_
+
+#include <cstring>
+
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// StrContains()
+//
+// Returns whether a given string `haystack` contains the substring `needle`.
+inline bool StrContains(y_absl::string_view haystack,
+ y_absl::string_view needle) noexcept {
+ return haystack.find(needle, 0) != haystack.npos;
+}
+
+inline bool StrContains(y_absl::string_view haystack, char needle) noexcept {
+ return haystack.find(needle) != haystack.npos;
+}
+
+// StartsWith()
+//
+// Returns whether a given string `text` begins with `prefix`.
+inline bool StartsWith(y_absl::string_view text,
+ y_absl::string_view prefix) noexcept {
+ return prefix.empty() ||
+ (text.size() >= prefix.size() &&
+ memcmp(text.data(), prefix.data(), prefix.size()) == 0);
+}
+
+// EndsWith()
+//
+// Returns whether a given string `text` ends with `suffix`.
+inline bool EndsWith(y_absl::string_view text,
+ y_absl::string_view suffix) noexcept {
+ return suffix.empty() ||
+ (text.size() >= suffix.size() &&
+ memcmp(text.data() + (text.size() - suffix.size()), suffix.data(),
+ suffix.size()) == 0);
+}
+
+// EqualsIgnoreCase()
+//
+// Returns whether given ASCII strings `piece1` and `piece2` are equal, ignoring
+// case in the comparison.
+bool EqualsIgnoreCase(y_absl::string_view piece1,
+ y_absl::string_view piece2) noexcept;
+
+// StartsWithIgnoreCase()
+//
+// Returns whether a given ASCII string `text` starts with `prefix`,
+// ignoring case in the comparison.
+bool StartsWithIgnoreCase(y_absl::string_view text,
+ y_absl::string_view prefix) noexcept;
+
+// EndsWithIgnoreCase()
+//
+// Returns whether a given ASCII string `text` ends with `suffix`, ignoring
+// case in the comparison.
+bool EndsWithIgnoreCase(y_absl::string_view text,
+ y_absl::string_view suffix) noexcept;
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_MATCH_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc
new file mode 100644
index 00000000000..528d044fa60
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc
@@ -0,0 +1,1093 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains string processing functions related to
+// numeric values.
+
+#include "y_absl/strings/numbers.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cfloat> // for DBL_DIG and FLT_DIG
+#include <cmath> // for HUGE_VAL
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/numeric/bits.h"
+#include "y_absl/strings/ascii.h"
+#include "y_absl/strings/charconv.h"
+#include "y_absl/strings/escaping.h"
+#include "y_absl/strings/internal/memutil.h"
+#include "y_absl/strings/match.h"
+#include "y_absl/strings/str_cat.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+bool SimpleAtof(y_absl::string_view str, float* out) {
+ *out = 0.0;
+ str = StripAsciiWhitespace(str);
+ // std::from_chars doesn't accept an initial +, but SimpleAtof does, so if one
+ // is present, skip it, while avoiding accepting "+-0" as valid.
+ if (!str.empty() && str[0] == '+') {
+ str.remove_prefix(1);
+ if (!str.empty() && str[0] == '-') {
+ return false;
+ }
+ }
+ auto result = y_absl::from_chars(str.data(), str.data() + str.size(), *out);
+ if (result.ec == std::errc::invalid_argument) {
+ return false;
+ }
+ if (result.ptr != str.data() + str.size()) {
+ // not all non-whitespace characters consumed
+ return false;
+ }
+ // from_chars() with DR 3081's current wording will return max() on
+ // overflow. SimpleAtof returns infinity instead.
+ if (result.ec == std::errc::result_out_of_range) {
+ if (*out > 1.0) {
+ *out = std::numeric_limits<float>::infinity();
+ } else if (*out < -1.0) {
+ *out = -std::numeric_limits<float>::infinity();
+ }
+ }
+ return true;
+}
+
+bool SimpleAtod(y_absl::string_view str, double* out) {
+ *out = 0.0;
+ str = StripAsciiWhitespace(str);
+ // std::from_chars doesn't accept an initial +, but SimpleAtod does, so if one
+ // is present, skip it, while avoiding accepting "+-0" as valid.
+ if (!str.empty() && str[0] == '+') {
+ str.remove_prefix(1);
+ if (!str.empty() && str[0] == '-') {
+ return false;
+ }
+ }
+ auto result = y_absl::from_chars(str.data(), str.data() + str.size(), *out);
+ if (result.ec == std::errc::invalid_argument) {
+ return false;
+ }
+ if (result.ptr != str.data() + str.size()) {
+ // not all non-whitespace characters consumed
+ return false;
+ }
+ // from_chars() with DR 3081's current wording will return max() on
+ // overflow. SimpleAtod returns infinity instead.
+ if (result.ec == std::errc::result_out_of_range) {
+ if (*out > 1.0) {
+ *out = std::numeric_limits<double>::infinity();
+ } else if (*out < -1.0) {
+ *out = -std::numeric_limits<double>::infinity();
+ }
+ }
+ return true;
+}
+
+bool SimpleAtob(y_absl::string_view str, bool* out) {
+ ABSL_RAW_CHECK(out != nullptr, "Output pointer must not be nullptr.");
+ if (EqualsIgnoreCase(str, "true") || EqualsIgnoreCase(str, "t") ||
+ EqualsIgnoreCase(str, "yes") || EqualsIgnoreCase(str, "y") ||
+ EqualsIgnoreCase(str, "1")) {
+ *out = true;
+ return true;
+ }
+ if (EqualsIgnoreCase(str, "false") || EqualsIgnoreCase(str, "f") ||
+ EqualsIgnoreCase(str, "no") || EqualsIgnoreCase(str, "n") ||
+ EqualsIgnoreCase(str, "0")) {
+ *out = false;
+ return true;
+ }
+ return false;
+}
+
+// ----------------------------------------------------------------------
+// FastIntToBuffer() overloads
+//
+// Like the Fast*ToBuffer() functions above, these are intended for speed.
+// Unlike the Fast*ToBuffer() functions, however, these functions write
+// their output to the beginning of the buffer. The caller is responsible
+// for ensuring that the buffer has enough space to hold the output.
+//
+// Returns a pointer to the end of the string (i.e. the null character
+// terminating the string).
+// ----------------------------------------------------------------------
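+
+// For illustration:
+//   char buf[16];
+//   char* end = numbers_internal::FastIntToBuffer(uint32_t{1234}, buf);
+//   // buf contains "1234", *end == '\0', end - buf == 4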
+
+namespace {
+
+// Used to optimize printing a decimal number's final digit.
+const char one_ASCII_final_digits[10][2] {
+ {'0', 0}, {'1', 0}, {'2', 0}, {'3', 0}, {'4', 0},
+ {'5', 0}, {'6', 0}, {'7', 0}, {'8', 0}, {'9', 0},
+};
+
+} // namespace
+
+char* numbers_internal::FastIntToBuffer(uint32_t i, char* buffer) {
+ uint32_t digits;
+ // The idea of this implementation is to trim the number of divides to as few
+ // as possible, and also reducing memory stores and branches, by going in
+ // steps of two digits at a time rather than one whenever possible.
+ // The huge-number case is first, in the hopes that the compiler will output
+ // that case in one branch-free block of code, and only output conditional
+ // branches into it from below.
+ if (i >= 1000000000) { // >= 1,000,000,000
+ digits = i / 100000000; // 100,000,000
+ i -= digits * 100000000;
+ PutTwoDigits(digits, buffer);
+ buffer += 2;
+ lt100_000_000:
+ digits = i / 1000000; // 1,000,000
+ i -= digits * 1000000;
+ PutTwoDigits(digits, buffer);
+ buffer += 2;
+ lt1_000_000:
+ digits = i / 10000; // 10,000
+ i -= digits * 10000;
+ PutTwoDigits(digits, buffer);
+ buffer += 2;
+ lt10_000:
+ digits = i / 100;
+ i -= digits * 100;
+ PutTwoDigits(digits, buffer);
+ buffer += 2;
+ lt100:
+ digits = i;
+ PutTwoDigits(digits, buffer);
+ buffer += 2;
+ *buffer = 0;
+ return buffer;
+ }
+
+ if (i < 100) {
+ digits = i;
+ if (i >= 10) goto lt100;
+ memcpy(buffer, one_ASCII_final_digits[i], 2);
+ return buffer + 1;
+ }
+ if (i < 10000) { // 10,000
+ if (i >= 1000) goto lt10_000;
+ digits = i / 100;
+ i -= digits * 100;
+ *buffer++ = '0' + digits;
+ goto lt100;
+ }
+ if (i < 1000000) { // 1,000,000
+ if (i >= 100000) goto lt1_000_000;
+ digits = i / 10000; // 10,000
+ i -= digits * 10000;
+ *buffer++ = '0' + digits;
+ goto lt10_000;
+ }
+ if (i < 100000000) { // 100,000,000
+ if (i >= 10000000) goto lt100_000_000;
+ digits = i / 1000000; // 1,000,000
+ i -= digits * 1000000;
+ *buffer++ = '0' + digits;
+ goto lt1_000_000;
+ }
+ // we already know that i < 1,000,000,000
+ digits = i / 100000000; // 100,000,000
+ i -= digits * 100000000;
+ *buffer++ = '0' + digits;
+ goto lt100_000_000;
+}
+
+char* numbers_internal::FastIntToBuffer(int32_t i, char* buffer) {
+ uint32_t u = i;
+ if (i < 0) {
+ *buffer++ = '-';
+ // We need to do the negation in modular (i.e., "unsigned")
+    // arithmetic; MSVC++ apparently warns for plain "-u", so
+ // we write the equivalent expression "0 - u" instead.
+ u = 0 - u;
+ }
+ return numbers_internal::FastIntToBuffer(u, buffer);
+}
+
+char* numbers_internal::FastIntToBuffer(uint64_t i, char* buffer) {
+ uint32_t u32 = static_cast<uint32_t>(i);
+ if (u32 == i) return numbers_internal::FastIntToBuffer(u32, buffer);
+
+ // Here we know i has at least 10 decimal digits.
+ uint64_t top_1to11 = i / 1000000000;
+ u32 = static_cast<uint32_t>(i - top_1to11 * 1000000000);
+ uint32_t top_1to11_32 = static_cast<uint32_t>(top_1to11);
+
+ if (top_1to11_32 == top_1to11) {
+ buffer = numbers_internal::FastIntToBuffer(top_1to11_32, buffer);
+ } else {
+ // top_1to11 has more than 32 bits too; print it in two steps.
+ uint32_t top_8to9 = static_cast<uint32_t>(top_1to11 / 100);
+ uint32_t mid_2 = static_cast<uint32_t>(top_1to11 - top_8to9 * 100);
+ buffer = numbers_internal::FastIntToBuffer(top_8to9, buffer);
+ PutTwoDigits(mid_2, buffer);
+ buffer += 2;
+ }
+
+  // We now have exactly 9 decimal digits left to print (including any
+  // leading zeros), which again fits fully in a uint32_t.
+ uint32_t digits = u32 / 10000000; // 10,000,000
+ u32 -= digits * 10000000;
+ PutTwoDigits(digits, buffer);
+ buffer += 2;
+ digits = u32 / 100000; // 100,000
+ u32 -= digits * 100000;
+ PutTwoDigits(digits, buffer);
+ buffer += 2;
+ digits = u32 / 1000; // 1,000
+ u32 -= digits * 1000;
+ PutTwoDigits(digits, buffer);
+ buffer += 2;
+ digits = u32 / 10;
+ u32 -= digits * 10;
+ PutTwoDigits(digits, buffer);
+ buffer += 2;
+ memcpy(buffer, one_ASCII_final_digits[u32], 2);
+ return buffer + 1;
+}
+
+char* numbers_internal::FastIntToBuffer(int64_t i, char* buffer) {
+ uint64_t u = i;
+ if (i < 0) {
+ *buffer++ = '-';
+ u = 0 - u;
+ }
+ return numbers_internal::FastIntToBuffer(u, buffer);
+}
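+
+// Editor's note (illustrative usage, not part of the Abseil sources): the
+// caller supplies at least kFastToBufferSize (32) bytes and can recover the
+// printed length from the returned pointer, which addresses the trailing NUL:
+//
+//   char buf[numbers_internal::kFastToBufferSize];
+//   char* end = numbers_internal::FastIntToBuffer(int64_t{-1234}, buf);
+//   y_absl::string_view printed(buf, end - buf);  // "-1234", length 5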
+
+// Given a 128-bit number expressed as a pair of uint64_t, high half first,
+// return that number multiplied by the given 32-bit value. If the result is
+// too large to fit in a 128-bit number, divide it by 2 until it fits.
+static std::pair<uint64_t, uint64_t> Mul32(std::pair<uint64_t, uint64_t> num,
+ uint32_t mul) {
+ uint64_t bits0_31 = num.second & 0xFFFFFFFF;
+ uint64_t bits32_63 = num.second >> 32;
+ uint64_t bits64_95 = num.first & 0xFFFFFFFF;
+ uint64_t bits96_127 = num.first >> 32;
+
+ // The picture so far: each of these 64-bit values has only the lower 32 bits
+ // filled in.
+ // bits96_127: [ 00000000 xxxxxxxx ]
+ // bits64_95: [ 00000000 xxxxxxxx ]
+ // bits32_63: [ 00000000 xxxxxxxx ]
+ // bits0_31: [ 00000000 xxxxxxxx ]
+
+ bits0_31 *= mul;
+ bits32_63 *= mul;
+ bits64_95 *= mul;
+ bits96_127 *= mul;
+
+ // Now the top halves may also have value, though all 64 of their bits will
+ // never be set at the same time, since they are a result of a 32x32 bit
+ // multiply. This makes the carry calculation slightly easier.
+ // bits96_127: [ mmmmmmmm | mmmmmmmm ]
+ // bits64_95: [ | mmmmmmmm mmmmmmmm | ]
+ // bits32_63: | [ mmmmmmmm | mmmmmmmm ]
+ // bits0_31: | [ | mmmmmmmm mmmmmmmm ]
+ // eventually: [ bits128_up | ...bits64_127.... | ..bits0_63... ]
+
+ uint64_t bits0_63 = bits0_31 + (bits32_63 << 32);
+ uint64_t bits64_127 = bits64_95 + (bits96_127 << 32) + (bits32_63 >> 32) +
+ (bits0_63 < bits0_31);
+ uint64_t bits128_up = (bits96_127 >> 32) + (bits64_127 < bits64_95);
+ if (bits128_up == 0) return {bits64_127, bits0_63};
+
+ auto shift = static_cast<unsigned>(bit_width(bits128_up));
+ uint64_t lo = (bits0_63 >> shift) + (bits64_127 << (64 - shift));
+ uint64_t hi = (bits64_127 >> shift) + (bits128_up << (64 - shift));
+ return {hi, lo};
+}
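+
+// Editor's note (illustrative check, not part of the Abseil sources): when
+// the true product fits in 128 bits, Mul32 can be cross-checked against a
+// compiler-provided unsigned __int128 where one is available:
+//
+//   unsigned __int128 n =
+//       (static_cast<unsigned __int128>(num.first) << 64) | num.second;
+//   unsigned __int128 p = n * mul;  // assumes no 128-bit overflow
+//   assert(Mul32(num, mul) ==
+//          std::make_pair(static_cast<uint64_t>(p >> 64),
+//                         static_cast<uint64_t>(p)));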
+
+// Compute num * 5 ^ expfive, and return the first 128 bits of the result,
+// where the first bit is always a one. So PowFive(1, 0) starts 0b100000,
+// PowFive(1, 1) starts 0b101000, PowFive(1, 2) starts 0b110010, etc.
+static std::pair<uint64_t, uint64_t> PowFive(uint64_t num, int expfive) {
+ std::pair<uint64_t, uint64_t> result = {num, 0};
+ while (expfive >= 13) {
+ // 5^13 is the highest power of five that will fit in a 32-bit integer.
+ result = Mul32(result, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5);
+ expfive -= 13;
+ }
+ constexpr int powers_of_five[13] = {
+ 1,
+ 5,
+ 5 * 5,
+ 5 * 5 * 5,
+ 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5};
+ result = Mul32(result, powers_of_five[expfive & 15]);
+ int shift = countl_zero(result.first);
+ if (shift != 0) {
+ result.first = (result.first << shift) + (result.second >> (64 - shift));
+ result.second = (result.second << shift);
+ }
+ return result;
+}
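+
+// Editor's note (worked values, not part of the Abseil sources):
+//   PowFive(1, 0) == {0x8000000000000000, 0}  // binary 0b1000... (1)
+//   PowFive(1, 1) == {0xA000000000000000, 0}  // binary 0b1010... (5)
+// i.e. the pair holds the top 128 bits of num * 5^expfive, left-shifted so
+// that the most significant bit of .first is always set.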
+
+struct ExpDigits {
+ int32_t exponent;
+ char digits[6];
+};
+
+// SplitToSix converts value, a positive double-precision floating-point number,
+// into a base-10 exponent and 6 ASCII digits, where the first digit is never
+// zero. For example, SplitToSix(1) returns an exponent of zero and a digits
+// array of {'1', '0', '0', '0', '0', '0'}. If value is exactly halfway between
+// two possible representations, e.g. value = 100000.5, then "round to even" is
+// performed.
+static ExpDigits SplitToSix(const double value) {
+ ExpDigits exp_dig;
+ int exp = 5;
+ double d = value;
+ // First step: calculate a close approximation of the output, where the
+ // value d will be between 100,000 and 999,999, representing the digits
+ // in the output ASCII array, and exp is the base-10 exponent. It would be
+ // faster to use a table here, and to look up the base-2 exponent of value,
+ // however value is an IEEE-754 64-bit number, so the table would have 2,000
+ // entries, which is not cache-friendly.
+ if (d >= 999999.5) {
+ if (d >= 1e+261) exp += 256, d *= 1e-256;
+ if (d >= 1e+133) exp += 128, d *= 1e-128;
+ if (d >= 1e+69) exp += 64, d *= 1e-64;
+ if (d >= 1e+37) exp += 32, d *= 1e-32;
+ if (d >= 1e+21) exp += 16, d *= 1e-16;
+ if (d >= 1e+13) exp += 8, d *= 1e-8;
+ if (d >= 1e+9) exp += 4, d *= 1e-4;
+ if (d >= 1e+7) exp += 2, d *= 1e-2;
+ if (d >= 1e+6) exp += 1, d *= 1e-1;
+ } else {
+ if (d < 1e-250) exp -= 256, d *= 1e256;
+ if (d < 1e-122) exp -= 128, d *= 1e128;
+ if (d < 1e-58) exp -= 64, d *= 1e64;
+ if (d < 1e-26) exp -= 32, d *= 1e32;
+ if (d < 1e-10) exp -= 16, d *= 1e16;
+ if (d < 1e-2) exp -= 8, d *= 1e8;
+ if (d < 1e+2) exp -= 4, d *= 1e4;
+ if (d < 1e+4) exp -= 2, d *= 1e2;
+ if (d < 1e+5) exp -= 1, d *= 1e1;
+ }
+ // At this point, d is in the range [99999.5..999999.5) and exp is in the
+ // range [-324..308]. Since we need to round d up, we want to add a half
+ // and truncate.
+ // However, the technique above may have lost some precision, due to its
+ // repeated multiplication by constants that each may be off by half a bit
+ // of precision. This only matters if we're close to the edge though.
+ // Since we'd like to know if the fractional part of d is close to a half,
+ // we multiply it by 65536 and see if the fractional part is close to 32768.
+  // (The number doesn't have to be a power of two, but powers of two are
+  // faster.)
+ uint64_t d64k = d * 65536;
+ int dddddd; // A 6-digit decimal integer.
+ if ((d64k % 65536) == 32767 || (d64k % 65536) == 32768) {
+ // OK, it's fairly likely that precision was lost above, which is
+ // not a surprise given only 52 mantissa bits are available. Therefore
+ // redo the calculation using 128-bit numbers. (64 bits are not enough).
+
+ // Start out with digits rounded down; maybe add one below.
+ dddddd = static_cast<int>(d64k / 65536);
+
+ // mantissa is a 64-bit integer representing M.mmm... * 2^63. The actual
+ // value we're representing, of course, is M.mmm... * 2^exp2.
+ int exp2;
+ double m = std::frexp(value, &exp2);
+ uint64_t mantissa = m * (32768.0 * 65536.0 * 65536.0 * 65536.0);
+ // std::frexp returns an m value in the range [0.5, 1.0), however we
+ // can't multiply it by 2^64 and convert to an integer because some FPUs
+    // throw an exception when converting a number higher than 2^63 into an
+ // integer - even an unsigned 64-bit integer! Fortunately it doesn't matter
+ // since m only has 52 significant bits anyway.
+ mantissa <<= 1;
+ exp2 -= 64; // not needed, but nice for debugging
+
+ // OK, we are here to compare:
+ // (dddddd + 0.5) * 10^(exp-5) vs. mantissa * 2^exp2
+ // so we can round up dddddd if appropriate. Those values span the full
+    // range of 600 orders of magnitude of IEEE 64-bit floating-point.
+    // Fortunately, we already know they are very close, so we don't need to
+    // track the base-2 exponent of both sides. This greatly simplifies the
+    // math since the 2^exp2 calculation is unnecessary and the power-of-10
+ // calculation can become a power-of-5 instead.
+
+ std::pair<uint64_t, uint64_t> edge, val;
+ if (exp >= 6) {
+ // Compare (dddddd + 0.5) * 5 ^ (exp - 5) to mantissa
+ // Since we're tossing powers of two, 2 * dddddd + 1 is the
+ // same as dddddd + 0.5
+ edge = PowFive(2 * dddddd + 1, exp - 5);
+
+ val.first = mantissa;
+ val.second = 0;
+ } else {
+ // We can't compare (dddddd + 0.5) * 5 ^ (exp - 5) to mantissa as we did
+ // above because (exp - 5) is negative. So we compare (dddddd + 0.5) to
+ // mantissa * 5 ^ (5 - exp)
+ edge = PowFive(2 * dddddd + 1, 0);
+
+ val = PowFive(mantissa, 5 - exp);
+ }
+ // printf("exp=%d %016lx %016lx vs %016lx %016lx\n", exp, val.first,
+ // val.second, edge.first, edge.second);
+ if (val > edge) {
+ dddddd++;
+ } else if (val == edge) {
+ dddddd += (dddddd & 1);
+ }
+ } else {
+ // Here, we are not close to the edge.
+ dddddd = static_cast<int>((d64k + 32768) / 65536);
+ }
+ if (dddddd == 1000000) {
+ dddddd = 100000;
+ exp += 1;
+ }
+ exp_dig.exponent = exp;
+
+ int two_digits = dddddd / 10000;
+ dddddd -= two_digits * 10000;
+ numbers_internal::PutTwoDigits(two_digits, &exp_dig.digits[0]);
+
+ two_digits = dddddd / 100;
+ dddddd -= two_digits * 100;
+ numbers_internal::PutTwoDigits(two_digits, &exp_dig.digits[2]);
+
+ numbers_internal::PutTwoDigits(dddddd, &exp_dig.digits[4]);
+ return exp_dig;
+}
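+
+// Editor's note (worked examples, not part of the Abseil sources):
+//   SplitToSix(1.0)      -> exponent 0, digits "100000"  // 1.00000
+//   SplitToSix(100000.5) -> exponent 5, digits "100000"  // round to even
+//   SplitToSix(100001.5) -> exponent 5, digits "100002"  // round to even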
+
+// Helper function for fast formatting of floating-point.
+// The result is the same as "%g", a.k.a. "%.6g".
+size_t numbers_internal::SixDigitsToBuffer(double d, char* const buffer) {
+ static_assert(std::numeric_limits<float>::is_iec559,
+ "IEEE-754/IEC-559 support only");
+
+  char* out = buffer;  // we write data to out, incrementing as we go, but
+                       // the return value is always the number of characters
+                       // written to the buffer passed in.
+
+ if (std::isnan(d)) {
+ strcpy(out, "nan"); // NOLINT(runtime/printf)
+ return 3;
+ }
+ if (d == 0) { // +0 and -0 are handled here
+ if (std::signbit(d)) *out++ = '-';
+ *out++ = '0';
+ *out = 0;
+ return out - buffer;
+ }
+ if (d < 0) {
+ *out++ = '-';
+ d = -d;
+ }
+ if (d > std::numeric_limits<double>::max()) {
+ strcpy(out, "inf"); // NOLINT(runtime/printf)
+ return out + 3 - buffer;
+ }
+
+ auto exp_dig = SplitToSix(d);
+ int exp = exp_dig.exponent;
+ const char* digits = exp_dig.digits;
+ out[0] = '0';
+ out[1] = '.';
+ switch (exp) {
+ case 5:
+ memcpy(out, &digits[0], 6), out += 6;
+ *out = 0;
+ return out - buffer;
+ case 4:
+ memcpy(out, &digits[0], 5), out += 5;
+ if (digits[5] != '0') {
+ *out++ = '.';
+ *out++ = digits[5];
+ }
+ *out = 0;
+ return out - buffer;
+ case 3:
+ memcpy(out, &digits[0], 4), out += 4;
+ if ((digits[5] | digits[4]) != '0') {
+ *out++ = '.';
+ *out++ = digits[4];
+ if (digits[5] != '0') *out++ = digits[5];
+ }
+ *out = 0;
+ return out - buffer;
+ case 2:
+ memcpy(out, &digits[0], 3), out += 3;
+ *out++ = '.';
+ memcpy(out, &digits[3], 3);
+ out += 3;
+ while (out[-1] == '0') --out;
+ if (out[-1] == '.') --out;
+ *out = 0;
+ return out - buffer;
+ case 1:
+ memcpy(out, &digits[0], 2), out += 2;
+ *out++ = '.';
+ memcpy(out, &digits[2], 4);
+ out += 4;
+ while (out[-1] == '0') --out;
+ if (out[-1] == '.') --out;
+ *out = 0;
+ return out - buffer;
+ case 0:
+ memcpy(out, &digits[0], 1), out += 1;
+ *out++ = '.';
+ memcpy(out, &digits[1], 5);
+ out += 5;
+ while (out[-1] == '0') --out;
+ if (out[-1] == '.') --out;
+ *out = 0;
+ return out - buffer;
+ case -4:
+ out[2] = '0';
+ ++out;
+ ABSL_FALLTHROUGH_INTENDED;
+ case -3:
+ out[2] = '0';
+ ++out;
+ ABSL_FALLTHROUGH_INTENDED;
+ case -2:
+ out[2] = '0';
+ ++out;
+ ABSL_FALLTHROUGH_INTENDED;
+ case -1:
+ out += 2;
+ memcpy(out, &digits[0], 6);
+ out += 6;
+ while (out[-1] == '0') --out;
+ *out = 0;
+ return out - buffer;
+ }
+ assert(exp < -4 || exp >= 6);
+ out[0] = digits[0];
+ assert(out[1] == '.');
+ out += 2;
+ memcpy(out, &digits[1], 5), out += 5;
+ while (out[-1] == '0') --out;
+ if (out[-1] == '.') --out;
+ *out++ = 'e';
+ if (exp > 0) {
+ *out++ = '+';
+ } else {
+ *out++ = '-';
+ exp = -exp;
+ }
+ if (exp > 99) {
+ int dig1 = exp / 100;
+ exp -= dig1 * 100;
+ *out++ = '0' + dig1;
+ }
+ PutTwoDigits(exp, out);
+ out += 2;
+ *out = 0;
+ return out - buffer;
+}
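+
+// Editor's note (illustrative usage, not part of the Abseil sources):
+//
+//   char buf[numbers_internal::kSixDigitsToBufferSize];  // 16 bytes
+//   size_t len = numbers_internal::SixDigitsToBuffer(0.000123456789, buf);
+//   // buf holds "0.000123457" (len == 11), matching printf("%g").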
+
+namespace {
+// Represents integer values of digits.
+// Uses 36 to indicate an invalid character since we support
+// bases up to 36.
+static const int8_t kAsciiToInt[256] = {
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, // 16 36s.
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 36, 36, 36, 36, 36, 36, 36, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 36, 36, 36, 36, 36, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36};
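+
+// Editor's note (illustrative, not part of the Abseil sources):
+//   kAsciiToInt['7'] == 7, kAsciiToInt['a'] == kAsciiToInt['A'] == 10,
+//   kAsciiToInt['z'] == 35; every other byte maps to 36, which fails the
+//   "digit >= base" check below for all supported bases.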
+
+// Parse the sign and optional hex or oct prefix in text.
+inline bool safe_parse_sign_and_base(y_absl::string_view* text /*inout*/,
+ int* base_ptr /*inout*/,
+ bool* negative_ptr /*output*/) {
+ if (text->data() == nullptr) {
+ return false;
+ }
+
+ const char* start = text->data();
+ const char* end = start + text->size();
+ int base = *base_ptr;
+
+ // Consume whitespace.
+ while (start < end && y_absl::ascii_isspace(start[0])) {
+ ++start;
+ }
+ while (start < end && y_absl::ascii_isspace(end[-1])) {
+ --end;
+ }
+ if (start >= end) {
+ return false;
+ }
+
+ // Consume sign.
+ *negative_ptr = (start[0] == '-');
+ if (*negative_ptr || start[0] == '+') {
+ ++start;
+ if (start >= end) {
+ return false;
+ }
+ }
+
+ // Consume base-dependent prefix.
+ // base 0: "0x" -> base 16, "0" -> base 8, default -> base 10
+ // base 16: "0x" -> base 16
+ // Also validate the base.
+ if (base == 0) {
+ if (end - start >= 2 && start[0] == '0' &&
+ (start[1] == 'x' || start[1] == 'X')) {
+ base = 16;
+ start += 2;
+ if (start >= end) {
+ // "0x" with no digits after is invalid.
+ return false;
+ }
+ } else if (end - start >= 1 && start[0] == '0') {
+ base = 8;
+ start += 1;
+ } else {
+ base = 10;
+ }
+ } else if (base == 16) {
+ if (end - start >= 2 && start[0] == '0' &&
+ (start[1] == 'x' || start[1] == 'X')) {
+ start += 2;
+ if (start >= end) {
+ // "0x" with no digits after is invalid.
+ return false;
+ }
+ }
+ } else if (base >= 2 && base <= 36) {
+ // okay
+ } else {
+ return false;
+ }
+ *text = y_absl::string_view(start, end - start);
+ *base_ptr = base;
+ return true;
+}
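+
+// Editor's note (illustrative, not part of the Abseil sources): given
+// text = "  -0x1A" and base = 0, the function above consumes the whitespace,
+// the '-', and the "0x" prefix, leaving text = "1A", base = 16, and
+// *negative_ptr = true.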
+
+// Consume digits.
+//
+// The classic loop:
+//
+// for each digit
+// value = value * base + digit
+// value *= sign
+//
+// The classic loop needs overflow checking. It also fails on the most
+// negative integer, -2147483648 in 32-bit two's complement representation.
+//
+// My improved loop:
+//
+// if (!negative)
+// for each digit
+// value = value * base
+// value = value + digit
+// else
+// for each digit
+// value = value * base
+// value = value - digit
+//
+// Overflow checking becomes simple.
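+//
+// Editor's note (worked example, not part of the Abseil sources): with the
+// classic loop, parsing "-2147483648" as int32_t would have to build
+// +2147483648 first, which overflows a 32-bit signed integer. Accumulating
+// negatively instead:
+//
+//   int32_t value = 0;
+//   // ten iterations of: value = value * 10 - digit
+//   // final value == -2147483648 (INT32_MIN), reached without overflow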
+
+// Lookup tables per IntType:
+// vmax/base and vmin/base are precomputed because division costs at least 8ns.
+// TODO(junyer): Doing this per base instead (i.e. an array of structs, not a
+// struct of arrays) would probably be better in terms of d-cache for the most
+// commonly used bases.
+template <typename IntType>
+struct LookupTables {
+ ABSL_CONST_INIT static const IntType kVmaxOverBase[];
+ ABSL_CONST_INIT static const IntType kVminOverBase[];
+};
+
+// An array initializer macro for X/base where base is in [0, 36].
+// However, note that lookups for base in [0, 1] should never happen because
+// base has been validated to be in [2, 36] by safe_parse_sign_and_base().
+#define X_OVER_BASE_INITIALIZER(X) \
+ { \
+ 0, 0, X / 2, X / 3, X / 4, X / 5, X / 6, X / 7, X / 8, X / 9, X / 10, \
+ X / 11, X / 12, X / 13, X / 14, X / 15, X / 16, X / 17, X / 18, \
+ X / 19, X / 20, X / 21, X / 22, X / 23, X / 24, X / 25, X / 26, \
+ X / 27, X / 28, X / 29, X / 30, X / 31, X / 32, X / 33, X / 34, \
+ X / 35, X / 36, \
+ }
+
+// This kVmaxOverBase is generated with
+// for (int base = 2; base < 37; ++base) {
+// y_absl::uint128 max = std::numeric_limits<y_absl::uint128>::max();
+// auto result = max / base;
+// std::cout << " MakeUint128(" << y_absl::Uint128High64(result) << "u, "
+// << y_absl::Uint128Low64(result) << "u),\n";
+// }
+// See https://godbolt.org/z/aneYsb
+//
+// uint128& operator/=(uint128) is not constexpr, so hardcode the resulting
+// array to avoid a static initializer.
+template<>
+const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
+ 0,
+ 0,
+ MakeUint128(9223372036854775807u, 18446744073709551615u),
+ MakeUint128(6148914691236517205u, 6148914691236517205u),
+ MakeUint128(4611686018427387903u, 18446744073709551615u),
+ MakeUint128(3689348814741910323u, 3689348814741910323u),
+ MakeUint128(3074457345618258602u, 12297829382473034410u),
+ MakeUint128(2635249153387078802u, 5270498306774157604u),
+ MakeUint128(2305843009213693951u, 18446744073709551615u),
+ MakeUint128(2049638230412172401u, 14347467612885206812u),
+ MakeUint128(1844674407370955161u, 11068046444225730969u),
+ MakeUint128(1676976733973595601u, 8384883669867978007u),
+ MakeUint128(1537228672809129301u, 6148914691236517205u),
+ MakeUint128(1418980313362273201u, 4256940940086819603u),
+ MakeUint128(1317624576693539401u, 2635249153387078802u),
+ MakeUint128(1229782938247303441u, 1229782938247303441u),
+ MakeUint128(1152921504606846975u, 18446744073709551615u),
+ MakeUint128(1085102592571150095u, 1085102592571150095u),
+ MakeUint128(1024819115206086200u, 16397105843297379214u),
+ MakeUint128(970881267037344821u, 16504981539634861972u),
+ MakeUint128(922337203685477580u, 14757395258967641292u),
+ MakeUint128(878416384462359600u, 14054662151397753612u),
+ MakeUint128(838488366986797800u, 13415813871788764811u),
+ MakeUint128(802032351030850070u, 4812194106185100421u),
+ MakeUint128(768614336404564650u, 12297829382473034410u),
+ MakeUint128(737869762948382064u, 11805916207174113034u),
+ MakeUint128(709490156681136600u, 11351842506898185609u),
+ MakeUint128(683212743470724133u, 17080318586768103348u),
+ MakeUint128(658812288346769700u, 10540996613548315209u),
+ MakeUint128(636094623231363848u, 15266270957552732371u),
+ MakeUint128(614891469123651720u, 9838263505978427528u),
+ MakeUint128(595056260442243600u, 9520900167075897608u),
+ MakeUint128(576460752303423487u, 18446744073709551615u),
+ MakeUint128(558992244657865200u, 8943875914525843207u),
+ MakeUint128(542551296285575047u, 9765923333140350855u),
+ MakeUint128(527049830677415760u, 8432797290838652167u),
+ MakeUint128(512409557603043100u, 8198552921648689607u),
+};
+
+// This kVmaxOverBase is generated with
+// for (int base = 2; base < 37; ++base) {
+// y_absl::int128 max = std::numeric_limits<y_absl::int128>::max();
+// auto result = max / base;
+// std::cout << "\tMakeInt128(" << y_absl::Int128High64(result) << ", "
+// << y_absl::Int128Low64(result) << "u),\n";
+// }
+// See https://godbolt.org/z/7djYWz
+//
+// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
+// to avoid a static initializer.
+template<>
+const int128 LookupTables<int128>::kVmaxOverBase[] = {
+ 0,
+ 0,
+ MakeInt128(4611686018427387903, 18446744073709551615u),
+ MakeInt128(3074457345618258602, 12297829382473034410u),
+ MakeInt128(2305843009213693951, 18446744073709551615u),
+ MakeInt128(1844674407370955161, 11068046444225730969u),
+ MakeInt128(1537228672809129301, 6148914691236517205u),
+ MakeInt128(1317624576693539401, 2635249153387078802u),
+ MakeInt128(1152921504606846975, 18446744073709551615u),
+ MakeInt128(1024819115206086200, 16397105843297379214u),
+ MakeInt128(922337203685477580, 14757395258967641292u),
+ MakeInt128(838488366986797800, 13415813871788764811u),
+ MakeInt128(768614336404564650, 12297829382473034410u),
+ MakeInt128(709490156681136600, 11351842506898185609u),
+ MakeInt128(658812288346769700, 10540996613548315209u),
+ MakeInt128(614891469123651720, 9838263505978427528u),
+ MakeInt128(576460752303423487, 18446744073709551615u),
+ MakeInt128(542551296285575047, 9765923333140350855u),
+ MakeInt128(512409557603043100, 8198552921648689607u),
+ MakeInt128(485440633518672410, 17475862806672206794u),
+ MakeInt128(461168601842738790, 7378697629483820646u),
+ MakeInt128(439208192231179800, 7027331075698876806u),
+ MakeInt128(419244183493398900, 6707906935894382405u),
+ MakeInt128(401016175515425035, 2406097053092550210u),
+ MakeInt128(384307168202282325, 6148914691236517205u),
+ MakeInt128(368934881474191032, 5902958103587056517u),
+ MakeInt128(354745078340568300, 5675921253449092804u),
+ MakeInt128(341606371735362066, 17763531330238827482u),
+ MakeInt128(329406144173384850, 5270498306774157604u),
+ MakeInt128(318047311615681924, 7633135478776366185u),
+ MakeInt128(307445734561825860, 4919131752989213764u),
+ MakeInt128(297528130221121800, 4760450083537948804u),
+ MakeInt128(288230376151711743, 18446744073709551615u),
+ MakeInt128(279496122328932600, 4471937957262921603u),
+ MakeInt128(271275648142787523, 14106333703424951235u),
+ MakeInt128(263524915338707880, 4216398645419326083u),
+ MakeInt128(256204778801521550, 4099276460824344803u),
+};
+
+// This kVminOverBase is generated with
+// for (int base = 2; base < 37; ++base) {
+// y_absl::int128 min = std::numeric_limits<y_absl::int128>::min();
+// auto result = min / base;
+// std::cout << "\tMakeInt128(" << y_absl::Int128High64(result) << ", "
+// << y_absl::Int128Low64(result) << "u),\n";
+// }
+//
+// See https://godbolt.org/z/7djYWz
+//
+// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
+// to avoid a static initializer.
+template<>
+const int128 LookupTables<int128>::kVminOverBase[] = {
+ 0,
+ 0,
+ MakeInt128(-4611686018427387904, 0u),
+ MakeInt128(-3074457345618258603, 6148914691236517206u),
+ MakeInt128(-2305843009213693952, 0u),
+ MakeInt128(-1844674407370955162, 7378697629483820647u),
+ MakeInt128(-1537228672809129302, 12297829382473034411u),
+ MakeInt128(-1317624576693539402, 15811494920322472814u),
+ MakeInt128(-1152921504606846976, 0u),
+ MakeInt128(-1024819115206086201, 2049638230412172402u),
+ MakeInt128(-922337203685477581, 3689348814741910324u),
+ MakeInt128(-838488366986797801, 5030930201920786805u),
+ MakeInt128(-768614336404564651, 6148914691236517206u),
+ MakeInt128(-709490156681136601, 7094901566811366007u),
+ MakeInt128(-658812288346769701, 7905747460161236407u),
+ MakeInt128(-614891469123651721, 8608480567731124088u),
+ MakeInt128(-576460752303423488, 0u),
+ MakeInt128(-542551296285575048, 8680820740569200761u),
+ MakeInt128(-512409557603043101, 10248191152060862009u),
+ MakeInt128(-485440633518672411, 970881267037344822u),
+ MakeInt128(-461168601842738791, 11068046444225730970u),
+ MakeInt128(-439208192231179801, 11419412998010674810u),
+ MakeInt128(-419244183493398901, 11738837137815169211u),
+ MakeInt128(-401016175515425036, 16040647020617001406u),
+ MakeInt128(-384307168202282326, 12297829382473034411u),
+ MakeInt128(-368934881474191033, 12543785970122495099u),
+ MakeInt128(-354745078340568301, 12770822820260458812u),
+ MakeInt128(-341606371735362067, 683212743470724134u),
+ MakeInt128(-329406144173384851, 13176245766935394012u),
+ MakeInt128(-318047311615681925, 10813608594933185431u),
+ MakeInt128(-307445734561825861, 13527612320720337852u),
+ MakeInt128(-297528130221121801, 13686293990171602812u),
+ MakeInt128(-288230376151711744, 0u),
+ MakeInt128(-279496122328932601, 13974806116446630013u),
+ MakeInt128(-271275648142787524, 4340410370284600381u),
+ MakeInt128(-263524915338707881, 14230345428290225533u),
+ MakeInt128(-256204778801521551, 14347467612885206813u),
+};
+
+template <typename IntType>
+const IntType LookupTables<IntType>::kVmaxOverBase[] =
+ X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::max());
+
+template <typename IntType>
+const IntType LookupTables<IntType>::kVminOverBase[] =
+ X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::min());
+
+#undef X_OVER_BASE_INITIALIZER
+
+template <typename IntType>
+inline bool safe_parse_positive_int(y_absl::string_view text, int base,
+ IntType* value_p) {
+ IntType value = 0;
+ const IntType vmax = std::numeric_limits<IntType>::max();
+ assert(vmax > 0);
+ assert(base >= 0);
+ assert(vmax >= static_cast<IntType>(base));
+ const IntType vmax_over_base = LookupTables<IntType>::kVmaxOverBase[base];
+ assert(base < 2 ||
+ std::numeric_limits<IntType>::max() / base == vmax_over_base);
+ const char* start = text.data();
+ const char* end = start + text.size();
+ // loop over digits
+ for (; start < end; ++start) {
+ unsigned char c = static_cast<unsigned char>(start[0]);
+ int digit = kAsciiToInt[c];
+ if (digit >= base) {
+ *value_p = value;
+ return false;
+ }
+ if (value > vmax_over_base) {
+ *value_p = vmax;
+ return false;
+ }
+ value *= base;
+ if (value > vmax - digit) {
+ *value_p = vmax;
+ return false;
+ }
+ value += digit;
+ }
+ *value_p = value;
+ return true;
+}
+
+template <typename IntType>
+inline bool safe_parse_negative_int(y_absl::string_view text, int base,
+ IntType* value_p) {
+ IntType value = 0;
+ const IntType vmin = std::numeric_limits<IntType>::min();
+ assert(vmin < 0);
+ assert(vmin <= 0 - base);
+ IntType vmin_over_base = LookupTables<IntType>::kVminOverBase[base];
+ assert(base < 2 ||
+ std::numeric_limits<IntType>::min() / base == vmin_over_base);
+  // The 2003 C++ standard [expr.mul] says
+  // "... the sign of the remainder is implementation-defined",
+  // although (vmin/base)*base + vmin%base is always vmin.
+  // The 2011 C++ standard tightens the spec, but we cannot rely on it.
+ // TODO(junyer): Handle this in the lookup table generation.
+ if (vmin % base > 0) {
+ vmin_over_base += 1;
+ }
+ const char* start = text.data();
+ const char* end = start + text.size();
+ // loop over digits
+ for (; start < end; ++start) {
+ unsigned char c = static_cast<unsigned char>(start[0]);
+ int digit = kAsciiToInt[c];
+ if (digit >= base) {
+ *value_p = value;
+ return false;
+ }
+ if (value < vmin_over_base) {
+ *value_p = vmin;
+ return false;
+ }
+ value *= base;
+ if (value < vmin + digit) {
+ *value_p = vmin;
+ return false;
+ }
+ value -= digit;
+ }
+ *value_p = value;
+ return true;
+}
+
+// Input format based on POSIX.1-2008 strtol
+// http://pubs.opengroup.org/onlinepubs/9699919799/functions/strtol.html
+template <typename IntType>
+inline bool safe_int_internal(y_absl::string_view text, IntType* value_p,
+ int base) {
+ *value_p = 0;
+ bool negative;
+ if (!safe_parse_sign_and_base(&text, &base, &negative)) {
+ return false;
+ }
+ if (!negative) {
+ return safe_parse_positive_int(text, base, value_p);
+ } else {
+ return safe_parse_negative_int(text, base, value_p);
+ }
+}
+
+template <typename IntType>
+inline bool safe_uint_internal(y_absl::string_view text, IntType* value_p,
+ int base) {
+ *value_p = 0;
+ bool negative;
+ if (!safe_parse_sign_and_base(&text, &base, &negative) || negative) {
+ return false;
+ }
+ return safe_parse_positive_int(text, base, value_p);
+}
+} // anonymous namespace
+
+namespace numbers_internal {
+
+// Digit conversion.
+ABSL_CONST_INIT ABSL_DLL const char kHexChar[] =
+ "0123456789abcdef";
+
+ABSL_CONST_INIT ABSL_DLL const char kHexTable[513] =
+ "000102030405060708090a0b0c0d0e0f"
+ "101112131415161718191a1b1c1d1e1f"
+ "202122232425262728292a2b2c2d2e2f"
+ "303132333435363738393a3b3c3d3e3f"
+ "404142434445464748494a4b4c4d4e4f"
+ "505152535455565758595a5b5c5d5e5f"
+ "606162636465666768696a6b6c6d6e6f"
+ "707172737475767778797a7b7c7d7e7f"
+ "808182838485868788898a8b8c8d8e8f"
+ "909192939495969798999a9b9c9d9e9f"
+ "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+ "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
+ "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
+ "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf"
+ "e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
+ "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
+
+ABSL_CONST_INIT ABSL_DLL const char two_ASCII_digits[100][2] = {
+ {'0', '0'}, {'0', '1'}, {'0', '2'}, {'0', '3'}, {'0', '4'}, {'0', '5'},
+ {'0', '6'}, {'0', '7'}, {'0', '8'}, {'0', '9'}, {'1', '0'}, {'1', '1'},
+ {'1', '2'}, {'1', '3'}, {'1', '4'}, {'1', '5'}, {'1', '6'}, {'1', '7'},
+ {'1', '8'}, {'1', '9'}, {'2', '0'}, {'2', '1'}, {'2', '2'}, {'2', '3'},
+ {'2', '4'}, {'2', '5'}, {'2', '6'}, {'2', '7'}, {'2', '8'}, {'2', '9'},
+ {'3', '0'}, {'3', '1'}, {'3', '2'}, {'3', '3'}, {'3', '4'}, {'3', '5'},
+ {'3', '6'}, {'3', '7'}, {'3', '8'}, {'3', '9'}, {'4', '0'}, {'4', '1'},
+ {'4', '2'}, {'4', '3'}, {'4', '4'}, {'4', '5'}, {'4', '6'}, {'4', '7'},
+ {'4', '8'}, {'4', '9'}, {'5', '0'}, {'5', '1'}, {'5', '2'}, {'5', '3'},
+ {'5', '4'}, {'5', '5'}, {'5', '6'}, {'5', '7'}, {'5', '8'}, {'5', '9'},
+ {'6', '0'}, {'6', '1'}, {'6', '2'}, {'6', '3'}, {'6', '4'}, {'6', '5'},
+ {'6', '6'}, {'6', '7'}, {'6', '8'}, {'6', '9'}, {'7', '0'}, {'7', '1'},
+ {'7', '2'}, {'7', '3'}, {'7', '4'}, {'7', '5'}, {'7', '6'}, {'7', '7'},
+ {'7', '8'}, {'7', '9'}, {'8', '0'}, {'8', '1'}, {'8', '2'}, {'8', '3'},
+ {'8', '4'}, {'8', '5'}, {'8', '6'}, {'8', '7'}, {'8', '8'}, {'8', '9'},
+ {'9', '0'}, {'9', '1'}, {'9', '2'}, {'9', '3'}, {'9', '4'}, {'9', '5'},
+ {'9', '6'}, {'9', '7'}, {'9', '8'}, {'9', '9'}};
+
+bool safe_strto32_base(y_absl::string_view text, int32_t* value, int base) {
+ return safe_int_internal<int32_t>(text, value, base);
+}
+
+bool safe_strto64_base(y_absl::string_view text, int64_t* value, int base) {
+ return safe_int_internal<int64_t>(text, value, base);
+}
+
+bool safe_strto128_base(y_absl::string_view text, int128* value, int base) {
+ return safe_int_internal<y_absl::int128>(text, value, base);
+}
+
+bool safe_strtou32_base(y_absl::string_view text, uint32_t* value, int base) {
+ return safe_uint_internal<uint32_t>(text, value, base);
+}
+
+bool safe_strtou64_base(y_absl::string_view text, uint64_t* value, int base) {
+ return safe_uint_internal<uint64_t>(text, value, base);
+}
+
+bool safe_strtou128_base(y_absl::string_view text, uint128* value, int base) {
+ return safe_uint_internal<y_absl::uint128>(text, value, base);
+}
+
+} // namespace numbers_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h
new file mode 100644
index 00000000000..ce181d8eb10
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h
@@ -0,0 +1,308 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: numbers.h
+// -----------------------------------------------------------------------------
+//
+// This package contains functions for converting strings to numbers. For
+// converting numbers to strings, use `StrCat()` or `StrAppend()` in str_cat.h,
+// which automatically detect and convert most number values appropriately.
+
+#ifndef ABSL_STRINGS_NUMBERS_H_
+#define ABSL_STRINGS_NUMBERS_H_
+
+#if defined(__SSE4_2__) && !defined(__CUDACC__)
+#define _Y__SSE4_2__
+#endif
+
+#ifdef _Y__SSE4_2__
+#ifdef _MSC_VER
+#include <intrin.h>
+#else
+#include <x86intrin.h>
+#endif
+#endif
+
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <limits>
+#include <util/generic/string.h>
+#include <type_traits>
+
+#include "y_absl/base/config.h"
+#ifdef _Y__SSE4_2__
+// TODO(jorg): Remove this when we figure out the right way
+// to swap bytes on SSE 4.2 that works with the compilers
+// we claim to support. Also, add tests for compilers that
+// don't support the Intel _bswap64 intrinsic but do
+// support all the SSE 4.2 intrinsics.
+#include "y_absl/base/internal/endian.h"
+#endif
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+#include "y_absl/numeric/bits.h"
+#include "y_absl/numeric/int128.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// SimpleAtoi()
+//
+// Converts the given string (optionally followed or preceded by ASCII
+// whitespace) into an integer value, returning `true` if successful. The string
+// must reflect a base-10 integer whose value falls within the range of the
+// integer type (optionally preceded by a `+` or `-`). If any errors are
+// encountered, this function returns `false`, leaving `out` in an unspecified
+// state.
+template <typename int_type>
+ABSL_MUST_USE_RESULT bool SimpleAtoi(y_absl::string_view str, int_type* out);
+
+// SimpleAtof()
+//
+// Converts the given string (optionally followed or preceded by ASCII
+// whitespace) into a float, which may be rounded on overflow or underflow,
+// returning `true` if successful.
+// See https://en.cppreference.com/w/c/string/byte/strtof for details about the
+// allowed formats for `str`, except SimpleAtof() is locale-independent and will
+// always use the "C" locale. If any errors are encountered, this function
+// returns `false`, leaving `out` in an unspecified state.
+ABSL_MUST_USE_RESULT bool SimpleAtof(y_absl::string_view str, float* out);
+
+// SimpleAtod()
+//
+// Converts the given string (optionally followed or preceded by ASCII
+// whitespace) into a double, which may be rounded on overflow or underflow,
+// returning `true` if successful.
+// See https://en.cppreference.com/w/c/string/byte/strtof for details about the
+// allowed formats for `str`, except SimpleAtod is locale-independent and will
+// always use the "C" locale. If any errors are encountered, this function
+// returns `false`, leaving `out` in an unspecified state.
+ABSL_MUST_USE_RESULT bool SimpleAtod(y_absl::string_view str, double* out);
+
+// SimpleAtob()
+//
+// Converts the given string into a boolean, returning `true` if successful.
+// The following case-insensitive strings are interpreted as boolean `true`:
+// "true", "t", "yes", "y", "1". The following case-insensitive strings
+// are interpreted as boolean `false`: "false", "f", "no", "n", "0". If any
+// errors are encountered, this function returns `false`, leaving `out` in an
+// unspecified state.
+ABSL_MUST_USE_RESULT bool SimpleAtob(y_absl::string_view str, bool* out);
+
+// SimpleHexAtoi()
+//
+// Converts a hexadecimal string (optionally followed or preceded by ASCII
+// whitespace) to an integer, returning `true` if successful. Only valid base-16
+// hexadecimal integers whose value falls within the range of the integer type
+// (optionally preceded by a `+` or `-`) can be converted. A valid hexadecimal
+// value may include both upper and lowercase character symbols, and may
+// optionally include a leading "0x" (or "0X") number prefix, which is ignored
+// by this function. If any errors are encountered, this function returns
+// `false`, leaving `out` in an unspecified state.
+template <typename int_type>
+ABSL_MUST_USE_RESULT bool SimpleHexAtoi(y_absl::string_view str, int_type* out);
+
+// Overloads of SimpleHexAtoi() for 128-bit integers.
+ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(y_absl::string_view str,
+ y_absl::int128* out);
+ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(y_absl::string_view str,
+ y_absl::uint128* out);
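+
+// Example (editor's sketch, not part of the original header):
+//
+//   int32_t n;
+//   if (y_absl::SimpleAtoi("42", &n)) { /* n == 42 */ }
+//   uint32_t h;
+//   if (y_absl::SimpleHexAtoi("0xBEEF", &h)) { /* h == 0xBEEF */ }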
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+// End of public API. Implementation details follow.
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace numbers_internal {
+
+// Digit conversion.
+ABSL_DLL extern const char kHexChar[17]; // 0123456789abcdef
+ABSL_DLL extern const char
+ kHexTable[513]; // 000102030405060708090a0b0c0d0e0f1011...
+ABSL_DLL extern const char
+ two_ASCII_digits[100][2]; // 00, 01, 02, 03...
+
+// Writes a two-character representation of 'i' to 'buf'. 'i' must be in the
+// range 0 <= i < 100, and buf must have space for two characters. Example:
+// char buf[2];
+// PutTwoDigits(42, buf);
+// // buf[0] == '4'
+// // buf[1] == '2'
+inline void PutTwoDigits(size_t i, char* buf) {
+ assert(i < 100);
+ memcpy(buf, two_ASCII_digits[i], 2);
+}
+
+// safe_strto?() functions for implementing SimpleAtoi()
+
+bool safe_strto32_base(y_absl::string_view text, int32_t* value, int base);
+bool safe_strto64_base(y_absl::string_view text, int64_t* value, int base);
+bool safe_strto128_base(y_absl::string_view text, y_absl::int128* value,
+ int base);
+bool safe_strtou32_base(y_absl::string_view text, uint32_t* value, int base);
+bool safe_strtou64_base(y_absl::string_view text, uint64_t* value, int base);
+bool safe_strtou128_base(y_absl::string_view text, y_absl::uint128* value,
+ int base);
+
+static const int kFastToBufferSize = 32;
+static const int kSixDigitsToBufferSize = 16;
+
+// Helper function for fast formatting of floating-point values.
+// The result is the same as printf's "%g", a.k.a. "%.6g"; that is, six
+// significant digits are returned, trailing zeros are removed, and numbers
+// outside the range 0.0001-999999 are output using scientific notation
+// (1.23456e+06). This routine is heavily optimized.
+// Required buffer size is `kSixDigitsToBufferSize`.
+size_t SixDigitsToBuffer(double d, char* buffer);
+
+// These functions are intended for speed. All functions take an output buffer
+// as an argument and return a pointer to the last byte they wrote, which is the
+// terminating '\0'. At most `kFastToBufferSize` bytes are written.
+char* FastIntToBuffer(int32_t, char*);
+char* FastIntToBuffer(uint32_t, char*);
+char* FastIntToBuffer(int64_t, char*);
+char* FastIntToBuffer(uint64_t, char*);
+
+// For enums and integer types that are not an exact match for the types above,
+// use templates to call the appropriate one of the four overloads above.
+template <typename int_type>
+char* FastIntToBuffer(int_type i, char* buffer) {
+ static_assert(sizeof(i) <= 64 / 8,
+ "FastIntToBuffer works only with 64-bit-or-less integers.");
+ // TODO(jorg): This signed-ness check is used because it works correctly
+ // with enums, and it also serves to check that int_type is not a pointer.
+ // If one day something like std::is_signed<enum E> works, switch to it.
+ if (static_cast<int_type>(1) - 2 < 0) { // Signed
+ if (sizeof(i) > 32 / 8) { // 33-bit to 64-bit
+ return FastIntToBuffer(static_cast<int64_t>(i), buffer);
+ } else { // 32-bit or less
+ return FastIntToBuffer(static_cast<int32_t>(i), buffer);
+ }
+ } else { // Unsigned
+ if (sizeof(i) > 32 / 8) { // 33-bit to 64-bit
+ return FastIntToBuffer(static_cast<uint64_t>(i), buffer);
+ } else { // 32-bit or less
+ return FastIntToBuffer(static_cast<uint32_t>(i), buffer);
+ }
+ }
+}
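+
+// Editor's note (illustrative, not part of the original header): the
+// "static_cast<int_type>(1) - 2 < 0" test above also works when int_type is
+// an unscoped enum, because the enum operand promotes to an integral type
+// whose signedness then decides the comparison; for pointer types the
+// static_cast is ill-formed, which is the desired compile-time rejection.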
+
+// Implementation of SimpleAtoi, generalized to support arbitrary base (used
+// with base different from 10 elsewhere in Abseil implementation).
+template <typename int_type>
+ABSL_MUST_USE_RESULT bool safe_strtoi_base(y_absl::string_view s, int_type* out,
+ int base) {
+ static_assert(sizeof(*out) == 4 || sizeof(*out) == 8,
+ "SimpleAtoi works only with 32-bit or 64-bit integers.");
+ static_assert(!std::is_floating_point<int_type>::value,
+ "Use SimpleAtof or SimpleAtod instead.");
+ bool parsed;
+ // TODO(jorg): This signed-ness check is used because it works correctly
+ // with enums, and it also serves to check that int_type is not a pointer.
+ // If one day something like std::is_signed<enum E> works, switch to it.
+ if (static_cast<int_type>(1) - 2 < 0) { // Signed
+ if (sizeof(*out) == 64 / 8) { // 64-bit
+ int64_t val;
+ parsed = numbers_internal::safe_strto64_base(s, &val, base);
+ *out = static_cast<int_type>(val);
+ } else { // 32-bit
+ int32_t val;
+ parsed = numbers_internal::safe_strto32_base(s, &val, base);
+ *out = static_cast<int_type>(val);
+ }
+ } else { // Unsigned
+ if (sizeof(*out) == 64 / 8) { // 64-bit
+ uint64_t val;
+ parsed = numbers_internal::safe_strtou64_base(s, &val, base);
+ *out = static_cast<int_type>(val);
+ } else { // 32-bit
+ uint32_t val;
+ parsed = numbers_internal::safe_strtou32_base(s, &val, base);
+ *out = static_cast<int_type>(val);
+ }
+ }
+ return parsed;
+}
+
+// FastHexToBufferZeroPad16()
+//
+// Outputs `val` into `out` as if by `snprintf(out, 17, "%016x", val)` but
+// without the terminating null character. Thus `out` must be of length >= 16.
+// Returns the number of non-pad digits of the output (it can never be zero
+// since 0 has one digit).
+inline size_t FastHexToBufferZeroPad16(uint64_t val, char* out) {
+#ifdef _Y__SSE4_2__
+ uint64_t be = y_absl::big_endian::FromHost64(val);
+ const auto kNibbleMask = _mm_set1_epi8(0xf);
+ const auto kHexDigits = _mm_setr_epi8('0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f');
+ auto v = _mm_loadl_epi64(reinterpret_cast<__m128i*>(&be)); // load lo dword
+ auto v4 = _mm_srli_epi64(v, 4); // shift 4 right
+ auto il = _mm_unpacklo_epi8(v4, v); // interleave bytes
+ auto m = _mm_and_si128(il, kNibbleMask); // mask out nibbles
+ auto hexchars = _mm_shuffle_epi8(kHexDigits, m); // hex chars
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(out), hexchars);
+#else
+ for (int i = 0; i < 8; ++i) {
+ auto byte = (val >> (56 - 8 * i)) & 0xFF;
+ auto* hex = &y_absl::numbers_internal::kHexTable[byte * 2];
+ std::memcpy(out + 2 * i, hex, 2);
+ }
+#endif
+ // | 0x1 so that even 0 has 1 digit.
+ return 16 - countl_zero(val | 0x1) / 4;
+}
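+
+// Editor's note (illustrative usage, not part of the original header):
+//
+//   char out[16];
+//   size_t digits = numbers_internal::FastHexToBufferZeroPad16(0xbeef, out);
+//   // out holds "000000000000beef" (no terminating NUL); digits == 4.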
+
+} // namespace numbers_internal
+
+template <typename int_type>
+ABSL_MUST_USE_RESULT bool SimpleAtoi(y_absl::string_view str, int_type* out) {
+ return numbers_internal::safe_strtoi_base(str, out, 10);
+}
+
+ABSL_MUST_USE_RESULT inline bool SimpleAtoi(y_absl::string_view str,
+ y_absl::int128* out) {
+ return numbers_internal::safe_strto128_base(str, out, 10);
+}
+
+ABSL_MUST_USE_RESULT inline bool SimpleAtoi(y_absl::string_view str,
+ y_absl::uint128* out) {
+ return numbers_internal::safe_strtou128_base(str, out, 10);
+}
+
+template <typename int_type>
+ABSL_MUST_USE_RESULT bool SimpleHexAtoi(y_absl::string_view str, int_type* out) {
+ return numbers_internal::safe_strtoi_base(str, out, 16);
+}
+
+ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(y_absl::string_view str,
+ y_absl::int128* out) {
+ return numbers_internal::safe_strto128_base(str, out, 16);
+}
+
+ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(y_absl::string_view str,
+ y_absl::uint128* out) {
+ return numbers_internal::safe_strtou128_base(str, out, 16);
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_NUMBERS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.cc
new file mode 100644
index 00000000000..9e11702eae2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.cc
@@ -0,0 +1,246 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/str_cat.h"
+
+#include <assert.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+
+#include "y_absl/strings/ascii.h"
+#include "y_absl/strings/internal/resize_uninitialized.h"
+#include "y_absl/strings/numbers.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+AlphaNum::AlphaNum(Hex hex) {
+ static_assert(numbers_internal::kFastToBufferSize >= 32,
+ "This function only works when output buffer >= 32 bytes long");
+ char* const end = &digits_[numbers_internal::kFastToBufferSize];
+ auto real_width =
+ y_absl::numbers_internal::FastHexToBufferZeroPad16(hex.value, end - 16);
+ if (real_width >= hex.width) {
+ piece_ = y_absl::string_view(end - real_width, real_width);
+ } else {
+ // Pad first 16 chars because FastHexToBufferZeroPad16 pads only to 16 and
+ // max pad width can be up to 20.
+ std::memset(end - 32, hex.fill, 16);
+ // Patch up everything else up to the real_width.
+ std::memset(end - real_width - 16, hex.fill, 16);
+ piece_ = y_absl::string_view(end - hex.width, hex.width);
+ }
+}
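+
+// Editor's note (illustrative, not part of the Abseil sources): for example,
+// StrCat(Hex(0xBEEF, kZeroPad8)) yields "0000beef", while
+// StrCat(Hex(0xBEEF, kSpacePad8)) yields "    beef".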
+
+AlphaNum::AlphaNum(Dec dec) {
+ assert(dec.width <= numbers_internal::kFastToBufferSize);
+ char* const end = &digits_[numbers_internal::kFastToBufferSize];
+ char* const minfill = end - dec.width;
+ char* writer = end;
+ uint64_t value = dec.value;
+ bool neg = dec.neg;
+ while (value > 9) {
+ *--writer = '0' + (value % 10);
+ value /= 10;
+ }
+ *--writer = '0' + value;
+ if (neg) *--writer = '-';
+
+ ptrdiff_t fillers = writer - minfill;
+ if (fillers > 0) {
+ // Tricky: if the fill character is ' ', then it's <fill><+/-><digits>
+ // But...: if the fill character is '0', then it's <+/-><fill><digits>
+ bool add_sign_again = false;
+ if (neg && dec.fill == '0') { // If filling with '0',
+ ++writer; // ignore the sign we just added
+ add_sign_again = true; // and re-add the sign later.
+ }
+ writer -= fillers;
+ std::fill_n(writer, fillers, dec.fill);
+ if (add_sign_again) *--writer = '-';
+ }
+
+ piece_ = y_absl::string_view(writer, end - writer);
+}
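+
+// Editor's note (illustrative, not part of the Abseil sources): for example,
+// StrCat(Dec(-42, kZeroPad5)) yields "-0042" (sign re-added before the zero
+// fill), while StrCat(Dec(-42, kSpacePad5)) yields "  -42".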
+
+// ----------------------------------------------------------------------
+// StrCat()
+// This merges the given strings or integers, with no delimiter. This
+// is designed to be the fastest possible way to construct a string out
+// of a mix of raw C strings, string_views, strings, and integer values.
+// ----------------------------------------------------------------------
+
+// Append is merely a version of memcpy that returns the address of the byte
+// after the area just overwritten.
+static char* Append(char* out, const AlphaNum& x) {
+ // memcpy is allowed to overwrite arbitrary memory, so doing this after the
+ // call would force an extra fetch of x.size().
+ char* after = out + x.size();
+ if (x.size() != 0) {
+ memcpy(out, x.data(), x.size());
+ }
+ return after;
+}
+
+TString StrCat(const AlphaNum& a, const AlphaNum& b) {
+ TString result;
+ y_absl::strings_internal::STLStringResizeUninitialized(&result,
+ a.size() + b.size());
+ char* const begin = &result[0];
+ char* out = begin;
+ out = Append(out, a);
+ out = Append(out, b);
+ assert(out == begin + result.size());
+ return result;
+}
+
+TString StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c) {
+ TString result;
+ strings_internal::STLStringResizeUninitialized(
+ &result, a.size() + b.size() + c.size());
+ char* const begin = &result[0];
+ char* out = begin;
+ out = Append(out, a);
+ out = Append(out, b);
+ out = Append(out, c);
+ assert(out == begin + result.size());
+ return result;
+}
+
+TString StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c,
+ const AlphaNum& d) {
+ TString result;
+ strings_internal::STLStringResizeUninitialized(
+ &result, a.size() + b.size() + c.size() + d.size());
+ char* const begin = &result[0];
+ char* out = begin;
+ out = Append(out, a);
+ out = Append(out, b);
+ out = Append(out, c);
+ out = Append(out, d);
+ assert(out == begin + result.size());
+ return result;
+}
+
+namespace strings_internal {
+
+// Do not call directly - these are not part of the public API.
+TString CatPieces(std::initializer_list<y_absl::string_view> pieces) {
+ TString result;
+ size_t total_size = 0;
+ for (const y_absl::string_view& piece : pieces) total_size += piece.size();
+ strings_internal::STLStringResizeUninitialized(&result, total_size);
+
+ char* const begin = &result[0];
+ char* out = begin;
+ for (const y_absl::string_view& piece : pieces) {
+ const size_t this_size = piece.size();
+ if (this_size != 0) {
+ memcpy(out, piece.data(), this_size);
+ out += this_size;
+ }
+ }
+ assert(out == begin + result.size());
+ return result;
+}
+
+// It's possible to call StrAppend with an y_absl::string_view that is itself
+// a fragment of the string we're appending to. However, the results of this
+// are unpredictable, so check for it in debug mode. Use unsigned math so we
+// only have to do one comparison. Note, there's an exception case: appending
+// an empty string is always allowed.
+#define ASSERT_NO_OVERLAP(dest, src) \
+ assert(((src).size() == 0) || \
+ (uintptr_t((src).data() - (dest).data()) > uintptr_t((dest).size())))
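+
+// Editor's note (illustrative, not part of the Abseil sources): a fragment of
+// dest necessarily starts within [dest.data(), dest.data() + dest.size()], so
+// only the start offset needs checking. The unsigned cast makes a negative
+// offset wrap around to a huge value, collapsing the two range checks into
+// the single comparison above.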
+
+void AppendPieces(TString* dest,
+ std::initializer_list<y_absl::string_view> pieces) {
+ size_t old_size = dest->size();
+ size_t total_size = old_size;
+ for (const y_absl::string_view& piece : pieces) {
+ ASSERT_NO_OVERLAP(*dest, piece);
+ total_size += piece.size();
+ }
+ strings_internal::STLStringResizeUninitializedAmortized(dest, total_size);
+
+ char* const begin = &(*dest)[0];
+ char* out = begin + old_size;
+ for (const y_absl::string_view& piece : pieces) {
+ const size_t this_size = piece.size();
+ if (this_size != 0) {
+ memcpy(out, piece.data(), this_size);
+ out += this_size;
+ }
+ }
+ assert(out == begin + dest->size());
+}
+
+} // namespace strings_internal
+
+void StrAppend(TString* dest, const AlphaNum& a) {
+ ASSERT_NO_OVERLAP(*dest, a);
+ dest->append(a.data(), a.size());
+}
+
+void StrAppend(TString* dest, const AlphaNum& a, const AlphaNum& b) {
+ ASSERT_NO_OVERLAP(*dest, a);
+ ASSERT_NO_OVERLAP(*dest, b);
+ TString::size_type old_size = dest->size();
+ strings_internal::STLStringResizeUninitializedAmortized(
+ dest, old_size + a.size() + b.size());
+ char* const begin = &(*dest)[0];
+ char* out = begin + old_size;
+ out = Append(out, a);
+ out = Append(out, b);
+ assert(out == begin + dest->size());
+}
+
+void StrAppend(TString* dest, const AlphaNum& a, const AlphaNum& b,
+ const AlphaNum& c) {
+ ASSERT_NO_OVERLAP(*dest, a);
+ ASSERT_NO_OVERLAP(*dest, b);
+ ASSERT_NO_OVERLAP(*dest, c);
+ TString::size_type old_size = dest->size();
+ strings_internal::STLStringResizeUninitializedAmortized(
+ dest, old_size + a.size() + b.size() + c.size());
+ char* const begin = &(*dest)[0];
+ char* out = begin + old_size;
+ out = Append(out, a);
+ out = Append(out, b);
+ out = Append(out, c);
+ assert(out == begin + dest->size());
+}
+
+void StrAppend(TString* dest, const AlphaNum& a, const AlphaNum& b,
+ const AlphaNum& c, const AlphaNum& d) {
+ ASSERT_NO_OVERLAP(*dest, a);
+ ASSERT_NO_OVERLAP(*dest, b);
+ ASSERT_NO_OVERLAP(*dest, c);
+ ASSERT_NO_OVERLAP(*dest, d);
+ TString::size_type old_size = dest->size();
+ strings_internal::STLStringResizeUninitializedAmortized(
+ dest, old_size + a.size() + b.size() + c.size() + d.size());
+ char* const begin = &(*dest)[0];
+ char* out = begin + old_size;
+ out = Append(out, a);
+ out = Append(out, b);
+ out = Append(out, c);
+ out = Append(out, d);
+ assert(out == begin + dest->size());
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h
new file mode 100644
index 00000000000..a77c9ae906b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h
@@ -0,0 +1,411 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: str_cat.h
+// -----------------------------------------------------------------------------
+//
+// This package contains functions for efficiently concatenating and appending
+// strings: `StrCat()` and `StrAppend()`. Most of the work within these routines
+// is actually handled through use of a special AlphaNum type, which was
+// designed to be used as a parameter type that efficiently manages conversion
+// to strings and avoids copies in the above operations.
+//
+// Any routine accepting either a string or a number may accept `AlphaNum`.
+// The basic idea is that by accepting a `const AlphaNum &` as an argument
+// to your function, your callers will automagically convert bools, integers,
+// and floating point values to strings for you.
+//
+// NOTE: Use of `AlphaNum` outside of the //y_absl/strings package is unsupported
+// except for the specific case of function parameters of type `AlphaNum` or
+// `const AlphaNum &`. In particular, instantiating `AlphaNum` directly as a
+// stack variable is not supported.
+//
+// Conversion from 8-bit values is not accepted because, if it were, then an
+// attempt to pass ':' instead of ":" might result in a 58 ending up in your
+// result.
+//
+// Bools convert to "0" or "1". Pointers to types other than `char *` are not
+// valid inputs. No output is generated for null `char *` pointers.
+//
+// Floating point numbers are formatted with six-digit precision, which is
+// the default for "std::cout <<" or printf "%g" (the same as "%.6g").
+//
+// You can convert to hexadecimal output rather than decimal output using the
+// `Hex` type contained here. To do so, pass `Hex(my_int)` as a parameter to
+// `StrCat()` or `StrAppend()`. You may specify a minimum hex field width using
+// a `PadSpec` enum.
+//
+// -----------------------------------------------------------------------------
+
+#ifndef ABSL_STRINGS_STR_CAT_H_
+#define ABSL_STRINGS_STR_CAT_H_
+
+#include <array>
+#include <cstdint>
+#include <util/generic/string.h>
+#include <type_traits>
+#include <vector>
+
+#include "y_absl/base/port.h"
+#include "y_absl/strings/numbers.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace strings_internal {
+// AlphaNumBuffer provides a way to pass a string to StrCat without having to
+// do a memory allocation. It is simply a pair of a fixed-size character
+// array and a size. Please don't use it outside of y_absl, yet.
+template <size_t max_size>
+struct AlphaNumBuffer {
+ std::array<char, max_size> data;
+ size_t size;
+};
+
+} // namespace strings_internal
+
+// Enum that specifies the number of significant digits to return in a `Hex` or
+// `Dec` conversion and fill character to use. A `kZeroPad2` value, for example,
+// would produce hexadecimal strings such as "0a", "0f", and a `kSpacePad5`
+// value would produce hexadecimal strings such as " a", " f".
+enum PadSpec : uint8_t {
+ kNoPad = 1,
+ kZeroPad2,
+ kZeroPad3,
+ kZeroPad4,
+ kZeroPad5,
+ kZeroPad6,
+ kZeroPad7,
+ kZeroPad8,
+ kZeroPad9,
+ kZeroPad10,
+ kZeroPad11,
+ kZeroPad12,
+ kZeroPad13,
+ kZeroPad14,
+ kZeroPad15,
+ kZeroPad16,
+ kZeroPad17,
+ kZeroPad18,
+ kZeroPad19,
+ kZeroPad20,
+
+ kSpacePad2 = kZeroPad2 + 64,
+ kSpacePad3,
+ kSpacePad4,
+ kSpacePad5,
+ kSpacePad6,
+ kSpacePad7,
+ kSpacePad8,
+ kSpacePad9,
+ kSpacePad10,
+ kSpacePad11,
+ kSpacePad12,
+ kSpacePad13,
+ kSpacePad14,
+ kSpacePad15,
+ kSpacePad16,
+ kSpacePad17,
+ kSpacePad18,
+ kSpacePad19,
+ kSpacePad20,
+};
+
+// -----------------------------------------------------------------------------
+// Hex
+// -----------------------------------------------------------------------------
+//
+// `Hex` stores a set of hexadecimal string conversion parameters for use
+// within `AlphaNum` string conversions.
+struct Hex {
+ uint64_t value;
+ uint8_t width;
+ char fill;
+
+ template <typename Int>
+ explicit Hex(
+ Int v, PadSpec spec = y_absl::kNoPad,
+ typename std::enable_if<sizeof(Int) == 1 &&
+ !std::is_pointer<Int>::value>::type* = nullptr)
+ : Hex(spec, static_cast<uint8_t>(v)) {}
+ template <typename Int>
+ explicit Hex(
+ Int v, PadSpec spec = y_absl::kNoPad,
+ typename std::enable_if<sizeof(Int) == 2 &&
+ !std::is_pointer<Int>::value>::type* = nullptr)
+ : Hex(spec, static_cast<uint16_t>(v)) {}
+ template <typename Int>
+ explicit Hex(
+ Int v, PadSpec spec = y_absl::kNoPad,
+ typename std::enable_if<sizeof(Int) == 4 &&
+ !std::is_pointer<Int>::value>::type* = nullptr)
+ : Hex(spec, static_cast<uint32_t>(v)) {}
+ template <typename Int>
+ explicit Hex(
+ Int v, PadSpec spec = y_absl::kNoPad,
+ typename std::enable_if<sizeof(Int) == 8 &&
+ !std::is_pointer<Int>::value>::type* = nullptr)
+ : Hex(spec, static_cast<uint64_t>(v)) {}
+ template <typename Pointee>
+ explicit Hex(Pointee* v, PadSpec spec = y_absl::kNoPad)
+ : Hex(spec, reinterpret_cast<uintptr_t>(v)) {}
+
+ private:
+ Hex(PadSpec spec, uint64_t v)
+ : value(v),
+ width(spec == y_absl::kNoPad
+ ? 1
+ : spec >= y_absl::kSpacePad2 ? spec - y_absl::kSpacePad2 + 2
+ : spec - y_absl::kZeroPad2 + 2),
+ fill(spec >= y_absl::kSpacePad2 ? ' ' : '0') {}
+};
+
+// -----------------------------------------------------------------------------
+// Dec
+// -----------------------------------------------------------------------------
+//
+// `Dec` stores a set of decimal string conversion parameters for use
+// within `AlphaNum` string conversions. Dec is slower than the default
+// integer conversion, so use it only if you need padding.
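+//
+// For example (an illustrative sketch, assuming the padding behavior
+// described above):
+//
+//   TString s = y_absl::StrCat(y_absl::Dec(42, y_absl::kZeroPad5));
+//   // s == "00042"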
+struct Dec {
+ uint64_t value;
+ uint8_t width;
+ char fill;
+ bool neg;
+
+ template <typename Int>
+ explicit Dec(Int v, PadSpec spec = y_absl::kNoPad,
+ typename std::enable_if<(sizeof(Int) <= 8)>::type* = nullptr)
+ : value(v >= 0 ? static_cast<uint64_t>(v)
+ : uint64_t{0} - static_cast<uint64_t>(v)),
+ width(spec == y_absl::kNoPad
+ ? 1
+ : spec >= y_absl::kSpacePad2 ? spec - y_absl::kSpacePad2 + 2
+ : spec - y_absl::kZeroPad2 + 2),
+ fill(spec >= y_absl::kSpacePad2 ? ' ' : '0'),
+ neg(v < 0) {}
+};
+
+// -----------------------------------------------------------------------------
+// AlphaNum
+// -----------------------------------------------------------------------------
+//
+// The `AlphaNum` class acts as the main parameter type for `StrCat()` and
+// `StrAppend()`, providing efficient conversion of numeric, boolean, and
+// hexadecimal values (through the `Hex` type) into strings.
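+//
+// For example, a function taking `const AlphaNum &` parameters accepts mixed
+// argument types (an illustrative sketch; `LogKeyValue` is a hypothetical
+// helper, not part of this library):
+//
+//   // Hypothetical helper shown only for illustration.
+//   TString LogKeyValue(y_absl::string_view key, const y_absl::AlphaNum& v) {
+//     return y_absl::StrCat(key, "=", v);
+//   }
+//   LogKeyValue("count", 42);    // "count=42"
+//   LogKeyValue("ratio", 0.25);  // "ratio=0.25"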
+
+class AlphaNum {
+ public:
+ // No bool ctor -- bools convert to an integral type.
+ // A bool ctor would also convert incoming pointers (bletch).
+
+ AlphaNum(int x) // NOLINT(runtime/explicit)
+ : piece_(digits_,
+ numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ AlphaNum(unsigned int x) // NOLINT(runtime/explicit)
+ : piece_(digits_,
+ numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ AlphaNum(long x) // NOLINT(*)
+ : piece_(digits_,
+ numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ AlphaNum(unsigned long x) // NOLINT(*)
+ : piece_(digits_,
+ numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ AlphaNum(long long x) // NOLINT(*)
+ : piece_(digits_,
+ numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ AlphaNum(unsigned long long x) // NOLINT(*)
+ : piece_(digits_,
+ numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+
+ AlphaNum(float f) // NOLINT(runtime/explicit)
+ : piece_(digits_, numbers_internal::SixDigitsToBuffer(f, digits_)) {}
+ AlphaNum(double f) // NOLINT(runtime/explicit)
+ : piece_(digits_, numbers_internal::SixDigitsToBuffer(f, digits_)) {}
+
+ AlphaNum(Hex hex); // NOLINT(runtime/explicit)
+ AlphaNum(Dec dec); // NOLINT(runtime/explicit)
+
+ template <size_t size>
+ AlphaNum( // NOLINT(runtime/explicit)
+ const strings_internal::AlphaNumBuffer<size>& buf)
+ : piece_(&buf.data[0], buf.size) {}
+
+ AlphaNum(const char* c_str) : piece_(c_str) {} // NOLINT(runtime/explicit)
+ AlphaNum(y_absl::string_view pc) : piece_(pc) {} // NOLINT(runtime/explicit)
+
+ template <typename Allocator>
+ AlphaNum( // NOLINT(runtime/explicit)
+ const std::basic_string<char, std::char_traits<char>, Allocator>& str)
+ : piece_(str) {}
+
+ AlphaNum(const TString& str)
+ : piece_(str.data(), str.size()) {}
+
+ // Use string literals ":" instead of character literals ':'.
+ AlphaNum(char c) = delete; // NOLINT(runtime/explicit)
+
+ AlphaNum(const AlphaNum&) = delete;
+ AlphaNum& operator=(const AlphaNum&) = delete;
+
+ y_absl::string_view::size_type size() const { return piece_.size(); }
+ const char* data() const { return piece_.data(); }
+ y_absl::string_view Piece() const { return piece_; }
+
+ // Normal enums are already handled by the integer formatters.
+ // This overload matches only scoped enums.
+ template <typename T,
+ typename = typename std::enable_if<
+ std::is_enum<T>{} && !std::is_convertible<T, int>{}>::type>
+ AlphaNum(T e) // NOLINT(runtime/explicit)
+ : AlphaNum(static_cast<typename std::underlying_type<T>::type>(e)) {}
+
+  // vector<bool>::reference and const_reference require special help to
+  // convert to `AlphaNum` because the conversion requires two user-defined
+  // conversions.
+ template <
+ typename T,
+ typename std::enable_if<
+ std::is_class<T>::value &&
+ (std::is_same<T, std::vector<bool>::reference>::value ||
+ std::is_same<T, std::vector<bool>::const_reference>::value)>::type* =
+ nullptr>
+ AlphaNum(T e) : AlphaNum(static_cast<bool>(e)) {} // NOLINT(runtime/explicit)
+
+ private:
+ y_absl::string_view piece_;
+ char digits_[numbers_internal::kFastToBufferSize];
+};
+
+// -----------------------------------------------------------------------------
+// StrCat()
+// -----------------------------------------------------------------------------
+//
+// Merges given strings or numbers, using no delimiter(s), returning the merged
+// result as a string.
+//
+// `StrCat()` is designed to be the fastest possible way to construct a string
+// out of a mix of raw C strings, string_views, strings, bool values,
+// and numeric values.
+//
+// Don't use `StrCat()` for user-visible strings. The localization process
+// works poorly on strings built up out of fragments.
+//
+// For clarity and performance, don't use `StrCat()` when appending to a
+// string. Use `StrAppend()` instead. In particular, avoid using any of these
+// (anti-)patterns:
+//
+// str.append(StrCat(...))
+// str += StrCat(...)
+// str = StrCat(str, ...)
+//
+// The last case is the worst, with a potential to change a loop
+// from a linear time operation with O(1) dynamic allocations into a
+// quadratic time operation with O(n) dynamic allocations.
+//
+// See `StrAppend()` below for more information.
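+//
+// Example (an illustrative sketch):
+//
+//   TString s = y_absl::StrCat("Result: ", 3.5, " in ", 42, " runs");
+//   // s == "Result: 3.5 in 42 runs"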
+
+namespace strings_internal {
+
+// Do not call directly - this is not part of the public API.
+TString CatPieces(std::initializer_list<y_absl::string_view> pieces);
+void AppendPieces(TString* dest,
+ std::initializer_list<y_absl::string_view> pieces);
+
+} // namespace strings_internal
+
+ABSL_MUST_USE_RESULT inline TString StrCat() { return TString(); }
+
+ABSL_MUST_USE_RESULT inline TString StrCat(const AlphaNum& a) {
+ return TString(a.data(), a.size());
+}
+
+ABSL_MUST_USE_RESULT TString StrCat(const AlphaNum& a, const AlphaNum& b);
+ABSL_MUST_USE_RESULT TString StrCat(const AlphaNum& a, const AlphaNum& b,
+ const AlphaNum& c);
+ABSL_MUST_USE_RESULT TString StrCat(const AlphaNum& a, const AlphaNum& b,
+ const AlphaNum& c, const AlphaNum& d);
+
+// Support 5 or more arguments
+template <typename... AV>
+ABSL_MUST_USE_RESULT inline TString StrCat(
+ const AlphaNum& a, const AlphaNum& b, const AlphaNum& c, const AlphaNum& d,
+ const AlphaNum& e, const AV&... args) {
+ return strings_internal::CatPieces(
+ {a.Piece(), b.Piece(), c.Piece(), d.Piece(), e.Piece(),
+ static_cast<const AlphaNum&>(args).Piece()...});
+}
+
+// -----------------------------------------------------------------------------
+// StrAppend()
+// -----------------------------------------------------------------------------
+//
+// Appends a string or set of strings to an existing string, in a similar
+// fashion to `StrCat()`.
+//
+// WARNING: `StrAppend(&str, a, b, c, ...)` requires that none of the
+// a, b, c, parameters be a reference into str. For speed, `StrAppend()` does
+// not try to check each of its input arguments to be sure that they are not
+// a subset of the string being appended to. That is, while this will work:
+//
+// TString s = "foo";
+// s += s;
+//
+// This output is undefined:
+//
+// TString s = "foo";
+// StrAppend(&s, s);
+//
+// This output is undefined as well, since `y_absl::string_view` does not own its
+// data:
+//
+// TString s = "foobar";
+// y_absl::string_view p = s;
+// StrAppend(&s, p);
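+//
+// A typical correct use (an illustrative sketch) appends pieces that do not
+// alias the destination:
+//
+//   TString s = "foo";
+//   y_absl::StrAppend(&s, "bar", 123);  // s == "foobar123"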
+
+inline void StrAppend(TString*) {}
+void StrAppend(TString* dest, const AlphaNum& a);
+void StrAppend(TString* dest, const AlphaNum& a, const AlphaNum& b);
+void StrAppend(TString* dest, const AlphaNum& a, const AlphaNum& b,
+ const AlphaNum& c);
+void StrAppend(TString* dest, const AlphaNum& a, const AlphaNum& b,
+ const AlphaNum& c, const AlphaNum& d);
+
+// Support 5 or more arguments
+template <typename... AV>
+inline void StrAppend(TString* dest, const AlphaNum& a, const AlphaNum& b,
+ const AlphaNum& c, const AlphaNum& d, const AlphaNum& e,
+ const AV&... args) {
+ strings_internal::AppendPieces(
+ dest, {a.Piece(), b.Piece(), c.Piece(), d.Piece(), e.Piece(),
+ static_cast<const AlphaNum&>(args).Piece()...});
+}
+
+// Helper function for the future StrCat default floating-point format, %.6g
+// This is fast.
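+//
+// Example (an illustrative sketch):
+//
+//   TString s = y_absl::StrCat(y_absl::SixDigits(3.14159265358979));
+//   // s == "3.14159"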
+inline strings_internal::AlphaNumBuffer<
+ numbers_internal::kSixDigitsToBufferSize>
+SixDigits(double d) {
+ strings_internal::AlphaNumBuffer<numbers_internal::kSixDigitsToBufferSize>
+ result;
+ result.size = numbers_internal::SixDigitsToBuffer(d, &result.data[0]);
+ return result;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_STR_CAT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_format.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_format.h
new file mode 100644
index 00000000000..4079f38fb48
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_format.h
@@ -0,0 +1,812 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: str_format.h
+// -----------------------------------------------------------------------------
+//
+// The `str_format` library is a typesafe replacement for the family of
+// `printf()` string formatting routines within the `<cstdio>` standard library
+// header. Like the `printf` family, `str_format` uses a "format string" to
+// perform argument substitutions based on types. See the `FormatSpec` section
+// below for format string documentation.
+//
+// Example:
+//
+// TString s = y_absl::StrFormat(
+// "%s %s You have $%d!", "Hello", name, dollars);
+//
+// The library consists of the following basic utilities:
+//
+// * `y_absl::StrFormat()`, a type-safe replacement for `std::sprintf()`, to
+// write a format string to a `string` value.
+// * `y_absl::StrAppendFormat()` to append a format string to a `string`
+// * `y_absl::StreamFormat()` to more efficiently write a format string to a
+//     stream, such as `std::cout`.
+// * `y_absl::PrintF()`, `y_absl::FPrintF()` and `y_absl::SNPrintF()` as
+// replacements for `std::printf()`, `std::fprintf()` and `std::snprintf()`.
+//
+// Note: a version of `std::sprintf()` is not supported as it is
+// generally unsafe due to buffer overflows.
+//
+// Additionally, you can provide a format string (and its associated arguments)
+// using one of the following abstractions:
+//
+// * A `FormatSpec` class template fully encapsulates a format string and its
+// type arguments and is usually provided to `str_format` functions as a
+// variadic argument of type `FormatSpec<Arg...>`. The `FormatSpec<Args...>`
+// template is evaluated at compile-time, providing type safety.
+// * A `ParsedFormat` instance, which encapsulates a specific, pre-compiled
+// format string for a specific set of type(s), and which can be passed
+// between API boundaries. (The `FormatSpec` type should not be used
+// directly except as an argument type for wrapper functions.)
+//
+// The `str_format` library provides the ability to output its format strings to
+// arbitrary sink types:
+//
+// * A generic `Format()` function to write outputs to arbitrary sink types,
+// which must implement a `FormatRawSink` interface.
+//
+// * A `FormatUntyped()` function that is similar to `Format()` except it is
+// loosely typed. `FormatUntyped()` is not a template and does not perform
+// any compile-time checking of the format string; instead, it returns a
+// boolean from a runtime check.
+//
+// In addition, the `str_format` library provides extension points for
+// augmenting formatting to new types. See "StrFormat Extensions" below.
+
+#ifndef ABSL_STRINGS_STR_FORMAT_H_
+#define ABSL_STRINGS_STR_FORMAT_H_
+
+#include <cstdio>
+#include <util/generic/string.h>
+
+#include "y_absl/strings/internal/str_format/arg.h" // IWYU pragma: export
+#include "y_absl/strings/internal/str_format/bind.h" // IWYU pragma: export
+#include "y_absl/strings/internal/str_format/checker.h" // IWYU pragma: export
+#include "y_absl/strings/internal/str_format/extension.h" // IWYU pragma: export
+#include "y_absl/strings/internal/str_format/parser.h" // IWYU pragma: export
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// UntypedFormatSpec
+//
+// A type-erased class that can be used directly within untyped API entry
+// points. An `UntypedFormatSpec` is specifically used as an argument to
+// `FormatUntyped()`.
+//
+// Example:
+//
+// y_absl::UntypedFormatSpec format("%d");
+// TString out;
+// CHECK(y_absl::FormatUntyped(&out, format, {y_absl::FormatArg(1)}));
+class UntypedFormatSpec {
+ public:
+ UntypedFormatSpec() = delete;
+ UntypedFormatSpec(const UntypedFormatSpec&) = delete;
+ UntypedFormatSpec& operator=(const UntypedFormatSpec&) = delete;
+
+ explicit UntypedFormatSpec(string_view s) : spec_(s) {}
+
+ protected:
+ explicit UntypedFormatSpec(const str_format_internal::ParsedFormatBase* pc)
+ : spec_(pc) {}
+
+ private:
+ friend str_format_internal::UntypedFormatSpecImpl;
+ str_format_internal::UntypedFormatSpecImpl spec_;
+};
+
+// FormatStreamed()
+//
+// Takes a streamable argument and returns an object that can print it
+// with '%s'. Allows printing of types that have an `operator<<` but no
+// intrinsic type support within `StrFormat()` itself.
+//
+// Example:
+//
+// y_absl::StrFormat("%s", y_absl::FormatStreamed(obj));
+template <typename T>
+str_format_internal::StreamedWrapper<T> FormatStreamed(const T& v) {
+ return str_format_internal::StreamedWrapper<T>(v);
+}
+
+// FormatCountCapture
+//
+// This class provides a way to safely wrap `StrFormat()` captures of `%n`
+// conversions, which denote the number of characters written by a formatting
+// operation to this point, into an integer value.
+//
+// This wrapper is designed to allow safe usage of `%n` within `StrFormat()`; in
+// the `printf()` family of functions, `%n` is not safe to use, as the `int *`
+// buffer can be used to capture arbitrary data.
+//
+// Example:
+//
+// int n = 0;
+// TString s = y_absl::StrFormat("%s%d%n", "hello", 123,
+// y_absl::FormatCountCapture(&n));
+// EXPECT_EQ(8, n);
+class FormatCountCapture {
+ public:
+ explicit FormatCountCapture(int* p) : p_(p) {}
+
+ private:
+ // FormatCountCaptureHelper is used to define FormatConvertImpl() for this
+ // class.
+ friend struct str_format_internal::FormatCountCaptureHelper;
+ // Unused() is here because of the false positive from -Wunused-private-field
+ // p_ is used in the templated function of the friend FormatCountCaptureHelper
+ // class.
+ int* Unused() { return p_; }
+ int* p_;
+};
+
+// FormatSpec
+//
+// The `FormatSpec` type defines the makeup of a format string within the
+// `str_format` library. It is a variadic class template that is evaluated at
+// compile-time, according to the format string and arguments that are passed to
+// it.
+//
+// You should not need to manipulate this type directly. You should only name it
+// if you are writing wrapper functions which accept format arguments that will
+// be provided unmodified to functions in this library. Such a wrapper function
+// might be a class method that provides format arguments and/or internally uses
+// the result of formatting.
+//
+// For a `FormatSpec` to be valid at compile-time, it must be provided as
+// either:
+//
+//   * A `constexpr` literal or `y_absl::string_view`, which is how it is most
+//     often used.
+// * A `ParsedFormat` instantiation, which ensures the format string is
+// valid before use. (See below.)
+//
+// Example:
+//
+// // Provided as a string literal.
+// y_absl::StrFormat("Welcome to %s, Number %d!", "The Village", 6);
+//
+// // Provided as a constexpr y_absl::string_view.
+// constexpr y_absl::string_view formatString = "Welcome to %s, Number %d!";
+// y_absl::StrFormat(formatString, "The Village", 6);
+//
+// // Provided as a pre-compiled ParsedFormat object.
+// // Note that this example is useful only for illustration purposes.
+// y_absl::ParsedFormat<'s', 'd'> formatString("Welcome to %s, Number %d!");
+// y_absl::StrFormat(formatString, "TheVillage", 6);
+//
+// A format string generally follows the POSIX syntax as used within the
+// `printf` specification.
+//
+// (See http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html.)
+//
+// Specifically, the `FormatSpec` supports the following type specifiers:
+// * `c` for characters
+// * `s` for strings
+// * `d` or `i` for integers
+// * `o` for unsigned integer conversions into octal
+// * `x` or `X` for unsigned integer conversions into hex
+// * `u` for unsigned integers
+// * `f` or `F` for floating point values into decimal notation
+// * `e` or `E` for floating point values into exponential notation
+// * `a` or `A` for floating point values into hex exponential notation
+// * `g` or `G` for floating point values into decimal or exponential
+// notation based on their precision
+// * `p` for pointer address values
+// * `n` for the special case of writing out the number of characters
+// written to this point. The resulting value must be captured within an
+// `y_absl::FormatCountCapture` type.
+//
+// Implementation-defined behavior:
+// * A null pointer provided to "%s" or "%p" is output as "(nil)".
+// * A non-null pointer provided to "%p" is output in hex as if by %#x or
+// %#lx.
+//
+// NOTE: `o`, `x`/`X`, and `u` will convert signed values to their unsigned
+// counterparts before formatting.
+//
+// Examples:
+// "%c", 'a' -> "a"
+// "%c", 32 -> " "
+// "%s", "C" -> "C"
+// "%s", TString("C++") -> "C++"
+// "%d", -10 -> "-10"
+// "%o", 10 -> "12"
+// "%x", 16 -> "10"
+// "%f", 123456789 -> "123456789.000000"
+// "%e", .01 -> "1.00000e-2"
+// "%a", -3.0 -> "-0x1.8p+1"
+// "%g", .01 -> "1e-2"
+// "%p", (void*)&value -> "0x7ffdeb6ad2a4"
+//
+// int n = 0;
+// TString s = y_absl::StrFormat(
+// "%s%d%n", "hello", 123, y_absl::FormatCountCapture(&n));
+// EXPECT_EQ(8, n);
+//
+// The `FormatSpec` intrinsically supports all of these fundamental C++ types:
+//
+// * Characters: `char`, `signed char`, `unsigned char`
+// * Integers: `int`, `short`, `unsigned short`, `unsigned`, `long`,
+// `unsigned long`, `long long`, `unsigned long long`
+// * Floating-point: `float`, `double`, `long double`
+//
+// However, in the `str_format` library, a format conversion specifies a broader
+// C++ conceptual category instead of an exact type. For example, `%s` binds to
+// any string-like argument, so `TString`, `y_absl::string_view`, and
+// `const char*` are all accepted. Likewise, `%d` accepts any integer-like
+// argument, etc.
+
+template <typename... Args>
+using FormatSpec = str_format_internal::FormatSpecTemplate<
+ str_format_internal::ArgumentToConv<Args>()...>;
+
+// ParsedFormat
+//
+// A `ParsedFormat` is a class template representing a preparsed `FormatSpec`,
+// with template arguments specifying the conversion characters used within the
+// format string. Such characters must be valid format type specifiers, and
+// these type specifiers are checked at compile-time.
+//
+// Instances of `ParsedFormat` can be created, copied, and reused to speed up
+// formatting loops. A `ParsedFormat` may either be constructed statically, or
+// dynamically through its `New()` factory function, which only constructs a
+// runtime object if the format is valid at that time.
+//
+// Example:
+//
+// // Verified at compile time.
+// y_absl::ParsedFormat<'s', 'd'> formatString("Welcome to %s, Number %d!");
+// y_absl::StrFormat(formatString, "TheVillage", 6);
+//
+// // Verified at runtime.
+// auto format_runtime = y_absl::ParsedFormat<'d'>::New(format_string);
+// if (format_runtime) {
+// value = y_absl::StrFormat(*format_runtime, i);
+// } else {
+// ... error case ...
+// }
+
+#if defined(__cpp_nontype_template_parameter_auto)
+// If C++17 is available, an 'extended' format is also allowed that can specify
+// multiple conversion characters per format argument, using a combination of
+// `y_absl::FormatConversionCharSet` enum values (logically a set union)
+// via the `|` operator. (Single character-based arguments are still accepted,
+// but cannot be combined). Some common conversions also have predefined enum
+// values, such as `y_absl::FormatConversionCharSet::kIntegral`.
+//
+// Example:
+// // Extended format supports multiple conversion characters per argument,
+// // specified via a combination of `FormatConversionCharSet` enums.
+// using MyFormat = y_absl::ParsedFormat<y_absl::FormatConversionCharSet::d |
+// y_absl::FormatConversionCharSet::x>;
+// MyFormat GetFormat(bool use_hex) {
+// if (use_hex) return MyFormat("foo %x bar");
+// return MyFormat("foo %d bar");
+// }
+// // `format` can be used with any value that supports 'd' and 'x',
+// // like `int`.
+// auto format = GetFormat(use_hex);
+//   value = y_absl::StrFormat(format, i);
+template <auto... Conv>
+using ParsedFormat = y_absl::str_format_internal::ExtendedParsedFormat<
+ y_absl::str_format_internal::ToFormatConversionCharSet(Conv)...>;
+#else
+template <char... Conv>
+using ParsedFormat = str_format_internal::ExtendedParsedFormat<
+ y_absl::str_format_internal::ToFormatConversionCharSet(Conv)...>;
+#endif // defined(__cpp_nontype_template_parameter_auto)
+
+// StrFormat()
+//
+// Returns a `string` given a `printf()`-style format string and zero or more
+// additional arguments. Use it as you would `sprintf()`. `StrFormat()` is the
+// primary formatting function within the `str_format` library, and should be
+// used in most cases where you need type-safe conversion of types into
+// formatted strings.
+//
+// The format string generally consists of ordinary character data along with
+// one or more format conversion specifiers (denoted by the `%` character).
+// Ordinary character data is returned unchanged into the result string, while
+// each conversion specification performs a type substitution from
+// `StrFormat()`'s other arguments. See the comments for `FormatSpec` for full
+// information on the makeup of this format string.
+//
+// Example:
+//
+// TString s = y_absl::StrFormat(
+// "Welcome to %s, Number %d!", "The Village", 6);
+// EXPECT_EQ("Welcome to The Village, Number 6!", s);
+//
+// Returns an empty string in case of error.
+template <typename... Args>
+ABSL_MUST_USE_RESULT TString StrFormat(const FormatSpec<Args...>& format,
+ const Args&... args) {
+ return str_format_internal::FormatPack(
+ str_format_internal::UntypedFormatSpecImpl::Extract(format),
+ {str_format_internal::FormatArgImpl(args)...});
+}
+
+// StrAppendFormat()
+//
+// Appends to a `dst` string given a format string, and zero or more additional
+// arguments, returning `*dst` as a convenience for chaining purposes. Appends
+// nothing in case of error (but possibly alters its capacity).
+//
+// Example:
+//
+// TString orig("For example PI is approximately ");
+// std::cout << StrAppendFormat(&orig, "%12.6f", 3.14);
+template <typename... Args>
+TString& StrAppendFormat(TString* dst,
+ const FormatSpec<Args...>& format,
+ const Args&... args) {
+ return str_format_internal::AppendPack(
+ dst, str_format_internal::UntypedFormatSpecImpl::Extract(format),
+ {str_format_internal::FormatArgImpl(args)...});
+}
+
+// StreamFormat()
+//
+// Writes to an output stream given a format string and zero or more arguments,
+// generally in a manner that is more efficient than streaming the result of
+// `y_absl::StrFormat()`. The returned object must be streamed before the full
+// expression ends.
+//
+// Example:
+//
+// std::cout << StreamFormat("%12.6f", 3.14);
+template <typename... Args>
+ABSL_MUST_USE_RESULT str_format_internal::Streamable StreamFormat(
+ const FormatSpec<Args...>& format, const Args&... args) {
+ return str_format_internal::Streamable(
+ str_format_internal::UntypedFormatSpecImpl::Extract(format),
+ {str_format_internal::FormatArgImpl(args)...});
+}
+
+// PrintF()
+//
+// Writes to stdout given a format string and zero or more arguments. This
+// function is functionally equivalent to `std::printf()` (and type-safe);
+// prefer `y_absl::PrintF()` over `std::printf()`.
+//
+// Example:
+//
+// std::string_view s = "Ulaanbaatar";
+// y_absl::PrintF("The capital of Mongolia is %s", s);
+//
+// Outputs: "The capital of Mongolia is Ulaanbaatar"
+//
+template <typename... Args>
+int PrintF(const FormatSpec<Args...>& format, const Args&... args) {
+ return str_format_internal::FprintF(
+ stdout, str_format_internal::UntypedFormatSpecImpl::Extract(format),
+ {str_format_internal::FormatArgImpl(args)...});
+}
+
+// FPrintF()
+//
+// Writes to a file given a format string and zero or more arguments. This
+// function is functionally equivalent to `std::fprintf()` (and type-safe);
+// prefer `y_absl::FPrintF()` over `std::fprintf()`.
+//
+// Example:
+//
+// std::string_view s = "Ulaanbaatar";
+// y_absl::FPrintF(stdout, "The capital of Mongolia is %s", s);
+//
+// Outputs: "The capital of Mongolia is Ulaanbaatar"
+//
+template <typename... Args>
+int FPrintF(std::FILE* output, const FormatSpec<Args...>& format,
+ const Args&... args) {
+ return str_format_internal::FprintF(
+ output, str_format_internal::UntypedFormatSpecImpl::Extract(format),
+ {str_format_internal::FormatArgImpl(args)...});
+}
+
+// SNPrintF()
+//
+// Writes to a sized buffer given a format string and zero or more arguments.
+// This function is functionally equivalent to `std::snprintf()` (and
+// type-safe); prefer `y_absl::SNPrintF()` over `std::snprintf()`.
+//
+// In particular, a successful call to `y_absl::SNPrintF()` writes at most `size`
+// bytes of the formatted output to `output`, including a NUL-terminator, and
+// returns the number of bytes that would have been written if truncation did
+// not occur. In the event of an error, a negative value is returned and `errno`
+// is set.
+//
+// Example:
+//
+// std::string_view s = "Ulaanbaatar";
+// char output[128];
+// y_absl::SNPrintF(output, sizeof(output),
+// "The capital of Mongolia is %s", s);
+//
+// Post-condition: output == "The capital of Mongolia is Ulaanbaatar"
+//
+template <typename... Args>
+int SNPrintF(char* output, std::size_t size, const FormatSpec<Args...>& format,
+ const Args&... args) {
+ return str_format_internal::SnprintF(
+ output, size, str_format_internal::UntypedFormatSpecImpl::Extract(format),
+ {str_format_internal::FormatArgImpl(args)...});
+}
+
+// -----------------------------------------------------------------------------
+// Custom Output Formatting Functions
+// -----------------------------------------------------------------------------
+
+// FormatRawSink
+//
+// FormatRawSink is a type-erased wrapper around arbitrary sink objects
+// specifically used as an argument to `Format()`.
+//
+// All the object has to do is define an overload of `AbslFormatFlush()` for
+// the sink, usually by adding an ADL-based free function in the same
+// namespace as the sink:
+//
+// void AbslFormatFlush(MySink* dest, y_absl::string_view part);
+//
+// where `dest` is the pointer passed to `y_absl::Format()`. The function should
+// append `part` to `dest`.
+//
+// FormatRawSink does not own the passed sink object. The passed object must
+// outlive the FormatRawSink.
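+//
+// A minimal sink sketch (illustrative; `MySink` is a hypothetical type, not
+// part of this library):
+//
+//   struct MySink { TString contents; };  // hypothetical sink type
+//   void AbslFormatFlush(MySink* dest, y_absl::string_view part) {
+//     dest->contents.append(part.data(), part.size());
+//   }
+//   // A `MySink*` now implicitly converts to `y_absl::FormatRawSink` and can
+//   // be passed to `y_absl::Format()`.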
+class FormatRawSink {
+ public:
+ // Implicitly convert from any type that provides the hook function as
+ // described above.
+ template <typename T,
+ typename = typename std::enable_if<std::is_constructible<
+ str_format_internal::FormatRawSinkImpl, T*>::value>::type>
+ FormatRawSink(T* raw) // NOLINT
+ : sink_(raw) {}
+
+ private:
+ friend str_format_internal::FormatRawSinkImpl;
+ str_format_internal::FormatRawSinkImpl sink_;
+};
+
+// Format()
+//
+// Writes a formatted string to an arbitrary sink object (implementing the
+// `y_absl::FormatRawSink` interface), using a format string and zero or more
+// additional arguments.
+//
+// By default, `TString`, `std::ostream`, and `y_absl::Cord` are supported as
+// destination objects. If a `TString` is used the formatted string is
+// appended to it.
+//
+// `y_absl::Format()` is a generic version of `y_absl::StrAppendFormat()`, for
+// custom sinks. The format string, like format strings for `StrFormat()`, is
+// checked at compile-time.
+//
+// On failure, this function returns `false` and the state of the sink is
+// unspecified.
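+//
+// Example (an illustrative sketch):
+//
+//   TString out;
+//   if (!y_absl::Format(&out, "%s:%d", "localhost", 8080)) {
+//     // handle formatting failure
+//   }
+//   // out == "localhost:8080" on success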
+template <typename... Args>
+bool Format(FormatRawSink raw_sink, const FormatSpec<Args...>& format,
+ const Args&... args) {
+ return str_format_internal::FormatUntyped(
+ str_format_internal::FormatRawSinkImpl::Extract(raw_sink),
+ str_format_internal::UntypedFormatSpecImpl::Extract(format),
+ {str_format_internal::FormatArgImpl(args)...});
+}
+
+// FormatArg
+//
+// A type-erased handle to a format argument specifically used as an argument to
+// `FormatUntyped()`. You may construct `FormatArg` by passing
+// reference-to-const of any printable type. `FormatArg` is both copyable and
+// assignable. The source data must outlive the `FormatArg` instance. See
+// example below.
+//
+using FormatArg = str_format_internal::FormatArgImpl;
+
+// FormatUntyped()
+//
+// Writes a formatted string to an arbitrary sink object (implementing the
+// `y_absl::FormatRawSink` interface), using an `UntypedFormatSpec` and zero or
+// more additional arguments.
+//
+// This function acts as the most generic formatting function in the
+// `str_format` library. The caller provides a raw sink, an unchecked format
+// string, and (usually) a runtime specified list of arguments; no compile-time
+// checking of formatting is performed within this function. As a result, a
+// caller should check the return value to verify that no error occurred.
+// On failure, this function returns `false` and the state of the sink is
+// unspecified.
+//
+// The arguments are provided in an `y_absl::Span<const y_absl::FormatArg>`.
+// Each `y_absl::FormatArg` object binds to a single argument and keeps a
+// reference to it. The values used to create the `FormatArg` objects must
+// outlive this function call.
+//
+// Example:
+//
+// std::optional<TString> FormatDynamic(
+// const TString& in_format,
+// const vector<TString>& in_args) {
+// TString out;
+// std::vector<y_absl::FormatArg> args;
+// for (const auto& v : in_args) {
+// // It is important that 'v' is a reference to the objects in in_args.
+// // The values we pass to FormatArg must outlive the call to
+// // FormatUntyped.
+// args.emplace_back(v);
+// }
+// y_absl::UntypedFormatSpec format(in_format);
+// if (!y_absl::FormatUntyped(&out, format, args)) {
+// return std::nullopt;
+// }
+// return std::move(out);
+// }
+//
+ABSL_MUST_USE_RESULT inline bool FormatUntyped(
+ FormatRawSink raw_sink, const UntypedFormatSpec& format,
+ y_absl::Span<const FormatArg> args) {
+ return str_format_internal::FormatUntyped(
+ str_format_internal::FormatRawSinkImpl::Extract(raw_sink),
+ str_format_internal::UntypedFormatSpecImpl::Extract(format), args);
+}
+
+//------------------------------------------------------------------------------
+// StrFormat Extensions
+//------------------------------------------------------------------------------
+//
+// AbslFormatConvert()
+//
+// The StrFormat library provides a customization API for formatting
+// user-defined types using y_absl::StrFormat(). The API relies on detecting an
+// overload in the user-defined type's namespace of a free (non-member)
+// `AbslFormatConvert()` function, usually as a friend definition with the
+// following signature:
+//
+// y_absl::FormatConvertResult<...> AbslFormatConvert(
+// const X& value,
+// const y_absl::FormatConversionSpec& spec,
+// y_absl::FormatSink *sink);
+//
+// An `AbslFormatConvert()` overload for a type should only be declared in the
+// same file and namespace as said type.
+//
+// The abstractions within this definition include:
+//
+// * An `y_absl::FormatConversionSpec` to specify the fields to pull from a
+// user-defined type's format string
+// * An `y_absl::FormatSink` to hold the converted string data during the
+// conversion process.
+// * An `y_absl::FormatConvertResult` to hold the status of the returned
+// formatting operation
+//
+// The return type encodes all the conversion characters that your
+// AbslFormatConvert() routine accepts. The return value should be {true}.
+// A return value of {false} will result in `StrFormat()` returning
+// an empty string. This result will be propagated to the result of
+// `FormatUntyped`.
+//
+// Example:
+//
+// struct Point {
+// // To add formatting support to `Point`, we simply need to add a free
+//   // (non-member) function `AbslFormatConvert()`. This function interprets
+//   // `spec` to print in the requested format. The allowed conversion
+//   // characters can be restricted via the type of the result; in this
+//   // example, string and integral formatting are allowed (but not, for
+//   // instance, floating-point characters like "%f"). You can add such a
+//   // free function using a friend declaration within the body of the class:
+// friend y_absl::FormatConvertResult<y_absl::FormatConversionCharSet::kString |
+// y_absl::FormatConversionCharSet::kIntegral>
+// AbslFormatConvert(const Point& p, const y_absl::FormatConversionSpec& spec,
+// y_absl::FormatSink* s) {
+// if (spec.conversion_char() == y_absl::FormatConversionChar::s) {
+// s->Append(y_absl::StrCat("x=", p.x, " y=", p.y));
+// } else {
+// s->Append(y_absl::StrCat(p.x, ",", p.y));
+// }
+// return {true};
+// }
+//
+// int x;
+// int y;
+// };
+
+// clang-format off
+
+// FormatConversionChar
+//
+// Specifies the formatting character provided in the format string
+// passed to `StrFormat()`.
+enum class FormatConversionChar : uint8_t {
+ c, s, // text
+ d, i, o, u, x, X, // int
+ f, F, e, E, g, G, a, A, // float
+ n, p // misc
+};
+// clang-format on
+
+// FormatConversionSpec
+//
+// Specifies modifications to the conversion of the format string, through use
+// of one or more format flags in the source format string.
+class FormatConversionSpec {
+ public:
+ // FormatConversionSpec::is_basic()
+ //
+ // Indicates that width and precision are not specified, and no additional
+ // flags are set for this conversion character in the format string.
+ bool is_basic() const { return impl_.is_basic(); }
+
+ // FormatConversionSpec::has_left_flag()
+ //
+ // Indicates whether the result should be left justified for this conversion
+ // character in the format string. This flag is set through use of a '-'
+ // character in the format string. E.g. "%-s"
+ bool has_left_flag() const { return impl_.has_left_flag(); }
+
+ // FormatConversionSpec::has_show_pos_flag()
+ //
+ // Indicates whether a sign column is prepended to the result for this
+ // conversion character in the format string, even if the result is positive.
+ // This flag is set through use of a '+' character in the format string.
+ // E.g. "%+d"
+ bool has_show_pos_flag() const { return impl_.has_show_pos_flag(); }
+
+ // FormatConversionSpec::has_sign_col_flag()
+ //
+ // Indicates whether a mandatory sign column is added to the result for this
+ // conversion character. This flag is set through use of a space character
+ // (' ') in the format string. E.g. "% i"
+ bool has_sign_col_flag() const { return impl_.has_sign_col_flag(); }
+
+ // FormatConversionSpec::has_alt_flag()
+ //
+ // Indicates whether an "alternate" format is applied to the result for this
+ // conversion character. Alternative forms depend on the type of conversion
+ // character, and unallowed alternatives are undefined. This flag is set
+ // through use of a '#' character in the format string. E.g. "%#h"
+ bool has_alt_flag() const { return impl_.has_alt_flag(); }
+
+ // FormatConversionSpec::has_zero_flag()
+ //
+ // Indicates whether zeroes should be prepended to the result for this
+ // conversion character instead of spaces. This flag is set through use of the
+ // '0' character in the format string. E.g. "%0f"
+ bool has_zero_flag() const { return impl_.has_zero_flag(); }
+
+ // FormatConversionSpec::conversion_char()
+ //
+ // Returns the underlying conversion character.
+ FormatConversionChar conversion_char() const {
+ return impl_.conversion_char();
+ }
+
+ // FormatConversionSpec::width()
+ //
+ // Returns the specified width (indicated through use of a non-zero integer
+ // value or '*' character) of the conversion character. If width is
+ // unspecified, it returns a negative value.
+ int width() const { return impl_.width(); }
+
+ // FormatConversionSpec::precision()
+ //
+ // Returns the specified precision (through use of the '.' character followed
+ // by a non-zero integer value or '*' character) of the conversion character.
+ // If precision is unspecified, it returns a negative value.
+ int precision() const { return impl_.precision(); }
+
+ private:
+ explicit FormatConversionSpec(
+ str_format_internal::FormatConversionSpecImpl impl)
+ : impl_(impl) {}
+
+ friend str_format_internal::FormatConversionSpecImpl;
+
+ y_absl::str_format_internal::FormatConversionSpecImpl impl_;
+};
+
+// Type safe OR operator for FormatConversionCharSet to allow accepting multiple
+// conversion chars in custom format converters.
+constexpr FormatConversionCharSet operator|(FormatConversionCharSet a,
+ FormatConversionCharSet b) {
+ return static_cast<FormatConversionCharSet>(static_cast<uint64_t>(a) |
+ static_cast<uint64_t>(b));
+}
+
+// FormatConversionCharSet
+//
+// Specifies the _accepted_ conversion types as a template parameter to
+// FormatConvertResult for custom implementations of `AbslFormatConvert`.
+// Note the helper predefined alias definitions (kIntegral, etc.) below.
+enum class FormatConversionCharSet : uint64_t {
+ // text
+ c = str_format_internal::FormatConversionCharToConvInt('c'),
+ s = str_format_internal::FormatConversionCharToConvInt('s'),
+ // integer
+ d = str_format_internal::FormatConversionCharToConvInt('d'),
+ i = str_format_internal::FormatConversionCharToConvInt('i'),
+ o = str_format_internal::FormatConversionCharToConvInt('o'),
+ u = str_format_internal::FormatConversionCharToConvInt('u'),
+ x = str_format_internal::FormatConversionCharToConvInt('x'),
+ X = str_format_internal::FormatConversionCharToConvInt('X'),
+ // Float
+ f = str_format_internal::FormatConversionCharToConvInt('f'),
+ F = str_format_internal::FormatConversionCharToConvInt('F'),
+ e = str_format_internal::FormatConversionCharToConvInt('e'),
+ E = str_format_internal::FormatConversionCharToConvInt('E'),
+ g = str_format_internal::FormatConversionCharToConvInt('g'),
+ G = str_format_internal::FormatConversionCharToConvInt('G'),
+ a = str_format_internal::FormatConversionCharToConvInt('a'),
+ A = str_format_internal::FormatConversionCharToConvInt('A'),
+ // misc
+ n = str_format_internal::FormatConversionCharToConvInt('n'),
+ p = str_format_internal::FormatConversionCharToConvInt('p'),
+
+ // Used for width/precision '*' specification.
+ kStar = static_cast<uint64_t>(
+ y_absl::str_format_internal::FormatConversionCharSetInternal::kStar),
+ // Some predefined values:
+ kIntegral = d | i | u | o | x | X,
+ kFloating = a | e | f | g | A | E | F | G,
+ kNumeric = kIntegral | kFloating,
+ kString = s,
+ kPointer = p,
+};
+
+// FormatSink
+//
+// An abstraction to which conversions write their string data.
+//
+class FormatSink {
+ public:
+ // Appends `count` copies of `ch`.
+ void Append(size_t count, char ch) { sink_->Append(count, ch); }
+
+ void Append(string_view v) { sink_->Append(v); }
+
+ // Appends the first `precision` bytes of `v`. If this is less than
+ // `width`, spaces will be appended first (if `left` is false), or
+ // after (if `left` is true) to ensure the total amount appended is
+ // at least `width`.
+ bool PutPaddedString(string_view v, int width, int precision, bool left) {
+ return sink_->PutPaddedString(v, width, precision, left);
+ }
+
+ private:
+ friend str_format_internal::FormatSinkImpl;
+ explicit FormatSink(str_format_internal::FormatSinkImpl* s) : sink_(s) {}
+ str_format_internal::FormatSinkImpl* sink_;
+};
+
+// FormatConvertResult
+//
+// Indicates whether a call to AbslFormatConvert() was successful.
+// This return type informs the StrFormat extension framework (through
+// ADL but using the return type) of what conversion characters are supported.
+// It is strongly discouraged to return {false}, as this will result in an
+// empty string in StrFormat.
+template <FormatConversionCharSet C>
+struct FormatConvertResult {
+ bool value;
+};
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_STR_FORMAT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_join.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_join.h
new file mode 100644
index 00000000000..46a0323c6e3
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_join.h
@@ -0,0 +1,293 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: str_join.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains functions for joining a range of elements and
+// returning the result as a TString. StrJoin operations are specified by
+// passing a range, a separator string to use between the elements joined, and
+// an optional Formatter responsible for converting each argument in the range
+// to a string. If omitted, a default `AlphaNumFormatter()` is called on the
+// elements to be joined, using the same formatting that `y_absl::StrCat()` uses.
+// This package defines a number of default formatters, and you can define your
+// own implementations.
+//
+// Ranges are specified by passing a container with `std::begin()` and
+// `std::end()` iterators, container-specific `begin()` and `end()` iterators, a
+// brace-initialized `std::initializer_list`, or a `std::tuple` of heterogeneous
+// objects. The separator string is specified as an `y_absl::string_view`.
+//
+// Because the default formatter uses the `y_absl::AlphaNum` class,
+// `y_absl::StrJoin()`, like `y_absl::StrCat()`, will work out-of-the-box on
+// collections of strings, ints, floats, doubles, etc.
+//
+// Example:
+//
+// std::vector<TString> v = {"foo", "bar", "baz"};
+// TString s = y_absl::StrJoin(v, "-");
+// EXPECT_EQ("foo-bar-baz", s);
+//
+// See comments on the `y_absl::StrJoin()` function for more examples.
+
+#ifndef ABSL_STRINGS_STR_JOIN_H_
+#define ABSL_STRINGS_STR_JOIN_H_
+
+#include <cstdio>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <util/generic/string.h>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/strings/internal/str_join_internal.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// -----------------------------------------------------------------------------
+// Concept: Formatter
+// -----------------------------------------------------------------------------
+//
+// A Formatter is a function object that is responsible for formatting its
+// argument as a string and appending it to a given output TString.
+// Formatters may be implemented as function objects, lambdas, or normal
+// functions. You may provide your own Formatter to enable `y_absl::StrJoin()` to
+// work with arbitrary types.
+//
+// The following is an example of a custom Formatter that simply uses
+// `std::to_string()` to format an integer as a TString.
+//
+// struct MyFormatter {
+// void operator()(TString* out, int i) const {
+// out->append(std::to_string(i));
+// }
+// };
+//
+// You would use the above formatter by passing an instance of it as the final
+// argument to `y_absl::StrJoin()`:
+//
+// std::vector<int> v = {1, 2, 3, 4};
+// TString s = y_absl::StrJoin(v, "-", MyFormatter());
+// EXPECT_EQ("1-2-3-4", s);
+//
+// The following standard formatters are provided within this file:
+//
+// - `AlphaNumFormatter()` (the default)
+// - `StreamFormatter()`
+// - `PairFormatter()`
+// - `DereferenceFormatter()`
+
+// AlphaNumFormatter()
+//
+// Default formatter used if none is specified. Uses `y_absl::AlphaNum` to convert
+// numeric arguments to strings.
+inline strings_internal::AlphaNumFormatterImpl AlphaNumFormatter() {
+ return strings_internal::AlphaNumFormatterImpl();
+}
+
+// StreamFormatter()
+//
+// Formats its argument using the << operator.
+inline strings_internal::StreamFormatterImpl StreamFormatter() {
+ return strings_internal::StreamFormatterImpl();
+}
+
+// Function Template: PairFormatter(Formatter, y_absl::string_view, Formatter)
+//
+// Formats a `std::pair` by putting a given separator between the pair's
+// `.first` and `.second` members. This formatter allows you to specify
+// custom Formatters for both the first and second member of each pair.
+template <typename FirstFormatter, typename SecondFormatter>
+inline strings_internal::PairFormatterImpl<FirstFormatter, SecondFormatter>
+PairFormatter(FirstFormatter f1, y_absl::string_view sep, SecondFormatter f2) {
+ return strings_internal::PairFormatterImpl<FirstFormatter, SecondFormatter>(
+ std::move(f1), sep, std::move(f2));
+}
+
+// Function overload of PairFormatter() for using a default
+// `AlphaNumFormatter()` for each Formatter in the pair.
+inline strings_internal::PairFormatterImpl<
+ strings_internal::AlphaNumFormatterImpl,
+ strings_internal::AlphaNumFormatterImpl>
+PairFormatter(y_absl::string_view sep) {
+ return PairFormatter(AlphaNumFormatter(), sep, AlphaNumFormatter());
+}
+
+// Function Template: DereferenceFormatter(Formatter)
+//
+// Formats its argument by dereferencing it and then applying the given
+// formatter. This formatter is useful for formatting a container of
+// pointer-to-T. This pattern often shows up when joining repeated fields in
+// protocol buffers.
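+//
+// For example (an illustrative sketch), combining it with a custom inner
+// formatter:
+//
+//   int a = 1, b = 2;
+//   std::vector<int*> v = {&a, &b};
+//   TString s = y_absl::StrJoin(
+//       v, "-", y_absl::DereferenceFormatter(y_absl::StreamFormatter()));
+//   // s == "1-2"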
+template <typename Formatter>
+strings_internal::DereferenceFormatterImpl<Formatter> DereferenceFormatter(
+ Formatter&& f) {
+ return strings_internal::DereferenceFormatterImpl<Formatter>(
+ std::forward<Formatter>(f));
+}
+
+// Function overload of `DereferenceFormatter()` for using a default
+// `AlphaNumFormatter()`.
+inline strings_internal::DereferenceFormatterImpl<
+ strings_internal::AlphaNumFormatterImpl>
+DereferenceFormatter() {
+ return strings_internal::DereferenceFormatterImpl<
+ strings_internal::AlphaNumFormatterImpl>(AlphaNumFormatter());
+}
+
+// -----------------------------------------------------------------------------
+// StrJoin()
+// -----------------------------------------------------------------------------
+//
+// Joins a range of elements and returns the result as a TString.
+// `y_absl::StrJoin()` takes a range, a separator string to use between the
+// elements joined, and an optional Formatter responsible for converting each
+// argument in the range to a string.
+//
+// If omitted, the default `AlphaNumFormatter()` is called on the elements to be
+// joined.
+//
+// Example 1:
+// // Joins a collection of strings. This pattern also works with a collection
+// // of `y_absl::string_view` or even `const char*`.
+// std::vector<TString> v = {"foo", "bar", "baz"};
+// TString s = y_absl::StrJoin(v, "-");
+// EXPECT_EQ("foo-bar-baz", s);
+//
+// Example 2:
+// // Joins the values in the given `std::initializer_list<>` specified using
+// // brace initialization. This pattern also works with an initializer_list
+// // of ints or `y_absl::string_view` -- any `AlphaNum`-compatible type.
+// TString s = y_absl::StrJoin({"foo", "bar", "baz"}, "-");
+// EXPECT_EQ("foo-bar-baz", s);
+//
+// Example 3:
+// // Joins a collection of ints. This pattern also works with floats,
+// // doubles, int64s -- any `StrCat()`-compatible type.
+// std::vector<int> v = {1, 2, 3, -4};
+// TString s = y_absl::StrJoin(v, "-");
+// EXPECT_EQ("1-2-3--4", s);
+//
+// Example 4:
+// // Joins a collection of pointer-to-int. By default, pointers are
+// // dereferenced and the pointee is formatted using the default format for
+// // that type; such dereferencing occurs for all levels of indirection, so
+// // this pattern works just as well for `std::vector<int**>` as for
+// // `std::vector<int*>`.
+// int x = 1, y = 2, z = 3;
+// std::vector<int*> v = {&x, &y, &z};
+// TString s = y_absl::StrJoin(v, "-");
+// EXPECT_EQ("1-2-3", s);
+//
+// Example 5:
+// // Dereferencing of `std::unique_ptr<>` is also supported:
+//   std::vector<std::unique_ptr<int>> v;
+// v.emplace_back(new int(1));
+// v.emplace_back(new int(2));
+// v.emplace_back(new int(3));
+// TString s = y_absl::StrJoin(v, "-");
+// EXPECT_EQ("1-2-3", s);
+//
+// Example 6:
+// // Joins a `std::map`, with each key-value pair separated by an equals
+// // sign. This pattern would also work with, say, a
+// // `std::vector<std::pair<>>`.
+// std::map<TString, int> m = {
+// std::make_pair("a", 1),
+// std::make_pair("b", 2),
+// std::make_pair("c", 3)};
+// TString s = y_absl::StrJoin(m, ",", y_absl::PairFormatter("="));
+// EXPECT_EQ("a=1,b=2,c=3", s);
+//
+// Example 7:
+// // These examples show how `y_absl::StrJoin()` handles a few common edge
+// // cases:
+// std::vector<TString> v_empty;
+// EXPECT_EQ("", y_absl::StrJoin(v_empty, "-"));
+//
+// std::vector<TString> v_one_item = {"foo"};
+// EXPECT_EQ("foo", y_absl::StrJoin(v_one_item, "-"));
+//
+// std::vector<TString> v_empty_string = {""};
+// EXPECT_EQ("", y_absl::StrJoin(v_empty_string, "-"));
+//
+// std::vector<TString> v_one_item_empty_string = {"a", ""};
+// EXPECT_EQ("a-", y_absl::StrJoin(v_one_item_empty_string, "-"));
+//
+// std::vector<TString> v_two_empty_string = {"", ""};
+// EXPECT_EQ("-", y_absl::StrJoin(v_two_empty_string, "-"));
+//
+// Example 8:
+// // Joins a `std::tuple<T...>` of heterogeneous types, converting each to
+// // a TString using the `y_absl::AlphaNum` class.
+// TString s = y_absl::StrJoin(std::make_tuple(123, "abc", 0.456), "-");
+// EXPECT_EQ("123-abc-0.456", s);
+
+template <typename Iterator, typename Formatter>
+TString StrJoin(Iterator start, Iterator end, y_absl::string_view sep,
+ Formatter&& fmt) {
+ return strings_internal::JoinAlgorithm(start, end, sep, fmt);
+}
+
+template <typename Range, typename Formatter>
+TString StrJoin(const Range& range, y_absl::string_view separator,
+ Formatter&& fmt) {
+ return strings_internal::JoinRange(range, separator, fmt);
+}
+
+template <typename T, typename Formatter>
+TString StrJoin(std::initializer_list<T> il, y_absl::string_view separator,
+ Formatter&& fmt) {
+ return strings_internal::JoinRange(il, separator, fmt);
+}
+
+template <typename... T, typename Formatter>
+TString StrJoin(const std::tuple<T...>& value, y_absl::string_view separator,
+ Formatter&& fmt) {
+ return strings_internal::JoinAlgorithm(value, separator, fmt);
+}
+
+template <typename Iterator>
+TString StrJoin(Iterator start, Iterator end, y_absl::string_view separator) {
+ return strings_internal::JoinRange(start, end, separator);
+}
+
+template <typename Range>
+TString StrJoin(const Range& range, y_absl::string_view separator) {
+ return strings_internal::JoinRange(range, separator);
+}
+
+template <typename T>
+TString StrJoin(std::initializer_list<T> il,
+ y_absl::string_view separator) {
+ return strings_internal::JoinRange(il, separator);
+}
+
+template <typename... T>
+TString StrJoin(const std::tuple<T...>& value,
+ y_absl::string_view separator) {
+ return strings_internal::JoinAlgorithm(value, separator, AlphaNumFormatter());
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_STR_JOIN_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_replace.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_replace.cc
new file mode 100644
index 00000000000..77b78c6c160
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_replace.cc
@@ -0,0 +1,82 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/str_replace.h"
+
+#include "y_absl/strings/str_cat.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+using FixedMapping =
+ std::initializer_list<std::pair<y_absl::string_view, y_absl::string_view>>;
+
+// Applies the ViableSubstitutions in subs_ptr to the y_absl::string_view s, and
+// stores the result in *result_ptr. Returns the number of substitutions that
+// occurred.
+int ApplySubstitutions(
+ y_absl::string_view s,
+ std::vector<strings_internal::ViableSubstitution>* subs_ptr,
+ TString* result_ptr) {
+ auto& subs = *subs_ptr;
+ int substitutions = 0;
+ size_t pos = 0;
+ while (!subs.empty()) {
+ auto& sub = subs.back();
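+    // `subs` is kept ordered so that the substitution with the earliest
+    // remaining match sits at the back. A candidate whose match starts before
+    // `pos` overlaps text already consumed by an earlier substitution; it is
+    // skipped here and re-searched from `pos` below.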
+ if (sub.offset >= pos) {
+ if (pos <= s.size()) {
+ StrAppend(result_ptr, s.substr(pos, sub.offset - pos), sub.replacement);
+ }
+ pos = sub.offset + sub.old.size();
+ substitutions += 1;
+ }
+ sub.offset = s.find(sub.old, pos);
+ if (sub.offset == s.npos) {
+ subs.pop_back();
+ } else {
+ // Insertion sort to ensure the last ViableSubstitution continues to be
+ // before all the others.
+ size_t index = subs.size();
+ while (--index && subs[index - 1].OccursBefore(subs[index])) {
+ std::swap(subs[index], subs[index - 1]);
+ }
+ }
+ }
+ result_ptr->append(s.data() + pos, s.size() - pos);
+ return substitutions;
+}
+
+} // namespace strings_internal
+
+// We can implement this in terms of the generic StrReplaceAll, but we must
+// specify the template overload because C++ cannot deduce the type of an
+// initializer_list parameter to a function. Without the explicit type, the
+// call would simply recurse back into this overload.
+//
+// Note that we implement them here, rather than in the header, so that they
+// aren't inlined.
+
+TString StrReplaceAll(y_absl::string_view s,
+ strings_internal::FixedMapping replacements) {
+ return StrReplaceAll<strings_internal::FixedMapping>(s, replacements);
+}
+
+int StrReplaceAll(strings_internal::FixedMapping replacements,
+ TString* target) {
+ return StrReplaceAll<strings_internal::FixedMapping>(replacements, target);
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_replace.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_replace.h
new file mode 100644
index 00000000000..42c85616a0d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_replace.h
@@ -0,0 +1,219 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: str_replace.h
+// -----------------------------------------------------------------------------
+//
+// This file defines `y_absl::StrReplaceAll()`, a general-purpose string
+// replacement function designed for large, arbitrary text substitutions,
+// especially on strings which you are receiving from some other system for
+// further processing (e.g. processing regular expressions, escaping HTML
+// entities, etc.). `StrReplaceAll` is designed to be efficient even when only
+// one substitution is being performed, or when substitution is rare.
+//
+// If the string being modified is known at compile-time, and the substitutions
+// vary, `y_absl::Substitute()` may be a better choice.
+//
+// Example:
+//
+// TString html_escaped = y_absl::StrReplaceAll(user_input, {
+// {"&", "&amp;"},
+// {"<", "&lt;"},
+// {">", "&gt;"},
+// {"\"", "&quot;"},
+// {"'", "&#39;"}});
+#ifndef ABSL_STRINGS_STR_REPLACE_H_
+#define ABSL_STRINGS_STR_REPLACE_H_
+
+#include <util/generic/string.h>
+#include <utility>
+#include <vector>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// StrReplaceAll()
+//
+// Replaces character sequences within a given string with replacements provided
+// within an initializer list of key/value pairs. Candidate replacements are
+// considered in order as they occur within the string, with earlier matches
+// taking precedence, and longer matches taking precedence for candidates
+// starting at the same position in the string. Once a substitution is made, the
+// replaced text is not considered for any further substitutions.
+//
+// Example:
+//
+// TString s = y_absl::StrReplaceAll(
+// "$who bought $count #Noun. Thanks $who!",
+// {{"$count", y_absl::StrCat(5)},
+// {"$who", "Bob"},
+// {"#Noun", "Apples"}});
+// EXPECT_EQ("Bob bought 5 Apples. Thanks Bob!", s);
+ABSL_MUST_USE_RESULT TString StrReplaceAll(
+ y_absl::string_view s,
+ std::initializer_list<std::pair<y_absl::string_view, y_absl::string_view>>
+ replacements);
+
+// Overload of `StrReplaceAll()` to accept a container of key/value replacement
+// pairs (typically either an associative map or a `std::vector` of `std::pair`
+// elements). A vector of pairs is generally more efficient.
+//
+// Examples:
+//
+//   std::map<y_absl::string_view, y_absl::string_view> replacements;
+// replacements["$who"] = "Bob";
+// replacements["$count"] = "5";
+// replacements["#Noun"] = "Apples";
+// TString s = y_absl::StrReplaceAll(
+// "$who bought $count #Noun. Thanks $who!",
+// replacements);
+// EXPECT_EQ("Bob bought 5 Apples. Thanks Bob!", s);
+//
+// // A std::vector of std::pair elements can be more efficient.
+// std::vector<std::pair<const y_absl::string_view, TString>> replacements;
+// replacements.push_back({"&", "&amp;"});
+// replacements.push_back({"<", "&lt;"});
+// replacements.push_back({">", "&gt;"});
+// TString s = y_absl::StrReplaceAll("if (ptr < &foo)",
+// replacements);
+// EXPECT_EQ("if (ptr &lt; &amp;foo)", s);
+template <typename StrToStrMapping>
+TString StrReplaceAll(y_absl::string_view s,
+ const StrToStrMapping& replacements);
+
+// Overload of `StrReplaceAll()` to replace character sequences within a given
+// output string *in place* with replacements provided within an initializer
+// list of key/value pairs, returning the number of substitutions that occurred.
+//
+// Example:
+//
+// TString s = TString("$who bought $count #Noun. Thanks $who!");
+// int count;
+// count = y_absl::StrReplaceAll({{"$count", y_absl::StrCat(5)},
+// {"$who", "Bob"},
+// {"#Noun", "Apples"}}, &s);
+// EXPECT_EQ(count, 4);
+// EXPECT_EQ("Bob bought 5 Apples. Thanks Bob!", s);
+int StrReplaceAll(
+ std::initializer_list<std::pair<y_absl::string_view, y_absl::string_view>>
+ replacements,
+ TString* target);
+
+// Overload of `StrReplaceAll()` to replace patterns within a given output
+// string *in place* with replacements provided within a container of key/value
+// pairs.
+//
+// Example:
+//
+// TString s = TString("if (ptr < &foo)");
+// int count = y_absl::StrReplaceAll({{"&", "&amp;"},
+// {"<", "&lt;"},
+// {">", "&gt;"}}, &s);
+// EXPECT_EQ(count, 2);
+// EXPECT_EQ("if (ptr &lt; &amp;foo)", s);
+template <typename StrToStrMapping>
+int StrReplaceAll(const StrToStrMapping& replacements, TString* target);
+
+// Implementation details only, past this point.
+namespace strings_internal {
+
+struct ViableSubstitution {
+ y_absl::string_view old;
+ y_absl::string_view replacement;
+ size_t offset;
+
+ ViableSubstitution(y_absl::string_view old_str,
+ y_absl::string_view replacement_str, size_t offset_val)
+ : old(old_str), replacement(replacement_str), offset(offset_val) {}
+
+ // One substitution occurs "before" another (takes priority) if either
+ // it has the lowest offset, or it has the same offset but a larger size.
+ bool OccursBefore(const ViableSubstitution& y) const {
+ if (offset != y.offset) return offset < y.offset;
+ return old.size() > y.old.size();
+ }
+};
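+
+// Illustrative sketch (not part of the upstream source) of the ordering rule
+// above: a lower offset wins, and at equal offsets the longer pattern wins.
+//
+//   ViableSubstitution a("ab", "x", 0), b("a", "y", 0), c("b", "z", 1);
+//   a.OccursBefore(b);  // true: equal offsets, but "ab" is longer than "a"
+//   a.OccursBefore(c);  // true: offset 0 precedes offset 1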
+
+// Build a vector of ViableSubstitutions based on the given list of
+// replacements. `subs` could be implemented as a priority_queue. However, it
+// turns out that most callers have a small enough list of substitutions that
+// the overhead of such a queue isn't worth it.
+template <typename StrToStrMapping>
+std::vector<ViableSubstitution> FindSubstitutions(
+ y_absl::string_view s, const StrToStrMapping& replacements) {
+ std::vector<ViableSubstitution> subs;
+ subs.reserve(replacements.size());
+
+ for (const auto& rep : replacements) {
+ using std::get;
+ y_absl::string_view old(get<0>(rep));
+
+ size_t pos = s.find(old);
+ if (pos == s.npos) continue;
+
+    // Ignore attempts to replace "". This condition is almost never true,
+    // but the condition above is frequently true. That's why we test for
+    // this here rather than earlier.
+ if (old.empty()) continue;
+
+ subs.emplace_back(old, get<1>(rep), pos);
+
+ // Insertion sort to ensure the last ViableSubstitution comes before
+ // all the others.
+ size_t index = subs.size();
+ while (--index && subs[index - 1].OccursBefore(subs[index])) {
+ std::swap(subs[index], subs[index - 1]);
+ }
+ }
+ return subs;
+}
+
+int ApplySubstitutions(y_absl::string_view s,
+ std::vector<ViableSubstitution>* subs_ptr,
+ TString* result_ptr);
+
+} // namespace strings_internal
+
+template <typename StrToStrMapping>
+TString StrReplaceAll(y_absl::string_view s,
+ const StrToStrMapping& replacements) {
+ auto subs = strings_internal::FindSubstitutions(s, replacements);
+ TString result;
+ result.reserve(s.size());
+ strings_internal::ApplySubstitutions(s, &subs, &result);
+ return result;
+}
+
+template <typename StrToStrMapping>
+int StrReplaceAll(const StrToStrMapping& replacements, TString* target) {
+ auto subs = strings_internal::FindSubstitutions(*target, replacements);
+ if (subs.empty()) return 0;
+
+ TString result;
+ result.reserve(target->size());
+ int substitutions =
+ strings_internal::ApplySubstitutions(*target, &subs, &result);
+ target->swap(result);
+ return substitutions;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_STR_REPLACE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.cc
new file mode 100644
index 00000000000..5f9193e6ba2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.cc
@@ -0,0 +1,139 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/str_split.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/strings/ascii.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace {
+
+// This GenericFind() template function encapsulates the finding algorithm
+// shared between the ByString and ByAnyChar delimiters. The FindPolicy
+// template parameter allows each delimiter to customize the actual find
+// function to use and the length of the found delimiter. For example, the
+// Literal delimiter will ultimately use y_absl::string_view::find(), and the
+// AnyOf delimiter will use y_absl::string_view::find_first_of().
+template <typename FindPolicy>
+y_absl::string_view GenericFind(y_absl::string_view text,
+ y_absl::string_view delimiter, size_t pos,
+ FindPolicy find_policy) {
+ if (delimiter.empty() && text.length() > 0) {
+ // Special case for empty string delimiters: always return a zero-length
+ // y_absl::string_view referring to the item at position 1 past pos.
+ return y_absl::string_view(text.data() + pos + 1, 0);
+ }
+ size_t found_pos = y_absl::string_view::npos;
+ y_absl::string_view found(text.data() + text.size(),
+ 0); // By default, not found
+ found_pos = find_policy.Find(text, delimiter, pos);
+ if (found_pos != y_absl::string_view::npos) {
+ found = y_absl::string_view(text.data() + found_pos,
+ find_policy.Length(delimiter));
+ }
+ return found;
+}
+
+// Finds using y_absl::string_view::find(), therefore the length of the found
+// delimiter is delimiter.length().
+struct LiteralPolicy {
+ size_t Find(y_absl::string_view text, y_absl::string_view delimiter, size_t pos) {
+ return text.find(delimiter, pos);
+ }
+ size_t Length(y_absl::string_view delimiter) { return delimiter.length(); }
+};
+
+// Finds using y_absl::string_view::find_first_of(), therefore the length of the
+// found delimiter is 1.
+struct AnyOfPolicy {
+ size_t Find(y_absl::string_view text, y_absl::string_view delimiter, size_t pos) {
+ return text.find_first_of(delimiter, pos);
+ }
+ size_t Length(y_absl::string_view /* delimiter */) { return 1; }
+};
+
+} // namespace
+
+//
+// ByString
+//
+
+ByString::ByString(y_absl::string_view sp) : delimiter_(sp) {}
+
+y_absl::string_view ByString::Find(y_absl::string_view text, size_t pos) const {
+ if (delimiter_.length() == 1) {
+ // Much faster to call find on a single character than on an
+ // y_absl::string_view.
+ size_t found_pos = text.find(delimiter_[0], pos);
+ if (found_pos == y_absl::string_view::npos)
+ return y_absl::string_view(text.data() + text.size(), 0);
+ return text.substr(found_pos, 1);
+ }
+ return GenericFind(text, delimiter_, pos, LiteralPolicy());
+}
+
+//
+// ByChar
+//
+
+y_absl::string_view ByChar::Find(y_absl::string_view text, size_t pos) const {
+ size_t found_pos = text.find(c_, pos);
+ if (found_pos == y_absl::string_view::npos)
+ return y_absl::string_view(text.data() + text.size(), 0);
+ return text.substr(found_pos, 1);
+}
+
+//
+// ByAnyChar
+//
+
+ByAnyChar::ByAnyChar(y_absl::string_view sp) : delimiters_(sp) {}
+
+y_absl::string_view ByAnyChar::Find(y_absl::string_view text, size_t pos) const {
+ return GenericFind(text, delimiters_, pos, AnyOfPolicy());
+}
+
+//
+// ByLength
+//
+ByLength::ByLength(ptrdiff_t length) : length_(length) {
+ ABSL_RAW_CHECK(length > 0, "");
+}
+
+y_absl::string_view ByLength::Find(y_absl::string_view text,
+ size_t pos) const {
+ pos = std::min(pos, text.size()); // truncate `pos`
+ y_absl::string_view substr = text.substr(pos);
+  // If the string is shorter than the chunk size, we say we "can't find the
+  // delimiter", so this will be the last chunk.
+ if (substr.length() <= static_cast<size_t>(length_))
+ return y_absl::string_view(text.data() + text.size(), 0);
+
+ return y_absl::string_view(substr.data() + length_, 0);
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.h
new file mode 100644
index 00000000000..d32d54813ed
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.h
@@ -0,0 +1,548 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: str_split.h
+// -----------------------------------------------------------------------------
+//
+// This file contains functions for splitting strings. It defines the main
+// `StrSplit()` function, several delimiters for determining the boundaries on
+// which to split the string, and predicates for filtering delimited results.
+// `StrSplit()` adapts the returned collection to the type specified by the
+// caller.
+//
+// Example:
+//
+// // Splits the given string on commas. Returns the results in a
+// // vector of strings.
+// std::vector<TString> v = y_absl::StrSplit("a,b,c", ',');
+// // Can also use ","
+// // v[0] == "a", v[1] == "b", v[2] == "c"
+//
+// See StrSplit() below for more information.
+#ifndef ABSL_STRINGS_STR_SPLIT_H_
+#define ABSL_STRINGS_STR_SPLIT_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <map>
+#include <set>
+#include <util/generic/string.h>
+#include <utility>
+#include <vector>
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/strings/internal/str_split_internal.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/strings/strip.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+//------------------------------------------------------------------------------
+// Delimiters
+//------------------------------------------------------------------------------
+//
+// `StrSplit()` uses delimiters to define the boundaries between elements in the
+// provided input. Several `Delimiter` types are defined below. If a string
+// (`const char*`, `TString`, or `y_absl::string_view`) is passed in place of
+// an explicit `Delimiter` object, `StrSplit()` treats it the same way as if it
+// were passed a `ByString` delimiter.
+//
+// A `Delimiter` is an object with a `Find()` function that knows how to find
+// the first occurrence of itself in a given `y_absl::string_view`.
+//
+// The following `Delimiter` types are available for use within `StrSplit()`:
+//
+// - `ByString` (default for string arguments)
+// - `ByChar` (default for a char argument)
+// - `ByAnyChar`
+// - `ByLength`
+// - `MaxSplits`
+//
+// A Delimiter's `Find()` member function will be passed an input `text` that is
+// to be split and a position (`pos`) to begin searching for the next delimiter
+// in `text`. The returned y_absl::string_view should refer to the next occurrence
+// (after `pos`) of the represented delimiter; this returned y_absl::string_view
+// represents the next location where the input `text` should be broken.
+//
+// The returned y_absl::string_view may be zero-length if the Delimiter does not
+// represent a part of the string (e.g., a fixed-length delimiter). If no
+// delimiter is found in the input `text`, a zero-length y_absl::string_view
+// referring to `text.end()` should be returned (e.g.,
+// `text.substr(text.size())`). It is important that the returned
+// y_absl::string_view always be within the bounds of the input `text` given as an
+// argument--it must not refer to a string that is physically located outside of
+// the given string.
+//
+// The following example is a simple Delimiter object that is created with a
+// single char and will look for that char in the text passed to the `Find()`
+// function:
+//
+// struct SimpleDelimiter {
+// const char c_;
+// explicit SimpleDelimiter(char c) : c_(c) {}
+// y_absl::string_view Find(y_absl::string_view text, size_t pos) {
+// auto found = text.find(c_, pos);
+// if (found == y_absl::string_view::npos)
+// return text.substr(text.size());
+//
+// return text.substr(found, 1);
+// }
+// };
+
+// ByString
+//
+// A sub-string delimiter. If `StrSplit()` is passed a string in place of a
+// `Delimiter` object, the string will be implicitly converted into a
+// `ByString` delimiter.
+//
+// Example:
+//
+// // Because a string literal is converted to an `y_absl::ByString`,
+// // the following two splits are equivalent.
+//
+// std::vector<TString> v1 = y_absl::StrSplit("a, b, c", ", ");
+//
+// using y_absl::ByString;
+// std::vector<TString> v2 = y_absl::StrSplit("a, b, c",
+// ByString(", "));
+// // v[0] == "a", v[1] == "b", v[2] == "c"
+class ByString {
+ public:
+ explicit ByString(y_absl::string_view sp);
+ y_absl::string_view Find(y_absl::string_view text, size_t pos) const;
+
+ private:
+ const TString delimiter_;
+};
+
+// ByChar
+//
+// A single character delimiter. `ByChar` is functionally equivalent to a
+// 1-char string within a `ByString` delimiter, but slightly more efficient.
+//
+// Example:
+//
+// // Because a char literal is converted to a y_absl::ByChar,
+// // the following two splits are equivalent.
+// std::vector<TString> v1 = y_absl::StrSplit("a,b,c", ',');
+// using y_absl::ByChar;
+// std::vector<TString> v2 = y_absl::StrSplit("a,b,c", ByChar(','));
+// // v[0] == "a", v[1] == "b", v[2] == "c"
+//
+// `ByChar` is also the default delimiter if a single character is given
+// as the delimiter to `StrSplit()`. For example, the following calls are
+// equivalent:
+//
+// std::vector<TString> v = y_absl::StrSplit("a-b", '-');
+//
+// using y_absl::ByChar;
+// std::vector<TString> v = y_absl::StrSplit("a-b", ByChar('-'));
+//
+class ByChar {
+ public:
+ explicit ByChar(char c) : c_(c) {}
+ y_absl::string_view Find(y_absl::string_view text, size_t pos) const;
+
+ private:
+ char c_;
+};
+
+// ByAnyChar
+//
+// A delimiter that will match any of the given byte-sized characters within
+// its provided string.
+//
+// Note: this delimiter works with single-byte string data, but does not work
+// with variable-width encodings, such as UTF-8.
+//
+// Example:
+//
+// using y_absl::ByAnyChar;
+// std::vector<TString> v = y_absl::StrSplit("a,b=c", ByAnyChar(",="));
+// // v[0] == "a", v[1] == "b", v[2] == "c"
+//
+// If `ByAnyChar` is given the empty string, it behaves exactly like
+// `ByString` given the empty string: the input is split at each individual
+// character.
+//
+class ByAnyChar {
+ public:
+ explicit ByAnyChar(y_absl::string_view sp);
+ y_absl::string_view Find(y_absl::string_view text, size_t pos) const;
+
+ private:
+ const TString delimiters_;
+};
+
+// ByLength
+//
+// A delimiter for splitting into equal-length strings. The length argument to
+// the constructor must be greater than 0.
+//
+// Note: this delimiter works with single-byte string data, but does not work
+// with variable-width encodings, such as UTF-8.
+//
+// Example:
+//
+// using y_absl::ByLength;
+// std::vector<TString> v = y_absl::StrSplit("123456789", ByLength(3));
+//
+// // v[0] == "123", v[1] == "456", v[2] == "789"
+//
+// Note that the string does not have to be a multiple of the fixed split
+// length. In such a case, the last substring will be shorter.
+//
+// using y_absl::ByLength;
+// std::vector<TString> v = y_absl::StrSplit("12345", ByLength(2));
+//
+// // v[0] == "12", v[1] == "34", v[2] == "5"
+class ByLength {
+ public:
+ explicit ByLength(ptrdiff_t length);
+ y_absl::string_view Find(y_absl::string_view text, size_t pos) const;
+
+ private:
+ const ptrdiff_t length_;
+};
+
+namespace strings_internal {
+
+// A traits-like metafunction for selecting the default Delimiter object type
+// for a particular Delimiter type. The base case simply exposes type Delimiter
+// itself as the delimiter's Type. However, there are specializations for
+// string-like objects that map them to the ByString delimiter object.
+// This allows functions like y_absl::StrSplit() and y_absl::MaxSplits() to
+// accept string-like objects (e.g., ", ") as delimiter arguments; they will be
+// treated as if a ByString delimiter had been given.
+template <typename Delimiter>
+struct SelectDelimiter {
+ using type = Delimiter;
+};
+
+template <>
+struct SelectDelimiter<char> {
+ using type = ByChar;
+};
+template <>
+struct SelectDelimiter<char*> {
+ using type = ByString;
+};
+template <>
+struct SelectDelimiter<const char*> {
+ using type = ByString;
+};
+template <>
+struct SelectDelimiter<y_absl::string_view> {
+ using type = ByString;
+};
+template <>
+struct SelectDelimiter<TString> {
+ using type = ByString;
+};
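+
+// Illustrative sketch (not part of the upstream source) of the mapping this
+// metafunction performs:
+//
+//   static_assert(std::is_same<SelectDelimiter<char>::type, ByChar>::value, "");
+//   static_assert(
+//       std::is_same<SelectDelimiter<const char*>::type, ByString>::value, "");
+//   static_assert(
+//       std::is_same<SelectDelimiter<ByAnyChar>::type, ByAnyChar>::value, "");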
+
+// Wraps another delimiter and sets a max number of matches for that delimiter.
+template <typename Delimiter>
+class MaxSplitsImpl {
+ public:
+ MaxSplitsImpl(Delimiter delimiter, int limit)
+ : delimiter_(delimiter), limit_(limit), count_(0) {}
+ y_absl::string_view Find(y_absl::string_view text, size_t pos) {
+ if (count_++ == limit_) {
+ return y_absl::string_view(text.data() + text.size(),
+ 0); // No more matches.
+ }
+ return delimiter_.Find(text, pos);
+ }
+
+ private:
+ Delimiter delimiter_;
+ const int limit_;
+ int count_;
+};
+
+} // namespace strings_internal
+
+// MaxSplits()
+//
+// A delimiter that limits the number of matches which can occur to the passed
+// `limit`. The last element in the returned collection will contain all
+// remaining unsplit pieces, which may contain instances of the delimiter.
+// The collection will contain at most `limit` + 1 elements.
+// Example:
+//
+// using y_absl::MaxSplits;
+// std::vector<TString> v = y_absl::StrSplit("a,b,c", MaxSplits(',', 1));
+//
+// // v[0] == "a", v[1] == "b,c"
+template <typename Delimiter>
+inline strings_internal::MaxSplitsImpl<
+ typename strings_internal::SelectDelimiter<Delimiter>::type>
+MaxSplits(Delimiter delimiter, int limit) {
+ typedef
+ typename strings_internal::SelectDelimiter<Delimiter>::type DelimiterType;
+ return strings_internal::MaxSplitsImpl<DelimiterType>(
+ DelimiterType(delimiter), limit);
+}
+
+//------------------------------------------------------------------------------
+// Predicates
+//------------------------------------------------------------------------------
+//
+// Predicates filter the results of a `StrSplit()` by determining whether or not
+// a resultant element is included in the result set. A predicate may be passed
+// as an optional third argument to the `StrSplit()` function.
+//
+// Predicates are unary functions (or functors) that take a single
+// `y_absl::string_view` argument and return a bool indicating whether the
+// argument should be included (`true`) or excluded (`false`).
+//
+// Predicates are useful when filtering out empty substrings. By default, empty
+// substrings may be returned by `StrSplit()`, which is similar to the way split
+// functions work in other programming languages.
+
+// AllowEmpty()
+//
+// Always returns `true`, indicating that all strings--including empty
+// strings--should be included in the split output. This predicate is not
+// strictly needed because this is the default behavior of `StrSplit()`;
+// however, it might be useful at some call sites to make the intent explicit.
+//
+// Example:
+//
+// std::vector<TString> v = y_absl::StrSplit(" a , ,,b,", ',', AllowEmpty());
+//
+// // v[0] == " a ", v[1] == " ", v[2] == "", v[3] = "b", v[4] == ""
+struct AllowEmpty {
+ bool operator()(y_absl::string_view) const { return true; }
+};
+
+// SkipEmpty()
+//
+// Returns `false` if the given `y_absl::string_view` is empty, indicating that
+// `StrSplit()` should omit the empty string.
+//
+// Example:
+//
+// std::vector<TString> v = y_absl::StrSplit(",a,,b,", ',', SkipEmpty());
+//
+// // v[0] == "a", v[1] == "b"
+//
+// Note: `SkipEmpty()` does not consider a string containing only whitespace
+// to be empty. To skip such whitespace as well, use the `SkipWhitespace()`
+// predicate.
+struct SkipEmpty {
+ bool operator()(y_absl::string_view sp) const { return !sp.empty(); }
+};
+
+// SkipWhitespace()
+//
+// Returns `false` if the given `y_absl::string_view` is empty *or* contains only
+// whitespace, indicating that `StrSplit()` should omit the string.
+//
+// Example:
+//
+// std::vector<TString> v = y_absl::StrSplit(" a , ,,b,",
+// ',', SkipWhitespace());
+// // v[0] == " a ", v[1] == "b"
+//
+// // SkipEmpty() would return whitespace elements
+// std::vector<TString> v = y_absl::StrSplit(" a , ,,b,", ',', SkipEmpty());
+// // v[0] == " a ", v[1] == " ", v[2] == "b"
+struct SkipWhitespace {
+ bool operator()(y_absl::string_view sp) const {
+ sp = y_absl::StripAsciiWhitespace(sp);
+ return !sp.empty();
+ }
+};
+
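+// EnableSplitIfString is a SFINAE helper that names `int` only when `T` is
+// `TString` or `const TString`; it is used below to select the `StrSplit()`
+// overloads that take ownership of an rvalue string argument.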
+template <typename T>
+using EnableSplitIfString =
+ typename std::enable_if<std::is_same<T, TString>::value ||
+ std::is_same<T, const TString>::value,
+ int>::type;
+
+//------------------------------------------------------------------------------
+// StrSplit()
+//------------------------------------------------------------------------------
+
+// StrSplit()
+//
+// Splits a given string based on the provided `Delimiter` object, returning the
+// elements within the type specified by the caller. Optionally, you may pass a
+// `Predicate` to `StrSplit()` indicating whether to include or exclude the
+// resulting element within the final result set. (See the overviews for
+// Delimiters and Predicates above.)
+//
+// Example:
+//
+// std::vector<TString> v = y_absl::StrSplit("a,b,c,d", ',');
+// // v[0] == "a", v[1] == "b", v[2] == "c", v[3] == "d"
+//
+// You can also provide an explicit `Delimiter` object:
+//
+// Example:
+//
+// using y_absl::ByAnyChar;
+// std::vector<TString> v = y_absl::StrSplit("a,b=c", ByAnyChar(",="));
+// // v[0] == "a", v[1] == "b", v[2] == "c"
+//
+// See above for more information on delimiters.
+//
+// By default, empty strings are included in the result set. You can optionally
+// include a third `Predicate` argument to apply a test for whether the
+// resultant element should be included in the result set:
+//
+// Example:
+//
+// std::vector<TString> v = y_absl::StrSplit(" a , ,,b,",
+// ',', SkipWhitespace());
+// // v[0] == " a ", v[1] == "b"
+//
+// See above for more information on predicates.
+//
+//------------------------------------------------------------------------------
+// StrSplit() Return Types
+//------------------------------------------------------------------------------
+//
+// The `StrSplit()` function adapts the returned collection to the collection
+// specified by the caller (e.g. `std::vector` above). The returned collections
+// may contain `TString`, `y_absl::string_view` (in which case the original
+// string being split must ensure that it outlives the collection), or any
+// object that can be explicitly created from an `y_absl::string_view`. This
+// behavior works for:
+//
+// 1) All standard STL containers including `std::vector`, `std::list`,
+//      `std::deque`, `std::set`, `std::multiset`, `std::map`, and `std::multimap`
+// 2) `std::pair` (which is not actually a container). See below.
+//
+// Example:
+//
+// // The results are returned as `y_absl::string_view` objects. Note that we
+// // have to ensure that the input string outlives any results.
+// std::vector<y_absl::string_view> v = y_absl::StrSplit("a,b,c", ',');
+//
+// // Stores results in a std::set<TString>, which also performs
+// // de-duplication and orders the elements in ascending order.
+// std::set<TString> a = y_absl::StrSplit("b,a,c,a,b", ',');
+// // v[0] == "a", v[1] == "b", v[2] = "c"
+//
+// // `StrSplit()` can be used within a range-based for loop, in which case
+// // each element will be of type `y_absl::string_view`.
+// std::vector<TString> v;
+// for (const auto sv : y_absl::StrSplit("a,b,c", ',')) {
+// if (sv != "b") v.emplace_back(sv);
+// }
+// // v[0] == "a", v[1] == "c"
+//
+// // Stores results in a map. The map implementation assumes that the input
+// // is provided as a series of key/value pairs. For example, the 0th element
+// // resulting from the split will be stored as a key to the 1st element. If
+// // an odd number of elements are resolved, the last element is paired with
+// // a default-constructed value (e.g., empty string).
+// std::map<TString, TString> m = y_absl::StrSplit("a,b,c", ',');
+// // m["a"] == "b", m["c"] == "" // last component value equals ""
+//
+// Splitting to `std::pair` is an interesting case because it can hold only two
+// elements and is not a collection type. When splitting to a `std::pair` the
+// first two split strings become the `std::pair` `.first` and `.second`
+// members, respectively. The remaining split substrings are discarded. If
+// there are fewer than two split substrings, the empty string is used for the
+// corresponding `std::pair` member.
+//
+// Example:
+//
+// // Stores first two split strings as the members in a std::pair.
+// std::pair<TString, TString> p = y_absl::StrSplit("a,b,c", ',');
+// // p.first == "a", p.second == "b" // "c" is omitted.
+//
+// The `StrSplit()` function can be used multiple times to perform more
+// complicated splitting logic, such as intelligently parsing key-value pairs.
+//
+// Example:
+//
+// // The input string "a=b=c,d=e,f=,g" becomes
+// // { "a" => "b=c", "d" => "e", "f" => "", "g" => "" }
+// std::map<TString, TString> m;
+// for (y_absl::string_view sp : y_absl::StrSplit("a=b=c,d=e,f=,g", ',')) {
+// m.insert(y_absl::StrSplit(sp, y_absl::MaxSplits('=', 1)));
+// }
+// EXPECT_EQ("b=c", m.find("a")->second);
+// EXPECT_EQ("e", m.find("d")->second);
+// EXPECT_EQ("", m.find("f")->second);
+// EXPECT_EQ("", m.find("g")->second);
+//
+// WARNING: Due to a legacy bug that is maintained for backward compatibility,
+// splitting the following empty string_views produces different results:
+//
+// y_absl::StrSplit(y_absl::string_view(""), '-'); // {""}
+// y_absl::StrSplit(y_absl::string_view(), '-'); // {}, but should be {""}
+//
+// Try not to depend on this distinction because the bug may one day be fixed.
+template <typename Delimiter>
+strings_internal::Splitter<
+ typename strings_internal::SelectDelimiter<Delimiter>::type, AllowEmpty,
+ y_absl::string_view>
+StrSplit(strings_internal::ConvertibleToStringView text, Delimiter d) {
+ using DelimiterType =
+ typename strings_internal::SelectDelimiter<Delimiter>::type;
+ return strings_internal::Splitter<DelimiterType, AllowEmpty,
+ y_absl::string_view>(
+ text.value(), DelimiterType(d), AllowEmpty());
+}
+
+template <typename Delimiter, typename StringType,
+ EnableSplitIfString<StringType> = 0>
+strings_internal::Splitter<
+ typename strings_internal::SelectDelimiter<Delimiter>::type, AllowEmpty,
+ TString>
+StrSplit(StringType&& text, Delimiter d) {
+ using DelimiterType =
+ typename strings_internal::SelectDelimiter<Delimiter>::type;
+ return strings_internal::Splitter<DelimiterType, AllowEmpty, TString>(
+ std::move(text), DelimiterType(d), AllowEmpty());
+}
+
+template <typename Delimiter, typename Predicate>
+strings_internal::Splitter<
+ typename strings_internal::SelectDelimiter<Delimiter>::type, Predicate,
+ y_absl::string_view>
+StrSplit(strings_internal::ConvertibleToStringView text, Delimiter d,
+ Predicate p) {
+ using DelimiterType =
+ typename strings_internal::SelectDelimiter<Delimiter>::type;
+ return strings_internal::Splitter<DelimiterType, Predicate,
+ y_absl::string_view>(
+ text.value(), DelimiterType(d), std::move(p));
+}
+
+template <typename Delimiter, typename Predicate, typename StringType,
+ EnableSplitIfString<StringType> = 0>
+strings_internal::Splitter<
+ typename strings_internal::SelectDelimiter<Delimiter>::type, Predicate,
+ TString>
+StrSplit(StringType&& text, Delimiter d, Predicate p) {
+ using DelimiterType =
+ typename strings_internal::SelectDelimiter<Delimiter>::type;
+ return strings_internal::Splitter<DelimiterType, Predicate, TString>(
+ std::move(text), DelimiterType(d), std::move(p));
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_STR_SPLIT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc
new file mode 100644
index 00000000000..9893c7ab991
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc
@@ -0,0 +1,230 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/string_view.h"
+
+#ifndef ABSL_USES_STD_STRING_VIEW
+
+#include <algorithm>
+#include <climits>
+#include <cstring>
+#include <ostream>
+
+#include "y_absl/strings/internal/memutil.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace {
+void WritePadding(std::ostream& o, size_t pad) {
+ char fill_buf[32];
+ memset(fill_buf, o.fill(), sizeof(fill_buf));
+ while (pad) {
+ size_t n = std::min(pad, sizeof(fill_buf));
+ o.write(fill_buf, n);
+ pad -= n;
+ }
+}
+
+class LookupTable {
+ public:
+ // For each character in wanted, sets the index corresponding
+ // to the ASCII code of that character. This is used by
+ // the find_.*_of methods below to tell whether or not a character is in
+ // the lookup table in constant time.
+ explicit LookupTable(string_view wanted) {
+ for (char c : wanted) {
+ table_[Index(c)] = true;
+ }
+ }
+ bool operator[](char c) const { return table_[Index(c)]; }
+
+ private:
+ static unsigned char Index(char c) { return static_cast<unsigned char>(c); }
+ bool table_[UCHAR_MAX + 1] = {};
+};
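+
+// Illustrative sketch (not part of the upstream source): the table reduces
+// each "is this character in `wanted`?" test to a constant-time array lookup.
+//
+//   LookupTable tbl(string_view("ab"));
+//   tbl['a'];  // true
+//   tbl['c'];  // false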
+
+} // namespace
+
+std::ostream& operator<<(std::ostream& o, string_view piece) {
+ std::ostream::sentry sentry(o);
+ if (sentry) {
+ size_t lpad = 0;
+ size_t rpad = 0;
+ if (static_cast<size_t>(o.width()) > piece.size()) {
+ size_t pad = o.width() - piece.size();
+ if ((o.flags() & o.adjustfield) == o.left) {
+ rpad = pad;
+ } else {
+ lpad = pad;
+ }
+ }
+ if (lpad) WritePadding(o, lpad);
+ o.write(piece.data(), piece.size());
+ if (rpad) WritePadding(o, rpad);
+ o.width(0);
+ }
+ return o;
+}
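+
+// Illustrative sketch (not part of the upstream source) of the width/fill
+// handling above, using the standard iostream manipulators:
+//
+//   std::ostringstream os;
+//   os << std::setw(5) << y_absl::string_view("ab");               // "   ab"
+//   os << std::left << std::setw(5) << y_absl::string_view("ab");  // "ab   "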
+
+string_view::size_type string_view::find(string_view s,
+ size_type pos) const noexcept {
+ if (empty() || pos > length_) {
+ if (empty() && pos == 0 && s.empty()) return 0;
+ return npos;
+ }
+ const char* result =
+ strings_internal::memmatch(ptr_ + pos, length_ - pos, s.ptr_, s.length_);
+ return result ? result - ptr_ : npos;
+}
+
+string_view::size_type string_view::find(char c, size_type pos) const noexcept {
+ if (empty() || pos >= length_) {
+ return npos;
+ }
+ const char* result =
+ static_cast<const char*>(memchr(ptr_ + pos, c, length_ - pos));
+ return result != nullptr ? result - ptr_ : npos;
+}
+
+string_view::size_type string_view::rfind(string_view s,
+ size_type pos) const noexcept {
+ if (length_ < s.length_) return npos;
+ if (s.empty()) return std::min(length_, pos);
+ const char* last = ptr_ + std::min(length_ - s.length_, pos) + s.length_;
+ const char* result = std::find_end(ptr_, last, s.ptr_, s.ptr_ + s.length_);
+ return result != last ? result - ptr_ : npos;
+}
+
+// Search range is [0..pos] inclusive. If pos == npos, search everything.
+string_view::size_type string_view::rfind(char c,
+ size_type pos) const noexcept {
+ // Note: memrchr() is not available on Windows.
+ if (empty()) return npos;
+ for (size_type i = std::min(pos, length_ - 1);; --i) {
+ if (ptr_[i] == c) {
+ return i;
+ }
+ if (i == 0) break;
+ }
+ return npos;
+}
+
+string_view::size_type string_view::find_first_of(
+ string_view s, size_type pos) const noexcept {
+ if (empty() || s.empty()) {
+ return npos;
+ }
+ // Avoid the cost of LookupTable() for a single-character search.
+ if (s.length_ == 1) return find_first_of(s.ptr_[0], pos);
+ LookupTable tbl(s);
+ for (size_type i = pos; i < length_; ++i) {
+ if (tbl[ptr_[i]]) {
+ return i;
+ }
+ }
+ return npos;
+}
+
+string_view::size_type string_view::find_first_not_of(
+ string_view s, size_type pos) const noexcept {
+ if (empty()) return npos;
+ // Avoid the cost of LookupTable() for a single-character search.
+ if (s.length_ == 1) return find_first_not_of(s.ptr_[0], pos);
+ LookupTable tbl(s);
+ for (size_type i = pos; i < length_; ++i) {
+ if (!tbl[ptr_[i]]) {
+ return i;
+ }
+ }
+ return npos;
+}
+
+string_view::size_type string_view::find_first_not_of(
+ char c, size_type pos) const noexcept {
+ if (empty()) return npos;
+ for (; pos < length_; ++pos) {
+ if (ptr_[pos] != c) {
+ return pos;
+ }
+ }
+ return npos;
+}
+
+string_view::size_type string_view::find_last_of(string_view s,
+ size_type pos) const noexcept {
+ if (empty() || s.empty()) return npos;
+ // Avoid the cost of LookupTable() for a single-character search.
+ if (s.length_ == 1) return find_last_of(s.ptr_[0], pos);
+ LookupTable tbl(s);
+ for (size_type i = std::min(pos, length_ - 1);; --i) {
+ if (tbl[ptr_[i]]) {
+ return i;
+ }
+ if (i == 0) break;
+ }
+ return npos;
+}
+
+string_view::size_type string_view::find_last_not_of(
+ string_view s, size_type pos) const noexcept {
+ if (empty()) return npos;
+ size_type i = std::min(pos, length_ - 1);
+ if (s.empty()) return i;
+ // Avoid the cost of LookupTable() for a single-character search.
+ if (s.length_ == 1) return find_last_not_of(s.ptr_[0], pos);
+ LookupTable tbl(s);
+ for (;; --i) {
+ if (!tbl[ptr_[i]]) {
+ return i;
+ }
+ if (i == 0) break;
+ }
+ return npos;
+}
+
+string_view::size_type string_view::find_last_not_of(
+ char c, size_type pos) const noexcept {
+ if (empty()) return npos;
+ size_type i = std::min(pos, length_ - 1);
+ for (;; --i) {
+ if (ptr_[i] != c) {
+ return i;
+ }
+ if (i == 0) break;
+ }
+ return npos;
+}
+
+// MSVC has non-standard behavior that implicitly creates definitions for static
+// const members. These implicit definitions conflict with explicit out-of-class
+// member definitions that are required by the C++ standard, resulting in
+// LNK1169 "multiply defined" errors at link time. __declspec(selectany) asks
+// MSVC to choose only one definition for the symbol it decorates. See details
+// at https://msdn.microsoft.com/en-us/library/34h23df8(v=vs.100).aspx
+#ifdef _MSC_VER
+#define ABSL_STRING_VIEW_SELECTANY __declspec(selectany)
+#else
+#define ABSL_STRING_VIEW_SELECTANY
+#endif
+
+ABSL_STRING_VIEW_SELECTANY
+constexpr string_view::size_type string_view::npos;
+ABSL_STRING_VIEW_SELECTANY
+constexpr string_view::size_type string_view::kMaxSize;
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_USES_STD_STRING_VIEW
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h
new file mode 100644
index 00000000000..c3906fe1c5a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h
@@ -0,0 +1,712 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: string_view.h
+// -----------------------------------------------------------------------------
+//
+// This file contains the definition of the `y_absl::string_view` class. A
+// `string_view` points to a contiguous span of characters, often part or all of
+// another `TString`, double-quoted string literal, character array, or even
+// another `string_view`.
+//
+// This `y_absl::string_view` abstraction is designed to be a drop-in
+// replacement for the C++17 `std::string_view` abstraction.
+#ifndef ABSL_STRINGS_STRING_VIEW_H_
+#define ABSL_STRINGS_STRING_VIEW_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <iosfwd>
+#include <iterator>
+#include <limits>
+#include <util/generic/string.h>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/throw_delegate.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/base/port.h"
+
+#ifdef ABSL_USES_STD_STRING_VIEW
+
+#include <string_view> // IWYU pragma: export
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+using string_view = std::string_view;
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else // ABSL_USES_STD_STRING_VIEW
+
+#error "std::string_view should be used in all configurations"
+
+#if ABSL_HAVE_BUILTIN(__builtin_memcmp) || \
+ (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_INTERNAL_STRING_VIEW_MEMCMP __builtin_memcmp
+#else // ABSL_HAVE_BUILTIN(__builtin_memcmp)
+#define ABSL_INTERNAL_STRING_VIEW_MEMCMP memcmp
+#endif // ABSL_HAVE_BUILTIN(__builtin_memcmp)
+
+#if defined(__cplusplus) && __cplusplus >= 201402L
+#define ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR constexpr
+#else
+#define ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// y_absl::string_view
+//
+// A `string_view` provides a lightweight view into the string data provided by
+// a `TString`, double-quoted string literal, character array, or even
+// another `string_view`. A `string_view` does *not* own the string to which it
+// points, and that data cannot be modified through the view.
+//
+// You can use `string_view` as a function or method parameter anywhere a
+// parameter can receive a double-quoted string literal, `const char*`,
+// `TString`, or another `y_absl::string_view` argument with no need to copy
+// the string data. Systematic use of `string_view` within function arguments
+// reduces data copies and `strlen()` calls.
+//
+// Because of its small size, prefer passing `string_view` by value:
+//
+// void MyFunction(y_absl::string_view arg);
+//
+// If circumstances require, you may also pass one by const reference:
+//
+// void MyFunction(const y_absl::string_view& arg); // not preferred
+//
+// Passing by value generates slightly smaller code for many architectures.
+//
+// In either case, the source data of the `string_view` must outlive the
+// `string_view` itself.
+//
+// A `string_view` is also suitable for local variables if you know that the
+// lifetime of the underlying object is longer than the lifetime of your
+// `string_view` variable. However, beware of binding a `string_view` to a
+// temporary value:
+//
+// // BAD use of string_view: lifetime problem
+// y_absl::string_view sv = obj.ReturnAString();
+//
+// // GOOD use of string_view: str outlives sv
+// TString str = obj.ReturnAString();
+// y_absl::string_view sv = str;
+//
+// Due to lifetime issues, a `string_view` is sometimes a poor choice for a
+// return value and usually a poor choice for a data member. If you do use a
+// `string_view` this way, it is your responsibility to ensure that the object
+// pointed to by the `string_view` outlives the `string_view`.
+//
+// A `string_view` may represent a whole string or just part of a string. For
+// example, when splitting a string, `std::vector<y_absl::string_view>` is a
+// natural data type for the output.
+//
+// For another example, a Cord is a non-contiguous, potentially very
+// long string-like object. The Cord class has an interface that iteratively
+// provides string_view objects that point to the successive pieces of a Cord
+// object.
+//
+// When constructed from a source which is NUL-terminated, the `string_view`
+// itself will not include the NUL-terminator unless a specific size (including
+// the NUL) is passed to the constructor. As a result, common idioms that work
+// on NUL-terminated strings do not work on `string_view` objects. If you write
+// code that scans a `string_view`, you must check its length rather than test
+// for nul, for example. Note, however, that nuls may still be embedded within
+// a `string_view` explicitly.
+//
+// You may create a null `string_view` in two ways:
+//
+// y_absl::string_view sv;
+// y_absl::string_view sv(nullptr, 0);
+//
+// For the above, `sv.data() == nullptr`, `sv.length() == 0`, and
+// `sv.empty() == true`. Also, if you create a `string_view` with a non-null
+// pointer then `sv.data() != nullptr`. Thus, you can use `string_view()` to
+// signal an undefined value that is different from other `string_view` values
+// in a similar fashion to how `const char* p1 = nullptr;` is different from
+// `const char* p2 = "";`. However, in practice, it is not recommended to rely
+// on this behavior.
+//
+// Be careful not to confuse a null `string_view` with an empty one. A null
+// `string_view` is an empty `string_view`, but some empty `string_view`s are
+// not null. Prefer checking for emptiness over checking for null.
+//
+// There are many ways to create an empty string_view:
+//
+// const char* nullcp = nullptr;
+// // string_view.size() will return 0 in all cases.
+// y_absl::string_view();
+// y_absl::string_view(nullcp, 0);
+// y_absl::string_view("");
+// y_absl::string_view("", 0);
+// y_absl::string_view("abcdef", 0);
+// y_absl::string_view("abcdef" + 6, 0);
+//
+// All empty `string_view` objects, whether null or not, are equal:
+//
+// y_absl::string_view() == y_absl::string_view("", 0)
+// y_absl::string_view(nullptr, 0) == y_absl::string_view("abcdef"+6, 0)
+class string_view {
+ public:
+ using traits_type = std::char_traits<char>;
+ using value_type = char;
+ using pointer = char*;
+ using const_pointer = const char*;
+ using reference = char&;
+ using const_reference = const char&;
+ using const_iterator = const char*;
+ using iterator = const_iterator;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using reverse_iterator = const_reverse_iterator;
+ using size_type = size_t;
+ using difference_type = std::ptrdiff_t;
+
+ static constexpr size_type npos = static_cast<size_type>(-1);
+
+ // Null `string_view` constructor
+ constexpr string_view() noexcept : ptr_(nullptr), length_(0) {}
+
+ // Implicit constructors
+
+ template <typename Allocator>
+ string_view( // NOLINT(runtime/explicit)
+ const std::basic_string<char, std::char_traits<char>, Allocator>& str
+ ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept
+ // This is implemented in terms of `string_view(p, n)` so `str.size()`
+ // doesn't need to be reevaluated after `ptr_` is set.
+ // The length check is also skipped since it is unnecessary and causes
+ // code bloat.
+ : string_view(str.data(), str.size(), SkipCheckLengthTag{}) {}
+
+ // Implicit constructor of a `string_view` from NUL-terminated `str`. When
+ // accepting possibly null strings, use `y_absl::NullSafeStringView(str)`
+ // instead (see below).
+ // The length check is skipped since it is unnecessary and causes code bloat.
+ constexpr string_view(const char* str) // NOLINT(runtime/explicit)
+ : ptr_(str), length_(str ? StrlenInternal(str) : 0) {}
+
+ // Implicit constructor of a `string_view` from a `const char*` and length.
+ constexpr string_view(const char* data, size_type len)
+ : ptr_(data), length_(CheckLengthInternal(len)) {}
+
+ // NOTE: Harmlessly omitted to work around gdb bug.
+ // constexpr string_view(const string_view&) noexcept = default;
+ // string_view& operator=(const string_view&) noexcept = default;
+
+ // Iterators
+
+ // string_view::begin()
+ //
+ // Returns an iterator pointing to the first character at the beginning of the
+ // `string_view`, or `end()` if the `string_view` is empty.
+ constexpr const_iterator begin() const noexcept { return ptr_; }
+
+ // string_view::end()
+ //
+ // Returns an iterator pointing just beyond the last character at the end of
+ // the `string_view`. This iterator acts as a placeholder; attempting to
+ // access it results in undefined behavior.
+ constexpr const_iterator end() const noexcept { return ptr_ + length_; }
+
+ // string_view::cbegin()
+ //
+ // Returns a const iterator pointing to the first character at the beginning
+ // of the `string_view`, or `end()` if the `string_view` is empty.
+ constexpr const_iterator cbegin() const noexcept { return begin(); }
+
+ // string_view::cend()
+ //
+ // Returns a const iterator pointing just beyond the last character at the end
+ // of the `string_view`. This pointer acts as a placeholder; attempting to
+ // access its element results in undefined behavior.
+ constexpr const_iterator cend() const noexcept { return end(); }
+
+ // string_view::rbegin()
+ //
+ // Returns a reverse iterator pointing to the last character at the end of the
+ // `string_view`, or `rend()` if the `string_view` is empty.
+ const_reverse_iterator rbegin() const noexcept {
+ return const_reverse_iterator(end());
+ }
+
+ // string_view::rend()
+ //
+ // Returns a reverse iterator pointing just before the first character at the
+ // beginning of the `string_view`. This pointer acts as a placeholder;
+ // attempting to access its element results in undefined behavior.
+ const_reverse_iterator rend() const noexcept {
+ return const_reverse_iterator(begin());
+ }
+
+ // string_view::crbegin()
+ //
+ // Returns a const reverse iterator pointing to the last character at the end
+ // of the `string_view`, or `crend()` if the `string_view` is empty.
+ const_reverse_iterator crbegin() const noexcept { return rbegin(); }
+
+ // string_view::crend()
+ //
+ // Returns a const reverse iterator pointing just before the first character
+ // at the beginning of the `string_view`. This pointer acts as a placeholder;
+ // attempting to access its element results in undefined behavior.
+ const_reverse_iterator crend() const noexcept { return rend(); }
+
+ // Capacity Utilities
+
+ // string_view::size()
+ //
+ // Returns the number of characters in the `string_view`.
+ constexpr size_type size() const noexcept { return length_; }
+
+ // string_view::length()
+ //
+ // Returns the number of characters in the `string_view`. Alias for `size()`.
+ constexpr size_type length() const noexcept { return size(); }
+
+ // string_view::max_size()
+ //
+ // Returns the maximum number of characters the `string_view` can hold.
+ constexpr size_type max_size() const noexcept { return kMaxSize; }
+
+ // string_view::empty()
+ //
+ // Checks if the `string_view` is empty (refers to no characters).
+ constexpr bool empty() const noexcept { return length_ == 0; }
+
+ // string_view::operator[]
+ //
+ // Returns the ith element of the `string_view` using the array operator.
+ // Note that this operator does not perform any bounds checking.
+ constexpr const_reference operator[](size_type i) const {
+ return ABSL_HARDENING_ASSERT(i < size()), ptr_[i];
+ }
+
+ // string_view::at()
+ //
+ // Returns the ith element of the `string_view`. Bounds checking is performed,
+ // and an exception of type `std::out_of_range` will be thrown on invalid
+ // access.
+ constexpr const_reference at(size_type i) const {
+ return ABSL_PREDICT_TRUE(i < size())
+ ? ptr_[i]
+ : ((void)base_internal::ThrowStdOutOfRange(
+ "y_absl::string_view::at"),
+ ptr_[i]);
+ }
+
+ // string_view::front()
+ //
+ // Returns the first element of a `string_view`.
+ constexpr const_reference front() const {
+ return ABSL_HARDENING_ASSERT(!empty()), ptr_[0];
+ }
+
+ // string_view::back()
+ //
+ // Returns the last element of a `string_view`.
+ constexpr const_reference back() const {
+ return ABSL_HARDENING_ASSERT(!empty()), ptr_[size() - 1];
+ }
+
+ // string_view::data()
+ //
+ // Returns a pointer to the underlying character array (which is of course
+  // stored elsewhere). Note that the viewed data may contain embedded NUL
+  // characters, and the returned buffer may or may not be NUL-terminated;
+ // therefore, do not pass `data()` to a routine that expects a NUL-terminated
+ // string.
+ constexpr const_pointer data() const noexcept { return ptr_; }
+
+ // Modifiers
+
+ // string_view::remove_prefix()
+ //
+ // Removes the first `n` characters from the `string_view`. Note that the
+ // underlying string is not changed, only the view.
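+  //
+  // Example (an illustrative addition, not upstream documentation):
+  //
+  //   y_absl::string_view sv("abcdef");
+  //   sv.remove_prefix(2);  // sv == "cdef"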
+ ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR void remove_prefix(size_type n) {
+ ABSL_HARDENING_ASSERT(n <= length_);
+ ptr_ += n;
+ length_ -= n;
+ }
+
+ // string_view::remove_suffix()
+ //
+ // Removes the last `n` characters from the `string_view`. Note that the
+ // underlying string is not changed, only the view.
+ ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR void remove_suffix(size_type n) {
+ ABSL_HARDENING_ASSERT(n <= length_);
+ length_ -= n;
+ }
+
+ // string_view::swap()
+ //
+ // Swaps this `string_view` with another `string_view`.
+ ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR void swap(string_view& s) noexcept {
+ auto t = *this;
+ *this = s;
+ s = t;
+ }
+
+ // Explicit conversion operators
+
+ // Converts to `std::basic_string`.
+ template <typename A>
+ explicit operator std::basic_string<char, traits_type, A>() const {
+ if (!data()) return {};
+ return std::basic_string<char, traits_type, A>(data(), size());
+ }
+
+ // string_view::copy()
+ //
+ // Copies the contents of the `string_view` at offset `pos` and length `n`
+ // into `buf`.
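+  //
+  // Example (an illustrative addition, not upstream documentation):
+  //
+  //   char buf[3];
+  //   y_absl::string_view sv("abcdef");
+  //   size_t n = sv.copy(buf, 3, 1);  // buf holds 'b', 'c', 'd'; n == 3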
+ size_type copy(char* buf, size_type n, size_type pos = 0) const {
+ if (ABSL_PREDICT_FALSE(pos > length_)) {
+ base_internal::ThrowStdOutOfRange("y_absl::string_view::copy");
+ }
+ size_type rlen = (std::min)(length_ - pos, n);
+ if (rlen > 0) {
+ const char* start = ptr_ + pos;
+ traits_type::copy(buf, start, rlen);
+ }
+ return rlen;
+ }
+
+ // string_view::substr()
+ //
+ // Returns a "substring" of the `string_view` (at offset `pos` and length
+  // `n`) as another string_view. This function throws `std::out_of_range` if
+  // `pos > size()`.
+ // Use y_absl::ClippedSubstr if you need a truncating substr operation.
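+  //
+  // Example (an illustrative addition, not upstream documentation):
+  //
+  //   y_absl::string_view sv("abcdef");
+  //   sv.substr(2, 3);  // "cde"
+  //   sv.substr(4);     // "ef"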
+ constexpr string_view substr(size_type pos = 0, size_type n = npos) const {
+ return ABSL_PREDICT_FALSE(pos > length_)
+ ? (base_internal::ThrowStdOutOfRange(
+ "y_absl::string_view::substr"),
+ string_view())
+ : string_view(ptr_ + pos, Min(n, length_ - pos));
+ }
+
+ // string_view::compare()
+ //
+ // Performs a lexicographical comparison between this `string_view` and
+ // another `string_view` `x`, returning a negative value if `*this` is less
+ // than `x`, 0 if `*this` is equal to `x`, and a positive value if `*this`
+ // is greater than `x`.
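+  //
+  // Example (an illustrative addition, not upstream documentation):
+  //
+  //   y_absl::string_view("abc").compare("abd") < 0;   // true
+  //   y_absl::string_view("abcd").compare("abc") > 0;  // true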
+ constexpr int compare(string_view x) const noexcept {
+ return CompareImpl(length_, x.length_,
+ Min(length_, x.length_) == 0
+ ? 0
+ : ABSL_INTERNAL_STRING_VIEW_MEMCMP(
+ ptr_, x.ptr_, Min(length_, x.length_)));
+ }
+
+ // Overload of `string_view::compare()` for comparing a substring of the
+  // `string_view` and another `y_absl::string_view`.
+ constexpr int compare(size_type pos1, size_type count1, string_view v) const {
+ return substr(pos1, count1).compare(v);
+ }
+
+ // Overload of `string_view::compare()` for comparing a substring of the
+ // `string_view` and a substring of another `y_absl::string_view`.
+ constexpr int compare(size_type pos1, size_type count1, string_view v,
+ size_type pos2, size_type count2) const {
+ return substr(pos1, count1).compare(v.substr(pos2, count2));
+ }
+
+  // Overload of `string_view::compare()` for comparing a `string_view` and a
+  // different C-style string `s`.
+ constexpr int compare(const char* s) const { return compare(string_view(s)); }
+
+  // Overload of `string_view::compare()` for comparing a substring of the
+  // `string_view` and a different C-style string `s`.
+ constexpr int compare(size_type pos1, size_type count1, const char* s) const {
+ return substr(pos1, count1).compare(string_view(s));
+ }
+
+ // Overload of `string_view::compare()` for comparing a substring of the
+ // `string_view` and a substring of a different C-style string `s`.
+ constexpr int compare(size_type pos1, size_type count1, const char* s,
+ size_type count2) const {
+ return substr(pos1, count1).compare(string_view(s, count2));
+ }
+
+ // Find Utilities
+
+ // string_view::find()
+ //
+ // Finds the first occurrence of the substring `s` within the `string_view`,
+ // returning the position of the first character's match, or `npos` if no
+ // match was found.
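+  //
+  // Example:
+  //
+  //   y_absl::string_view v = "abcdcd";
+  //   v.find("cd");     // 2
+  //   v.find("cd", 3);  // 4
+  //   v.find("xy");     // y_absl::string_view::npos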
+ size_type find(string_view s, size_type pos = 0) const noexcept;
+
+ // Overload of `string_view::find()` for finding the given character `c`
+ // within the `string_view`.
+ size_type find(char c, size_type pos = 0) const noexcept;
+
+ // Overload of `string_view::find()` for finding a substring of a different
+ // C-style string `s` within the `string_view`.
+ size_type find(const char* s, size_type pos, size_type count) const {
+ return find(string_view(s, count), pos);
+ }
+
+ // Overload of `string_view::find()` for finding a different C-style string
+ // `s` within the `string_view`.
+ size_type find(const char* s, size_type pos = 0) const {
+ return find(string_view(s), pos);
+ }
+
+ // string_view::rfind()
+ //
+ // Finds the last occurrence of a substring `s` within the `string_view`,
+ // returning the position of the first character's match, or `npos` if no
+ // match was found.
+ size_type rfind(string_view s, size_type pos = npos) const noexcept;
+
+ // Overload of `string_view::rfind()` for finding the last given character `c`
+ // within the `string_view`.
+ size_type rfind(char c, size_type pos = npos) const noexcept;
+
+ // Overload of `string_view::rfind()` for finding a substring of a different
+ // C-style string `s` within the `string_view`.
+ size_type rfind(const char* s, size_type pos, size_type count) const {
+ return rfind(string_view(s, count), pos);
+ }
+
+ // Overload of `string_view::rfind()` for finding a different C-style string
+ // `s` within the `string_view`.
+ size_type rfind(const char* s, size_type pos = npos) const {
+ return rfind(string_view(s), pos);
+ }
+
+ // string_view::find_first_of()
+ //
+ // Finds the first occurrence of any of the characters in `s` within the
+ // `string_view`, returning the start position of the match, or `npos` if no
+ // match was found.
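+  //
+  // Example:
+  //
+  //   y_absl::string_view v = "abcde";
+  //   v.find_first_of("dc");  // 2, the position of 'c'
+  //   v.find_first_of("xy");  // y_absl::string_view::npos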
+ size_type find_first_of(string_view s, size_type pos = 0) const noexcept;
+
+ // Overload of `string_view::find_first_of()` for finding a character `c`
+ // within the `string_view`.
+ size_type find_first_of(char c, size_type pos = 0) const noexcept {
+ return find(c, pos);
+ }
+
+ // Overload of `string_view::find_first_of()` for finding a substring of a
+ // different C-style string `s` within the `string_view`.
+ size_type find_first_of(const char* s, size_type pos,
+ size_type count) const {
+ return find_first_of(string_view(s, count), pos);
+ }
+
+ // Overload of `string_view::find_first_of()` for finding a different C-style
+ // string `s` within the `string_view`.
+ size_type find_first_of(const char* s, size_type pos = 0) const {
+ return find_first_of(string_view(s), pos);
+ }
+
+ // string_view::find_last_of()
+ //
+ // Finds the last occurrence of any of the characters in `s` within the
+ // `string_view`, returning the start position of the match, or `npos` if no
+ // match was found.
+ size_type find_last_of(string_view s, size_type pos = npos) const noexcept;
+
+ // Overload of `string_view::find_last_of()` for finding a character `c`
+ // within the `string_view`.
+ size_type find_last_of(char c, size_type pos = npos) const noexcept {
+ return rfind(c, pos);
+ }
+
+ // Overload of `string_view::find_last_of()` for finding a substring of a
+ // different C-style string `s` within the `string_view`.
+ size_type find_last_of(const char* s, size_type pos, size_type count) const {
+ return find_last_of(string_view(s, count), pos);
+ }
+
+ // Overload of `string_view::find_last_of()` for finding a different C-style
+ // string `s` within the `string_view`.
+ size_type find_last_of(const char* s, size_type pos = npos) const {
+ return find_last_of(string_view(s), pos);
+ }
+
+ // string_view::find_first_not_of()
+ //
+ // Finds the first occurrence of any of the characters not in `s` within the
+ // `string_view`, returning the start position of the first non-match, or
+ // `npos` if no non-match was found.
+ size_type find_first_not_of(string_view s, size_type pos = 0) const noexcept;
+
+ // Overload of `string_view::find_first_not_of()` for finding a character
+ // that is not `c` within the `string_view`.
+ size_type find_first_not_of(char c, size_type pos = 0) const noexcept;
+
+ // Overload of `string_view::find_first_not_of()` for finding a substring of a
+ // different C-style string `s` within the `string_view`.
+ size_type find_first_not_of(const char* s, size_type pos,
+ size_type count) const {
+ return find_first_not_of(string_view(s, count), pos);
+ }
+
+ // Overload of `string_view::find_first_not_of()` for finding a different
+ // C-style string `s` within the `string_view`.
+ size_type find_first_not_of(const char* s, size_type pos = 0) const {
+ return find_first_not_of(string_view(s), pos);
+ }
+
+ // string_view::find_last_not_of()
+ //
+ // Finds the last occurrence of any of the characters not in `s` within the
+ // `string_view`, returning the start position of the last non-match, or
+ // `npos` if no non-match was found.
+ size_type find_last_not_of(string_view s,
+ size_type pos = npos) const noexcept;
+
+ // Overload of `string_view::find_last_not_of()` for finding a character
+ // that is not `c` within the `string_view`.
+ size_type find_last_not_of(char c, size_type pos = npos) const noexcept;
+
+ // Overload of `string_view::find_last_not_of()` for finding a substring of a
+ // different C-style string `s` within the `string_view`.
+ size_type find_last_not_of(const char* s, size_type pos,
+ size_type count) const {
+ return find_last_not_of(string_view(s, count), pos);
+ }
+
+ // Overload of `string_view::find_last_not_of()` for finding a different
+ // C-style string `s` within the `string_view`.
+ size_type find_last_not_of(const char* s, size_type pos = npos) const {
+ return find_last_not_of(string_view(s), pos);
+ }
+
+ private:
+ // The constructor from TString delegates to this constructor.
+ // See the comment on that constructor for the rationale.
+ struct SkipCheckLengthTag {};
+ string_view(const char* data, size_type len, SkipCheckLengthTag) noexcept
+ : ptr_(data), length_(len) {}
+
+ static constexpr size_type kMaxSize =
+ (std::numeric_limits<difference_type>::max)();
+
+ static constexpr size_type CheckLengthInternal(size_type len) {
+ return ABSL_HARDENING_ASSERT(len <= kMaxSize), len;
+ }
+
+ static constexpr size_type StrlenInternal(const char* str) {
+#if defined(_MSC_VER) && _MSC_VER >= 1910 && !defined(__clang__)
+ // MSVC 2017+ can evaluate this at compile-time.
+ const char* begin = str;
+ while (*str != '\0') ++str;
+ return str - begin;
+#elif ABSL_HAVE_BUILTIN(__builtin_strlen) || \
+ (defined(__GNUC__) && !defined(__clang__))
+ // GCC has __builtin_strlen according to
+ // https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Other-Builtins.html, but
+ // ABSL_HAVE_BUILTIN doesn't detect that, so we use the extra checks above.
+ // __builtin_strlen is constexpr.
+ return __builtin_strlen(str);
+#else
+ return str ? strlen(str) : 0;
+#endif
+ }
+
+ static constexpr size_t Min(size_type length_a, size_type length_b) {
+ return length_a < length_b ? length_a : length_b;
+ }
+
+ static constexpr int CompareImpl(size_type length_a, size_type length_b,
+ int compare_result) {
+ return compare_result == 0 ? static_cast<int>(length_a > length_b) -
+ static_cast<int>(length_a < length_b)
+ : (compare_result < 0 ? -1 : 1);
+ }
+
+ const char* ptr_;
+ size_type length_;
+};
+
+// This large function is defined inline so that in a fairly common case where
+// one of the arguments is a literal, the compiler can elide a lot of the
+// following comparisons.
+constexpr bool operator==(string_view x, string_view y) noexcept {
+ return x.size() == y.size() &&
+ (x.empty() ||
+ ABSL_INTERNAL_STRING_VIEW_MEMCMP(x.data(), y.data(), x.size()) == 0);
+}
+
+constexpr bool operator!=(string_view x, string_view y) noexcept {
+ return !(x == y);
+}
+
+constexpr bool operator<(string_view x, string_view y) noexcept {
+ return x.compare(y) < 0;
+}
+
+constexpr bool operator>(string_view x, string_view y) noexcept {
+ return y < x;
+}
+
+constexpr bool operator<=(string_view x, string_view y) noexcept {
+ return !(y < x);
+}
+
+constexpr bool operator>=(string_view x, string_view y) noexcept {
+ return !(x < y);
+}
+
+// IO Insertion Operator
+std::ostream& operator<<(std::ostream& o, string_view piece);
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#undef ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR
+#undef ABSL_INTERNAL_STRING_VIEW_MEMCMP
+
+#endif // ABSL_USES_STD_STRING_VIEW
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// ClippedSubstr()
+//
+// Like `s.substr(pos, n)`, but clips `pos` to an upper bound of `s.size()`.
+// Provided because `std::string_view::substr` throws if `pos > size()`.
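+//
+// Example:
+//
+//   y_absl::string_view v = "abc";
+//   y_absl::ClippedSubstr(v, 10);  // returns "" rather than throwing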
+inline string_view ClippedSubstr(string_view s, size_t pos,
+ size_t n = string_view::npos) {
+ pos = (std::min)(pos, static_cast<size_t>(s.size()));
+ return s.substr(pos, n);
+}
+
+// NullSafeStringView()
+//
+// Creates an `y_absl::string_view` from a pointer `p` even if it's null-valued.
+// This function should be used where an `y_absl::string_view` can be created from
+// a possibly-null pointer.
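+//
+// Example:
+//
+//   const char* p = nullptr;
+//   y_absl::string_view v = y_absl::NullSafeStringView(p);  // empty view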
+constexpr string_view NullSafeStringView(const char* p) {
+ return p ? string_view(p) : string_view();
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_STRING_VIEW_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/strip.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/strip.h
new file mode 100644
index 00000000000..3164ff1ebc5
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/strip.h
@@ -0,0 +1,91 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: strip.h
+// -----------------------------------------------------------------------------
+//
+// This file contains various functions for stripping substrings from a string.
+#ifndef ABSL_STRINGS_STRIP_H_
+#define ABSL_STRINGS_STRIP_H_
+
+#include <cstddef>
+#include <util/generic/string.h>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/strings/ascii.h"
+#include "y_absl/strings/match.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// ConsumePrefix()
+//
+// Strips the `expected` prefix from the start of the given string, returning
+// `true` if the strip operation succeeded or `false` otherwise.
+//
+// Example:
+//
+// y_absl::string_view input("abc");
+// EXPECT_TRUE(y_absl::ConsumePrefix(&input, "a"));
+// EXPECT_EQ(input, "bc");
+inline bool ConsumePrefix(y_absl::string_view* str, y_absl::string_view expected) {
+ if (!y_absl::StartsWith(*str, expected)) return false;
+ str->remove_prefix(expected.size());
+ return true;
+}
+
+// ConsumeSuffix()
+//
+// Strips the `expected` suffix from the end of the given string, returning
+// `true` if the strip operation succeeded or `false` otherwise.
+//
+// Example:
+//
+// y_absl::string_view input("abcdef");
+// EXPECT_TRUE(y_absl::ConsumeSuffix(&input, "def"));
+// EXPECT_EQ(input, "abc");
+inline bool ConsumeSuffix(y_absl::string_view* str, y_absl::string_view expected) {
+ if (!y_absl::EndsWith(*str, expected)) return false;
+ str->remove_suffix(expected.size());
+ return true;
+}
+
+// StripPrefix()
+//
+// Returns a view into the input string `str` with the given `prefix` removed,
+// but leaving the original string intact. If the prefix does not match at the
+// start of the string, returns the original string instead.
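+//
+// Example:
+//
+//   y_absl::string_view input("abc");
+//   EXPECT_EQ(y_absl::StripPrefix(input, "a"), "bc");
+//   EXPECT_EQ(input, "abc");  // `input` itself is unmodified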
+ABSL_MUST_USE_RESULT inline y_absl::string_view StripPrefix(
+ y_absl::string_view str, y_absl::string_view prefix) {
+ if (y_absl::StartsWith(str, prefix)) str.remove_prefix(prefix.size());
+ return str;
+}
+
+// StripSuffix()
+//
+// Returns a view into the input string `str` with the given `suffix` removed,
+// but leaving the original string intact. If the suffix does not match at the
+// end of the string, returns the original string instead.
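+//
+// Example:
+//
+//   y_absl::string_view input("abcdef");
+//   EXPECT_EQ(y_absl::StripSuffix(input, "def"), "abc");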
+ABSL_MUST_USE_RESULT inline y_absl::string_view StripSuffix(
+ y_absl::string_view str, y_absl::string_view suffix) {
+ if (y_absl::EndsWith(str, suffix)) str.remove_suffix(suffix.size());
+ return str;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_STRIP_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.cc
new file mode 100644
index 00000000000..177fba8cbe8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.cc
@@ -0,0 +1,172 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/substitute.h"
+
+#include <algorithm>
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/strings/ascii.h"
+#include "y_absl/strings/escaping.h"
+#include "y_absl/strings/internal/resize_uninitialized.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace substitute_internal {
+
+void SubstituteAndAppendArray(TString* output, y_absl::string_view format,
+ const y_absl::string_view* args_array,
+ size_t num_args) {
+ // Determine total size needed.
+ size_t size = 0;
+ for (size_t i = 0; i < format.size(); i++) {
+ if (format[i] == '$') {
+ if (i + 1 >= format.size()) {
+#ifndef NDEBUG
+ ABSL_RAW_LOG(FATAL,
+ "Invalid y_absl::Substitute() format string: \"%s\".",
+ y_absl::CEscape(format).c_str());
+#endif
+ return;
+ } else if (y_absl::ascii_isdigit(format[i + 1])) {
+ int index = format[i + 1] - '0';
+ if (static_cast<size_t>(index) >= num_args) {
+#ifndef NDEBUG
+ ABSL_RAW_LOG(
+ FATAL,
+ "Invalid y_absl::Substitute() format string: asked for \"$"
+ "%d\", but only %d args were given. Full format string was: "
+ "\"%s\".",
+ index, static_cast<int>(num_args), y_absl::CEscape(format).c_str());
+#endif
+ return;
+ }
+ size += args_array[index].size();
+ ++i; // Skip next char.
+ } else if (format[i + 1] == '$') {
+ ++size;
+ ++i; // Skip next char.
+ } else {
+#ifndef NDEBUG
+ ABSL_RAW_LOG(FATAL,
+ "Invalid y_absl::Substitute() format string: \"%s\".",
+ y_absl::CEscape(format).c_str());
+#endif
+ return;
+ }
+ } else {
+ ++size;
+ }
+ }
+
+ if (size == 0) return;
+
+ // Build the string.
+ size_t original_size = output->size();
+ strings_internal::STLStringResizeUninitializedAmortized(output,
+ original_size + size);
+ char* target = &(*output)[original_size];
+ for (size_t i = 0; i < format.size(); i++) {
+ if (format[i] == '$') {
+ if (y_absl::ascii_isdigit(format[i + 1])) {
+ const y_absl::string_view src = args_array[format[i + 1] - '0'];
+ target = std::copy(src.begin(), src.end(), target);
+ ++i; // Skip next char.
+ } else if (format[i + 1] == '$') {
+ *target++ = '$';
+ ++i; // Skip next char.
+ }
+ } else {
+ *target++ = format[i];
+ }
+ }
+
+ assert(target == output->data() + output->size());
+}
+
+Arg::Arg(const void* value) {
+ static_assert(sizeof(scratch_) >= sizeof(value) * 2 + 2,
+ "fix sizeof(scratch_)");
+ if (value == nullptr) {
+ piece_ = "NULL";
+ } else {
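+    // Render the address as "0x<lowercase hex>", writing digits backwards
+    // from the end of scratch_, least significant nibble first.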
+ char* ptr = scratch_ + sizeof(scratch_);
+ uintptr_t num = reinterpret_cast<uintptr_t>(value);
+ do {
+ *--ptr = y_absl::numbers_internal::kHexChar[num & 0xf];
+ num >>= 4;
+ } while (num != 0);
+ *--ptr = 'x';
+ *--ptr = '0';
+ piece_ = y_absl::string_view(ptr, scratch_ + sizeof(scratch_) - ptr);
+ }
+}
+
+// TODO(jorg): Don't duplicate so much code between here and str_cat.cc
+Arg::Arg(Hex hex) {
+ char* const end = &scratch_[numbers_internal::kFastToBufferSize];
+ char* writer = end;
+ uint64_t value = hex.value;
+ do {
+ *--writer = y_absl::numbers_internal::kHexChar[value & 0xF];
+ value >>= 4;
+ } while (value != 0);
+
+ char* beg;
+ if (end - writer < hex.width) {
+ beg = end - hex.width;
+ std::fill_n(beg, writer - beg, hex.fill);
+ } else {
+ beg = writer;
+ }
+
+ piece_ = y_absl::string_view(beg, end - beg);
+}
+
+// TODO(jorg): Don't duplicate so much code between here and str_cat.cc
+Arg::Arg(Dec dec) {
+ assert(dec.width <= numbers_internal::kFastToBufferSize);
+ char* const end = &scratch_[numbers_internal::kFastToBufferSize];
+ char* const minfill = end - dec.width;
+ char* writer = end;
+ uint64_t value = dec.value;
+ bool neg = dec.neg;
+ while (value > 9) {
+ *--writer = '0' + (value % 10);
+ value /= 10;
+ }
+ *--writer = '0' + value;
+ if (neg) *--writer = '-';
+
+ ptrdiff_t fillers = writer - minfill;
+ if (fillers > 0) {
+ // Tricky: if the fill character is ' ', then it's <fill><+/-><digits>
+ // But...: if the fill character is '0', then it's <+/-><fill><digits>
+ bool add_sign_again = false;
+ if (neg && dec.fill == '0') { // If filling with '0',
+ ++writer; // ignore the sign we just added
+ add_sign_again = true; // and re-add the sign later.
+ }
+ writer -= fillers;
+ std::fill_n(writer, fillers, dec.fill);
+ if (add_sign_again) *--writer = '-';
+ }
+
+ piece_ = y_absl::string_view(writer, end - writer);
+}
+
+} // namespace substitute_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.h
new file mode 100644
index 00000000000..c31191fbda4
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.h
@@ -0,0 +1,723 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: substitute.h
+// -----------------------------------------------------------------------------
+//
+// This package contains functions for efficiently performing string
+// substitutions using a format string with positional notation:
+// `Substitute()` and `SubstituteAndAppend()`.
+//
+// Unlike printf-style format specifiers, `Substitute()` functions do not need
+// to specify the type of the substitution arguments. Supported arguments
+// following the format string, such as strings, string_views, ints,
+// floats, and bools, are automatically converted to strings during the
+// substitution process. (See below for a full list of supported types.)
+//
+// `Substitute()` does not allow you to specify *how* to format a value, beyond
+// the default conversion to string. For example, you cannot format an integer
+// in hex directly; use the `y_absl::Hex` and `y_absl::Dec` wrapper types
+// (accepted by `substitute_internal::Arg` below) if you need that.
+//
+// The format string uses positional identifiers indicated by a dollar sign ($)
+// and single digit positional ids to indicate which substitution arguments to
+// use at that location within the format string.
+//
+// A '$$' sequence in the format string causes a literal '$' character to be
+// output.
+//
+// Example 1:
+// TString s = Substitute("$1 purchased $0 $2 for $$10. Thanks $1!",
+// 5, "Bob", "Apples");
+// EXPECT_EQ("Bob purchased 5 Apples for $10. Thanks Bob!", s);
+//
+// Example 2:
+// TString s = "Hi. ";
+// SubstituteAndAppend(&s, "My name is $0 and I am $1 years old.", "Bob", 5);
+// EXPECT_EQ("Hi. My name is Bob and I am 5 years old.", s);
+//
+// Supported types:
+// * y_absl::string_view, TString, const char* (null is equivalent to "")
+// * int32_t, int64_t, uint32_t, uint64_t
+// * float, double
+// * bool (Printed as "true" or "false")
+// * pointer types other than char* (Printed as "0x<lower case hex string>",
+// except that null is printed as "NULL")
+//
+// If an invalid format string is provided, Substitute returns an empty string
+// and SubstituteAndAppend does not change the provided output string.
+// A format string is invalid if it:
+// * ends in an unescaped $ character,
+// e.g. "Hello $", or
+//   * calls for a positional argument which is not provided,
+// e.g. Substitute("Hello $2", "world"), or
+// * specifies a non-digit, non-$ character after an unescaped $ character,
+// e.g. "Hello $f".
+// In debug mode, i.e. #ifndef NDEBUG, such errors terminate the program.
+
+#ifndef ABSL_STRINGS_SUBSTITUTE_H_
+#define ABSL_STRINGS_SUBSTITUTE_H_
+
+#include <cstring>
+#include <util/generic/string.h>
+#include <type_traits>
+#include <vector>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+#include "y_absl/strings/ascii.h"
+#include "y_absl/strings/escaping.h"
+#include "y_absl/strings/numbers.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/str_split.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/strings/strip.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace substitute_internal {
+
+// Arg
+//
+// This class provides an argument type for `y_absl::Substitute()` and
+// `y_absl::SubstituteAndAppend()`. `Arg` handles implicit conversion of various
+// types to a string. (`Arg` is very similar to the `AlphaNum` class in
+// `StrCat()`.)
+//
+// This class has implicit constructors.
+class Arg {
+ public:
+ // Overloads for string-y things
+ //
+ // Explicitly overload `const char*` so the compiler doesn't cast to `bool`.
+ Arg(const char* value) // NOLINT(runtime/explicit)
+ : piece_(y_absl::NullSafeStringView(value)) {}
+ template <typename Allocator>
+ Arg( // NOLINT
+ const std::basic_string<char, std::char_traits<char>, Allocator>&
+ value) noexcept
+ : piece_(value) {}
+ Arg(y_absl::string_view value) // NOLINT(runtime/explicit)
+ : piece_(value) {}
+ Arg(const TString& s)
+ : piece_(s.data(), s.size()) {}
+
+ // Overloads for primitives
+ //
+ // No overloads are available for signed and unsigned char because if people
+ // are explicitly declaring their chars as signed or unsigned then they are
+  // probably using them as 8-bit integers and would prefer an integer
+ // representation. However, we can't really know, so we make the caller decide
+ // what to do.
+ Arg(char value) // NOLINT(runtime/explicit)
+ : piece_(scratch_, 1) {
+ scratch_[0] = value;
+ }
+ Arg(short value) // NOLINT(*)
+ : piece_(scratch_,
+ numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+ Arg(unsigned short value) // NOLINT(*)
+ : piece_(scratch_,
+ numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+ Arg(int value) // NOLINT(runtime/explicit)
+ : piece_(scratch_,
+ numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+ Arg(unsigned int value) // NOLINT(runtime/explicit)
+ : piece_(scratch_,
+ numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+ Arg(long value) // NOLINT(*)
+ : piece_(scratch_,
+ numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+ Arg(unsigned long value) // NOLINT(*)
+ : piece_(scratch_,
+ numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+ Arg(long long value) // NOLINT(*)
+ : piece_(scratch_,
+ numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+ Arg(unsigned long long value) // NOLINT(*)
+ : piece_(scratch_,
+ numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {}
+ Arg(float value) // NOLINT(runtime/explicit)
+ : piece_(scratch_, numbers_internal::SixDigitsToBuffer(value, scratch_)) {
+ }
+ Arg(double value) // NOLINT(runtime/explicit)
+ : piece_(scratch_, numbers_internal::SixDigitsToBuffer(value, scratch_)) {
+ }
+ Arg(bool value) // NOLINT(runtime/explicit)
+ : piece_(value ? "true" : "false") {}
+
+ Arg(Hex hex); // NOLINT(runtime/explicit)
+ Arg(Dec dec); // NOLINT(runtime/explicit)
+
+  // vector<bool>::reference and const_reference require special help to
+  // convert to `Arg` because it requires two user-defined conversions.
+ template <typename T,
+ y_absl::enable_if_t<
+ std::is_class<T>::value &&
+ (std::is_same<T, std::vector<bool>::reference>::value ||
+ std::is_same<T, std::vector<bool>::const_reference>::value)>* =
+ nullptr>
+ Arg(T value) // NOLINT(google-explicit-constructor)
+ : Arg(static_cast<bool>(value)) {}
+
+ // `void*` values, with the exception of `char*`, are printed as
+ // "0x<hex value>". However, in the case of `nullptr`, "NULL" is printed.
+ Arg(const void* value); // NOLINT(runtime/explicit)
+
+ Arg(const Arg&) = delete;
+ Arg& operator=(const Arg&) = delete;
+
+ y_absl::string_view piece() const { return piece_; }
+
+ private:
+ y_absl::string_view piece_;
+ char scratch_[numbers_internal::kFastToBufferSize];
+};
+
+// Internal helper function. Don't call this from outside this implementation.
+// This interface may change without notice.
+void SubstituteAndAppendArray(TString* output, y_absl::string_view format,
+ const y_absl::string_view* args_array,
+ size_t num_args);
+
+#if defined(ABSL_BAD_CALL_IF)
+constexpr int CalculateOneBit(const char* format) {
+ // Returns:
+ // * 2^N for '$N' when N is in [0-9]
+ // * 0 for correct '$' escaping: '$$'.
+ // * -1 otherwise.
+ return (*format < '0' || *format > '9') ? (*format == '$' ? 0 : -1)
+ : (1 << (*format - '0'));
+}
+
+constexpr const char* SkipNumber(const char* format) {
+ return !*format ? format : (format + 1);
+}
+
+constexpr int PlaceholderBitmask(const char* format) {
+ return !*format
+ ? 0
+ : *format != '$' ? PlaceholderBitmask(format + 1)
+ : (CalculateOneBit(format + 1) |
+ PlaceholderBitmask(SkipNumber(format + 1)));
+}
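+
+// For example, PlaceholderBitmask("$0 bought $1") == 0b11; the
+// ABSL_BAD_CALL_IF conditions below compare this bitmask against 2^N - 1
+// for N substitution arguments.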
+#endif // ABSL_BAD_CALL_IF
+
+} // namespace substitute_internal
+
+//
+// PUBLIC API
+//
+
+// SubstituteAndAppend()
+//
+// Substitutes variables into a given format string and appends to a given
+// output string. See file comments above for usage.
+//
+// The declarations of `SubstituteAndAppend()` below consist of overloads
+// for passing 0 to 10 arguments, respectively.
+//
+// NOTE: A zero-argument `SubstituteAndAppend()` may be used within variadic
+// templates to allow a variable number of arguments.
+//
+// Example:
+// template <typename... Args>
+// void VarMsg(TString* boilerplate, y_absl::string_view format,
+// const Args&... args) {
+// y_absl::SubstituteAndAppend(boilerplate, format, args...);
+// }
+//
+inline void SubstituteAndAppend(TString* output, y_absl::string_view format) {
+ substitute_internal::SubstituteAndAppendArray(output, format, nullptr, 0);
+}
+
+inline void SubstituteAndAppend(TString* output, y_absl::string_view format,
+ const substitute_internal::Arg& a0) {
+ const y_absl::string_view args[] = {a0.piece()};
+ substitute_internal::SubstituteAndAppendArray(output, format, args,
+ ABSL_ARRAYSIZE(args));
+}
+
+inline void SubstituteAndAppend(TString* output, y_absl::string_view format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1) {
+ const y_absl::string_view args[] = {a0.piece(), a1.piece()};
+ substitute_internal::SubstituteAndAppendArray(output, format, args,
+ ABSL_ARRAYSIZE(args));
+}
+
+inline void SubstituteAndAppend(TString* output, y_absl::string_view format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2) {
+ const y_absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece()};
+ substitute_internal::SubstituteAndAppendArray(output, format, args,
+ ABSL_ARRAYSIZE(args));
+}
+
+inline void SubstituteAndAppend(TString* output, y_absl::string_view format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3) {
+ const y_absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(),
+ a3.piece()};
+ substitute_internal::SubstituteAndAppendArray(output, format, args,
+ ABSL_ARRAYSIZE(args));
+}
+
+inline void SubstituteAndAppend(TString* output, y_absl::string_view format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4) {
+ const y_absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(),
+ a3.piece(), a4.piece()};
+ substitute_internal::SubstituteAndAppendArray(output, format, args,
+ ABSL_ARRAYSIZE(args));
+}
+
+inline void SubstituteAndAppend(TString* output, y_absl::string_view format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5) {
+ const y_absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(),
+ a3.piece(), a4.piece(), a5.piece()};
+ substitute_internal::SubstituteAndAppendArray(output, format, args,
+ ABSL_ARRAYSIZE(args));
+}
+
+inline void SubstituteAndAppend(TString* output, y_absl::string_view format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5,
+ const substitute_internal::Arg& a6) {
+ const y_absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(),
+ a3.piece(), a4.piece(), a5.piece(),
+ a6.piece()};
+ substitute_internal::SubstituteAndAppendArray(output, format, args,
+ ABSL_ARRAYSIZE(args));
+}
+
+inline void SubstituteAndAppend(
+ TString* output, y_absl::string_view format,
+ const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
+ const substitute_internal::Arg& a6, const substitute_internal::Arg& a7) {
+ const y_absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(),
+ a3.piece(), a4.piece(), a5.piece(),
+ a6.piece(), a7.piece()};
+ substitute_internal::SubstituteAndAppendArray(output, format, args,
+ ABSL_ARRAYSIZE(args));
+}
+
+inline void SubstituteAndAppend(
+ TString* output, y_absl::string_view format,
+ const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
+ const substitute_internal::Arg& a6, const substitute_internal::Arg& a7,
+ const substitute_internal::Arg& a8) {
+ const y_absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(),
+ a3.piece(), a4.piece(), a5.piece(),
+ a6.piece(), a7.piece(), a8.piece()};
+ substitute_internal::SubstituteAndAppendArray(output, format, args,
+ ABSL_ARRAYSIZE(args));
+}
+
+inline void SubstituteAndAppend(
+ TString* output, y_absl::string_view format,
+ const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
+ const substitute_internal::Arg& a6, const substitute_internal::Arg& a7,
+ const substitute_internal::Arg& a8, const substitute_internal::Arg& a9) {
+ const y_absl::string_view args[] = {
+ a0.piece(), a1.piece(), a2.piece(), a3.piece(), a4.piece(),
+ a5.piece(), a6.piece(), a7.piece(), a8.piece(), a9.piece()};
+ substitute_internal::SubstituteAndAppendArray(output, format, args,
+ ABSL_ARRAYSIZE(args));
+}
+
+#if defined(ABSL_BAD_CALL_IF)
+// These overloads catch cases where the number of placeholders in the
+// format string doesn't match the number of data arguments.
+void SubstituteAndAppend(TString* output, const char* format)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 0,
+ "There were no substitution arguments "
+ "but this format string either has a $[0-9] in it or contains "
+ "an unescaped $ character (use $$ instead)");
+
+void SubstituteAndAppend(TString* output, const char* format,
+ const substitute_internal::Arg& a0)
+ ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 1,
+ "There was 1 substitution argument given, but "
+ "this format string is missing its $0, contains "
+ "one of $1-$9, or contains an unescaped $ character (use "
+ "$$ instead)");
+
+void SubstituteAndAppend(TString* output, const char* format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 3,
+ "There were 2 substitution arguments given, but this format string is "
+ "missing its $0/$1, contains one of $2-$9, or contains an "
+ "unescaped $ character (use $$ instead)");
+
+void SubstituteAndAppend(TString* output, const char* format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 7,
+ "There were 3 substitution arguments given, but "
+ "this format string is missing its $0/$1/$2, contains one of "
+ "$3-$9, or contains an unescaped $ character (use $$ instead)");
+
+void SubstituteAndAppend(TString* output, const char* format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 15,
+ "There were 4 substitution arguments given, but "
+ "this format string is missing its $0-$3, contains one of "
+ "$4-$9, or contains an unescaped $ character (use $$ instead)");
+
+void SubstituteAndAppend(TString* output, const char* format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 31,
+ "There were 5 substitution arguments given, but "
+ "this format string is missing its $0-$4, contains one of "
+ "$5-$9, or contains an unescaped $ character (use $$ instead)");
+
+void SubstituteAndAppend(TString* output, const char* format,
+ const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 63,
+ "There were 6 substitution arguments given, but "
+ "this format string is missing its $0-$5, contains one of "
+ "$6-$9, or contains an unescaped $ character (use $$ instead)");
+
+void SubstituteAndAppend(
+ TString* output, const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5, const substitute_internal::Arg& a6)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 127,
+ "There were 7 substitution arguments given, but "
+ "this format string is missing its $0-$6, contains one of "
+ "$7-$9, or contains an unescaped $ character (use $$ instead)");
+
+void SubstituteAndAppend(
+ TString* output, const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
+ const substitute_internal::Arg& a7)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 255,
+ "There were 8 substitution arguments given, but "
+ "this format string is missing its $0-$7, contains one of "
+ "$8-$9, or contains an unescaped $ character (use $$ instead)");
+
+void SubstituteAndAppend(
+ TString* output, const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
+ const substitute_internal::Arg& a7, const substitute_internal::Arg& a8)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 511,
+ "There were 9 substitution arguments given, but "
+ "this format string is missing its $0-$8, contains a $9, or "
+ "contains an unescaped $ character (use $$ instead)");
+
+void SubstituteAndAppend(
+ TString* output, const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
+ const substitute_internal::Arg& a7, const substitute_internal::Arg& a8,
+ const substitute_internal::Arg& a9)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 1023,
+ "There were 10 substitution arguments given, but this "
+ "format string either doesn't contain all of $0 through $9 or "
+ "contains an unescaped $ character (use $$ instead)");
+#endif // ABSL_BAD_CALL_IF
+
+// Substitute()
+//
+// Substitutes variables into a given format string. See file comments above
+// for usage.
+//
+// The declarations of `Substitute()` below consist of overloads for passing 0
+// to 10 arguments, respectively.
+//
+// NOTE: A zero-argument `Substitute()` may be used within variadic templates to
+// allow a variable number of arguments.
+//
+// Example:
+// template <typename... Args>
+// void VarMsg(y_absl::string_view format, const Args&... args) {
+//     TString s = y_absl::Substitute(format, args...);
+//   }
+
+ABSL_MUST_USE_RESULT inline TString Substitute(y_absl::string_view format) {
+ TString result;
+ SubstituteAndAppend(&result, format);
+ return result;
+}
+
+ABSL_MUST_USE_RESULT inline TString Substitute(
+ y_absl::string_view format, const substitute_internal::Arg& a0) {
+ TString result;
+ SubstituteAndAppend(&result, format, a0);
+ return result;
+}
+
+ABSL_MUST_USE_RESULT inline TString Substitute(
+ y_absl::string_view format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1) {
+ TString result;
+ SubstituteAndAppend(&result, format, a0, a1);
+ return result;
+}
+
+ABSL_MUST_USE_RESULT inline TString Substitute(
+ y_absl::string_view format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2) {
+ TString result;
+ SubstituteAndAppend(&result, format, a0, a1, a2);
+ return result;
+}
+
+ABSL_MUST_USE_RESULT inline TString Substitute(
+ y_absl::string_view format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3) {
+ TString result;
+ SubstituteAndAppend(&result, format, a0, a1, a2, a3);
+ return result;
+}
+
+ABSL_MUST_USE_RESULT inline TString Substitute(
+ y_absl::string_view format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4) {
+ TString result;
+ SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4);
+ return result;
+}
+
+ABSL_MUST_USE_RESULT inline TString Substitute(
+ y_absl::string_view format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5) {
+ TString result;
+ SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4, a5);
+ return result;
+}
+
+ABSL_MUST_USE_RESULT inline TString Substitute(
+ y_absl::string_view format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5, const substitute_internal::Arg& a6) {
+ TString result;
+ SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4, a5, a6);
+ return result;
+}
+
+ABSL_MUST_USE_RESULT inline TString Substitute(
+ y_absl::string_view format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
+ const substitute_internal::Arg& a7) {
+ TString result;
+ SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4, a5, a6, a7);
+ return result;
+}
+
+ABSL_MUST_USE_RESULT inline TString Substitute(
+ y_absl::string_view format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
+ const substitute_internal::Arg& a7, const substitute_internal::Arg& a8) {
+ TString result;
+ SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4, a5, a6, a7, a8);
+ return result;
+}
+
+ABSL_MUST_USE_RESULT inline TString Substitute(
+ y_absl::string_view format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
+ const substitute_internal::Arg& a7, const substitute_internal::Arg& a8,
+ const substitute_internal::Arg& a9) {
+ TString result;
+ SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ return result;
+}
+
+#if defined(ABSL_BAD_CALL_IF)
+// These overloads catch cases where the number of placeholders in the
+// format string doesn't match the number of data arguments.
+TString Substitute(const char* format)
+ ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 0,
+ "There were no substitution arguments "
+ "but this format string either has a $[0-9] in it or "
+ "contains an unescaped $ character (use $$ instead)");
+
+TString Substitute(const char* format, const substitute_internal::Arg& a0)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 1,
+ "There was 1 substitution argument given, but "
+ "this format string is missing its $0, contains one of $1-$9, "
+ "or contains an unescaped $ character (use $$ instead)");
+
+TString Substitute(const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 3,
+ "There were 2 substitution arguments given, but "
+ "this format string is missing its $0/$1, contains one of "
+ "$2-$9, or contains an unescaped $ character (use $$ instead)");
+
+TString Substitute(const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 7,
+ "There were 3 substitution arguments given, but "
+ "this format string is missing its $0/$1/$2, contains one of "
+ "$3-$9, or contains an unescaped $ character (use $$ instead)");
+
+TString Substitute(const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 15,
+ "There were 4 substitution arguments given, but "
+ "this format string is missing its $0-$3, contains one of "
+ "$4-$9, or contains an unescaped $ character (use $$ instead)");
+
+TString Substitute(const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 31,
+ "There were 5 substitution arguments given, but "
+ "this format string is missing its $0-$4, contains one of "
+ "$5-$9, or contains an unescaped $ character (use $$ instead)");
+
+TString Substitute(const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 63,
+ "There were 6 substitution arguments given, but "
+ "this format string is missing its $0-$5, contains one of "
+ "$6-$9, or contains an unescaped $ character (use $$ instead)");
+
+TString Substitute(const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5,
+ const substitute_internal::Arg& a6)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 127,
+ "There were 7 substitution arguments given, but "
+ "this format string is missing its $0-$6, contains one of "
+ "$7-$9, or contains an unescaped $ character (use $$ instead)");
+
+TString Substitute(const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1,
+ const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3,
+ const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5,
+ const substitute_internal::Arg& a6,
+ const substitute_internal::Arg& a7)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 255,
+ "There were 8 substitution arguments given, but "
+ "this format string is missing its $0-$7, contains one of "
+ "$8-$9, or contains an unescaped $ character (use $$ instead)");
+
+TString Substitute(
+ const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
+ const substitute_internal::Arg& a7, const substitute_internal::Arg& a8)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 511,
+ "There were 9 substitution arguments given, but "
+ "this format string is missing its $0-$8, contains a $9, or "
+ "contains an unescaped $ character (use $$ instead)");
+
+TString Substitute(
+ const char* format, const substitute_internal::Arg& a0,
+ const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
+ const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
+ const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
+ const substitute_internal::Arg& a7, const substitute_internal::Arg& a8,
+ const substitute_internal::Arg& a9)
+ ABSL_BAD_CALL_IF(
+ substitute_internal::PlaceholderBitmask(format) != 1023,
+ "There were 10 substitution arguments given, but this "
+ "format string either doesn't contain all of $0 through $9 or "
+ "contains an unescaped $ character (use $$ instead)");
+#endif // ABSL_BAD_CALL_IF
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_STRINGS_SUBSTITUTE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ya.make
new file mode 100644
index 00000000000..77c5a47dc93
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ya.make
@@ -0,0 +1,46 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ ascii.cc
+ charconv.cc
+ escaping.cc
+ internal/charconv_bigint.cc
+ internal/charconv_parse.cc
+ internal/memutil.cc
+ match.cc
+ numbers.cc
+ str_cat.cc
+ str_replace.cc
+ str_split.cc
+ string_view.cc
+ substitute.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..7be6b428485
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/.yandex_meta/licenses.list.txt
@@ -0,0 +1,16 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.cc
new file mode 100644
index 00000000000..b03f0ff559e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.cc
@@ -0,0 +1,52 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/barrier.h"
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/synchronization/mutex.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Return whether int *arg is zero.
+static bool IsZero(void *arg) {
+ return 0 == *reinterpret_cast<int *>(arg);
+}
+
+bool Barrier::Block() {
+ MutexLock l(&this->lock_);
+
+ this->num_to_block_--;
+ if (this->num_to_block_ < 0) {
+ ABSL_RAW_LOG(
+ FATAL,
+ "Block() called too many times. num_to_block_=%d out of total=%d",
+ this->num_to_block_, this->num_to_exit_);
+ }
+
+ this->lock_.Await(Condition(IsZero, &this->num_to_block_));
+
+ // Determine which thread can safely delete this Barrier object
+ this->num_to_exit_--;
+ ABSL_RAW_CHECK(this->num_to_exit_ >= 0, "barrier underflow");
+
+ // If num_to_exit_ == 0 then all other threads in the barrier have
+ // exited the Wait() and have released the Mutex so this thread is
+ // free to delete the barrier.
+ return this->num_to_exit_ == 0;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.h
new file mode 100644
index 00000000000..527bd42be05
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.h
@@ -0,0 +1,79 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// barrier.h
+// -----------------------------------------------------------------------------
+
+#ifndef ABSL_SYNCHRONIZATION_BARRIER_H_
+#define ABSL_SYNCHRONIZATION_BARRIER_H_
+
+#include "y_absl/base/thread_annotations.h"
+#include "y_absl/synchronization/mutex.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Barrier
+//
+// This class creates a barrier which blocks threads until a prespecified
+// threshold of threads (`num_threads`) utilizes the barrier. A thread utilizes
+// the `Barrier` by calling `Block()` on the barrier, which will block that
+// thread; no call to `Block()` will return until `num_threads` threads have
+// called it.
+//
+// Exactly one call to `Block()` will return `true`; the thread receiving that
+// `true` is then responsible for destroying the barrier. Because stack
+// allocation will cause the barrier to be deleted when it goes out of scope,
+// barriers should not be stack allocated.
+//
+// Example:
+//
+// // Main thread creates a `Barrier`:
+// barrier = new Barrier(num_threads);
+//
+// // Each participating thread could then call:
+// if (barrier->Block()) delete barrier; // Exactly one call to `Block()`
+// // returns `true`; that call
+// // deletes the barrier.
+class Barrier {
+ public:
+ // `num_threads` is the number of threads that will participate in the barrier
+ explicit Barrier(int num_threads)
+ : num_to_block_(num_threads), num_to_exit_(num_threads) {}
+
+ Barrier(const Barrier&) = delete;
+ Barrier& operator=(const Barrier&) = delete;
+
+ // Barrier::Block()
+ //
+ // Blocks the current thread, and returns only when the `num_threads`
+ // threshold of threads utilizing this barrier has been reached. `Block()`
+ // returns `true` for precisely one caller, which may then destroy the
+ // barrier.
+ //
+ // Memory ordering: For any threads X and Y, any action taken by X
+ // before X calls `Block()` will be visible to Y after Y returns from
+ // `Block()`.
+ bool Block();
+
+ private:
+ Mutex lock_;
+ int num_to_block_ ABSL_GUARDED_BY(lock_);
+ int num_to_exit_ ABSL_GUARDED_BY(lock_);
+};
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+#endif // ABSL_SYNCHRONIZATION_BARRIER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.cc
new file mode 100644
index 00000000000..056185e51ed
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/blocking_counter.h"
+
+#include <atomic>
+
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace {
+
+// Return whether bool *arg is true.
+bool IsDone(void *arg) { return *reinterpret_cast<bool *>(arg); }
+
+} // namespace
+
+BlockingCounter::BlockingCounter(int initial_count)
+ : count_(initial_count),
+ num_waiting_(0),
+      done_{initial_count == 0} {
+ ABSL_RAW_CHECK(initial_count >= 0, "BlockingCounter initial_count negative");
+}
+
+bool BlockingCounter::DecrementCount() {
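+  // The acq_rel ordering below makes each worker's prior writes visible to
+  // the caller that observes count == 0, and lets that caller observe the
+  // writes published by every earlier decrement.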
+ int count = count_.fetch_sub(1, std::memory_order_acq_rel) - 1;
+ ABSL_RAW_CHECK(count >= 0,
+ "BlockingCounter::DecrementCount() called too many times");
+ if (count == 0) {
+ MutexLock l(&lock_);
+ done_ = true;
+ return true;
+ }
+ return false;
+}
+
+void BlockingCounter::Wait() {
+ MutexLock l(&this->lock_);
+
+  // Only one thread may call Wait(). To support more than one thread,
+ // implement a counter num_to_exit, like in the Barrier class.
+ ABSL_RAW_CHECK(num_waiting_ == 0, "multiple threads called Wait()");
+ num_waiting_++;
+
+ this->lock_.Await(Condition(IsDone, &this->done_));
+
+ // At this point, we know that all threads executing DecrementCount
+ // will not touch this object again.
+ // Therefore, the thread calling this method is free to delete the object
+ // after we return from this method.
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.h
new file mode 100644
index 00000000000..a1644903e3b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.h
@@ -0,0 +1,101 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// blocking_counter.h
+// -----------------------------------------------------------------------------
+
+#ifndef ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
+#define ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
+
+#include <atomic>
+
+#include "y_absl/base/thread_annotations.h"
+#include "y_absl/synchronization/mutex.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// BlockingCounter
+//
+// This class allows a thread to block for a pre-specified number of actions.
+// `BlockingCounter` maintains a single non-negative abstract integer "count"
+// with an initial value `initial_count`. A thread can then call `Wait()` on
+// this blocking counter to block until the specified number of events occur;
+// worker threads then call `DecrementCount()` on the counter upon completion of
+// their work. Once the counter's internal "count" reaches zero, the blocked
+// thread unblocks.
+//
+// A `BlockingCounter` requires the following:
+// - its `initial_count` is non-negative.
+// - the number of calls to `DecrementCount()` on it is at most
+// `initial_count`.
+// - `Wait()` is called at most once on it.
+//
+// Given the above requirements, a `BlockingCounter` provides the following
+// guarantees:
+// - Once its internal "count" reaches zero, no legal action on the object
+// can further change the value of "count".
+// - When `Wait()` returns, it is legal to destroy the `BlockingCounter`.
+// - When `Wait()` returns, the number of calls to `DecrementCount()` on
+// this blocking counter exactly equals `initial_count`.
+//
+// Example:
+// BlockingCounter bcount(N); // there are N items of work
+// ... Allow worker threads to start.
+// ... On completing each work item, workers do:
+// ... bcount.DecrementCount(); // an item of work has been completed
+//
+// bcount.Wait(); // wait for all work to be complete
+//
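+// A runnable sketch of the same pattern (illustrative; `N`, `Work()`, and
+// the use of std::thread are assumptions, not part of this header):
+//
+//   y_absl::BlockingCounter bcount(N);
+//   std::vector<std::thread> workers;
+//   for (int i = 0; i < N; ++i) {
+//     workers.emplace_back([&bcount, i] {
+//       Work(i);                   // an item of work
+//       bcount.DecrementCount();   // report completion
+//     });
+//   }
+//   bcount.Wait();                 // returns once all N have decremented
+//   for (auto& w : workers) w.join();
+//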
+class BlockingCounter {
+ public:
+ explicit BlockingCounter(int initial_count);
+
+ BlockingCounter(const BlockingCounter&) = delete;
+ BlockingCounter& operator=(const BlockingCounter&) = delete;
+
+ // BlockingCounter::DecrementCount()
+ //
+  // Decrements the counter's "count" by one, and returns "count == 0". This
+ // function requires that "count != 0" when it is called.
+ //
+ // Memory ordering: For any threads X and Y, any action taken by X
+ // before it calls `DecrementCount()` is visible to thread Y after
+ // Y's call to `DecrementCount()`, provided Y's call returns `true`.
+ bool DecrementCount();
+
+ // BlockingCounter::Wait()
+ //
+ // Blocks until the counter reaches zero. This function may be called at most
+ // once. On return, `DecrementCount()` will have been called "initial_count"
+ // times and the blocking counter may be destroyed.
+ //
+ // Memory ordering: For any threads X and Y, any action taken by X
+ // before X calls `DecrementCount()` is visible to Y after Y returns
+ // from `Wait()`.
+ void Wait();
+
+ private:
+ Mutex lock_;
+ std::atomic<int> count_;
+ int num_waiting_ ABSL_GUARDED_BY(lock_);
+ bool done_ ABSL_GUARDED_BY(lock_);
+};
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..b94f79902f1
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/.yandex_meta/licenses.list.txt
@@ -0,0 +1,34 @@
+====================Apache-2.0====================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2020 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
new file mode 100644
index 00000000000..3c5764eeb95
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
@@ -0,0 +1,140 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdint.h>
+#include <new>
+
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "y_absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include <string.h>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/spinlock.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/synchronization/internal/per_thread_sem.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// ThreadIdentity storage is persistent; we maintain a free list of
+// previously released ThreadIdentity objects.
+ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
+
+// A per-thread destructor for reclaiming associated ThreadIdentity objects.
+// Since we must preserve their storage, we cache them for reuse.
+void ReclaimThreadIdentity(void* v) {
+ base_internal::ThreadIdentity* identity =
+ static_cast<base_internal::ThreadIdentity*>(v);
+
+ // all_locks might have been allocated by the Mutex implementation.
+ // We free it here when we are notified that our thread is dying.
+ if (identity->per_thread_synch.all_locks != nullptr) {
+ base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
+ }
+
+ PerThreadSem::Destroy(identity);
+
+ // We must explicitly clear the current thread's identity:
+ // (a) Subsequent (unrelated) per-thread destructors may require an identity.
+  //     We must guarantee a new identity is used in this case (this destructor
+  //     will be reinvoked up to PTHREAD_DESTRUCTOR_ITERATIONS times).
+ // (b) ThreadIdentity implementations may depend on memory that is not
+ // reinitialized before reuse. We must allow explicit clearing of the
+ // association state in this case.
+ base_internal::ClearCurrentThreadIdentity();
+ {
+ base_internal::SpinLockHolder l(&freelist_lock);
+ identity->next = thread_identity_freelist;
+ thread_identity_freelist = identity;
+ }
+}
+
+// Return value rounded up to next multiple of align.
+// Align must be a power of two.
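+// For example (illustrative): RoundUp(13, 8) == 16 and RoundUp(16, 8) == 16.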
+static intptr_t RoundUp(intptr_t addr, intptr_t align) {
+ return (addr + align - 1) & ~(align - 1);
+}
+
+static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
+ base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
+ pts->next = nullptr;
+ pts->skip = nullptr;
+ pts->may_skip = false;
+ pts->waitp = nullptr;
+ pts->suppress_fatal_errors = false;
+ pts->readers = 0;
+ pts->priority = 0;
+ pts->next_priority_read_cycles = 0;
+ pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
+ std::memory_order_relaxed);
+ pts->maybe_unlocking = false;
+ pts->wake = false;
+ pts->cond_waiter = false;
+ pts->all_locks = nullptr;
+ identity->blocked_count_ptr = nullptr;
+ identity->ticker.store(0, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+ identity->next = nullptr;
+}
+
+static base_internal::ThreadIdentity* NewThreadIdentity() {
+ base_internal::ThreadIdentity* identity = nullptr;
+
+ {
+ // Re-use a previously released object if possible.
+ base_internal::SpinLockHolder l(&freelist_lock);
+ if (thread_identity_freelist) {
+ identity = thread_identity_freelist; // Take list-head.
+ thread_identity_freelist = thread_identity_freelist->next;
+ }
+ }
+
+ if (identity == nullptr) {
+ // Allocate enough space to align ThreadIdentity to a multiple of
+ // PerThreadSynch::kAlignment. This space is never released (it is
+ // added to a freelist by ReclaimThreadIdentity instead).
+ void* allocation = base_internal::LowLevelAlloc::Alloc(
+ sizeof(*identity) + base_internal::PerThreadSynch::kAlignment - 1);
+ // Round up the address to the required alignment.
+ identity = reinterpret_cast<base_internal::ThreadIdentity*>(
+ RoundUp(reinterpret_cast<intptr_t>(allocation),
+ base_internal::PerThreadSynch::kAlignment));
+ }
+ ResetThreadIdentity(identity);
+
+ return identity;
+}
+
+// Allocates and attaches a ThreadIdentity object for the calling thread. Returns
+// the new identity.
+// REQUIRES: CurrentThreadIdentity(false) == nullptr
+base_internal::ThreadIdentity* CreateThreadIdentity() {
+ base_internal::ThreadIdentity* identity = NewThreadIdentity();
+ PerThreadSem::Init(identity);
+ // Associate the value with the current thread, and attach our destructor.
+ base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
+ return identity;
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.h
new file mode 100644
index 00000000000..d93209f424d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Interface for getting the current ThreadIdentity, creating one if necessary.
+// See thread_identity.h.
+//
+// This file is separate from thread_identity.h because creating a new
+// ThreadIdentity requires slightly higher level libraries (per_thread_sem
+// and low_level_alloc) than accessing an existing one. This separation allows
+// us to have a smaller //y_absl/base:base.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
+
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/base/port.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// Allocates and attaches a ThreadIdentity object for the calling thread.
+// For private use only.
+base_internal::ThreadIdentity* CreateThreadIdentity();
+
+// A per-thread destructor for reclaiming associated ThreadIdentity objects.
+// For private use only.
+void ReclaimThreadIdentity(void* v);
+
+// Returns the ThreadIdentity object representing the calling thread; guaranteed
+// to be unique for its lifetime. The returned object will remain valid for the
+// program's lifetime, although it may be re-assigned to a subsequent thread.
+// If one does not exist for the calling thread, it is allocated now.
+inline base_internal::ThreadIdentity* GetOrCreateCurrentThreadIdentity() {
+ base_internal::ThreadIdentity* identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ if (ABSL_PREDICT_FALSE(identity == nullptr)) {
+ return CreateThreadIdentity();
+ }
+ return identity;
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h
new file mode 100644
index 00000000000..aea769d8ec4
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h
@@ -0,0 +1,154 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+
+#include "y_absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
+#include <atomic>
+#include <cstdint>
+
+#include "y_absl/base/optimization.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line
+#elif defined(__BIONIC__)
+// Bionic supports all the futex operations we need even when some of the futex
+// definitions are missing.
+#define ABSL_INTERNAL_HAVE_FUTEX
+#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
+// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
+#define ABSL_INTERNAL_HAVE_FUTEX
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_WAIT_BITSET
+#define FUTEX_WAIT_BITSET 9
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#ifndef FUTEX_CLOCK_REALTIME
+#define FUTEX_CLOCK_REALTIME 256
+#endif
+#ifndef FUTEX_BITSET_MATCH_ANY
+#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
+class FutexImpl {
+ public:
+ static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
+ KernelTimeout t) {
+ int err = 0;
+ if (t.has_timeout()) {
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ struct timespec abs_timeout = t.MakeAbsTimespec();
+      // Atomically check that the futex value is still `val`, and if it
+      // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
+ err = syscall(
+ SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
+ &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
+ } else {
+      // Atomically check that the futex value is still `val`, and if it
+      // is, sleep until woken by FUTEX_WAKE.
+ err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
+ }
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
+ int32_t bits,
+ const struct timespec *abstime) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
+ nullptr, bits);
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ static int Wake(std::atomic<int32_t> *v, int32_t count) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ // FUTEX_WAKE_BITSET
+ static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
+ nullptr, bits);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+};
+
+class Futex : public FutexImpl {};
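+
+// Illustrative use of these wrappers (a sketch; the flag word and the
+// waiter/waker split are assumptions, not part of this header):
+//
+//   std::atomic<int32_t> flag{0};
+//   // Waiter: block while `flag` still holds 0, with no timeout.
+//   Futex::WaitUntil(&flag, 0, KernelTimeout::Never());
+//   // Waker: publish the change, then wake one blocked thread.
+//   flag.store(1, std::memory_order_release);
+//   Futex::Wake(&flag, 1);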
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_INTERNAL_HAVE_FUTEX
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
new file mode 100644
index 00000000000..d8987c1a981
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
@@ -0,0 +1,698 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// GraphCycles provides incremental cycle detection on a dynamic
+// graph using the following algorithm:
+//
+// A dynamic topological sort algorithm for directed acyclic graphs
+// David J. Pearce, Paul H. J. Kelly
+// Journal of Experimental Algorithmics (JEA) JEA Homepage archive
+// Volume 11, 2006, Article No. 1.7
+//
+// Brief summary of the algorithm:
+//
+// (1) Maintain a rank for each node that is consistent
+// with the topological sort of the graph. I.e., path from x to y
+// implies rank[x] < rank[y].
+// (2) When a new edge (x->y) is inserted, do nothing if rank[x] < rank[y].
+// (3) Otherwise: adjust ranks in the neighborhood of x and y.
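+//
+// Worked example (illustrative): with rank[a]=0, rank[b]=1, rank[c]=2,
+// inserting the edge c->a violates rank[c] < rank[a]. Only nodes whose
+// ranks fall in [rank[a], rank[c]] are revisited: a forward DFS from a
+// and a backward DFS from c collect {a} and {c}, and their ranks {0, 2}
+// are handed back out in topological order, giving rank[c]=0 and
+// rank[a]=2, which restores the invariant without touching b.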
+
+#include "y_absl/base/attributes.h"
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "y_absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include "y_absl/synchronization/internal/graphcycles.h"
+
+#include <algorithm>
+#include <array>
+#include <limits>
+#include "y_absl/base/internal/hide_ptr.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/spinlock.h"
+
+// Do not use STL. This module does not use standard memory allocation.
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+namespace {
+
+// Avoid LowLevelAlloc's default arena since it calls malloc hooks in
+// which people are doing things like acquiring Mutexes.
+ABSL_CONST_INIT static y_absl::base_internal::SpinLock arena_mu(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;
+
+static void InitArenaIfNecessary() {
+ arena_mu.Lock();
+ if (arena == nullptr) {
+ arena = base_internal::LowLevelAlloc::NewArena(0);
+ }
+ arena_mu.Unlock();
+}
+
+// Number of inlined elements in Vec. Hash table implementation
+// relies on this being a power of two.
+static const uint32_t kInline = 8;
+
+// A simple LowLevelAlloc based resizable vector with inlined storage
+// for a few elements. T must be a plain type since constructor
+// and destructor are not run on elements of type T managed by Vec.
+template <typename T>
+class Vec {
+ public:
+ Vec() { Init(); }
+ ~Vec() { Discard(); }
+
+ void clear() {
+ Discard();
+ Init();
+ }
+
+ bool empty() const { return size_ == 0; }
+ uint32_t size() const { return size_; }
+ T* begin() { return ptr_; }
+ T* end() { return ptr_ + size_; }
+ const T& operator[](uint32_t i) const { return ptr_[i]; }
+ T& operator[](uint32_t i) { return ptr_[i]; }
+ const T& back() const { return ptr_[size_-1]; }
+ void pop_back() { size_--; }
+
+ void push_back(const T& v) {
+ if (size_ == capacity_) Grow(size_ + 1);
+ ptr_[size_] = v;
+ size_++;
+ }
+
+ void resize(uint32_t n) {
+ if (n > capacity_) Grow(n);
+ size_ = n;
+ }
+
+ void fill(const T& val) {
+ for (uint32_t i = 0; i < size(); i++) {
+ ptr_[i] = val;
+ }
+ }
+
+ // Guarantees src is empty at end.
+ // Provided for the hash table resizing code below.
+ void MoveFrom(Vec<T>* src) {
+ if (src->ptr_ == src->space_) {
+ // Need to actually copy
+ resize(src->size_);
+ std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
+ src->size_ = 0;
+ } else {
+ Discard();
+ ptr_ = src->ptr_;
+ size_ = src->size_;
+ capacity_ = src->capacity_;
+ src->Init();
+ }
+ }
+
+ private:
+ T* ptr_;
+ T space_[kInline];
+ uint32_t size_;
+ uint32_t capacity_;
+
+ void Init() {
+ ptr_ = space_;
+ size_ = 0;
+ capacity_ = kInline;
+ }
+
+ void Discard() {
+ if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
+ }
+
+ void Grow(uint32_t n) {
+ while (capacity_ < n) {
+ capacity_ *= 2;
+ }
+ size_t request = static_cast<size_t>(capacity_) * sizeof(T);
+ T* copy = static_cast<T*>(
+ base_internal::LowLevelAlloc::AllocWithArena(request, arena));
+ std::copy(ptr_, ptr_ + size_, copy);
+ Discard();
+ ptr_ = copy;
+ }
+
+ Vec(const Vec&) = delete;
+ Vec& operator=(const Vec&) = delete;
+};
+
+// A hash set of non-negative int32_t that uses Vec for its underlying storage.
+class NodeSet {
+ public:
+ NodeSet() { Init(); }
+
+ void clear() { Init(); }
+ bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }
+
+ bool insert(int32_t v) {
+ uint32_t i = FindIndex(v);
+ if (table_[i] == v) {
+ return false;
+ }
+ if (table_[i] == kEmpty) {
+ // Only inserting over an empty cell increases the number of occupied
+ // slots.
+ occupied_++;
+ }
+ table_[i] = v;
+ // Double when 75% full.
+ if (occupied_ >= table_.size() - table_.size()/4) Grow();
+ return true;
+ }
+
+ void erase(uint32_t v) {
+ uint32_t i = FindIndex(v);
+ if (static_cast<uint32_t>(table_[i]) == v) {
+ table_[i] = kDel;
+ }
+ }
+
+  // Iteration is done via HASH_FOR_EACH.
+ // Example:
+ // HASH_FOR_EACH(elem, node->out) { ... }
+#define HASH_FOR_EACH(elem, eset) \
+ for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
+ bool Next(int32_t* cursor, int32_t* elem) {
+ while (static_cast<uint32_t>(*cursor) < table_.size()) {
+ int32_t v = table_[*cursor];
+ (*cursor)++;
+ if (v >= 0) {
+ *elem = v;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private:
+ enum : int32_t { kEmpty = -1, kDel = -2 };
+ Vec<int32_t> table_;
+ uint32_t occupied_; // Count of non-empty slots (includes deleted slots)
+
+ static uint32_t Hash(uint32_t a) { return a * 41; }
+
+  // Return the index for storing v. May return an empty or deleted index.
+ int FindIndex(int32_t v) const {
+ // Search starting at hash index.
+ const uint32_t mask = table_.size() - 1;
+ uint32_t i = Hash(v) & mask;
+ int deleted_index = -1; // If >= 0, index of first deleted element we see
+ while (true) {
+ int32_t e = table_[i];
+ if (v == e) {
+ return i;
+ } else if (e == kEmpty) {
+ // Return any previously encountered deleted slot.
+ return (deleted_index >= 0) ? deleted_index : i;
+ } else if (e == kDel && deleted_index < 0) {
+ // Keep searching since v might be present later.
+ deleted_index = i;
+ }
+ i = (i + 1) & mask; // Linear probing; quadratic is slightly slower.
+ }
+ }
+
+ void Init() {
+ table_.clear();
+ table_.resize(kInline);
+ table_.fill(kEmpty);
+ occupied_ = 0;
+ }
+
+ void Grow() {
+ Vec<int32_t> copy;
+ copy.MoveFrom(&table_);
+ occupied_ = 0;
+ table_.resize(copy.size() * 2);
+ table_.fill(kEmpty);
+
+ for (const auto& e : copy) {
+ if (e >= 0) insert(e);
+ }
+ }
+
+ NodeSet(const NodeSet&) = delete;
+ NodeSet& operator=(const NodeSet&) = delete;
+};
+
+// We encode a node index and a node version in GraphId. The version
+// number is incremented when the GraphId is freed which automatically
+// invalidates all copies of the GraphId.
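+//
+// For example, MakeId(/*index=*/5, /*version=*/3) yields
+// handle == (uint64_t{3} << 32) | 5, from which NodeIndex() recovers 5
+// and NodeVersion() recovers 3.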
+
+inline GraphId MakeId(int32_t index, uint32_t version) {
+ GraphId g;
+ g.handle =
+ (static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
+ return g;
+}
+
+inline int32_t NodeIndex(GraphId id) {
+ return static_cast<uint32_t>(id.handle & 0xfffffffful);
+}
+
+inline uint32_t NodeVersion(GraphId id) {
+ return static_cast<uint32_t>(id.handle >> 32);
+}
+
+struct Node {
+ int32_t rank; // rank number assigned by Pearce-Kelly algorithm
+ uint32_t version; // Current version number
+ int32_t next_hash; // Next entry in hash table
+ bool visited; // Temporary marker used by depth-first-search
+ uintptr_t masked_ptr; // User-supplied pointer
+ NodeSet in; // List of immediate predecessor nodes in graph
+ NodeSet out; // List of immediate successor nodes in graph
+ int priority; // Priority of recorded stack trace.
+ int nstack; // Depth of recorded stack trace.
+ void* stack[40]; // stack[0,nstack-1] holds stack trace for node.
+};
+
+// Hash table for pointer to node index lookups.
+class PointerMap {
+ public:
+ explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
+ table_.fill(-1);
+ }
+
+ int32_t Find(void* ptr) {
+ auto masked = base_internal::HidePtr(ptr);
+ for (int32_t i = table_[Hash(ptr)]; i != -1;) {
+ Node* n = (*nodes_)[i];
+ if (n->masked_ptr == masked) return i;
+ i = n->next_hash;
+ }
+ return -1;
+ }
+
+ void Add(void* ptr, int32_t i) {
+ int32_t* head = &table_[Hash(ptr)];
+ (*nodes_)[i]->next_hash = *head;
+ *head = i;
+ }
+
+ int32_t Remove(void* ptr) {
+ // Advance through linked list while keeping track of the
+ // predecessor slot that points to the current entry.
+ auto masked = base_internal::HidePtr(ptr);
+ for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
+ int32_t index = *slot;
+ Node* n = (*nodes_)[index];
+ if (n->masked_ptr == masked) {
+ *slot = n->next_hash; // Remove n from linked list
+ n->next_hash = -1;
+ return index;
+ }
+ slot = &n->next_hash;
+ }
+ return -1;
+ }
+
+ private:
+ // Number of buckets in hash table for pointer lookups.
+ static constexpr uint32_t kHashTableSize = 8171; // should be prime
+
+ const Vec<Node*>* nodes_;
+ std::array<int32_t, kHashTableSize> table_;
+
+ static uint32_t Hash(void* ptr) {
+ return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
+ }
+};
+
+} // namespace
+
+struct GraphCycles::Rep {
+ Vec<Node*> nodes_;
+ Vec<int32_t> free_nodes_; // Indices for unused entries in nodes_
+ PointerMap ptrmap_;
+
+ // Temporary state.
+ Vec<int32_t> deltaf_; // Results of forward DFS
+ Vec<int32_t> deltab_; // Results of backward DFS
+ Vec<int32_t> list_; // All nodes to reprocess
+ Vec<int32_t> merged_; // Rank values to assign to list_ entries
+ Vec<int32_t> stack_; // Emulates recursion stack for depth-first searches
+
+ Rep() : ptrmap_(&nodes_) {}
+};
+
+static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
+ Node* n = rep->nodes_[NodeIndex(id)];
+ return (n->version == NodeVersion(id)) ? n : nullptr;
+}
+
+GraphCycles::GraphCycles() {
+ InitArenaIfNecessary();
+ rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
+ Rep;
+}
+
+GraphCycles::~GraphCycles() {
+ for (auto* node : rep_->nodes_) {
+ node->Node::~Node();
+ base_internal::LowLevelAlloc::Free(node);
+ }
+ rep_->Rep::~Rep();
+ base_internal::LowLevelAlloc::Free(rep_);
+}
+
+bool GraphCycles::CheckInvariants() const {
+ Rep* r = rep_;
+ NodeSet ranks; // Set of ranks seen so far.
+ for (uint32_t x = 0; x < r->nodes_.size(); x++) {
+ Node* nx = r->nodes_[x];
+ void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
+ if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
+ ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
+ }
+ if (nx->visited) {
+ ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
+ }
+ if (!ranks.insert(nx->rank)) {
+ ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
+ }
+ HASH_FOR_EACH(y, nx->out) {
+ Node* ny = r->nodes_[y];
+ if (nx->rank >= ny->rank) {
+ ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
+ nx->rank, ny->rank);
+ }
+ }
+ }
+ return true;
+}
+
+GraphId GraphCycles::GetId(void* ptr) {
+ int32_t i = rep_->ptrmap_.Find(ptr);
+ if (i != -1) {
+ return MakeId(i, rep_->nodes_[i]->version);
+ } else if (rep_->free_nodes_.empty()) {
+ Node* n =
+ new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
+ Node;
+ n->version = 1; // Avoid 0 since it is used by InvalidGraphId()
+ n->visited = false;
+ n->rank = rep_->nodes_.size();
+ n->masked_ptr = base_internal::HidePtr(ptr);
+ n->nstack = 0;
+ n->priority = 0;
+ rep_->nodes_.push_back(n);
+ rep_->ptrmap_.Add(ptr, n->rank);
+ return MakeId(n->rank, n->version);
+ } else {
+ // Preserve preceding rank since the set of ranks in use must be
+ // a permutation of [0,rep_->nodes_.size()-1].
+ int32_t r = rep_->free_nodes_.back();
+ rep_->free_nodes_.pop_back();
+ Node* n = rep_->nodes_[r];
+ n->masked_ptr = base_internal::HidePtr(ptr);
+ n->nstack = 0;
+ n->priority = 0;
+ rep_->ptrmap_.Add(ptr, r);
+ return MakeId(r, n->version);
+ }
+}
+
+void GraphCycles::RemoveNode(void* ptr) {
+ int32_t i = rep_->ptrmap_.Remove(ptr);
+ if (i == -1) {
+ return;
+ }
+ Node* x = rep_->nodes_[i];
+ HASH_FOR_EACH(y, x->out) {
+ rep_->nodes_[y]->in.erase(i);
+ }
+ HASH_FOR_EACH(y, x->in) {
+ rep_->nodes_[y]->out.erase(i);
+ }
+ x->in.clear();
+ x->out.clear();
+ x->masked_ptr = base_internal::HidePtr<void>(nullptr);
+ if (x->version == std::numeric_limits<uint32_t>::max()) {
+    // Cannot reuse x: incrementing its version would overflow, so retire
+    // the node instead of returning it to the free list.
+ } else {
+ x->version++; // Invalidates all copies of node.
+ rep_->free_nodes_.push_back(i);
+ }
+}
+
+void* GraphCycles::Ptr(GraphId id) {
+ Node* n = FindNode(rep_, id);
+ return n == nullptr ? nullptr
+ : base_internal::UnhidePtr<void>(n->masked_ptr);
+}
+
+bool GraphCycles::HasNode(GraphId node) {
+ return FindNode(rep_, node) != nullptr;
+}
+
+bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
+ Node* xn = FindNode(rep_, x);
+ return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
+}
+
+void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
+ Node* xn = FindNode(rep_, x);
+ Node* yn = FindNode(rep_, y);
+ if (xn && yn) {
+ xn->out.erase(NodeIndex(y));
+ yn->in.erase(NodeIndex(x));
+ // No need to update the rank assignment since a previous valid
+ // rank assignment remains valid after an edge deletion.
+ }
+}
+
+static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
+static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
+static void Reorder(GraphCycles::Rep* r);
+static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
+static void MoveToList(
+ GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);
+
+bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
+ Rep* r = rep_;
+ const int32_t x = NodeIndex(idx);
+ const int32_t y = NodeIndex(idy);
+ Node* nx = FindNode(r, idx);
+ Node* ny = FindNode(r, idy);
+ if (nx == nullptr || ny == nullptr) return true; // Expired ids
+
+ if (nx == ny) return false; // Self edge
+ if (!nx->out.insert(y)) {
+ // Edge already exists.
+ return true;
+ }
+
+ ny->in.insert(x);
+
+ if (nx->rank <= ny->rank) {
+ // New edge is consistent with existing rank assignment.
+ return true;
+ }
+
+ // Current rank assignments are incompatible with the new edge. Recompute.
+ // We only need to consider nodes that fall in the range [ny->rank,nx->rank].
+ if (!ForwardDFS(r, y, nx->rank)) {
+ // Found a cycle. Undo the insertion and tell caller.
+ nx->out.erase(y);
+ ny->in.erase(x);
+ // Since we do not call Reorder() on this path, clear any visited
+ // markers left by ForwardDFS.
+ for (const auto& d : r->deltaf_) {
+ r->nodes_[d]->visited = false;
+ }
+ return false;
+ }
+ BackwardDFS(r, x, ny->rank);
+ Reorder(r);
+ return true;
+}
+
+static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
+ // Avoid recursion since stack space might be limited.
+ // We instead keep a stack of nodes to visit.
+ r->deltaf_.clear();
+ r->stack_.clear();
+ r->stack_.push_back(n);
+ while (!r->stack_.empty()) {
+ n = r->stack_.back();
+ r->stack_.pop_back();
+ Node* nn = r->nodes_[n];
+ if (nn->visited) continue;
+
+ nn->visited = true;
+ r->deltaf_.push_back(n);
+
+ HASH_FOR_EACH(w, nn->out) {
+ Node* nw = r->nodes_[w];
+ if (nw->rank == upper_bound) {
+ return false; // Cycle
+ }
+ if (!nw->visited && nw->rank < upper_bound) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+ return true;
+}
+
+static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
+ r->deltab_.clear();
+ r->stack_.clear();
+ r->stack_.push_back(n);
+ while (!r->stack_.empty()) {
+ n = r->stack_.back();
+ r->stack_.pop_back();
+ Node* nn = r->nodes_[n];
+ if (nn->visited) continue;
+
+ nn->visited = true;
+ r->deltab_.push_back(n);
+
+ HASH_FOR_EACH(w, nn->in) {
+ Node* nw = r->nodes_[w];
+ if (!nw->visited && lower_bound < nw->rank) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+}
+
+static void Reorder(GraphCycles::Rep* r) {
+ Sort(r->nodes_, &r->deltab_);
+ Sort(r->nodes_, &r->deltaf_);
+
+ // Adds contents of delta lists to list_ (backwards deltas first).
+ r->list_.clear();
+ MoveToList(r, &r->deltab_, &r->list_);
+ MoveToList(r, &r->deltaf_, &r->list_);
+
+ // Produce sorted list of all ranks that will be reassigned.
+ r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
+ std::merge(r->deltab_.begin(), r->deltab_.end(),
+ r->deltaf_.begin(), r->deltaf_.end(),
+ r->merged_.begin());
+
+ // Assign the ranks in order to the collected list.
+ for (uint32_t i = 0; i < r->list_.size(); i++) {
+ r->nodes_[r->list_[i]]->rank = r->merged_[i];
+ }
+}
+
+static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
+ struct ByRank {
+ const Vec<Node*>* nodes;
+ bool operator()(int32_t a, int32_t b) const {
+ return (*nodes)[a]->rank < (*nodes)[b]->rank;
+ }
+ };
+ ByRank cmp;
+ cmp.nodes = &nodes;
+ std::sort(delta->begin(), delta->end(), cmp);
+}
+
+static void MoveToList(
+ GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
+ for (auto& v : *src) {
+ int32_t w = v;
+ v = r->nodes_[w]->rank; // Replace v entry with its rank
+ r->nodes_[w]->visited = false; // Prepare for future DFS calls
+ dst->push_back(w);
+ }
+}
+
+int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
+ GraphId path[]) const {
+ Rep* r = rep_;
+ if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
+ const int32_t x = NodeIndex(idx);
+ const int32_t y = NodeIndex(idy);
+
+ // Forward depth first search starting at x until we hit y.
+ // As we descend into a node, we push it onto the path.
+ // As we leave a node, we remove it from the path.
+ int path_len = 0;
+
+ NodeSet seen;
+ r->stack_.clear();
+ r->stack_.push_back(x);
+ while (!r->stack_.empty()) {
+ int32_t n = r->stack_.back();
+ r->stack_.pop_back();
+ if (n < 0) {
+ // Marker to indicate that we are leaving a node
+ path_len--;
+ continue;
+ }
+
+ if (path_len < max_path_len) {
+ path[path_len] = MakeId(n, rep_->nodes_[n]->version);
+ }
+ path_len++;
+ r->stack_.push_back(-1); // Will remove tentative path entry
+
+ if (n == y) {
+ return path_len;
+ }
+
+ HASH_FOR_EACH(w, r->nodes_[n]->out) {
+ if (seen.insert(w)) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+
+ return 0;
+}
+
+bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
+ return FindPath(x, y, 0, nullptr) > 0;
+}
+
+void GraphCycles::UpdateStackTrace(GraphId id, int priority,
+ int (*get_stack_trace)(void** stack, int)) {
+ Node* n = FindNode(rep_, id);
+ if (n == nullptr || n->priority >= priority) {
+ return;
+ }
+ n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
+ n->priority = priority;
+}
+
+int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
+ Node* n = FindNode(rep_, id);
+ if (n == nullptr) {
+ *ptr = nullptr;
+ return 0;
+ } else {
+ *ptr = n->stack;
+ return n->nstack;
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.h
new file mode 100644
index 00000000000..eaf130bc297
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.h
@@ -0,0 +1,141 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
+
+// GraphCycles detects the introduction of a cycle into a directed
+// graph that is being built up incrementally.
+//
+// Nodes are identified by small integers. It is not possible to
+// record multiple edges with the same (source, destination) pair;
+// requests to add an edge where one already exists are silently
+// ignored.
+//
+// It is also not possible to introduce a cycle; an attempt to insert
+// an edge that would introduce a cycle fails and returns false.
+//
+// GraphCycles uses no internal locking; calls into it should be
+// serialized externally.
+
+// Performance considerations:
+// Works well on sparse graphs, poorly on dense graphs.
+// Extra information is maintained incrementally to detect cycles quickly.
+// InsertEdge() is very fast when the edge already exists, and reasonably fast
+// otherwise.
+// FindPath() is linear in the size of the graph.
+// The current implementation uses O(|V|+|E|) space.
+
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// Opaque identifier for a graph node.
+struct GraphId {
+ uint64_t handle;
+
+ bool operator==(const GraphId& x) const { return handle == x.handle; }
+ bool operator!=(const GraphId& x) const { return handle != x.handle; }
+};
+
+// Return an invalid graph id that will never be assigned by GraphCycles.
+inline GraphId InvalidGraphId() {
+ return GraphId{0};
+}
+
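+// Typical (illustrative) use for deadlock-style cycle detection; `mu_a` and
+// `mu_b` are placeholder pointers, not part of this API:
+//
+//   GraphCycles g;
+//   GraphId a = g.GetId(mu_a);
+//   GraphId b = g.GetId(mu_b);
+//   g.InsertEdge(a, b);         // returns true: no cycle yet
+//   if (!g.InsertEdge(b, a)) {  // would close the cycle a->b->a
+//     // report the potential deadlock between mu_a and mu_b
+//   }
+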
+class GraphCycles {
+ public:
+ GraphCycles();
+ ~GraphCycles();
+
+ // Return the id to use for ptr, assigning one if necessary.
+ // Subsequent calls with the same ptr value will return the same id
+ // until Remove().
+ GraphId GetId(void* ptr);
+
+ // Remove "ptr" from the graph. Its corresponding node and all
+ // edges to and from it are removed.
+ void RemoveNode(void* ptr);
+
+ // Return the pointer associated with id, or nullptr if id is not
+ // currently in the graph.
+ void* Ptr(GraphId id);
+
+ // Attempt to insert an edge from source_node to dest_node. If the
+ // edge would introduce a cycle, return false without making any
+ // changes. Otherwise add the edge and return true.
+ bool InsertEdge(GraphId source_node, GraphId dest_node);
+
+ // Remove any edge that exists from source_node to dest_node.
+ void RemoveEdge(GraphId source_node, GraphId dest_node);
+
+ // Return whether node exists in the graph.
+ bool HasNode(GraphId node);
+
+ // Return whether there is an edge directly from source_node to dest_node.
+ bool HasEdge(GraphId source_node, GraphId dest_node) const;
+
+ // Return whether dest_node is reachable from source_node
+ // by following edges.
+ bool IsReachable(GraphId source_node, GraphId dest_node) const;
+
+ // Find a path from "source" to "dest". If such a path exists,
+ // place the nodes on the path in the array path[], and return
+ // the number of nodes on the path. If the path is longer than
+ // max_path_len nodes, only the first max_path_len nodes are placed
+ // in path[]. The client should compare the return value with
+ // max_path_len" to see when this occurs. If no path exists, return
+ // 0. Any valid path stored in path[] will start with "source" and
+ // end with "dest". There is no guarantee that the path is the
+ // shortest, but no node will appear twice in the path, except the
+ // source and destination node if they are identical; therefore, the
+ // return value is at most one greater than the number of nodes in
+ // the graph.
+ int FindPath(GraphId source, GraphId dest, int max_path_len,
+ GraphId path[]) const;
+
+ // Update the stack trace recorded for id with the current stack
+ // trace if the last time it was updated had a smaller priority
+ // than the priority passed on this call.
+ //
+ // *get_stack_trace is called to get the stack trace.
+ void UpdateStackTrace(GraphId id, int priority,
+ int (*get_stack_trace)(void**, int));
+
+ // Set *ptr to the beginning of the array that holds the recorded
+ // stack trace for id and return the depth of the stack trace.
+ int GetStackTrace(GraphId id, void*** ptr);
+
+ // Check internal invariants. Crashes on failure, returns true on success.
+ // Expensive: should only be called from graphcycles_test.cc.
+ bool CheckInvariants() const;
+
+ // ----------------------------------------------------
+ struct Rep;
+ private:
+ Rep *rep_; // opaque representation
+ GraphCycles(const GraphCycles&) = delete;
+ GraphCycles& operator=(const GraphCycles&) = delete;
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif  // ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h
new file mode 100644
index 00000000000..ee4078702de
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h
@@ -0,0 +1,156 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// An optional absolute timeout, with nanosecond granularity,
+// compatible with y_absl::Time. Suitable for in-register
+// parameter-passing (e.g., syscalls).
+// Constructible from a y_absl::Time (for a timeout to be respected) or {}
+// (for "no timeout").
+// This is a private low-level API for use by a handful of low-level
+// components that are friends of this class. Higher-level components
+// should build APIs based on y_absl::Time and y_absl::Duration.
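+//
+// For example (illustrative):
+//
+//   KernelTimeout t(y_absl::Now() + y_absl::Seconds(5));  // expires in ~5s
+//   KernelTimeout never;                                  // no timeout
+//   if (t.has_timeout()) {
+//     struct timespec ts = t.MakeAbsTimespec();  // pass to futex/sem_timedwait
+//   }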
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
+
+#include <time.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/time/clock.h"
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+class Futex;
+class Waiter;
+
+class KernelTimeout {
+ public:
+ // A timeout that should expire at <t>. Any value, in the full
+ // InfinitePast() to InfiniteFuture() range, is valid here and will be
+ // respected.
+ explicit KernelTimeout(y_absl::Time t) : ns_(MakeNs(t)) {}
+ // No timeout.
+ KernelTimeout() : ns_(0) {}
+
+ // A more explicit factory for those who prefer it. Equivalent to {}.
+ static KernelTimeout Never() { return {}; }
+
+ // We explicitly do not support other custom formats: timespec, int64_t nanos.
+ // Unify on this and y_absl::Time, please.
+
+ bool has_timeout() const { return ns_ != 0; }
+
+ // Convert to parameter for sem_timedwait/futex/similar. Only for approved
+  // users. Do not call if !has_timeout().
+ struct timespec MakeAbsTimespec();
+
+ private:
+ // internal rep, not user visible: ns after unix epoch.
+ // zero = no timeout.
+  // We treat negative values as unlikely (and certainly expired!) but
+  // valid timeouts.
+ int64_t ns_;
+
+ static int64_t MakeNs(y_absl::Time t) {
+ // optimization--InfiniteFuture is common "no timeout" value
+ // and cheaper to compare than convert.
+ if (t == y_absl::InfiniteFuture()) return 0;
+ int64_t x = ToUnixNanos(t);
+
+ // A timeout that lands exactly on the epoch (x=0) needs to be respected,
+    // so we alter it unnoticeably to 1. Negative timeouts are in
+    // theory supported, but handled poorly by the kernel (long
+    // delays), so we push them forward too; since all such times have
+    // already passed, the change is indistinguishable.
+ if (x <= 0) x = 1;
+ // A time larger than what can be represented to the kernel is treated
+ // as no timeout.
+ if (x == (std::numeric_limits<int64_t>::max)()) x = 0;
+ return x;
+ }
+
+#ifdef _WIN32
+ // Converts to milliseconds from now, or INFINITE when
+ // !has_timeout(). For use by SleepConditionVariableSRW on
+ // Windows. Callers should recognize that the return value is a
+ // relative duration (it should be recomputed by calling this method
+ // in the case of a spurious wakeup).
+  // This header file may be included transitively by public header files,
+  // so we define our own DWord and kInfinite rather than pulling DWORD and
+  // INFINITE in from <intsafe.h> and <WinBase.h>.
+ typedef unsigned long DWord; // NOLINT
+ DWord InMillisecondsFromNow() const {
+ constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)();
+ if (!has_timeout()) {
+ return kInfinite;
+ }
+ // The use of y_absl::Now() to convert from absolute time to
+ // relative time means that y_absl::Now() cannot use anything that
+ // depends on KernelTimeout (for example, Mutex) on Windows.
+ int64_t now = ToUnixNanos(y_absl::Now());
+ if (ns_ >= now) {
+ // Round up so that Now() + ms_from_now >= ns_.
+ constexpr uint64_t max_nanos =
+ (std::numeric_limits<int64_t>::max)() - 999999u;
+ uint64_t ms_from_now =
+ (std::min<uint64_t>(max_nanos, ns_ - now) + 999999u) / 1000000u;
+ if (ms_from_now > kInfinite) {
+ return kInfinite;
+ }
+ return static_cast<DWord>(ms_from_now);
+ }
+ return 0;
+ }
+#endif
+
+ friend class Futex;
+ friend class Waiter;
+};
+
+inline struct timespec KernelTimeout::MakeAbsTimespec() {
+ int64_t n = ns_;
+ static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
+ if (n == 0) {
+ ABSL_RAW_LOG(
+ ERROR, "Tried to create a timespec from a non-timeout; never do this.");
+ // But we'll try to continue sanely. no-timeout ~= saturated timeout.
+ n = (std::numeric_limits<int64_t>::max)();
+ }
+
+ // Kernel APIs validate timespecs as being at or after the epoch,
+ // despite the kernel time type being signed. However, no one can
+ // tell the difference between a timeout at or before the epoch (since
+  // all such timeouts have expired!).
+ if (n < 0) n = 0;
+
+ struct timespec abstime;
+ int64_t seconds = (std::min)(n / kNanosPerSecond,
+ int64_t{(std::numeric_limits<time_t>::max)()});
+ abstime.tv_sec = static_cast<time_t>(seconds);
+ abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
+ return abstime;
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
new file mode 100644
index 00000000000..3e40e812a9a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
@@ -0,0 +1,106 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "y_absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include "y_absl/synchronization/internal/per_thread_sem.h"
+
+#include <atomic>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/synchronization/internal/waiter.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
+ base_internal::ThreadIdentity *identity;
+ identity = GetOrCreateCurrentThreadIdentity();
+ identity->blocked_count_ptr = counter;
+}
+
+std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
+ base_internal::ThreadIdentity *identity;
+ identity = GetOrCreateCurrentThreadIdentity();
+ return identity->blocked_count_ptr;
+}
+
+void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
+ new (Waiter::GetWaiter(identity)) Waiter();
+ identity->ticker.store(0, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+}
+
+void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) {
+ Waiter::GetWaiter(identity)->~Waiter();
+}
+
+void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
+ const int ticker =
+ identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
+    // Wake up the waiting thread, since it is time for it to become idle.
+ Waiter::GetWaiter(identity)->Poke();
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
+ y_absl::base_internal::ThreadIdentity *identity) {
+ y_absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
+}
+
+ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
+ y_absl::synchronization_internal::KernelTimeout t) {
+ bool timeout = false;
+ y_absl::base_internal::ThreadIdentity *identity;
+ identity = y_absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
+
+ // Ensure wait_start != 0.
+ int ticker = identity->ticker.load(std::memory_order_relaxed);
+ identity->wait_start.store(ticker ? ticker : 1, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+
+ if (identity->blocked_count_ptr != nullptr) {
+ // Increment count of threads blocked in a given thread pool.
+ identity->blocked_count_ptr->fetch_add(1, std::memory_order_relaxed);
+ }
+
+ timeout =
+ !y_absl::synchronization_internal::Waiter::GetWaiter(identity)->Wait(t);
+
+ if (identity->blocked_count_ptr != nullptr) {
+ identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
+ }
+
+ identity->is_idle.store(false, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ return !timeout;
+}
+
+} // extern "C"
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
new file mode 100644
index 00000000000..2fc39ca130e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
@@ -0,0 +1,115 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// PerThreadSem is a low-level synchronization primitive controlling the
+// runnability of a single thread, used internally by Mutex and CondVar.
+//
+// This is NOT a general-purpose synchronization mechanism, and should not be
+// used directly by applications. Applications should use Mutex and CondVar.
+//
+// The semantics of PerThreadSem are the same as those of a counting semaphore.
+// Each thread maintains an abstract "count" value associated with its identity.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
+
+#include <atomic>
+
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/synchronization/internal/create_thread_identity.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+class Mutex;
+
+namespace synchronization_internal {
+
+class PerThreadSem {
+ public:
+ PerThreadSem() = delete;
+ PerThreadSem(const PerThreadSem&) = delete;
+ PerThreadSem& operator=(const PerThreadSem&) = delete;
+
+ // Routine invoked periodically (once a second) by a background thread.
+ // Has no effect on user-visible state.
+ static void Tick(base_internal::ThreadIdentity* identity);
+
+ // ---------------------------------------------------------------------------
+ // Routines used by autosizing threadpools to detect when threads are
+ // blocked. Each thread has a counter pointer, initially zero. If non-zero,
+ // the implementation atomically increments the counter when it blocks on a
+  // semaphore, and decrements it again when it wakes. This allows a threadpool
+ // to keep track of how many of its threads are blocked.
+ // SetThreadBlockedCounter() should be used only by threadpool
+ // implementations. GetThreadBlockedCounter() should be used by modules that
+ // block threads; if the pointer returned is non-zero, the location should be
+  // incremented before the thread blocks, and decremented after it wakes.
+  // (An illustrative usage sketch follows this class definition.)
+ static void SetThreadBlockedCounter(std::atomic<int> *counter);
+ static std::atomic<int> *GetThreadBlockedCounter();
+
+ private:
+ // Create the PerThreadSem associated with "identity". Initializes count=0.
+ // REQUIRES: May only be called by ThreadIdentity.
+ static void Init(base_internal::ThreadIdentity* identity);
+
+ // Destroy the PerThreadSem associated with "identity".
+ // REQUIRES: May only be called by ThreadIdentity.
+ static void Destroy(base_internal::ThreadIdentity* identity);
+
+ // Increments "identity"'s count.
+ static inline void Post(base_internal::ThreadIdentity* identity);
+
+ // Waits until either our count > 0 or t has expired.
+ // If count > 0, decrements count and returns true. Otherwise returns false.
+ // !t.has_timeout() => Wait(t) will return true.
+ static inline bool Wait(KernelTimeout t);
+
+ // Permitted callers.
+ friend class PerThreadSemTest;
+ friend class y_absl::Mutex;
+ friend y_absl::base_internal::ThreadIdentity* CreateThreadIdentity();
+ friend void ReclaimThreadIdentity(void* v);
+};
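+
+// Example (illustrative sketch only, with a hypothetical RunWorkItems()):
+// an autosizing threadpool might publish its blocked count like so:
+//
+//   std::atomic<int> num_blocked{0};   // read by the pool's sizing logic
+//   void WorkerBody() {                // body of each pool thread
+//     PerThreadSem::SetThreadBlockedCounter(&num_blocked);
+//     RunWorkItems();  // num_blocked now tracks this thread's blocking
+//   }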
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
+ y_absl::base_internal::ThreadIdentity* identity);
+bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
+ y_absl::synchronization_internal::KernelTimeout t);
+} // extern "C"
+
+void y_absl::synchronization_internal::PerThreadSem::Post(
+ y_absl::base_internal::ThreadIdentity* identity) {
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
+}
+
+bool y_absl::synchronization_internal::PerThreadSem::Wait(
+ y_absl::synchronization_internal::KernelTimeout t) {
+ return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
+}
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/thread_pool.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/thread_pool.h
new file mode 100644
index 00000000000..8b8912d4946
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/thread_pool.h
@@ -0,0 +1,93 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
+
+#include <cassert>
+#include <cstddef>
+#include <functional>
+#include <queue>
+#include <thread> // NOLINT(build/c++11)
+#include <vector>
+
+#include "y_absl/base/thread_annotations.h"
+#include "y_absl/synchronization/mutex.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// A simple ThreadPool implementation for tests.
+class ThreadPool {
+ public:
+ explicit ThreadPool(int num_threads) {
+ for (int i = 0; i < num_threads; ++i) {
+ threads_.push_back(std::thread(&ThreadPool::WorkLoop, this));
+ }
+ }
+
+ ThreadPool(const ThreadPool &) = delete;
+ ThreadPool &operator=(const ThreadPool &) = delete;
+
+ ~ThreadPool() {
+ {
+ y_absl::MutexLock l(&mu_);
+ for (size_t i = 0; i < threads_.size(); i++) {
+ queue_.push(nullptr); // Shutdown signal.
+ }
+ }
+ for (auto &t : threads_) {
+ t.join();
+ }
+ }
+
+ // Schedule a function to be run on a ThreadPool thread immediately.
+ void Schedule(std::function<void()> func) {
+ assert(func != nullptr);
+ y_absl::MutexLock l(&mu_);
+ queue_.push(std::move(func));
+ }
+
+ private:
+ bool WorkAvailable() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ return !queue_.empty();
+ }
+
+ void WorkLoop() {
+ while (true) {
+ std::function<void()> func;
+ {
+ y_absl::MutexLock l(&mu_);
+ mu_.Await(y_absl::Condition(this, &ThreadPool::WorkAvailable));
+ func = std::move(queue_.front());
+ queue_.pop();
+ }
+ if (func == nullptr) { // Shutdown signal.
+ break;
+ }
+ func();
+ }
+ }
+
+ y_absl::Mutex mu_;
+ std::queue<std::function<void()>> queue_ ABSL_GUARDED_BY(mu_);
+ std::vector<std::thread> threads_;
+};
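+
+// Example (illustrative sketch, assuming y_absl::Notification from
+// "y_absl/synchronization/notification.h"):
+//
+//   ThreadPool pool(4);
+//   y_absl::Notification done;
+//   pool.Schedule([&done]() { done.Notify(); });
+//   done.WaitForNotification();  // the lambda ran on one of the 4 threads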
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
new file mode 100644
index 00000000000..573ce9c6c54
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
@@ -0,0 +1,428 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/internal/waiter.h"
+
+#include "y_absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#endif
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <new>
+#include <type_traits>
+
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+static void MaybeBecomeIdle() {
+ base_internal::ThreadIdentity *identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ assert(identity != nullptr);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ const int ticker = identity->ticker.load(std::memory_order_relaxed);
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
+ identity->is_idle.store(true, std::memory_order_relaxed);
+ }
+}
+
+#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
+
+Waiter::Waiter() {
+ futex_.store(0, std::memory_order_relaxed);
+}
+
+Waiter::~Waiter() = default;
+
+bool Waiter::Wait(KernelTimeout t) {
+ // Loop until we can atomically decrement futex from a positive
+ // value, waiting on a futex while we believe it is zero.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+
+ while (true) {
+ int32_t x = futex_.load(std::memory_order_relaxed);
+ while (x != 0) {
+ if (!futex_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ return true; // Consumed a wakeup, we are done.
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ const int err = Futex::WaitUntil(&futex_, 0, t);
+ if (err != 0) {
+ if (err == -EINTR || err == -EWOULDBLOCK) {
+ // Do nothing, the loop will retry.
+ } else if (err == -ETIMEDOUT) {
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void Waiter::Post() {
+ if (futex_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void Waiter::Poke() {
+ // Wake one thread waiting on the futex.
+ const int err = Futex::Wake(&futex_, 1);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+}
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+
+class PthreadMutexHolder {
+ public:
+ explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
+ const int err = pthread_mutex_lock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
+ }
+ }
+
+ PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
+ PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
+
+ ~PthreadMutexHolder() {
+ const int err = pthread_mutex_unlock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
+ }
+ }
+
+ private:
+ pthread_mutex_t *mu_;
+};
+
+Waiter::Waiter() {
+ const int err = pthread_mutex_init(&mu_, 0);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
+ }
+
+ const int err2 = pthread_cond_init(&cv_, 0);
+ if (err2 != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
+ }
+
+ waiter_count_ = 0;
+ wakeup_count_ = 0;
+}
+
+Waiter::~Waiter() {
+ const int err = pthread_mutex_destroy(&mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err);
+ }
+
+ const int err2 = pthread_cond_destroy(&cv_);
+ if (err2 != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2);
+ }
+}
+
+bool Waiter::Wait(KernelTimeout t) {
+ struct timespec abs_timeout;
+ if (t.has_timeout()) {
+ abs_timeout = t.MakeAbsTimespec();
+ }
+
+ PthreadMutexHolder h(&mu_);
+ ++waiter_count_;
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!t.has_timeout()) {
+ const int err = pthread_cond_wait(&cv_, &mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
+ }
+ } else {
+ const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
+ if (err == ETIMEDOUT) {
+ --waiter_count_;
+ return false;
+ }
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void Waiter::Post() {
+ PthreadMutexHolder h(&mu_);
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void Waiter::Poke() {
+ PthreadMutexHolder h(&mu_);
+ InternalCondVarPoke();
+}
+
+void Waiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ const int err = pthread_cond_signal(&cv_);
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
+ }
+ }
+}
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
+
+Waiter::Waiter() {
+ if (sem_init(&sem_, 0, 0) != 0) {
+ ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
+ }
+ wakeups_.store(0, std::memory_order_relaxed);
+}
+
+Waiter::~Waiter() {
+ if (sem_destroy(&sem_) != 0) {
+ ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno);
+ }
+}
+
+bool Waiter::Wait(KernelTimeout t) {
+ struct timespec abs_timeout;
+ if (t.has_timeout()) {
+ abs_timeout = t.MakeAbsTimespec();
+ }
+
+ // Loop until we timeout or consume a wakeup.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (true) {
+ int x = wakeups_.load(std::memory_order_relaxed);
+ while (x != 0) {
+ if (!wakeups_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ // Successfully consumed a wakeup, we're done.
+ return true;
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ // Nothing to consume, wait (looping on EINTR).
+ while (true) {
+ if (!t.has_timeout()) {
+ if (sem_wait(&sem_) == 0) break;
+ if (errno == EINTR) continue;
+ ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
+ } else {
+ if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
+ if (errno == EINTR) continue;
+ if (errno == ETIMEDOUT) return false;
+ ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void Waiter::Post() {
+ // Post a wakeup.
+ if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void Waiter::Poke() {
+ if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
+ ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
+ }
+}
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
+
+class Waiter::WinHelper {
+ public:
+ static SRWLOCK *GetLock(Waiter *w) {
+ return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
+ }
+
+ static CONDITION_VARIABLE *GetCond(Waiter *w) {
+ return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
+ }
+
+ static_assert(sizeof(SRWLOCK) == sizeof(void *),
+ "`mu_storage_` does not have the same size as SRWLOCK");
+ static_assert(alignof(SRWLOCK) == alignof(void *),
+ "`mu_storage_` does not have the same alignment as SRWLOCK");
+
+  static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
+                "`cv_storage_` does not have the same size "
+                "as `CONDITION_VARIABLE`");
+ static_assert(
+ alignof(CONDITION_VARIABLE) == alignof(void *),
+ "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
+
+ // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
+ // and destructible because we never call their constructors or destructors.
+ static_assert(std::is_trivially_constructible<SRWLOCK>::value,
+ "The `SRWLOCK` type must be trivially constructible");
+ static_assert(
+ std::is_trivially_constructible<CONDITION_VARIABLE>::value,
+ "The `CONDITION_VARIABLE` type must be trivially constructible");
+ static_assert(std::is_trivially_destructible<SRWLOCK>::value,
+ "The `SRWLOCK` type must be trivially destructible");
+ static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
+ "The `CONDITION_VARIABLE` type must be trivially destructible");
+};
+
+class LockHolder {
+ public:
+ explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
+ AcquireSRWLockExclusive(mu_);
+ }
+
+ LockHolder(const LockHolder&) = delete;
+ LockHolder& operator=(const LockHolder&) = delete;
+
+ ~LockHolder() {
+ ReleaseSRWLockExclusive(mu_);
+ }
+
+ private:
+ SRWLOCK* mu_;
+};
+
+Waiter::Waiter() {
+ auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
+ auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
+ InitializeSRWLock(mu);
+ InitializeConditionVariable(cv);
+ waiter_count_ = 0;
+ wakeup_count_ = 0;
+}
+
+// SRW locks and condition variables do not need to be explicitly destroyed.
+// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
+// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
+Waiter::~Waiter() = default;
+
+bool Waiter::Wait(KernelTimeout t) {
+ SRWLOCK *mu = WinHelper::GetLock(this);
+ CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
+
+ LockHolder h(mu);
+ ++waiter_count_;
+
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
+ // GetLastError() returns a Win32 DWORD, but we assign to
+ // unsigned long to simplify the ABSL_RAW_LOG case below. The uniform
+ // initialization guarantees this is not a narrowing conversion.
+ const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
+ if (err == ERROR_TIMEOUT) {
+ --waiter_count_;
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void Waiter::Post() {
+ LockHolder h(WinHelper::GetLock(this));
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void Waiter::Poke() {
+ LockHolder h(WinHelper::GetLock(this));
+ InternalCondVarPoke();
+}
+
+void Waiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ WakeConditionVariable(WinHelper::GetCond(this));
+ }
+}
+
+#else
+#error Unknown ABSL_WAITER_MODE
+#endif
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
new file mode 100644
index 00000000000..e6fede411f2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
@@ -0,0 +1,155 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
+
+#include "y_absl/base/config.h"
+
+#ifdef _WIN32
+#include <sdkddkver.h>
+#else
+#include <pthread.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#endif
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/synchronization/internal/futex.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
+#define ABSL_WAITER_MODE_FUTEX 0
+#define ABSL_WAITER_MODE_SEM 1
+#define ABSL_WAITER_MODE_CONDVAR 2
+#define ABSL_WAITER_MODE_WIN32 3
+
+#if defined(ABSL_FORCE_WAITER_MODE)
+#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
+#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
+#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
+#elif defined(ABSL_HAVE_SEMAPHORE_H)
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
+#else
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
+#endif
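+
+// For example, compiling every translation unit with
+// -DABSL_FORCE_WAITER_MODE=2 would select the pthread condition-variable
+// implementation regardless of the platform detection above (illustrative;
+// the mode indices are the #defines at the top of this block).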
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// Waiter is an OS-specific semaphore.
+class Waiter {
+ public:
+ // Prepare any data to track waits.
+ Waiter();
+
+ // Not copyable or movable
+ Waiter(const Waiter&) = delete;
+ Waiter& operator=(const Waiter&) = delete;
+
+ // Destroy any data to track waits.
+ ~Waiter();
+
+  // Blocks the calling thread until a matching call to `Post()` occurs
+  // or `t` has passed. Returns `true` if woken (`Post()` called),
+  // `false` on timeout.
+ bool Wait(KernelTimeout t);
+
+ // Restart the caller of `Wait()` as with a normal semaphore.
+ void Post();
+
+ // If anyone is waiting, wake them up temporarily and cause them to
+ // call `MaybeBecomeIdle()`. They will then return to waiting for a
+ // `Post()` or timeout.
+ void Poke();
+
+ // Returns the Waiter associated with the identity.
+ static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
+ static_assert(
+ sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
+ "Insufficient space for Waiter");
+ return reinterpret_cast<Waiter*>(identity->waiter_state.data);
+ }
+
+ // How many periods to remain idle before releasing resources
+#ifndef ABSL_HAVE_THREAD_SANITIZER
+ static constexpr int kIdlePeriods = 60;
+#else
+ // Memory consumption under ThreadSanitizer is a serious concern,
+ // so we release resources sooner. The value of 1 leads to 1 to 2 second
+ // delay before marking a thread as idle.
+ static const int kIdlePeriods = 1;
+#endif
+
+ private:
+#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
+ // Futexes are defined by specification to be 32-bits.
+ // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
+ std::atomic<int32_t> futex_;
+ static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+ // REQUIRES: mu_ must be held.
+ void InternalCondVarPoke();
+
+ pthread_mutex_t mu_;
+ pthread_cond_t cv_;
+ int waiter_count_;
+ int wakeup_count_; // Unclaimed wakeups.
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
+ sem_t sem_;
+ // This seems superfluous, but for Poke() we need to cause spurious
+ // wakeups on the semaphore. Hence we can't actually use the
+ // semaphore's count.
+ std::atomic<int> wakeups_;
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
+ // WinHelper - Used to define utilities for accessing the lock and
+ // condition variable storage once the types are complete.
+ class WinHelper;
+
+ // REQUIRES: WinHelper::GetLock(this) must be held.
+ void InternalCondVarPoke();
+
+  // We can't include Windows.h in our headers, so we use aligned character
+ // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
+ alignas(void*) unsigned char mu_storage_[sizeof(void*)];
+ alignas(void*) unsigned char cv_storage_[sizeof(void*)];
+ int waiter_count_;
+ int wakeup_count_;
+
+#else
+ #error Unknown ABSL_WAITER_MODE
+#endif
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/ya.make
new file mode 100644
index 00000000000..167e710a78a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/ya.make
@@ -0,0 +1,32 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ graphcycles.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
new file mode 100644
index 00000000000..babe5c4ada4
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
@@ -0,0 +1,2751 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/mutex.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#ifdef ERROR
+#undef ERROR
+#endif
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/time.h>
+#endif
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cinttypes>
+#include <thread> // NOLINT(build/c++11)
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/call_once.h"
+#include "y_absl/base/config.h"
+#include "y_absl/base/dynamic_annotations.h"
+#include "y_absl/base/internal/atomic_hook.h"
+#include "y_absl/base/internal/cycleclock.h"
+#include "y_absl/base/internal/hide_ptr.h"
+#include "y_absl/base/internal/low_level_alloc.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/spinlock.h"
+#include "y_absl/base/internal/sysinfo.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/base/internal/tsan_mutex_interface.h"
+#include "y_absl/base/port.h"
+#include "y_absl/debugging/stacktrace.h"
+#include "y_absl/debugging/symbolize.h"
+#include "y_absl/synchronization/internal/graphcycles.h"
+#include "y_absl/synchronization/internal/per_thread_sem.h"
+#include "y_absl/time/time.h"
+
+using y_absl::base_internal::CurrentThreadIdentityIfPresent;
+using y_absl::base_internal::PerThreadSynch;
+using y_absl::base_internal::SchedulingGuard;
+using y_absl::base_internal::ThreadIdentity;
+using y_absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
+using y_absl::synchronization_internal::GraphCycles;
+using y_absl::synchronization_internal::GraphId;
+using y_absl::synchronization_internal::InvalidGraphId;
+using y_absl::synchronization_internal::KernelTimeout;
+using y_absl::synchronization_internal::PerThreadSem;
+
+extern "C" {
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
+ std::this_thread::yield();
+}
+} // extern "C"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace {
+
+#if defined(ABSL_HAVE_THREAD_SANITIZER)
+constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
+#else
+constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
+#endif
+
+ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
+ kDeadlockDetectionDefault);
+ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+y_absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
+ submit_profile_data;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES y_absl::base_internal::AtomicHook<void (*)(
+ const char *msg, const void *obj, int64_t wait_cycles)>
+ mutex_tracer;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ y_absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
+ cond_var_tracer;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES y_absl::base_internal::AtomicHook<
+ bool (*)(const void *pc, char *out, int out_size)>
+ symbolizer(y_absl::Symbolize);
+
+} // namespace
+
+static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+ bool locking, bool trylock,
+ bool read_lock);
+
+void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
+ submit_profile_data.Store(fn);
+}
+
+void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+ int64_t wait_cycles)) {
+ mutex_tracer.Store(fn);
+}
+
+void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
+ cond_var_tracer.Store(fn);
+}
+
+void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
+ symbolizer.Store(fn);
+}
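+
+// Example (illustrative sketch of installing one of the hooks above):
+//
+//   void MyMutexTracer(const char *msg, const void *obj,
+//                      int64_t wait_cycles) {  // hypothetical callback
+//     ABSL_RAW_LOG(INFO, "%s %p waited %lld cycles", msg, obj,
+//                  static_cast<long long>(wait_cycles));
+//   }
+//   ...
+//   y_absl::RegisterMutexTracer(&MyMutexTracer);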
+
+namespace {
+// Represents the strategy for spin and yield.
+// See the comment in GetMutexGlobals() for more information.
+enum DelayMode { AGGRESSIVE, GENTLE };
+
+struct ABSL_CACHELINE_ALIGNED MutexGlobals {
+ y_absl::once_flag once;
+ int spinloop_iterations = 0;
+ int32_t mutex_sleep_limit[2] = {};
+};
+
+const MutexGlobals &GetMutexGlobals() {
+ ABSL_CONST_INIT static MutexGlobals data;
+ y_absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
+ const int num_cpus = y_absl::base_internal::NumCPUs();
+ data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
+    // If this is a uniprocessor, only yield/sleep. Otherwise, if the mode is
+    // aggressive, spin many times before yielding; if the mode is gentle,
+    // spin only a few times before yielding. Aggressive spinning is used to
+    // ensure that an Unlock() call, which must get the spin lock for any
+    // thread to make progress, gets it without undue delay.
+ if (num_cpus > 1) {
+ data.mutex_sleep_limit[AGGRESSIVE] = 5000;
+ data.mutex_sleep_limit[GENTLE] = 250;
+ } else {
+ data.mutex_sleep_limit[AGGRESSIVE] = 0;
+ data.mutex_sleep_limit[GENTLE] = 0;
+ }
+ });
+ return data;
+}
+} // namespace
+
+namespace synchronization_internal {
+// Returns the Mutex delay on iteration `c` depending on the given `mode`.
+// The returned value should be used as `c` for the next call to `MutexDelay`.
+int MutexDelay(int32_t c, int mode) {
+ const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
+ if (c < limit) {
+ // Spin.
+ c++;
+ } else {
+ SchedulingGuard::ScopedEnable enable_rescheduling;
+ ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
+ if (c == limit) {
+ // Yield once.
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
+ c++;
+ } else {
+ // Then wait.
+ y_absl::SleepFor(y_absl::Microseconds(10));
+ c = 0;
+ }
+ ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
+ }
+ return c;
+}
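+
+// A typical caller threads the counter through its retry loop, e.g.
+// (illustrative sketch with a hypothetical TryAcquire()):
+//
+//   int c = 0;
+//   while (!TryAcquire()) {
+//     c = MutexDelay(c, GENTLE);  // spin, then yield once, then sleep
+//   }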
+} // namespace synchronization_internal
+
+// --------------------------Generic atomic ops
+// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
+// "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
+// before making any change.
+// This is used to set flags in mutex and condition variable words.
+static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
+ intptr_t wait_until_clear) {
+ intptr_t v;
+ do {
+ v = pv->load(std::memory_order_relaxed);
+ } while ((v & bits) != bits &&
+ ((v & wait_until_clear) != 0 ||
+ !pv->compare_exchange_weak(v, v | bits,
+ std::memory_order_release,
+ std::memory_order_relaxed)));
+}
+
+// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
+// "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
+// before making any change.
+// This is used to unset flags in mutex and condition variable words.
+static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
+ intptr_t wait_until_clear) {
+ intptr_t v;
+ do {
+ v = pv->load(std::memory_order_relaxed);
+ } while ((v & bits) != 0 &&
+ ((v & wait_until_clear) != 0 ||
+ !pv->compare_exchange_weak(v, v & ~bits,
+ std::memory_order_release,
+ std::memory_order_relaxed)));
+}
+
+//------------------------------------------------------------------
+
+// Data for doing deadlock detection.
+ABSL_CONST_INIT static y_absl::base_internal::SpinLock deadlock_graph_mu(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+
+// Graph used to detect deadlocks.
+ABSL_CONST_INIT static GraphCycles *deadlock_graph
+ ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
+
+//------------------------------------------------------------------
+// An event mechanism for debugging mutex use.
+// It also allows mutexes to be given names for those who can't handle
+// addresses, and instead like to give their data structures names like
+// "Henry", "Fido", or "Rupert IV, King of Yondavia".
+
+namespace { // to prevent name pollution
+enum { // Mutex and CondVar events passed as "ev" to PostSynchEvent
+ // Mutex events
+ SYNCH_EV_TRYLOCK_SUCCESS,
+ SYNCH_EV_TRYLOCK_FAILED,
+ SYNCH_EV_READERTRYLOCK_SUCCESS,
+ SYNCH_EV_READERTRYLOCK_FAILED,
+ SYNCH_EV_LOCK,
+ SYNCH_EV_LOCK_RETURNING,
+ SYNCH_EV_READERLOCK,
+ SYNCH_EV_READERLOCK_RETURNING,
+ SYNCH_EV_UNLOCK,
+ SYNCH_EV_READERUNLOCK,
+
+ // CondVar events
+ SYNCH_EV_WAIT,
+ SYNCH_EV_WAIT_RETURNING,
+ SYNCH_EV_SIGNAL,
+ SYNCH_EV_SIGNALALL,
+};
+
+enum { // Event flags
+ SYNCH_F_R = 0x01, // reader event
+ SYNCH_F_LCK = 0x02, // PostSynchEvent called with mutex held
+ SYNCH_F_TRY = 0x04, // TryLock or ReaderTryLock
+ SYNCH_F_UNLOCK = 0x08, // Unlock or ReaderUnlock
+
+ SYNCH_F_LCK_W = SYNCH_F_LCK,
+ SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
+};
+} // anonymous namespace
+
+// Properties of the events.
+static const struct {
+ int flags;
+ const char *msg;
+} event_properties[] = {
+ {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
+ {0, "TryLock failed "},
+ {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
+ {0, "ReaderTryLock failed "},
+ {0, "Lock blocking "},
+ {SYNCH_F_LCK_W, "Lock returning "},
+ {0, "ReaderLock blocking "},
+ {SYNCH_F_LCK_R, "ReaderLock returning "},
+ {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
+ {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
+ {0, "Wait on "},
+ {0, "Wait unblocked "},
+ {0, "Signal on "},
+ {0, "SignalAll on "},
+};
+
+ABSL_CONST_INIT static y_absl::base_internal::SpinLock synch_event_mu(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+
+// Hash table size; should be prime > 2.
+// Can't be too small, as it's used for deadlock detection information.
+static constexpr uint32_t kNSynchEvent = 1031;
+
+static struct SynchEvent { // this is a trivial hash table for the events
+ // struct is freed when refcount reaches 0
+ int refcount ABSL_GUARDED_BY(synch_event_mu);
+
+ // buckets have linear, 0-terminated chains
+ SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
+
+ // Constant after initialization
+ uintptr_t masked_addr; // object at this address is called "name"
+
+ // No explicit synchronization used. Instead we assume that the
+ // client who enables/disables invariants/logging on a Mutex does so
+ // while the Mutex is not being concurrently accessed by others.
+ void (*invariant)(void *arg); // called on each event
+ void *arg; // first arg to (*invariant)()
+ bool log; // logging turned on
+
+ // Constant after initialization
+ char name[1]; // actually longer---NUL-terminated string
+} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
+
+// Ensure that the object at "addr" has a SynchEvent struct associated with it,
+// set "bits" in the word there (waiting until lockbit is clear before doing
+// so), and return a refcounted reference that will remain valid until
+// UnrefSynchEvent() is called. If a new SynchEvent is allocated,
+// the string name is copied into it.
+// When used with a mutex, the caller should also ensure that kMuEvent
+// is set in the mutex word, and similarly for condition variables and kCVEvent.
+static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
+ const char *name, intptr_t bits,
+ intptr_t lockbit) {
+ uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ SynchEvent *e;
+  // First look for an existing SynchEvent struct.
+ synch_event_mu.Lock();
+ for (e = synch_event[h];
+ e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+ e = e->next) {
+ }
+ if (e == nullptr) { // no SynchEvent struct found; make one.
+ if (name == nullptr) {
+ name = "";
+ }
+ size_t l = strlen(name);
+ e = reinterpret_cast<SynchEvent *>(
+ base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
+ e->refcount = 2; // one for return value, one for linked list
+ e->masked_addr = base_internal::HidePtr(addr);
+ e->invariant = nullptr;
+ e->arg = nullptr;
+ e->log = false;
+ strcpy(e->name, name); // NOLINT(runtime/printf)
+ e->next = synch_event[h];
+ AtomicSetBits(addr, bits, lockbit);
+ synch_event[h] = e;
+ } else {
+ e->refcount++; // for return value
+ }
+ synch_event_mu.Unlock();
+ return e;
+}
+
+// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
+static void DeleteSynchEvent(SynchEvent *e) {
+ base_internal::LowLevelAlloc::Free(e);
+}
+
+// Decrement the reference count of *e, or do nothing if e==null.
+static void UnrefSynchEvent(SynchEvent *e) {
+ if (e != nullptr) {
+ synch_event_mu.Lock();
+ bool del = (--(e->refcount) == 0);
+ synch_event_mu.Unlock();
+ if (del) {
+ DeleteSynchEvent(e);
+ }
+ }
+}
+
+// Forget the mapping from the object (Mutex or CondVar) at address addr
+// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
+// is clear before doing so).
+static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
+ intptr_t lockbit) {
+ uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ SynchEvent **pe;
+ SynchEvent *e;
+ synch_event_mu.Lock();
+ for (pe = &synch_event[h];
+ (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+ pe = &e->next) {
+ }
+ bool del = false;
+ if (e != nullptr) {
+ *pe = e->next;
+ del = (--(e->refcount) == 0);
+ }
+ AtomicClearBits(addr, bits, lockbit);
+ synch_event_mu.Unlock();
+ if (del) {
+ DeleteSynchEvent(e);
+ }
+}
+
+// Return a refcounted reference to the SynchEvent of the object at address
+// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
+// called.
+static SynchEvent *GetSynchEvent(const void *addr) {
+ uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ SynchEvent *e;
+ synch_event_mu.Lock();
+ for (e = synch_event[h];
+ e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+ e = e->next) {
+ }
+ if (e != nullptr) {
+ e->refcount++;
+ }
+ synch_event_mu.Unlock();
+ return e;
+}
+
+// Called when an event "ev" occurs on a Mutex or CondVar "obj",
+// if event recording is on.
+static void PostSynchEvent(void *obj, int ev) {
+ SynchEvent *e = GetSynchEvent(obj);
+ // logging is on if event recording is on and either there's no event struct,
+ // or it explicitly says to log
+ if (e == nullptr || e->log) {
+ void *pcs[40];
+ int n = y_absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
+ // A buffer with enough space for the ASCII for all the PCs, even on a
+ // 64-bit machine.
+ char buffer[ABSL_ARRAYSIZE(pcs) * 24];
+ int pos = snprintf(buffer, sizeof (buffer), " @");
+ for (int i = 0; i != n; i++) {
+ pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
+ }
+ ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
+ (e == nullptr ? "" : e->name), buffer);
+ }
+ const int flags = event_properties[ev].flags;
+ if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
+ // Calling the invariant as is causes problems under ThreadSanitizer.
+ // We are currently inside of Mutex Lock/Unlock and are ignoring all
+ // memory accesses and synchronization. If the invariant transitively
+ // synchronizes something else and we ignore the synchronization, we will
+ // get false positive race reports later.
+ // Reuse EvalConditionAnnotated to properly call into user code.
+ struct local {
+ static bool pred(SynchEvent *ev) {
+ (*ev->invariant)(ev->arg);
+ return false;
+ }
+ };
+ Condition cond(&local::pred, e);
+ Mutex *mu = static_cast<Mutex *>(obj);
+ const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
+ const bool trylock = (flags & SYNCH_F_TRY) != 0;
+ const bool read_lock = (flags & SYNCH_F_R) != 0;
+ EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
+ }
+ UnrefSynchEvent(e);
+}
+
+//------------------------------------------------------------------
+
+// The SynchWaitParams struct encapsulates the way in which a thread is waiting:
+// whether it has a timeout, the condition, exclusive/shared, and whether a
+// condition variable wait has an associated Mutex (as opposed to another
+// type of lock). It also points to the PerThreadSynch struct of its thread.
+// cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
+//
+// This structure is held on the stack rather than directly in
+// PerThreadSynch because a thread can be waiting on multiple Mutexes if,
+// while waiting on one Mutex, the implementation calls a client callback
+// (such as a Condition function) that acquires another Mutex. We don't
+// strictly need to allow this, but programmers become confused if we do not
+// allow them to use functions such as LOG() within Condition functions. The
+// PerThreadSynch struct points at the most recent SynchWaitParams struct when
+// the thread is on a Mutex's waiter queue.
+struct SynchWaitParams {
+ SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
+ KernelTimeout timeout_arg, Mutex *cvmu_arg,
+ PerThreadSynch *thread_arg,
+ std::atomic<intptr_t> *cv_word_arg)
+ : how(how_arg),
+ cond(cond_arg),
+ timeout(timeout_arg),
+ cvmu(cvmu_arg),
+ thread(thread_arg),
+ cv_word(cv_word_arg),
+ contention_start_cycles(base_internal::CycleClock::Now()) {}
+
+ const Mutex::MuHow how; // How this thread needs to wait.
+  const Condition *cond;  // The condition that this thread is waiting for.
+ KernelTimeout timeout; // timeout expiry---absolute time
+ // In Mutex, this field is set to zero if a timeout
+ // expires.
+ Mutex *const cvmu; // used for transfer from cond var to mutex
+ PerThreadSynch *const thread; // thread that is waiting
+
+ // If not null, thread should be enqueued on the CondVar whose state
+ // word is cv_word instead of queueing normally on the Mutex.
+ std::atomic<intptr_t> *cv_word;
+
+ int64_t contention_start_cycles; // Time (in cycles) when this thread started
+ // to contend for the mutex.
+};
+
+struct SynchLocksHeld {
+ int n; // number of valid entries in locks[]
+ bool overflow; // true iff we overflowed the array at some point
+ struct {
+ Mutex *mu; // lock acquired
+ int32_t count; // times acquired
+ GraphId id; // deadlock_graph id of acquired lock
+ } locks[40];
+ // If a thread overfills the array during deadlock detection, we
+ // continue, discarding information as needed. If no overflow has
+ // taken place, we can provide more error checking, such as
+ // detecting when a thread releases a lock it does not hold.
+};
+
+// A sentinel value in lists that is not 0.
+// A 0 value is used to mean "not on a list".
+static PerThreadSynch *const kPerThreadSynchNull =
+ reinterpret_cast<PerThreadSynch *>(1);
+
+static SynchLocksHeld *LocksHeldAlloc() {
+ SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
+ base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
+ ret->n = 0;
+ ret->overflow = false;
+ return ret;
+}
+
+// Return the PerThreadSynch-struct for this thread.
+static PerThreadSynch *Synch_GetPerThread() {
+ ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
+ return &identity->per_thread_synch;
+}
+
+static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
+ if (mu) {
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ }
+ PerThreadSynch *w = Synch_GetPerThread();
+ if (mu) {
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ }
+ return w;
+}
+
+static SynchLocksHeld *Synch_GetAllLocks() {
+ PerThreadSynch *s = Synch_GetPerThread();
+ if (s->all_locks == nullptr) {
+ s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
+ }
+ return s->all_locks;
+}
+
+// Post on "w"'s associated PerThreadSem.
+void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+ if (mu) {
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ }
+ PerThreadSem::Post(w->thread_identity());
+ if (mu) {
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ }
+}
+
+// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
+bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
+ if (mu) {
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ }
+ assert(w == Synch_GetPerThread());
+ static_cast<void>(w);
+ bool res = PerThreadSem::Wait(t);
+ if (mu) {
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ }
+ return res;
+}
+
+// We're in a fatal signal handler that hopes to use Mutex and to get
+// lucky by not deadlocking. We try to improve its chances of success
+// by effectively disabling some of the consistency checks. This will
+// prevent certain ABSL_RAW_CHECK() statements from being triggered when
+// reentry is detected. The ABSL_RAW_CHECK() statements are those in the
+// Mutex code checking that the "waitp" field has not been reused.
+void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
+ // Fix the per-thread state only if it exists.
+ ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
+ if (identity != nullptr) {
+ identity->per_thread_synch.suppress_fatal_errors = true;
+ }
+ // Don't do deadlock detection when we are already failing.
+ synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
+ std::memory_order_release);
+}
+
+// --------------------------time support
+
+// Return the current time plus the timeout. Use the same clock as
+// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
+// such a choice when a deadline is given directly.
+static y_absl::Time DeadlineFromTimeout(y_absl::Duration timeout) {
+#ifndef _WIN32
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return y_absl::TimeFromTimeval(tv) + timeout;
+#else
+ return y_absl::Now() + timeout;
+#endif
+}
+
+// --------------------------Mutexes
+
+// In the layout below, the msb of the bottom byte is currently unused. Also,
+// the following constraints were considered in choosing the layout:
+// o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
+// 0xcd) are illegal: reader and writer lock both held.
+// o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
+// bit-twiddling trick in Mutex::Unlock().
+// o kMuWriter / kMuReader == kMuWrWait / kMuWait,
+// to enable the bit-twiddling trick in CheckForMutexCorruption().
+static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
+static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
+static const intptr_t kMuWait = 0x0004L; // threads are waiting
+static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
+static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+// INVARIANT1: there's a thread that was blocked on the mutex, is
+// no longer, yet has not yet acquired the mutex. If there's a
+// designated waker, all threads can avoid taking the slow path in
+// unlock because the designated waker will subsequently acquire
+// the lock and wake someone. To maintain INVARIANT1 the bit is
+// set when a thread is unblocked (INV1a), and threads that were
+// unblocked reset the bit when they either acquire or re-block
+// (INV1b).
+static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
+ // for a reader
+static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
+static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
+static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
+
+// Hack to make constant values available to gdb pretty printer
+enum {
+ kGdbMuSpin = kMuSpin,
+ kGdbMuEvent = kMuEvent,
+ kGdbMuWait = kMuWait,
+ kGdbMuWriter = kMuWriter,
+ kGdbMuDesig = kMuDesig,
+ kGdbMuWrWait = kMuWrWait,
+ kGdbMuReader = kMuReader,
+ kGdbMuLow = kMuLow,
+};
+
+// kMuWrWait implies kMuWait.
+// kMuReader and kMuWriter are mutually exclusive.
+// If kMuReader is zero, there are no readers.
+// Otherwise, if kMuWait is zero, the high order bits contain a count of the
+// number of readers. Otherwise, the reader count is held in
+// PerThreadSynch::readers of the most recently queued waiter, again in the
+// bits above kMuLow.
+static const intptr_t kMuOne = 0x0100; // a count of one reader
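+// For example (illustrative): a mutex read-held by three threads, with no
+// waiters and no events, stores 3 in the bits above kMuLow, so its word is
+// (3 * kMuOne) | kMuReader.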
+
+// flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
+static const int kMuHasBlocked = 0x01; // already blocked (MUST == 1)
+static const int kMuIsCond = 0x02; // conditional waiter (CV or Condition)
+
+static_assert(PerThreadSynch::kAlignment > kMuLow,
+ "PerThreadSynch::kAlignment must be greater than kMuLow");
+
+// This struct contains various bitmasks to be used in
+// acquiring and releasing a mutex in a particular mode.
+struct MuHowS {
+ // if all the bits in fast_need_zero are zero, the lock can be acquired by
+  // adding fast_add and OR-ing fast_or. The bit kMuDesig should be reset iff
+  // this thread is the designated waker.
+ intptr_t fast_need_zero;
+ intptr_t fast_or;
+ intptr_t fast_add;
+
+ intptr_t slow_need_zero; // fast_need_zero with events (e.g. logging)
+
+  intptr_t slow_inc_need_zero;  // if all the bits in slow_inc_need_zero are
+                                // zero, a reader can acquire a read share by
+                                // setting the reader bit and incrementing
+                                // the reader count (in the last waiter since
+                                // we're now slow-path). kMuWrWait may
+                                // be ignored if we already waited once.
+};
+
+static const MuHowS kSharedS = {
+ // shared or read lock
+ kMuWriter | kMuWait | kMuEvent, // fast_need_zero
+ kMuReader, // fast_or
+ kMuOne, // fast_add
+ kMuWriter | kMuWait, // slow_need_zero
+ kMuSpin | kMuWriter | kMuWrWait, // slow_inc_need_zero
+};
+static const MuHowS kExclusiveS = {
+ // exclusive or write lock
+ kMuWriter | kMuReader | kMuEvent, // fast_need_zero
+ kMuWriter, // fast_or
+ 0, // fast_add
+ kMuWriter | kMuReader, // slow_need_zero
+ ~static_cast<intptr_t>(0), // slow_inc_need_zero
+};
+static const Mutex::MuHow kShared = &kSharedS; // shared lock
+static const Mutex::MuHow kExclusive = &kExclusiveS; // exclusive lock
+
+#ifdef NDEBUG
+static constexpr bool kDebugMode = false;
+#else
+static constexpr bool kDebugMode = true;
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+static unsigned TsanFlags(Mutex::MuHow how) {
+ return how == kShared ? __tsan_mutex_read_lock : 0;
+}
+#endif
+
+static bool DebugOnlyIsExiting() {
+ return false;
+}
+
+Mutex::~Mutex() {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
+ ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
+ }
+ if (kDebugMode) {
+ this->ForgetDeadlockInfo();
+ }
+ ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
+}
+
+void Mutex::EnableDebugLog(const char *name) {
+ SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
+ e->log = true;
+ UnrefSynchEvent(e);
+}
+
+void EnableMutexInvariantDebugging(bool enabled) {
+ synch_check_invariants.store(enabled, std::memory_order_release);
+}
+
+void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
+ void *arg) {
+ if (synch_check_invariants.load(std::memory_order_acquire) &&
+ invariant != nullptr) {
+ SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
+ e->invariant = invariant;
+ e->arg = arg;
+ UnrefSynchEvent(e);
+ }
+}
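+
+// Example (illustrative sketch, with a hypothetical invariant function):
+//
+//   static void CheckNonNegative(void *arg) {
+//     assert(*static_cast<int *>(arg) >= 0);
+//   }
+//   ...
+//   y_absl::EnableMutexInvariantDebugging(true);  // must be on beforehand
+//   mu.EnableInvariantDebugging(&CheckNonNegative, &guarded_counter);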
+
+void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
+ synch_deadlock_detection.store(mode, std::memory_order_release);
+}
+
+// Return true iff threads x and y are part of the same equivalence
+// class of waiters. An equivalence class is defined as the set of
+// waiters with the same condition, type of lock, and thread priority.
+//
+// Requires that x and y be waiting on the same Mutex queue.
+static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+ return x->waitp->how == y->waitp->how && x->priority == y->priority &&
+ Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
+}
+
+// Given the contents of a mutex word containing a PerThreadSynch pointer,
+// return the pointer.
+static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
+ return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
+}
+
+// The next several routines maintain the per-thread next and skip fields
+// used in the Mutex waiter queue.
+// The queue is a circular singly-linked list, of which the "head" is the
+// last element, and head->next is the first element.
+// The skip field has the invariant:
+// For thread x, x->skip is one of:
+// - invalid (iff x is not in a Mutex wait queue),
+// - null, or
+// - a pointer to a distinct thread waiting later in the same Mutex queue
+// such that all threads in [x, x->skip] have the same condition, priority
+// and lock type (MuEquivalentWaiter() is true for all pairs in [x,
+// x->skip]).
+// In addition, if x->skip is valid, (x->may_skip || x->skip == null)
+//
+// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
+// first runnable thread y from the front of a Mutex queue to adjust the skip
+// field of another thread x because if x->skip==y, x->skip must (have) become
+// invalid before y is removed. The function TryRemove can remove a specified
+// thread from an arbitrary position in the queue whether runnable or not, so
+// it fixes up skip fields that would otherwise be left dangling.
+// The statement
+// if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
+// maintains the invariant, provided x is not the last waiter in a Mutex queue.
+// The statement
+// if (x->skip != null) { x->skip = x->skip->skip; }
+// maintains the invariant.
+
+// Returns the last thread y in a mutex waiter queue such that all threads in
+// [x, y] inclusive share the same condition. Sets skip fields of some threads
+// in that range to optimize future evaluation of Skip() on x values in
+// the range. Requires thread x is in a mutex waiter queue.
+// The locking is unusual. Skip() is called under these conditions:
+// - spinlock is held in call from Enqueue(), with maybe_unlocking == false
+// - Mutex is held in call from UnlockSlow() by last unlocker, with
+// maybe_unlocking == true
+// - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
+// UnlockSlow()) and TryRemove()
+// These cases are mutually exclusive, so Skip() never runs concurrently
+// with itself on the same Mutex. The skip chain is used in these other places
+// that cannot occur concurrently:
+// - FixSkip() (from TryRemove()) (with spinlock and Mutex held)
+// - Dequeue() (with spinlock and Mutex held)
+// - UnlockSlow() (with spinlock and Mutex held)
+// A more complex case is Enqueue()
+// - Enqueue() (with spinlock held and maybe_unlocking == false)
+// This is the first case in which Skip is called, above.
+// - Enqueue() (without spinlock held; but queue is empty and being freshly
+// formed)
+// - Enqueue() (with spinlock held and maybe_unlocking == true)
+// The first case has mutual exclusion, and the second achieves isolation by
+// working on an otherwise unreachable data structure.
+// In the last case, Enqueue() is required to change no skip/next pointers
+// except those in the added node and the former "head" node. This implies
+// that the new node is added after head, and so must be the new head or the
+// new front of the queue.
+static PerThreadSynch *Skip(PerThreadSynch *x) {
+ PerThreadSynch *x0 = nullptr;
+ PerThreadSynch *x1 = x;
+ PerThreadSynch *x2 = x->skip;
+ if (x2 != nullptr) {
+ // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
+ // such that x1 == x0->skip && x2 == x1->skip
+ while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
+ x0->skip = x2; // short-circuit skip from x0 to x2
+ }
+ x->skip = x1; // short-circuit skip from x to result
+ }
+ return x1;
+}
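+
+// Worked example for Skip() (illustrative names): suppose x->skip == a,
+// a->skip == b, and b->skip == null, with MuEquivalentWaiter() true across
+// [x, b]. The loop advances (x0,x1,x2) once, setting x->skip = b, and then
+// stops because b->skip == null; Skip(x) returns b, with the chain from x
+// now short-circuited directly to b.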
+
+// "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
+// The latter is going to be removed out of order, because of a timeout.
+// Check whether "ancestor" has a skip field pointing to "to_be_removed",
+// and fix it if it does.
+static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
+ if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
+ if (to_be_removed->skip != nullptr) {
+ ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
+ } else if (ancestor->next != to_be_removed) { // they are not adjacent
+ ancestor->skip = ancestor->next; // can skip one past ancestor
+ } else {
+ ancestor->skip = nullptr; // can't skip at all
+ }
+ }
+}
+
+static void CondVarEnqueue(SynchWaitParams *waitp);
+
+// Enqueue thread "waitp->thread" on a waiter queue.
+// Called with mutex spinlock held if head != nullptr
+// If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
+// idempotent; it alters no state associated with the existing (empty)
+// queue.
+//
+// If waitp->cv_word == nullptr, queue the thread at either the front or
+// the end (according to its priority) of the circular mutex waiter queue whose
+// head is "head", and return the new head. mu is the previous mutex state,
+// which contains the reader count (perhaps adjusted for the operation in
+// progress) if the list was empty and a read lock held, and the holder hint if
+// the list was empty and a write lock held. (flags & kMuIsCond) indicates
+// whether this thread was transferred from a CondVar or is waiting for a
+// non-trivial condition. In this case, Enqueue() never returns nullptr
+//
+// If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
+// returned. This mechanism is used by CondVar to queue a thread on the
+// condition variable queue instead of the mutex queue in implementing Wait().
+// In this case, Enqueue() can return nullptr (if head==nullptr).
+static PerThreadSynch *Enqueue(PerThreadSynch *head,
+ SynchWaitParams *waitp, intptr_t mu, int flags) {
+ // If we have been given a cv_word, call CondVarEnqueue() and return
+ // the previous head of the Mutex waiter queue.
+ if (waitp->cv_word != nullptr) {
+ CondVarEnqueue(waitp);
+ return head;
+ }
+
+ PerThreadSynch *s = waitp->thread;
+ ABSL_RAW_CHECK(
+ s->waitp == nullptr || // normal case
+ s->waitp == waitp || // Fer()---transfer from condition variable
+ s->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ s->waitp = waitp;
+ s->skip = nullptr; // maintain skip invariant (see above)
+ s->may_skip = true; // always true on entering queue
+ s->wake = false; // not being woken
+ s->cond_waiter = ((flags & kMuIsCond) != 0);
+ if (head == nullptr) { // s is the only waiter
+ s->next = s; // it's the only entry in the cycle
+ s->readers = mu; // reader count is from mu word
+ s->maybe_unlocking = false; // no one is searching an empty list
+ head = s; // s is new head
+ } else {
+ PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+ int64_t now_cycles = base_internal::CycleClock::Now();
+ if (s->next_priority_read_cycles < now_cycles) {
+ // Every so often, update our idea of the thread's priority.
+ // pthread_getschedparam() is 5% of the block/wakeup time;
+ // base_internal::CycleClock::Now() is 0.5%.
+ int policy;
+ struct sched_param param;
+ const int err = pthread_getschedparam(pthread_self(), &policy, &param);
+ if (err != 0) {
+ ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
+ } else {
+ s->priority = param.sched_priority;
+ s->next_priority_read_cycles =
+ now_cycles +
+ static_cast<int64_t>(base_internal::CycleClock::Frequency());
+ }
+ }
+ if (s->priority > head->priority) { // s's priority is above head's
+ // try to put s in priority-fifo order, or failing that at the front.
+ if (!head->maybe_unlocking) {
+ // No unlocker can be scanning the queue, so we can insert into the
+ // middle of the queue.
+ //
+ // Within a skip chain, all waiters have the same priority, so we can
+ // skip forward through the chains until we find one with a lower
+ // priority than the waiter to be enqueued.
+ PerThreadSynch *advance_to = head; // next value of enqueue_after
+ do {
+ enqueue_after = advance_to;
+ // (side-effect: optimizes skip chain)
+ advance_to = Skip(enqueue_after->next);
+ } while (s->priority <= advance_to->priority);
+ // termination guaranteed because s->priority > head->priority
+ // and head is the end of a skip chain
+ } else if (waitp->how == kExclusive &&
+ Condition::GuaranteedEqual(waitp->cond, nullptr)) {
+ // An unlocker could be scanning the queue, but we know it will recheck
+ // the queue front for writers that have no condition, which is what s
+ // is, so an insert at front is safe.
+ enqueue_after = head; // add after head, at front
+ }
+ }
+#endif
+ if (enqueue_after != nullptr) {
+ s->next = enqueue_after->next;
+ enqueue_after->next = s;
+
+ // enqueue_after can be: head, Skip(...), or cur.
+ // The first two imply enqueue_after->skip == nullptr, and
+ // the last is used only if MuEquivalentWaiter(s, cur).
+ // We require this because clearing enqueue_after->skip
+ // is impossible; enqueue_after's predecessors might also
+ // incorrectly skip over s if we were to allow other
+ // insertion points.
+ ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
+ MuEquivalentWaiter(enqueue_after, s),
+ "Mutex Enqueue failure");
+
+ if (enqueue_after != head && enqueue_after->may_skip &&
+ MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
+ // enqueue_after can skip to its new successor, s
+ enqueue_after->skip = enqueue_after->next;
+ }
+ if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
+ s->skip = s->next; // s may skip to its successor
+ }
+ } else { // enqueue not done any other way, so
+ // we're inserting s at the back
+ // s will become new head; copy data from head into it
+ s->next = head->next; // add s after head
+ head->next = s;
+ s->readers = head->readers; // reader count is from previous head
+ s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
+ if (head->may_skip && MuEquivalentWaiter(head, s)) {
+ // head now has successor; may skip
+ head->skip = s;
+ }
+ head = s; // s is new head
+ }
+ }
+ s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
+ return head;
+}
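+
+// Illustrative example of the priority-FIFO insertion above (hypothetical
+// priorities; assumes ABSL_HAVE_PTHREAD_GETSCHEDPARAM is defined): with a
+// queue that is w1, w2, head front to back, all at priority 0, and a new
+// waiter s at priority 5, the skip-chain walk stops immediately with
+// enqueue_after == head, so s becomes head->next, i.e. the new front.
+// Waiters of equal priority continue to be added at the back, preserving
+// FIFO order within each priority level.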
+
+// Dequeue the successor pw->next of thread pw from the Mutex waiter queue
+// whose last element is head. The new head element is returned, or null
+// if the list is made empty.
+// Dequeue is called with both spinlock and Mutex held.
+static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
+ PerThreadSynch *w = pw->next;
+ pw->next = w->next; // snip w out of list
+ if (head == w) { // we removed the head
+ head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
+ } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
+ // pw can skip to its new successor
+ if (pw->next->skip !=
+ nullptr) { // either skip to its successor's skip target
+ pw->skip = pw->next->skip;
+ } else { // or to pw's successor
+ pw->skip = pw->next;
+ }
+ }
+ return head;
+}
+
+// Traverse the elements [ pw->next, h] of the circular list whose last element
+// is head.
+// Remove all elements with wake==true and place them in the
+// singly-linked list wake_list in the order found. Assumes that
+// there is only one such element if the element has how == kExclusive.
+// Return the new head.
+static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
+ PerThreadSynch *pw,
+ PerThreadSynch **wake_tail) {
+ PerThreadSynch *orig_h = head;
+ PerThreadSynch *w = pw->next;
+ bool skipped = false;
+ do {
+ if (w->wake) { // remove this element
+ ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
+ // We're removing pw's successor w, so pw->skip must be null here: if
+ // pw->skip were non-null, pw would have the same condition as w
+ // (MuEquivalentWaiter), and so should already have been removed.
+ head = Dequeue(head, pw);
+ w->next = *wake_tail; // keep list terminated
+ *wake_tail = w; // add w to wake_list;
+ wake_tail = &w->next; // next addition to end
+ if (w->waitp->how == kExclusive) { // wake at most 1 writer
+ break;
+ }
+ } else { // not waking this one; skip
+ pw = Skip(w); // skip as much as possible
+ skipped = true;
+ }
+ w = pw->next;
+ // We want to stop processing after we've considered the original head,
+ // orig_h. We can't test for w==orig_h in the loop because w may skip over
+ // it; we are guaranteed only that w's predecessor will not skip over
+ // orig_h. When we've considered orig_h, either we've processed it and
+ // removed it (so orig_h != head), or we considered it and skipped it (so
+ // skipped==true && pw == head because skipping from head always skips by
+ // just one, leaving pw pointing at head). So we want to
+ // continue the loop with the negation of that expression.
+ } while (orig_h == head && (pw != head || !skipped));
+ return head;
+}
+
+// Try to remove thread s from the list of waiters on this mutex.
+// Does nothing if s is not on the waiter list.
+void Mutex::TryRemove(PerThreadSynch *s) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // acquire spinlock & lock
+ if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
+ mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ if (h != nullptr) {
+ PerThreadSynch *pw = h; // pw is w's predecessor
+ PerThreadSynch *w;
+ if ((w = pw->next) != s) { // search for thread,
+ do { // processing at least one element
+ // If the current element isn't equivalent to the waiter to be
+ // removed, we can skip the entire chain.
+ if (!MuEquivalentWaiter(s, w)) {
+ pw = Skip(w); // so skip all that won't match
+ // we don't have to worry about dangling skip fields
+ // in the threads we skipped; none can point to s
+ // because they are in a different equivalence class.
+ } else { // seeking same condition
+ FixSkip(w, s); // fix up any skip pointer from w to s
+ pw = w;
+ }
+ // don't search further if we found the thread, or we're about to
+ // process the first thread again.
+ } while ((w = pw->next) != s && pw != h);
+ }
+ if (w == s) { // found thread; remove it
+ // pw->skip may be non-zero here; the loop above ensured that
+ // no ancestor of s can skip to s, so removal is safe anyway.
+ h = Dequeue(h, pw);
+ s->next = nullptr;
+ s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ }
+ }
+ intptr_t nv;
+ do { // release spinlock and lock
+ v = mu_.load(std::memory_order_relaxed);
+ nv = v & (kMuDesig | kMuEvent);
+ if (h != nullptr) {
+ nv |= kMuWait | reinterpret_cast<intptr_t>(h);
+ h->readers = 0; // we hold writer lock
+ h->maybe_unlocking = false; // finished unlocking
+ }
+ } while (!mu_.compare_exchange_weak(v, nv,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+ }
+}
+
+// Wait until thread "s", which must be the current thread, is removed from
+// this mutex's waiter queue. If "s->waitp->timeout" has a timeout, wake up
+// if the wait extends past the absolute time specified, even if "s" is still
+// on the mutex queue; in this case, remove "s" from the queue before
+// returning.
+ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
+ while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
+ if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
+ // After a timeout, we go into a spin loop until we remove ourselves
+ // from the queue, or someone else removes us. We can't be sure to be
+ // able to remove ourselves in a single lock acquisition because this
+ // mutex may be held, and the holder has the right to read the centre
+ // of the waiter queue without holding the spinlock.
+ this->TryRemove(s);
+ int c = 0;
+ while (s->next != nullptr) {
+ c = synchronization_internal::MutexDelay(c, GENTLE);
+ this->TryRemove(s);
+ }
+ if (kDebugMode) {
+ // This ensures that we test the case that TryRemove() is called when s
+ // is not on the queue.
+ this->TryRemove(s);
+ }
+ s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
+ s->waitp->cond = nullptr; // condition no longer relevant for wakeups
+ }
+ }
+ ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
+ "detected illegal recursion in Mutex code");
+ s->waitp = nullptr;
+}
+
+// Wake thread w, and return the next thread in the list.
+PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
+ PerThreadSynch *next = w->next;
+ w->next = nullptr;
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ IncrementSynchSem(this, w);
+
+ return next;
+}
+
+static GraphId GetGraphIdLocked(Mutex *mu)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
+ if (!deadlock_graph) { // (re)create the deadlock graph.
+ deadlock_graph =
+ new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
+ GraphCycles;
+ }
+ return deadlock_graph->GetId(mu);
+}
+
+static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
+ deadlock_graph_mu.Lock();
+ GraphId id = GetGraphIdLocked(mu);
+ deadlock_graph_mu.Unlock();
+ return id;
+}
+
+// Record a lock acquisition. This is used in debug mode for deadlock
+// detection. The held_locks pointer points to the relevant data
+// structure for each case.
+static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+ int n = held_locks->n;
+ int i = 0;
+ while (i != n && held_locks->locks[i].id != id) {
+ i++;
+ }
+ if (i == n) {
+ if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
+ held_locks->overflow = true; // lost some data
+ } else { // we have room for lock
+ held_locks->locks[i].mu = mu;
+ held_locks->locks[i].count = 1;
+ held_locks->locks[i].id = id;
+ held_locks->n = n + 1;
+ }
+ } else {
+ held_locks->locks[i].count++;
+ }
+}
+
+// Record a lock release. Each call to LockEnter(mu, id, x) should be
+// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
+// It does not process the event if it is not needed, i.e., when deadlock
+// detection is disabled.
+static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+ int n = held_locks->n;
+ int i = 0;
+ while (i != n && held_locks->locks[i].id != id) {
+ i++;
+ }
+ if (i == n) {
+ if (!held_locks->overflow) {
+ // The deadlock id may have been reassigned after ForgetDeadlockInfo,
+ // but in that case mu should still be present.
+ i = 0;
+ while (i != n && held_locks->locks[i].mu != mu) {
+ i++;
+ }
+ if (i == n) { // mu missing means releasing unheld lock
+ SynchEvent *mu_events = GetSynchEvent(mu);
+ ABSL_RAW_LOG(FATAL,
+ "thread releasing lock it does not hold: %p %s",
+ static_cast<void *>(mu),
+ mu_events == nullptr ? "" : mu_events->name);
+ }
+ }
+ } else if (held_locks->locks[i].count == 1) {
+ held_locks->n = n - 1;
+ held_locks->locks[i] = held_locks->locks[n - 1];
+ held_locks->locks[n - 1].id = InvalidGraphId();
+ held_locks->locks[n - 1].mu =
+ nullptr; // clear mu to please the leak detector.
+ } else {
+ assert(held_locks->locks[i].count > 0);
+ held_locks->locks[i].count--;
+ }
+}
+
+// Call LockEnter() if in debug mode and deadlock detection is enabled.
+static inline void DebugOnlyLockEnter(Mutex *mu) {
+ if (kDebugMode) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
+ }
+ }
+}
+
+// Call LockEnter() if in debug mode and deadlock detection is enabled.
+static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
+ if (kDebugMode) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ LockEnter(mu, id, Synch_GetAllLocks());
+ }
+ }
+}
+
+// Call LockLeave() if in debug mode and deadlock detection is enabled.
+static inline void DebugOnlyLockLeave(Mutex *mu) {
+ if (kDebugMode) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
+ }
+ }
+}
+
+static char *StackString(void **pcs, int n, char *buf, int maxlen,
+ bool symbolize) {
+ static const int kSymLen = 200;
+ char sym[kSymLen];
+ int len = 0;
+ for (int i = 0; i != n; i++) {
+ if (symbolize) {
+ if (!symbolizer(pcs[i], sym, kSymLen)) {
+ sym[0] = '\0';
+ }
+ snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
+ (i == 0 ? "\n" : ""),
+ pcs[i], sym);
+ } else {
+ snprintf(buf + len, maxlen - len, " %p", pcs[i]);
+ }
+ len += strlen(&buf[len]);
+ }
+ return buf;
+}
+
+static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
+ void *pcs[40];
+ return StackString(pcs, y_absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
+ maxlen, symbolize);
+}
+
+namespace {
+enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
+ // a path this long would be remarkable
+// Buffers required to report a deadlock.
+// We do not allocate them on the stack to avoid a large stack frame.
+struct DeadlockReportBuffers {
+ char buf[6100];
+ GraphId path[kMaxDeadlockPathLen];
+};
+
+struct ScopedDeadlockReportBuffers {
+ ScopedDeadlockReportBuffers() {
+ b = reinterpret_cast<DeadlockReportBuffers *>(
+ base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
+ }
+ ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
+ DeadlockReportBuffers *b;
+};
+
+// Helper to pass to GraphCycles::UpdateStackTrace.
+int GetStack(void** stack, int max_depth) {
+ return y_absl::GetStackTrace(stack, max_depth, 3);
+}
+} // anonymous namespace
+
+// Called in debug mode when a thread is about to acquire a lock in a way that
+// may block.
+static GraphId DeadlockCheck(Mutex *mu) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) ==
+ OnDeadlockCycle::kIgnore) {
+ return InvalidGraphId();
+ }
+
+ SynchLocksHeld *all_locks = Synch_GetAllLocks();
+
+ y_absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
+ const GraphId mu_id = GetGraphIdLocked(mu);
+
+ if (all_locks->n == 0) {
+ // There are no other locks held. Return now so that we don't need to
+ // call GetSynchEvent(). This way we do not record the stack trace
+ // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,
+ // it can't always be the first lock acquired by a thread.
+ return mu_id;
+ }
+
+ // We prefer to keep stack traces that show a thread holding and acquiring
+ // as many locks as possible. This increases the chances that a given edge
+ // in the acquires-before graph will be represented in the stack traces
+ // recorded for the locks.
+ deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
+
+ // For each other mutex already held by this thread:
+ for (int i = 0; i != all_locks->n; i++) {
+ const GraphId other_node_id = all_locks->locks[i].id;
+ const Mutex *other =
+ static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
+ if (other == nullptr) {
+ // Ignore stale lock
+ continue;
+ }
+
+ // Add the acquired-before edge to the graph.
+ if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
+ ScopedDeadlockReportBuffers scoped_buffers;
+ DeadlockReportBuffers *b = scoped_buffers.b;
+ static int number_of_reported_deadlocks = 0;
+ number_of_reported_deadlocks++;
+ // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
+ bool symbolize = number_of_reported_deadlocks <= 2;
+ ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
+ CurrentStackString(b->buf, sizeof (b->buf), symbolize));
+ int len = 0;
+ for (int j = 0; j != all_locks->n; j++) {
+ void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
+ if (pr != nullptr) {
+ snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
+ len += static_cast<int>(strlen(&b->buf[len]));
+ }
+ }
+ ABSL_RAW_LOG(ERROR,
+ "Acquiring y_absl::Mutex %p while holding %s; a cycle in the "
+ "historical lock ordering graph has been observed",
+ static_cast<void *>(mu), b->buf);
+ ABSL_RAW_LOG(ERROR, "Cycle: ");
+ int path_len = deadlock_graph->FindPath(
+ mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
+ for (int j = 0; j != path_len; j++) {
+ GraphId id = b->path[j];
+ Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
+ if (path_mu == nullptr) continue;
+ void** stack;
+ int depth = deadlock_graph->GetStackTrace(id, &stack);
+ snprintf(b->buf, sizeof(b->buf),
+ "mutex@%p stack: ", static_cast<void *>(path_mu));
+ StackString(stack, depth, b->buf + strlen(b->buf),
+ static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
+ symbolize);
+ ABSL_RAW_LOG(ERROR, "%s", b->buf);
+ }
+ if (synch_deadlock_detection.load(std::memory_order_acquire) ==
+ OnDeadlockCycle::kAbort) {
+ deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
+ ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
+ return mu_id;
+ }
+ break; // report at most one potential deadlock per acquisition
+ }
+ }
+
+ return mu_id;
+}
+
+// Invoke DeadlockCheck() iff we're in debug mode and
+// deadlock checking has been enabled.
+static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
+ if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ return DeadlockCheck(mu);
+ } else {
+ return InvalidGraphId();
+ }
+}
+
+void Mutex::ForgetDeadlockInfo() {
+ if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ deadlock_graph_mu.Lock();
+ if (deadlock_graph != nullptr) {
+ deadlock_graph->RemoveNode(this);
+ }
+ deadlock_graph_mu.Unlock();
+ }
+}
+
+void Mutex::AssertNotHeld() const {
+ // We have the data to allow this check only if in debug mode and deadlock
+ // detection is enabled.
+ if (kDebugMode &&
+ (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
+ synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ GraphId id = GetGraphId(const_cast<Mutex *>(this));
+ SynchLocksHeld *locks = Synch_GetAllLocks();
+ for (int i = 0; i != locks->n; i++) {
+ if (locks->locks[i].id == id) {
+ SynchEvent *mu_events = GetSynchEvent(this);
+ ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
+ static_cast<const void *>(this),
+ (mu_events == nullptr ? "" : mu_events->name));
+ }
+ }
+ }
+}
+
+// Attempt to acquire *mu, and return whether successful. The implementation
+// may spin for a short while if the lock cannot be acquired immediately.
+static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
+ int c = GetMutexGlobals().spinloop_iterations;
+ do { // do/while somewhat faster on AMD
+ intptr_t v = mu->load(std::memory_order_relaxed);
+ if ((v & (kMuReader|kMuEvent)) != 0) {
+ return false; // a reader or tracing -> give up
+ } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
+ mu->compare_exchange_strong(v, kMuWriter | v,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ return true;
+ }
+ } while (--c > 0);
+ return false;
+}
+
+ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // try fast acquire, then spin loop
+ if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
+ !mu_.compare_exchange_strong(v, kMuWriter | v,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ // try spin acquire, then slow loop
+ if (!TryAcquireWithSpinning(&this->mu_)) {
+ this->LockSlow(kExclusive, nullptr, 0);
+ }
+ }
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+}
+
+ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // try fast acquire, then slow loop
+ if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
+ !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ this->LockSlow(kShared, nullptr, 0);
+ }
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+}
+
+void Mutex::LockWhen(const Condition &cond) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ this->LockSlow(kExclusive, &cond, 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+}
+
+bool Mutex::LockWhenWithTimeout(const Condition &cond, y_absl::Duration timeout) {
+ return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::LockWhenWithDeadline(const Condition &cond, y_absl::Time deadline) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kExclusive, &cond,
+ KernelTimeout(deadline), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ return res;
+}
+
+void Mutex::ReaderLockWhen(const Condition &cond) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ this->LockSlow(kShared, &cond, 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+}
+
+bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
+ y_absl::Duration timeout) {
+ return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
+ y_absl::Time deadline) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+ return res;
+}
+
+void Mutex::Await(const Condition &cond) {
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ } else { // normal case
+ ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
+ "condition untrue on return from Await");
+ }
+}
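+
+// Usage sketch (hypothetical client code; "mu" and "q" are invented names):
+// Await() atomically releases the Mutex, blocks until the condition holds,
+// and reacquires the Mutex in the same mode before returning.
+//
+// y_absl::Mutex mu;
+// std::deque<int> q; // guarded by mu
+// ...
+// mu.Lock();
+// mu.Await(y_absl::Condition(
+// +[](std::deque<int> *d) { return !d->empty(); }, &q));
+// int item = q.front(); // condition holds and mu is held here
+// q.pop_front();
+// mu.Unlock();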
+
+bool Mutex::AwaitWithTimeout(const Condition &cond, y_absl::Duration timeout) {
+ return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::AwaitWithDeadline(const Condition &cond, y_absl::Time deadline) {
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ return true;
+ }
+
+ KernelTimeout t{deadline};
+ bool res = this->AwaitCommon(cond, t);
+ ABSL_RAW_CHECK(res || t.has_timeout(),
+ "condition untrue on return from Await");
+ return res;
+}
+
+bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
+ this->AssertReaderHeld();
+ MuHow how =
+ (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
+ SynchWaitParams waitp(
+ how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
+ int flags = kMuHasBlocked;
+ if (!Condition::GuaranteedEqual(&cond, nullptr)) {
+ flags |= kMuIsCond;
+ }
+ this->UnlockSlow(&waitp);
+ this->Block(waitp.thread);
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
+ this->LockSlowLoop(&waitp, flags);
+ bool res = waitp.cond != nullptr || // => cond known true from LockSlowLoop
+ EvalConditionAnnotated(&cond, this, true, false, how == kShared);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
+ return res;
+}
+
+ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
+ mu_.compare_exchange_strong(v, kMuWriter | v,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
+ return true;
+ }
+ if ((v & kMuEvent) != 0) { // we're recording events
+ if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
+ mu_.compare_exchange_strong(
+ v, (kExclusive->fast_or | v) + kExclusive->fast_add,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
+ return true;
+ } else {
+ PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
+ return false;
+}
+
+ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this,
+ __tsan_mutex_read_lock | __tsan_mutex_try_lock);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // The while-loops (here and below) iterate only if the mutex word keeps
+ // changing (typically because the reader count changes) under the CAS. We
+ // limit the number of attempts to avoid having to think about livelock.
+ int loop_limit = 5;
+ while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
+ if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
+ return true;
+ }
+ loop_limit--;
+ v = mu_.load(std::memory_order_relaxed);
+ }
+ if ((v & kMuEvent) != 0) { // we're recording events
+ loop_limit = 5;
+ while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
+ if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
+ return true;
+ }
+ loop_limit--;
+ v = mu_.load(std::memory_order_relaxed);
+ }
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_LOCK(this,
+ __tsan_mutex_read_lock | __tsan_mutex_try_lock |
+ __tsan_mutex_try_lock_failed,
+ 0);
+ return false;
+}
+
+ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+ DebugOnlyLockLeave(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+
+ if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
+ ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
+ static_cast<unsigned>(v));
+ }
+
+ // should_try_cas is whether we'll try a compare-and-swap immediately.
+ // NOTE: optimized out when kDebugMode is false.
+ bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
+ (v & (kMuWait | kMuDesig)) != kMuWait);
+ // But we can use an alternate computation of it that compilers
+ // currently don't find on their own. When that changes, this function
+ // can be simplified.
+ intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
+ intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
+ // Claim: "x == 0 && y > 0" is equivalent to should_try_cas.
+ // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
+ // all possible non-zero values for x exceed all possible values for y.
+ // Therefore, (x == 0 && y > 0) == (x < y).
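+ // Spelled out (a sketch of the claim): the XOR flips the kMuWriter and
+ // kMuWait bits, so x == 0 iff v has kMuWriter set and kMuEvent clear,
+ // and y > 0 iff v has kMuWait clear or kMuDesig set; together these are
+ // exactly the two conjuncts of should_try_cas above.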
+ if (kDebugMode && should_try_cas != (x < y)) {
+ // We would usually use PRIdPTR here, but it is not correctly implemented
+ // within the Android toolchain.
+ ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
+ static_cast<long long>(v), static_cast<long long>(x),
+ static_cast<long long>(y));
+ }
+ if (x < y &&
+ mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ // fast writer release (writer with no waiters or with designated waker)
+ } else {
+ this->UnlockSlow(nullptr /*no waitp*/); // take slow path
+ }
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+}
+
+// Requires v to represent a reader-locked state.
+static bool ExactlyOneReader(intptr_t v) {
+ assert((v & (kMuWriter|kMuReader)) == kMuReader);
+ assert((v & kMuHigh) != 0);
+ // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
+ // on some architectures the following generates slightly smaller code.
+ // It may be faster too.
+ constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
+ return (v & kMuMultipleWaitersMask) == 0;
+}
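+
+// Illustrative reading of the mask above: the reader count is kept in the
+// high bits in units of kMuOne, so "exactly one reader" means the only high
+// bit set is kMuOne itself. Masking kMuOne out of kMuHigh and testing the
+// remaining bits for zero checks precisely that, without comparing the
+// whole field against kMuOne.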
+
+ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
+ DebugOnlyLockLeave(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ assert((v & (kMuWriter|kMuReader)) == kMuReader);
+ if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
+ // fast reader release (reader with no waiters)
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
+ if (mu_.compare_exchange_strong(v, v - clear,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
+ return;
+ }
+ }
+ this->UnlockSlow(nullptr /*no waitp*/); // take slow path
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
+}
+
+// The zap_desig_waker bitmask is used to clear the designated waker flag in
+// the mutex if this thread has blocked, and therefore may be the designated
+// waker.
+static const intptr_t zap_desig_waker[] = {
+ ~static_cast<intptr_t>(0), // not blocked
+ ~static_cast<intptr_t>(
+ kMuDesig) // blocked; turn off the designated waker bit
+};
+
+// The ignore_waiting_writers bitmask is used to ignore the existence
+// of waiting writers if a reader that has already blocked once
+// wakes up.
+static const intptr_t ignore_waiting_writers[] = {
+ ~static_cast<intptr_t>(0), // not blocked
+ ~static_cast<intptr_t>(
+ kMuWrWait) // blocked; pretend there are no waiting writers
+};
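+
+// Both tables above are indexed with (flags & kMuHasBlocked): index 0 on a
+// first acquisition attempt, and (assuming kMuHasBlocked selects index 1, as
+// the indexing expression implies) index 1 after the thread has blocked.
+// For example, v & zap_desig_waker[flags & kMuHasBlocked] leaves v unchanged
+// before blocking and clears kMuDesig after it.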
+
+// Internal version of LockWhen(). See LockSlowWithDeadline()
+ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
+ int flags) {
+ ABSL_RAW_CHECK(
+ this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
+ "condition untrue on return from LockSlow");
+}
+
+// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
+static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+ bool locking, bool trylock,
+ bool read_lock) {
+ // Delicate annotation dance.
+ // We are currently inside of a read/write lock/unlock operation.
+ // All memory accesses are ignored inside of mutex operations; in addition,
+ // for an unlock operation tsan considers that we've already released the
+ // mutex.
+ bool res = false;
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+ const int flags = read_lock ? __tsan_mutex_read_lock : 0;
+ const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
+#endif
+ if (locking) {
+ // For lock we pretend that we have finished the operation,
+ // evaluate the predicate, then unlock the mutex and start locking it again
+ // to match the annotation at the end of outer lock operation.
+ // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
+ // will think the lock acquisition is recursive which will trigger
+ // deadlock detector.
+ ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
+ res = cond->Eval();
+ // There is no "try" version of Unlock, so use flags instead of tryflags.
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
+ ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
+ } else {
+ // Similarly, for unlock we pretend that we have unlocked the mutex,
+ // lock the mutex, evaluate the predicate, and start unlocking it again
+ // to match the annotation at the end of outer unlock operation.
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
+ ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
+ ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
+ res = cond->Eval();
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
+ }
+ // Prevent unused param warnings in non-TSAN builds.
+ static_cast<void>(mu);
+ static_cast<void>(trylock);
+ static_cast<void>(read_lock);
+ return res;
+}
+
+// Compute cond->Eval() hiding it from race detectors.
+// We are hiding it because inside of UnlockSlow we can evaluate a predicate
+// that was just added by a concurrent Lock operation; Lock adds the predicate
+// to the internal Mutex list without actually acquiring the Mutex
+// (it only acquires the internal spinlock, which is rightfully invisible for
+// tsan). As a result there is no tsan-visible synchronization between the
+// addition and this thread. So if we enabled race detection here,
+// it would report a race with the predicate initialization.
+static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
+ // Memory accesses are already ignored inside of lock/unlock operations,
+ // but synchronization operations are also ignored. When we evaluate the
+ // predicate we must ignore only memory accesses but not synchronization,
+ // because missed synchronization can lead to false reports later.
+ // So we "divert" (which un-ignores both memory accesses and synchronization)
+ // and then separately turn on ignores of memory accesses.
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ bool res = cond->Eval();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
+ return res;
+}
+
+// Internal equivalent of *LockWhenWithDeadline(), where
+// "t" represents the absolute timeout; !t.has_timeout() means "forever".
+// "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
+// In flags, bits are ored together:
+// - kMuHasBlocked indicates that the client has already blocked on the call so
+// the designated waker bit must be cleared and waiting writers should not
+// obstruct this call
+// - kMuIsCond indicates that this is a conditional acquire (condition variable,
+// Await, LockWhen) so contention profiling should be suppressed.
+bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
+ KernelTimeout t, int flags) {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ bool unlock = false;
+ if ((v & how->fast_need_zero) == 0 && // try fast acquire
+ mu_.compare_exchange_strong(
+ v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
+ how->fast_add,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ if (cond == nullptr ||
+ EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
+ return true;
+ }
+ unlock = true;
+ }
+ SynchWaitParams waitp(
+ how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
+ if (!Condition::GuaranteedEqual(cond, nullptr)) {
+ flags |= kMuIsCond;
+ }
+ if (unlock) {
+ this->UnlockSlow(&waitp);
+ this->Block(waitp.thread);
+ flags |= kMuHasBlocked;
+ }
+ this->LockSlowLoop(&waitp, flags);
+ return waitp.cond != nullptr || // => cond known true from LockSlowLoop
+ cond == nullptr ||
+ EvalConditionAnnotated(cond, this, true, false, how == kShared);
+}
+
+// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
+// the printf-style argument list. The format string must be a literal.
+// Arguments after the first are not evaluated unless the condition is false.
+#define RAW_CHECK_FMT(cond, ...) \
+ do { \
+ if (ABSL_PREDICT_FALSE(!(cond))) { \
+ ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
+ } \
+ } while (0)
+
+static void CheckForMutexCorruption(intptr_t v, const char* label) {
+ // Test for either of two situations that should not occur in v:
+ // kMuWriter and kMuReader
+ // kMuWrWait and !kMuWait
+ const uintptr_t w = v ^ kMuWait;
+ // By flipping that bit, we can now test for:
+ // kMuWriter and kMuReader in w
+ // kMuWrWait and kMuWait in w
+// We've chosen these two pairs of values so that they will overlap,
+// respectively, when the word is left-shifted by three. This allows us to
+ // save a branch in the common (correct) case of them not being coincident.
+ static_assert(kMuReader << 3 == kMuWriter, "must match");
+ static_assert(kMuWait << 3 == kMuWrWait, "must match");
+ if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
+ RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
+ "%s: Mutex corrupt: both reader and writer lock held: %p",
+ label, reinterpret_cast<void *>(v));
+ RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
+ "%s: Mutex corrupt: waiting writer with no waiters: %p",
+ label, reinterpret_cast<void *>(v));
+ assert(false);
+}
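+
+// Worked example of the shift trick above (illustrative): if v erroneously
+// has both kMuWriter and kMuReader set, then w = v ^ kMuWait has both as
+// well; w << 3 moves the kMuReader bit onto kMuWriter, so w & (w << 3) is
+// non-zero under the mask and the fast path falls through to the detailed
+// checks. Likewise, kMuWrWait without kMuWait becomes kMuWait | kMuWrWait
+// in w, and the shift lands kMuWait on kMuWrWait.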
+
+void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
+ int c = 0;
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this,
+ waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
+ }
+ ABSL_RAW_CHECK(
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ for (;;) {
+ v = mu_.load(std::memory_order_relaxed);
+ CheckForMutexCorruption(v, "Lock");
+ if ((v & waitp->how->slow_need_zero) == 0) {
+ if (mu_.compare_exchange_strong(
+ v, (waitp->how->fast_or |
+ (v & zap_desig_waker[flags & kMuHasBlocked])) +
+ waitp->how->fast_add,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ if (waitp->cond == nullptr ||
+ EvalConditionAnnotated(waitp->cond, this, true, false,
+ waitp->how == kShared)) {
+ break; // we timed out, or condition true, so return
+ }
+ this->UnlockSlow(waitp); // got lock but condition false
+ this->Block(waitp->thread);
+ flags |= kMuHasBlocked;
+ c = 0;
+ }
+ } else { // need to access waiter list
+ bool dowait = false;
+ if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ // This thread tries to become the one and only waiter.
+ PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
+ intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
+ kMuWait;
+ ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
+ if (waitp->how == kExclusive && (v & kMuReader) != 0) {
+ nv |= kMuWrWait;
+ }
+ if (mu_.compare_exchange_strong(
+ v, reinterpret_cast<intptr_t>(new_h) | nv,
+ std::memory_order_release, std::memory_order_relaxed)) {
+ dowait = true;
+ } else { // attempted Enqueue() failed
+ // zero out the waitp field set by Enqueue()
+ waitp->thread->waitp = nullptr;
+ }
+ } else if ((v & waitp->how->slow_inc_need_zero &
+ ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
+ // This is a reader that needs to increment the reader count,
+ // but the count is currently held in the last waiter.
+ if (mu_.compare_exchange_strong(
+ v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
+ kMuReader,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ h->readers += kMuOne; // inc reader count in waiter
+ do { // release spinlock
+ v = mu_.load(std::memory_order_relaxed);
+ } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+ if (waitp->cond == nullptr ||
+ EvalConditionAnnotated(waitp->cond, this, true, false,
+ waitp->how == kShared)) {
+ break; // we timed out, or condition true, so return
+ }
+ this->UnlockSlow(waitp); // got lock but condition false
+ this->Block(waitp->thread);
+ flags |= kMuHasBlocked;
+ c = 0;
+ }
+ } else if ((v & kMuSpin) == 0 && // attempt to queue ourselves
+ mu_.compare_exchange_strong(
+ v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
+ kMuWait,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
+ intptr_t wr_wait = 0;
+ ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
+ if (waitp->how == kExclusive && (v & kMuReader) != 0) {
+ wr_wait = kMuWrWait; // give priority to a waiting writer
+ }
+ do { // release spinlock
+ v = mu_.load(std::memory_order_relaxed);
+ } while (!mu_.compare_exchange_weak(
+ v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
+ reinterpret_cast<intptr_t>(new_h),
+ std::memory_order_release, std::memory_order_relaxed));
+ dowait = true;
+ }
+ if (dowait) {
+ this->Block(waitp->thread); // wait until removed from list or timeout
+ flags |= kMuHasBlocked;
+ c = 0;
+ }
+ }
+ ABSL_RAW_CHECK(
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ // delay, then try again
+ c = synchronization_internal::MutexDelay(c, GENTLE);
+ }
+ ABSL_RAW_CHECK(
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this,
+ waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
+ SYNCH_EV_READERLOCK_RETURNING);
+ }
+}
+
+// Unlock this mutex, which is held by the current thread.
+// If waitp is non-zero, it must be the wait parameters for the current thread
+// which holds the lock but is not runnable because its condition is false
+// or it is in the process of blocking on a condition variable; it must requeue
+// itself on the mutex/condvar to wait for its condition to become true.
+ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ this->AssertReaderHeld();
+ CheckForMutexCorruption(v, "Unlock");
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this,
+ (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
+ }
+ int c = 0;
+ // the waiter under consideration to wake, or zero
+ PerThreadSynch *w = nullptr;
+ // the predecessor to w or zero
+ PerThreadSynch *pw = nullptr;
+ // head of the list searched previously, or zero
+ PerThreadSynch *old_h = nullptr;
+ // a condition that's known to be false.
+ const Condition *known_false = nullptr;
+ PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
+ intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
+ // later writer could have acquired the lock
+ // (starvation avoidance)
+ ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
+ waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ // This loop finds threads wake_list to wakeup if any, and removes them from
+ // the list of waiters. In addition, it places waitp.thread on the queue of
+ // waiters if waitp is non-zero.
+ for (;;) {
+ v = mu_.load(std::memory_order_relaxed);
+ if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
+ waitp == nullptr) {
+ // fast writer release (writer with no waiters or with designated waker)
+ if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ return;
+ }
+ } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
+ // fast reader release (reader with no waiters)
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
+ if (mu_.compare_exchange_strong(v, v - clear,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ return;
+ }
+ } else if ((v & kMuSpin) == 0 && // attempt to get spinlock
+ mu_.compare_exchange_strong(v, v | kMuSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ if ((v & kMuWait) == 0) { // no one to wake
+ intptr_t nv;
+ bool do_enqueue = true; // always Enqueue() the first time
+ ABSL_RAW_CHECK(waitp != nullptr,
+ "UnlockSlow is confused"); // about to sleep
+ do { // must loop to release spinlock as reader count may change
+ v = mu_.load(std::memory_order_relaxed);
+ // decrement reader count if there are readers
+ intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
+ PerThreadSynch *new_h = nullptr;
+ if (do_enqueue) {
+ // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
+ // we must not retry here. The initial attempt will always have
+ // succeeded; further attempts would enqueue us against *this due to
+ // Fer() handling.
+ do_enqueue = (waitp->cv_word == nullptr);
+ new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
+ }
+ intptr_t clear = kMuWrWait | kMuWriter; // by default clear write bit
+ if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) { // last reader
+ clear = kMuWrWait | kMuReader; // clear read bit
+ }
+ nv = (v & kMuLow & ~clear & ~kMuSpin);
+ if (new_h != nullptr) {
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
+ } else { // new_h could be nullptr if we queued ourselves on a
+ // CondVar
+ // In that case, we must place the reader count back in the mutex
+ // word, as Enqueue() did not store it in the new waiter.
+ nv |= new_readers & kMuHigh;
+ }
+ // release spinlock & our lock; retry if reader-count changed
+ // (writer count cannot change since we hold lock)
+ } while (!mu_.compare_exchange_weak(v, nv,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+ break;
+ }
+
+ // There are waiters.
+ // Set h to the head of the circular waiter list.
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
+ // a reader but not the last
+ h->readers -= kMuOne; // release our lock
+ intptr_t nv = v; // normally just release spinlock
+ if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ ABSL_RAW_CHECK(new_h != nullptr,
+ "waiters disappeared during Enqueue()!");
+ nv &= kMuLow;
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
+ }
+ mu_.store(nv, std::memory_order_release); // release spinlock
+ // can release with a store because there were waiters
+ break;
+ }
+
+ // Either we didn't search before, or we marked the queue
+ // as "maybe_unlocking" and no one else should have changed it.
+ ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
+ "Mutex queue changed beneath us");
+
+ // The lock is becoming free, and there's a waiter
+ if (old_h != nullptr &&
+ !old_h->may_skip) { // we used old_h as a terminator
+ old_h->may_skip = true; // allow old_h to skip once more
+ ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
+ if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
+ old_h->skip = old_h->next; // old_h not head & can skip to successor
+ }
+ }
+ if (h->next->waitp->how == kExclusive &&
+ Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
+ // easy case: writer with no condition; no need to search
+ pw = h; // wake w, the successor of h (=pw)
+ w = h->next;
+ w->wake = true;
+ // We are waking up a writer. This writer may be racing against an already
+ // awake reader for the lock. We want the writer to usually win this race,
+ // because if it doesn't, we can potentially keep taking a reader
+ // perpetually and writers will starve. Worse than that, this can also
+ // starve other readers if kMuWrWait gets set later.
+ wr_wait = kMuWrWait;
+ } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
+ // we found a waiter w to wake on a previous iteration and either it's
+ // a writer, or we've searched the entire list so we have all the
+ // readers.
+ if (pw == nullptr) { // if w's predecessor is unknown, it must be h
+ pw = h;
+ }
+ } else {
+ // At this point we don't know all the waiters to wake, and the first
+ // waiter has a condition or is a reader. We avoid searching over
+ // waiters we've searched on previous iterations by starting at
+ // old_h if it's set. If old_h==h, there's no one to wake up at all.
+ if (old_h == h) { // we've searched before, and nothing's new
+ // so there's no one to wake.
+ intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
+ h->readers = 0;
+ h->maybe_unlocking = false; // finished unlocking
+ if (waitp != nullptr) { // we must queue ourselves and sleep
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ nv &= kMuLow;
+ if (new_h != nullptr) {
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
+ } // else new_h could be nullptr if we queued ourselves on a
+ // CondVar
+ }
+ // release spinlock & lock
+ // can release with a store because there were waiters
+ mu_.store(nv, std::memory_order_release);
+ break;
+ }
+
+ // set up to walk the list
+ PerThreadSynch *w_walk; // current waiter during list walk
+ PerThreadSynch *pw_walk; // previous waiter during list walk
+ if (old_h != nullptr) { // we've searched up to old_h before
+ pw_walk = old_h;
+ w_walk = old_h->next;
+ } else { // no prior search, start at beginning
+ pw_walk =
+ nullptr; // h->next's predecessor may change; don't record it
+ w_walk = h->next;
+ }
+
+ h->may_skip = false; // ensure we never skip past h in future searches
+ // even if other waiters are queued after it.
+ ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
+
+ h->maybe_unlocking = true; // we're about to scan the waiter list
+ // without the spinlock held.
+ // Enqueue must be conservative about
+ // priority queuing.
+
+ // We must release the spinlock to evaluate the conditions.
+ mu_.store(v, std::memory_order_release); // release just spinlock
+ // can release with a store because there were waiters
+
+ // h is the last waiter queued, and w_walk the first unsearched waiter.
+ // Without the spinlock, the locations mu_ and h->next may now change
+ // underneath us, but since we hold the lock itself, the only legal
+ // change is to add waiters between h and w_walk. Therefore, it's safe
+ // to walk the path from w_walk to h inclusive. (TryRemove() can remove
+ // a waiter anywhere, but it acquires both the spinlock and the Mutex)
+
+ old_h = h; // remember we searched to here
+
+ // Walk the path up to and including h, looking for waiters we can wake.
+ while (pw_walk != h) {
+ w_walk->wake = false;
+ if (w_walk->waitp->cond ==
+ nullptr || // no condition => vacuously true OR
+ (w_walk->waitp->cond != known_false &&
+ // this thread's condition is not known false, AND
+ // is in fact true
+ EvalConditionIgnored(this, w_walk->waitp->cond))) {
+ if (w == nullptr) {
+ w_walk->wake = true; // can wake this waiter
+ w = w_walk;
+ pw = pw_walk;
+ if (w_walk->waitp->how == kExclusive) {
+ wr_wait = kMuWrWait;
+ break; // bail if waking this writer
+ }
+ } else if (w_walk->waitp->how == kShared) { // wake if a reader
+ w_walk->wake = true;
+ } else { // writer with true condition
+ wr_wait = kMuWrWait;
+ }
+ } else { // can't wake; condition false
+ known_false = w_walk->waitp->cond; // remember last false condition
+ }
+ if (w_walk->wake) { // we're waking reader w_walk
+ pw_walk = w_walk; // don't skip similar waiters
+ } else { // not waking; skip as much as possible
+ pw_walk = Skip(w_walk);
+ }
+ // If pw_walk == h, then the load of pw_walk->next can race with a
+ // concurrent write in Enqueue(). However, in that case we do not need
+ // to do the load, because we will bail out of the loop anyway.
+ if (pw_walk != h) {
+ w_walk = pw_walk->next;
+ }
+ }
+
+ continue; // restart for(;;)-loop to wakeup w or to find more waiters
+ }
+ ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
+ // The first (and perhaps only) waiter we've chosen to wake is w, whose
+ // predecessor is pw. If w is a reader, we must wake all the other
+ // waiters with wake==true as well. We may also need to queue
+ // ourselves if waitp != null. The spinlock and the lock are still
+ // held.
+
+ // This traverses the list in [ pw->next, h ], where h is the head,
+ // removing all elements with wake==true and placing them in the
+ // singly-linked list wake_list. Returns the new head.
+ h = DequeueAllWakeable(h, pw, &wake_list);
+
+ intptr_t nv = (v & kMuEvent) | kMuDesig;
+ // assume no waiters left,
+ // set kMuDesig for INV1a
+
+ if (waitp != nullptr) { // we must queue ourselves and sleep
+ h = Enqueue(h, waitp, v, kMuIsCond);
+ // h is new last waiter; could be null if we queued ourselves on a
+ // CondVar
+ }
+
+ ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
+ "unexpected empty wake list");
+
+ if (h != nullptr) { // there are waiters left
+ h->readers = 0;
+ h->maybe_unlocking = false; // finished unlocking
+ nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
+ }
+
+ // release both spinlock & lock
+ // can release with a store because there were waiters
+ mu_.store(nv, std::memory_order_release);
+ break; // out of for(;;)-loop
+ }
+ // aggressive here; no one can proceed till we do
+ c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
+ } // end of for(;;)-loop
+
+ if (wake_list != kPerThreadSynchNull) {
+ int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles;
+ bool cond_waiter = wake_list->cond_waiter;
+ do {
+ wake_list = Wakeup(wake_list); // wake waiters
+ } while (wake_list != kPerThreadSynchNull);
+ if (!cond_waiter) {
+ // Sample lock contention events only if the (first) waiter was trying to
+ // acquire the lock, not waiting on a condition variable or Condition.
+ int64_t wait_cycles =
+ base_internal::CycleClock::Now() - enqueue_timestamp;
+ mutex_tracer("slow release", this, wait_cycles);
+ ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+ submit_profile_data(enqueue_timestamp);
+ ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+ }
+ }
+}
+
+// Used by CondVar implementation to reacquire mutex after waking from
+// condition variable. This routine is used instead of Lock() because the
+// waiting thread may have been moved from the condition variable queue to the
+// mutex queue without a wakeup, by Fer(). In that case, when the thread is
+// finally woken, the woken thread will believe it has been woken from the
+// condition variable (i.e. its PC will be in the CondVar code), when
+// in fact it has just been woken from the mutex. Thus, it must enter the slow
+// path of the mutex in the same state as if it had just woken from the mutex.
+// That is, it must ensure that kMuDesig is cleared (INV1b).
+void Mutex::Trans(MuHow how) {
+ this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
+}
+
+// Used by CondVar implementation to effectively wake thread w from the
+// condition variable. If this mutex is free, we simply wake the thread.
+// It will later acquire the mutex with high probability. Otherwise, we
+// enqueue thread w on this mutex.
+void Mutex::Fer(PerThreadSynch *w) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
+ int c = 0;
+ ABSL_RAW_CHECK(w->waitp->cond == nullptr,
+ "Mutex::Fer while waiting on Condition");
+ ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
+ "Mutex::Fer while in timed wait");
+ ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
+ "Mutex::Fer with pending CondVar queueing");
+ for (;;) {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // Note: must not queue if the mutex is unlocked (nobody will wake it).
+ // For example, we can have only kMuWait (conditional) or maybe
+ // kMuWait|kMuWrWait.
+ // conflicting != 0 implies that the waking thread cannot currently take
+ // the mutex, which in turn implies that someone else has it and can wake
+ // us if we queue.
+ const intptr_t conflicting =
+ kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
+ if ((v & conflicting) == 0) {
+ w->next = nullptr;
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ IncrementSynchSem(this, w);
+ return;
+ } else {
+ if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ // This thread tries to become the one and only waiter.
+ PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
+ ABSL_RAW_CHECK(new_h != nullptr,
+ "Enqueue failed"); // we must queue ourselves
+ if (mu_.compare_exchange_strong(
+ v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
+ std::memory_order_release, std::memory_order_relaxed)) {
+ return;
+ }
+ } else if ((v & kMuSpin) == 0 &&
+ mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
+ ABSL_RAW_CHECK(new_h != nullptr,
+ "Enqueue failed"); // we must queue ourselves
+ do {
+ v = mu_.load(std::memory_order_relaxed);
+ } while (!mu_.compare_exchange_weak(
+ v,
+ (v & kMuLow & ~kMuSpin) | kMuWait |
+ reinterpret_cast<intptr_t>(new_h),
+ std::memory_order_release, std::memory_order_relaxed));
+ return;
+ }
+ }
+ c = synchronization_internal::MutexDelay(c, GENTLE);
+ }
+}
+
+void Mutex::AssertHeld() const {
+ if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
+ SynchEvent *e = GetSynchEvent(this);
+ ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
+ static_cast<const void *>(this),
+ (e == nullptr ? "" : e->name));
+ }
+}
+
+void Mutex::AssertReaderHeld() const {
+ if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
+ SynchEvent *e = GetSynchEvent(this);
+ ABSL_RAW_LOG(
+ FATAL, "thread should hold at least a read lock on Mutex %p %s",
+ static_cast<const void *>(this), (e == nullptr ? "" : e->name));
+ }
+}
+
+// -------------------------------- condition variables
+static const intptr_t kCvSpin = 0x0001L; // spinlock protects waiter list
+static const intptr_t kCvEvent = 0x0002L; // record events
+
+static const intptr_t kCvLow = 0x0003L; // low order bits of CV
+
+// Hack to make constant values available to gdb pretty printer
+enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
+
+static_assert(PerThreadSynch::kAlignment > kCvLow,
+ "PerThreadSynch::kAlignment must be greater than kCvLow");
+
+void CondVar::EnableDebugLog(const char *name) {
+ SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
+ e->log = true;
+ UnrefSynchEvent(e);
+}
+
+CondVar::~CondVar() {
+ if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
+ ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
+ }
+}
+
+
+// Remove thread s from the list of waiters on this condition variable.
+void CondVar::Remove(PerThreadSynch *s) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
+ intptr_t v;
+ int c = 0;
+ for (v = cv_.load(std::memory_order_relaxed);;
+ v = cv_.load(std::memory_order_relaxed)) {
+ if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
+ cv_.compare_exchange_strong(v, v | kCvSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ if (h != nullptr) {
+ PerThreadSynch *w = h;
+ while (w->next != s && w->next != h) { // search for thread
+ w = w->next;
+ }
+ if (w->next == s) { // found thread; remove it
+ w->next = s->next;
+ if (h == s) {
+ h = (w == s) ? nullptr : w;
+ }
+ s->next = nullptr;
+ s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ }
+ }
+ // release spinlock
+ cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
+ std::memory_order_release);
+ return;
+ } else {
+ // try again after a delay
+ c = synchronization_internal::MutexDelay(c, GENTLE);
+ }
+ }
+}
+
+// Queue thread waitp->thread on condition variable word cv_word using
+// wait parameters waitp.
+// We split this into a separate routine, rather than simply doing it as part
+// of WaitCommon(). If we were to queue ourselves on the condition variable
+// before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
+// the logging code, or via a Condition function) and might potentially attempt
+// to block this thread. That would be a problem if the thread were already on
+// the condition variable waiter queue. Thus, we use the waitp->cv_word
+// to tell the unlock code to call CondVarEnqueue() to queue the thread on the
+// condition variable queue just before the mutex is to be unlocked, and (most
+// importantly) after any call to an external routine that might re-enter the
+// mutex code.
+static void CondVarEnqueue(SynchWaitParams *waitp) {
+ // This thread might be transferred to the Mutex queue by Fer() when
+ // we are woken. To make sure that is what happens, Enqueue() doesn't
+ // call CondVarEnqueue() again but instead uses its normal code. We
+ // must do this before we queue ourselves so that cv_word will be null
+ // when seen by the dequeuer, who may wish immediately to requeue
+ // this thread on another queue.
+ std::atomic<intptr_t> *cv_word = waitp->cv_word;
+ waitp->cv_word = nullptr;
+
+ intptr_t v = cv_word->load(std::memory_order_relaxed);
+ int c = 0;
+ while ((v & kCvSpin) != 0 || // acquire spinlock
+ !cv_word->compare_exchange_weak(v, v | kCvSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ c = synchronization_internal::MutexDelay(c, GENTLE);
+ v = cv_word->load(std::memory_order_relaxed);
+ }
+ ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
+ waitp->thread->waitp = waitp; // prepare ourselves for waiting
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ if (h == nullptr) { // add this thread to waiter list
+ waitp->thread->next = waitp->thread;
+ } else {
+ waitp->thread->next = h->next;
+ h->next = waitp->thread;
+ }
+ waitp->thread->state.store(PerThreadSynch::kQueued,
+ std::memory_order_relaxed);
+ cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
+ std::memory_order_release);
+}
+
+bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
+ bool rc = false; // return value; true iff we timed out
+
+ intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
+ Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
+
+ // maybe trace this call
+ intptr_t v = cv_.load(std::memory_order_relaxed);
+ cond_var_tracer("Wait", this);
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_WAIT);
+ }
+
+ // Release mu and wait on condition variable.
+ SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
+ Synch_GetPerThreadAnnotated(mutex), &cv_);
+ // UnlockSlow() will call CondVarEnqueue() just before releasing the
+ // Mutex, thus queuing this thread on the condition variable. See
+ // CondVarEnqueue() for the reasons.
+ mutex->UnlockSlow(&waitp);
+
+ // wait for signal
+ while (waitp.thread->state.load(std::memory_order_acquire) ==
+ PerThreadSynch::kQueued) {
+ if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
+ this->Remove(waitp.thread);
+ rc = true;
+ }
+ }
+
+ ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
+ waitp.thread->waitp = nullptr; // cleanup
+
+ // maybe trace this call
+ cond_var_tracer("Unwait", this);
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
+ }
+
+ // From synchronization point of view Wait is unlock of the mutex followed
+ // by lock of the mutex. We've annotated start of unlock in the beginning
+ // of the function. Now, finish unlock and annotate lock of the mutex.
+ // (Trans is effectively lock).
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
+ ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
+ mutex->Trans(mutex_how); // Reacquire mutex
+ ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
+ return rc;
+}
+
+bool CondVar::WaitWithTimeout(Mutex *mu, y_absl::Duration timeout) {
+ return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+}
+
+bool CondVar::WaitWithDeadline(Mutex *mu, y_absl::Time deadline) {
+ return WaitCommon(mu, KernelTimeout(deadline));
+}
+
+void CondVar::Wait(Mutex *mu) {
+ WaitCommon(mu, KernelTimeout::Never());
+}
+
+// Wake thread w.
+// If it was a timed wait, w will be waiting on w->cv.
+// Otherwise, if the wait was not associated with a Mutex (w->waitp->cvmu is
+// null), w will be waiting on w->sem.
+// Otherwise, w is transferred to the Mutex w->waitp->cvmu via Mutex::Fer().
+void CondVar::Wakeup(PerThreadSynch *w) {
+ if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
+ // The waiting thread only needs to observe "w->state == kAvailable" to be
+ // released, so we must cache "cvmu" before clearing "next".
+ Mutex *mu = w->waitp->cvmu;
+ w->next = nullptr;
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ Mutex::IncrementSynchSem(mu, w);
+ } else {
+ w->waitp->cvmu->Fer(w);
+ }
+}
+
+void CondVar::Signal() {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
+ ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
+ intptr_t v;
+ int c = 0;
+ for (v = cv_.load(std::memory_order_relaxed); v != 0;
+ v = cv_.load(std::memory_order_relaxed)) {
+ if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
+ cv_.compare_exchange_strong(v, v | kCvSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch *w = nullptr;
+ if (h != nullptr) { // remove first waiter
+ w = h->next;
+ if (w == h) {
+ h = nullptr;
+ } else {
+ h->next = w->next;
+ }
+ }
+ // release spinlock
+ cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
+ std::memory_order_release);
+ if (w != nullptr) {
+ CondVar::Wakeup(w); // wake waiter, if there was one
+ cond_var_tracer("Signal wakeup", this);
+ }
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_SIGNAL);
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
+ return;
+ } else {
+ c = synchronization_internal::MutexDelay(c, GENTLE);
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
+}
+
+void CondVar::SignalAll() {
+ ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
+ intptr_t v;
+ int c = 0;
+ for (v = cv_.load(std::memory_order_relaxed); v != 0;
+ v = cv_.load(std::memory_order_relaxed)) {
+ // empty the list if spinlock free
+ // We do this by simply setting the list to empty using
+ // compare and swap. We then have the entire list in our hands,
+ // which cannot be changing since we grabbed it while no one
+ // held the lock.
+ if ((v & kCvSpin) == 0 &&
+ cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ if (h != nullptr) {
+ PerThreadSynch *w;
+ PerThreadSynch *n = h->next;
+ do { // for every thread, wake it up
+ w = n;
+ n = n->next;
+ CondVar::Wakeup(w);
+ } while (w != h);
+ cond_var_tracer("SignalAll wakeup", this);
+ }
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_SIGNALALL);
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
+ return;
+ } else {
+ // try again after a delay
+ c = synchronization_internal::MutexDelay(c, GENTLE);
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
+}
+
+void ReleasableMutexLock::Release() {
+ ABSL_RAW_CHECK(this->mu_ != nullptr,
+ "ReleasableMutexLock::Release may only be called once");
+ this->mu_->Unlock();
+ this->mu_ = nullptr;
+}
+
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+extern "C" void __tsan_read1(void *addr);
+#else
+#define __tsan_read1(addr) // do nothing if TSan not enabled
+#endif
+
+// A function that just returns its argument, dereferenced
+static bool Dereference(void *arg) {
+ // ThreadSanitizer does not instrument this file for memory accesses.
+ // This function dereferences a user variable that can participate
+ // in a data race, so we need to manually tell TSan about this memory access.
+ __tsan_read1(arg);
+ return *(static_cast<bool *>(arg));
+}
+
+Condition::Condition() {} // null constructor, used for kTrue only
+const Condition Condition::kTrue;
+
+Condition::Condition(bool (*func)(void *), void *arg)
+ : eval_(&CallVoidPtrFunction),
+ function_(func),
+ method_(nullptr),
+ arg_(arg) {}
+
+bool Condition::CallVoidPtrFunction(const Condition *c) {
+ return (*c->function_)(c->arg_);
+}
+
+Condition::Condition(const bool *cond)
+ : eval_(CallVoidPtrFunction),
+ function_(Dereference),
+ method_(nullptr),
+ // const_cast is safe since Dereference does not modify arg
+ arg_(const_cast<bool *>(cond)) {}
+
+bool Condition::Eval() const {
+ // eval_ == null for kTrue
+ return (this->eval_ == nullptr) || (*this->eval_)(this);
+}
+
+bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
+ if (a == nullptr) {
+ return b == nullptr || b->eval_ == nullptr;
+ }
+ if (b == nullptr || b->eval_ == nullptr) {
+ return a->eval_ == nullptr;
+ }
+ return a->eval_ == b->eval_ && a->function_ == b->function_ &&
+ a->arg_ == b->arg_ && a->method_ == b->method_;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
new file mode 100644
index 00000000000..0762a852df6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
@@ -0,0 +1,1082 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// mutex.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `Mutex` -- a mutually exclusive lock -- the
+// most common type of synchronization primitive for facilitating locks on
+// shared resources. A mutex is used to prevent multiple threads from accessing
+// and/or writing to a shared resource concurrently.
+//
+// Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
+// features:
+// * Conditional predicates intrinsic to the `Mutex` object
+// * Shared/reader locks, in addition to standard exclusive/writer locks
+// * Deadlock detection and debug support.
+//
+// The following helper classes are also defined within this file:
+//
+// MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
+// write access within the current scope.
+//
+// ReaderMutexLock
+// - An RAII wrapper to acquire and release a `Mutex` for shared/read
+// access within the current scope.
+//
+// WriterMutexLock
+// - Effectively an alias for `MutexLock` above, designed for use in
+// distinguishing reader and writer locks within code.
+//
+// In addition to simple mutex locks, this file also defines ways to perform
+// locking under certain conditions.
+//
+// Condition - (Preferred) Used to wait for a particular predicate that
+// depends on state protected by the `Mutex` to become true.
+// CondVar - A lower-level variant of `Condition` that relies on
+// application code to explicitly signal the `CondVar` when
+// a condition has been met.
+//
+// See below for more information on using `Condition` or `CondVar`.
+//
+// Mutexes and mutex behavior can be quite complicated. As a result, the
+// information within this header file is limited. Please consult the Mutex
+// guide for more complete information and examples.
+
+#ifndef ABSL_SYNCHRONIZATION_MUTEX_H_
+#define ABSL_SYNCHRONIZATION_MUTEX_H_
+
+#include <atomic>
+#include <cstdint>
+#include <util/generic/string.h>
+
+#include "y_absl/base/const_init.h"
+#include "y_absl/base/internal/identity.h"
+#include "y_absl/base/internal/low_level_alloc.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/base/internal/tsan_mutex_interface.h"
+#include "y_absl/base/port.h"
+#include "y_absl/base/thread_annotations.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+#include "y_absl/synchronization/internal/per_thread_sem.h"
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+class Condition;
+struct SynchWaitParams;
+
+// -----------------------------------------------------------------------------
+// Mutex
+// -----------------------------------------------------------------------------
+//
+// A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
+// on some resource, typically a variable or data structure with associated
+// invariants. Proper usage of mutexes prevents concurrent access by different
+// threads to the same resource.
+//
+// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
+// The `Lock()` operation *acquires* a `Mutex` (in a state known as an
+// *exclusive* -- or write -- lock), while the `Unlock()` operation *releases* a
+// Mutex. During the span of time between the Lock() and Unlock() operations,
+// a mutex is said to be *held*. By design all mutexes support exclusive/write
+// locks, as this is the most common way to use a mutex.
+//
+// The `Mutex` state machine for basic lock/unlock operations is quite simple:
+//
+// | | Lock() | Unlock() |
+// |----------------+------------+----------|
+// | Free | Exclusive | invalid |
+// | Exclusive | blocks | Free |
+//
+// Attempts to `Unlock()` must originate from the thread that performed the
+// corresponding `Lock()` operation.
+//
+// An "invalid" operation is disallowed by the API. The `Mutex` implementation
+// is allowed to do anything on an invalid call, including but not limited to
+// crashing with a useful error message, silently succeeding, or corrupting
+// data structures. In debug mode, the implementation attempts to crash with a
+// useful error message.
+//
+// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
+// is, however, approximately fair over long periods, and starvation-free for
+// threads at the same priority.
+//
+// The lock/unlock primitives are annotated with the lock annotations defined
+// in `y_absl/base/thread_annotations.h`. When writing multi-threaded code,
+// you should use lock annotations whenever possible to document your lock
+// synchronization policy. Besides acting as documentation, these annotations
+// also help compilers or static analysis tools to identify and warn about
+// issues that could potentially result in race conditions and deadlocks.
+//
+// For more information about the lock annotations, please see
+// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
+// in the Clang documentation.
+//
+// See also `MutexLock`, below, for scoped `Mutex` acquisition.
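+//
+// A minimal usage sketch follows; the guarded counter and the `Increment()`
+// function here are illustrative, not part of this header:
+//
+//   y_absl::Mutex mu;
+//   int count ABSL_GUARDED_BY(mu) = 0;
+//
+//   void Increment() {
+//     mu.Lock();    // block until the mutex is free, then hold it exclusively
+//     count++;      // safe: no other thread can hold mu here
+//     mu.Unlock();  // release; another thread may now acquire mu
+//   }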
+
+class ABSL_LOCKABLE Mutex {
+ public:
+ // Creates a `Mutex` that is not held by anyone. This constructor is
+ // typically used for Mutexes allocated on the heap or the stack.
+ //
+ // To create `Mutex` instances with static storage duration
+ // (e.g. a namespace-scoped or global variable), see
+ // `Mutex::Mutex(y_absl::kConstInit)` below instead.
+ Mutex();
+
+ // Creates a mutex with static storage duration. A global variable
+ // constructed this way avoids the lifetime issues that can occur on program
+ // startup and shutdown. (See y_absl/base/const_init.h.)
+ //
+ // For Mutexes allocated on the heap and stack, instead use the default
+ // constructor, which can interact more fully with the thread sanitizer.
+ //
+ // Example usage:
+ // namespace foo {
+ // ABSL_CONST_INIT y_absl::Mutex mu(y_absl::kConstInit);
+ // }
+ explicit constexpr Mutex(y_absl::ConstInitType);
+
+ ~Mutex();
+
+ // Mutex::Lock()
+ //
+ // Blocks the calling thread, if necessary, until this `Mutex` is free, and
+ // then acquires it exclusively. (This lock is also known as a "write lock.")
+ void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
+
+ // Mutex::Unlock()
+ //
+ // Releases this `Mutex` and returns it from the exclusive/write state to the
+ // free state. The calling thread must hold the `Mutex` exclusively.
+ void Unlock() ABSL_UNLOCK_FUNCTION();
+
+ // Mutex::TryLock()
+ //
+ // If the mutex can be acquired without blocking, does so exclusively and
+ // returns `true`. Otherwise, returns `false`. Returns `true` with high
+ // probability if the `Mutex` was free.
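+ //
+ // Example (a sketch; `mu_` and `counter_` are hypothetical members):
+ //
+ //   if (mu_.TryLock()) {
+ //     counter_++;    // got the lock without blocking
+ //     mu_.Unlock();
+ //   } else {
+ //     // lock not acquired; skip the update rather than block
+ //   }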
+ bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
+
+ // Mutex::AssertHeld()
+ //
+ // Returns immediately if this thread holds the `Mutex` exclusively (in write
+ // mode). Otherwise, may report an error (typically by crashing with a
+ // diagnostic), or may return immediately.
+ void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
+
+ // ---------------------------------------------------------------------------
+ // Reader-Writer Locking
+ // ---------------------------------------------------------------------------
+
+ // A Mutex can also be used as a starvation-free reader-writer lock.
+ // Neither read-locks nor write-locks are reentrant/recursive to avoid
+ // potential client programming errors.
+ //
+ // The Mutex API provides `Writer*()` aliases for the existing `Lock()`,
+ // `Unlock()` and `TryLock()` methods for use within applications mixing
+ // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this
+ // manner can make locking behavior clearer when mixing read and write modes.
+ //
+ // Introducing reader locks necessarily complicates the `Mutex` state
+ // machine somewhat. The table below illustrates the allowed state transitions
+ // of a mutex in such cases. Note that ReaderLock() may block even if the lock
+ // is held in shared mode; this occurs when another thread is blocked on a
+ // call to WriterLock().
+ //
+ // ---------------------------------------------------------------------------
+ // Operation: WriterLock() Unlock() ReaderLock() ReaderUnlock()
+ // ---------------------------------------------------------------------------
+ // State
+ // ---------------------------------------------------------------------------
+ // Free Exclusive invalid Shared(1) invalid
+ // Shared(1) blocks invalid Shared(2) or blocks Free
+ // Shared(n) n>1 blocks invalid Shared(n+1) or blocks Shared(n-1)
+ // Exclusive blocks Free blocks invalid
+ // ---------------------------------------------------------------------------
+ //
+ // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
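+ //
+ // Example (a sketch; `mu_` and `value_` are hypothetical members):
+ //
+ //   int Get() {            // readers may share the lock
+ //     mu_.ReaderLock();
+ //     int v = value_;
+ //     mu_.ReaderUnlock();
+ //     return v;
+ //   }
+ //
+ //   void Set(int v) {      // writers need exclusive access
+ //     mu_.WriterLock();
+ //     value_ = v;
+ //     mu_.WriterUnlock();
+ //   }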
+
+ // Mutex::ReaderLock()
+ //
+ // Blocks the calling thread, if necessary, until this `Mutex` is either free,
+ // or in shared mode, and then acquires a share of it. Note that
+ // `ReaderLock()` will block if some other thread has an exclusive/writer lock
+ // on the mutex.
+
+ void ReaderLock() ABSL_SHARED_LOCK_FUNCTION();
+
+ // Mutex::ReaderUnlock()
+ //
+ // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
+ // the free state if this thread holds the last reader lock on the mutex. Note
+ // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
+ void ReaderUnlock() ABSL_UNLOCK_FUNCTION();
+
+ // Mutex::ReaderTryLock()
+ //
+ // If the mutex can be acquired without blocking, acquires this mutex for
+ // shared access and returns `true`. Otherwise, returns `false`. Returns
+ // `true` with high probability if the `Mutex` was free or shared.
+ bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
+
+ // Mutex::AssertReaderHeld()
+ //
+ // Returns immediately if this thread holds the `Mutex` in at least shared
+ // mode (read mode). Otherwise, may report an error (typically by
+ // crashing with a diagnostic), or may return immediately.
+ void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
+
+ // Mutex::WriterLock()
+ // Mutex::WriterUnlock()
+ // Mutex::WriterTryLock()
+ //
+ // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
+ //
+ // These methods may be used (along with the complementary `Reader*()`
+ // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
+ // etc.) from reader/writer lock usage.
+ void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
+
+ void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
+
+ bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ return this->TryLock();
+ }
+
+ // ---------------------------------------------------------------------------
+ // Conditional Critical Regions
+ // ---------------------------------------------------------------------------
+
+ // Conditional usage of a `Mutex` can occur using two distinct paradigms:
+ //
+ // * Use of `Mutex` member functions with `Condition` objects.
+ // * Use of the separate `CondVar` abstraction.
+ //
+ // In general, prefer use of `Condition` and the `Mutex` member functions
+ // listed below over `CondVar`. When there are multiple threads waiting on
+ // distinctly different conditions, however, a battery of `CondVar`s may be
+ // more efficient. This section discusses use of `Condition` objects.
+ //
+ // `Mutex` contains member functions for performing lock operations only under
+ // certain conditions, of class `Condition`. For correctness, the `Condition`
+ // must return a boolean that is a pure function, only of state protected by
+ // the `Mutex`. The condition must be invariant w.r.t. environmental state
+ // such as thread, cpu id, or time, and must be `noexcept`. The condition will
+ // always be invoked with the mutex held in at least read mode, so it should
+ // not block for long periods or sleep on a timer.
+ //
+ // Since a condition must not depend directly on the current time, use
+ // `*WithTimeout()` member function variants to make your condition
+ // effectively true after a given duration, or `*WithDeadline()` variants to
+ // make your condition effectively true after a given time.
+ //
+ // The condition function should have no side-effects aside from debug
+ // logging; as a special exception, the function may acquire other mutexes
+ // provided it releases all those that it acquires. (This exception was
+ // required to allow logging.)
+
+ // Mutex::Await()
+ //
+ // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
+ // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
+ // same mode in which it was previously held. If the condition is initially
+ // `true`, `Await()` *may* skip the release/re-acquire step.
+ //
+ // `Await()` requires that this thread holds this `Mutex` in some mode.
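+ //
+ // Example (a sketch; `mu_` and the bool member `ready_` are hypothetical):
+ //
+ //   mu_.Lock();
+ //   mu_.Await(y_absl::Condition(&ready_));
+ //   // ready_ is true and mu_ is held in its original mode here.
+ //   mu_.Unlock();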
+ void Await(const Condition &cond);
+
+ // Mutex::LockWhen()
+ // Mutex::ReaderLockWhen()
+ // Mutex::WriterLockWhen()
+ //
+ // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
+ // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
+ // logically equivalent to `*Lock(); Await();` though they may have different
+ // performance characteristics.
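+ //
+ // Example (a sketch; `mu_` and the bool member `ready_` are hypothetical):
+ //
+ //   mu_.LockWhen(y_absl::Condition(&ready_));
+ //   // mu_ is held and ready_ is true here.
+ //   mu_.Unlock();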
+ void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
+
+ void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION();
+
+ void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ this->LockWhen(cond);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Mutex Variants with Timeouts/Deadlines
+ // ---------------------------------------------------------------------------
+
+ // Mutex::AwaitWithTimeout()
+ // Mutex::AwaitWithDeadline()
+ //
+ // Unlocks this `Mutex` and blocks until simultaneously:
+ // - either `cond` is true or the {timeout has expired, deadline has passed}
+ // and
+ // - this `Mutex` can be reacquired,
+ // then reacquire this `Mutex` in the same mode in which it was previously
+ // held, returning `true` iff `cond` is `true` on return.
+ //
+ // If the condition is initially `true`, the implementation *may* skip the
+ // release/re-acquire step and return immediately.
+ //
+ // Deadlines in the past are equivalent to an immediate deadline.
+ // Negative timeouts are equivalent to a zero timeout.
+ //
+ // This method requires that this thread holds this `Mutex` in some mode.
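+ //
+ // Example (a sketch; `mu_` and the bool member `done_` are hypothetical):
+ //
+ //   mu_.Lock();
+ //   bool done = mu_.AwaitWithTimeout(y_absl::Condition(&done_),
+ //                                    y_absl::Seconds(5));
+ //   if (!done) { /* timed out; done_ is still false */ }
+ //   mu_.Unlock();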
+ bool AwaitWithTimeout(const Condition &cond, y_absl::Duration timeout);
+
+ bool AwaitWithDeadline(const Condition &cond, y_absl::Time deadline);
+
+ // Mutex::LockWhenWithTimeout()
+ // Mutex::ReaderLockWhenWithTimeout()
+ // Mutex::WriterLockWhenWithTimeout()
+ //
+ // Blocks until simultaneously both:
+ // - either `cond` is `true` or the timeout has expired, and
+ // - this `Mutex` can be acquired,
+ // then atomically acquires this `Mutex`, returning `true` iff `cond` is
+ // `true` on return.
+ //
+ // Negative timeouts are equivalent to a zero timeout.
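+ //
+ // Example (a sketch; `mu_` and the bool member `ready_` are hypothetical):
+ //
+ //   if (mu_.LockWhenWithTimeout(y_absl::Condition(&ready_),
+ //                               y_absl::Milliseconds(100))) {
+ //     // ready_ is true.
+ //   } else {
+ //     // Timed out: mu_ is held anyway, but ready_ is false.
+ //   }
+ //   mu_.Unlock();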
+ bool LockWhenWithTimeout(const Condition &cond, y_absl::Duration timeout)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION();
+ bool ReaderLockWhenWithTimeout(const Condition &cond, y_absl::Duration timeout)
+ ABSL_SHARED_LOCK_FUNCTION();
+ bool WriterLockWhenWithTimeout(const Condition &cond, y_absl::Duration timeout)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ return this->LockWhenWithTimeout(cond, timeout);
+ }
+
+ // Mutex::LockWhenWithDeadline()
+ // Mutex::ReaderLockWhenWithDeadline()
+ // Mutex::WriterLockWhenWithDeadline()
+ //
+ // Blocks until simultaneously both:
+ // - either `cond` is `true` or the deadline has been passed, and
+ // - this `Mutex` can be acquired,
+ // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
+ // on return.
+ //
+ // Deadlines in the past are equivalent to an immediate deadline.
+ bool LockWhenWithDeadline(const Condition &cond, y_absl::Time deadline)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION();
+ bool ReaderLockWhenWithDeadline(const Condition &cond, y_absl::Time deadline)
+ ABSL_SHARED_LOCK_FUNCTION();
+ bool WriterLockWhenWithDeadline(const Condition &cond, y_absl::Time deadline)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ return this->LockWhenWithDeadline(cond, deadline);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Debug Support: Invariant Checking, Deadlock Detection, Logging.
+ // ---------------------------------------------------------------------------
+
+ // Mutex::EnableInvariantDebugging()
+ //
+ // If `invariant` != null and if invariant debugging has been enabled globally,
+ // cause `(*invariant)(arg)` to be called at moments when the invariant for
+ // this `Mutex` should hold (for example: just after acquire, just before
+ // release).
+ //
+ // The routine `invariant` should have no side-effects since it is not
+ // guaranteed how many times it will be called; it should check the invariant
+ // and crash if it does not hold. Enabling global invariant debugging may
+ // substantially reduce `Mutex` performance; it should be set only for
+ // non-production runs. Optimization options may also disable invariant
+ // checks.
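+ //
+ // Example (a sketch; `State`, `CheckInvariant`, and `state_` are
+ // hypothetical):
+ //
+ //   static void CheckInvariant(void *arg) {
+ //     auto *s = static_cast<State *>(arg);
+ //     assert(s->count >= 0);  // crash in debug builds if the invariant fails
+ //   }
+ //   ...
+ //   mu_.EnableInvariantDebugging(&CheckInvariant, &state_);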
+ void EnableInvariantDebugging(void (*invariant)(void *), void *arg);
+
+ // Mutex::EnableDebugLog()
+ //
+ // Cause all subsequent uses of this `Mutex` to be logged via
+ // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
+ // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
+ //
+ // Note: This method substantially reduces `Mutex` performance.
+ void EnableDebugLog(const char *name);
+
+ // Deadlock detection
+
+ // Mutex::ForgetDeadlockInfo()
+ //
+ // Forget any deadlock-detection information previously gathered
+ // about this `Mutex`. Call this method in debug mode when the lock ordering
+ // of a `Mutex` changes.
+ void ForgetDeadlockInfo();
+
+ // Mutex::AssertNotHeld()
+ //
+ // Returns immediately if this thread does not hold this `Mutex` in any
+ // mode; otherwise, may report an error (typically by crashing with a
+ // diagnostic), or may return immediately.
+ //
+ // Currently this check is performed only if all of the following hold:
+ // - the build is in debug mode
+ // - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
+ // - the number of locks concurrently held by this thread is not large.
+ void AssertNotHeld() const;
+
+ // Special cases.
+
+ // A `MuHow` is a constant that indicates how a lock should be acquired.
+ // Internal implementation detail. Clients should ignore.
+ typedef const struct MuHowS *MuHow;
+
+ // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
+ //
+ // Causes the `Mutex` implementation to prepare itself for re-entry caused by
+ // future use of `Mutex` within a fatal signal handler. This method is
+ // intended for use only for last-ditch attempts to log crash information.
+ // It does not guarantee that attempts to use Mutexes within the handler will
+ // not deadlock; it merely makes other faults less likely.
+ //
+ // WARNING: This routine must be invoked from a signal handler, and the
+ // signal handler must either loop forever or terminate the process.
+ // Attempts to return from (or `longjmp` out of) the signal handler once this
+ // call has been made may cause arbitrary program behaviour including
+ // crashes and deadlocks.
+ static void InternalAttemptToUseMutexInFatalSignalHandler();
+
+ private:
+ std::atomic<intptr_t> mu_; // The Mutex state.
+
+ // Post()/Wait() versus associated PerThreadSem; in class for required
+ // friendship with PerThreadSem.
+ static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
+ static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
+ synchronization_internal::KernelTimeout t);
+
+ // slow path acquire
+ void LockSlowLoop(SynchWaitParams *waitp, int flags);
+ // wrappers around LockSlowLoop()
+ bool LockSlowWithDeadline(MuHow how, const Condition *cond,
+ synchronization_internal::KernelTimeout t,
+ int flags);
+ void LockSlow(MuHow how, const Condition *cond,
+ int flags) ABSL_ATTRIBUTE_COLD;
+ // slow path release
+ void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD;
+ // Common code between Await() and AwaitWithTimeout/Deadline()
+ bool AwaitCommon(const Condition &cond,
+ synchronization_internal::KernelTimeout t);
+ // Attempt to remove thread s from queue.
+ void TryRemove(base_internal::PerThreadSynch *s);
+ // Block a thread on mutex.
+ void Block(base_internal::PerThreadSynch *s);
+ // Wake a thread; return successor.
+ base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w);
+
+ friend class CondVar; // for access to Trans()/Fer().
+ void Trans(MuHow how); // used for CondVar->Mutex transfer
+ void Fer(
+ base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer
+
+ // Catch the error of writing Mutex when intending MutexLock.
+ Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit)
+
+ Mutex(const Mutex&) = delete;
+ Mutex& operator=(const Mutex&) = delete;
+};
+
+// -----------------------------------------------------------------------------
+// Mutex RAII Wrappers
+// -----------------------------------------------------------------------------
+
+// MutexLock
+//
+// `MutexLock` is a helper class that acquires and releases a `Mutex` via
+// RAII.
+//
+// Example:
+//
+// class Foo {
+// public:
+// Foo::Bar* Baz() {
+// MutexLock lock(&mu_);
+// ...
+// return bar;
+// }
+//
+// private:
+// Mutex mu_;
+// };
+class ABSL_SCOPED_LOCKABLE MutexLock {
+ public:
+ // Constructors
+
+ // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
+ // guaranteed to be locked when this object is constructed. Requires that
+ // `mu` be dereferenceable.
+ explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+ this->mu_->Lock();
+ }
+
+ // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
+ // the above, the condition given by `cond` is also guaranteed to hold when
+ // this object is constructed.
+ explicit MutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->LockWhen(cond);
+ }
+
+ MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex)
+ MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
+ MutexLock& operator=(const MutexLock&) = delete;
+ MutexLock& operator=(MutexLock&&) = delete;
+
+ ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
+
+ private:
+ Mutex *const mu_;
+};
+
+// ReaderMutexLock
+//
+// The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
+// releases a shared lock on a `Mutex` via RAII.
+class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
+ public:
+ explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
+ mu->ReaderLock();
+ }
+
+ explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_SHARED_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->ReaderLockWhen(cond);
+ }
+
+ ReaderMutexLock(const ReaderMutexLock&) = delete;
+ ReaderMutexLock(ReaderMutexLock&&) = delete;
+ ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
+ ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
+
+ ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
+
+ private:
+ Mutex *const mu_;
+};
+
+// WriterMutexLock
+//
+// The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
+// releases a write (exclusive) lock on a `Mutex` via RAII.
+class ABSL_SCOPED_LOCKABLE WriterMutexLock {
+ public:
+ explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->WriterLock();
+ }
+
+ explicit WriterMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->WriterLockWhen(cond);
+ }
+
+ WriterMutexLock(const WriterMutexLock&) = delete;
+ WriterMutexLock(WriterMutexLock&&) = delete;
+ WriterMutexLock& operator=(const WriterMutexLock&) = delete;
+ WriterMutexLock& operator=(WriterMutexLock&&) = delete;
+
+ ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
+
+ private:
+ Mutex *const mu_;
+};
+
+// -----------------------------------------------------------------------------
+// Condition
+// -----------------------------------------------------------------------------
+//
+// As noted above, `Mutex` contains a number of member functions which take a
+// `Condition` as an argument; clients can wait for conditions to become `true`
+// before attempting to acquire the mutex. These sections are known as
+// "condition critical" sections. To use a `Condition`, you simply need to
+// construct it and use it within an appropriate `Mutex` member function;
+// everything else in the `Condition` class is an implementation detail.
+//
+// A `Condition` is specified as a function pointer which returns a boolean.
+// `Condition` functions should be pure functions -- their results should depend
+// only on passed arguments, should not consult any external state (such as
+// clocks), and should have no side-effects, aside from debug logging. Any
+// objects that the function may access should be limited to those which are
+// constant while the mutex is blocked on the condition (e.g. a stack variable),
+// or objects of state protected explicitly by the mutex.
+//
+// No matter which construction is used for `Condition`, the underlying
+// function pointer / functor / callable must not throw any
+// exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
+// the face of a throwing `Condition`. (When Abseil is allowed to depend
+// on C++17, these function pointers will be explicitly marked
+// `noexcept`; until then this requirement cannot be enforced in the
+// type system.)
+//
+// Note: to use a `Condition`, you need only construct it and pass it to a
+// suitable `Mutex` member function, such as `Mutex::Await()`, or to the
+// constructor of one of the scope guard classes.
+//
+// Example using LockWhen/Unlock:
+//
+// // assume count_ is not an internal reference count
+// int count_ ABSL_GUARDED_BY(mu_);
+// Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
+//
+// mu_.LockWhen(count_is_zero);
+// // ...
+// mu_.Unlock();
+//
+// Example using a scope guard:
+//
+// {
+// MutexLock lock(&mu_, count_is_zero);
+// // ...
+// }
+//
+// When multiple threads are waiting on exactly the same condition, make sure
+// that they are constructed with the same parameters (same pointer to function
+// + arg, or same pointer to object + method), so that the mutex implementation
+// can avoid redundantly evaluating the same condition for each thread.
+class Condition {
+ public:
+ // A Condition that returns the result of "(*func)(arg)"
+ Condition(bool (*func)(void *), void *arg);
+
+ // Templated version for people who are averse to casts.
+ //
+ // To use a lambda, prepend it with unary plus, which converts the lambda
+ // into a function pointer:
+ // Condition(+[](T* t) { return ...; }, arg).
+ //
+ // Note: lambdas in this case must contain no bound variables.
+ //
+ // See class comment for performance advice.
+ template<typename T>
+ Condition(bool (*func)(T *), T *arg);
+
+ // Templated version for invoking a method that returns a `bool`.
+ //
+ // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
+ // `object->Method()`.
+ //
+ // Implementation Note: `y_absl::internal::identity` is used to allow methods to
+ // come from base classes. A simpler signature like
+ // `Condition(T*, bool (T::*)())` does not suffice.
+ template<typename T>
+ Condition(T *object, bool (y_absl::internal::identity<T>::type::* method)());
+
+ // Same as above, for const members
+ template<typename T>
+ Condition(const T *object,
+ bool (y_absl::internal::identity<T>::type::* method)() const);
+
+ // A Condition that returns the value of `*cond`
+ explicit Condition(const bool *cond);
+
+ // Templated version for invoking a functor that returns a `bool`.
+ // This approach accepts pointers to non-mutable lambdas, `std::function`,
+ // the result of `std::bind`, and user-defined functors that define
+ // `bool F::operator()() const`.
+ //
+ // Example:
+ //
+ // auto reached = [this, current]() {
+ // mu_.AssertReaderHeld(); // For annotalysis.
+ // return processed_ >= current;
+ // };
+ // mu_.Await(Condition(&reached));
+ //
+ // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
+ // the lambda, as it may be called while the mutex is being unlocked from a
+ // scope holding only a reader lock; that would cause the assertion to fail
+ // and crash the binary.
+
+ // See class comment for performance advice. In particular, if there
+ // might be more than one waiter for the same condition, make sure
+ // that all waiters construct the condition with the same pointers.
+
+ // Implementation note: The second template parameter ensures that this
+ // constructor doesn't participate in overload resolution if T doesn't have
+ // `bool operator() const`.
+ template <typename T, typename E = decltype(
+ static_cast<bool (T::*)() const>(&T::operator()))>
+ explicit Condition(const T *obj)
+ : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
+
+ // A Condition that always returns `true`.
+ static const Condition kTrue;
+
+ // Evaluates the condition.
+ bool Eval() const;
+
+ // Returns `true` if the two conditions are guaranteed to return the same
+ // value if evaluated at the same time, `false` if the evaluation *may* return
+ // different results.
+ //
+ // Two `Condition` values are guaranteed equal if both their `func` and `arg`
+ // components are the same. A null pointer is equivalent to a `true`
+ // condition.
+ static bool GuaranteedEqual(const Condition *a, const Condition *b);
+
+ private:
+ typedef bool (*InternalFunctionType)(void * arg);
+ typedef bool (Condition::*InternalMethodType)();
+ typedef bool (*InternalMethodCallerType)(void * arg,
+ InternalMethodType internal_method);
+
+ bool (*eval_)(const Condition*); // Actual evaluator
+ InternalFunctionType function_; // function taking pointer returning bool
+ InternalMethodType method_; // method returning bool
+ void *arg_; // arg of function_ or object of method_
+
+ Condition(); // null constructor used only to create kTrue
+
+ // Various functions eval_ can point to:
+ static bool CallVoidPtrFunction(const Condition*);
+ template <typename T> static bool CastAndCallFunction(const Condition* c);
+ template <typename T> static bool CastAndCallMethod(const Condition* c);
+};
+
+// -----------------------------------------------------------------------------
+// CondVar
+// -----------------------------------------------------------------------------
+//
+// A condition variable, reflecting state evaluated separately outside of the
+// `Mutex` object, which can be signaled to wake callers.
+// This class is not normally needed; use `Mutex` member functions such as
+// `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
+// with many threads and many conditions, `CondVar` may be faster.
+//
+// The implementation may deliver signals to any condition variable at
+// any time, even when no call to `Signal()` or `SignalAll()` is made; as a
+// result, upon being awoken, you must check the logical condition you have
+// been waiting upon.
+//
+// Examples:
+//
+// Usage for a thread waiting for some condition C protected by mutex mu:
+// mu.Lock();
+// while (!C) { cv->Wait(&mu); } // releases and reacquires mu
+// // C holds; process data
+// mu.Unlock();
+//
+// Usage to wake such a waiter:
+// mu.Lock();
+// // process data, possibly establishing C
+// if (C) { cv->Signal(); }
+// mu.Unlock();
+//
+// If C may be useful to more than one waiter, use `SignalAll()` instead of
+// `Signal()`.
+//
+// With this implementation it is efficient to use `Signal()/SignalAll()` inside
+// the locked region; this usage can make reasoning about your program easier.
+//
+class CondVar {
+ public:
+ // A `CondVar` allocated on the heap or on the stack can use this
+ // constructor.
+ CondVar();
+ ~CondVar();
+
+ // CondVar::Wait()
+ //
+ // Atomically releases a `Mutex` and blocks on this condition variable.
+ // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
+ // spurious wakeup), then reacquires the `Mutex` and returns.
+ //
+ // Requires and ensures that the current thread holds the `Mutex`.
+ void Wait(Mutex *mu);
+
+ // CondVar::WaitWithTimeout()
+ //
+ // Atomically releases a `Mutex` and blocks on this condition variable.
+ // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
+ // spurious wakeup), or until the timeout has expired, then reacquires
+ // the `Mutex` and returns.
+ //
+ // Returns true if the timeout has expired without this `CondVar`
+ // being signalled in any manner. If both the timeout has expired
+ // and this `CondVar` has been signalled, the implementation is free
+ // to return `true` or `false`.
+ //
+ // Requires and ensures that the current thread holds the `Mutex`.
+ bool WaitWithTimeout(Mutex *mu, y_absl::Duration timeout);
+
+ // CondVar::WaitWithDeadline()
+ //
+ // Atomically releases a `Mutex` and blocks on this condition variable.
+ // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
+ // spurious wakeup), or until the deadline has passed, then reacquires
+ // the `Mutex` and returns.
+ //
+ // Deadlines in the past are equivalent to an immediate deadline.
+ //
+ // Returns true if the deadline has passed without this `CondVar`
+ // being signalled in any manner. If both the deadline has passed
+ // and this `CondVar` has been signalled, the implementation is free
+ // to return `true` or `false`.
+ //
+ // Requires and ensures that the current thread holds the `Mutex`.
+ bool WaitWithDeadline(Mutex *mu, y_absl::Time deadline);
+
+ // CondVar::Signal()
+ //
+ // Signal this `CondVar`; wake at least one waiter if one exists.
+ void Signal();
+
+ // CondVar::SignalAll()
+ //
+ // Signal this `CondVar`; wake all waiters.
+ void SignalAll();
+
+ // CondVar::EnableDebugLog()
+ //
+ // Causes all subsequent uses of this `CondVar` to be logged via
+ // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
+ // Note: this method substantially reduces `CondVar` performance.
+ void EnableDebugLog(const char *name);
+
+ private:
+ bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
+ void Remove(base_internal::PerThreadSynch *s);
+ void Wakeup(base_internal::PerThreadSynch *w);
+ std::atomic<intptr_t> cv_; // Condition variable state.
+ CondVar(const CondVar&) = delete;
+ CondVar& operator=(const CondVar&) = delete;
+};
+
+
+// Variants of MutexLock.
+//
+// If you find yourself using one of these, consider instead using
+// Mutex::Unlock() and/or if-statements for clarity.
+
+// MutexLockMaybe
+//
+// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
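+//
+// Example (a sketch; `DoWork()` is hypothetical, and `mu` may be null):
+//
+//   void DoWork(y_absl::Mutex *mu) {
+//     y_absl::MutexLockMaybe lock(mu);  // locks only when mu != nullptr
+//     // ... work that is guarded only when a mutex was supplied ...
+//   }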
+class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
+ public:
+ explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ if (this->mu_ != nullptr) {
+ this->mu_->Lock();
+ }
+ }
+
+ explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ if (this->mu_ != nullptr) {
+ this->mu_->LockWhen(cond);
+ }
+ }
+
+ ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
+ if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ }
+
+ private:
+ Mutex *const mu_;
+ MutexLockMaybe(const MutexLockMaybe&) = delete;
+ MutexLockMaybe(MutexLockMaybe&&) = delete;
+ MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
+ MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
+};
+
+// ReleasableMutexLock
+//
+// ReleasableMutexLock is like MutexLock, but permits `Release()` of its
+// mutex before destruction. `Release()` may be called at most once.
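+//
+// Example (a sketch; `mu_`, `Ready()`, and `Notify()` are hypothetical):
+//
+//   y_absl::ReleasableMutexLock lock(&mu_);
+//   if (Ready()) {
+//     lock.Release();  // drop the lock before scope exit
+//     Notify();        // runs without the lock held
+//   }
+//   // If Release() was not called, the destructor unlocks mu_.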
+class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
+ public:
+ explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->Lock();
+ }
+
+ explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->LockWhen(cond);
+ }
+
+ ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
+ if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ }
+
+ void Release() ABSL_UNLOCK_FUNCTION();
+
+ private:
+ Mutex *mu_;
+ ReleasableMutexLock(const ReleasableMutexLock&) = delete;
+ ReleasableMutexLock(ReleasableMutexLock&&) = delete;
+ ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
+ ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
+};
+
+inline Mutex::Mutex() : mu_(0) {
+ ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+}
+
+inline constexpr Mutex::Mutex(y_absl::ConstInitType) : mu_(0) {}
+
+inline CondVar::CondVar() : cv_(0) {}
+
+// static
+template <typename T>
+bool Condition::CastAndCallMethod(const Condition *c) {
+ typedef bool (T::*MemberType)();
+ MemberType rm = reinterpret_cast<MemberType>(c->method_);
+ T *x = static_cast<T *>(c->arg_);
+ return (x->*rm)();
+}
+
+// static
+template <typename T>
+bool Condition::CastAndCallFunction(const Condition *c) {
+ typedef bool (*FuncType)(T *);
+ FuncType fn = reinterpret_cast<FuncType>(c->function_);
+ T *x = static_cast<T *>(c->arg_);
+ return (*fn)(x);
+}
+
+template <typename T>
+inline Condition::Condition(bool (*func)(T *), T *arg)
+ : eval_(&CastAndCallFunction<T>),
+ function_(reinterpret_cast<InternalFunctionType>(func)),
+ method_(nullptr),
+ arg_(const_cast<void *>(static_cast<const void *>(arg))) {}
+
+template <typename T>
+inline Condition::Condition(T *object,
+ bool (y_absl::internal::identity<T>::type::*method)())
+ : eval_(&CastAndCallMethod<T>),
+ function_(nullptr),
+ method_(reinterpret_cast<InternalMethodType>(method)),
+ arg_(object) {}
+
+template <typename T>
+inline Condition::Condition(const T *object,
+ bool (y_absl::internal::identity<T>::type::*method)()
+ const)
+ : eval_(&CastAndCallMethod<T>),
+ function_(nullptr),
+ method_(reinterpret_cast<InternalMethodType>(method)),
+ arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {}
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a mutex is
+// contended. The callback is given the y_absl/base/cycleclock.h timestamp when
+// waiting began.
+//
+// Calls to this function do not race or block, but there is no ordering
+// guaranteed between calls to this function and calls to the provided hook.
+// In particular, the previously registered hook may still be called for some
+// time after this function returns.
+void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
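+//
+// Example (a sketch; `MyMutexProfiler` is a hypothetical callback):
+//
+//   void MyMutexProfiler(int64_t wait_timestamp) {
+//     // record the y_absl/base/cycleclock.h timestamp at which waiting began
+//   }
+//   ...
+//   y_absl::RegisterMutexProfiler(&MyMutexProfiler);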
+
+// Register a hook for Mutex tracing.
+//
+// The function pointer registered here will be called whenever a mutex is
+// contended. The callback is given an opaque handle to the contended mutex,
+// an event name, and the number of wait cycles (as measured by
+// //y_absl/base/internal/cycleclock.h, which may not be real
+// "cycle" counts).
+//
+// The only event name currently sent is "slow release".
+//
+// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+ int64_t wait_cycles));
+
+// TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer()
+// into a single interface, since they are only ever called in pairs.
+
+// Register a hook for CondVar tracing.
+//
+// The function pointer registered here will be called on various CondVar
+// events. The callback is given an opaque handle to the CondVar object and
+// a string identifying the event. This is thread-safe, but only a single
+// tracer can be registered.
+//
+// Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
+// "SignalAll wakeup".
+//
+// This has the same memory ordering concerns as RegisterMutexProfiler() above.
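+//
+// Example (sketch; `MyCondVarTracer` is a hypothetical callback):
+//
+//   void MyCondVarTracer(const char *msg, const void *cv) {
+//     // e.g., log `msg` ("Wait", "Unwait", ...) for the CondVar at `cv`
+//   }
+//   y_absl::RegisterCondVarTracer(&MyCondVarTracer);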
+void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
+
+// Register a hook for symbolizing stack traces in deadlock detector reports.
+//
+// 'pc' is the program counter being symbolized, 'out' is the buffer to write
+// into, and 'out_size' is the size of the buffer. This function can return
+// false if symbolizing failed, or true if a NUL-terminated symbol was written
+// to 'out.'
+//
+// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+//
+// DEPRECATED: The default symbolizer function is y_absl::Symbolize() and the
+// ability to register a different hook for symbolizing stack traces will be
+// removed on or after 2023-05-01.
+ABSL_DEPRECATED("y_absl::RegisterSymbolizer() is deprecated and will be removed "
+ "on or after 2023-05-01")
+void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size));
+
+// EnableMutexInvariantDebugging()
+//
+// Enable or disable global support for Mutex invariant debugging. If enabled,
+// then invariant predicates can be registered per-Mutex for debug checking.
+// See Mutex::EnableInvariantDebugging().
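+//
+// Example (sketch; `MyData`, its `mu` member, and `CheckInvariant()` are
+// assumed names, and the per-Mutex registration signature is assumed to take
+// a `void (*)(void *)` plus an argument pointer):
+//
+//   y_absl::EnableMutexInvariantDebugging(true);
+//   MyData data;
+//   data.mu.EnableInvariantDebugging(
+//       [](void *arg) { static_cast<MyData *>(arg)->CheckInvariant(); },
+//       &data);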
+void EnableMutexInvariantDebugging(bool enabled);
+
+// When in debug mode, and when the feature has been enabled globally, the
+// implementation will keep track of lock ordering and complain (or optionally
+// crash) if a cycle is detected in the acquired-before graph.
+
+// Possible modes of operation for the deadlock detector in debug mode.
+enum class OnDeadlockCycle {
+ kIgnore, // Neither report on nor attempt to track cycles in lock ordering
+ kReport, // Report lock cycles to stderr when detected
+ kAbort, // Report lock cycles to stderr when detected, then abort
+};
+
+// SetMutexDeadlockDetectionMode()
+//
+// Enable or disable global support for detection of potential deadlocks
+// due to Mutex lock ordering inversions. When set to 'kIgnore', tracking of
+// lock ordering is disabled. Otherwise, in debug builds, a lock ordering graph
+// will be maintained internally, and detected cycles will be reported in
+// the manner chosen here.
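+//
+// Example (sketch):
+//
+//   // Typically set once, early in main(), for debug builds:
+//   y_absl::SetMutexDeadlockDetectionMode(y_absl::OnDeadlockCycle::kAbort);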
+void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
+} // extern "C"
+
+#endif // ABSL_SYNCHRONIZATION_MUTEX_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.cc
new file mode 100644
index 00000000000..0b5c5a5e48d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.cc
@@ -0,0 +1,78 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/notification.h"
+
+#include <atomic>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/synchronization/mutex.h"
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+void Notification::Notify() {
+ MutexLock l(&this->mutex_);
+
+#ifndef NDEBUG
+ if (ABSL_PREDICT_FALSE(notified_yet_.load(std::memory_order_relaxed))) {
+ ABSL_RAW_LOG(
+ FATAL,
+ "Notify() method called more than once for Notification object %p",
+ static_cast<void *>(this));
+ }
+#endif
+
+ notified_yet_.store(true, std::memory_order_release);
+}
+
+Notification::~Notification() {
+ // Make sure that the thread running Notify() exits before the object is
+ // destructed.
+ MutexLock l(&this->mutex_);
+}
+
+void Notification::WaitForNotification() const {
+ if (!HasBeenNotifiedInternal(&this->notified_yet_)) {
+ this->mutex_.LockWhen(Condition(&HasBeenNotifiedInternal,
+ &this->notified_yet_));
+ this->mutex_.Unlock();
+ }
+}
+
+bool Notification::WaitForNotificationWithTimeout(
+ y_absl::Duration timeout) const {
+ bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
+ if (!notified) {
+ notified = this->mutex_.LockWhenWithTimeout(
+ Condition(&HasBeenNotifiedInternal, &this->notified_yet_), timeout);
+ this->mutex_.Unlock();
+ }
+ return notified;
+}
+
+bool Notification::WaitForNotificationWithDeadline(y_absl::Time deadline) const {
+ bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
+ if (!notified) {
+ notified = this->mutex_.LockWhenWithDeadline(
+ Condition(&HasBeenNotifiedInternal, &this->notified_yet_), deadline);
+ this->mutex_.Unlock();
+ }
+ return notified;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.h
new file mode 100644
index 00000000000..eea7090f188
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.h
@@ -0,0 +1,123 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// notification.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `Notification` abstraction, which allows threads
+// to receive notification of a single occurrence of a single event.
+//
+// The `Notification` object maintains a private boolean "notified" state that
+// transitions to `true` at most once. The `Notification` class provides the
+// following primary member functions:
+// * `HasBeenNotified()` to query its state
+// * `WaitForNotification*()` to have threads wait until the "notified" state
+// is `true`.
+// * `Notify()` to set the notification's "notified" state to `true` and
+// notify all waiting threads that the event has occurred.
+// This method may only be called once.
+//
+// Note that while `Notify()` may only be called once, it is perfectly valid to
+// call any of the `WaitForNotification*()` methods multiple times, from
+// multiple threads -- even after the notification's "notified" state has been
+// set -- in which case those methods will immediately return.
+//
+// Note that the lifetime of a `Notification` requires careful consideration;
+// it might not be safe to destroy a notification after calling `Notify()` since
+// it is still legal for other threads to call `WaitForNotification*()` methods
+// on the notification. However, observers responding to a "notified" state of
+// `true` can safely delete the notification without interfering with the call
+// to `Notify()` in the other thread.
+//
+// Memory ordering: For any threads X and Y, if X calls `Notify()`, then any
+// action taken by X before it calls `Notify()` is visible to thread Y after:
+// * Y returns from `WaitForNotification()`, or
+// * Y receives a `true` return value from either `HasBeenNotified()` or
+// `WaitForNotificationWithTimeout()`.
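+//
+// Example (an illustrative sketch; `PublishState()` is an assumed helper):
+//
+//   y_absl::Notification ready;
+//   std::thread worker([&ready] {
+//     ready.WaitForNotification();  // blocks until Notify() below
+//     // ... everything PublishState() wrote is now visible here ...
+//   });
+//   PublishState();  // work done before Notify() is visible to `worker`
+//   ready.Notify();  // may be called at most once
+//   worker.join();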
+
+#ifndef ABSL_SYNCHRONIZATION_NOTIFICATION_H_
+#define ABSL_SYNCHRONIZATION_NOTIFICATION_H_
+
+#include <atomic>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/synchronization/mutex.h"
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// -----------------------------------------------------------------------------
+// Notification
+// -----------------------------------------------------------------------------
+class Notification {
+ public:
+ // Initializes the "notified" state to unnotified.
+ Notification() : notified_yet_(false) {}
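+  // Initializes the "notified" state to `prenotify`.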
+ explicit Notification(bool prenotify) : notified_yet_(prenotify) {}
+ Notification(const Notification&) = delete;
+ Notification& operator=(const Notification&) = delete;
+ ~Notification();
+
+ // Notification::HasBeenNotified()
+ //
+ // Returns the value of the notification's internal "notified" state.
+ bool HasBeenNotified() const {
+ return HasBeenNotifiedInternal(&this->notified_yet_);
+ }
+
+ // Notification::WaitForNotification()
+ //
+ // Blocks the calling thread until the notification's "notified" state is
+ // `true`. Note that if `Notify()` has been previously called on this
+ // notification, this function will immediately return.
+ void WaitForNotification() const;
+
+ // Notification::WaitForNotificationWithTimeout()
+ //
+ // Blocks until either the notification's "notified" state is `true` (which
+ // may occur immediately) or the timeout has elapsed, returning the value of
+ // its "notified" state in either case.
+ bool WaitForNotificationWithTimeout(y_absl::Duration timeout) const;
+
+ // Notification::WaitForNotificationWithDeadline()
+ //
+ // Blocks until either the notification's "notified" state is `true` (which
+ // may occur immediately) or the deadline has expired, returning the value of
+ // its "notified" state in either case.
+ bool WaitForNotificationWithDeadline(y_absl::Time deadline) const;
+
+ // Notification::Notify()
+ //
+ // Sets the "notified" state of this notification to `true` and wakes waiting
+ // threads. Note: do not call `Notify()` multiple times on the same
+ // `Notification`; calling `Notify()` more than once on the same notification
+ // results in undefined behavior.
+ void Notify();
+
+ private:
+ static inline bool HasBeenNotifiedInternal(
+ const std::atomic<bool>* notified_yet) {
+ return notified_yet->load(std::memory_order_acquire);
+ }
+
+ mutable Mutex mutex_;
+ std::atomic<bool> notified_yet_; // written under mutex_
+};
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_SYNCHRONIZATION_NOTIFICATION_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make
new file mode 100644
index 00000000000..860fd01b0f8
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make
@@ -0,0 +1,50 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ barrier.cc
+ blocking_counter.cc
+ internal/create_thread_identity.cc
+ internal/per_thread_sem.cc
+ internal/waiter.cc
+ mutex.cc
+ notification.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/time/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..7c1bf3b5ad6
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/.yandex_meta/licenses.list.txt
@@ -0,0 +1,42 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2016 Google Inc. All Rights Reserved.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2018 The Abseil Authors.
+
+
+====================Public-Domain====================
+** This file is in the public domain, so clarified as of
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time.cc
new file mode 100644
index 00000000000..f24bac91e85
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time.cc
@@ -0,0 +1,173 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/time/civil_time.h"
+
+#include <cstdlib>
+#include <util/generic/string.h>
+
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace {
+
+// Since a civil time has a larger year range than y_absl::Time (64-bit years vs
+// 64-bit seconds, respectively) we normalize years to roughly +/- 400 years
+// around the year 2400, which will produce an equivalent year in a range that
+// y_absl::Time can handle.
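+// For example (a sketch of the arithmetic): NormalizeYear(1969) ==
+// 2400 + (1969 % 400) == 2769, and NormalizeYear(-5) == 2400 - 5 == 2395;
+// both differ from the original year by a multiple of 400 years, so month/day
+// and leap-year structure are preserved.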
+inline civil_year_t NormalizeYear(civil_year_t year) {
+ return 2400 + year % 400;
+}
+
+// Formats the given CivilSecond according to the given format.
+TString FormatYearAnd(string_view fmt, CivilSecond cs) {
+ const CivilSecond ncs(NormalizeYear(cs.year()), cs.month(), cs.day(),
+ cs.hour(), cs.minute(), cs.second());
+ const TimeZone utc = UTCTimeZone();
+ return StrCat(cs.year(), FormatTime(fmt, FromCivil(ncs, utc), utc));
+}
+
+template <typename CivilT>
+bool ParseYearAnd(string_view fmt, string_view s, CivilT* c) {
+ // Civil times support a larger year range than y_absl::Time, so we need to
+ // parse the year separately, normalize it, then use y_absl::ParseTime on the
+ // normalized string.
+ const TString ss = TString(s); // TODO(y_absl-team): Avoid conversion.
+ const char* const np = ss.c_str();
+ char* endp;
+ errno = 0;
+ const civil_year_t y =
+ std::strtoll(np, &endp, 10); // NOLINT(runtime/deprecated_fn)
+ if (endp == np || errno == ERANGE) return false;
+ const TString norm = StrCat(NormalizeYear(y), endp);
+
+ const TimeZone utc = UTCTimeZone();
+ Time t;
+ if (ParseTime(StrCat("%Y", fmt), norm, utc, &t, nullptr)) {
+ const auto cs = ToCivilSecond(t, utc);
+ *c = CivilT(y, cs.month(), cs.day(), cs.hour(), cs.minute(), cs.second());
+ return true;
+ }
+
+ return false;
+}
+
+// Tries to parse the string as a CivilT1, but then assigns the result to the
+// argument of type CivilT2.
+template <typename CivilT1, typename CivilT2>
+bool ParseAs(string_view s, CivilT2* c) {
+ CivilT1 t1;
+ if (ParseCivilTime(s, &t1)) {
+ *c = CivilT2(t1);
+ return true;
+ }
+ return false;
+}
+
+template <typename CivilT>
+bool ParseLenient(string_view s, CivilT* c) {
+ // A fastpath for when the given string data parses exactly into the given
+ // type T (e.g., s="YYYY-MM-DD" and CivilT=CivilDay).
+ if (ParseCivilTime(s, c)) return true;
+ // Try parsing as each of the 6 types, trying the most common types first
+ // (based on csearch results).
+ if (ParseAs<CivilDay>(s, c)) return true;
+ if (ParseAs<CivilSecond>(s, c)) return true;
+ if (ParseAs<CivilHour>(s, c)) return true;
+ if (ParseAs<CivilMonth>(s, c)) return true;
+ if (ParseAs<CivilMinute>(s, c)) return true;
+ if (ParseAs<CivilYear>(s, c)) return true;
+ return false;
+}
+} // namespace
+
+TString FormatCivilTime(CivilSecond c) {
+ return FormatYearAnd("-%m-%d%ET%H:%M:%S", c);
+}
+TString FormatCivilTime(CivilMinute c) {
+ return FormatYearAnd("-%m-%d%ET%H:%M", c);
+}
+TString FormatCivilTime(CivilHour c) {
+ return FormatYearAnd("-%m-%d%ET%H", c);
+}
+TString FormatCivilTime(CivilDay c) { return FormatYearAnd("-%m-%d", c); }
+TString FormatCivilTime(CivilMonth c) { return FormatYearAnd("-%m", c); }
+TString FormatCivilTime(CivilYear c) { return FormatYearAnd("", c); }
+
+bool ParseCivilTime(string_view s, CivilSecond* c) {
+ return ParseYearAnd("-%m-%d%ET%H:%M:%S", s, c);
+}
+bool ParseCivilTime(string_view s, CivilMinute* c) {
+ return ParseYearAnd("-%m-%d%ET%H:%M", s, c);
+}
+bool ParseCivilTime(string_view s, CivilHour* c) {
+ return ParseYearAnd("-%m-%d%ET%H", s, c);
+}
+bool ParseCivilTime(string_view s, CivilDay* c) {
+ return ParseYearAnd("-%m-%d", s, c);
+}
+bool ParseCivilTime(string_view s, CivilMonth* c) {
+ return ParseYearAnd("-%m", s, c);
+}
+bool ParseCivilTime(string_view s, CivilYear* c) {
+ return ParseYearAnd("", s, c);
+}
+
+bool ParseLenientCivilTime(string_view s, CivilSecond* c) {
+ return ParseLenient(s, c);
+}
+bool ParseLenientCivilTime(string_view s, CivilMinute* c) {
+ return ParseLenient(s, c);
+}
+bool ParseLenientCivilTime(string_view s, CivilHour* c) {
+ return ParseLenient(s, c);
+}
+bool ParseLenientCivilTime(string_view s, CivilDay* c) {
+ return ParseLenient(s, c);
+}
+bool ParseLenientCivilTime(string_view s, CivilMonth* c) {
+ return ParseLenient(s, c);
+}
+bool ParseLenientCivilTime(string_view s, CivilYear* c) {
+ return ParseLenient(s, c);
+}
+
+namespace time_internal {
+
+std::ostream& operator<<(std::ostream& os, CivilYear y) {
+ return os << FormatCivilTime(y);
+}
+std::ostream& operator<<(std::ostream& os, CivilMonth m) {
+ return os << FormatCivilTime(m);
+}
+std::ostream& operator<<(std::ostream& os, CivilDay d) {
+ return os << FormatCivilTime(d);
+}
+std::ostream& operator<<(std::ostream& os, CivilHour h) {
+ return os << FormatCivilTime(h);
+}
+std::ostream& operator<<(std::ostream& os, CivilMinute m) {
+ return os << FormatCivilTime(m);
+}
+std::ostream& operator<<(std::ostream& os, CivilSecond s) {
+ return os << FormatCivilTime(s);
+}
+
+} // namespace time_internal
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time.h
new file mode 100644
index 00000000000..64fb6da494a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time.h
@@ -0,0 +1,538 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: civil_time.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines abstractions for computing with "civil time".
+// The term "civil time" refers to the legally recognized human-scale time
+// that is represented by the six fields `YYYY-MM-DD hh:mm:ss`. A "date"
+// is perhaps the most common example of a civil time (represented here as
+// an `y_absl::CivilDay`).
+//
+// Modern-day civil time follows the Gregorian Calendar and is a
+// time-zone-independent concept: a civil time of "2015-06-01 12:00:00", for
+// example, is not tied to a time zone. Put another way, a civil time does not
+// map to a unique point in time; a civil time must be mapped to an absolute
+// time *through* a time zone.
+//
+// Because a civil time is what most people think of as "time," it is common to
+// map absolute times to civil times to present to users.
+//
+// Time zones define the relationship between absolute and civil times. Given an
+// absolute or civil time and a time zone, you can compute the other time:
+//
+// Civil Time = F(Absolute Time, Time Zone)
+// Absolute Time = G(Civil Time, Time Zone)
+//
+// The Abseil time library allows you to construct such civil times from
+// absolute times; consult time.h for such functionality.
+//
+// This library provides six classes for constructing civil-time objects, and
+// provides several helper functions for rounding, iterating, and performing
+// arithmetic on civil-time objects, while avoiding complications like
+// daylight-saving time (DST):
+//
+// * `y_absl::CivilSecond`
+// * `y_absl::CivilMinute`
+// * `y_absl::CivilHour`
+// * `y_absl::CivilDay`
+// * `y_absl::CivilMonth`
+// * `y_absl::CivilYear`
+//
+// Example:
+//
+// // Construct a civil-time object for a specific day
+// const y_absl::CivilDay cd(1969, 07, 20);
+//
+// // Construct a civil-time object for a specific second
+//   const y_absl::CivilSecond cs(2018, 8, 1, 12, 0, 1);
+//
+// Note: In C++14 and later, this library is usable in a constexpr context.
+//
+// Example:
+//
+// // Valid in C++14
+// constexpr y_absl::CivilDay cd(1969, 07, 20);
+
+#ifndef ABSL_TIME_CIVIL_TIME_H_
+#define ABSL_TIME_CIVIL_TIME_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/strings/string_view.h"
+#include "y_absl/time/internal/cctz/include/cctz/civil_time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace time_internal {
+struct second_tag : cctz::detail::second_tag {};
+struct minute_tag : second_tag, cctz::detail::minute_tag {};
+struct hour_tag : minute_tag, cctz::detail::hour_tag {};
+struct day_tag : hour_tag, cctz::detail::day_tag {};
+struct month_tag : day_tag, cctz::detail::month_tag {};
+struct year_tag : month_tag, cctz::detail::year_tag {};
+} // namespace time_internal
+
+// -----------------------------------------------------------------------------
+// CivilSecond, CivilMinute, CivilHour, CivilDay, CivilMonth, CivilYear
+// -----------------------------------------------------------------------------
+//
+// Each of these civil-time types is a simple value type with the same
+// interface for construction and the same six accessors for each of the civil
+// time fields (year, month, day, hour, minute, and second, aka YMDHMS). These
+// classes differ only in their alignment, which is indicated by the type name
+// and specifies the field on which arithmetic operates.
+//
+// CONSTRUCTION
+//
+// Each of the civil-time types can be constructed in two ways: by directly
+// passing to the constructor up to six integers representing the YMDHMS fields,
+// or by copying the YMDHMS fields from a differently aligned civil-time type.
+// Omitted fields are assigned their minimum valid value. Hours, minutes, and
+// seconds will be set to 0, month and day will be set to 1. Since there is no
+// minimum year, the default is 1970.
+//
+// Examples:
+//
+// y_absl::CivilDay default_value; // 1970-01-01 00:00:00
+//
+// y_absl::CivilDay a(2015, 2, 3); // 2015-02-03 00:00:00
+// y_absl::CivilDay b(2015, 2, 3, 4, 5, 6); // 2015-02-03 00:00:00
+// y_absl::CivilDay c(2015); // 2015-01-01 00:00:00
+//
+// y_absl::CivilSecond ss(2015, 2, 3, 4, 5, 6); // 2015-02-03 04:05:06
+// y_absl::CivilMinute mm(ss); // 2015-02-03 04:05:00
+// y_absl::CivilHour hh(mm); // 2015-02-03 04:00:00
+// y_absl::CivilDay d(hh); // 2015-02-03 00:00:00
+// y_absl::CivilMonth m(d); // 2015-02-01 00:00:00
+// y_absl::CivilYear y(m); // 2015-01-01 00:00:00
+//
+// m = y_absl::CivilMonth(y); // 2015-01-01 00:00:00
+// d = y_absl::CivilDay(m); // 2015-01-01 00:00:00
+// hh = y_absl::CivilHour(d); // 2015-01-01 00:00:00
+// mm = y_absl::CivilMinute(hh); // 2015-01-01 00:00:00
+// ss = y_absl::CivilSecond(mm); // 2015-01-01 00:00:00
+//
+// Each civil-time class is aligned to the civil-time field indicated in the
+// class's name after normalization. Alignment is performed by setting all the
+// inferior fields to their minimum valid value (as described above). The
+// following are examples of how each of the six types would align the fields
+// representing November 22, 2015 at 12:34:56 in the afternoon. (Note: the
+// string format used here is not important; it's just a shorthand way of
+// showing the six YMDHMS fields.)
+//
+// y_absl::CivilSecond : 2015-11-22 12:34:56
+// y_absl::CivilMinute : 2015-11-22 12:34:00
+// y_absl::CivilHour : 2015-11-22 12:00:00
+// y_absl::CivilDay : 2015-11-22 00:00:00
+// y_absl::CivilMonth : 2015-11-01 00:00:00
+// y_absl::CivilYear : 2015-01-01 00:00:00
+//
+// Each civil-time type performs arithmetic on the field to which it is
+// aligned. This means that adding 1 to an y_absl::CivilDay increments the day
+// field (normalizing as necessary), and subtracting 7 from an y_absl::CivilMonth
+// operates on the month field (normalizing as necessary). All arithmetic
+// produces a valid civil time. Difference requires two similarly aligned
+// civil-time objects and returns the scalar answer in units of the objects'
+// alignment. For example, the difference between two y_absl::CivilHour objects
+// will give an answer in units of civil hours.
+//
+// ALIGNMENT CONVERSION
+//
+// The alignment of a civil-time object cannot change, but the object may be
+// used to construct a new object with a different alignment. This is referred
+// to as "realigning". When realigning to a type with the same or more
+// precision (e.g., y_absl::CivilDay -> y_absl::CivilSecond), the conversion may be
+// performed implicitly since no information is lost. However, if information
+// could be discarded (e.g., CivilSecond -> CivilDay), the conversion must
+// be explicit at the call site.
+//
+// Examples:
+//
+// void UseDay(y_absl::CivilDay day);
+//
+// y_absl::CivilSecond cs;
+// UseDay(cs); // Won't compile because data may be discarded
+// UseDay(y_absl::CivilDay(cs)); // OK: explicit conversion
+//
+// y_absl::CivilDay cd;
+// UseDay(cd); // OK: no conversion needed
+//
+// y_absl::CivilMonth cm;
+// UseDay(cm); // OK: implicit conversion to y_absl::CivilDay
+//
+// NORMALIZATION
+//
+// Normalization takes invalid values and adjusts them to produce valid values.
+// Within the civil-time library, integer arguments passed to the Civil*
+// constructors may be out-of-range, in which case they are normalized by
+// carrying overflow into a field of coarser granularity to produce valid
+// civil-time objects. This normalization enables natural arithmetic on
+// constructor arguments without worrying about the field's range.
+//
+// Examples:
+//
+// // Out-of-range; normalized to 2016-11-01
+// y_absl::CivilDay d(2016, 10, 32);
+// // Out-of-range, negative: normalized to 2016-10-30T23
+// y_absl::CivilHour h1(2016, 10, 31, -1);
+// // Normalization is cumulative: normalized to 2016-10-30T23
+// y_absl::CivilHour h2(2016, 10, 32, -25);
+//
+// Note: If normalization is undesired, you can signal an error by comparing
+// the constructor arguments to the normalized values returned by the YMDHMS
+// properties.
+//
+// COMPARISON
+//
+// Comparison between civil-time objects considers all six YMDHMS fields,
+// regardless of the type's alignment. Comparison between differently aligned
+// civil-time types is allowed.
+//
+// Examples:
+//
+// y_absl::CivilDay feb_3(2015, 2, 3); // 2015-02-03 00:00:00
+// y_absl::CivilDay mar_4(2015, 3, 4); // 2015-03-04 00:00:00
+// // feb_3 < mar_4
+// // y_absl::CivilYear(feb_3) == y_absl::CivilYear(mar_4)
+//
+// y_absl::CivilSecond feb_3_noon(2015, 2, 3, 12, 0, 0); // 2015-02-03 12:00:00
+// // feb_3 < feb_3_noon
+// // feb_3 == y_absl::CivilDay(feb_3_noon)
+//
+// // Iterates all the days of February 2015.
+// for (y_absl::CivilDay d(2015, 2, 1); d < y_absl::CivilMonth(2015, 3); ++d) {
+// // ...
+// }
+//
+// ARITHMETIC
+//
+// Civil-time types support natural arithmetic operators such as addition,
+// subtraction, and difference. Arithmetic operates on the civil-time field
+// indicated in the type's name. Difference operators require arguments with
+// the same alignment and return the answer in units of the alignment.
+//
+// Example:
+//
+// y_absl::CivilDay a(2015, 2, 3);
+// ++a; // 2015-02-04 00:00:00
+// --a; // 2015-02-03 00:00:00
+// y_absl::CivilDay b = a + 1; // 2015-02-04 00:00:00
+// y_absl::CivilDay c = 1 + b; // 2015-02-05 00:00:00
+// int n = c - a; // n = 2 (civil days)
+// int m = c - y_absl::CivilMonth(c); // Won't compile: different types.
+//
+// ACCESSORS
+//
+// Each civil-time type has accessors for all six of the civil-time fields:
+// year, month, day, hour, minute, and second.
+//
+// civil_year_t year()
+// int month()
+// int day()
+// int hour()
+// int minute()
+// int second()
+//
+// Recall that fields inferior to the type's alignment will be set to their
+// minimum valid value.
+//
+// Example:
+//
+// y_absl::CivilDay d(2015, 6, 28);
+// // d.year() == 2015
+// // d.month() == 6
+// // d.day() == 28
+// // d.hour() == 0
+// // d.minute() == 0
+// // d.second() == 0
+//
+// CASE STUDY: Adding a month to January 31.
+//
+// One of the classic questions that arises when considering a civil time
+// library (or a date library or a date/time library) is this:
+// "What is the result of adding a month to January 31?"
+// This is an interesting question because it is unclear what is meant by a
+// "month", and several different answers are possible, depending on context:
+//
+// 1. March 3 (or 2 if a leap year), if "add a month" means to add a month to
+// the current month, and adjust the date to overflow the extra days into
+//    March. In this case the result of "February 31" would be normalized, as
+//    is done within the civil-time library.
+// 2. February 28 (or 29 if a leap year), if "add a month" means to add a
+// month, and adjust the date while holding the resulting month constant.
+// In this case, the result of "February 31" would be truncated to the last
+// day in February.
+// 3. An error. The caller may get some error, an exception, an invalid date
+// object, or perhaps return `false`. This may make sense because there is
+// no single unambiguously correct answer to the question.
+//
+// Practically speaking, any answer that is not what the programmer intended
+// is the wrong answer.
+//
+// The Abseil time library avoids this problem by making it impossible to
+// ask ambiguous questions. All civil-time objects are aligned to a particular
+// civil-field boundary (such as aligned to a year, month, day, hour, minute,
+// or second), and arithmetic operates on the field to which the object is
+// aligned. This means that in order to "add a month" the object must first be
+// aligned to a month boundary, which is equivalent to the first day of that
+// month.
+//
+// Of course, there are ways to compute an answer to the question at hand using
+// this Abseil time library, but they require the programmer to be explicit
+// about the answer they expect. To illustrate, let's see how to compute all
+// three of the above possible answers to the question of "Jan 31 plus 1
+// month":
+//
+// Example:
+//
+// const y_absl::CivilDay d(2015, 1, 31);
+//
+// // Answer 1:
+// // Add 1 to the month field in the constructor, and rely on normalization.
+// const auto normalized = y_absl::CivilDay(d.year(), d.month() + 1, d.day());
+// // normalized == 2015-03-03 (aka Feb 31)
+//
+// // Answer 2:
+// // Add 1 to month field, capping to the end of next month.
+// const auto next_month = y_absl::CivilMonth(d) + 1;
+// const auto last_day_of_next_month = y_absl::CivilDay(next_month + 1) - 1;
+// const auto capped = std::min(normalized, last_day_of_next_month);
+// // capped == 2015-02-28
+//
+// // Answer 3:
+// // Signal an error if the normalized answer is not in next month.
+// if (y_absl::CivilMonth(normalized) != next_month) {
+// // error, month overflow
+// }
+//
+using CivilSecond =
+ time_internal::cctz::detail::civil_time<time_internal::second_tag>;
+using CivilMinute =
+ time_internal::cctz::detail::civil_time<time_internal::minute_tag>;
+using CivilHour =
+ time_internal::cctz::detail::civil_time<time_internal::hour_tag>;
+using CivilDay =
+ time_internal::cctz::detail::civil_time<time_internal::day_tag>;
+using CivilMonth =
+ time_internal::cctz::detail::civil_time<time_internal::month_tag>;
+using CivilYear =
+ time_internal::cctz::detail::civil_time<time_internal::year_tag>;
+
+// civil_year_t
+//
+// Type alias of a civil-time year value. This type is guaranteed to (at least)
+// support any year value supported by `time_t`.
+//
+// Example:
+//
+// y_absl::CivilSecond cs = ...;
+// y_absl::civil_year_t y = cs.year();
+// cs = y_absl::CivilSecond(y, 1, 1, 0, 0, 0); // CivilSecond(CivilYear(cs))
+//
+using civil_year_t = time_internal::cctz::year_t;
+
+// civil_diff_t
+//
+// Type alias of the difference between two civil-time values.
+// This type is used to indicate arguments that are not
+// normalized (such as parameters to the civil-time constructors), the results
+// of civil-time subtraction, or the operand to civil-time addition.
+//
+// Example:
+//
+// y_absl::civil_diff_t n_sec = cs1 - cs2; // cs1 == cs2 + n_sec;
+//
+using civil_diff_t = time_internal::cctz::diff_t;
+
+// Weekday::monday, Weekday::tuesday, Weekday::wednesday, Weekday::thursday,
+// Weekday::friday, Weekday::saturday, Weekday::sunday
+//
+// The Weekday enum class represents the civil-time concept of a "weekday" with
+// members for all days of the week.
+//
+// y_absl::Weekday wd = y_absl::Weekday::thursday;
+//
+using Weekday = time_internal::cctz::weekday;
+
+// GetWeekday()
+//
+// Returns the y_absl::Weekday for the given (realigned) civil-time value.
+//
+// Example:
+//
+// y_absl::CivilDay a(2015, 8, 13);
+// y_absl::Weekday wd = y_absl::GetWeekday(a); // wd == y_absl::Weekday::thursday
+//
+inline Weekday GetWeekday(CivilSecond cs) {
+ return time_internal::cctz::get_weekday(cs);
+}
+
+// NextWeekday()
+// PrevWeekday()
+//
+// Returns the y_absl::CivilDay that strictly follows or precedes a given
+// y_absl::CivilDay, and that falls on the given y_absl::Weekday.
+//
+// Example, given the following month:
+//
+// August 2015
+// Su Mo Tu We Th Fr Sa
+// 1
+// 2 3 4 5 6 7 8
+// 9 10 11 12 13 14 15
+// 16 17 18 19 20 21 22
+// 23 24 25 26 27 28 29
+// 30 31
+//
+// y_absl::CivilDay a(2015, 8, 13);
+// // y_absl::GetWeekday(a) == y_absl::Weekday::thursday
+// y_absl::CivilDay b = y_absl::NextWeekday(a, y_absl::Weekday::thursday);
+// // b = 2015-08-20
+// y_absl::CivilDay c = y_absl::PrevWeekday(a, y_absl::Weekday::thursday);
+// // c = 2015-08-06
+//
+// y_absl::CivilDay d = ...
+// // Gets the following Thursday if d is not already Thursday
+// y_absl::CivilDay thurs1 = y_absl::NextWeekday(d - 1, y_absl::Weekday::thursday);
+// // Gets the previous Thursday if d is not already Thursday
+// y_absl::CivilDay thurs2 = y_absl::PrevWeekday(d + 1, y_absl::Weekday::thursday);
+//
+inline CivilDay NextWeekday(CivilDay cd, Weekday wd) {
+ return CivilDay(time_internal::cctz::next_weekday(cd, wd));
+}
+inline CivilDay PrevWeekday(CivilDay cd, Weekday wd) {
+ return CivilDay(time_internal::cctz::prev_weekday(cd, wd));
+}
+
+// GetYearDay()
+//
+// Returns the day-of-year for the given (realigned) civil-time value.
+//
+// Example:
+//
+// y_absl::CivilDay a(2015, 1, 1);
+// int yd_jan_1 = y_absl::GetYearDay(a); // yd_jan_1 = 1
+// y_absl::CivilDay b(2015, 12, 31);
+// int yd_dec_31 = y_absl::GetYearDay(b); // yd_dec_31 = 365
+//
+inline int GetYearDay(CivilSecond cs) {
+ return time_internal::cctz::get_yearday(cs);
+}
+
+// FormatCivilTime()
+//
+// Formats the given civil-time value into a string value of the following
+// format:
+//
+// Type | Format
+// ---------------------------------
+// CivilSecond | YYYY-MM-DDTHH:MM:SS
+// CivilMinute | YYYY-MM-DDTHH:MM
+// CivilHour | YYYY-MM-DDTHH
+// CivilDay | YYYY-MM-DD
+// CivilMonth | YYYY-MM
+// CivilYear | YYYY
+//
+// Example:
+//
+// y_absl::CivilDay d = y_absl::CivilDay(1969, 7, 20);
+// TString day_string = y_absl::FormatCivilTime(d); // "1969-07-20"
+//
+TString FormatCivilTime(CivilSecond c);
+TString FormatCivilTime(CivilMinute c);
+TString FormatCivilTime(CivilHour c);
+TString FormatCivilTime(CivilDay c);
+TString FormatCivilTime(CivilMonth c);
+TString FormatCivilTime(CivilYear c);
+
+// y_absl::ParseCivilTime()
+//
+// Parses a civil-time value from the specified `y_absl::string_view` into the
+// passed output parameter. Returns `true` upon successful parsing.
+//
+// The expected form of the input string is as follows:
+//
+// Type | Format
+// ---------------------------------
+// CivilSecond | YYYY-MM-DDTHH:MM:SS
+// CivilMinute | YYYY-MM-DDTHH:MM
+// CivilHour | YYYY-MM-DDTHH
+// CivilDay | YYYY-MM-DD
+// CivilMonth | YYYY-MM
+// CivilYear | YYYY
+//
+// Example:
+//
+// y_absl::CivilDay d;
+// bool ok = y_absl::ParseCivilTime("2018-01-02", &d); // OK
+//
+// Note that parsing will fail if the string's format does not match the
+// expected type exactly. `ParseLenientCivilTime()` below is more lenient.
+//
+bool ParseCivilTime(y_absl::string_view s, CivilSecond* c);
+bool ParseCivilTime(y_absl::string_view s, CivilMinute* c);
+bool ParseCivilTime(y_absl::string_view s, CivilHour* c);
+bool ParseCivilTime(y_absl::string_view s, CivilDay* c);
+bool ParseCivilTime(y_absl::string_view s, CivilMonth* c);
+bool ParseCivilTime(y_absl::string_view s, CivilYear* c);
+
+// ParseLenientCivilTime()
+//
+// Parses any of the formats accepted by `y_absl::ParseCivilTime()`, but is more
+// lenient if the format of the string does not exactly match the associated
+// type.
+//
+// Example:
+//
+// y_absl::CivilDay d;
+// bool ok = y_absl::ParseLenientCivilTime("1969-07-20", &d); // OK
+// ok = y_absl::ParseLenientCivilTime("1969-07-20T10", &d); // OK: T10 floored
+// ok = y_absl::ParseLenientCivilTime("1969-07", &d); // OK: day defaults to 1
+//
+bool ParseLenientCivilTime(y_absl::string_view s, CivilSecond* c);
+bool ParseLenientCivilTime(y_absl::string_view s, CivilMinute* c);
+bool ParseLenientCivilTime(y_absl::string_view s, CivilHour* c);
+bool ParseLenientCivilTime(y_absl::string_view s, CivilDay* c);
+bool ParseLenientCivilTime(y_absl::string_view s, CivilMonth* c);
+bool ParseLenientCivilTime(y_absl::string_view s, CivilYear* c);
+
+namespace time_internal { // For functions found via ADL on civil-time tags.
+
+// Streaming Operators
+//
+// Each civil-time type may be sent to an output stream using operator<<().
+// The result matches the string produced by `FormatCivilTime()`.
+//
+// Example:
+//
+// y_absl::CivilDay d = y_absl::CivilDay(1969, 7, 20);
+// std::cout << "Date is: " << d << "\n";
+//
+std::ostream& operator<<(std::ostream& os, CivilYear y);
+std::ostream& operator<<(std::ostream& os, CivilMonth m);
+std::ostream& operator<<(std::ostream& os, CivilDay d);
+std::ostream& operator<<(std::ostream& os, CivilHour h);
+std::ostream& operator<<(std::ostream& os, CivilMinute m);
+std::ostream& operator<<(std::ostream& os, CivilSecond s);
+
+} // namespace time_internal
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_CIVIL_TIME_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time/ya.make
new file mode 100644
index 00000000000..919773f6197
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time/ya.make
@@ -0,0 +1,26 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src)
+
+SRCS(
+ civil_time_detail.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.cc
new file mode 100644
index 00000000000..dcc12b56331
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.cc
@@ -0,0 +1,585 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/time/clock.h"
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/optimization.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstdint>
+#include <ctime>
+#include <limits>
+
+#include "y_absl/base/internal/spinlock.h"
+#include "y_absl/base/internal/unscaledcycleclock.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+#include "y_absl/base/thread_annotations.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+Time Now() {
+ // TODO(bww): Get a timespec instead so we don't have to divide.
+ int64_t n = y_absl::GetCurrentTimeNanos();
+ if (n >= 0) {
+ return time_internal::FromUnixDuration(
+ time_internal::MakeDuration(n / 1000000000, n % 1000000000 * 4));
+ }
+ return time_internal::FromUnixDuration(y_absl::Nanoseconds(n));
+}
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+// Decide if we should use the fast GetCurrentTimeNanos() algorithm
+// based on the cyclecounter, otherwise just get the time directly
+// from the OS on every call. This can be chosen at compile-time via
+// -DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=[0|1]
+#ifndef ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 1
+#else
+#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 0
+#endif
+#endif
+
+#if defined(__APPLE__) || defined(_WIN32)
+#include "y_absl/time/internal/get_current_time_chrono.inc"
+#else
+#include "y_absl/time/internal/get_current_time_posix.inc"
+#endif
+
+// Allows override by test.
+#ifndef GET_CURRENT_TIME_NANOS_FROM_SYSTEM
+#define GET_CURRENT_TIME_NANOS_FROM_SYSTEM() \
+ ::y_absl::time_internal::GetCurrentTimeNanosFromSystem()
+#endif
+
+#if !ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+int64_t GetCurrentTimeNanos() { return GET_CURRENT_TIME_NANOS_FROM_SYSTEM(); }
+ABSL_NAMESPACE_END
+} // namespace y_absl
+#else // Use the cyclecounter-based implementation below.
+
+// Allows override by test.
+#ifndef GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW
+#define GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW() \
+ ::y_absl::time_internal::UnscaledCycleClockWrapperForGetCurrentTime::Now()
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+// This is a friend wrapper around UnscaledCycleClock::Now()
+// (needed to access UnscaledCycleClock).
+class UnscaledCycleClockWrapperForGetCurrentTime {
+ public:
+ static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+};
+} // namespace time_internal
+
+// uint64_t is used in this module to provide an extra bit in multiplications
+
+// ---------------------------------------------------------------------
+// An implementation of reader-writer locks that use no atomic ops in the
+// read case. This is a generalization of Lamport's method for reading a
+// multiword clock. Increment a word on each write acquisition, using the
+// low-order bit as a spinlock; the word is the high word of the "clock".
+// Readers read the high word, then all other data, then the high word again,
+// and repeat the read if the reads of the high words yield different answers,
+// or an odd value (either case suggests possible interference from a writer).
+// Here we use a spinlock to ensure only one writer at a time, rather than
+// spinning on the bottom bit of the word to benefit from SpinLock
+// spin-delay tuning.
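+//
+// A sketch of the reader protocol used by GetCurrentTimeNanos() below:
+//
+//   seq0 = seq.load(memory_order_acquire);  // first read of the high word
+//   ... read the shared data ...
+//   atomic_thread_fence(memory_order_acquire);
+//   seq1 = seq.load(memory_order_relaxed);  // second read of the high word
+//   if (seq0 == seq1 && (seq0 & 1) == 0) the data is consistent;
+//   otherwise fall back to the slow path (rather than spinning).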
+
+// Acquire seqlock (*seq) and return the value to be written to unlock.
+static inline uint64_t SeqAcquire(std::atomic<uint64_t> *seq) {
+ uint64_t x = seq->fetch_add(1, std::memory_order_relaxed);
+
+ // We put a release fence between update to *seq and writes to shared data.
+ // Thus all stores to shared data are effectively release operations and
+ // update to *seq above cannot be re-ordered past any of them. Note that
+ // this barrier is not for the fetch_add above. A release barrier for the
+ // fetch_add would be before it, not after.
+ std::atomic_thread_fence(std::memory_order_release);
+
+ return x + 2; // original word plus 2
+}
+
+// Release seqlock (*seq) by writing x to it---a value previously returned by
+// SeqAcquire.
+static inline void SeqRelease(std::atomic<uint64_t> *seq, uint64_t x) {
+ // The unlock store to *seq must have release ordering so that all
+ // updates to shared data must finish before this store.
+ seq->store(x, std::memory_order_release); // release lock for readers
+}
+
+// ---------------------------------------------------------------------
+
+// "nsscaled" is unit of time equal to a (2**kScale)th of a nanosecond.
+enum { kScale = 30 };
+
+// The minimum interval between samples of the time base.
+// We pick enough time to amortize the cost of the sample,
+// to get a reasonably accurate cycle counter rate reading,
+// and not so much that calculations will overflow 64-bits.
+static const uint64_t kMinNSBetweenSamples = 2000 << 20;
+
+// We require that kMinNSBetweenSamples shifted by kScale
+// have at least a bit left over for 64-bit calculations.
+static_assert(((kMinNSBetweenSamples << (kScale + 1)) >> (kScale + 1)) ==
+ kMinNSBetweenSamples,
+ "cannot represent kMaxBetweenSamplesNSScaled");
+
+// data from a sample of the kernel's time value
+struct TimeSampleAtomic {
+ std::atomic<uint64_t> raw_ns{0}; // raw kernel time
+ std::atomic<uint64_t> base_ns{0}; // our estimate of time
+ std::atomic<uint64_t> base_cycles{0}; // cycle counter reading
+ std::atomic<uint64_t> nsscaled_per_cycle{0}; // cycle period
+ // cycles before we'll sample again (a scaled reciprocal of the period,
+ // to avoid a division on the fast path).
+ std::atomic<uint64_t> min_cycles_per_sample{0};
+};
+// Same again, but with non-atomic types
+struct TimeSample {
+ uint64_t raw_ns = 0; // raw kernel time
+ uint64_t base_ns = 0; // our estimate of time
+ uint64_t base_cycles = 0; // cycle counter reading
+ uint64_t nsscaled_per_cycle = 0; // cycle period
+ uint64_t min_cycles_per_sample = 0; // approx cycles before next sample
+};
+
+struct ABSL_CACHELINE_ALIGNED TimeState {
+ std::atomic<uint64_t> seq{0};
+ TimeSampleAtomic last_sample; // the last sample; under seq
+
+ // The following counters are used only by the test code.
+ int64_t stats_initializations{0};
+ int64_t stats_reinitializations{0};
+ int64_t stats_calibrations{0};
+ int64_t stats_slow_paths{0};
+ int64_t stats_fast_slow_paths{0};
+
+ uint64_t last_now_cycles ABSL_GUARDED_BY(lock){0};
+
+ // Used by GetCurrentTimeNanosFromKernel().
+ // We try to read clock values at about the same time as the kernel clock.
+  // This value gets adjusted up or down as an estimate of how long that should
+ // take, so we can reject attempts that take unusually long.
+ std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};
+ // Number of times in a row we've seen a kernel time call take substantially
+ // less than approx_syscall_time_in_cycles.
+ std::atomic<uint32_t> kernel_time_seen_smaller{0};
+
+ // A reader-writer lock protecting the static locations below.
+ // See SeqAcquire() and SeqRelease() above.
+ y_absl::base_internal::SpinLock lock{y_absl::kConstInit,
+ base_internal::SCHEDULE_KERNEL_ONLY};
+};
+ABSL_CONST_INIT static TimeState time_state{};
+
+// Return the time in ns as told by the kernel interface. Place in *cycleclock
+// the value of the cycleclock at about the time of the syscall.
+// This call represents the time base that this module synchronizes to.
+// Ensures that *cycleclock does not step back by up to (1 << 16) from
+// last_cycleclock, to discard small backward counter steps. (Larger steps are
+// assumed to be complete resyncs, which shouldn't happen. If they do, a full
+// reinitialization of the outer algorithm should occur.)
+static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
+ uint64_t *cycleclock)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
+ uint64_t local_approx_syscall_time_in_cycles = // local copy
+ time_state.approx_syscall_time_in_cycles.load(std::memory_order_relaxed);
+
+ int64_t current_time_nanos_from_system;
+ uint64_t before_cycles;
+ uint64_t after_cycles;
+ uint64_t elapsed_cycles;
+ int loops = 0;
+ do {
+ before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+ current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
+ after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+ // elapsed_cycles is unsigned, so is large on overflow
+ elapsed_cycles = after_cycles - before_cycles;
+ if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
+ ++loops == 20) { // clock changed frequencies? Back off.
+ loops = 0;
+ if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
+ local_approx_syscall_time_in_cycles =
+ (local_approx_syscall_time_in_cycles + 1) << 1;
+ }
+ time_state.approx_syscall_time_in_cycles.store(
+ local_approx_syscall_time_in_cycles, std::memory_order_relaxed);
+ }
+ } while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
+ last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));
+
+ // Adjust approx_syscall_time_in_cycles to be within a factor of 2
+ // of the typical time to execute one iteration of the loop above.
+ if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
+ // measured time is no smaller than half current approximation
+ time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
+ } else if (time_state.kernel_time_seen_smaller.fetch_add(
+ 1, std::memory_order_relaxed) >= 3) {
+ // smaller delays several times in a row; reduce approximation by 12.5%
+ const uint64_t new_approximation =
+ local_approx_syscall_time_in_cycles -
+ (local_approx_syscall_time_in_cycles >> 3);
+ time_state.approx_syscall_time_in_cycles.store(new_approximation,
+ std::memory_order_relaxed);
+ time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
+ }
+
+ *cycleclock = after_cycles;
+ return current_time_nanos_from_system;
+}
+
+static int64_t GetCurrentTimeNanosSlowPath() ABSL_ATTRIBUTE_COLD;
+
+// Read the contents of *atomic into *sample.
+// Each field is read atomically, but to maintain atomicity between fields,
+// the access must be done under a lock.
+static void ReadTimeSampleAtomic(const struct TimeSampleAtomic *atomic,
+ struct TimeSample *sample) {
+ sample->base_ns = atomic->base_ns.load(std::memory_order_relaxed);
+ sample->base_cycles = atomic->base_cycles.load(std::memory_order_relaxed);
+ sample->nsscaled_per_cycle =
+ atomic->nsscaled_per_cycle.load(std::memory_order_relaxed);
+ sample->min_cycles_per_sample =
+ atomic->min_cycles_per_sample.load(std::memory_order_relaxed);
+ sample->raw_ns = atomic->raw_ns.load(std::memory_order_relaxed);
+}
+
+// Public routine.
+// Algorithm: We wish to compute real time from a cycle counter. In normal
+// operation, we construct a piecewise linear approximation to the kernel time
+// source, using the cycle counter value. The start of each line segment is at
+// the same point as the end of the last, but may have a different slope (that
+// is, a different idea of the cycle counter frequency). Every couple of
+// seconds, the kernel time source is sampled and compared with the current
+// approximation. A new slope is chosen that, if followed for another couple
+// of seconds, will correct the error at the current position. The information
+// for a sample is in the "last_sample" struct. The linear approximation is
+// estimated_time = last_sample.base_ns +
+// last_sample.ns_per_cycle * (counter_reading - last_sample.base_cycles)
+// (ns_per_cycle is actually stored in different units and scaled, to avoid
+// overflow). The base_ns of the next linear approximation is the
+// estimated_time using the last approximation; the base_cycles is the cycle
+// counter value at that time; the ns_per_cycle is the number of ns per cycle
+// measured since the last sample, but adjusted so that most of the difference
+// between the estimated_time and the kernel time will be corrected by the
+// estimated time to the next sample. In normal operation, this algorithm
+// relies on:
+// - the cycle counter and kernel time rates not changing a lot in a few
+// seconds.
+// - the client calling into the code often compared to a couple of seconds, so
+// the time to the next correction can be estimated.
+// Any time ns_per_cycle is not known, a major error is detected, or the
+// assumption about frequent calls is violated, the implementation returns the
+// kernel time. It records sufficient data that a linear approximation can
+// resume a little later.
+
+int64_t GetCurrentTimeNanos() {
+ // read the data from the "last_sample" struct (but don't need raw_ns yet)
+ // The reads of "seq" and test of the values emulate a reader lock.
+ uint64_t base_ns;
+ uint64_t base_cycles;
+ uint64_t nsscaled_per_cycle;
+ uint64_t min_cycles_per_sample;
+ uint64_t seq_read0;
+ uint64_t seq_read1;
+
+ // If we have enough information to interpolate, the value returned will be
+ // derived from this cycleclock-derived time estimate. On some platforms
+ // (POWER) the function to retrieve this value has enough complexity to
+  // contribute to register pressure; reading it early, before initializing
+  // the other pieces of the calculation, minimizes spill/restore instructions
+  // and reduces icache cost.
+ uint64_t now_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+
+ // Acquire pairs with the barrier in SeqRelease - if this load sees that
+ // store, the shared-data reads necessarily see that SeqRelease's updates
+ // to the same shared data.
+ seq_read0 = time_state.seq.load(std::memory_order_acquire);
+
+ base_ns = time_state.last_sample.base_ns.load(std::memory_order_relaxed);
+ base_cycles =
+ time_state.last_sample.base_cycles.load(std::memory_order_relaxed);
+ nsscaled_per_cycle =
+ time_state.last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
+ min_cycles_per_sample = time_state.last_sample.min_cycles_per_sample.load(
+ std::memory_order_relaxed);
+
+ // This acquire fence pairs with the release fence in SeqAcquire. Since it
+ // is sequenced between reads of shared data and seq_read1, the reads of
+ // shared data are effectively acquiring.
+ std::atomic_thread_fence(std::memory_order_acquire);
+
+ // The shared-data reads are effectively acquire ordered, and the
+ // shared-data writes are effectively release ordered. Therefore if our
+ // shared-data reads see any of a particular update's shared-data writes,
+ // seq_read1 is guaranteed to see that update's SeqAcquire.
+ seq_read1 = time_state.seq.load(std::memory_order_relaxed);
+
+ // Fast path. Return if min_cycles_per_sample has not yet elapsed since the
+ // last sample, and we read a consistent sample. The fast path activates
+ // only when min_cycles_per_sample is non-zero, which happens when we get an
+ // estimate for the cycle time. The predicate will fail if now_cycles <
+ // base_cycles, or if some other thread is in the slow path.
+ //
+ // Since we now read now_cycles before base_ns, it is possible for now_cycles
+ // to be less than base_cycles (if we were interrupted between those loads and
+ // last_sample was updated). This is harmless, because delta_cycles will wrap
+  // and report a time far larger than min_cycles_per_sample. In that case
+ // we will take the slow path.
+ uint64_t delta_cycles;
+ if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
+ (delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) {
+ return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale);
+ }
+ return GetCurrentTimeNanosSlowPath();
+}
+
+// Return (a << kScale)/b.
+// Zero is returned if b==0. Scaling is performed internally to
+// preserve precision without overflow.
+static uint64_t SafeDivideAndScale(uint64_t a, uint64_t b) {
+ // Find maximum safe_shift so that
+ // 0 <= safe_shift <= kScale and (a << safe_shift) does not overflow.
+ int safe_shift = kScale;
+ while (((a << safe_shift) >> safe_shift) != a) {
+ safe_shift--;
+ }
+ uint64_t scaled_b = b >> (kScale - safe_shift);
+ uint64_t quotient = 0;
+ if (scaled_b != 0) {
+ quotient = (a << safe_shift) / scaled_b;
+ }
+ return quotient;
+}
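+
+// For example (illustrative only): SafeDivideAndScale(3, 2) returns
+// (3 << kScale) / 2, i.e. the fixed-point encoding of 1.5 with kScale
+// fractional bits. A typical use, matching UpdateLastSample() below:
+//
+//   uint64_t q = SafeDivideAndScale(ns, delta_cycles);  // ns/cycle, scaled
+//   uint64_t ns_for_n_cycles = (n_cycles * q) >> kScale;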
+
+static uint64_t UpdateLastSample(
+ uint64_t now_cycles, uint64_t now_ns, uint64_t delta_cycles,
+ const struct TimeSample *sample) ABSL_ATTRIBUTE_COLD;
+
+// The slow path of GetCurrentTimeNanos(). This is taken while gathering
+// initial samples, when enough time has elapsed since the last sample, or
+// when another thread is writing to last_sample.
+//
+// Manually mark this 'noinline' to minimize stack frame size of the fast
+// path. Without this, sometimes a compiler may inline this big block of code
+// into the fast path. That causes lots of register spills and reloads that
+// are unnecessary unless the slow path is taken.
+//
+// TODO(y_absl-team): Remove this attribute when our compiler is smart enough
+// to do the right thing.
+ABSL_ATTRIBUTE_NOINLINE
+static int64_t GetCurrentTimeNanosSlowPath()
+ ABSL_LOCKS_EXCLUDED(time_state.lock) {
+ // Serialize access to slow-path. Fast-path readers are not blocked yet, and
+ // code below must not modify last_sample until the seqlock is acquired.
+ time_state.lock.Lock();
+
+ // Sample the kernel time base. This is the definition of
+ // "now" if we take the slow path.
+ uint64_t now_cycles;
+ uint64_t now_ns =
+ GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles);
+ time_state.last_now_cycles = now_cycles;
+
+ uint64_t estimated_base_ns;
+
+ // ----------
+ // Read the "last_sample" values again; this time holding the write lock.
+ struct TimeSample sample;
+ ReadTimeSampleAtomic(&time_state.last_sample, &sample);
+
+ // ----------
+ // Try running the fast path again; another thread may have updated the
+ // sample between our run of the fast path and the sample we just read.
+ uint64_t delta_cycles = now_cycles - sample.base_cycles;
+ if (delta_cycles < sample.min_cycles_per_sample) {
+ // Another thread updated the sample. This path does not take the seqlock
+ // so that blocked readers can make progress without blocking new readers.
+ estimated_base_ns = sample.base_ns +
+ ((delta_cycles * sample.nsscaled_per_cycle) >> kScale);
+ time_state.stats_fast_slow_paths++;
+ } else {
+ estimated_base_ns =
+ UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample);
+ }
+
+ time_state.lock.Unlock();
+
+ return estimated_base_ns;
+}
+
+// Main part of the algorithm. Locks out readers, updates the approximation
+// using the new sample from the kernel, and stores the result in last_sample
+// for readers. Returns the new estimated time.
+static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
+ uint64_t delta_cycles,
+ const struct TimeSample *sample)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
+ uint64_t estimated_base_ns = now_ns;
+ uint64_t lock_value =
+ SeqAcquire(&time_state.seq); // acquire seqlock to block readers
+
+ // The 5s in the next if-statement limits the time for which we will trust
+ // the cycle counter and our last sample to give a reasonable result.
+ // Errors in the rate of the source clock can be multiplied by the ratio
+ // between this limit and kMinNSBetweenSamples.
+ if (sample->raw_ns == 0 || // no recent sample, or clock went backwards
+ sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns ||
+ now_ns < sample->raw_ns || now_cycles < sample->base_cycles) {
+ // record this sample, and forget any previously known slope.
+ time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
+ time_state.last_sample.base_ns.store(estimated_base_ns,
+ std::memory_order_relaxed);
+ time_state.last_sample.base_cycles.store(now_cycles,
+ std::memory_order_relaxed);
+ time_state.last_sample.nsscaled_per_cycle.store(0,
+ std::memory_order_relaxed);
+ time_state.last_sample.min_cycles_per_sample.store(
+ 0, std::memory_order_relaxed);
+ time_state.stats_initializations++;
+ } else if (sample->raw_ns + 500 * 1000 * 1000 < now_ns &&
+ sample->base_cycles + 50 < now_cycles) {
+ // Enough time has passed to compute the cycle time.
+ if (sample->nsscaled_per_cycle != 0) { // Have a cycle time estimate.
+      // Compute the time from the counter reading while avoiding overflow;
+      // delta_cycles may be larger than on the fast path.
+ uint64_t estimated_scaled_ns;
+ int s = -1;
+ do {
+ s++;
+ estimated_scaled_ns = (delta_cycles >> s) * sample->nsscaled_per_cycle;
+ } while (estimated_scaled_ns / sample->nsscaled_per_cycle !=
+ (delta_cycles >> s));
+ estimated_base_ns = sample->base_ns +
+ (estimated_scaled_ns >> (kScale - s));
+ }
+
+ // Compute the assumed cycle time kMinNSBetweenSamples ns into the future
+ // assuming the cycle counter rate stays the same as the last interval.
+ uint64_t ns = now_ns - sample->raw_ns;
+ uint64_t measured_nsscaled_per_cycle = SafeDivideAndScale(ns, delta_cycles);
+
+ uint64_t assumed_next_sample_delta_cycles =
+ SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle);
+
+ int64_t diff_ns = now_ns - estimated_base_ns; // estimate low by this much
+
+ // We want to set nsscaled_per_cycle so that our estimate of the ns time
+ // at the assumed cycle time is the assumed ns time.
+ // That is, we want to set nsscaled_per_cycle so:
+ // kMinNSBetweenSamples + diff_ns ==
+ // (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
+ // But we wish to damp oscillations, so instead correct only most
+ // of our current error, by solving:
+ // kMinNSBetweenSamples + diff_ns - (diff_ns / 16) ==
+ // (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
+ ns = kMinNSBetweenSamples + diff_ns - (diff_ns / 16);
+ uint64_t new_nsscaled_per_cycle =
+ SafeDivideAndScale(ns, assumed_next_sample_delta_cycles);
+ if (new_nsscaled_per_cycle != 0 &&
+ diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) {
+ // record the cycle time measurement
+ time_state.last_sample.nsscaled_per_cycle.store(
+ new_nsscaled_per_cycle, std::memory_order_relaxed);
+ uint64_t new_min_cycles_per_sample =
+ SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle);
+ time_state.last_sample.min_cycles_per_sample.store(
+ new_min_cycles_per_sample, std::memory_order_relaxed);
+ time_state.stats_calibrations++;
+ } else { // something went wrong; forget the slope
+ time_state.last_sample.nsscaled_per_cycle.store(
+ 0, std::memory_order_relaxed);
+ time_state.last_sample.min_cycles_per_sample.store(
+ 0, std::memory_order_relaxed);
+ estimated_base_ns = now_ns;
+ time_state.stats_reinitializations++;
+ }
+ time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
+ time_state.last_sample.base_ns.store(estimated_base_ns,
+ std::memory_order_relaxed);
+ time_state.last_sample.base_cycles.store(now_cycles,
+ std::memory_order_relaxed);
+ } else {
+ // have a sample, but no slope; waiting for enough time for a calibration
+ time_state.stats_slow_paths++;
+ }
+
+ SeqRelease(&time_state.seq, lock_value); // release the readers
+
+ return estimated_base_ns;
+}
+ABSL_NAMESPACE_END
+} // namespace y_absl
+#endif // ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+// Returns the maximum duration that SleepOnce() can sleep for.
+constexpr y_absl::Duration MaxSleep() {
+#ifdef _WIN32
+  // Windows Sleep() takes an unsigned long argument, in milliseconds.
+ return y_absl::Milliseconds(
+ std::numeric_limits<unsigned long>::max()); // NOLINT(runtime/int)
+#else
+ return y_absl::Seconds(std::numeric_limits<time_t>::max());
+#endif
+}
+
+// Sleeps for the given duration.
+// REQUIRES: to_sleep <= MaxSleep().
+void SleepOnce(y_absl::Duration to_sleep) {
+#ifdef _WIN32
+ Sleep(to_sleep / y_absl::Milliseconds(1));
+#else
+ struct timespec sleep_time = y_absl::ToTimespec(to_sleep);
+ while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
+ // Ignore signals and wait for the full interval to elapse.
+ }
+#endif
+}
+
+} // namespace
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(
+ y_absl::Duration duration) {
+ while (duration > y_absl::ZeroDuration()) {
+ y_absl::Duration to_sleep = std::min(duration, y_absl::MaxSleep());
+ y_absl::SleepOnce(to_sleep);
+ duration -= to_sleep;
+ }
+}
+
+} // extern "C"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.h
new file mode 100644
index 00000000000..178b96d828c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.h
@@ -0,0 +1,74 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: clock.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains utility functions for working with the system-wide
+// realtime clock. For descriptions of the main time abstractions used within
+// this header file, consult the time.h header file.
+#ifndef ABSL_TIME_CLOCK_H_
+#define ABSL_TIME_CLOCK_H_
+
+#include "y_absl/base/macros.h"
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Now()
+//
+// Returns the current time, expressed as an `y_absl::Time` absolute time value.
+y_absl::Time Now();
+
+// GetCurrentTimeNanos()
+//
+// Returns the current time, expressed as a count of nanoseconds since the Unix
+// Epoch (https://en.wikipedia.org/wiki/Unix_time). Prefer `y_absl::Now()` instead
+// for all but the most performance-sensitive cases (i.e., when you are calling
+// this function hundreds of thousands of times per second).
+int64_t GetCurrentTimeNanos();
+
+// SleepFor()
+//
+// Sleeps for the specified duration, expressed as an `y_absl::Duration`.
+//
+// Notes:
+// * Signal interruptions will not reduce the sleep duration.
+// * Returns immediately when passed a nonpositive duration.
+void SleepFor(y_absl::Duration duration);
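+//
+// Example (illustrative only):
+//
+//   y_absl::Time start = y_absl::Now();
+//   y_absl::SleepFor(y_absl::Milliseconds(50));
+//   y_absl::Duration elapsed = y_absl::Now() - start;  // >= 50ms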
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+// -----------------------------------------------------------------------------
+// Implementation Details
+// -----------------------------------------------------------------------------
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(y_absl::Duration duration);
+} // extern "C"
+
+inline void y_absl::SleepFor(y_absl::Duration duration) {
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(duration);
+}
+
+#endif // ABSL_TIME_CLOCK_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc
new file mode 100644
index 00000000000..d4914556e6c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc
@@ -0,0 +1,954 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of the y_absl::Duration class, which is declared in
+// //y_absl/time.h. This class behaves like a numeric type; it has no public
+// methods and is used only through the operators defined here.
+//
+// Implementation notes:
+//
+// An y_absl::Duration is represented as
+//
+// rep_hi_ : (int64_t) Whole seconds
+// rep_lo_ : (uint32_t) Fractions of a second
+//
+// The seconds value (rep_hi_) may be positive or negative as appropriate.
+// The fractional seconds (rep_lo_) is always a positive offset from rep_hi_.
+// The API for Duration guarantees at least nanosecond resolution, which
+// means rep_lo_ could have a max value of 1B - 1 if it stored nanoseconds.
+// However, to utilize more of the available 32 bits of space in rep_lo_,
+// we instead store quarters of a nanosecond in rep_lo_ resulting in a max
+// value of 4B - 1. This allows us to correctly handle calculations like
+// 0.5 nanos + 0.5 nanos = 1 nano. The following example shows the actual
+// Duration rep using quarters of a nanosecond.
+//
+// 2.5 sec = {rep_hi_=2, rep_lo_=2000000000} // lo = 4 * 500000000
+// -2.5 sec = {rep_hi_=-3, rep_lo_=2000000000}
+//
+// Infinite durations are represented as Durations with the rep_lo_ field set
+// to all 1s.
+//
+// +InfiniteDuration:
+// rep_hi_ : kint64max
+// rep_lo_ : ~0U
+//
+// -InfiniteDuration:
+// rep_hi_ : kint64min
+// rep_lo_ : ~0U
+//
+// Arithmetic overflows/underflows to +/- infinity and saturates.
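+//
+// Illustrative examples (assuming kTicksPerNanosecond == 4 and
+// kTicksPerSecond == 4,000,000,000, per the quarter-nanosecond ticks above):
+//
+//   1.5 ns = {rep_hi_=0, rep_lo_=6}             // 6 ticks = 1.5 * 4
+//   -0.5 ns = {rep_hi_=-1, rep_lo_=3999999998}  // borrow one second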
+
+#if defined(_MSC_VER)
+#include <winsock2.h> // for timeval
+#endif
+
+#include <algorithm>
+#include <cassert>
+#include <cctype>
+#include <cerrno>
+#include <cmath>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <functional>
+#include <limits>
+#include <util/generic/string.h>
+
+#include "y_absl/base/casts.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/numeric/int128.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/strings/strip.h"
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace {
+
+using time_internal::kTicksPerNanosecond;
+using time_internal::kTicksPerSecond;
+
+constexpr int64_t kint64max = std::numeric_limits<int64_t>::max();
+constexpr int64_t kint64min = std::numeric_limits<int64_t>::min();
+
+// Can't use std::isinf() because it is not reliably available on Windows.
+inline bool IsFinite(double d) {
+ if (std::isnan(d)) return false;
+ return d != std::numeric_limits<double>::infinity() &&
+ d != -std::numeric_limits<double>::infinity();
+}
+
+inline bool IsValidDivisor(double d) {
+ if (std::isnan(d)) return false;
+ return d != 0.0;
+}
+
+// Can't use std::round() because it is only available beginning with C++11.
+// Note that we ignore the possibility of floating-point over/underflow.
+template <typename Double>
+inline double Round(Double d) {
+ return d < 0 ? std::ceil(d - 0.5) : std::floor(d + 0.5);
+}
+
+// *sec may be positive or negative. *ticks must be in the range
+// -kTicksPerSecond < *ticks < kTicksPerSecond. If *ticks is negative it
+// will be normalized to a positive value by adjusting *sec accordingly.
+inline void NormalizeTicks(int64_t* sec, int64_t* ticks) {
+ if (*ticks < 0) {
+ --*sec;
+ *ticks += kTicksPerSecond;
+ }
+}
+
+// Makes a uint128 from the absolute value of the given scalar.
+inline uint128 MakeU128(int64_t a) {
+ uint128 u128 = 0;
+ if (a < 0) {
+ ++u128;
+ ++a; // Makes it safe to negate 'a'
+ a = -a;
+ }
+ u128 += static_cast<uint64_t>(a);
+ return u128;
+}
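+// For example (illustrative): MakeU128(-3) pre-increments u128 to 1, negates
+// a to 2, and returns 3. MakeU128(kint64min) works the same way, returning
+// 2^63 even though -kint64min is not representable as an int64_t.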
+
+// Makes a uint128 count of ticks out of the absolute value of the Duration.
+inline uint128 MakeU128Ticks(Duration d) {
+ int64_t rep_hi = time_internal::GetRepHi(d);
+ uint32_t rep_lo = time_internal::GetRepLo(d);
+ if (rep_hi < 0) {
+ ++rep_hi;
+ rep_hi = -rep_hi;
+ rep_lo = kTicksPerSecond - rep_lo;
+ }
+ uint128 u128 = static_cast<uint64_t>(rep_hi);
+ u128 *= static_cast<uint64_t>(kTicksPerSecond);
+ u128 += rep_lo;
+ return u128;
+}
+
+// Breaks a uint128 of ticks into a Duration.
+inline Duration MakeDurationFromU128(uint128 u128, bool is_neg) {
+ int64_t rep_hi;
+ uint32_t rep_lo;
+ const uint64_t h64 = Uint128High64(u128);
+ const uint64_t l64 = Uint128Low64(u128);
+ if (h64 == 0) { // fastpath
+ const uint64_t hi = l64 / kTicksPerSecond;
+ rep_hi = static_cast<int64_t>(hi);
+ rep_lo = static_cast<uint32_t>(l64 - hi * kTicksPerSecond);
+ } else {
+ // kMaxRepHi64 is the high 64 bits of (2^63 * kTicksPerSecond).
+ // Any positive tick count whose high 64 bits are >= kMaxRepHi64
+ // is not representable as a Duration. A negative tick count can
+ // have its high 64 bits == kMaxRepHi64 but only when the low 64
+ // bits are all zero, otherwise it is not representable either.
+ const uint64_t kMaxRepHi64 = 0x77359400UL;
+ if (h64 >= kMaxRepHi64) {
+ if (is_neg && h64 == kMaxRepHi64 && l64 == 0) {
+ // Avoid trying to represent -kint64min below.
+ return time_internal::MakeDuration(kint64min);
+ }
+ return is_neg ? -InfiniteDuration() : InfiniteDuration();
+ }
+ const uint128 kTicksPerSecond128 = static_cast<uint64_t>(kTicksPerSecond);
+ const uint128 hi = u128 / kTicksPerSecond128;
+ rep_hi = static_cast<int64_t>(Uint128Low64(hi));
+ rep_lo =
+ static_cast<uint32_t>(Uint128Low64(u128 - hi * kTicksPerSecond128));
+ }
+ if (is_neg) {
+ rep_hi = -rep_hi;
+ if (rep_lo != 0) {
+ --rep_hi;
+ rep_lo = kTicksPerSecond - rep_lo;
+ }
+ }
+ return time_internal::MakeDuration(rep_hi, rep_lo);
+}
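+// (Illustrative check of kMaxRepHi64 above: with kTicksPerSecond ==
+// 4,000,000,000, 2^63 * kTicksPerSecond == kTicksPerSecond << 63, whose high
+// 64 bits are kTicksPerSecond >> 1 == 2,000,000,000 == 0x77359400.)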
+
+// Convert between int64_t and uint64_t, preserving representation. This
+// allows us to do arithmetic in the unsigned domain, where overflow has
+// well-defined behavior. See operator+=() and operator-=().
+//
+// C99 7.20.1.1.1, as referenced by C++11 18.4.1.2, says, "The typedef
+// name intN_t designates a signed integer type with width N, no padding
+// bits, and a two's complement representation." So, we can convert to
+// and from the corresponding uint64_t value using a bit cast.
+inline uint64_t EncodeTwosComp(int64_t v) {
+ return y_absl::bit_cast<uint64_t>(v);
+}
+inline int64_t DecodeTwosComp(uint64_t v) { return y_absl::bit_cast<int64_t>(v); }
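+
+// For example (illustrative): EncodeTwosComp(-1) == 0xFFFFFFFFFFFFFFFF, and
+//
+//   int64_t sum = DecodeTwosComp(EncodeTwosComp(a) + EncodeTwosComp(b));
+//
+// is the wrapped two's-complement sum of a and b with no undefined behavior,
+// which is what the overflow checks in operator+=() and operator-=() rely on.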
+
+// Note: The overflow detection in this function is done using greater/less *or
+// equal* because kint64max/min is too large to be represented exactly in a
+// double (which only has 53 bits of precision). In order to avoid assigning to
+// rep->hi a double value that is too large for an int64_t (and therefore is
+// undefined), we must consider computations that equal kint64max/min as a
+// double as overflow cases.
+inline bool SafeAddRepHi(double a_hi, double b_hi, Duration* d) {
+ double c = a_hi + b_hi;
+ if (c >= static_cast<double>(kint64max)) {
+ *d = InfiniteDuration();
+ return false;
+ }
+ if (c <= static_cast<double>(kint64min)) {
+ *d = -InfiniteDuration();
+ return false;
+ }
+ *d = time_internal::MakeDuration(c, time_internal::GetRepLo(*d));
+ return true;
+}
+
+// A functor that's similar to std::multiplies<T>, except this returns the max
+// T value instead of overflowing. This is only defined for uint128.
+template <typename Ignored>
+struct SafeMultiply {
+ uint128 operator()(uint128 a, uint128 b) const {
+    // The high 64 bits of b are always zero; b originated as an int64_t.
+ assert(Uint128High64(b) == 0);
+ // Fastpath to avoid the expensive overflow check with division.
+ if (Uint128High64(a) == 0) {
+ return (((Uint128Low64(a) | Uint128Low64(b)) >> 32) == 0)
+ ? static_cast<uint128>(Uint128Low64(a) * Uint128Low64(b))
+ : a * b;
+ }
+ return b == 0 ? b : (a > kuint128max / b) ? kuint128max : a * b;
+ }
+};
+
+// Scales (i.e., multiplies or divides, depending on the Operation template)
+// the Duration d by the int64_t r.
+template <template <typename> class Operation>
+inline Duration ScaleFixed(Duration d, int64_t r) {
+ const uint128 a = MakeU128Ticks(d);
+ const uint128 b = MakeU128(r);
+ const uint128 q = Operation<uint128>()(a, b);
+ const bool is_neg = (time_internal::GetRepHi(d) < 0) != (r < 0);
+ return MakeDurationFromU128(q, is_neg);
+}
+
+// Scales (i.e., multiplies or divides, depending on the Operation template)
+// the Duration d by the double r.
+template <template <typename> class Operation>
+inline Duration ScaleDouble(Duration d, double r) {
+ Operation<double> op;
+ double hi_doub = op(time_internal::GetRepHi(d), r);
+ double lo_doub = op(time_internal::GetRepLo(d), r);
+
+ double hi_int = 0;
+ double hi_frac = std::modf(hi_doub, &hi_int);
+
+ // Moves hi's fractional bits to lo.
+ lo_doub /= kTicksPerSecond;
+ lo_doub += hi_frac;
+
+ double lo_int = 0;
+ double lo_frac = std::modf(lo_doub, &lo_int);
+
+ // Rolls lo into hi if necessary.
+ int64_t lo64 = Round(lo_frac * kTicksPerSecond);
+
+ Duration ans;
+ if (!SafeAddRepHi(hi_int, lo_int, &ans)) return ans;
+ int64_t hi64 = time_internal::GetRepHi(ans);
+ if (!SafeAddRepHi(hi64, lo64 / kTicksPerSecond, &ans)) return ans;
+ hi64 = time_internal::GetRepHi(ans);
+ lo64 %= kTicksPerSecond;
+ NormalizeTicks(&hi64, &lo64);
+ return time_internal::MakeDuration(hi64, lo64);
+}
+
+// Tries to divide num by den as fast as possible by looking for common, easy
+// cases. If the division was done, the quotient is stored in *q, the
+// remainder is stored in *rem, and true is returned.
+inline bool IDivFastPath(const Duration num, const Duration den, int64_t* q,
+ Duration* rem) {
+ // Bail if num or den is an infinity.
+ if (time_internal::IsInfiniteDuration(num) ||
+ time_internal::IsInfiniteDuration(den))
+ return false;
+
+ int64_t num_hi = time_internal::GetRepHi(num);
+ uint32_t num_lo = time_internal::GetRepLo(num);
+ int64_t den_hi = time_internal::GetRepHi(den);
+ uint32_t den_lo = time_internal::GetRepLo(den);
+
+ if (den_hi == 0 && den_lo == kTicksPerNanosecond) {
+ // Dividing by 1ns
+ if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000000) {
+ *q = num_hi * 1000000000 + num_lo / kTicksPerNanosecond;
+ *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+ return true;
+ }
+ } else if (den_hi == 0 && den_lo == 100 * kTicksPerNanosecond) {
+ // Dividing by 100ns (common when converting to Universal time)
+ if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 10000000) {
+ *q = num_hi * 10000000 + num_lo / (100 * kTicksPerNanosecond);
+ *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+ return true;
+ }
+ } else if (den_hi == 0 && den_lo == 1000 * kTicksPerNanosecond) {
+ // Dividing by 1us
+ if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000) {
+ *q = num_hi * 1000000 + num_lo / (1000 * kTicksPerNanosecond);
+ *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+ return true;
+ }
+ } else if (den_hi == 0 && den_lo == 1000000 * kTicksPerNanosecond) {
+ // Dividing by 1ms
+ if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000) {
+ *q = num_hi * 1000 + num_lo / (1000000 * kTicksPerNanosecond);
+ *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+ return true;
+ }
+ } else if (den_hi > 0 && den_lo == 0) {
+ // Dividing by positive multiple of 1s
+ if (num_hi >= 0) {
+ if (den_hi == 1) {
+ *q = num_hi;
+ *rem = time_internal::MakeDuration(0, num_lo);
+ return true;
+ }
+ *q = num_hi / den_hi;
+ *rem = time_internal::MakeDuration(num_hi % den_hi, num_lo);
+ return true;
+ }
+ if (num_lo != 0) {
+ num_hi += 1;
+ }
+ int64_t quotient = num_hi / den_hi;
+ int64_t rem_sec = num_hi % den_hi;
+ if (rem_sec > 0) {
+ rem_sec -= den_hi;
+ quotient += 1;
+ }
+ if (num_lo != 0) {
+ rem_sec -= 1;
+ }
+ *q = quotient;
+ *rem = time_internal::MakeDuration(rem_sec, num_lo);
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace
+
+namespace time_internal {
+
+// The 'satq' argument indicates whether the quotient should saturate at the
+// bounds of int64_t. If it does saturate, the difference will spill over to
+// the remainder. If it does not saturate, the remainder remains accurate,
+// but the returned quotient will over/underflow int64_t and should not be used.
+int64_t IDivDuration(bool satq, const Duration num, const Duration den,
+ Duration* rem) {
+ int64_t q = 0;
+ if (IDivFastPath(num, den, &q, rem)) {
+ return q;
+ }
+
+ const bool num_neg = num < ZeroDuration();
+ const bool den_neg = den < ZeroDuration();
+ const bool quotient_neg = num_neg != den_neg;
+
+ if (time_internal::IsInfiniteDuration(num) || den == ZeroDuration()) {
+ *rem = num_neg ? -InfiniteDuration() : InfiniteDuration();
+ return quotient_neg ? kint64min : kint64max;
+ }
+ if (time_internal::IsInfiniteDuration(den)) {
+ *rem = num;
+ return 0;
+ }
+
+ const uint128 a = MakeU128Ticks(num);
+ const uint128 b = MakeU128Ticks(den);
+ uint128 quotient128 = a / b;
+
+ if (satq) {
+ // Limits the quotient to the range of int64_t.
+ if (quotient128 > uint128(static_cast<uint64_t>(kint64max))) {
+ quotient128 = quotient_neg ? uint128(static_cast<uint64_t>(kint64min))
+ : uint128(static_cast<uint64_t>(kint64max));
+ }
+ }
+
+ const uint128 remainder128 = a - quotient128 * b;
+ *rem = MakeDurationFromU128(remainder128, num_neg);
+
+ if (!quotient_neg || quotient128 == 0) {
+ return Uint128Low64(quotient128) & kint64max;
+ }
+ // The quotient needs to be negated, but we need to carefully handle
+ // quotient128s with the top bit on.
+ return -static_cast<int64_t>(Uint128Low64(quotient128 - 1) & kint64max) - 1;
+}
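+// For example (illustrative): IDivDuration(true, Seconds(7), Seconds(2), &rem)
+// returns 3 and sets rem to Seconds(1). With num == Seconds(-7) it returns -3
+// and sets rem to Seconds(-1): the quotient truncates toward zero and the
+// remainder carries the sign of the numerator.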
+
+} // namespace time_internal
+
+//
+// Additive operators.
+//
+
+Duration& Duration::operator+=(Duration rhs) {
+ if (time_internal::IsInfiniteDuration(*this)) return *this;
+ if (time_internal::IsInfiniteDuration(rhs)) return *this = rhs;
+ const int64_t orig_rep_hi = rep_hi_;
+ rep_hi_ =
+ DecodeTwosComp(EncodeTwosComp(rep_hi_) + EncodeTwosComp(rhs.rep_hi_));
+ if (rep_lo_ >= kTicksPerSecond - rhs.rep_lo_) {
+ rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_) + 1);
+ rep_lo_ -= kTicksPerSecond;
+ }
+ rep_lo_ += rhs.rep_lo_;
+ if (rhs.rep_hi_ < 0 ? rep_hi_ > orig_rep_hi : rep_hi_ < orig_rep_hi) {
+ return *this = rhs.rep_hi_ < 0 ? -InfiniteDuration() : InfiniteDuration();
+ }
+ return *this;
+}
+
+Duration& Duration::operator-=(Duration rhs) {
+ if (time_internal::IsInfiniteDuration(*this)) return *this;
+ if (time_internal::IsInfiniteDuration(rhs)) {
+ return *this = rhs.rep_hi_ >= 0 ? -InfiniteDuration() : InfiniteDuration();
+ }
+ const int64_t orig_rep_hi = rep_hi_;
+ rep_hi_ =
+ DecodeTwosComp(EncodeTwosComp(rep_hi_) - EncodeTwosComp(rhs.rep_hi_));
+ if (rep_lo_ < rhs.rep_lo_) {
+ rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_) - 1);
+ rep_lo_ += kTicksPerSecond;
+ }
+ rep_lo_ -= rhs.rep_lo_;
+ if (rhs.rep_hi_ < 0 ? rep_hi_ < orig_rep_hi : rep_hi_ > orig_rep_hi) {
+ return *this = rhs.rep_hi_ >= 0 ? -InfiniteDuration() : InfiniteDuration();
+ }
+ return *this;
+}
+
+//
+// Multiplicative operators.
+//
+
+Duration& Duration::operator*=(int64_t r) {
+ if (time_internal::IsInfiniteDuration(*this)) {
+ const bool is_neg = (r < 0) != (rep_hi_ < 0);
+ return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
+ }
+ return *this = ScaleFixed<SafeMultiply>(*this, r);
+}
+
+Duration& Duration::operator*=(double r) {
+ if (time_internal::IsInfiniteDuration(*this) || !IsFinite(r)) {
+ const bool is_neg = (std::signbit(r) != 0) != (rep_hi_ < 0);
+ return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
+ }
+ return *this = ScaleDouble<std::multiplies>(*this, r);
+}
+
+Duration& Duration::operator/=(int64_t r) {
+ if (time_internal::IsInfiniteDuration(*this) || r == 0) {
+ const bool is_neg = (r < 0) != (rep_hi_ < 0);
+ return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
+ }
+ return *this = ScaleFixed<std::divides>(*this, r);
+}
+
+Duration& Duration::operator/=(double r) {
+ if (time_internal::IsInfiniteDuration(*this) || !IsValidDivisor(r)) {
+ const bool is_neg = (std::signbit(r) != 0) != (rep_hi_ < 0);
+ return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
+ }
+ return *this = ScaleDouble<std::divides>(*this, r);
+}
+
+Duration& Duration::operator%=(Duration rhs) {
+ time_internal::IDivDuration(false, *this, rhs, this);
+ return *this;
+}
+
+double FDivDuration(Duration num, Duration den) {
+ // Arithmetic with infinity is sticky.
+ if (time_internal::IsInfiniteDuration(num) || den == ZeroDuration()) {
+ return (num < ZeroDuration()) == (den < ZeroDuration())
+ ? std::numeric_limits<double>::infinity()
+ : -std::numeric_limits<double>::infinity();
+ }
+ if (time_internal::IsInfiniteDuration(den)) return 0.0;
+
+ double a =
+ static_cast<double>(time_internal::GetRepHi(num)) * kTicksPerSecond +
+ time_internal::GetRepLo(num);
+ double b =
+ static_cast<double>(time_internal::GetRepHi(den)) * kTicksPerSecond +
+ time_internal::GetRepLo(den);
+ return a / b;
+}
+
+//
+// Trunc/Floor/Ceil.
+//
+
+Duration Trunc(Duration d, Duration unit) {
+ return d - (d % unit);
+}
+
+Duration Floor(const Duration d, const Duration unit) {
+ const y_absl::Duration td = Trunc(d, unit);
+ return td <= d ? td : td - AbsDuration(unit);
+}
+
+Duration Ceil(const Duration d, const Duration unit) {
+ const y_absl::Duration td = Trunc(d, unit);
+ return td >= d ? td : td + AbsDuration(unit);
+}
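+
+// For example (illustrative): with d == Seconds(-7) / 2 (i.e. -3.5s) and
+// unit == Seconds(1):
+//
+//   Trunc(d, unit);  // -3s (toward zero)
+//   Floor(d, unit);  // -4s
+//   Ceil(d, unit);   // -3s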
+
+//
+// Factory functions.
+//
+
+Duration DurationFromTimespec(timespec ts) {
+ if (static_cast<uint64_t>(ts.tv_nsec) < 1000 * 1000 * 1000) {
+ int64_t ticks = ts.tv_nsec * kTicksPerNanosecond;
+ return time_internal::MakeDuration(ts.tv_sec, ticks);
+ }
+ return Seconds(ts.tv_sec) + Nanoseconds(ts.tv_nsec);
+}
+
+Duration DurationFromTimeval(timeval tv) {
+ if (static_cast<uint64_t>(tv.tv_usec) < 1000 * 1000) {
+ int64_t ticks = tv.tv_usec * 1000 * kTicksPerNanosecond;
+ return time_internal::MakeDuration(tv.tv_sec, ticks);
+ }
+ return Seconds(tv.tv_sec) + Microseconds(tv.tv_usec);
+}
+
+//
+// Conversion to other duration types.
+//
+
+int64_t ToInt64Nanoseconds(Duration d) {
+ if (time_internal::GetRepHi(d) >= 0 &&
+ time_internal::GetRepHi(d) >> 33 == 0) {
+ return (time_internal::GetRepHi(d) * 1000 * 1000 * 1000) +
+ (time_internal::GetRepLo(d) / kTicksPerNanosecond);
+ }
+ return d / Nanoseconds(1);
+}
+int64_t ToInt64Microseconds(Duration d) {
+ if (time_internal::GetRepHi(d) >= 0 &&
+ time_internal::GetRepHi(d) >> 43 == 0) {
+ return (time_internal::GetRepHi(d) * 1000 * 1000) +
+ (time_internal::GetRepLo(d) / (kTicksPerNanosecond * 1000));
+ }
+ return d / Microseconds(1);
+}
+int64_t ToInt64Milliseconds(Duration d) {
+ if (time_internal::GetRepHi(d) >= 0 &&
+ time_internal::GetRepHi(d) >> 53 == 0) {
+ return (time_internal::GetRepHi(d) * 1000) +
+ (time_internal::GetRepLo(d) / (kTicksPerNanosecond * 1000 * 1000));
+ }
+ return d / Milliseconds(1);
+}
+int64_t ToInt64Seconds(Duration d) {
+ int64_t hi = time_internal::GetRepHi(d);
+ if (time_internal::IsInfiniteDuration(d)) return hi;
+ if (hi < 0 && time_internal::GetRepLo(d) != 0) ++hi;
+ return hi;
+}
+int64_t ToInt64Minutes(Duration d) {
+ int64_t hi = time_internal::GetRepHi(d);
+ if (time_internal::IsInfiniteDuration(d)) return hi;
+ if (hi < 0 && time_internal::GetRepLo(d) != 0) ++hi;
+ return hi / 60;
+}
+int64_t ToInt64Hours(Duration d) {
+ int64_t hi = time_internal::GetRepHi(d);
+ if (time_internal::IsInfiniteDuration(d)) return hi;
+ if (hi < 0 && time_internal::GetRepLo(d) != 0) ++hi;
+ return hi / (60 * 60);
+}
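+
+// (Illustrative note on the shift bounds above: GetRepHi(d) >> 33 == 0 means
+// the whole-seconds value is below 2^33 ~= 8.6e9, so multiplying by 10^9
+// stays below ~8.6e18 < kint64max; the 43- and 53-bit bounds play the same
+// role for the 10^6 and 10^3 multipliers.)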
+
+double ToDoubleNanoseconds(Duration d) {
+ return FDivDuration(d, Nanoseconds(1));
+}
+double ToDoubleMicroseconds(Duration d) {
+ return FDivDuration(d, Microseconds(1));
+}
+double ToDoubleMilliseconds(Duration d) {
+ return FDivDuration(d, Milliseconds(1));
+}
+double ToDoubleSeconds(Duration d) {
+ return FDivDuration(d, Seconds(1));
+}
+double ToDoubleMinutes(Duration d) {
+ return FDivDuration(d, Minutes(1));
+}
+double ToDoubleHours(Duration d) {
+ return FDivDuration(d, Hours(1));
+}
+
+timespec ToTimespec(Duration d) {
+ timespec ts;
+ if (!time_internal::IsInfiniteDuration(d)) {
+ int64_t rep_hi = time_internal::GetRepHi(d);
+ uint32_t rep_lo = time_internal::GetRepLo(d);
+ if (rep_hi < 0) {
+ // Tweak the fields so that unsigned division of rep_lo
+ // maps to truncation (towards zero) for the timespec.
+ rep_lo += kTicksPerNanosecond - 1;
+ if (rep_lo >= kTicksPerSecond) {
+ rep_hi += 1;
+ rep_lo -= kTicksPerSecond;
+ }
+ }
+ ts.tv_sec = rep_hi;
+ if (ts.tv_sec == rep_hi) { // no time_t narrowing
+ ts.tv_nsec = rep_lo / kTicksPerNanosecond;
+ return ts;
+ }
+ }
+ if (d >= ZeroDuration()) {
+ ts.tv_sec = std::numeric_limits<time_t>::max();
+ ts.tv_nsec = 1000 * 1000 * 1000 - 1;
+ } else {
+ ts.tv_sec = std::numeric_limits<time_t>::min();
+ ts.tv_nsec = 0;
+ }
+ return ts;
+}
+
+timeval ToTimeval(Duration d) {
+ timeval tv;
+ timespec ts = ToTimespec(d);
+ if (ts.tv_sec < 0) {
+ // Tweak the fields so that positive division of tv_nsec
+ // maps to truncation (towards zero) for the timeval.
+ ts.tv_nsec += 1000 - 1;
+ if (ts.tv_nsec >= 1000 * 1000 * 1000) {
+ ts.tv_sec += 1;
+ ts.tv_nsec -= 1000 * 1000 * 1000;
+ }
+ }
+ tv.tv_sec = ts.tv_sec;
+ if (tv.tv_sec != ts.tv_sec) { // narrowing
+ if (ts.tv_sec < 0) {
+ tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::min();
+ tv.tv_usec = 0;
+ } else {
+ tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::max();
+ tv.tv_usec = 1000 * 1000 - 1;
+ }
+ return tv;
+ }
+ tv.tv_usec = static_cast<int>(ts.tv_nsec / 1000); // suseconds_t
+ return tv;
+}
+
+std::chrono::nanoseconds ToChronoNanoseconds(Duration d) {
+ return time_internal::ToChronoDuration<std::chrono::nanoseconds>(d);
+}
+std::chrono::microseconds ToChronoMicroseconds(Duration d) {
+ return time_internal::ToChronoDuration<std::chrono::microseconds>(d);
+}
+std::chrono::milliseconds ToChronoMilliseconds(Duration d) {
+ return time_internal::ToChronoDuration<std::chrono::milliseconds>(d);
+}
+std::chrono::seconds ToChronoSeconds(Duration d) {
+ return time_internal::ToChronoDuration<std::chrono::seconds>(d);
+}
+std::chrono::minutes ToChronoMinutes(Duration d) {
+ return time_internal::ToChronoDuration<std::chrono::minutes>(d);
+}
+std::chrono::hours ToChronoHours(Duration d) {
+ return time_internal::ToChronoDuration<std::chrono::hours>(d);
+}
+
+//
+// To/From string formatting.
+//
+
+namespace {
+
+// Formats a positive 64-bit integer in the given field width. Note that
+// it is up to the caller of Format64() to ensure that there is sufficient
+// space before ep to hold the conversion.
+char* Format64(char* ep, int width, int64_t v) {
+ do {
+ --width;
+ *--ep = '0' + (v % 10); // contiguous digits
+ } while (v /= 10);
+ while (--width >= 0) *--ep = '0'; // zero pad
+ return ep;
+}
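+
+// For example (illustrative):
+//
+//   char buf[8];
+//   char* ep = buf + sizeof(buf);
+//   char* bp = Format64(ep, 4, 42);   // writes "0042"; bp == ep - 4
+//   char* bp2 = Format64(ep, 0, 42);  // writes "42" with no padding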
+
+// Helpers for FormatDuration() that format 'n' and append it to 'out'
+// followed by the given 'unit'. If 'n' formats to "0", nothing is
+// appended (not even the unit).
+
+// A type that encapsulates how to display a value of a particular unit. For
+// values that are displayed with fractional parts, the precision indicates
+// where to round the value. The precision varies with the display unit because
+// a Duration can hold only quarters of a nanosecond, so displaying information
+// beyond that is just noise.
+//
+// For example, a microsecond value of 42.00025xxxxx should not display beyond 5
+// fractional digits, because it is in the noise of what a Duration can
+// represent.
+struct DisplayUnit {
+ y_absl::string_view abbr;
+ int prec;
+ double pow10;
+};
+ABSL_CONST_INIT const DisplayUnit kDisplayNano = {"ns", 2, 1e2};
+ABSL_CONST_INIT const DisplayUnit kDisplayMicro = {"us", 5, 1e5};
+ABSL_CONST_INIT const DisplayUnit kDisplayMilli = {"ms", 8, 1e8};
+ABSL_CONST_INIT const DisplayUnit kDisplaySec = {"s", 11, 1e11};
+ABSL_CONST_INIT const DisplayUnit kDisplayMin = {"m", -1, 0.0}; // prec ignored
+ABSL_CONST_INIT const DisplayUnit kDisplayHour = {"h", -1,
+ 0.0}; // prec ignored
+
+void AppendNumberUnit(TString* out, int64_t n, DisplayUnit unit) {
+ char buf[sizeof("2562047788015216")]; // hours in max duration
+ char* const ep = buf + sizeof(buf);
+ char* bp = Format64(ep, 0, n);
+ if (*bp != '0' || bp + 1 != ep) {
+ out->append(bp, ep - bp);
+ out->append(unit.abbr.data(), unit.abbr.size());
+ }
+}
+
+// Note: unit.prec is limited to double's digits10 value (typically 15) so it
+// always fits in buf[].
+void AppendNumberUnit(TString* out, double n, DisplayUnit unit) {
+ constexpr int kBufferSize = std::numeric_limits<double>::digits10;
+ const int prec = std::min(kBufferSize, unit.prec);
+ char buf[kBufferSize]; // also large enough to hold integer part
+ char* ep = buf + sizeof(buf);
+ double d = 0;
+ int64_t frac_part = Round(std::modf(n, &d) * unit.pow10);
+ int64_t int_part = d;
+ if (int_part != 0 || frac_part != 0) {
+ char* bp = Format64(ep, 0, int_part); // always < 1000
+ out->append(bp, ep - bp);
+ if (frac_part != 0) {
+ out->push_back('.');
+ bp = Format64(ep, prec, frac_part);
+ while (ep[-1] == '0') --ep;
+ out->append(bp, ep - bp);
+ }
+ out->append(unit.abbr.data(), unit.abbr.size());
+ }
+}
+
+} // namespace
+
+// From Go's doc at https://golang.org/pkg/time/#Duration.String
+// [FormatDuration] returns a string representing the duration in the
+// form "72h3m0.5s". Leading zero units are omitted. As a special
+// case, durations less than one second are formatted using a smaller unit
+// (milli-, micro-, or nanoseconds) to ensure that the leading digit
+// is non-zero.
+// Unlike Go, we format the zero duration as 0, with no unit.
+TString FormatDuration(Duration d) {
+ const Duration min_duration = Seconds(kint64min);
+ if (d == min_duration) {
+ // Avoid needing to negate kint64min by directly returning what the
+ // following code should produce in that case.
+ return "-2562047788015215h30m8s";
+ }
+ TString s;
+ if (d < ZeroDuration()) {
+ s.append("-");
+ d = -d;
+ }
+ if (d == InfiniteDuration()) {
+ s.append("inf");
+ } else if (d < Seconds(1)) {
+ // Special case for durations with a magnitude < 1 second. The duration
+ // is printed as a fraction of a single unit, e.g., "1.2ms".
+ if (d < Microseconds(1)) {
+ AppendNumberUnit(&s, FDivDuration(d, Nanoseconds(1)), kDisplayNano);
+ } else if (d < Milliseconds(1)) {
+ AppendNumberUnit(&s, FDivDuration(d, Microseconds(1)), kDisplayMicro);
+ } else {
+ AppendNumberUnit(&s, FDivDuration(d, Milliseconds(1)), kDisplayMilli);
+ }
+ } else {
+ AppendNumberUnit(&s, IDivDuration(d, Hours(1), &d), kDisplayHour);
+ AppendNumberUnit(&s, IDivDuration(d, Minutes(1), &d), kDisplayMin);
+ AppendNumberUnit(&s, FDivDuration(d, Seconds(1)), kDisplaySec);
+ }
+ if (s.empty() || s == "-") {
+ s = "0";
+ }
+ return s;
+}
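+
+// For example (illustrative):
+//
+//   FormatDuration(Hours(72) + Minutes(3) + Milliseconds(500));  // "72h3m0.5s"
+//   FormatDuration(Microseconds(1200));                          // "1.2ms"
+//   FormatDuration(ZeroDuration());                              // "0"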
+
+namespace {
+
+// A helper for ParseDuration() that parses a leading number from the given
+// string and stores the result in *int_part/*frac_part/*frac_scale. The
+// given string pointer is modified to point to the first unconsumed char.
+bool ConsumeDurationNumber(const char** dpp, const char* ep, int64_t* int_part,
+ int64_t* frac_part, int64_t* frac_scale) {
+ *int_part = 0;
+ *frac_part = 0;
+ *frac_scale = 1; // invariant: *frac_part < *frac_scale
+ const char* start = *dpp;
+ for (; *dpp != ep; *dpp += 1) {
+ const int d = **dpp - '0'; // contiguous digits
+ if (d < 0 || 10 <= d) break;
+
+ if (*int_part > kint64max / 10) return false;
+ *int_part *= 10;
+ if (*int_part > kint64max - d) return false;
+ *int_part += d;
+ }
+ const bool int_part_empty = (*dpp == start);
+ if (*dpp == ep || **dpp != '.') return !int_part_empty;
+
+ for (*dpp += 1; *dpp != ep; *dpp += 1) {
+ const int d = **dpp - '0'; // contiguous digits
+ if (d < 0 || 10 <= d) break;
+ if (*frac_scale <= kint64max / 10) {
+ *frac_part *= 10;
+ *frac_part += d;
+ *frac_scale *= 10;
+ }
+ }
+ return !int_part_empty || *frac_scale != 1;
+}
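+
+// For example (illustrative): given "1.5h", ConsumeDurationNumber() consumes
+// "1.5" and leaves *dpp at 'h', with *int_part == 1, *frac_part == 5, and
+// *frac_scale == 10; the caller then forms
+// int_part * unit + frac_part * unit / frac_scale.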
+
+// A helper for ParseDuration() that parses a leading unit designator (e.g.,
+// ns, us, ms, s, m, h) from the given string and stores the resulting unit
+// in "*unit". The given string pointer is modified to point to the first
+// unconsumed char.
+bool ConsumeDurationUnit(const char** start, const char* end, Duration* unit) {
+ size_t size = end - *start;
+ switch (size) {
+ case 0:
+ return false;
+ default:
+ switch (**start) {
+ case 'n':
+ if (*(*start + 1) == 's') {
+ *start += 2;
+ *unit = Nanoseconds(1);
+ return true;
+ }
+ break;
+ case 'u':
+ if (*(*start + 1) == 's') {
+ *start += 2;
+ *unit = Microseconds(1);
+ return true;
+ }
+ break;
+ case 'm':
+ if (*(*start + 1) == 's') {
+ *start += 2;
+ *unit = Milliseconds(1);
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ ABSL_FALLTHROUGH_INTENDED;
+ case 1:
+ switch (**start) {
+ case 's':
+ *unit = Seconds(1);
+ *start += 1;
+ return true;
+ case 'm':
+ *unit = Minutes(1);
+ *start += 1;
+ return true;
+ case 'h':
+ *unit = Hours(1);
+ *start += 1;
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
+} // namespace
+
+// From Go's doc at https://golang.org/pkg/time/#ParseDuration
+// [ParseDuration] parses a duration string. A duration string is
+// a possibly signed sequence of decimal numbers, each with optional
+// fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m".
+// Valid time units are "ns", "us", "ms", "s", "m", "h".
+bool ParseDuration(y_absl::string_view dur_sv, Duration* d) {
+ int sign = 1;
+ if (y_absl::ConsumePrefix(&dur_sv, "-")) {
+ sign = -1;
+ } else {
+ y_absl::ConsumePrefix(&dur_sv, "+");
+ }
+ if (dur_sv.empty()) return false;
+
+ // Special case for a string of "0".
+ if (dur_sv == "0") {
+ *d = ZeroDuration();
+ return true;
+ }
+
+ if (dur_sv == "inf") {
+ *d = sign * InfiniteDuration();
+ return true;
+ }
+
+ const char* start = dur_sv.data();
+ const char* end = start + dur_sv.size();
+
+ Duration dur;
+ while (start != end) {
+ int64_t int_part;
+ int64_t frac_part;
+ int64_t frac_scale;
+ Duration unit;
+ if (!ConsumeDurationNumber(&start, end, &int_part, &frac_part,
+ &frac_scale) ||
+ !ConsumeDurationUnit(&start, end, &unit)) {
+ return false;
+ }
+ if (int_part != 0) dur += sign * int_part * unit;
+ if (frac_part != 0) dur += sign * frac_part * unit / frac_scale;
+ }
+ *d = dur;
+ return true;
+}
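+
+// For example (illustrative):
+//
+//   y_absl::Duration d;
+//   ParseDuration("2h45m", &d);  // true; d == Hours(2) + Minutes(45)
+//   ParseDuration("-1.5h", &d);  // true; d == -(Hours(1) + Minutes(30))
+//   ParseDuration("300x", &d);   // false; d is left untouched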
+
+bool AbslParseFlag(y_absl::string_view text, Duration* dst, TString*) {
+ return ParseDuration(text, dst);
+}
+
+TString AbslUnparseFlag(Duration d) { return FormatDuration(d); }
+bool ParseFlag(const TString& text, Duration* dst, TString* ) {
+ return ParseDuration(text, dst);
+}
+
+TString UnparseFlag(Duration d) { return FormatDuration(d); }
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/format.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/format.cc
new file mode 100644
index 00000000000..c75e4ec21a4
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/format.cc
@@ -0,0 +1,160 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string.h>
+
+#include <cctype>
+#include <cstdint>
+
+#include "y_absl/strings/match.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/time/internal/cctz/include/cctz/time_zone.h"
+#include "y_absl/time/time.h"
+
+namespace cctz = y_absl::time_internal::cctz;
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+ABSL_DLL extern const char RFC3339_full[] = "%Y-%m-%d%ET%H:%M:%E*S%Ez";
+ABSL_DLL extern const char RFC3339_sec[] = "%Y-%m-%d%ET%H:%M:%S%Ez";
+
+ABSL_DLL extern const char RFC1123_full[] = "%a, %d %b %E4Y %H:%M:%S %z";
+ABSL_DLL extern const char RFC1123_no_wday[] = "%d %b %E4Y %H:%M:%S %z";
+
+namespace {
+
+const char kInfiniteFutureStr[] = "infinite-future";
+const char kInfinitePastStr[] = "infinite-past";
+
+struct cctz_parts {
+ cctz::time_point<cctz::seconds> sec;
+ cctz::detail::femtoseconds fem;
+};
+
+inline cctz::time_point<cctz::seconds> unix_epoch() {
+ return std::chrono::time_point_cast<cctz::seconds>(
+ std::chrono::system_clock::from_time_t(0));
+}
+
+// Splits a Time into seconds and femtoseconds, which can be used with CCTZ.
+// Requires that 't' is finite. See duration.cc for details about rep_hi and
+// rep_lo.
+cctz_parts Split(y_absl::Time t) {
+ const auto d = time_internal::ToUnixDuration(t);
+ const int64_t rep_hi = time_internal::GetRepHi(d);
+ const int64_t rep_lo = time_internal::GetRepLo(d);
+ const auto sec = unix_epoch() + cctz::seconds(rep_hi);
+ const auto fem = cctz::detail::femtoseconds(rep_lo * (1000 * 1000 / 4));
+ return {sec, fem};
+}
+
+// Joins the given seconds and femtoseconds into a Time. See duration.cc for
+// details about rep_hi and rep_lo.
+y_absl::Time Join(const cctz_parts& parts) {
+ const int64_t rep_hi = (parts.sec - unix_epoch()).count();
+ const uint32_t rep_lo = parts.fem.count() / (1000 * 1000 / 4);
+ const auto d = time_internal::MakeDuration(rep_hi, rep_lo);
+ return time_internal::FromUnixDuration(d);
+}
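+
+// (Illustrative note: one rep_lo tick is a quarter of a nanosecond, i.e.
+// 250,000 femtoseconds, which is the 1000 * 1000 / 4 factor used by Split()
+// and Join() above.)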
+
+} // namespace
+
+TString FormatTime(y_absl::string_view format, y_absl::Time t,
+ y_absl::TimeZone tz) {
+ if (t == y_absl::InfiniteFuture()) return TString(kInfiniteFutureStr);
+ if (t == y_absl::InfinitePast()) return TString(kInfinitePastStr);
+ const auto parts = Split(t);
+ return cctz::detail::format(TString(format), parts.sec, parts.fem,
+ cctz::time_zone(tz));
+}
+
+TString FormatTime(y_absl::Time t, y_absl::TimeZone tz) {
+ return FormatTime(RFC3339_full, t, tz);
+}
+
+TString FormatTime(y_absl::Time t) {
+ return y_absl::FormatTime(RFC3339_full, t, y_absl::LocalTimeZone());
+}
+
+bool ParseTime(y_absl::string_view format, y_absl::string_view input,
+ y_absl::Time* time, TString* err) {
+ return y_absl::ParseTime(format, input, y_absl::UTCTimeZone(), time, err);
+}
+
+// If the input string does not contain an explicit UTC offset, interpret
+// the fields with respect to the given TimeZone.
+bool ParseTime(y_absl::string_view format, y_absl::string_view input,
+ y_absl::TimeZone tz, y_absl::Time* time, TString* err) {
+ auto strip_leading_space = [](y_absl::string_view* sv) {
+ while (!sv->empty()) {
+ if (!std::isspace(sv->front())) return;
+ sv->remove_prefix(1);
+ }
+ };
+
+  // Portable toolchains mean we don't get nice constexpr here.
+ struct Literal {
+ const char* name;
+ size_t size;
+ y_absl::Time value;
+ };
+ static Literal literals[] = {
+ {kInfiniteFutureStr, strlen(kInfiniteFutureStr), InfiniteFuture()},
+ {kInfinitePastStr, strlen(kInfinitePastStr), InfinitePast()},
+ };
+ strip_leading_space(&input);
+ for (const auto& lit : literals) {
+ if (y_absl::StartsWith(input, y_absl::string_view(lit.name, lit.size))) {
+ y_absl::string_view tail = input;
+ tail.remove_prefix(lit.size);
+ strip_leading_space(&tail);
+ if (tail.empty()) {
+ *time = lit.value;
+ return true;
+ }
+ }
+ }
+
+ TString error;
+ cctz_parts parts;
+ const bool b =
+ cctz::detail::parse(TString(format), TString(input),
+ cctz::time_zone(tz), &parts.sec, &parts.fem, &error);
+ if (b) {
+ *time = Join(parts);
+ } else if (err != nullptr) {
+ *err = error;
+ }
+ return b;
+}
+
+// Functions required to support y_absl::Time flags.
+bool AbslParseFlag(y_absl::string_view text, y_absl::Time* t, TString* error) {
+ return y_absl::ParseTime(RFC3339_full, text, y_absl::UTCTimeZone(), t, error);
+}
+
+TString AbslUnparseFlag(y_absl::Time t) {
+ return y_absl::FormatTime(RFC3339_full, t, y_absl::UTCTimeZone());
+}
+bool ParseFlag(const TString& text, y_absl::Time* t, TString* error) {
+ return y_absl::ParseTime(RFC3339_full, text, y_absl::UTCTimeZone(), t, error);
+}
+
+TString UnparseFlag(y_absl::Time t) {
+ return y_absl::FormatTime(RFC3339_full, t, y_absl::UTCTimeZone());
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time.h
new file mode 100644
index 00000000000..be60a54dbe1
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time.h
@@ -0,0 +1,332 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_H_
+#define ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_H_
+
+#include "y_absl/base/config.h"
+#include "y_absl/time/internal/cctz/include/cctz/civil_time_detail.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// The term "civil time" refers to the legally recognized human-scale time
+// that is represented by the six fields YYYY-MM-DD hh:mm:ss. Modern-day civil
+// time follows the Gregorian Calendar and is a time-zone-independent concept.
+// A "date" is perhaps the most common example of a civil time (represented in
+// this library as cctz::civil_day). This library provides six classes and a
+// handful of functions that help with rounding, iterating, and arithmetic on
+// civil times while avoiding complications like daylight-saving time (DST).
+//
+// The following six classes form the core of this civil-time library:
+//
+// * civil_second
+// * civil_minute
+// * civil_hour
+// * civil_day
+// * civil_month
+// * civil_year
+//
+// Each class is a simple value type with the same interface for construction
+// and the same six accessors for each of the civil fields (year, month, day,
+// hour, minute, and second, aka YMDHMS). These classes differ only in their
+// alignment, which is indicated by the type name and specifies the field on
+// which arithmetic operates.
+//
+// Each class can be constructed by passing up to six optional integer
+// arguments representing the YMDHMS fields (in that order) to the
+// constructor. Omitted fields are assigned their minimum valid value. Hours,
+// minutes, and seconds will be set to 0, month and day will be set to 1, and
+// since there is no minimum valid year, it will be set to 1970. So, a
+// default-constructed civil-time object will have YMDHMS fields representing
+// "1970-01-01 00:00:00". Fields that are out-of-range are normalized (e.g.,
+// October 32 -> November 1) so that all civil-time objects represent valid
+// values.
+//
+// Each civil-time class is aligned to the civil-time field indicated in the
+// class's name after normalization. Alignment is performed by setting all the
+// inferior fields to their minimum valid value (as described above). The
+// following are examples of how each of the six types would align the fields
+// representing November 22, 2015 at 12:34:56 in the afternoon. (Note: the
+// string format used here is not important; it's just a shorthand way of
+// showing the six YMDHMS fields.)
+//
+// civil_second 2015-11-22 12:34:56
+// civil_minute 2015-11-22 12:34:00
+// civil_hour 2015-11-22 12:00:00
+// civil_day 2015-11-22 00:00:00
+// civil_month 2015-11-01 00:00:00
+// civil_year 2015-01-01 00:00:00
+//
+// Each civil-time type performs arithmetic on the field to which it is
+// aligned. This means that adding 1 to a civil_day increments the day field
+// (normalizing as necessary), and subtracting 7 from a civil_month operates
+// on the month field (normalizing as necessary). All arithmetic produces a
+// valid civil time. Difference requires two similarly aligned civil-time
+// objects and returns the scalar answer in units of the objects' alignment.
+// For example, the difference between two civil_hour objects will give an
+// answer in units of civil hours.
+//
+// In addition to the six civil-time types just described, there are
+// a handful of helper functions and algorithms for performing common
+// calculations. These are described below.
+//
+// Note: In C++14 and later, this library is usable in a constexpr context.
+//
+// CONSTRUCTION:
+//
+// Each of the civil-time types can be constructed in two ways: by directly
+// passing to the constructor up to six (optional) integers representing the
+// YMDHMS fields, or by copying the YMDHMS fields from a differently aligned
+// civil-time type.
+//
+// civil_day default_value; // 1970-01-01 00:00:00
+//
+// civil_day a(2015, 2, 3); // 2015-02-03 00:00:00
+// civil_day b(2015, 2, 3, 4, 5, 6); // 2015-02-03 00:00:00
+// civil_day c(2015); // 2015-01-01 00:00:00
+//
+// civil_second ss(2015, 2, 3, 4, 5, 6); // 2015-02-03 04:05:06
+// civil_minute mm(ss); // 2015-02-03 04:05:00
+// civil_hour hh(mm); // 2015-02-03 04:00:00
+// civil_day d(hh); // 2015-02-03 00:00:00
+// civil_month m(d); // 2015-02-01 00:00:00
+// civil_year y(m); // 2015-01-01 00:00:00
+//
+// m = civil_month(y); // 2015-01-01 00:00:00
+// d = civil_day(m); // 2015-01-01 00:00:00
+// hh = civil_hour(d); // 2015-01-01 00:00:00
+// mm = civil_minute(hh); // 2015-01-01 00:00:00
+// ss = civil_second(mm); // 2015-01-01 00:00:00
+//
+// ALIGNMENT CONVERSION:
+//
+// The alignment of a civil-time object cannot change, but the object may be
+// used to construct a new object with a different alignment. This is referred
+// to as "realigning". When realigning to a type with the same or more
+// precision (e.g., civil_day -> civil_second), the conversion may be
+// performed implicitly since no information is lost. However, if information
+// could be discarded (e.g., civil_second -> civil_day), the conversion must
+// be explicit at the call site.
+//
+// void fun(const civil_day& day);
+//
+// civil_second cs;
+// fun(cs); // Won't compile because data may be discarded
+// fun(civil_day(cs)); // OK: explicit conversion
+//
+// civil_day cd;
+// fun(cd); // OK: no conversion needed
+//
+// civil_month cm;
+// fun(cm); // OK: implicit conversion to civil_day
+//
+// NORMALIZATION:
+//
+// Integer arguments passed to the constructor may be out-of-range, in which
+// case they are normalized to produce a valid civil-time object. This enables
+// natural arithmetic on constructor arguments without worrying about the
+// field's range. Normalization guarantees that there are no invalid
+// civil-time objects.
+//
+// civil_day d(2016, 10, 32); // Out-of-range day; normalized to 2016-11-01
+//
+// Note: If normalization is undesired, you can signal an error by comparing
+// the constructor arguments to the normalized values returned by the YMDHMS
+// properties.
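+//
+// For example, one (illustrative) way to apply that check:
+//
+//   const int y = 2016, m = 10, dd = 32;
+//   const civil_day day(y, m, dd);
+//   if (day.year() != y || day.month() != m || day.day() != dd) {
+//     // The arguments were out-of-range and have been normalized.
+//   }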
+//
+// PROPERTIES:
+//
+// All civil-time types have accessors for all six of the civil-time fields:
+// year, month, day, hour, minute, and second. Recall that fields inferior to
+// the type's alignment will be set to their minimum valid value.
+//
+// civil_day d(2015, 6, 28);
+// // d.year() == 2015
+// // d.month() == 6
+// // d.day() == 28
+// // d.hour() == 0
+// // d.minute() == 0
+// // d.second() == 0
+//
+// COMPARISON:
+//
+// Comparison always considers all six YMDHMS fields, regardless of the type's
+// alignment. Comparison between differently aligned civil-time types is
+// allowed.
+//
+// civil_day feb_3(2015, 2, 3); // 2015-02-03 00:00:00
+// civil_day mar_4(2015, 3, 4); // 2015-03-04 00:00:00
+// // feb_3 < mar_4
+// // civil_year(feb_3) == civil_year(mar_4)
+//
+// civil_second feb_3_noon(2015, 2, 3, 12, 0, 0); // 2015-02-03 12:00:00
+// // feb_3 < feb_3_noon
+// // feb_3 == civil_day(feb_3_noon)
+//
+// // Iterates all the days of February 2015.
+// for (civil_day d(2015, 2, 1); d < civil_month(2015, 3); ++d) {
+// // ...
+// }
+//
+// STREAMING:
+//
+// Each civil-time type may be sent to an output stream using operator<<().
+// The output format follows the pattern "YYYY-MM-DDThh:mm:ss" where fields
+// inferior to the type's alignment are omitted.
+//
+// civil_second cs(2015, 2, 3, 4, 5, 6);
+// std::cout << cs << "\n"; // Outputs: 2015-02-03T04:05:06
+//
+// civil_day cd(cs);
+// std::cout << cd << "\n"; // Outputs: 2015-02-03
+//
+// civil_year cy(cs);
+// std::cout << cy << "\n"; // Outputs: 2015
+//
+// ARITHMETIC:
+//
+// Civil-time types support natural arithmetic operators such as addition,
+// subtraction, and difference. Arithmetic operates on the civil-time field
+// indicated in the type's name. Difference requires arguments with the same
+// alignment and returns the answer in units of the alignment.
+//
+// civil_day a(2015, 2, 3);
+// ++a; // 2015-02-04 00:00:00
+// --a; // 2015-02-03 00:00:00
+// civil_day b = a + 1; // 2015-02-04 00:00:00
+// civil_day c = 1 + b; // 2015-02-05 00:00:00
+// int n = c - a; // n = 2 (civil days)
+// int m = c - civil_month(c); // Won't compile: different types.
+//
+// EXAMPLE: Adding a month to January 31.
+//
+// One of the classic questions that arises when considering a civil-time
+// library (or a date library or a date/time library) is this: "What happens
+// when you add a month to January 31?" This is an interesting question
+// because there could be a number of possible answers:
+//
+// 1. March 3 (or 2 if a leap year). This may make sense if the operation
+// wants the equivalent of February 31.
+// 2. February 28 (or 29 if a leap year). This may make sense if the operation
+// wants the last day of January to go to the last day of February.
+// 3. Error. The caller may get some error, an exception, an invalid date
+// object, or maybe false is returned. This may make sense because there is
+// no single unambiguously correct answer to the question.
+//
+// Practically speaking, any answer that is not what the programmer intended
+// is the wrong answer.
+//
+// This civil-time library avoids the problem by making it impossible to ask
+// ambiguous questions. All civil-time objects are aligned to a particular
+// civil-field boundary (such as aligned to a year, month, day, hour, minute,
+// or second), and arithmetic operates on the field to which the object is
+// aligned. This means that in order to "add a month" the object must first be
+// aligned to a month boundary, which is equivalent to the first day of that
+// month.
+//
+// Of course, there are ways to compute an answer to the question at hand
+// using this civil-time library, but they require the programmer to be
+// explicit about the answer they expect. To illustrate, let's see how to
+// compute all three of the above possible answers to the question of
+// "Jan 31 plus 1 month":
+//
+// const civil_day d(2015, 1, 31);
+//
+// // Answer 1:
+// // Add 1 to the month field in the constructor, and rely on normalization.
+// const auto ans_normalized = civil_day(d.year(), d.month() + 1, d.day());
+// // ans_normalized == 2015-03-03 (aka Feb 31)
+//
+// // Answer 2:
+//   // Add 1 to the month field, capping to the end of the next month.
+// const auto next_month = civil_month(d) + 1;
+// const auto last_day_of_next_month = civil_day(next_month + 1) - 1;
+// const auto ans_capped = std::min(ans_normalized, last_day_of_next_month);
+// // ans_capped == 2015-02-28
+//
+// // Answer 3:
+// // Signal an error if the normalized answer is not in next month.
+// if (civil_month(ans_normalized) != next_month) {
+// // error, month overflow
+// }
+//
+using civil_year = detail::civil_year;
+using civil_month = detail::civil_month;
+using civil_day = detail::civil_day;
+using civil_hour = detail::civil_hour;
+using civil_minute = detail::civil_minute;
+using civil_second = detail::civil_second;
+
+// An enum class with members monday, tuesday, wednesday, thursday, friday,
+// saturday, and sunday. These enum values may be sent to an output stream
+// using operator<<(). The result is the full weekday name in English with a
+// leading capital letter.
+//
+// weekday wd = weekday::thursday;
+// std::cout << wd << "\n"; // Outputs: Thursday
+//
+using detail::weekday;
+
+// Returns the weekday for the given civil-time value.
+//
+// civil_day a(2015, 8, 13);
+// weekday wd = get_weekday(a); // wd == weekday::thursday
+//
+using detail::get_weekday;
+
+// Returns the civil_day that strictly follows or precedes the given
+// civil_day, and that falls on the given weekday.
+//
+// For example, given:
+//
+// August 2015
+// Su Mo Tu We Th Fr Sa
+// 1
+// 2 3 4 5 6 7 8
+// 9 10 11 12 13 14 15
+// 16 17 18 19 20 21 22
+// 23 24 25 26 27 28 29
+// 30 31
+//
+// civil_day a(2015, 8, 13); // get_weekday(a) == weekday::thursday
+// civil_day b = next_weekday(a, weekday::thursday); // b = 2015-08-20
+// civil_day c = prev_weekday(a, weekday::thursday); // c = 2015-08-06
+//
+// civil_day d = ...
+// // Gets the following Thursday if d is not already Thursday
+// civil_day thurs1 = next_weekday(d - 1, weekday::thursday);
+// // Gets the previous Thursday if d is not already Thursday
+// civil_day thurs2 = prev_weekday(d + 1, weekday::thursday);
+//
+using detail::next_weekday;
+using detail::prev_weekday;
+
+// Returns the day-of-year for the given civil-time value.
+//
+// civil_day a(2015, 1, 1);
+// int yd_jan_1 = get_yearday(a); // yd_jan_1 = 1
+// civil_day b(2015, 12, 31);
+// int yd_dec_31 = get_yearday(b); // yd_dec_31 = 365
+//
+using detail::get_yearday;
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time_detail.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time_detail.h
new file mode 100644
index 00000000000..0e71a7cd332
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time_detail.h
@@ -0,0 +1,628 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_DETAIL_H_
+#define ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_DETAIL_H_
+
+#include <cstdint>
+#include <limits>
+#include <ostream>
+#include <type_traits>
+
+#include "y_absl/base/config.h"
+
+// Disable constexpr support unless we are in C++14 mode.
+#if __cpp_constexpr >= 201304 || (defined(_MSC_VER) && _MSC_VER >= 1910)
+#define CONSTEXPR_D constexpr // data
+#define CONSTEXPR_F constexpr // function
+#define CONSTEXPR_M constexpr // member
+#else
+#define CONSTEXPR_D const
+#define CONSTEXPR_F inline
+#define CONSTEXPR_M
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// Support years that at least span the range of 64-bit time_t values.
+using year_t = std::int_fast64_t;
+
+// Type alias that indicates an argument is not normalized (e.g., the
+// constructor parameters and operands/results of addition/subtraction).
+using diff_t = std::int_fast64_t;
+
+namespace detail {
+
+// Type aliases that indicate normalized argument values.
+using month_t = std::int_fast8_t; // [1:12]
+using day_t = std::int_fast8_t; // [1:31]
+using hour_t = std::int_fast8_t; // [0:23]
+using minute_t = std::int_fast8_t; // [0:59]
+using second_t = std::int_fast8_t; // [0:59]
+
+// Normalized civil-time fields: Y-M-D HH:MM:SS.
+struct fields {
+ CONSTEXPR_M fields(year_t year, month_t month, day_t day, hour_t hour,
+ minute_t minute, second_t second)
+ : y(year), m(month), d(day), hh(hour), mm(minute), ss(second) {}
+ std::int_least64_t y;
+ std::int_least8_t m;
+ std::int_least8_t d;
+ std::int_least8_t hh;
+ std::int_least8_t mm;
+ std::int_least8_t ss;
+};
+
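+// Alignment tags for the civil_time class template defined below. Each
+// coarser tag derives from the next finer one, so std::is_base_of<Fine,
+// Coarse> holds; civil_time uses this (via preserves_data) to permit
+// implicit conversions only toward finer, information-preserving
+// alignments.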
+struct second_tag {};
+struct minute_tag : second_tag {};
+struct hour_tag : minute_tag {};
+struct day_tag : hour_tag {};
+struct month_tag : day_tag {};
+struct year_tag : month_tag {};
+
+////////////////////////////////////////////////////////////////////////
+
+// Field normalization (without avoidable overflow).
+
+namespace impl {
+
+CONSTEXPR_F bool is_leap_year(year_t y) noexcept {
+ return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
+}
+CONSTEXPR_F int year_index(year_t y, month_t m) noexcept {
+ return (static_cast<int>((y + (m > 2)) % 400) + 400) % 400;
+}
+CONSTEXPR_F int days_per_century(year_t y, month_t m) noexcept {
+ const int yi = year_index(y, m);
+ return 36524 + (yi == 0 || yi > 300);
+}
+CONSTEXPR_F int days_per_4years(year_t y, month_t m) noexcept {
+ const int yi = year_index(y, m);
+ return 1460 + (yi == 0 || yi > 300 || (yi - 1) % 100 < 96);
+}
+CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept {
+ return is_leap_year(y + (m > 2)) ? 366 : 365;
+}
+CONSTEXPR_F int days_per_month(year_t y, month_t m) noexcept {
+ CONSTEXPR_D int k_days_per_month[1 + 12] = {
+      -1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 // non-leap year
+ };
+ return k_days_per_month[m] + (m == 2 && is_leap_year(y));
+}
+
+CONSTEXPR_F fields n_day(year_t y, month_t m, diff_t d, diff_t cd, hour_t hh,
+ minute_t mm, second_t ss) noexcept {
+ year_t ey = y % 400;
+ const year_t oey = ey;
+ ey += (cd / 146097) * 400;
+ cd %= 146097;
+ if (cd < 0) {
+ ey -= 400;
+ cd += 146097;
+ }
+ ey += (d / 146097) * 400;
+ d = d % 146097 + cd;
+ if (d > 0) {
+ if (d > 146097) {
+ ey += 400;
+ d -= 146097;
+ }
+ } else {
+ if (d > -365) {
+ // We often hit the previous year when stepping a civil time backwards,
+ // so special case it to avoid counting up by 100/4/1-year chunks.
+ ey -= 1;
+ d += days_per_year(ey, m);
+ } else {
+ ey -= 400;
+ d += 146097;
+ }
+ }
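+  // d is now a positive count of days. Peel off whole centuries, then
+  // whole 4-year spans, then whole years, and finally whole months,
+  // advancing ey and m as we go.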
+ if (d > 365) {
+ for (;;) {
+ int n = days_per_century(ey, m);
+ if (d <= n) break;
+ d -= n;
+ ey += 100;
+ }
+ for (;;) {
+ int n = days_per_4years(ey, m);
+ if (d <= n) break;
+ d -= n;
+ ey += 4;
+ }
+ for (;;) {
+ int n = days_per_year(ey, m);
+ if (d <= n) break;
+ d -= n;
+ ++ey;
+ }
+ }
+ if (d > 28) {
+ for (;;) {
+ int n = days_per_month(ey, m);
+ if (d <= n) break;
+ d -= n;
+ if (++m > 12) {
+ ++ey;
+ m = 1;
+ }
+ }
+ }
+ return fields(y + (ey - oey), m, static_cast<day_t>(d), hh, mm, ss);
+}
+CONSTEXPR_F fields n_mon(year_t y, diff_t m, diff_t d, diff_t cd, hour_t hh,
+ minute_t mm, second_t ss) noexcept {
+ if (m != 12) {
+ y += m / 12;
+ m %= 12;
+ if (m <= 0) {
+ y -= 1;
+ m += 12;
+ }
+ }
+ return n_day(y, static_cast<month_t>(m), d, cd, hh, mm, ss);
+}
+CONSTEXPR_F fields n_hour(year_t y, diff_t m, diff_t d, diff_t cd, diff_t hh,
+ minute_t mm, second_t ss) noexcept {
+ cd += hh / 24;
+ hh %= 24;
+ if (hh < 0) {
+ cd -= 1;
+ hh += 24;
+ }
+ return n_mon(y, m, d, cd, static_cast<hour_t>(hh), mm, ss);
+}
+CONSTEXPR_F fields n_min(year_t y, diff_t m, diff_t d, diff_t hh, diff_t ch,
+ diff_t mm, second_t ss) noexcept {
+ ch += mm / 60;
+ mm %= 60;
+ if (mm < 0) {
+ ch -= 1;
+ mm += 60;
+ }
+ return n_hour(y, m, d, hh / 24 + ch / 24, hh % 24 + ch % 24,
+ static_cast<minute_t>(mm), ss);
+}
+CONSTEXPR_F fields n_sec(year_t y, diff_t m, diff_t d, diff_t hh, diff_t mm,
+ diff_t ss) noexcept {
+ // Optimization for when (non-constexpr) fields are already normalized.
+ if (0 <= ss && ss < 60) {
+ const second_t nss = static_cast<second_t>(ss);
+ if (0 <= mm && mm < 60) {
+ const minute_t nmm = static_cast<minute_t>(mm);
+ if (0 <= hh && hh < 24) {
+ const hour_t nhh = static_cast<hour_t>(hh);
+ if (1 <= d && d <= 28 && 1 <= m && m <= 12) {
+ const day_t nd = static_cast<day_t>(d);
+ const month_t nm = static_cast<month_t>(m);
+ return fields(y, nm, nd, nhh, nmm, nss);
+ }
+ return n_mon(y, m, d, 0, nhh, nmm, nss);
+ }
+ return n_hour(y, m, d, hh / 24, hh % 24, nmm, nss);
+ }
+ return n_min(y, m, d, hh, mm / 60, mm % 60, nss);
+ }
+ diff_t cm = ss / 60;
+ ss %= 60;
+ if (ss < 0) {
+ cm -= 1;
+ ss += 60;
+ }
+ return n_min(y, m, d, hh, mm / 60 + cm / 60, mm % 60 + cm % 60,
+ static_cast<second_t>(ss));
+}
+
+} // namespace impl
+
+////////////////////////////////////////////////////////////////////////
+
+// Increments the indicated (normalized) field by "n".
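+// Each overload pre-splits "n" across two adjacent fields (e.g., n seconds
+// becomes n / 60 minutes plus n % 60 seconds) so the normalization helpers
+// see smaller intermediate values.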
+CONSTEXPR_F fields step(second_tag, fields f, diff_t n) noexcept {
+ return impl::n_sec(f.y, f.m, f.d, f.hh, f.mm + n / 60, f.ss + n % 60);
+}
+CONSTEXPR_F fields step(minute_tag, fields f, diff_t n) noexcept {
+ return impl::n_min(f.y, f.m, f.d, f.hh + n / 60, 0, f.mm + n % 60, f.ss);
+}
+CONSTEXPR_F fields step(hour_tag, fields f, diff_t n) noexcept {
+ return impl::n_hour(f.y, f.m, f.d + n / 24, 0, f.hh + n % 24, f.mm, f.ss);
+}
+CONSTEXPR_F fields step(day_tag, fields f, diff_t n) noexcept {
+ return impl::n_day(f.y, f.m, f.d, n, f.hh, f.mm, f.ss);
+}
+CONSTEXPR_F fields step(month_tag, fields f, diff_t n) noexcept {
+ return impl::n_mon(f.y + n / 12, f.m + n % 12, f.d, 0, f.hh, f.mm, f.ss);
+}
+CONSTEXPR_F fields step(year_tag, fields f, diff_t n) noexcept {
+ return fields(f.y + n, f.m, f.d, f.hh, f.mm, f.ss);
+}
+
+////////////////////////////////////////////////////////////////////////
+
+namespace impl {
+
+// Returns (v * f + a) but avoiding intermediate overflow when possible.
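+// It folds one unit of v into the addend so that the naive intermediate
+// v * f is never formed; this keeps the computation in range in cases
+// where v * f would overflow even though v * f + a is representable.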
+CONSTEXPR_F diff_t scale_add(diff_t v, diff_t f, diff_t a) noexcept {
+ return (v < 0) ? ((v + 1) * f + a) - f : ((v - 1) * f + a) + f;
+}
+
+// Map a (normalized) Y/M/D to the number of days before/after 1970-01-01.
+// Probably overflows for years outside [-292277022656:292277026595].
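+// The computation shifts to a March-based year (so any leap day is the
+// last day of the year), counts whole 400-year eras of 146097 days each,
+// adds the day-of-era, and re-bases the epoch from 0000-03-01 to
+// 1970-01-01 (the 719468-day offset).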
+CONSTEXPR_F diff_t ymd_ord(year_t y, month_t m, day_t d) noexcept {
+ const diff_t eyear = (m <= 2) ? y - 1 : y;
+ const diff_t era = (eyear >= 0 ? eyear : eyear - 399) / 400;
+ const diff_t yoe = eyear - era * 400;
+ const diff_t doy = (153 * (m + (m > 2 ? -3 : 9)) + 2) / 5 + d - 1;
+ const diff_t doe = yoe * 365 + yoe / 4 - yoe / 100 + doy;
+ return era * 146097 + doe - 719468;
+}
+
+// Returns the difference in days between two normalized Y-M-D tuples.
+// ymd_ord() will encounter integer overflow given extreme year values,
+// yet the difference between two such extreme values may actually be
+// small, so we take a little care to avoid overflow when possible by
+// exploiting the 146097-day cycle.
+CONSTEXPR_F diff_t day_difference(year_t y1, month_t m1, day_t d1, year_t y2,
+ month_t m2, day_t d2) noexcept {
+ const diff_t a_c4_off = y1 % 400;
+ const diff_t b_c4_off = y2 % 400;
+ diff_t c4_diff = (y1 - a_c4_off) - (y2 - b_c4_off);
+ diff_t delta = ymd_ord(a_c4_off, m1, d1) - ymd_ord(b_c4_off, m2, d2);
+ if (c4_diff > 0 && delta < 0) {
+ delta += 2 * 146097;
+ c4_diff -= 2 * 400;
+ } else if (c4_diff < 0 && delta > 0) {
+ delta -= 2 * 146097;
+ c4_diff += 2 * 400;
+ }
+ return (c4_diff / 400 * 146097) + delta;
+}
+
+} // namespace impl
+
+// Returns the difference between fields structs using the indicated unit.
+CONSTEXPR_F diff_t difference(year_tag, fields f1, fields f2) noexcept {
+ return f1.y - f2.y;
+}
+CONSTEXPR_F diff_t difference(month_tag, fields f1, fields f2) noexcept {
+ return impl::scale_add(difference(year_tag{}, f1, f2), 12, (f1.m - f2.m));
+}
+CONSTEXPR_F diff_t difference(day_tag, fields f1, fields f2) noexcept {
+ return impl::day_difference(f1.y, f1.m, f1.d, f2.y, f2.m, f2.d);
+}
+CONSTEXPR_F diff_t difference(hour_tag, fields f1, fields f2) noexcept {
+ return impl::scale_add(difference(day_tag{}, f1, f2), 24, (f1.hh - f2.hh));
+}
+CONSTEXPR_F diff_t difference(minute_tag, fields f1, fields f2) noexcept {
+ return impl::scale_add(difference(hour_tag{}, f1, f2), 60, (f1.mm - f2.mm));
+}
+CONSTEXPR_F diff_t difference(second_tag, fields f1, fields f2) noexcept {
+ return impl::scale_add(difference(minute_tag{}, f1, f2), 60, f1.ss - f2.ss);
+}
+
+////////////////////////////////////////////////////////////////////////
+
+// Aligns the (normalized) fields struct to the indicated field.
+CONSTEXPR_F fields align(second_tag, fields f) noexcept { return f; }
+CONSTEXPR_F fields align(minute_tag, fields f) noexcept {
+ return fields{f.y, f.m, f.d, f.hh, f.mm, 0};
+}
+CONSTEXPR_F fields align(hour_tag, fields f) noexcept {
+ return fields{f.y, f.m, f.d, f.hh, 0, 0};
+}
+CONSTEXPR_F fields align(day_tag, fields f) noexcept {
+ return fields{f.y, f.m, f.d, 0, 0, 0};
+}
+CONSTEXPR_F fields align(month_tag, fields f) noexcept {
+ return fields{f.y, f.m, 1, 0, 0, 0};
+}
+CONSTEXPR_F fields align(year_tag, fields f) noexcept {
+ return fields{f.y, 1, 1, 0, 0, 0};
+}
+
+////////////////////////////////////////////////////////////////////////
+
+namespace impl {
+
+template <typename H>
+H AbslHashValueImpl(second_tag, H h, fields f) {
+ return H::combine(std::move(h), f.y, f.m, f.d, f.hh, f.mm, f.ss);
+}
+template <typename H>
+H AbslHashValueImpl(minute_tag, H h, fields f) {
+ return H::combine(std::move(h), f.y, f.m, f.d, f.hh, f.mm);
+}
+template <typename H>
+H AbslHashValueImpl(hour_tag, H h, fields f) {
+ return H::combine(std::move(h), f.y, f.m, f.d, f.hh);
+}
+template <typename H>
+H AbslHashValueImpl(day_tag, H h, fields f) {
+ return H::combine(std::move(h), f.y, f.m, f.d);
+}
+template <typename H>
+H AbslHashValueImpl(month_tag, H h, fields f) {
+ return H::combine(std::move(h), f.y, f.m);
+}
+template <typename H>
+H AbslHashValueImpl(year_tag, H h, fields f) {
+ return H::combine(std::move(h), f.y);
+}
+
+} // namespace impl
+
+////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+class civil_time {
+ public:
+ explicit CONSTEXPR_M civil_time(year_t y, diff_t m = 1, diff_t d = 1,
+ diff_t hh = 0, diff_t mm = 0,
+ diff_t ss = 0) noexcept
+ : civil_time(impl::n_sec(y, m, d, hh, mm, ss)) {}
+
+ CONSTEXPR_M civil_time() noexcept : f_{1970, 1, 1, 0, 0, 0} {}
+ civil_time(const civil_time&) = default;
+ civil_time& operator=(const civil_time&) = default;
+
+ // Conversion between civil times of different alignment. Conversion to
+ // a more precise alignment is allowed implicitly (e.g., day -> hour),
+ // but conversion where information is discarded must be explicit
+ // (e.g., second -> minute).
+ template <typename U, typename S>
+ using preserves_data =
+ typename std::enable_if<std::is_base_of<U, S>::value>::type;
+ template <typename U>
+ CONSTEXPR_M civil_time(const civil_time<U>& ct,
+ preserves_data<T, U>* = nullptr) noexcept
+ : civil_time(ct.f_) {}
+ template <typename U>
+ explicit CONSTEXPR_M civil_time(const civil_time<U>& ct,
+ preserves_data<U, T>* = nullptr) noexcept
+ : civil_time(ct.f_) {}
+
+ // Factories for the maximum/minimum representable civil_time.
+ static CONSTEXPR_F civil_time(max)() {
+ const auto max_year = (std::numeric_limits<std::int_least64_t>::max)();
+ return civil_time(max_year, 12, 31, 23, 59, 59);
+ }
+ static CONSTEXPR_F civil_time(min)() {
+ const auto min_year = (std::numeric_limits<std::int_least64_t>::min)();
+ return civil_time(min_year, 1, 1, 0, 0, 0);
+ }
+
+ // Field accessors. Note: All but year() return an int.
+ CONSTEXPR_M year_t year() const noexcept { return f_.y; }
+ CONSTEXPR_M int month() const noexcept { return f_.m; }
+ CONSTEXPR_M int day() const noexcept { return f_.d; }
+ CONSTEXPR_M int hour() const noexcept { return f_.hh; }
+ CONSTEXPR_M int minute() const noexcept { return f_.mm; }
+ CONSTEXPR_M int second() const noexcept { return f_.ss; }
+
+ // Assigning arithmetic.
+ CONSTEXPR_M civil_time& operator+=(diff_t n) noexcept {
+ return *this = *this + n;
+ }
+ CONSTEXPR_M civil_time& operator-=(diff_t n) noexcept {
+ return *this = *this - n;
+ }
+ CONSTEXPR_M civil_time& operator++() noexcept { return *this += 1; }
+ CONSTEXPR_M civil_time operator++(int) noexcept {
+ const civil_time a = *this;
+ ++*this;
+ return a;
+ }
+ CONSTEXPR_M civil_time& operator--() noexcept { return *this -= 1; }
+ CONSTEXPR_M civil_time operator--(int) noexcept {
+ const civil_time a = *this;
+ --*this;
+ return a;
+ }
+
+ // Binary arithmetic operators.
+ friend CONSTEXPR_F civil_time operator+(civil_time a, diff_t n) noexcept {
+ return civil_time(step(T{}, a.f_, n));
+ }
+ friend CONSTEXPR_F civil_time operator+(diff_t n, civil_time a) noexcept {
+ return a + n;
+ }
+ friend CONSTEXPR_F civil_time operator-(civil_time a, diff_t n) noexcept {
+ return n != (std::numeric_limits<diff_t>::min)()
+ ? civil_time(step(T{}, a.f_, -n))
+ : civil_time(step(T{}, step(T{}, a.f_, -(n + 1)), 1));
+ }
+ friend CONSTEXPR_F diff_t operator-(civil_time lhs, civil_time rhs) noexcept {
+ return difference(T{}, lhs.f_, rhs.f_);
+ }
+
+ template <typename H>
+ friend H AbslHashValue(H h, civil_time a) {
+ return impl::AbslHashValueImpl(T{}, std::move(h), a.f_);
+ }
+
+ private:
+ // All instantiations of this template are allowed to call the following
+ // private constructor and access the private fields member.
+ template <typename U>
+ friend class civil_time;
+
+ // The designated constructor that all others eventually call.
+ explicit CONSTEXPR_M civil_time(fields f) noexcept : f_(align(T{}, f)) {}
+
+ fields f_;
+};
+
+// Disallows difference between differently aligned types.
+// auto n = civil_day(...) - civil_hour(...); // would be confusing.
+template <typename T, typename U>
+CONSTEXPR_F diff_t operator-(civil_time<T>, civil_time<U>) = delete;
+
+using civil_year = civil_time<year_tag>;
+using civil_month = civil_time<month_tag>;
+using civil_day = civil_time<day_tag>;
+using civil_hour = civil_time<hour_tag>;
+using civil_minute = civil_time<minute_tag>;
+using civil_second = civil_time<second_tag>;
+
+////////////////////////////////////////////////////////////////////////
+
+// Relational operators that work with differently aligned objects.
+// Always compares all six fields.
+template <typename T1, typename T2>
+CONSTEXPR_F bool operator<(const civil_time<T1>& lhs,
+ const civil_time<T2>& rhs) noexcept {
+ return (
+ lhs.year() < rhs.year() ||
+ (lhs.year() == rhs.year() &&
+ (lhs.month() < rhs.month() ||
+ (lhs.month() == rhs.month() &&
+ (lhs.day() < rhs.day() || (lhs.day() == rhs.day() &&
+ (lhs.hour() < rhs.hour() ||
+ (lhs.hour() == rhs.hour() &&
+ (lhs.minute() < rhs.minute() ||
+ (lhs.minute() == rhs.minute() &&
+ (lhs.second() < rhs.second())))))))))));
+}
+template <typename T1, typename T2>
+CONSTEXPR_F bool operator<=(const civil_time<T1>& lhs,
+ const civil_time<T2>& rhs) noexcept {
+ return !(rhs < lhs);
+}
+template <typename T1, typename T2>
+CONSTEXPR_F bool operator>=(const civil_time<T1>& lhs,
+ const civil_time<T2>& rhs) noexcept {
+ return !(lhs < rhs);
+}
+template <typename T1, typename T2>
+CONSTEXPR_F bool operator>(const civil_time<T1>& lhs,
+ const civil_time<T2>& rhs) noexcept {
+ return rhs < lhs;
+}
+template <typename T1, typename T2>
+CONSTEXPR_F bool operator==(const civil_time<T1>& lhs,
+ const civil_time<T2>& rhs) noexcept {
+ return lhs.year() == rhs.year() && lhs.month() == rhs.month() &&
+ lhs.day() == rhs.day() && lhs.hour() == rhs.hour() &&
+ lhs.minute() == rhs.minute() && lhs.second() == rhs.second();
+}
+template <typename T1, typename T2>
+CONSTEXPR_F bool operator!=(const civil_time<T1>& lhs,
+ const civil_time<T2>& rhs) noexcept {
+ return !(lhs == rhs);
+}
+
+////////////////////////////////////////////////////////////////////////
+
+enum class weekday {
+ monday,
+ tuesday,
+ wednesday,
+ thursday,
+ friday,
+ saturday,
+ sunday,
+};
+
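+// Computes the day of the week using Sakamoto's congruence method, mapped
+// into a fixed 400-year Gregorian cycle (2400 + year % 400) so the
+// arithmetic stays small and handles negative years as well.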
+CONSTEXPR_F weekday get_weekday(const civil_second& cs) noexcept {
+ CONSTEXPR_D weekday k_weekday_by_mon_off[13] = {
+ weekday::monday, weekday::tuesday, weekday::wednesday,
+ weekday::thursday, weekday::friday, weekday::saturday,
+ weekday::sunday, weekday::monday, weekday::tuesday,
+ weekday::wednesday, weekday::thursday, weekday::friday,
+ weekday::saturday,
+ };
+ CONSTEXPR_D int k_weekday_offsets[1 + 12] = {
+ -1, 0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4,
+ };
+ year_t wd = 2400 + (cs.year() % 400) - (cs.month() < 3);
+ wd += wd / 4 - wd / 100 + wd / 400;
+ wd += k_weekday_offsets[cs.month()] + cs.day();
+ return k_weekday_by_mon_off[wd % 7 + 6];
+}
+
+////////////////////////////////////////////////////////////////////////
+
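+// next_weekday() and prev_weekday() scan a doubled weekday table so the
+// search can wrap past the end of the week without modular arithmetic;
+// the gap (j - i) between the base day and the target day is the number
+// of days to step.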
+CONSTEXPR_F civil_day next_weekday(civil_day cd, weekday wd) noexcept {
+ CONSTEXPR_D weekday k_weekdays_forw[14] = {
+ weekday::monday, weekday::tuesday, weekday::wednesday,
+ weekday::thursday, weekday::friday, weekday::saturday,
+ weekday::sunday, weekday::monday, weekday::tuesday,
+ weekday::wednesday, weekday::thursday, weekday::friday,
+ weekday::saturday, weekday::sunday,
+ };
+ weekday base = get_weekday(cd);
+ for (int i = 0;; ++i) {
+ if (base == k_weekdays_forw[i]) {
+ for (int j = i + 1;; ++j) {
+ if (wd == k_weekdays_forw[j]) {
+ return cd + (j - i);
+ }
+ }
+ }
+ }
+}
+
+CONSTEXPR_F civil_day prev_weekday(civil_day cd, weekday wd) noexcept {
+ CONSTEXPR_D weekday k_weekdays_back[14] = {
+ weekday::sunday, weekday::saturday, weekday::friday,
+ weekday::thursday, weekday::wednesday, weekday::tuesday,
+ weekday::monday, weekday::sunday, weekday::saturday,
+ weekday::friday, weekday::thursday, weekday::wednesday,
+ weekday::tuesday, weekday::monday,
+ };
+ weekday base = get_weekday(cd);
+ for (int i = 0;; ++i) {
+ if (base == k_weekdays_back[i]) {
+ for (int j = i + 1;; ++j) {
+ if (wd == k_weekdays_back[j]) {
+ return cd - (j - i);
+ }
+ }
+ }
+ }
+}
+
+CONSTEXPR_F int get_yearday(const civil_second& cs) noexcept {
+ CONSTEXPR_D int k_month_offsets[1 + 12] = {
+ -1, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334,
+ };
+ const int feb29 = (cs.month() > 2 && impl::is_leap_year(cs.year()));
+ return k_month_offsets[cs.month()] + feb29 + cs.day();
+}
+
+////////////////////////////////////////////////////////////////////////
+
+std::ostream& operator<<(std::ostream& os, const civil_year& y);
+std::ostream& operator<<(std::ostream& os, const civil_month& m);
+std::ostream& operator<<(std::ostream& os, const civil_day& d);
+std::ostream& operator<<(std::ostream& os, const civil_hour& h);
+std::ostream& operator<<(std::ostream& os, const civil_minute& m);
+std::ostream& operator<<(std::ostream& os, const civil_second& s);
+std::ostream& operator<<(std::ostream& os, weekday wd);
+
+} // namespace detail
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#undef CONSTEXPR_M
+#undef CONSTEXPR_F
+#undef CONSTEXPR_D
+
+#endif // ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_DETAIL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/time_zone.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/time_zone.h
new file mode 100644
index 00000000000..524654df4b2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/time_zone.h
@@ -0,0 +1,459 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A library for translating between absolute times (represented by
+// std::chrono::time_points of the std::chrono::system_clock) and civil
+// times (represented by cctz::civil_second) using the rules defined by
+// a time zone (cctz::time_zone).
+
+#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_H_
+#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_H_
+
+#include <chrono>
+#include <cstdint>
+#include <limits>
+#include <util/generic/string.h>
+#include <utility>
+
+#include "y_absl/base/config.h"
+#include "y_absl/time/internal/cctz/include/cctz/civil_time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// Convenience aliases. Not intended as public API points.
+template <typename D>
+using time_point = std::chrono::time_point<std::chrono::system_clock, D>;
+using seconds = std::chrono::duration<std::int_fast64_t>;
+using sys_seconds = seconds; // Deprecated. Use cctz::seconds instead.
+
+namespace detail {
+template <typename D>
+std::pair<time_point<seconds>, D> split_seconds(const time_point<D>& tp);
+std::pair<time_point<seconds>, seconds> split_seconds(
+ const time_point<seconds>& tp);
+} // namespace detail
+
+// cctz::time_zone is an opaque, small, value-type class representing a
+// geo-political region within which particular rules are used for mapping
+// between absolute and civil times. Time zones are named using the TZ
+// identifiers from the IANA Time Zone Database, such as "America/Los_Angeles"
+// or "Australia/Sydney". Time zones are created from factory functions such
+// as load_time_zone(). Note: strings like "PST" and "EDT" are not valid TZ
+// identifiers.
+//
+// Example:
+// cctz::time_zone utc = cctz::utc_time_zone();
+// cctz::time_zone pst = cctz::fixed_time_zone(std::chrono::hours(-8));
+// cctz::time_zone loc = cctz::local_time_zone();
+// cctz::time_zone lax;
+// if (!cctz::load_time_zone("America/Los_Angeles", &lax)) { ... }
+//
+// See also:
+// - http://www.iana.org/time-zones
+// - https://en.wikipedia.org/wiki/Zoneinfo
+class time_zone {
+ public:
+ time_zone() : time_zone(nullptr) {} // Equivalent to UTC
+ time_zone(const time_zone&) = default;
+ time_zone& operator=(const time_zone&) = default;
+
+ TString name() const;
+
+ // An absolute_lookup represents the civil time (cctz::civil_second) within
+ // this time_zone at the given absolute time (time_point). There are
+ // additionally a few other fields that may be useful when working with
+ // older APIs, such as std::tm.
+ //
+ // Example:
+ // const cctz::time_zone tz = ...
+ // const auto tp = std::chrono::system_clock::now();
+ // const cctz::time_zone::absolute_lookup al = tz.lookup(tp);
+ struct absolute_lookup {
+ civil_second cs;
+ // Note: The following fields exist for backward compatibility with older
+ // APIs. Accessing these fields directly is a sign of imprudent logic in
+ // the calling code. Modern time-related code should only access this data
+ // indirectly by way of cctz::format().
+ int offset; // civil seconds east of UTC
+ bool is_dst; // is offset non-standard?
+ const char* abbr; // time-zone abbreviation (e.g., "PST")
+ };
+ absolute_lookup lookup(const time_point<seconds>& tp) const;
+ template <typename D>
+ absolute_lookup lookup(const time_point<D>& tp) const {
+ return lookup(detail::split_seconds(tp).first);
+ }
+
+ // A civil_lookup represents the absolute time(s) (time_point) that
+ // correspond to the given civil time (cctz::civil_second) within this
+ // time_zone. Usually the given civil time represents a unique instant
+ // in time, in which case the conversion is unambiguous. However,
+ // within this time zone, the given civil time may be skipped (e.g.,
+ // during a positive UTC offset shift), or repeated (e.g., during a
+ // negative UTC offset shift). To account for these possibilities,
+ // civil_lookup is richer than just a single time_point.
+ //
+ // In all cases the civil_lookup::kind enum will indicate the nature
+ // of the given civil-time argument, and the pre, trans, and post
+ // members will give the absolute time answers using the pre-transition
+ // offset, the transition point itself, and the post-transition offset,
+ // respectively (all three times are equal if kind == UNIQUE). If any
+ // of these three absolute times is outside the representable range of a
+ // time_point<seconds> the field is set to its maximum/minimum value.
+ //
+ // Example:
+ // cctz::time_zone lax;
+ // if (!cctz::load_time_zone("America/Los_Angeles", &lax)) { ... }
+ //
+ // // A unique civil time.
+ // auto jan01 = lax.lookup(cctz::civil_second(2011, 1, 1, 0, 0, 0));
+ // // jan01.kind == cctz::time_zone::civil_lookup::UNIQUE
+ // // jan01.pre is 2011/01/01 00:00:00 -0800
+ // // jan01.trans is 2011/01/01 00:00:00 -0800
+ // // jan01.post is 2011/01/01 00:00:00 -0800
+ //
+ // // A Spring DST transition, when there is a gap in civil time.
+ // auto mar13 = lax.lookup(cctz::civil_second(2011, 3, 13, 2, 15, 0));
+ // // mar13.kind == cctz::time_zone::civil_lookup::SKIPPED
+ // // mar13.pre is 2011/03/13 03:15:00 -0700
+ // // mar13.trans is 2011/03/13 03:00:00 -0700
+ // // mar13.post is 2011/03/13 01:15:00 -0800
+ //
+ // // A Fall DST transition, when civil times are repeated.
+ // auto nov06 = lax.lookup(cctz::civil_second(2011, 11, 6, 1, 15, 0));
+ // // nov06.kind == cctz::time_zone::civil_lookup::REPEATED
+ // // nov06.pre is 2011/11/06 01:15:00 -0700
+ // // nov06.trans is 2011/11/06 01:00:00 -0800
+ // // nov06.post is 2011/11/06 01:15:00 -0800
+ struct civil_lookup {
+ enum civil_kind {
+ UNIQUE, // the civil time was singular (pre == trans == post)
+ SKIPPED, // the civil time did not exist (pre >= trans > post)
+ REPEATED, // the civil time was ambiguous (pre < trans <= post)
+ } kind;
+ time_point<seconds> pre; // uses the pre-transition offset
+ time_point<seconds> trans; // instant of civil-offset change
+ time_point<seconds> post; // uses the post-transition offset
+ };
+ civil_lookup lookup(const civil_second& cs) const;
+
+ // Finds the time of the next/previous offset change in this time zone.
+ //
+ // By definition, next_transition(tp, &trans) returns false when tp has
+ // its maximum value, and prev_transition(tp, &trans) returns false
+ // when tp has its minimum value. If the zone has no transitions, the
+// result will also be false regardless of the argument.
+ //
+ // Otherwise, when tp has its minimum value, next_transition(tp, &trans)
+ // returns true and sets trans to the first recorded transition. Chains
+ // of calls to next_transition()/prev_transition() will eventually return
+ // false, but it is unspecified exactly when next_transition(tp, &trans)
+ // jumps to false, or what time is set by prev_transition(tp, &trans) for
+ // a very distant tp.
+ //
+ // Note: Enumeration of time-zone transitions is for informational purposes
+ // only. Modern time-related code should not care about when offset changes
+ // occur.
+ //
+ // Example:
+ // cctz::time_zone nyc;
+ // if (!cctz::load_time_zone("America/New_York", &nyc)) { ... }
+ // const auto now = std::chrono::system_clock::now();
+ // auto tp = cctz::time_point<cctz::seconds>::min();
+ // cctz::time_zone::civil_transition trans;
+ // while (tp <= now && nyc.next_transition(tp, &trans)) {
+ // // transition: trans.from -> trans.to
+ // tp = nyc.lookup(trans.to).trans;
+ // }
+ struct civil_transition {
+ civil_second from; // the civil time we jump from
+ civil_second to; // the civil time we jump to
+ };
+ bool next_transition(const time_point<seconds>& tp,
+ civil_transition* trans) const;
+ template <typename D>
+ bool next_transition(const time_point<D>& tp, civil_transition* trans) const {
+ return next_transition(detail::split_seconds(tp).first, trans);
+ }
+ bool prev_transition(const time_point<seconds>& tp,
+ civil_transition* trans) const;
+ template <typename D>
+ bool prev_transition(const time_point<D>& tp, civil_transition* trans) const {
+ return prev_transition(detail::split_seconds(tp).first, trans);
+ }
+
+ // version() and description() provide additional information about the
+  // time zone. The content of each of the returned strings is unspecified;
+  // however, when the IANA Time Zone Database is the underlying data source,
+  // the version() string will be in the familiar form (e.g., "2018e") or
+ // empty when unavailable.
+ //
+ // Note: These functions are for informational or testing purposes only.
+ TString version() const; // empty when unknown
+ TString description() const;
+
+ // Relational operators.
+ friend bool operator==(time_zone lhs, time_zone rhs) {
+ return &lhs.effective_impl() == &rhs.effective_impl();
+ }
+ friend bool operator!=(time_zone lhs, time_zone rhs) { return !(lhs == rhs); }
+
+ template <typename H>
+ friend H AbslHashValue(H h, time_zone tz) {
+ return H::combine(std::move(h), &tz.effective_impl());
+ }
+
+ class Impl;
+
+ private:
+ explicit time_zone(const Impl* impl) : impl_(impl) {}
+ const Impl& effective_impl() const; // handles implicit UTC
+ const Impl* impl_;
+};
+
+// Loads the named time zone. May perform I/O on the initial load.
+// If the name is invalid, or some other kind of error occurs, returns
+// false and "*tz" is set to the UTC time zone.
+bool load_time_zone(const TString& name, time_zone* tz);
+
+// Returns a time_zone representing UTC. Cannot fail.
+time_zone utc_time_zone();
+
+// Returns a time zone that is a fixed offset (seconds east) from UTC.
+// Note: If the absolute value of the offset is greater than 24 hours
+// you'll get UTC (i.e., zero offset) instead.
+time_zone fixed_time_zone(const seconds& offset);
+
+// Returns a time zone representing the local time zone. Falls back to UTC.
+// Note: local_time_zone.name() may only be something like "localtime".
+time_zone local_time_zone();
+
+// Returns the civil time (cctz::civil_second) within the given time zone at
+// the given absolute time (time_point). Since the additional fields provided
+// by the time_zone::absolute_lookup struct should rarely be needed in modern
+// code, this convert() function is simpler and should be preferred.
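+//
+// Example (a sketch; any loaded time_zone works the same way):
+//   const cctz::time_zone utc = cctz::utc_time_zone();
+//   const auto now = std::chrono::system_clock::now();
+//   const cctz::civil_second cs = cctz::convert(now, utc);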
+template <typename D>
+inline civil_second convert(const time_point<D>& tp, const time_zone& tz) {
+ return tz.lookup(tp).cs;
+}
+
+// Returns the absolute time (time_point) that corresponds to the given civil
+// time within the given time zone. If the civil time is not unique (i.e., if
+// it was either repeated or non-existent), then the returned time_point is
+// the best estimate that preserves relative order. That is, this function
+// guarantees that if cs1 < cs2, then convert(cs1, tz) <= convert(cs2, tz).
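+//
+// Example (a sketch reusing the skipped civil time shown above):
+//   cctz::time_zone lax;
+//   if (!cctz::load_time_zone("America/Los_Angeles", &lax)) { ... }
+//   // 2011-03-13 02:15:00 did not exist in this zone, so convert()
+//   // returns the transition point: 2011/03/13 03:00:00 -0700.
+//   const auto tp =
+//       cctz::convert(cctz::civil_second(2011, 3, 13, 2, 15, 0), lax);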
+inline time_point<seconds> convert(const civil_second& cs,
+ const time_zone& tz) {
+ const time_zone::civil_lookup cl = tz.lookup(cs);
+ if (cl.kind == time_zone::civil_lookup::SKIPPED) return cl.trans;
+ return cl.pre;
+}
+
+namespace detail {
+using femtoseconds = std::chrono::duration<std::int_fast64_t, std::femto>;
+TString format(const TString&, const time_point<seconds>&,
+ const femtoseconds&, const time_zone&);
+bool parse(const TString&, const TString&, const time_zone&,
+ time_point<seconds>*, femtoseconds*, TString* err = nullptr);
+template <typename Rep, std::intmax_t Denom>
+bool join_seconds(
+ const time_point<seconds>& sec, const femtoseconds& fs,
+ time_point<std::chrono::duration<Rep, std::ratio<1, Denom>>>* tpp);
+template <typename Rep, std::intmax_t Num>
+bool join_seconds(
+ const time_point<seconds>& sec, const femtoseconds& fs,
+ time_point<std::chrono::duration<Rep, std::ratio<Num, 1>>>* tpp);
+template <typename Rep>
+bool join_seconds(
+ const time_point<seconds>& sec, const femtoseconds& fs,
+ time_point<std::chrono::duration<Rep, std::ratio<1, 1>>>* tpp);
+bool join_seconds(const time_point<seconds>& sec, const femtoseconds&,
+ time_point<seconds>* tpp);
+} // namespace detail
+
+// Formats the given time_point in the given cctz::time_zone according to
+// the provided format string. Uses strftime()-like formatting options,
+// with the following extensions:
+//
+// - %Ez - RFC3339-compatible numeric UTC offset (+hh:mm or -hh:mm)
+// - %E*z - Full-resolution numeric UTC offset (+hh:mm:ss or -hh:mm:ss)
+// - %E#S - Seconds with # digits of fractional precision
+// - %E*S - Seconds with full fractional precision (a literal '*')
+// - %E#f - Fractional seconds with # digits of precision
+// - %E*f - Fractional seconds with full precision (a literal '*')
+// - %E4Y - Four-character years (-999 ... -001, 0000, 0001 ... 9999)
+// - %ET - The RFC3339 "date-time" separator "T"
+//
+// Note that %E0S behaves like %S, and %E0f produces no characters. In
+// contrast %E*f always produces at least one digit, which may be '0'.
+//
+// Note that %Y produces as many characters as it takes to fully render the
+// year. A year outside of [-999:9999] when formatted with %E4Y will produce
+// more than four characters, just like %Y.
+//
+// Tip: Format strings should include the UTC offset (e.g., %z, %Ez, or %E*z)
+// so that the resulting string uniquely identifies an absolute time.
+//
+// Example:
+// cctz::time_zone lax;
+// if (!cctz::load_time_zone("America/Los_Angeles", &lax)) { ... }
+// auto tp = cctz::convert(cctz::civil_second(2013, 1, 2, 3, 4, 5), lax);
+// TString f = cctz::format("%H:%M:%S", tp, lax); // "03:04:05"
+// f = cctz::format("%H:%M:%E3S", tp, lax); // "03:04:05.000"
+template <typename D>
+inline TString format(const TString& fmt, const time_point<D>& tp,
+ const time_zone& tz) {
+ const auto p = detail::split_seconds(tp);
+ const auto n = std::chrono::duration_cast<detail::femtoseconds>(p.second);
+ return detail::format(fmt, p.first, n, tz);
+}
+
+// Parses an input string according to the provided format string and
+// returns the corresponding time_point. Uses strftime()-like formatting
+// options, with the same extensions as cctz::format(), but with the
+// exceptions that %E#S is interpreted as %E*S, and %E#f as %E*f. %Ez
+// and %E*z also accept the same inputs, which (along with %z) includes
+// 'z' and 'Z' as synonyms for +00:00. %ET accepts either 'T' or 't'.
+//
+// %Y consumes as many numeric characters as it can, so the matching data
+// should always be terminated with a non-numeric. %E4Y always consumes
+// exactly four characters, including any sign.
+//
+// Unspecified fields are taken from the default date and time of ...
+//
+// "1970-01-01 00:00:00.0 +0000"
+//
+// For example, parsing a string of "15:45" (%H:%M) will return a time_point
+// that represents "1970-01-01 15:45:00.0 +0000".
+//
+// Note that parse() returns time instants, so it makes most sense to parse
+// fully-specified date/time strings that include a UTC offset (%z, %Ez, or
+// %E*z).
+//
+// Note also that parse() only heeds the fields year, month, day, hour,
+// minute, (fractional) second, and UTC offset. Other fields, like weekday (%a
+// or %A), while parsed for syntactic validity, are ignored in the conversion.
+//
+// Date and time fields that are out-of-range will be treated as errors rather
+// than normalizing them like cctz::civil_second() would do. For example, it
+// is an error to parse the date "Oct 32, 2013" because 32 is out of range.
+//
+// A second of ":60" is normalized to ":00" of the following minute with
+// fractional seconds discarded. The following table shows how the given
+// seconds and subseconds will be parsed:
+//
+// "59.x" -> 59.x // exact
+// "60.x" -> 00.0 // normalized
+// "00.x" -> 00.x // exact
+//
+// Errors are indicated by returning false.
+//
+// Example:
+// const cctz::time_zone tz = ...
+// std::chrono::system_clock::time_point tp;
+// if (cctz::parse("%Y-%m-%d", "2015-10-09", tz, &tp)) {
+// ...
+// }
+template <typename D>
+inline bool parse(const TString& fmt, const TString& input,
+ const time_zone& tz, time_point<D>* tpp) {
+ time_point<seconds> sec;
+ detail::femtoseconds fs;
+ return detail::parse(fmt, input, tz, &sec, &fs) &&
+ detail::join_seconds(sec, fs, tpp);
+}
+
+namespace detail {
+
+// Split a time_point<D> into a time_point<seconds> and a D subseconds.
+// Undefined behavior if time_point<seconds> is not of sufficient range.
+// Note that this means it is UB to call cctz::time_zone::lookup(tp) or
+// cctz::format(fmt, tp, tz) with a time_point that is outside the range
+// of a 64-bit std::time_t.
+template <typename D>
+std::pair<time_point<seconds>, D> split_seconds(const time_point<D>& tp) {
+ auto sec = std::chrono::time_point_cast<seconds>(tp);
+ auto sub = tp - sec;
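+  // time_point_cast() truncates toward zero, so for pre-epoch times the
+  // remainder may be negative; borrow one second to keep it in [0, 1s).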
+ if (sub.count() < 0) {
+ sec -= seconds(1);
+ sub += seconds(1);
+ }
+ return {sec, std::chrono::duration_cast<D>(sub)};
+}
+
+inline std::pair<time_point<seconds>, seconds> split_seconds(
+ const time_point<seconds>& tp) {
+ return {tp, seconds::zero()};
+}
+
+// Join a time_point<seconds> and femto subseconds into a time_point<D>.
+// Floors to the resolution of time_point<D>. Returns false if time_point<D>
+// is not of sufficient range.
+template <typename Rep, std::intmax_t Denom>
+bool join_seconds(
+ const time_point<seconds>& sec, const femtoseconds& fs,
+ time_point<std::chrono::duration<Rep, std::ratio<1, Denom>>>* tpp) {
+ using D = std::chrono::duration<Rep, std::ratio<1, Denom>>;
+ // TODO(#199): Return false if result unrepresentable as a time_point<D>.
+ *tpp = std::chrono::time_point_cast<D>(sec);
+ *tpp += std::chrono::duration_cast<D>(fs);
+ return true;
+}
+
+template <typename Rep, std::intmax_t Num>
+bool join_seconds(
+ const time_point<seconds>& sec, const femtoseconds&,
+ time_point<std::chrono::duration<Rep, std::ratio<Num, 1>>>* tpp) {
+ using D = std::chrono::duration<Rep, std::ratio<Num, 1>>;
+ auto count = sec.time_since_epoch().count();
+ if (count >= 0 || count % Num == 0) {
+ count /= Num;
+ } else {
+ count /= Num;
+ count -= 1;
+ }
+ if (count > (std::numeric_limits<Rep>::max)()) return false;
+ if (count < (std::numeric_limits<Rep>::min)()) return false;
+ *tpp = time_point<D>() + D{static_cast<Rep>(count)};
+ return true;
+}
+
+template <typename Rep>
+bool join_seconds(
+ const time_point<seconds>& sec, const femtoseconds&,
+ time_point<std::chrono::duration<Rep, std::ratio<1, 1>>>* tpp) {
+ using D = std::chrono::duration<Rep, std::ratio<1, 1>>;
+ auto count = sec.time_since_epoch().count();
+ if (count > (std::numeric_limits<Rep>::max)()) return false;
+ if (count < (std::numeric_limits<Rep>::min)()) return false;
+ *tpp = time_point<D>() + D{static_cast<Rep>(count)};
+ return true;
+}
+
+inline bool join_seconds(const time_point<seconds>& sec, const femtoseconds&,
+ time_point<seconds>* tpp) {
+ *tpp = sec;
+ return true;
+}
+
+} // namespace detail
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/zone_info_source.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/zone_info_source.h
new file mode 100644
index 00000000000..fd6dc4bbb0f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/zone_info_source.h
@@ -0,0 +1,102 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_TIME_INTERNAL_CCTZ_ZONE_INFO_SOURCE_H_
+#define ABSL_TIME_INTERNAL_CCTZ_ZONE_INFO_SOURCE_H_
+
+#include <cstddef>
+#include <functional>
+#include <memory>
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// A stdio-like interface for providing zoneinfo data for a particular zone.
+class ZoneInfoSource {
+ public:
+ virtual ~ZoneInfoSource();
+
+ virtual std::size_t Read(void* ptr, std::size_t size) = 0; // like fread()
+ virtual int Skip(std::size_t offset) = 0; // like fseek()
+
+  // Until the zoneinfo data supports versioning information, we provide
+  // a way for a ZoneInfoSource to report a version out-of-band. The default
+ // implementation returns an empty string.
+ virtual TString Version() const;
+};
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz_extension {
+
+// A function-pointer type for a factory that returns a ZoneInfoSource
+// given the name of a time zone and a fallback factory. Returns null
+// when the data for the named zone cannot be found.
+using ZoneInfoSourceFactory =
+ std::unique_ptr<y_absl::time_internal::cctz::ZoneInfoSource> (*)(
+ const TString&,
+ const std::function<std::unique_ptr<
+ y_absl::time_internal::cctz::ZoneInfoSource>(const TString&)>&);
+
+// The user can control the mapping of zone names to zoneinfo data by
+// providing a definition for cctz_extension::zone_info_source_factory.
+// For example, given functions my_factory() and my_other_factory() that
+// can return a ZoneInfoSource for a named zone, we could inject them into
+// cctz::load_time_zone() with:
+//
+// namespace cctz_extension {
+// namespace {
+// std::unique_ptr<cctz::ZoneInfoSource> CustomFactory(
+// const TString& name,
+// const std::function<std::unique_ptr<cctz::ZoneInfoSource>(
+// const TString& name)>& fallback_factory) {
+// if (auto zip = my_factory(name)) return zip;
+// if (auto zip = fallback_factory(name)) return zip;
+// if (auto zip = my_other_factory(name)) return zip;
+// return nullptr;
+// }
+// } // namespace
+// ZoneInfoSourceFactory zone_info_source_factory = CustomFactory;
+// } // namespace cctz_extension
+//
+// This might be used, say, to load zoneinfo data embedded in the program,
+// or read from a (possibly compressed) file archive, or both.
+//
+// cctz_extension::zone_info_source_factory() will be called:
+// (1) from the same thread as the cctz::load_time_zone() call,
+// (2) only once for any zone name, and
+// (3) serially (i.e., no concurrent execution).
+//
+// The fallback factory obtains zoneinfo data by reading files in ${TZDIR},
+// and it is used automatically when no zone_info_source_factory definition
+// is linked into the program.
+extern ZoneInfoSourceFactory zone_info_source_factory;
+
+} // namespace cctz_extension
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_INTERNAL_CCTZ_ZONE_INFO_SOURCE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/civil_time_detail.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/civil_time_detail.cc
new file mode 100644
index 00000000000..61dbfc96ab7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/civil_time_detail.cc
@@ -0,0 +1,94 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/time/internal/cctz/include/cctz/civil_time_detail.h"
+
+#include <iomanip>
+#include <ostream>
+#include <sstream>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+namespace detail {
+
+// Output stream operators output a format matching YYYY-MM-DDThh:mm:ss,
+// while omitting fields inferior to the type's alignment. For example,
+// civil_day is formatted only as YYYY-MM-DD.
+std::ostream& operator<<(std::ostream& os, const civil_year& y) {
+ std::stringstream ss;
+ ss << y.year(); // No padding.
+ return os << ss.str();
+}
+std::ostream& operator<<(std::ostream& os, const civil_month& m) {
+ std::stringstream ss;
+ ss << civil_year(m) << '-';
+ ss << std::setfill('0') << std::setw(2) << m.month();
+ return os << ss.str();
+}
+std::ostream& operator<<(std::ostream& os, const civil_day& d) {
+ std::stringstream ss;
+ ss << civil_month(d) << '-';
+ ss << std::setfill('0') << std::setw(2) << d.day();
+ return os << ss.str();
+}
+std::ostream& operator<<(std::ostream& os, const civil_hour& h) {
+ std::stringstream ss;
+ ss << civil_day(h) << 'T';
+ ss << std::setfill('0') << std::setw(2) << h.hour();
+ return os << ss.str();
+}
+std::ostream& operator<<(std::ostream& os, const civil_minute& m) {
+ std::stringstream ss;
+ ss << civil_hour(m) << ':';
+ ss << std::setfill('0') << std::setw(2) << m.minute();
+ return os << ss.str();
+}
+std::ostream& operator<<(std::ostream& os, const civil_second& s) {
+ std::stringstream ss;
+ ss << civil_minute(s) << ':';
+ ss << std::setfill('0') << std::setw(2) << s.second();
+ return os << ss.str();
+}
+
+////////////////////////////////////////////////////////////////////////
+
+std::ostream& operator<<(std::ostream& os, weekday wd) {
+ switch (wd) {
+ case weekday::monday:
+ return os << "Monday";
+ case weekday::tuesday:
+ return os << "Tuesday";
+ case weekday::wednesday:
+ return os << "Wednesday";
+ case weekday::thursday:
+ return os << "Thursday";
+ case weekday::friday:
+ return os << "Friday";
+ case weekday::saturday:
+ return os << "Saturday";
+ case weekday::sunday:
+ return os << "Sunday";
+ }
+ return os; // Should never get here, but -Wreturn-type may warn without this.
+}
+
+} // namespace detail
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.cc
new file mode 100644
index 00000000000..3a22cbea1dc
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.cc
@@ -0,0 +1,140 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "time_zone_fixed.h"
+
+#include <algorithm>
+#include <cassert>
+#include <chrono>
+#include <cstring>
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+namespace {
+
+// The prefix used for the internal names of fixed-offset zones.
+const char kFixedZonePrefix[] = "Fixed/UTC";
+
+const char kDigits[] = "0123456789";
+
+char* Format02d(char* p, int v) {
+ *p++ = kDigits[(v / 10) % 10];
+ *p++ = kDigits[v % 10];
+ return p;
+}
+
+int Parse02d(const char* p) {
+ if (const char* ap = std::strchr(kDigits, *p)) {
+ int v = static_cast<int>(ap - kDigits);
+ if (const char* bp = std::strchr(kDigits, *++p)) {
+ return (v * 10) + static_cast<int>(bp - kDigits);
+ }
+ }
+ return -1;
+}
+
+} // namespace
+
+bool FixedOffsetFromName(const TString& name, seconds* offset) {
+ if (name == "UTC" || name == "UTC0") {
+ *offset = seconds::zero();
+ return true;
+ }
+
+ const std::size_t prefix_len = sizeof(kFixedZonePrefix) - 1;
+ const char* const ep = kFixedZonePrefix + prefix_len;
+ if (name.size() != prefix_len + 9) // <prefix>+99:99:99
+ return false;
+ if (!std::equal(kFixedZonePrefix, ep, name.begin())) return false;
+ const char* np = name.data() + prefix_len;
+ if (np[0] != '+' && np[0] != '-') return false;
+ if (np[3] != ':' || np[6] != ':') // see note below about large offsets
+ return false;
+
+ int hours = Parse02d(np + 1);
+ if (hours == -1) return false;
+ int mins = Parse02d(np + 4);
+ if (mins == -1) return false;
+ int secs = Parse02d(np + 7);
+ if (secs == -1) return false;
+
+ secs += ((hours * 60) + mins) * 60;
+ if (secs > 24 * 60 * 60) return false; // outside supported offset range
+ *offset = seconds(secs * (np[0] == '-' ? -1 : 1)); // "-" means west
+ return true;
+}
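+
+// Illustrative usage (not part of the original source); note the fixed
+// field widths required by the checks above:
+//
+//   seconds off;
+//   FixedOffsetFromName("Fixed/UTC+05:30:00", &off);  // true; off == 19800s
+//   FixedOffsetFromName("Fixed/UTC+5:30:00", &off);   // false; wrong width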
+
+TString FixedOffsetToName(const seconds& offset) {
+ if (offset == seconds::zero()) return "UTC";
+ if (offset < std::chrono::hours(-24) || offset > std::chrono::hours(24)) {
+ // We don't support fixed-offset zones more than 24 hours
+ // away from UTC to avoid complications in rendering such
+ // offsets and to (somewhat) limit the total number of zones.
+ return "UTC";
+ }
+ int offset_seconds = static_cast<int>(offset.count());
+ const char sign = (offset_seconds < 0 ? '-' : '+');
+ int offset_minutes = offset_seconds / 60;
+ offset_seconds %= 60;
+ if (sign == '-') {
+ if (offset_seconds > 0) {
+ offset_seconds -= 60;
+ offset_minutes += 1;
+ }
+ offset_seconds = -offset_seconds;
+ offset_minutes = -offset_minutes;
+ }
+ int offset_hours = offset_minutes / 60;
+ offset_minutes %= 60;
+ const std::size_t prefix_len = sizeof(kFixedZonePrefix) - 1;
+ char buf[prefix_len + sizeof("-24:00:00")];
+ char* ep = std::copy(kFixedZonePrefix, kFixedZonePrefix + prefix_len, buf);
+ *ep++ = sign;
+ ep = Format02d(ep, offset_hours);
+ *ep++ = ':';
+ ep = Format02d(ep, offset_minutes);
+ *ep++ = ':';
+ ep = Format02d(ep, offset_seconds);
+ *ep++ = '\0';
+ assert(ep == buf + sizeof(buf));
+ return buf;
+}
+
+TString FixedOffsetToAbbr(const seconds& offset) {
+ TString abbr = FixedOffsetToName(offset);
+ const std::size_t prefix_len = sizeof(kFixedZonePrefix) - 1;
+ if (abbr.size() == prefix_len + 9) { // <prefix>+99:99:99
+ abbr.erase(0, prefix_len); // +99:99:99
+ abbr.erase(6, 1); // +99:9999
+ abbr.erase(3, 1); // +999999
+ if (abbr[5] == '0' && abbr[6] == '0') { // +999900
+ abbr.erase(5, 2); // +9999
+ if (abbr[3] == '0' && abbr[4] == '0') { // +9900
+ abbr.erase(3, 2); // +99
+ }
+ }
+ }
+ return abbr;
+}
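+
+// Tracing the trimming above (an illustrative note, not in the original):
+//
+//   FixedOffsetToAbbr(seconds(19800));   // "+0530" (zero seconds dropped)
+//   FixedOffsetToAbbr(seconds(3600));    // "+01"   (zero minutes dropped too)
+//   FixedOffsetToAbbr(seconds::zero());  // "UTC"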
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.h
new file mode 100644
index 00000000000..39b24e5a32f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.h
@@ -0,0 +1,52 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_FIXED_H_
+#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_FIXED_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+#include "y_absl/time/internal/cctz/include/cctz/time_zone.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// Helper functions for dealing with the names and abbreviations
+// of time zones that are a fixed offset (seconds east) from UTC.
+// FixedOffsetFromName() extracts the offset from a valid fixed-offset
+// name, while FixedOffsetToName() and FixedOffsetToAbbr() generate
+// the canonical zone name and abbreviation respectively for the given
+// offset.
+//
+// A fixed-offset name looks like "Fixed/UTC<+-><hours>:<mins>:<secs>".
+// Its abbreviation is of the form "UTC(<+->H?H(MM(SS)?)?)?" where the
+// optional pieces are omitted when their values are zero. (Note that
+// the sign is the opposite of that used in a POSIX TZ specification.)
+//
+// Note: FixedOffsetFromName() fails on syntax errors or when the parsed
+// offset exceeds 24 hours. FixedOffsetToName() and FixedOffsetToAbbr()
+// both produce "UTC" when the argument offset exceeds 24 hours.
+bool FixedOffsetFromName(const TString& name, seconds* offset);
+TString FixedOffsetToName(const seconds& offset);
+TString FixedOffsetToAbbr(const seconds& offset);
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_FIXED_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_format.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_format.cc
new file mode 100644
index 00000000000..2f9597018b7
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_format.cc
@@ -0,0 +1,1029 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#if !defined(HAS_STRPTIME)
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#define HAS_STRPTIME 1 // assume everyone has strptime() except windows
+#endif
+#endif
+
+#if defined(HAS_STRPTIME) && HAS_STRPTIME
+#if !defined(_XOPEN_SOURCE)
+#define _XOPEN_SOURCE // Definedness suffices for strptime.
+#endif
+#endif
+
+#include "y_absl/base/config.h"
+#include "y_absl/time/internal/cctz/include/cctz/time_zone.h"
+
+// Include time.h directly since, by C++ standards, ctime doesn't have to
+// declare strptime.
+#include <time.h>
+
+#include <cctype>
+#include <chrono>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <ctime>
+#include <limits>
+#include <util/generic/string.h>
+#include <vector>
+#if !HAS_STRPTIME
+#include <iomanip>
+#include <sstream>
+#endif
+
+#include "y_absl/time/internal/cctz/include/cctz/civil_time.h"
+#include "time_zone_if.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+namespace detail {
+
+namespace {
+
+#if !HAS_STRPTIME
+// Build a strptime() using C++11's std::get_time().
+char* strptime(const char* s, const char* fmt, std::tm* tm) {
+ std::istringstream input(s);
+ input >> std::get_time(tm, fmt);
+ if (input.fail()) return nullptr;
+ return const_cast<char*>(s) +
+ (input.eof() ? strlen(s) : static_cast<std::size_t>(input.tellg()));
+}
+#endif
+
+// Convert a cctz::weekday to a tm_wday value (0-6, Sunday = 0).
+int ToTmWday(weekday wd) {
+ switch (wd) {
+ case weekday::sunday:
+ return 0;
+ case weekday::monday:
+ return 1;
+ case weekday::tuesday:
+ return 2;
+ case weekday::wednesday:
+ return 3;
+ case weekday::thursday:
+ return 4;
+ case weekday::friday:
+ return 5;
+ case weekday::saturday:
+ return 6;
+ }
+ return 0; /*NOTREACHED*/
+}
+
+// Convert a tm_wday value (0-6, Sunday = 0) to a cctz::weekday.
+weekday FromTmWday(int tm_wday) {
+ switch (tm_wday) {
+ case 0:
+ return weekday::sunday;
+ case 1:
+ return weekday::monday;
+ case 2:
+ return weekday::tuesday;
+ case 3:
+ return weekday::wednesday;
+ case 4:
+ return weekday::thursday;
+ case 5:
+ return weekday::friday;
+ case 6:
+ return weekday::saturday;
+ }
+ return weekday::sunday; /*NOTREACHED*/
+}
+
+std::tm ToTM(const time_zone::absolute_lookup& al) {
+ std::tm tm{};
+ tm.tm_sec = al.cs.second();
+ tm.tm_min = al.cs.minute();
+ tm.tm_hour = al.cs.hour();
+ tm.tm_mday = al.cs.day();
+ tm.tm_mon = al.cs.month() - 1;
+
+  // Saturate tm.tm_year in cases of over/underflow.
+ if (al.cs.year() < std::numeric_limits<int>::min() + 1900) {
+ tm.tm_year = std::numeric_limits<int>::min();
+ } else if (al.cs.year() - 1900 > std::numeric_limits<int>::max()) {
+ tm.tm_year = std::numeric_limits<int>::max();
+ } else {
+ tm.tm_year = static_cast<int>(al.cs.year() - 1900);
+ }
+
+ tm.tm_wday = ToTmWday(get_weekday(al.cs));
+ tm.tm_yday = get_yearday(al.cs) - 1;
+ tm.tm_isdst = al.is_dst ? 1 : 0;
+ return tm;
+}
+
+// Returns the week of the year [0:53] given a civil day and the day on
+// which weeks are defined to start.
+int ToWeek(const civil_day& cd, weekday week_start) {
+ const civil_day d(cd.year() % 400, cd.month(), cd.day());
+ return static_cast<int>((d - prev_weekday(civil_year(d), week_start)) / 7);
+}
+
+const char kDigits[] = "0123456789";
+
+// Formats a 64-bit integer in the given field width. Note that it is up
+// to the caller of Format64() [and Format02d()/FormatOffset()] to ensure
+// that there is sufficient space before ep to hold the conversion.
+char* Format64(char* ep, int width, std::int_fast64_t v) {
+ bool neg = false;
+ if (v < 0) {
+ --width;
+ neg = true;
+ if (v == std::numeric_limits<std::int_fast64_t>::min()) {
+ // Avoid negating minimum value.
+ std::int_fast64_t last_digit = -(v % 10);
+ v /= 10;
+ if (last_digit < 0) {
+ ++v;
+ last_digit += 10;
+ }
+ --width;
+ *--ep = kDigits[last_digit];
+ }
+ v = -v;
+ }
+ do {
+ --width;
+ *--ep = kDigits[v % 10];
+ } while (v /= 10);
+ while (--width >= 0) *--ep = '0'; // zero pad
+ if (neg) *--ep = '-';
+ return ep;
+}
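+
+// Illustrative usage (not in the original): Format64() works backwards
+// from the end of a caller-provided buffer and returns the start of the
+// formatted text.
+//
+//   char buf[32];
+//   char* ep = buf + sizeof(buf);
+//   char* bp = Format64(ep, 4, 42);  // the range [bp, ep) holds "0042"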
+
+// Formats [0 .. 99] as %02d.
+char* Format02d(char* ep, int v) {
+ *--ep = kDigits[v % 10];
+ *--ep = kDigits[(v / 10) % 10];
+ return ep;
+}
+
+// Formats a UTC offset, like +00:00.
+char* FormatOffset(char* ep, int offset, const char* mode) {
+ // TODO: Follow the RFC3339 "Unknown Local Offset Convention" and
+ // generate a "negative zero" when we're formatting a zero offset
+ // as the result of a failed load_time_zone().
+ char sign = '+';
+ if (offset < 0) {
+ offset = -offset; // bounded by 24h so no overflow
+ sign = '-';
+ }
+ const int seconds = offset % 60;
+ const int minutes = (offset /= 60) % 60;
+ const int hours = offset /= 60;
+ const char sep = mode[0];
+ const bool ext = (sep != '\0' && mode[1] == '*');
+ const bool ccc = (ext && mode[2] == ':');
+ if (ext && (!ccc || seconds != 0)) {
+ ep = Format02d(ep, seconds);
+ *--ep = sep;
+ } else {
+ // If we're not rendering seconds, sub-minute negative offsets
+ // should get a positive sign (e.g., offset=-10s => "+00:00").
+ if (hours == 0 && minutes == 0) sign = '+';
+ }
+ if (!ccc || minutes != 0 || seconds != 0) {
+ ep = Format02d(ep, minutes);
+ if (sep != '\0') *--ep = sep;
+ }
+ ep = Format02d(ep, hours);
+ *--ep = sign;
+ return ep;
+}
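+
+// An illustrative summary of the mode strings used by the callers below
+// (not part of the original source): mode[0] is the separator, mode[1] of
+// '*' requests seconds, and a trailing ':' suppresses zero-valued trailing
+// fields. For an offset of 19800 seconds (+05:30):
+//
+//   FormatOffset(ep, 19800, "");     // "+0530"     (%z)
+//   FormatOffset(ep, 19800, ":");    // "+05:30"    (%Ez)
+//   FormatOffset(ep, 19800, ":*");   // "+05:30:00" (%E*z)
+//   FormatOffset(ep, 19800, ":*:");  // "+05:30"    (%:::z, zero secs dropped)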
+
+// Formats a std::tm using strftime(3).
+void FormatTM(TString* out, const TString& fmt, const std::tm& tm) {
+ // strftime(3) returns the number of characters placed in the output
+ // array (which may be 0 characters). It also returns 0 to indicate
+ // an error, like the array wasn't large enough. To accommodate this,
+ // the following code grows the buffer size from 2x the format string
+ // length up to 32x.
+ for (std::size_t i = 2; i != 32; i *= 2) {
+ std::size_t buf_size = fmt.size() * i;
+ std::vector<char> buf(buf_size);
+ if (std::size_t len = strftime(&buf[0], buf_size, fmt.c_str(), &tm)) {
+ out->append(&buf[0], len);
+ return;
+ }
+ }
+}
+
+// Used for %E#S/%E#f specifiers and for data values in parse().
+template <typename T>
+const char* ParseInt(const char* dp, int width, T min, T max, T* vp) {
+ if (dp != nullptr) {
+ const T kmin = std::numeric_limits<T>::min();
+ bool erange = false;
+ bool neg = false;
+ T value = 0;
+ if (*dp == '-') {
+ neg = true;
+ if (width <= 0 || --width != 0) {
+ ++dp;
+ } else {
+ dp = nullptr; // width was 1
+ }
+ }
+ if (const char* const bp = dp) {
+ while (const char* cp = strchr(kDigits, *dp)) {
+ int d = static_cast<int>(cp - kDigits);
+ if (d >= 10) break;
+ if (value < kmin / 10) {
+ erange = true;
+ break;
+ }
+ value *= 10;
+ if (value < kmin + d) {
+ erange = true;
+ break;
+ }
+ value -= d;
+ dp += 1;
+ if (width > 0 && --width == 0) break;
+ }
+ if (dp != bp && !erange && (neg || value != kmin)) {
+ if (!neg || value != 0) {
+ if (!neg) value = -value; // make positive
+ if (min <= value && value <= max) {
+ *vp = value;
+ } else {
+ dp = nullptr;
+ }
+ } else {
+ dp = nullptr;
+ }
+ } else {
+ dp = nullptr;
+ }
+ }
+ }
+ return dp;
+}
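+
+// Illustrative usage (not in the original). Values accumulate negatively
+// above so that kmin, which has no positive counterpart, remains parseable.
+//
+//   int n = 0;
+//   const char* np = ParseInt("123kg", 0, 0, 1024, &n);
+//   // n == 123; np points at the 'k'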
+
+// The number of base-10 digits that can be represented by a signed 64-bit
+// integer. That is, 10^kDigits10_64 <= 2^63 - 1 < 10^(kDigits10_64 + 1).
+const int kDigits10_64 = 18;
+
+// 10^n for everything that can be represented by a signed 64-bit integer.
+const std::int_fast64_t kExp10[kDigits10_64 + 1] = {
+ 1,
+ 10,
+ 100,
+ 1000,
+ 10000,
+ 100000,
+ 1000000,
+ 10000000,
+ 100000000,
+ 1000000000,
+ 10000000000,
+ 100000000000,
+ 1000000000000,
+ 10000000000000,
+ 100000000000000,
+ 1000000000000000,
+ 10000000000000000,
+ 100000000000000000,
+ 1000000000000000000,
+};
+
+} // namespace
+
+// Uses strftime(3) to format the given Time. The following extended format
+// specifiers are also supported:
+//
+// - %Ez - RFC3339-compatible numeric UTC offset (+hh:mm or -hh:mm)
+// - %E*z - Full-resolution numeric UTC offset (+hh:mm:ss or -hh:mm:ss)
+// - %E#S - Seconds with # digits of fractional precision
+// - %E*S - Seconds with full fractional precision (a literal '*')
+// - %E4Y - Four-character years (-999 ... -001, 0000, 0001 ... 9999)
+// - %ET - The RFC3339 "date-time" separator "T"
+//
+// The standard specifiers from RFC3339_* (%Y, %m, %d, %H, %M, and %S) are
+// handled internally for performance reasons. strftime(3) is slow due to
+// a POSIX requirement to respect changes to ${TZ}.
+//
+// The TZ/GNU %s extension is handled internally because strftime() has
+// to use mktime() to generate it, and that assumes the local time zone.
+//
+// We also handle the %z and %Z specifiers to accommodate platforms that do
+// not support the tm_gmtoff and tm_zone extensions to std::tm.
+//
+// Requires that zero() <= fs < seconds(1).
+TString format(const TString& format, const time_point<seconds>& tp,
+ const detail::femtoseconds& fs, const time_zone& tz) {
+ TString result;
+ result.reserve(format.size()); // A reasonable guess for the result size.
+ const time_zone::absolute_lookup al = tz.lookup(tp);
+ const std::tm tm = ToTM(al);
+
+ // Scratch buffer for internal conversions.
+ char buf[3 + kDigits10_64]; // enough for longest conversion
+ char* const ep = buf + sizeof(buf);
+ char* bp; // works back from ep
+
+ // Maintain three, disjoint subsequences that span format.
+ // [format.begin() ... pending) : already formatted into result
+ // [pending ... cur) : formatting pending, but no special cases
+ // [cur ... format.end()) : unexamined
+ // Initially, everything is in the unexamined part.
+ const char* pending = format.c_str(); // NUL terminated
+ const char* cur = pending;
+ const char* end = pending + format.length();
+
+ while (cur != end) { // while something is unexamined
+ // Moves cur to the next percent sign.
+ const char* start = cur;
+ while (cur != end && *cur != '%') ++cur;
+
+ // If the new pending text is all ordinary, copy it out.
+ if (cur != start && pending == start) {
+ result.append(pending, static_cast<std::size_t>(cur - pending));
+ pending = start = cur;
+ }
+
+ // Span the sequential percent signs.
+ const char* percent = cur;
+ while (cur != end && *cur == '%') ++cur;
+
+ // If the new pending text is all percents, copy out one
+ // percent for every matched pair, then skip those pairs.
+ if (cur != start && pending == start) {
+ std::size_t escaped = static_cast<std::size_t>(cur - pending) / 2;
+ result.append(pending, escaped);
+ pending += escaped * 2;
+ // Also copy out a single trailing percent.
+ if (pending != cur && cur == end) {
+ result.push_back(*pending++);
+ }
+ }
+
+ // Loop unless we have an unescaped percent.
+ if (cur == end || (cur - percent) % 2 == 0) continue;
+
+ // Simple specifiers that we handle ourselves.
+ if (strchr("YmdeUuWwHMSzZs%", *cur)) {
+ if (cur - 1 != pending) {
+ FormatTM(&result, TString(pending, cur - 1), tm);
+ }
+ switch (*cur) {
+ case 'Y':
+ // This avoids the tm.tm_year overflow problem for %Y, however
+ // tm.tm_year will still be used by other specifiers like %D.
+ bp = Format64(ep, 0, al.cs.year());
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'm':
+ bp = Format02d(ep, al.cs.month());
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'd':
+ case 'e':
+ bp = Format02d(ep, al.cs.day());
+ if (*cur == 'e' && *bp == '0') *bp = ' '; // for Windows
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'U':
+ bp = Format02d(ep, ToWeek(civil_day(al.cs), weekday::sunday));
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'u':
+ bp = Format64(ep, 0, tm.tm_wday ? tm.tm_wday : 7);
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'W':
+ bp = Format02d(ep, ToWeek(civil_day(al.cs), weekday::monday));
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'w':
+ bp = Format64(ep, 0, tm.tm_wday);
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'H':
+ bp = Format02d(ep, al.cs.hour());
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'M':
+ bp = Format02d(ep, al.cs.minute());
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'S':
+ bp = Format02d(ep, al.cs.second());
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'z':
+ bp = FormatOffset(ep, al.offset, "");
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case 'Z':
+ result.append(al.abbr);
+ break;
+ case 's':
+ bp = Format64(ep, 0, ToUnixSeconds(tp));
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ break;
+ case '%':
+ result.push_back('%');
+ break;
+ }
+ pending = ++cur;
+ continue;
+ }
+
+ // More complex specifiers that we handle ourselves.
+ if (*cur == ':' && cur + 1 != end) {
+ if (*(cur + 1) == 'z') {
+ // Formats %:z.
+ if (cur - 1 != pending) {
+ FormatTM(&result, TString(pending, cur - 1), tm);
+ }
+ bp = FormatOffset(ep, al.offset, ":");
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ pending = cur += 2;
+ continue;
+ }
+ if (*(cur + 1) == ':' && cur + 2 != end) {
+ if (*(cur + 2) == 'z') {
+ // Formats %::z.
+ if (cur - 1 != pending) {
+ FormatTM(&result, TString(pending, cur - 1), tm);
+ }
+ bp = FormatOffset(ep, al.offset, ":*");
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ pending = cur += 3;
+ continue;
+ }
+ if (*(cur + 2) == ':' && cur + 3 != end) {
+ if (*(cur + 3) == 'z') {
+ // Formats %:::z.
+ if (cur - 1 != pending) {
+ FormatTM(&result, TString(pending, cur - 1), tm);
+ }
+ bp = FormatOffset(ep, al.offset, ":*:");
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ pending = cur += 4;
+ continue;
+ }
+ }
+ }
+ }
+
+ // Loop if there is no E modifier.
+ if (*cur != 'E' || ++cur == end) continue;
+
+ // Format our extensions.
+ if (*cur == 'T') {
+ // Formats %ET.
+ if (cur - 2 != pending) {
+ FormatTM(&result, TString(pending, cur - 2), tm);
+ }
+ result.append("T");
+ pending = ++cur;
+ } else if (*cur == 'z') {
+ // Formats %Ez.
+ if (cur - 2 != pending) {
+ FormatTM(&result, TString(pending, cur - 2), tm);
+ }
+ bp = FormatOffset(ep, al.offset, ":");
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ pending = ++cur;
+ } else if (*cur == '*' && cur + 1 != end && *(cur + 1) == 'z') {
+ // Formats %E*z.
+ if (cur - 2 != pending) {
+ FormatTM(&result, TString(pending, cur - 2), tm);
+ }
+ bp = FormatOffset(ep, al.offset, ":*");
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ pending = cur += 2;
+ } else if (*cur == '*' && cur + 1 != end &&
+ (*(cur + 1) == 'S' || *(cur + 1) == 'f')) {
+      // Formats %E*S or %E*f.
+ if (cur - 2 != pending) {
+ FormatTM(&result, TString(pending, cur - 2), tm);
+ }
+ char* cp = ep;
+ bp = Format64(cp, 15, fs.count());
+ while (cp != bp && cp[-1] == '0') --cp;
+ switch (*(cur + 1)) {
+ case 'S':
+ if (cp != bp) *--bp = '.';
+ bp = Format02d(bp, al.cs.second());
+ break;
+ case 'f':
+ if (cp == bp) *--bp = '0';
+ break;
+ }
+ result.append(bp, static_cast<std::size_t>(cp - bp));
+ pending = cur += 2;
+ } else if (*cur == '4' && cur + 1 != end && *(cur + 1) == 'Y') {
+ // Formats %E4Y.
+ if (cur - 2 != pending) {
+ FormatTM(&result, TString(pending, cur - 2), tm);
+ }
+ bp = Format64(ep, 4, al.cs.year());
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ pending = cur += 2;
+ } else if (std::isdigit(*cur)) {
+ // Possibly found %E#S or %E#f.
+ int n = 0;
+ if (const char* np = ParseInt(cur, 0, 0, 1024, &n)) {
+ if (*np == 'S' || *np == 'f') {
+ // Formats %E#S or %E#f.
+ if (cur - 2 != pending) {
+ FormatTM(&result, TString(pending, cur - 2), tm);
+ }
+ bp = ep;
+ if (n > 0) {
+ if (n > kDigits10_64) n = kDigits10_64;
+ bp = Format64(bp, n,
+ (n > 15) ? fs.count() * kExp10[n - 15]
+ : fs.count() / kExp10[15 - n]);
+ if (*np == 'S') *--bp = '.';
+ }
+ if (*np == 'S') bp = Format02d(bp, al.cs.second());
+ result.append(bp, static_cast<std::size_t>(ep - bp));
+ pending = cur = ++np;
+ }
+ }
+ }
+ }
+
+ // Formats any remaining data.
+ if (end != pending) {
+ FormatTM(&result, TString(pending, end), tm);
+ }
+
+ return result;
+}
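+
+// Illustrative usage (not part of the original source):
+//
+//   const auto tp = FromUnixSeconds(1234567890);  // 2009-02-13T23:31:30Z
+//   const auto fs = detail::femtoseconds(250000000000000);  // 0.25 seconds
+//   format("%Y-%m-%d%ET%H:%M:%E2S%Ez", tp, fs, utc_time_zone());
+//   // yields "2009-02-13T23:31:30.25+00:00"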
+
+namespace {
+
+const char* ParseOffset(const char* dp, const char* mode, int* offset) {
+ if (dp != nullptr) {
+ const char first = *dp++;
+ if (first == '+' || first == '-') {
+ char sep = mode[0];
+ int hours = 0;
+ int minutes = 0;
+ int seconds = 0;
+ const char* ap = ParseInt(dp, 2, 0, 23, &hours);
+ if (ap != nullptr && ap - dp == 2) {
+ dp = ap;
+ if (sep != '\0' && *ap == sep) ++ap;
+ const char* bp = ParseInt(ap, 2, 0, 59, &minutes);
+ if (bp != nullptr && bp - ap == 2) {
+ dp = bp;
+ if (sep != '\0' && *bp == sep) ++bp;
+ const char* cp = ParseInt(bp, 2, 0, 59, &seconds);
+ if (cp != nullptr && cp - bp == 2) dp = cp;
+ }
+ *offset = ((hours * 60 + minutes) * 60) + seconds;
+ if (first == '-') *offset = -*offset;
+ } else {
+ dp = nullptr;
+ }
+ } else if (first == 'Z' || first == 'z') { // Zulu
+ *offset = 0;
+ } else {
+ dp = nullptr;
+ }
+ }
+ return dp;
+}
+
+const char* ParseZone(const char* dp, TString* zone) {
+ zone->clear();
+ if (dp != nullptr) {
+ while (*dp != '\0' && !std::isspace(*dp)) zone->push_back(*dp++);
+ if (zone->empty()) dp = nullptr;
+ }
+ return dp;
+}
+
+const char* ParseSubSeconds(const char* dp, detail::femtoseconds* subseconds) {
+ if (dp != nullptr) {
+ std::int_fast64_t v = 0;
+ std::int_fast64_t exp = 0;
+ const char* const bp = dp;
+ while (const char* cp = strchr(kDigits, *dp)) {
+ int d = static_cast<int>(cp - kDigits);
+ if (d >= 10) break;
+ if (exp < 15) {
+ exp += 1;
+ v *= 10;
+ v += d;
+ }
+ ++dp;
+ }
+ if (dp != bp) {
+ v *= kExp10[15 - exp];
+ *subseconds = detail::femtoseconds(v);
+ } else {
+ dp = nullptr;
+ }
+ }
+ return dp;
+}
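+
+// For example (an illustrative note, not in the original):
+// ParseSubSeconds("25", &fs) consumes both digits and sets fs to
+// 250000000000000 femtoseconds (0.25s); digits beyond the 15th are
+// consumed but ignored.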
+
+// Parses a string into a std::tm using strptime(3).
+const char* ParseTM(const char* dp, const char* fmt, std::tm* tm) {
+ if (dp != nullptr) {
+ dp = strptime(dp, fmt, tm);
+ }
+ return dp;
+}
+
+// Sets *year, tm->tm_mon, and tm->tm_mday given the year, week_num, and
+// tm_wday, and the day on which weeks are defined to start. Returns false
+// if *year would need to move outside its bounds.
+bool FromWeek(int week_num, weekday week_start, year_t* year, std::tm* tm) {
+ const civil_year y(*year % 400);
+ civil_day cd = prev_weekday(y, week_start); // week 0
+ cd = next_weekday(cd - 1, FromTmWday(tm->tm_wday)) + (week_num * 7);
+ if (const year_t shift = cd.year() - y.year()) {
+ if (shift > 0) {
+ if (*year > std::numeric_limits<year_t>::max() - shift) return false;
+ } else {
+ if (*year < std::numeric_limits<year_t>::min() - shift) return false;
+ }
+ *year += shift;
+ }
+ tm->tm_mon = cd.month() - 1;
+ tm->tm_mday = cd.day();
+ return true;
+}
+
+} // namespace
+
+// Uses strptime(3) to parse the given input. Supports the same extended
+// format specifiers as format(), although %E#S and %E*S are treated
+// identically (and similarly for %E#f and %E*f). %Ez and %E*z also accept
+// the same inputs. %ET accepts either 'T' or 't'.
+//
+// The standard specifiers from RFC3339_* (%Y, %m, %d, %H, %M, and %S) are
+// handled internally so that we can normally avoid strptime() altogether
+// (which is particularly helpful when the native implementation is broken).
+//
+// The TZ/GNU %s extension is handled internally because strptime() has to
+// use localtime_r() to generate it, and that assumes the local time zone.
+//
+// We also handle the %z specifier to accommodate platforms that do not
+// support the tm_gmtoff extension to std::tm. %Z is parsed but ignored.
+bool parse(const TString& format, const TString& input,
+ const time_zone& tz, time_point<seconds>* sec,
+ detail::femtoseconds* fs, TString* err) {
+ // The unparsed input.
+ const char* data = input.c_str(); // NUL terminated
+
+ // Skips leading whitespace.
+ while (std::isspace(*data)) ++data;
+
+ const year_t kyearmax = std::numeric_limits<year_t>::max();
+ const year_t kyearmin = std::numeric_limits<year_t>::min();
+
+ // Sets default values for unspecified fields.
+ bool saw_year = false;
+ year_t year = 1970;
+ std::tm tm{};
+ tm.tm_year = 1970 - 1900;
+ tm.tm_mon = 1 - 1; // Jan
+ tm.tm_mday = 1;
+ tm.tm_hour = 0;
+ tm.tm_min = 0;
+ tm.tm_sec = 0;
+ tm.tm_wday = 4; // Thu
+ tm.tm_yday = 0;
+ tm.tm_isdst = 0;
+ auto subseconds = detail::femtoseconds::zero();
+ bool saw_offset = false;
+ int offset = 0; // No offset from passed tz.
+ TString zone = "UTC";
+
+ const char* fmt = format.c_str(); // NUL terminated
+ bool twelve_hour = false;
+ bool afternoon = false;
+ int week_num = -1;
+ weekday week_start = weekday::sunday;
+
+ bool saw_percent_s = false;
+ std::int_fast64_t percent_s = 0;
+
+ // Steps through format, one specifier at a time.
+ while (data != nullptr && *fmt != '\0') {
+ if (std::isspace(*fmt)) {
+ while (std::isspace(*data)) ++data;
+ while (std::isspace(*++fmt)) continue;
+ continue;
+ }
+
+ if (*fmt != '%') {
+ if (*data == *fmt) {
+ ++data;
+ ++fmt;
+ } else {
+ data = nullptr;
+ }
+ continue;
+ }
+
+ const char* percent = fmt;
+ if (*++fmt == '\0') {
+ data = nullptr;
+ continue;
+ }
+ switch (*fmt++) {
+ case 'Y':
+        // Symmetrically with FormatTime(), directly handling %Y avoids the
+ // tm.tm_year overflow problem. However, tm.tm_year will still be
+ // used by other specifiers like %D.
+ data = ParseInt(data, 0, kyearmin, kyearmax, &year);
+ if (data != nullptr) saw_year = true;
+ continue;
+ case 'm':
+ data = ParseInt(data, 2, 1, 12, &tm.tm_mon);
+ if (data != nullptr) tm.tm_mon -= 1;
+ week_num = -1;
+ continue;
+ case 'd':
+ case 'e':
+ data = ParseInt(data, 2, 1, 31, &tm.tm_mday);
+ week_num = -1;
+ continue;
+ case 'U':
+ data = ParseInt(data, 0, 0, 53, &week_num);
+ week_start = weekday::sunday;
+ continue;
+ case 'W':
+ data = ParseInt(data, 0, 0, 53, &week_num);
+ week_start = weekday::monday;
+ continue;
+ case 'u':
+ data = ParseInt(data, 0, 1, 7, &tm.tm_wday);
+ if (data != nullptr) tm.tm_wday %= 7;
+ continue;
+ case 'w':
+ data = ParseInt(data, 0, 0, 6, &tm.tm_wday);
+ continue;
+ case 'H':
+ data = ParseInt(data, 2, 0, 23, &tm.tm_hour);
+ twelve_hour = false;
+ continue;
+ case 'M':
+ data = ParseInt(data, 2, 0, 59, &tm.tm_min);
+ continue;
+ case 'S':
+ data = ParseInt(data, 2, 0, 60, &tm.tm_sec);
+ continue;
+ case 'I':
+ case 'l':
+ case 'r': // probably uses %I
+ twelve_hour = true;
+ break;
+ case 'R': // uses %H
+ case 'T': // uses %H
+ case 'c': // probably uses %H
+ case 'X': // probably uses %H
+ twelve_hour = false;
+ break;
+ case 'z':
+ data = ParseOffset(data, "", &offset);
+ if (data != nullptr) saw_offset = true;
+ continue;
+ case 'Z': // ignored; zone abbreviations are ambiguous
+ data = ParseZone(data, &zone);
+ continue;
+ case 's':
+ data =
+ ParseInt(data, 0, std::numeric_limits<std::int_fast64_t>::min(),
+ std::numeric_limits<std::int_fast64_t>::max(), &percent_s);
+ if (data != nullptr) saw_percent_s = true;
+ continue;
+ case ':':
+ if (fmt[0] == 'z' ||
+ (fmt[0] == ':' &&
+ (fmt[1] == 'z' || (fmt[1] == ':' && fmt[2] == 'z')))) {
+ data = ParseOffset(data, ":", &offset);
+ if (data != nullptr) saw_offset = true;
+ fmt += (fmt[0] == 'z') ? 1 : (fmt[1] == 'z') ? 2 : 3;
+ continue;
+ }
+ break;
+ case '%':
+ data = (*data == '%' ? data + 1 : nullptr);
+ continue;
+ case 'E':
+ if (fmt[0] == 'T') {
+ if (*data == 'T' || *data == 't') {
+ ++data;
+ ++fmt;
+ } else {
+ data = nullptr;
+ }
+ continue;
+ }
+ if (fmt[0] == 'z' || (fmt[0] == '*' && fmt[1] == 'z')) {
+ data = ParseOffset(data, ":", &offset);
+ if (data != nullptr) saw_offset = true;
+ fmt += (fmt[0] == 'z') ? 1 : 2;
+ continue;
+ }
+ if (fmt[0] == '*' && fmt[1] == 'S') {
+ data = ParseInt(data, 2, 0, 60, &tm.tm_sec);
+ if (data != nullptr && *data == '.') {
+ data = ParseSubSeconds(data + 1, &subseconds);
+ }
+ fmt += 2;
+ continue;
+ }
+ if (fmt[0] == '*' && fmt[1] == 'f') {
+ if (data != nullptr && std::isdigit(*data)) {
+ data = ParseSubSeconds(data, &subseconds);
+ }
+ fmt += 2;
+ continue;
+ }
+ if (fmt[0] == '4' && fmt[1] == 'Y') {
+ const char* bp = data;
+ data = ParseInt(data, 4, year_t{-999}, year_t{9999}, &year);
+ if (data != nullptr) {
+ if (data - bp == 4) {
+ saw_year = true;
+ } else {
+ data = nullptr; // stopped too soon
+ }
+ }
+ fmt += 2;
+ continue;
+ }
+ if (std::isdigit(*fmt)) {
+ int n = 0; // value ignored
+ if (const char* np = ParseInt(fmt, 0, 0, 1024, &n)) {
+ if (*np == 'S') {
+ data = ParseInt(data, 2, 0, 60, &tm.tm_sec);
+ if (data != nullptr && *data == '.') {
+ data = ParseSubSeconds(data + 1, &subseconds);
+ }
+ fmt = ++np;
+ continue;
+ }
+ if (*np == 'f') {
+ if (data != nullptr && std::isdigit(*data)) {
+ data = ParseSubSeconds(data, &subseconds);
+ }
+ fmt = ++np;
+ continue;
+ }
+ }
+ }
+ if (*fmt == 'c') twelve_hour = false; // probably uses %H
+ if (*fmt == 'X') twelve_hour = false; // probably uses %H
+ if (*fmt != '\0') ++fmt;
+ break;
+ case 'O':
+ if (*fmt == 'H') twelve_hour = false;
+ if (*fmt == 'I') twelve_hour = true;
+ if (*fmt != '\0') ++fmt;
+ break;
+ }
+
+ // Parses the current specifier.
+ const char* orig_data = data;
+ TString spec(percent, static_cast<std::size_t>(fmt - percent));
+ data = ParseTM(data, spec.c_str(), &tm);
+
+ // If we successfully parsed %p we need to remember whether the result
+ // was AM or PM so that we can adjust tm_hour before time_zone::lookup().
+ // So reparse the input with a known AM hour, and check if it is shifted
+ // to a PM hour.
+ if (spec == "%p" && data != nullptr) {
+ TString test_input = "1";
+ test_input.append(orig_data, static_cast<std::size_t>(data - orig_data));
+ const char* test_data = test_input.c_str();
+ std::tm tmp{};
+ ParseTM(test_data, "%I%p", &tmp);
+ afternoon = (tmp.tm_hour == 13);
+ }
+ }
+
+ // Adjust a 12-hour tm_hour value if it should be in the afternoon.
+ if (twelve_hour && afternoon && tm.tm_hour < 12) {
+ tm.tm_hour += 12;
+ }
+
+ if (data == nullptr) {
+ if (err != nullptr) *err = "Failed to parse input";
+ return false;
+ }
+
+ // Skip any remaining whitespace.
+ while (std::isspace(*data)) ++data;
+
+ // parse() must consume the entire input string.
+ if (*data != '\0') {
+ if (err != nullptr) *err = "Illegal trailing data in input string";
+ return false;
+ }
+
+ // If we saw %s then we ignore anything else and return that time.
+ if (saw_percent_s) {
+ *sec = FromUnixSeconds(percent_s);
+ *fs = detail::femtoseconds::zero();
+ return true;
+ }
+
+ // If we saw %z, %Ez, or %E*z then we want to interpret the parsed fields
+ // in UTC and then shift by that offset. Otherwise we want to interpret
+ // the fields directly in the passed time_zone.
+ time_zone ptz = saw_offset ? utc_time_zone() : tz;
+
+ // Allows a leap second of 60 to normalize forward to the following ":00".
+ if (tm.tm_sec == 60) {
+ tm.tm_sec -= 1;
+ offset -= 1;
+ subseconds = detail::femtoseconds::zero();
+ }
+
+ if (!saw_year) {
+ year = year_t{tm.tm_year};
+ if (year > kyearmax - 1900) {
+ // Platform-dependent, maybe unreachable.
+ if (err != nullptr) *err = "Out-of-range year";
+ return false;
+ }
+ year += 1900;
+ }
+
+ // Compute year, tm.tm_mon and tm.tm_mday if we parsed a week number.
+ if (week_num != -1) {
+ if (!FromWeek(week_num, week_start, &year, &tm)) {
+ if (err != nullptr) *err = "Out-of-range field";
+ return false;
+ }
+ }
+
+ const int month = tm.tm_mon + 1;
+ civil_second cs(year, month, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+ // parse() should not allow normalization. Due to the restricted field
+ // ranges above (see ParseInt()), the only possibility is for days to roll
+ // into months. That is, parsing "Sep 31" should not produce "Oct 1".
+ if (cs.month() != month || cs.day() != tm.tm_mday) {
+ if (err != nullptr) *err = "Out-of-range field";
+ return false;
+ }
+
+ // Accounts for the offset adjustment before converting to absolute time.
+ if ((offset < 0 && cs > civil_second::max() + offset) ||
+ (offset > 0 && cs < civil_second::min() + offset)) {
+ if (err != nullptr) *err = "Out-of-range field";
+ return false;
+ }
+ cs -= offset;
+
+ const auto tp = ptz.lookup(cs).pre;
+ // Checks for overflow/underflow and returns an error as necessary.
+ if (tp == time_point<seconds>::max()) {
+ const auto al = ptz.lookup(time_point<seconds>::max());
+ if (cs > al.cs) {
+ if (err != nullptr) *err = "Out-of-range field";
+ return false;
+ }
+ }
+ if (tp == time_point<seconds>::min()) {
+ const auto al = ptz.lookup(time_point<seconds>::min());
+ if (cs < al.cs) {
+ if (err != nullptr) *err = "Out-of-range field";
+ return false;
+ }
+ }
+
+ *sec = tp;
+ *fs = subseconds;
+ return true;
+}
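+
+// Illustrative usage (not part of the original source):
+//
+//   time_point<seconds> tp;
+//   detail::femtoseconds fs;
+//   TString err;
+//   if (parse("%Y-%m-%d %H:%M:%S %Ez", "2009-02-13 23:31:30 +00:00",
+//             utc_time_zone(), &tp, &fs, &err)) {
+//     // ToUnixSeconds(tp) == 1234567890
+//   }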
+
+} // namespace detail
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.cc
new file mode 100644
index 00000000000..2fe6d23c71c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.cc
@@ -0,0 +1,45 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "time_zone_if.h"
+
+#include "y_absl/base/config.h"
+#include "time_zone_info.h"
+#include "time_zone_libc.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+std::unique_ptr<TimeZoneIf> TimeZoneIf::Load(const TString& name) {
+ // Support "libc:localtime" and "libc:*" to access the legacy
+ // localtime and UTC support respectively from the C library.
+ if (name.compare(0, 5, "libc:") == 0) {
+ return std::unique_ptr<TimeZoneIf>(new TimeZoneLibC(name.substr(5)));
+ }
+
+ // Otherwise use the "zoneinfo" implementation by default.
+ std::unique_ptr<TimeZoneInfo> tz(new TimeZoneInfo);
+ if (!tz->Load(name)) tz.reset();
+ return std::unique_ptr<TimeZoneIf>(tz.release());
+}
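+
+// Illustrative usage (not in the original); per the fallback above, Load()
+// returns nullptr when the zoneinfo data cannot be found or parsed.
+//
+//   auto geo = TimeZoneIf::Load("America/New_York");  // zoneinfo lookup
+//   auto lib = TimeZoneIf::Load("libc:localtime");    // defer to C library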
+
+// Defined out-of-line to avoid emitting a weak vtable in all TUs.
+TimeZoneIf::~TimeZoneIf() {}
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.h
new file mode 100644
index 00000000000..10312badc20
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.h
@@ -0,0 +1,77 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IF_H_
+#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IF_H_
+
+#include <chrono>
+#include <cstdint>
+#include <memory>
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+#include "y_absl/time/internal/cctz/include/cctz/civil_time.h"
+#include "y_absl/time/internal/cctz/include/cctz/time_zone.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// A simple interface used to hide time-zone complexities from time_zone::Impl.
+// Subclasses implement the functions for civil-time conversions in the zone.
+class TimeZoneIf {
+ public:
+ // A factory function for TimeZoneIf implementations.
+ static std::unique_ptr<TimeZoneIf> Load(const TString& name);
+
+ virtual ~TimeZoneIf();
+
+ virtual time_zone::absolute_lookup BreakTime(
+ const time_point<seconds>& tp) const = 0;
+ virtual time_zone::civil_lookup MakeTime(const civil_second& cs) const = 0;
+
+ virtual bool NextTransition(const time_point<seconds>& tp,
+ time_zone::civil_transition* trans) const = 0;
+ virtual bool PrevTransition(const time_point<seconds>& tp,
+ time_zone::civil_transition* trans) const = 0;
+
+ virtual TString Version() const = 0;
+ virtual TString Description() const = 0;
+
+ protected:
+ TimeZoneIf() {}
+};
+
+// Convert between time_point<seconds> and a count of seconds since the
+// Unix epoch. We assume that the std::chrono::system_clock and the
+// Unix clock are second aligned, and that the results are representable.
+// (That is, that they share an epoch, which is required since C++20.)
+inline std::int_fast64_t ToUnixSeconds(const time_point<seconds>& tp) {
+ return (tp - std::chrono::time_point_cast<seconds>(
+ std::chrono::system_clock::from_time_t(0)))
+ .count();
+}
+inline time_point<seconds> FromUnixSeconds(std::int_fast64_t t) {
+ return std::chrono::time_point_cast<seconds>(
+ std::chrono::system_clock::from_time_t(0)) +
+ seconds(t);
+}
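+
+// For example (an illustrative note, not in the original), the two
+// conversions round-trip:
+//
+//   ToUnixSeconds(FromUnixSeconds(1234567890)) == 1234567890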
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IF_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.cc
new file mode 100644
index 00000000000..3810defe3cc
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.cc
@@ -0,0 +1,113 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "time_zone_impl.h"
+
+#include <deque>
+#include <memory>
+#include <mutex>
+#include <util/generic/string.h>
+#include <unordered_map>
+#include <utility>
+
+#include "y_absl/base/config.h"
+#include "time_zone_fixed.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+namespace {
+
+// time_zone::Impls are linked into a map to support fast lookup by name.
+using TimeZoneImplByName =
+ std::unordered_map<TString, const time_zone::Impl*>;
+TimeZoneImplByName* time_zone_map = nullptr;
+
+// Mutual exclusion for time_zone_map.
+std::mutex& TimeZoneMutex() {
+ // This mutex is intentionally "leaked" to avoid the static deinitialization
+ // order fiasco (std::mutex's destructor is not trivial on many platforms).
+ static std::mutex* time_zone_mutex = new std::mutex;
+ return *time_zone_mutex;
+}
+
+} // namespace
+
+time_zone time_zone::Impl::UTC() { return time_zone(UTCImpl()); }
+
+bool time_zone::Impl::LoadTimeZone(const TString& name, time_zone* tz) {
+ const Impl* const utc_impl = UTCImpl();
+
+ // Check for UTC (which is never a key in time_zone_map).
+ auto offset = seconds::zero();
+ if (FixedOffsetFromName(name, &offset) && offset == seconds::zero()) {
+ *tz = time_zone(utc_impl);
+ return true;
+ }
+
+ // Check whether the time zone has already been loaded.
+ {
+ std::lock_guard<std::mutex> lock(TimeZoneMutex());
+ if (time_zone_map != nullptr) {
+ TimeZoneImplByName::const_iterator itr = time_zone_map->find(name);
+ if (itr != time_zone_map->end()) {
+ *tz = time_zone(itr->second);
+ return itr->second != utc_impl;
+ }
+ }
+ }
+
+ // Load the new time zone (outside the lock).
+ std::unique_ptr<const Impl> new_impl(new Impl(name));
+
+ // Add the new time zone to the map.
+ std::lock_guard<std::mutex> lock(TimeZoneMutex());
+ if (time_zone_map == nullptr) time_zone_map = new TimeZoneImplByName;
+ const Impl*& impl = (*time_zone_map)[name];
+ if (impl == nullptr) { // this thread won any load race
+ impl = new_impl->zone_ ? new_impl.release() : utc_impl;
+ }
+ *tz = time_zone(impl);
+ return impl != utc_impl;
+}
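+
+// Illustrative usage (not part of the original source):
+//
+//   time_zone tz;
+//   if (!time_zone::Impl::LoadTimeZone("Not/A_Zone", &tz)) {
+//     // tz now refers to UTC, the fallback for zones that fail to load.
+//   }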
+
+void time_zone::Impl::ClearTimeZoneMapTestOnly() {
+ std::lock_guard<std::mutex> lock(TimeZoneMutex());
+ if (time_zone_map != nullptr) {
+ // Existing time_zone::Impl* entries are in the wild, so we can't delete
+ // them. Instead, we move them to a private container, where they are
+ // logically unreachable but not "leaked". Future requests will result
+ // in reloading the data.
+ static auto* cleared = new std::deque<const time_zone::Impl*>;
+ for (const auto& element : *time_zone_map) {
+ cleared->push_back(element.second);
+ }
+ time_zone_map->clear();
+ }
+}
+
+time_zone::Impl::Impl(const TString& name)
+ : name_(name), zone_(TimeZoneIf::Load(name_)) {}
+
+const time_zone::Impl* time_zone::Impl::UTCImpl() {
+ static const Impl* utc_impl = new Impl("UTC"); // never fails
+ return utc_impl;
+}
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.h
new file mode 100644
index 00000000000..c014ab41dfe
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.h
@@ -0,0 +1,93 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IMPL_H_
+#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IMPL_H_
+
+#include <memory>
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+#include "y_absl/time/internal/cctz/include/cctz/civil_time.h"
+#include "y_absl/time/internal/cctz/include/cctz/time_zone.h"
+#include "time_zone_if.h"
+#include "time_zone_info.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// time_zone::Impl is the internal object referenced by a cctz::time_zone.
+class time_zone::Impl {
+ public:
+ // The UTC time zone. Also used for other time zones that fail to load.
+ static time_zone UTC();
+
+ // Load a named time zone. Returns false if the name is invalid, or if
+ // some other kind of error occurs. Note that loading "UTC" never fails.
+ static bool LoadTimeZone(const TString& name, time_zone* tz);
+
+ // Clears the map of cached time zones. Primarily for use in benchmarks
+ // that gauge the performance of loading/parsing the time-zone data.
+ static void ClearTimeZoneMapTestOnly();
+
+ // The primary key is the time-zone ID (e.g., "America/New_York").
+ const TString& Name() const {
+    // TODO: It would be nice if the zoneinfo data included the zone name.
+ return name_;
+ }
+
+ // Breaks a time_point down to civil-time components in this time zone.
+ time_zone::absolute_lookup BreakTime(const time_point<seconds>& tp) const {
+ return zone_->BreakTime(tp);
+ }
+
+ // Converts the civil-time components in this time zone into a time_point.
+ // That is, the opposite of BreakTime(). The requested civil time may be
+ // ambiguous or illegal due to a change of UTC offset.
+ time_zone::civil_lookup MakeTime(const civil_second& cs) const {
+ return zone_->MakeTime(cs);
+ }
+
+ // Finds the time of the next/previous offset change in this time zone.
+ bool NextTransition(const time_point<seconds>& tp,
+ time_zone::civil_transition* trans) const {
+ return zone_->NextTransition(tp, trans);
+ }
+ bool PrevTransition(const time_point<seconds>& tp,
+ time_zone::civil_transition* trans) const {
+ return zone_->PrevTransition(tp, trans);
+ }
+
+ // Returns an implementation-defined version string for this time zone.
+ TString Version() const { return zone_->Version(); }
+
+ // Returns an implementation-defined description of this time zone.
+ TString Description() const { return zone_->Description(); }
+
+ private:
+ explicit Impl(const TString& name);
+ static const Impl* UTCImpl();
+
+ const TString name_;
+ std::unique_ptr<TimeZoneIf> zone_;
+};
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IMPL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.cc
new file mode 100644
index 00000000000..72f7bdc3ca1
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.cc
@@ -0,0 +1,1027 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements the TimeZoneIf interface using the "zoneinfo"
+// data provided by the IANA Time Zone Database (i.e., the only real game
+// in town).
+//
+// TimeZoneInfo represents the history of UTC-offset changes within a time
+// zone. Most changes are due to daylight-saving rules, but occasionally
+// shifts are made to the time-zone's base offset. The database only attempts
+// to be definitive for times since 1970, so be wary of local-time conversions
+// before that. Also, rule and zone-boundary changes are made at the whim
+// of governments, so the conversion of future times needs to be taken with
+// a grain of salt.
+//
+// For more information see tzfile(5), http://www.iana.org/time-zones, or
+// https://en.wikipedia.org/wiki/Zoneinfo.
+//
+// Note that we assume the proleptic Gregorian calendar and 60-second
+// minutes throughout.
+
+#include "time_zone_info.h"
+
+#include <algorithm>
+#include <cassert>
+#include <chrono>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <functional>
+#include <memory>
+#include <sstream>
+#include <util/generic/string.h>
+#include <utility>
+
+#include "y_absl/base/config.h"
+#include "y_absl/time/internal/cctz/include/cctz/civil_time.h"
+#include "time_zone_fixed.h"
+#include "time_zone_posix.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+namespace {
+
+inline bool IsLeap(year_t year) {
+ return (year % 4) == 0 && ((year % 100) != 0 || (year % 400) == 0);
+}
+
+// The number of days in non-leap and leap years respectively.
+const std::int_least32_t kDaysPerYear[2] = {365, 366};
+
+// The day offsets of the beginning of each (1-based) month in non-leap and
+// leap years respectively (e.g., 335 days before December in a leap year).
+const std::int_least16_t kMonthOffsets[2][1 + 12 + 1] = {
+ {-1, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
+ {-1, 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366},
+};
+
+// We reject leap-second encoded zoneinfo and so assume 60-second minutes.
+const std::int_least32_t kSecsPerDay = 24 * 60 * 60;
+
+// 400-year chunks always have 146097 days (20871 weeks).
+const std::int_least64_t kSecsPer400Years = 146097LL * kSecsPerDay;
+
+// Like kDaysPerYear[] but scaled up by a factor of kSecsPerDay.
+const std::int_least32_t kSecsPerYear[2] = {
+ 365 * kSecsPerDay,
+ 366 * kSecsPerDay,
+};
+
+// Convert a cctz::weekday to a POSIX TZ weekday number (0=Sun, ..., 6=Sat).
+inline int ToPosixWeekday(weekday wd) {
+ switch (wd) {
+ case weekday::sunday:
+ return 0;
+ case weekday::monday:
+ return 1;
+ case weekday::tuesday:
+ return 2;
+ case weekday::wednesday:
+ return 3;
+ case weekday::thursday:
+ return 4;
+ case weekday::friday:
+ return 5;
+ case weekday::saturday:
+ return 6;
+ }
+ return 0; /*NOTREACHED*/
+}
+
+// Single-byte, unsigned numeric values are encoded directly.
+inline std::uint_fast8_t Decode8(const char* cp) {
+ return static_cast<std::uint_fast8_t>(*cp) & 0xff;
+}
+
+// Multi-byte numeric values are encoded using an MSB-first,
+// twos-complement representation. These helpers decode, from
+// the given address, 4-byte and 8-byte values respectively.
+// Note: If int_fastXX_t == intXX_t and this machine is not
+// twos complement, then there will be at least one input value
+// we cannot represent.
+std::int_fast32_t Decode32(const char* cp) {
+ std::uint_fast32_t v = 0;
+ for (int i = 0; i != (32 / 8); ++i) v = (v << 8) | Decode8(cp++);
+ const std::int_fast32_t s32max = 0x7fffffff;
+ const auto s32maxU = static_cast<std::uint_fast32_t>(s32max);
+ if (v <= s32maxU) return static_cast<std::int_fast32_t>(v);
+ return static_cast<std::int_fast32_t>(v - s32maxU - 1) - s32max - 1;
+}
+
+std::int_fast64_t Decode64(const char* cp) {
+ std::uint_fast64_t v = 0;
+ for (int i = 0; i != (64 / 8); ++i) v = (v << 8) | Decode8(cp++);
+ const std::int_fast64_t s64max = 0x7fffffffffffffff;
+ const auto s64maxU = static_cast<std::uint_fast64_t>(s64max);
+ if (v <= s64maxU) return static_cast<std::int_fast64_t>(v);
+ return static_cast<std::int_fast64_t>(v - s64maxU - 1) - s64max - 1;
+}
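+
+// For example (an illustrative note, not in the original), Decode32() on
+// the big-endian bytes ff ff ff fe yields -2 without relying on signed
+// overflow:
+//
+//   const char buf[4] = {'\xff', '\xff', '\xff', '\xfe'};
+//   Decode32(buf);  // == -2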
+
+// Generate a year-relative offset for a PosixTransition.
+std::int_fast64_t TransOffset(bool leap_year, int jan1_weekday,
+ const PosixTransition& pt) {
+ std::int_fast64_t days = 0;
+ switch (pt.date.fmt) {
+ case PosixTransition::J: {
+ days = pt.date.j.day;
+ if (!leap_year || days < kMonthOffsets[1][3]) days -= 1;
+ break;
+ }
+ case PosixTransition::N: {
+ days = pt.date.n.day;
+ break;
+ }
+ case PosixTransition::M: {
+ const bool last_week = (pt.date.m.week == 5);
+ days = kMonthOffsets[leap_year][pt.date.m.month + last_week];
+ const std::int_fast64_t weekday = (jan1_weekday + days) % 7;
+ if (last_week) {
+ days -= (weekday + 7 - 1 - pt.date.m.weekday) % 7 + 1;
+ } else {
+ days += (pt.date.m.weekday + 7 - weekday) % 7;
+ days += (pt.date.m.week - 1) * 7;
+ }
+ break;
+ }
+ }
+ return (days * kSecsPerDay) + pt.time.offset;
+}
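+
+// For example (an illustrative note, not in the original), the common US
+// rule "M3.2.0/2" (second Sunday in March at 02:00) arrives here as fmt ==
+// PosixTransition::M with month=3, week=2, weekday=0. For 2015 (non-leap,
+// jan1_weekday == 4), the computation yields days == 66, i.e. March 8th,
+// and returns 66 * kSecsPerDay + 7200.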
+
+inline time_zone::civil_lookup MakeUnique(const time_point<seconds>& tp) {
+ time_zone::civil_lookup cl;
+ cl.kind = time_zone::civil_lookup::UNIQUE;
+ cl.pre = cl.trans = cl.post = tp;
+ return cl;
+}
+
+inline time_zone::civil_lookup MakeUnique(std::int_fast64_t unix_time) {
+ return MakeUnique(FromUnixSeconds(unix_time));
+}
+
+inline time_zone::civil_lookup MakeSkipped(const Transition& tr,
+ const civil_second& cs) {
+ time_zone::civil_lookup cl;
+ cl.kind = time_zone::civil_lookup::SKIPPED;
+ cl.pre = FromUnixSeconds(tr.unix_time - 1 + (cs - tr.prev_civil_sec));
+ cl.trans = FromUnixSeconds(tr.unix_time);
+ cl.post = FromUnixSeconds(tr.unix_time - (tr.civil_sec - cs));
+ return cl;
+}
+
+inline time_zone::civil_lookup MakeRepeated(const Transition& tr,
+ const civil_second& cs) {
+ time_zone::civil_lookup cl;
+ cl.kind = time_zone::civil_lookup::REPEATED;
+ cl.pre = FromUnixSeconds(tr.unix_time - 1 - (tr.prev_civil_sec - cs));
+ cl.trans = FromUnixSeconds(tr.unix_time);
+ cl.post = FromUnixSeconds(tr.unix_time + (cs - tr.civil_sec));
+ return cl;
+}
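+
+// For example (an illustrative note, not in the original): in a zone that
+// springs forward from 02:00 to 03:00 at transition time trans, the skipped
+// civil time 02:30 maps to cl.pre == trans + 30min (02:30 read with the
+// pre-transition offset) and cl.post == trans - 30min (02:30 read with the
+// post-transition offset).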
+
+inline civil_second YearShift(const civil_second& cs, year_t shift) {
+ return civil_second(cs.year() + shift, cs.month(), cs.day(), cs.hour(),
+ cs.minute(), cs.second());
+}
+
+} // namespace
+
+// What (no leap-seconds) UTC+seconds zoneinfo would look like.
+bool TimeZoneInfo::ResetToBuiltinUTC(const seconds& offset) {
+ transition_types_.resize(1);
+ TransitionType& tt(transition_types_.back());
+ tt.utc_offset = static_cast<std::int_least32_t>(offset.count());
+ tt.is_dst = false;
+ tt.abbr_index = 0;
+
+ // We temporarily add some redundant, contemporary (2015 through 2025)
+ // transitions for performance reasons. See TimeZoneInfo::LocalTime().
+ // TODO: Fix the performance issue and remove the extra transitions.
+ transitions_.clear();
+ transitions_.reserve(12);
+ for (const std::int_fast64_t unix_time : {
+ -(1LL << 59), // a "first half" transition
+ 1420070400LL, // 2015-01-01T00:00:00+00:00
+ 1451606400LL, // 2016-01-01T00:00:00+00:00
+ 1483228800LL, // 2017-01-01T00:00:00+00:00
+ 1514764800LL, // 2018-01-01T00:00:00+00:00
+ 1546300800LL, // 2019-01-01T00:00:00+00:00
+ 1577836800LL, // 2020-01-01T00:00:00+00:00
+ 1609459200LL, // 2021-01-01T00:00:00+00:00
+ 1640995200LL, // 2022-01-01T00:00:00+00:00
+ 1672531200LL, // 2023-01-01T00:00:00+00:00
+ 1704067200LL, // 2024-01-01T00:00:00+00:00
+ 1735689600LL, // 2025-01-01T00:00:00+00:00
+ }) {
+ Transition& tr(*transitions_.emplace(transitions_.end()));
+ tr.unix_time = unix_time;
+ tr.type_index = 0;
+ tr.civil_sec = LocalTime(tr.unix_time, tt).cs;
+ tr.prev_civil_sec = tr.civil_sec - 1;
+ }
+
+ default_transition_type_ = 0;
+ abbreviations_ = FixedOffsetToAbbr(offset);
+ abbreviations_.append(1, '\0');
+ future_spec_.clear(); // never needed for a fixed-offset zone
+ extended_ = false;
+
+ tt.civil_max = LocalTime(seconds::max().count(), tt).cs;
+ tt.civil_min = LocalTime(seconds::min().count(), tt).cs;
+
+ transitions_.shrink_to_fit();
+ return true;
+}
+
+// Builds the in-memory header using the raw bytes from the file.
+bool TimeZoneInfo::Header::Build(const tzhead& tzh) {
+ std::int_fast32_t v;
+ if ((v = Decode32(tzh.tzh_timecnt)) < 0) return false;
+ timecnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_typecnt)) < 0) return false;
+ typecnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_charcnt)) < 0) return false;
+ charcnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_leapcnt)) < 0) return false;
+ leapcnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_ttisstdcnt)) < 0) return false;
+ ttisstdcnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_ttisutcnt)) < 0) return false;
+ ttisutcnt = static_cast<std::size_t>(v);
+ return true;
+}
+
+// How many bytes of data are associated with this header. The result
+// depends upon whether this is a section with 4-byte or 8-byte times.
+std::size_t TimeZoneInfo::Header::DataLength(std::size_t time_len) const {
+ std::size_t len = 0;
+ len += (time_len + 1) * timecnt; // unix_time + type_index
+ len += (4 + 1 + 1) * typecnt; // utc_offset + is_dst + abbr_index
+ len += 1 * charcnt; // abbreviations
+ len += (time_len + 4) * leapcnt; // leap-time + TAI-UTC
+ len += 1 * ttisstdcnt; // UTC/local indicators
+ len += 1 * ttisutcnt; // standard/wall indicators
+ return len;
+}
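+
+// For example (hypothetical counts): an 8-byte-time section with
+// timecnt = 3, typecnt = 2, charcnt = 8, leapcnt = 0, ttisstdcnt = 2, and
+// ttisutcnt = 2 is followed by (8+1)*3 + (4+1+1)*2 + 8 + 0 + 2 + 2 = 51
+// bytes of data.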
+
+// zic(8) can generate no-op transitions when a zone changes rules at an
+// instant when there is actually no discontinuity. So we check whether
+// two transitions have equivalent types (same offset/is_dst/abbr).
+bool TimeZoneInfo::EquivTransitions(std::uint_fast8_t tt1_index,
+ std::uint_fast8_t tt2_index) const {
+ if (tt1_index == tt2_index) return true;
+ const TransitionType& tt1(transition_types_[tt1_index]);
+ const TransitionType& tt2(transition_types_[tt2_index]);
+ if (tt1.utc_offset != tt2.utc_offset) return false;
+ if (tt1.is_dst != tt2.is_dst) return false;
+ if (tt1.abbr_index != tt2.abbr_index) return false;
+ return true;
+}
+
+// Find/make a transition type with these attributes.
+bool TimeZoneInfo::GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
+ const TString& abbr,
+ std::uint_least8_t* index) {
+ std::size_t type_index = 0;
+ std::size_t abbr_index = abbreviations_.size();
+ for (; type_index != transition_types_.size(); ++type_index) {
+ const TransitionType& tt(transition_types_[type_index]);
+ const char* tt_abbr = &*abbreviations_.begin() + tt.abbr_index;
+ if (tt_abbr == abbr) abbr_index = tt.abbr_index;
+ if (tt.utc_offset == utc_offset && tt.is_dst == is_dst) {
+ if (abbr_index == tt.abbr_index) break; // reuse
+ }
+ }
+ if (type_index > 255 || abbr_index > 255) {
+ // No index space (8 bits) available for a new type or abbreviation.
+ return false;
+ }
+ if (type_index == transition_types_.size()) {
+ TransitionType& tt(*transition_types_.emplace(transition_types_.end()));
+ tt.utc_offset = static_cast<std::int_least32_t>(utc_offset);
+ tt.is_dst = is_dst;
+ if (abbr_index == abbreviations_.size()) {
+ abbreviations_.append(abbr);
+ abbreviations_.append(1, '\0');
+ }
+ tt.abbr_index = static_cast<std::uint_least8_t>(abbr_index);
+ }
+ *index = static_cast<std::uint_least8_t>(type_index);
+ return true;
+}
+
+// Use the POSIX-TZ-environment-variable-style string to handle times
+// in years after the last transition stored in the zoneinfo data.
+bool TimeZoneInfo::ExtendTransitions() {
+ extended_ = false;
+ if (future_spec_.empty()) return true; // last transition prevails
+
+ PosixTimeZone posix;
+ if (!ParsePosixSpec(future_spec_, &posix)) return false;
+
+ // Find transition type for the future std specification.
+ std::uint_least8_t std_ti;
+ if (!GetTransitionType(posix.std_offset, false, posix.std_abbr, &std_ti))
+ return false;
+
+ if (posix.dst_abbr.empty()) { // std only
+ // The future specification should match the last transition, and
+ // that means that handling the future will fall out naturally.
+ return EquivTransitions(transitions_.back().type_index, std_ti);
+ }
+
+ // Find transition type for the future dst specification.
+ std::uint_least8_t dst_ti;
+ if (!GetTransitionType(posix.dst_offset, true, posix.dst_abbr, &dst_ti))
+ return false;
+
+ // Extend the transitions for an additional 400 years using the
+ // future specification. Years beyond those can be handled by
+ // mapping back to a cycle-equivalent year within that range.
+ // We may need two additional transitions for the current year.
+ transitions_.reserve(transitions_.size() + 400 * 2 + 2);
+ extended_ = true;
+
+ const Transition& last(transitions_.back());
+ const std::int_fast64_t last_time = last.unix_time;
+ const TransitionType& last_tt(transition_types_[last.type_index]);
+ last_year_ = LocalTime(last_time, last_tt).cs.year();
+ bool leap_year = IsLeap(last_year_);
+ const civil_second jan1(last_year_);
+ std::int_fast64_t jan1_time = jan1 - civil_second();
+ int jan1_weekday = ToPosixWeekday(get_weekday(jan1));
+
+ Transition dst = {0, dst_ti, civil_second(), civil_second()};
+ Transition std = {0, std_ti, civil_second(), civil_second()};
+ for (const year_t limit = last_year_ + 400;; ++last_year_) {
+ auto dst_trans_off = TransOffset(leap_year, jan1_weekday, posix.dst_start);
+ auto std_trans_off = TransOffset(leap_year, jan1_weekday, posix.dst_end);
+ dst.unix_time = jan1_time + dst_trans_off - posix.std_offset;
+ std.unix_time = jan1_time + std_trans_off - posix.dst_offset;
+ const auto* ta = dst.unix_time < std.unix_time ? &dst : &std;
+ const auto* tb = dst.unix_time < std.unix_time ? &std : &dst;
+ if (last_time < tb->unix_time) {
+ if (last_time < ta->unix_time) transitions_.push_back(*ta);
+ transitions_.push_back(*tb);
+ }
+ if (last_year_ == limit) break;
+ jan1_time += kSecsPerYear[leap_year];
+ jan1_weekday = (jan1_weekday + kDaysPerYear[leap_year]) % 7;
+ leap_year = !leap_year && IsLeap(last_year_ + 1);
+ }
+
+ return true;
+}
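+
+// Note: a 400-year Gregorian cycle spans exactly 146097 days, and 146097
+// is divisible by 7, so leap-year structure, weekdays, and therefore every
+// Mm.w.d rule repeat; kSecsPer400Years == 146097 * 86400 == 12622780800
+// seconds, which is what makes the year mapping above exact.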
+
+bool TimeZoneInfo::Load(ZoneInfoSource* zip) {
+ // Read and validate the header.
+ tzhead tzh;
+ if (zip->Read(&tzh, sizeof(tzh)) != sizeof(tzh)) return false;
+ if (strncmp(tzh.tzh_magic, TZ_MAGIC, sizeof(tzh.tzh_magic)) != 0)
+ return false;
+ Header hdr;
+ if (!hdr.Build(tzh)) return false;
+ std::size_t time_len = 4;
+ if (tzh.tzh_version[0] != '\0') {
+ // Skip the 4-byte data.
+ if (zip->Skip(hdr.DataLength(time_len)) != 0) return false;
+ // Read and validate the header for the 8-byte data.
+ if (zip->Read(&tzh, sizeof(tzh)) != sizeof(tzh)) return false;
+ if (strncmp(tzh.tzh_magic, TZ_MAGIC, sizeof(tzh.tzh_magic)) != 0)
+ return false;
+ if (tzh.tzh_version[0] == '\0') return false;
+ if (!hdr.Build(tzh)) return false;
+ time_len = 8;
+ }
+ if (hdr.typecnt == 0) return false;
+ if (hdr.leapcnt != 0) {
+ // This code assumes 60-second minutes so we do not want
+ // the leap-second encoded zoneinfo. We could reverse the
+ // compensation, but the "right" encoding is rarely used
+ // so currently we simply reject such data.
+ return false;
+ }
+ if (hdr.ttisstdcnt != 0 && hdr.ttisstdcnt != hdr.typecnt) return false;
+ if (hdr.ttisutcnt != 0 && hdr.ttisutcnt != hdr.typecnt) return false;
+
+ // Read the data into a local buffer.
+ std::size_t len = hdr.DataLength(time_len);
+ std::vector<char> tbuf(len);
+ if (zip->Read(tbuf.data(), len) != len) return false;
+ const char* bp = tbuf.data();
+
+ // Decode and validate the transitions.
+ transitions_.reserve(hdr.timecnt + 2);
+ transitions_.resize(hdr.timecnt);
+ for (std::size_t i = 0; i != hdr.timecnt; ++i) {
+ transitions_[i].unix_time = (time_len == 4) ? Decode32(bp) : Decode64(bp);
+ bp += time_len;
+ if (i != 0) {
+ // Check that the transitions are ordered by time (as zic guarantees).
+ if (!Transition::ByUnixTime()(transitions_[i - 1], transitions_[i]))
+ return false; // out of order
+ }
+ }
+ bool seen_type_0 = false;
+ for (std::size_t i = 0; i != hdr.timecnt; ++i) {
+ transitions_[i].type_index = Decode8(bp++);
+ if (transitions_[i].type_index >= hdr.typecnt) return false;
+ if (transitions_[i].type_index == 0) seen_type_0 = true;
+ }
+
+ // Decode and validate the transition types.
+ transition_types_.reserve(hdr.typecnt + 2);
+ transition_types_.resize(hdr.typecnt);
+ for (std::size_t i = 0; i != hdr.typecnt; ++i) {
+ transition_types_[i].utc_offset =
+ static_cast<std::int_least32_t>(Decode32(bp));
+ if (transition_types_[i].utc_offset >= kSecsPerDay ||
+ transition_types_[i].utc_offset <= -kSecsPerDay)
+ return false;
+ bp += 4;
+ transition_types_[i].is_dst = (Decode8(bp++) != 0);
+ transition_types_[i].abbr_index = Decode8(bp++);
+ if (transition_types_[i].abbr_index >= hdr.charcnt) return false;
+ }
+
+ // Determine the before-first-transition type.
+ default_transition_type_ = 0;
+ if (seen_type_0 && hdr.timecnt != 0) {
+ std::uint_fast8_t index = 0;
+ if (transition_types_[0].is_dst) {
+ index = transitions_[0].type_index;
+ while (index != 0 && transition_types_[index].is_dst) --index;
+ }
+ while (index != hdr.typecnt && transition_types_[index].is_dst) ++index;
+ if (index != hdr.typecnt) default_transition_type_ = index;
+ }
+
+ // Copy all the abbreviations.
+ abbreviations_.reserve(hdr.charcnt + 10);
+ abbreviations_.assign(bp, hdr.charcnt);
+ bp += hdr.charcnt;
+
+ // Skip the unused portions. We've already dispensed with leap-second
+ // encoded zoneinfo. The ttisstd/ttisgmt indicators only apply when
+ // interpreting a POSIX spec that does not include start/end rules, and
+ // that isn't the case here (see "zic -p").
+ bp += (8 + 4) * hdr.leapcnt; // leap-time + TAI-UTC
+ bp += 1 * hdr.ttisstdcnt; // UTC/local indicators
+ bp += 1 * hdr.ttisutcnt; // standard/wall indicators
+ assert(bp == tbuf.data() + tbuf.size());
+
+ future_spec_.clear();
+ if (tzh.tzh_version[0] != '\0') {
+ // Snarf up the NL-enclosed future POSIX spec. Note
+ // that version '3' files utilize an extended format.
+ auto get_char = [](ZoneInfoSource* azip) -> int {
+ unsigned char ch; // all non-EOF results are positive
+ return (azip->Read(&ch, 1) == 1) ? ch : EOF;
+ };
+ if (get_char(zip) != '\n') return false;
+ for (int c = get_char(zip); c != '\n'; c = get_char(zip)) {
+ if (c == EOF) return false;
+ future_spec_.push_back(static_cast<char>(c));
+ }
+ }
+
+ // We don't check for EOF so that we're forwards compatible.
+
+ // If we did not find version information during the standard loading
+ // process (embedding it is unsupported as of tzh_version '3'), then ask
+ // the ZoneInfoSource for any out-of-band version string it may be privy
+ // to.
+ if (version_.empty()) {
+ version_ = zip->Version();
+ }
+
+ // Trim redundant transitions. zic may have added these to work around
+ // differences between the glibc and reference implementations (see
+ // zic.c:dontmerge) and the Qt library (see zic.c:WORK_AROUND_QTBUG_53071).
+ // For us, they just get in the way when we do future_spec_ extension.
+ while (hdr.timecnt > 1) {
+ if (!EquivTransitions(transitions_[hdr.timecnt - 1].type_index,
+ transitions_[hdr.timecnt - 2].type_index)) {
+ break;
+ }
+ hdr.timecnt -= 1;
+ }
+ transitions_.resize(hdr.timecnt);
+
+ // Ensure that there is always a transition in the first half of the
+ // time line (the second half is handled below) so that the signed
+ // difference between a civil_second and the civil_second of its
+ // previous transition is always representable, without overflow.
+ if (transitions_.empty() || transitions_.front().unix_time >= 0) {
+ Transition& tr(*transitions_.emplace(transitions_.begin()));
+ tr.unix_time = -(1LL << 59); // -18267312070-10-26T17:01:52+00:00
+ tr.type_index = default_transition_type_;
+ }
+
+ // Extend the transitions using the future specification.
+ if (!ExtendTransitions()) return false;
+
+ // Ensure that there is always a transition in the second half of the
+ // time line (the first half is handled above) so that the signed
+ // difference between a civil_second and the civil_second of its
+ // previous transition is always representable, without overflow.
+ const Transition& last(transitions_.back());
+ if (last.unix_time < 0) {
+ const std::uint_fast8_t type_index = last.type_index;
+ Transition& tr(*transitions_.emplace(transitions_.end()));
+ tr.unix_time = 2147483647; // 2038-01-19T03:14:07+00:00
+ tr.type_index = type_index;
+ }
+
+ // Compute the local civil time for each transition and the preceding
+ // second. These will be used for reverse conversions in MakeTime().
+ const TransitionType* ttp = &transition_types_[default_transition_type_];
+ for (std::size_t i = 0; i != transitions_.size(); ++i) {
+ Transition& tr(transitions_[i]);
+ tr.prev_civil_sec = LocalTime(tr.unix_time, *ttp).cs - 1;
+ ttp = &transition_types_[tr.type_index];
+ tr.civil_sec = LocalTime(tr.unix_time, *ttp).cs;
+ if (i != 0) {
+ // Check that the transitions are ordered by civil time. Essentially
+ // this means that an offset change cannot cross another such change.
+ // No one does this in practice, and we depend on it in MakeTime().
+ if (!Transition::ByCivilTime()(transitions_[i - 1], tr))
+ return false; // out of order
+ }
+ }
+
+ // Compute the maximum/minimum civil times that can be converted to a
+ // time_point<seconds> for each of the zone's transition types.
+ for (auto& tt : transition_types_) {
+ tt.civil_max = LocalTime(seconds::max().count(), tt).cs;
+ tt.civil_min = LocalTime(seconds::min().count(), tt).cs;
+ }
+
+ transitions_.shrink_to_fit();
+ return true;
+}
+
+namespace {
+
+using FilePtr = std::unique_ptr<FILE, int (*)(FILE*)>;
+
+// fopen(3) adaptor.
+inline FilePtr FOpen(const char* path, const char* mode) {
+#if defined(_MSC_VER)
+ FILE* fp;
+ if (fopen_s(&fp, path, mode) != 0) fp = nullptr;
+ return FilePtr(fp, fclose);
+#else
+ // TODO: Enable the close-on-exec flag.
+ return FilePtr(fopen(path, mode), fclose);
+#endif
+}
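+
+// Usage sketch (the path is illustrative, not one the library requires):
+//   if (auto fp = FOpen("/usr/share/zoneinfo/UTC", "rb")) {
+//     char magic[4];
+//     if (fread(magic, 1, sizeof(magic), fp.get()) == 4) { /* use it */ }
+//   }  // fclose() runs automatically when fp goes out of scope.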
+
+// A stdio(3)-backed implementation of ZoneInfoSource.
+class FileZoneInfoSource : public ZoneInfoSource {
+ public:
+ static std::unique_ptr<ZoneInfoSource> Open(const TString& name);
+
+ std::size_t Read(void* ptr, std::size_t size) override {
+ size = std::min(size, len_);
+ std::size_t nread = fread(ptr, 1, size, fp_.get());
+ len_ -= nread;
+ return nread;
+ }
+ int Skip(std::size_t offset) override {
+ offset = std::min(offset, len_);
+ int rc = fseek(fp_.get(), static_cast<long>(offset), SEEK_CUR);
+ if (rc == 0) len_ -= offset;
+ return rc;
+ }
+ TString Version() const override {
+ // TODO: It would be nice if the zoneinfo data included the tzdb version.
+ return TString();
+ }
+
+ protected:
+ explicit FileZoneInfoSource(
+ FilePtr fp, std::size_t len = std::numeric_limits<std::size_t>::max())
+ : fp_(std::move(fp)), len_(len) {}
+
+ private:
+ FilePtr fp_;
+ std::size_t len_;
+};
+
+std::unique_ptr<ZoneInfoSource> FileZoneInfoSource::Open(
+ const TString& name) {
+ // Use of the "file:" prefix is intended for testing purposes only.
+ const std::size_t pos = (name.compare(0, 5, "file:") == 0) ? 5 : 0;
+
+ // Map the time-zone name to a path name.
+ TString path;
+ if (pos == name.size() || name[pos] != '/') {
+ const char* tzdir = "/usr/share/zoneinfo";
+ char* tzdir_env = nullptr;
+#if defined(_MSC_VER)
+ _dupenv_s(&tzdir_env, nullptr, "TZDIR");
+#else
+ tzdir_env = std::getenv("TZDIR");
+#endif
+ if (tzdir_env && *tzdir_env) tzdir = tzdir_env;
+ path += tzdir;
+ path += '/';
+#if defined(_MSC_VER)
+ free(tzdir_env);
+#endif
+ }
+ path.append(name, pos, TString::npos);
+
+ // Open the zoneinfo file.
+ auto fp = FOpen(path.c_str(), "rb");
+ if (fp == nullptr) return nullptr;
+ return std::unique_ptr<ZoneInfoSource>(new FileZoneInfoSource(std::move(fp)));
+}
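+
+// For example (assuming no TZDIR override), FileZoneInfoSource::Open maps
+// the name "America/New_York" to "/usr/share/zoneinfo/America/New_York",
+// while an absolute name such as "/etc/localtime" (or "file:/etc/localtime"
+// in tests) is opened verbatim.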
+
+class AndroidZoneInfoSource : public FileZoneInfoSource {
+ public:
+ static std::unique_ptr<ZoneInfoSource> Open(const TString& name);
+ TString Version() const override { return version_; }
+
+ private:
+ explicit AndroidZoneInfoSource(FilePtr fp, std::size_t len,
+ TString version)
+ : FileZoneInfoSource(std::move(fp), len), version_(std::move(version)) {}
+ TString version_;
+};
+
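+// Layout sketch of the tzdata file parsed below (field offsets inferred
+// from the reads; bionic.cpp is the authoritative reference):
+//   header: bytes 0-5 "tzdata" magic, 6-11 version (NUL at byte 11),
+//           12-15 index_offset, 16-19 data_offset,
+//           20-23 zonetab_offset (unused here)
+//   entry:  bytes 0-39 NUL-padded zone name, 40-43 start (relative to
+//           data_offset), 44-47 length, 48-51 unused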
+std::unique_ptr<ZoneInfoSource> AndroidZoneInfoSource::Open(
+ const TString& name) {
+ // Use of the "file:" prefix is intended for testing purposes only.
+ const std::size_t pos = (name.compare(0, 5, "file:") == 0) ? 5 : 0;
+
+ // See Android's libc/tzcode/bionic.cpp for additional information.
+ for (const char* tzdata : {"/data/misc/zoneinfo/current/tzdata",
+ "/system/usr/share/zoneinfo/tzdata"}) {
+ auto fp = FOpen(tzdata, "rb");
+ if (fp == nullptr) continue;
+
+ char hbuf[24]; // covers header.zonetab_offset too
+ if (fread(hbuf, 1, sizeof(hbuf), fp.get()) != sizeof(hbuf)) continue;
+ if (strncmp(hbuf, "tzdata", 6) != 0) continue;
+ const char* vers = (hbuf[11] == '\0') ? hbuf + 6 : "";
+ const std::int_fast32_t index_offset = Decode32(hbuf + 12);
+ const std::int_fast32_t data_offset = Decode32(hbuf + 16);
+ if (index_offset < 0 || data_offset < index_offset) continue;
+ if (fseek(fp.get(), static_cast<long>(index_offset), SEEK_SET) != 0)
+ continue;
+
+ char ebuf[52]; // covers entry.unused too
+ const std::size_t index_size =
+ static_cast<std::size_t>(data_offset - index_offset);
+ const std::size_t zonecnt = index_size / sizeof(ebuf);
+ if (zonecnt * sizeof(ebuf) != index_size) continue;
+ for (std::size_t i = 0; i != zonecnt; ++i) {
+ if (fread(ebuf, 1, sizeof(ebuf), fp.get()) != sizeof(ebuf)) break;
+ const std::int_fast32_t start = data_offset + Decode32(ebuf + 40);
+ const std::int_fast32_t length = Decode32(ebuf + 44);
+ if (start < 0 || length < 0) break;
+ ebuf[40] = '\0'; // ensure zone name is NUL terminated
+ if (strcmp(name.c_str() + pos, ebuf) == 0) {
+ if (fseek(fp.get(), static_cast<long>(start), SEEK_SET) != 0) break;
+ return std::unique_ptr<ZoneInfoSource>(new AndroidZoneInfoSource(
+ std::move(fp), static_cast<std::size_t>(length), vers));
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+// A zoneinfo source for use inside Fuchsia components. This attempts to
+// read zoneinfo files from one of several known paths in a component's
+// incoming namespace. [Config data][1] is preferred, but package-specific
+// resources are also supported.
+//
+// Fuchsia's implementation supports `FileZoneInfoSource::Version()`.
+//
+// [1]:
+// https://fuchsia.dev/fuchsia-src/development/components/data#using_config_data_in_your_component
+class FuchsiaZoneInfoSource : public FileZoneInfoSource {
+ public:
+ static std::unique_ptr<ZoneInfoSource> Open(const TString& name);
+ TString Version() const override { return version_; }
+
+ private:
+ explicit FuchsiaZoneInfoSource(FilePtr fp, TString version)
+ : FileZoneInfoSource(std::move(fp)), version_(std::move(version)) {}
+ TString version_;
+};
+
+std::unique_ptr<ZoneInfoSource> FuchsiaZoneInfoSource::Open(
+ const TString& name) {
+ // Use of the "file:" prefix is intended for testing purposes only.
+ const std::size_t pos = (name.compare(0, 5, "file:") == 0) ? 5 : 0;
+
+ // Prefixes where a Fuchsia component might find zoneinfo files,
+ // in descending order of preference.
+ const auto kTzdataPrefixes = {
+ "/config/data/tzdata/",
+ "/pkg/data/tzdata/",
+ "/data/tzdata/",
+ };
+ const auto kEmptyPrefix = {""};
+ const bool name_absolute = (pos != name.size() && name[pos] == '/');
+ const auto prefixes = name_absolute ? kEmptyPrefix : kTzdataPrefixes;
+
+ // Fuchsia builds place zoneinfo files at "<prefix><format><name>".
+ for (const TString prefix : prefixes) {
+ TString path = prefix;
+ if (!prefix.empty()) path += "zoneinfo/tzif2/"; // format
+ path.append(name, pos, TString::npos);
+
+ auto fp = FOpen(path.c_str(), "rb");
+ if (fp == nullptr) continue;
+
+ std::string version;
+ if (!prefix.empty()) {
+ // Fuchsia builds place the version in "<prefix>revision.txt".
+ std::ifstream version_stream(prefix + "revision.txt");
+ if (version_stream.is_open()) {
+ // revision.txt should contain no newlines, but to be
+ // defensive we read just the first line.
+ std::getline(version_stream, version);
+ }
+ }
+
+ return std::unique_ptr<ZoneInfoSource>(
+ new FuchsiaZoneInfoSource(std::move(fp), std::move(version)));
+ }
+
+ return nullptr;
+}
+
+} // namespace
+
+bool TimeZoneInfo::Load(const TString& name) {
+ // We can ensure that the loading of UTC or any other fixed-offset
+ // zone never fails because the simple, fixed-offset state can be
+ // internally generated. Note that this depends on our choice to not
+ // accept leap-second encoded ("right") zoneinfo.
+ auto offset = seconds::zero();
+ if (FixedOffsetFromName(name, &offset)) {
+ return ResetToBuiltinUTC(offset);
+ }
+
+ // Find and use a ZoneInfoSource to load the named zone.
+ auto zip = cctz_extension::zone_info_source_factory(
+ name, [](const TString& n) -> std::unique_ptr<ZoneInfoSource> {
+ if (auto z = FileZoneInfoSource::Open(n)) return z;
+ if (auto z = AndroidZoneInfoSource::Open(n)) return z;
+ if (auto z = FuchsiaZoneInfoSource::Open(n)) return z;
+ return nullptr;
+ });
+ return zip != nullptr && Load(zip.get());
+}
+
+// BreakTime() translation for a particular transition type.
+time_zone::absolute_lookup TimeZoneInfo::LocalTime(
+ std::int_fast64_t unix_time, const TransitionType& tt) const {
+ // A civil time in "+offset" looks like (time+offset) in UTC.
+ // Note: We perform two additions in the civil_second domain to
+ // sidestep the chance of overflow in (unix_time + tt.utc_offset).
+ return {(civil_second() + unix_time) + tt.utc_offset, tt.utc_offset,
+ tt.is_dst, &*abbreviations_.begin() + tt.abbr_index};
+}
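+
+// (Illustration of the overflow sidestep above: LocalTime() is invoked
+// with unix_time == seconds::max().count() when computing civil_max, so a
+// direct (unix_time + tt.utc_offset) with a positive offset, e.g. +50400
+// for UTC+14, would overflow; each civil_second addition instead
+// normalizes into calendar fields, which cannot overflow here.)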
+
+// BreakTime() translation for a particular transition.
+time_zone::absolute_lookup TimeZoneInfo::LocalTime(std::int_fast64_t unix_time,
+ const Transition& tr) const {
+ const TransitionType& tt = transition_types_[tr.type_index];
+ // Note: (unix_time - tr.unix_time) will never overflow as we
+ // have ensured that there is always a "nearby" transition.
+ return {tr.civil_sec + (unix_time - tr.unix_time), // TODO: Optimize.
+ tt.utc_offset, tt.is_dst, &*abbreviations_.begin() + tt.abbr_index};
+}
+
+// MakeTime() translation with a conversion-preserving +N * 400-year shift.
+time_zone::civil_lookup TimeZoneInfo::TimeLocal(const civil_second& cs,
+ year_t c4_shift) const {
+ assert(last_year_ - 400 < cs.year() && cs.year() <= last_year_);
+ time_zone::civil_lookup cl = MakeTime(cs);
+ if (c4_shift > seconds::max().count() / kSecsPer400Years) {
+ cl.pre = cl.trans = cl.post = time_point<seconds>::max();
+ } else {
+ const auto offset = seconds(c4_shift * kSecsPer400Years);
+ const auto limit = time_point<seconds>::max() - offset;
+ for (auto* tp : {&cl.pre, &cl.trans, &cl.post}) {
+ if (*tp > limit) {
+ *tp = time_point<seconds>::max();
+ } else {
+ *tp += offset;
+ }
+ }
+ }
+ return cl;
+}
+
+time_zone::absolute_lookup TimeZoneInfo::BreakTime(
+ const time_point<seconds>& tp) const {
+ std::int_fast64_t unix_time = ToUnixSeconds(tp);
+ const std::size_t timecnt = transitions_.size();
+ assert(timecnt != 0); // We always add a transition.
+
+ if (unix_time < transitions_[0].unix_time) {
+ return LocalTime(unix_time, transition_types_[default_transition_type_]);
+ }
+ if (unix_time >= transitions_[timecnt - 1].unix_time) {
+ // After the last transition. If we extended the transitions using
+ // future_spec_, shift back to a supported year using the 400-year
+ // cycle of calendaric equivalence and then compensate accordingly.
+ if (extended_) {
+ const std::int_fast64_t diff =
+ unix_time - transitions_[timecnt - 1].unix_time;
+ const year_t shift = diff / kSecsPer400Years + 1;
+ const auto d = seconds(shift * kSecsPer400Years);
+ time_zone::absolute_lookup al = BreakTime(tp - d);
+ al.cs = YearShift(al.cs, shift * 400);
+ return al;
+ }
+ return LocalTime(unix_time, transitions_[timecnt - 1]);
+ }
+
+ const std::size_t hint = local_time_hint_.load(std::memory_order_relaxed);
+ if (0 < hint && hint < timecnt) {
+ if (transitions_[hint - 1].unix_time <= unix_time) {
+ if (unix_time < transitions_[hint].unix_time) {
+ return LocalTime(unix_time, transitions_[hint - 1]);
+ }
+ }
+ }
+
+ const Transition target = {unix_time, 0, civil_second(), civil_second()};
+ const Transition* begin = &transitions_[0];
+ const Transition* tr = std::upper_bound(begin, begin + timecnt, target,
+ Transition::ByUnixTime());
+ local_time_hint_.store(static_cast<std::size_t>(tr - begin),
+ std::memory_order_relaxed);
+ return LocalTime(unix_time, *--tr);
+}
+
+time_zone::civil_lookup TimeZoneInfo::MakeTime(const civil_second& cs) const {
+ const std::size_t timecnt = transitions_.size();
+ assert(timecnt != 0); // We always add a transition.
+
+ // Find the first transition after our target civil time.
+ const Transition* tr = nullptr;
+ const Transition* begin = &transitions_[0];
+ const Transition* end = begin + timecnt;
+ if (cs < begin->civil_sec) {
+ tr = begin;
+ } else if (cs >= transitions_[timecnt - 1].civil_sec) {
+ tr = end;
+ } else {
+ const std::size_t hint = time_local_hint_.load(std::memory_order_relaxed);
+ if (0 < hint && hint < timecnt) {
+ if (transitions_[hint - 1].civil_sec <= cs) {
+ if (cs < transitions_[hint].civil_sec) {
+ tr = begin + hint;
+ }
+ }
+ }
+ if (tr == nullptr) {
+ const Transition target = {0, 0, cs, civil_second()};
+ tr = std::upper_bound(begin, end, target, Transition::ByCivilTime());
+ time_local_hint_.store(static_cast<std::size_t>(tr - begin),
+ std::memory_order_relaxed);
+ }
+ }
+
+ if (tr == begin) {
+ if (tr->prev_civil_sec >= cs) {
+ // Before first transition, so use the default offset.
+ const TransitionType& tt(transition_types_[default_transition_type_]);
+ if (cs < tt.civil_min) return MakeUnique(time_point<seconds>::min());
+ return MakeUnique(cs - (civil_second() + tt.utc_offset));
+ }
+ // tr->prev_civil_sec < cs < tr->civil_sec
+ return MakeSkipped(*tr, cs);
+ }
+
+ if (tr == end) {
+ if (cs > (--tr)->prev_civil_sec) {
+ // After the last transition. If we extended the transitions using
+ // future_spec_, shift back to a supported year using the 400-year
+ // cycle of calendaric equivalence and then compensate accordingly.
+ if (extended_ && cs.year() > last_year_) {
+ const year_t shift = (cs.year() - last_year_ - 1) / 400 + 1;
+ return TimeLocal(YearShift(cs, shift * -400), shift);
+ }
+ const TransitionType& tt(transition_types_[tr->type_index]);
+ if (cs > tt.civil_max) return MakeUnique(time_point<seconds>::max());
+ return MakeUnique(tr->unix_time + (cs - tr->civil_sec));
+ }
+ // tr->civil_sec <= cs <= tr->prev_civil_sec
+ return MakeRepeated(*tr, cs);
+ }
+
+ if (tr->prev_civil_sec < cs) {
+ // tr->prev_civil_sec < cs < tr->civil_sec
+ return MakeSkipped(*tr, cs);
+ }
+
+ if (cs <= (--tr)->prev_civil_sec) {
+ // tr->civil_sec <= cs <= tr->prev_civil_sec
+ return MakeRepeated(*tr, cs);
+ }
+
+ // In between transitions.
+ return MakeUnique(tr->unix_time + (cs - tr->civil_sec));
+}
+
+TString TimeZoneInfo::Version() const { return version_; }
+
+TString TimeZoneInfo::Description() const {
+ std::ostringstream oss;
+ oss << "#trans=" << transitions_.size();
+ oss << " #types=" << transition_types_.size();
+ oss << " spec='" << future_spec_ << "'";
+ return oss.str().c_str();
+}
+
+bool TimeZoneInfo::NextTransition(const time_point<seconds>& tp,
+ time_zone::civil_transition* trans) const {
+ if (transitions_.empty()) return false;
+ const Transition* begin = &transitions_[0];
+ const Transition* end = begin + transitions_.size();
+ if (begin->unix_time <= -(1LL << 59)) {
+ // Do not report the BIG_BANG found in some zoneinfo data as it is
+ // really a sentinel, not a transition. See pre-2018f tz/zic.c.
+ ++begin;
+ }
+ std::int_fast64_t unix_time = ToUnixSeconds(tp);
+ const Transition target = {unix_time, 0, civil_second(), civil_second()};
+ const Transition* tr =
+ std::upper_bound(begin, end, target, Transition::ByUnixTime());
+ for (; tr != end; ++tr) { // skip no-op transitions
+ std::uint_fast8_t prev_type_index =
+ (tr == begin) ? default_transition_type_ : tr[-1].type_index;
+ if (!EquivTransitions(prev_type_index, tr[0].type_index)) break;
+ }
+ // When tr == end we return false, ignoring future_spec_.
+ if (tr == end) return false;
+ trans->from = tr->prev_civil_sec + 1;
+ trans->to = tr->civil_sec;
+ return true;
+}
+
+bool TimeZoneInfo::PrevTransition(const time_point<seconds>& tp,
+ time_zone::civil_transition* trans) const {
+ if (transitions_.empty()) return false;
+ const Transition* begin = &transitions_[0];
+ const Transition* end = begin + transitions_.size();
+ if (begin->unix_time <= -(1LL << 59)) {
+ // Do not report the BIG_BANG found in some zoneinfo data as it is
+ // really a sentinel, not a transition. See pre-2018f tz/zic.c.
+ ++begin;
+ }
+ std::int_fast64_t unix_time = ToUnixSeconds(tp);
+ if (FromUnixSeconds(unix_time) != tp) {
+ if (unix_time == std::numeric_limits<std::int_fast64_t>::max()) {
+ if (end == begin) return false; // Ignore future_spec_.
+ trans->from = (--end)->prev_civil_sec + 1;
+ trans->to = end->civil_sec;
+ return true;
+ }
+ unix_time += 1; // ceils
+ }
+ const Transition target = {unix_time, 0, civil_second(), civil_second()};
+ const Transition* tr =
+ std::lower_bound(begin, end, target, Transition::ByUnixTime());
+ for (; tr != begin; --tr) { // skip no-op transitions
+ std::uint_fast8_t prev_type_index =
+ (tr - 1 == begin) ? default_transition_type_ : tr[-2].type_index;
+ if (!EquivTransitions(prev_type_index, tr[-1].type_index)) break;
+ }
+ // When tr == end we return the "last" transition, ignoring future_spec_.
+ if (tr == begin) return false;
+ trans->from = (--tr)->prev_civil_sec + 1;
+ trans->to = tr->civil_sec;
+ return true;
+}
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.h
new file mode 100644
index 00000000000..e55fd518474
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.h
@@ -0,0 +1,137 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_INFO_H_
+#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_INFO_H_
+
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <util/generic/string.h>
+#include <vector>
+
+#include "y_absl/base/config.h"
+#include "y_absl/time/internal/cctz/include/cctz/civil_time.h"
+#include "y_absl/time/internal/cctz/include/cctz/time_zone.h"
+#include "y_absl/time/internal/cctz/include/cctz/zone_info_source.h"
+#include "time_zone_if.h"
+#include "tzfile.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// A transition to a new UTC offset.
+struct Transition {
+ std::int_least64_t unix_time; // the instant of this transition
+ std::uint_least8_t type_index; // index of the transition type
+ civil_second civil_sec; // local civil time of transition
+ civil_second prev_civil_sec; // local civil time one second earlier
+
+ struct ByUnixTime {
+ inline bool operator()(const Transition& lhs, const Transition& rhs) const {
+ return lhs.unix_time < rhs.unix_time;
+ }
+ };
+ struct ByCivilTime {
+ inline bool operator()(const Transition& lhs, const Transition& rhs) const {
+ return lhs.civil_sec < rhs.civil_sec;
+ }
+ };
+};
+
+// The characteristics of a particular transition.
+struct TransitionType {
+ std::int_least32_t utc_offset; // the new prevailing UTC offset
+ civil_second civil_max; // max convertible civil time for offset
+ civil_second civil_min; // min convertible civil time for offset
+ bool is_dst; // did we move into daylight-saving time
+ std::uint_least8_t abbr_index; // index of the new abbreviation
+};
+
+// A time zone backed by the IANA Time Zone Database (zoneinfo).
+class TimeZoneInfo : public TimeZoneIf {
+ public:
+ TimeZoneInfo() = default;
+ TimeZoneInfo(const TimeZoneInfo&) = delete;
+ TimeZoneInfo& operator=(const TimeZoneInfo&) = delete;
+
+ // Loads the zoneinfo for the given name, returning true if successful.
+ bool Load(const TString& name);
+
+ // TimeZoneIf implementations.
+ time_zone::absolute_lookup BreakTime(
+ const time_point<seconds>& tp) const override;
+ time_zone::civil_lookup MakeTime(const civil_second& cs) const override;
+ bool NextTransition(const time_point<seconds>& tp,
+ time_zone::civil_transition* trans) const override;
+ bool PrevTransition(const time_point<seconds>& tp,
+ time_zone::civil_transition* trans) const override;
+ TString Version() const override;
+ TString Description() const override;
+
+ private:
+ struct Header { // counts of:
+ std::size_t timecnt; // transition times
+ std::size_t typecnt; // transition types
+ std::size_t charcnt; // zone abbreviation characters
+ std::size_t leapcnt; // leap seconds (we expect none)
+ std::size_t ttisstdcnt; // UTC/local indicators (unused)
+ std::size_t ttisutcnt; // standard/wall indicators (unused)
+
+ bool Build(const tzhead& tzh);
+ std::size_t DataLength(std::size_t time_len) const;
+ };
+
+ bool GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
+ const TString& abbr, std::uint_least8_t* index);
+ bool EquivTransitions(std::uint_fast8_t tt1_index,
+ std::uint_fast8_t tt2_index) const;
+ bool ExtendTransitions();
+
+ bool ResetToBuiltinUTC(const seconds& offset);
+ bool Load(ZoneInfoSource* zip);
+
+ // Helpers for BreakTime() and MakeTime().
+ time_zone::absolute_lookup LocalTime(std::int_fast64_t unix_time,
+ const TransitionType& tt) const;
+ time_zone::absolute_lookup LocalTime(std::int_fast64_t unix_time,
+ const Transition& tr) const;
+ time_zone::civil_lookup TimeLocal(const civil_second& cs,
+ year_t c4_shift) const;
+
+ std::vector<Transition> transitions_; // ordered by unix_time and civil_sec
+ std::vector<TransitionType> transition_types_; // distinct transition types
+ std::uint_fast8_t default_transition_type_; // for before first transition
+ TString abbreviations_; // all the NUL-terminated abbreviations
+
+ TString version_; // the tzdata version if available
+ TString future_spec_; // for after the last zic transition
+ bool extended_; // future_spec_ was used to generate transitions
+ year_t last_year_; // the final year of the generated transitions
+
+ // We remember the transitions found during the last BreakTime() and
+ // MakeTime() calls. If the next request is for the same transition we
+ // will avoid re-searching.
+ mutable std::atomic<std::size_t> local_time_hint_ = {}; // BreakTime() hint
+ mutable std::atomic<std::size_t> time_local_hint_ = {}; // MakeTime() hint
+};
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_INFO_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.cc
new file mode 100644
index 00000000000..a73832fc29c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.cc
@@ -0,0 +1,315 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#if defined(_WIN32) || defined(_WIN64)
+#define _CRT_SECURE_NO_WARNINGS 1
+#endif
+
+#include "time_zone_libc.h"
+
+#include <chrono>
+#include <ctime>
+#include <limits>
+#include <utility>
+
+#include "y_absl/base/config.h"
+#include "y_absl/time/internal/cctz/include/cctz/civil_time.h"
+#include "y_absl/time/internal/cctz/include/cctz/time_zone.h"
+
+#if defined(_AIX)
+extern "C" {
+extern long altzone;
+}
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+namespace {
+
+#if defined(_WIN32) || defined(_WIN64)
+// Uses the globals: '_timezone', '_dstbias' and '_tzname'.
+auto tm_gmtoff(const std::tm& tm) -> decltype(_timezone + _dstbias) {
+ const bool is_dst = tm.tm_isdst > 0;
+ return _timezone + (is_dst ? _dstbias : 0);
+}
+auto tm_zone(const std::tm& tm) -> decltype(_tzname[0]) {
+ const bool is_dst = tm.tm_isdst > 0;
+ return _tzname[is_dst];
+}
+#elif defined(__sun) || defined(_AIX)
+// Uses the globals: 'timezone', 'altzone' and 'tzname'.
+auto tm_gmtoff(const std::tm& tm) -> decltype(timezone) {
+ const bool is_dst = tm.tm_isdst > 0;
+ return is_dst ? altzone : timezone;
+}
+auto tm_zone(const std::tm& tm) -> decltype(tzname[0]) {
+ const bool is_dst = tm.tm_isdst > 0;
+ return tzname[is_dst];
+}
+#elif defined(__native_client__) || defined(__myriad2__) || \
+ defined(__EMSCRIPTEN__)
+// Uses the globals: '_timezone' and 'tzname'.
+auto tm_gmtoff(const std::tm& tm) -> decltype(_timezone + 0) {
+ const bool is_dst = tm.tm_isdst > 0;
+ return _timezone + (is_dst ? 60 * 60 : 0);
+}
+auto tm_zone(const std::tm& tm) -> decltype(tzname[0]) {
+ const bool is_dst = tm.tm_isdst > 0;
+ return tzname[is_dst];
+}
+#else
+// Adapt to different spellings of the struct std::tm extension fields.
+#if defined(tm_gmtoff)
+auto tm_gmtoff(const std::tm& tm) -> decltype(tm.tm_gmtoff) {
+ return tm.tm_gmtoff;
+}
+#elif defined(__tm_gmtoff)
+auto tm_gmtoff(const std::tm& tm) -> decltype(tm.__tm_gmtoff) {
+ return tm.__tm_gmtoff;
+}
+#else
+template <typename T>
+auto tm_gmtoff(const T& tm) -> decltype(tm.tm_gmtoff) {
+ return tm.tm_gmtoff;
+}
+template <typename T>
+auto tm_gmtoff(const T& tm) -> decltype(tm.__tm_gmtoff) {
+ return tm.__tm_gmtoff;
+}
+#endif // tm_gmtoff
+#if defined(tm_zone)
+auto tm_zone(const std::tm& tm) -> decltype(tm.tm_zone) { return tm.tm_zone; }
+#elif defined(__tm_zone)
+auto tm_zone(const std::tm& tm) -> decltype(tm.__tm_zone) {
+ return tm.__tm_zone;
+}
+#else
+template <typename T>
+auto tm_zone(const T& tm) -> decltype(tm.tm_zone) {
+ return tm.tm_zone;
+}
+template <typename T>
+auto tm_zone(const T& tm) -> decltype(tm.__tm_zone) {
+ return tm.__tm_zone;
+}
+#endif // tm_zone
+#endif
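+
+// In the final fallback above, both member spellings are declared as
+// templates; SFINAE discards the overload whose trailing return type
+// names a member this std::tm lacks, so the call resolves to whichever
+// spelling the C library actually provides.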
+
+inline std::tm* gm_time(const std::time_t* timep, std::tm* result) {
+#if defined(_WIN32) || defined(_WIN64)
+ return gmtime_s(result, timep) ? nullptr : result;
+#else
+ return gmtime_r(timep, result);
+#endif
+}
+
+inline std::tm* local_time(const std::time_t* timep, std::tm* result) {
+#if defined(_WIN32) || defined(_WIN64)
+ return localtime_s(result, timep) ? nullptr : result;
+#else
+ return localtime_r(timep, result);
+#endif
+}
+
+// Converts a civil second and "dst" flag into a time_t and UTC offset.
+// Returns false if time_t cannot represent the requested civil second.
+// Caller must have already checked that cs.year() will fit into a tm_year.
+bool make_time(const civil_second& cs, int is_dst, std::time_t* t, int* off) {
+ std::tm tm;
+ tm.tm_year = static_cast<int>(cs.year() - year_t{1900});
+ tm.tm_mon = cs.month() - 1;
+ tm.tm_mday = cs.day();
+ tm.tm_hour = cs.hour();
+ tm.tm_min = cs.minute();
+ tm.tm_sec = cs.second();
+ tm.tm_isdst = is_dst;
+ *t = std::mktime(&tm);
+ if (*t == std::time_t{-1}) {
+ std::tm tm2;
+ const std::tm* tmp = local_time(t, &tm2);
+ if (tmp == nullptr || tmp->tm_year != tm.tm_year ||
+ tmp->tm_mon != tm.tm_mon || tmp->tm_mday != tm.tm_mday ||
+ tmp->tm_hour != tm.tm_hour || tmp->tm_min != tm.tm_min ||
+ tmp->tm_sec != tm.tm_sec) {
+ // A true error (not just one second before the epoch).
+ return false;
+ }
+ }
+ *off = static_cast<int>(tm_gmtoff(tm));
+ return true;
+}
+
+// Find the least time_t in [lo:hi] where local time matches offset, given:
+// (1) lo doesn't match, (2) hi does, and (3) there is only one transition.
+std::time_t find_trans(std::time_t lo, std::time_t hi, int offset) {
+ std::tm tm;
+ while (lo + 1 != hi) {
+ const std::time_t mid = lo + (hi - lo) / 2;
+ std::tm* tmp = local_time(&mid, &tm);
+ if (tmp != nullptr) {
+ if (tm_gmtoff(*tmp) == offset) {
+ hi = mid;
+ } else {
+ lo = mid;
+ }
+ } else {
+ // If std::tm cannot hold some result we resort to a linear search,
+ // ignoring all failed conversions. Slow, but never really happens.
+ while (++lo != hi) {
+ tmp = local_time(&lo, &tm);
+ if (tmp != nullptr) {
+ if (tm_gmtoff(*tmp) == offset) break;
+ }
+ }
+ return lo;
+ }
+ }
+ return hi;
+}
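+
+// Illustrative trace (a hypothetical spring-forward): with lo just before
+// the offset change and hi after it, each probe halves [lo:hi] until
+// lo + 1 == hi, at which point hi is the least time_t whose local offset
+// equals the requested one.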
+
+} // namespace
+
+TimeZoneLibC::TimeZoneLibC(const TString& name)
+ : local_(name == "localtime") {}
+
+time_zone::absolute_lookup TimeZoneLibC::BreakTime(
+ const time_point<seconds>& tp) const {
+ time_zone::absolute_lookup al;
+ al.offset = 0;
+ al.is_dst = false;
+ al.abbr = "-00";
+
+ const std::int_fast64_t s = ToUnixSeconds(tp);
+
+ // If std::time_t cannot hold the input we saturate the output.
+ if (s < std::numeric_limits<std::time_t>::min()) {
+ al.cs = civil_second::min();
+ return al;
+ }
+ if (s > std::numeric_limits<std::time_t>::max()) {
+ al.cs = civil_second::max();
+ return al;
+ }
+
+ const std::time_t t = static_cast<std::time_t>(s);
+ std::tm tm;
+ std::tm* tmp = local_ ? local_time(&t, &tm) : gm_time(&t, &tm);
+
+ // If std::tm cannot hold the result we saturate the output.
+ if (tmp == nullptr) {
+ al.cs = (s < 0) ? civil_second::min() : civil_second::max();
+ return al;
+ }
+
+ const year_t year = tmp->tm_year + year_t{1900};
+ al.cs = civil_second(year, tmp->tm_mon + 1, tmp->tm_mday, tmp->tm_hour,
+ tmp->tm_min, tmp->tm_sec);
+ al.offset = static_cast<int>(tm_gmtoff(*tmp));
+ al.abbr = local_ ? tm_zone(*tmp) : "UTC"; // as expected by cctz
+ al.is_dst = tmp->tm_isdst > 0;
+ return al;
+}
+
+time_zone::civil_lookup TimeZoneLibC::MakeTime(const civil_second& cs) const {
+ if (!local_) {
+ // If time_point<seconds> cannot hold the result we saturate.
+ static const civil_second min_tp_cs =
+ civil_second() + ToUnixSeconds(time_point<seconds>::min());
+ static const civil_second max_tp_cs =
+ civil_second() + ToUnixSeconds(time_point<seconds>::max());
+ const time_point<seconds> tp = (cs < min_tp_cs) ? time_point<seconds>::min()
+ : (cs > max_tp_cs)
+ ? time_point<seconds>::max()
+ : FromUnixSeconds(cs - civil_second());
+ return {time_zone::civil_lookup::UNIQUE, tp, tp, tp};
+ }
+
+ // If tm_year cannot hold the requested year we saturate the result.
+ if (cs.year() < 0) {
+ if (cs.year() < std::numeric_limits<int>::min() + year_t{1900}) {
+ const time_point<seconds> tp = time_point<seconds>::min();
+ return {time_zone::civil_lookup::UNIQUE, tp, tp, tp};
+ }
+ } else {
+ if (cs.year() - year_t{1900} > std::numeric_limits<int>::max()) {
+ const time_point<seconds> tp = time_point<seconds>::max();
+ return {time_zone::civil_lookup::UNIQUE, tp, tp, tp};
+ }
+ }
+
+ // We probe with "is_dst" values of 0 and 1 to try to distinguish unique
+ // civil seconds from skipped or repeated ones. This is not always possible
+ // however, as the "dst" flag does not change over some offset transitions.
+ // We are also subject to the vagaries of mktime() implementations.
+ std::time_t t0, t1;
+ int offset0, offset1;
+ if (make_time(cs, 0, &t0, &offset0) && make_time(cs, 1, &t1, &offset1)) {
+ if (t0 == t1) {
+ // The civil time was singular (pre == trans == post).
+ const time_point<seconds> tp = FromUnixSeconds(t0);
+ return {time_zone::civil_lookup::UNIQUE, tp, tp, tp};
+ }
+
+ if (t0 > t1) {
+ std::swap(t0, t1);
+ std::swap(offset0, offset1);
+ }
+ const std::time_t tt = find_trans(t0, t1, offset1);
+ const time_point<seconds> trans = FromUnixSeconds(tt);
+
+ if (offset0 < offset1) {
+ // The civil time did not exist (pre >= trans > post).
+ const time_point<seconds> pre = FromUnixSeconds(t1);
+ const time_point<seconds> post = FromUnixSeconds(t0);
+ return {time_zone::civil_lookup::SKIPPED, pre, trans, post};
+ }
+
+ // The civil time was ambiguous (pre < trans <= post).
+ const time_point<seconds> pre = FromUnixSeconds(t0);
+ const time_point<seconds> post = FromUnixSeconds(t1);
+ return {time_zone::civil_lookup::REPEATED, pre, trans, post};
+ }
+
+ // make_time() failed somehow so we saturate the result.
+ const time_point<seconds> tp = (cs < civil_second())
+ ? time_point<seconds>::min()
+ : time_point<seconds>::max();
+ return {time_zone::civil_lookup::UNIQUE, tp, tp, tp};
+}
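+
+// Worked example (an assumed US-style fall-back): for a civil 01:30 that
+// occurs twice, the is_dst=1 probe returns the earlier (DST) instant and
+// the is_dst=0 probe the later (standard) one; after the ordering swap,
+// offset0 > offset1, so the lookup is REPEATED, with the trans point
+// located by find_trans() between the two probes.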
+
+bool TimeZoneLibC::NextTransition(const time_point<seconds>&,
+ time_zone::civil_transition*) const {
+ return false;
+}
+
+bool TimeZoneLibC::PrevTransition(const time_point<seconds>&,
+ time_zone::civil_transition*) const {
+ return false;
+}
+
+TString TimeZoneLibC::Version() const {
+ return TString(); // unknown
+}
+
+TString TimeZoneLibC::Description() const {
+ return local_ ? "localtime" : "UTC";
+}
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.h
new file mode 100644
index 00000000000..fc926fecf7c
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.h
@@ -0,0 +1,55 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_
+#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+#include "time_zone_if.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// A time zone backed by gmtime_r(3), localtime_r(3), and mktime(3),
+// and which therefore only supports UTC and the local time zone.
+// TODO: Add support for fixed offsets from UTC.
+class TimeZoneLibC : public TimeZoneIf {
+ public:
+ explicit TimeZoneLibC(const TString& name);
+
+ // TimeZoneIf implementations.
+ time_zone::absolute_lookup BreakTime(
+ const time_point<seconds>& tp) const override;
+ time_zone::civil_lookup MakeTime(const civil_second& cs) const override;
+ bool NextTransition(const time_point<seconds>& tp,
+ time_zone::civil_transition* trans) const override;
+ bool PrevTransition(const time_point<seconds>& tp,
+ time_zone::civil_transition* trans) const override;
+ TString Version() const override;
+ TString Description() const override;
+
+ private:
+ const bool local_; // localtime or UTC
+};
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_lookup.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_lookup.cc
new file mode 100644
index 00000000000..92c9208d6dd
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_lookup.cc
@@ -0,0 +1,236 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/config.h"
+#include "y_absl/time/internal/cctz/include/cctz/time_zone.h"
+
+#if defined(__ANDROID__)
+#include <sys/system_properties.h>
+#if defined(__ANDROID_API__) && __ANDROID_API__ >= 21
+#include <dlfcn.h>
+#endif
+#endif
+
+#if defined(__APPLE__)
+#include <CoreFoundation/CFTimeZone.h>
+
+#include <vector>
+#endif
+
+#if defined(__Fuchsia__)
+#error #include <fuchsia/intl/cpp/fidl.h>
+#error #include <lib/async-loop/cpp/loop.h>
+#error #include <lib/sys/cpp/component_context.h>
+#error #include <zircon/types.h>
+#endif
+
+#include <cstdlib>
+#include <cstring>
+#include <util/generic/string.h>
+
+#include "time_zone_fixed.h"
+#include "time_zone_impl.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+#if defined(__ANDROID__) && defined(__ANDROID_API__) && __ANDROID_API__ >= 21
+namespace {
+// Android 'L' removes __system_property_get() from the NDK; however, it
+// is still a hidden symbol in libc, so we use dlsym() to access it.
+// See Chromium's base/sys_info_android.cc for a similar example.
+
+using property_get_func = int (*)(const char*, char*);
+
+property_get_func LoadSystemPropertyGet() {
+ int flag = RTLD_LAZY | RTLD_GLOBAL;
+#if defined(RTLD_NOLOAD)
+ flag |= RTLD_NOLOAD; // libc.so should already be resident
+#endif
+ if (void* handle = dlopen("libc.so", flag)) {
+ void* sym = dlsym(handle, "__system_property_get");
+ dlclose(handle);
+ return reinterpret_cast<property_get_func>(sym);
+ }
+ return nullptr;
+}
+
+int __system_property_get(const char* name, char* value) {
+ static property_get_func system_property_get = LoadSystemPropertyGet();
+ return system_property_get ? system_property_get(name, value) : -1;
+}
+
+} // namespace
+#endif
+
+TString time_zone::name() const { return effective_impl().Name(); }
+
+time_zone::absolute_lookup time_zone::lookup(
+ const time_point<seconds>& tp) const {
+ return effective_impl().BreakTime(tp);
+}
+
+time_zone::civil_lookup time_zone::lookup(const civil_second& cs) const {
+ return effective_impl().MakeTime(cs);
+}
+
+bool time_zone::next_transition(const time_point<seconds>& tp,
+ civil_transition* trans) const {
+ return effective_impl().NextTransition(tp, trans);
+}
+
+bool time_zone::prev_transition(const time_point<seconds>& tp,
+ civil_transition* trans) const {
+ return effective_impl().PrevTransition(tp, trans);
+}
+
+TString time_zone::version() const { return effective_impl().Version(); }
+
+TString time_zone::description() const {
+ return effective_impl().Description();
+}
+
+const time_zone::Impl& time_zone::effective_impl() const {
+ if (impl_ == nullptr) {
+ // Dereferencing an implicit-UTC time_zone is expected to be
+ // rare, so we don't mind paying a small synchronization cost.
+ return *time_zone::Impl::UTC().impl_;
+ }
+ return *impl_;
+}
+
+bool load_time_zone(const TString& name, time_zone* tz) {
+ return time_zone::Impl::LoadTimeZone(name, tz);
+}
+
+time_zone utc_time_zone() {
+ return time_zone::Impl::UTC(); // avoid name lookup
+}
+
+time_zone fixed_time_zone(const seconds& offset) {
+ time_zone tz;
+ load_time_zone(FixedOffsetToName(offset), &tz);
+ return tz;
+}
+
+time_zone local_time_zone() {
+ const char* zone = ":localtime";
+#if defined(__ANDROID__)
+ char sysprop[PROP_VALUE_MAX];
+ if (__system_property_get("persist.sys.timezone", sysprop) > 0) {
+ zone = sysprop;
+ }
+#endif
+#if defined(__APPLE__)
+ std::vector<char> buffer;
+ CFTimeZoneRef tz_default = CFTimeZoneCopyDefault();
+ if (CFStringRef tz_name = CFTimeZoneGetName(tz_default)) {
+ CFStringEncoding encoding = kCFStringEncodingUTF8;
+ CFIndex length = CFStringGetLength(tz_name);
+ buffer.resize(CFStringGetMaximumSizeForEncoding(length, encoding) + 1);
+ if (CFStringGetCString(tz_name, &buffer[0], buffer.size(), encoding)) {
+ zone = &buffer[0];
+ }
+ }
+ CFRelease(tz_default);
+#endif
+#if defined(__Fuchsia__)
+ TString primary_tz;
+ [&]() {
+ // Note: We can't use the synchronous FIDL API here because it doesn't
+ // allow timeouts; if the FIDL call failed, local_time_zone() would never
+ // return.
+
+ const zx::duration kTimeout = zx::msec(500);
+
+ // Don't attach to the thread because otherwise the thread's dispatcher
+ // would be set to null when the loop is destroyed, causing any other FIDL
+ // code running on the same thread to crash.
+ async::Loop loop(&kAsyncLoopConfigNeverAttachToThread);
+ std::unique_ptr<sys::ComponentContext> context =
+ sys::ComponentContext::Create();
+
+ fuchsia::intl::PropertyProviderHandle handle;
+ zx_status_t status = context->svc()->Connect(handle.NewRequest());
+ if (status != ZX_OK) {
+ return;
+ }
+
+ fuchsia::intl::PropertyProviderPtr intl_provider;
+ status = intl_provider.Bind(std::move(handle), loop.dispatcher());
+ if (status != ZX_OK) {
+ return;
+ }
+
+ intl_provider->GetProfile(
+ [&loop, &primary_tz](fuchsia::intl::Profile profile) {
+ if (!profile.time_zones().empty()) {
+ primary_tz = profile.time_zones()[0].id;
+ }
+ loop.Quit();
+ });
+ loop.Run(zx::deadline_after(kTimeout));
+ }();
+
+ if (!primary_tz.empty()) {
+ zone = primary_tz.c_str();
+ }
+#endif
+
+ // Allow ${TZ} to override the default zone.
+ char* tz_env = nullptr;
+#if defined(_MSC_VER)
+ _dupenv_s(&tz_env, nullptr, "TZ");
+#else
+ tz_env = std::getenv("TZ");
+#endif
+ if (tz_env) zone = tz_env;
+
+ // We only support the "[:]<zone-name>" form.
+ if (*zone == ':') ++zone;
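+ // (e.g. TZ=":America/Los_Angeles" and TZ="America/Los_Angeles" are
+ // treated identically here.)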
+
+ // Map "localtime" to a system-specific name, but
+ // allow ${LOCALTIME} to override the default name.
+ char* localtime_env = nullptr;
+ if (strcmp(zone, "localtime") == 0) {
+#if defined(_MSC_VER)
+ // System-specific default is just "localtime".
+ _dupenv_s(&localtime_env, nullptr, "LOCALTIME");
+#else
+ zone = "/etc/localtime"; // System-specific default.
+ localtime_env = std::getenv("LOCALTIME");
+#endif
+ if (localtime_env) zone = localtime_env;
+ }
+
+ const TString name = zone;
+#if defined(_MSC_VER)
+ free(localtime_env);
+ free(tz_env);
+#endif
+
+ time_zone tz;
+ load_time_zone(name, &tz); // Falls back to UTC.
+ // TODO: Follow the RFC3339 "Unknown Local Offset Convention" and
+ // arrange for %z to generate "-0000" when we don't know the local
+ // offset because the load_time_zone() failed and we're using UTC.
+ return tz;
+}
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.cc
new file mode 100644
index 00000000000..40965039156
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.cc
@@ -0,0 +1,159 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "time_zone_posix.h"
+
+#include <cstddef>
+#include <cstring>
+#include <limits>
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+namespace {
+
+const char kDigits[] = "0123456789";
+
+const char* ParseInt(const char* p, int min, int max, int* vp) {
+ int value = 0;
+ const char* op = p;
+ const int kMaxInt = std::numeric_limits<int>::max();
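+  // Note: strchr(kDigits, *p) also matches kDigits' terminating '\0'
+  // (index 10), so the d >= 10 check below doubles as the end-of-digits
+  // test.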
+ for (; const char* dp = strchr(kDigits, *p); ++p) {
+ int d = static_cast<int>(dp - kDigits);
+ if (d >= 10) break; // '\0'
+ if (value > kMaxInt / 10) return nullptr;
+ value *= 10;
+ if (value > kMaxInt - d) return nullptr;
+ value += d;
+ }
+ if (p == op || value < min || value > max) return nullptr;
+ *vp = value;
+ return p;
+}
+
+// abbr = <.*?> | [^-+,\d]{3,}
+const char* ParseAbbr(const char* p, TString* abbr) {
+ const char* op = p;
+ if (*p == '<') { // special zoneinfo <...> form
+ while (*++p != '>') {
+ if (*p == '\0') return nullptr;
+ }
+ abbr->assign(op + 1, static_cast<std::size_t>(p - op) - 1);
+ return ++p;
+ }
+ while (*p != '\0') {
+ if (strchr("-+,", *p)) break;
+ if (strchr(kDigits, *p)) break;
+ ++p;
+ }
+ if (p - op < 3) return nullptr;
+ abbr->assign(op, static_cast<std::size_t>(p - op));
+ return p;
+}
+
+// offset = [+|-]hh[:mm[:ss]] (aggregated into a single seconds value)
+const char* ParseOffset(const char* p, int min_hour, int max_hour, int sign,
+ std::int_fast32_t* offset) {
+ if (p == nullptr) return nullptr;
+ if (*p == '+' || *p == '-') {
+ if (*p++ == '-') sign = -sign;
+ }
+ int hours = 0;
+ int minutes = 0;
+ int seconds = 0;
+
+ p = ParseInt(p, min_hour, max_hour, &hours);
+ if (p == nullptr) return nullptr;
+ if (*p == ':') {
+ p = ParseInt(p + 1, 0, 59, &minutes);
+ if (p == nullptr) return nullptr;
+ if (*p == ':') {
+ p = ParseInt(p + 1, 0, 59, &seconds);
+ if (p == nullptr) return nullptr;
+ }
+ }
+ *offset = sign * ((((hours * 60) + minutes) * 60) + seconds);
+ return p;
+}
+
+// datetime = ( Jn | n | Mm.w.d ) [ / offset ]
+const char* ParseDateTime(const char* p, PosixTransition* res) {
+ if (p != nullptr && *p == ',') {
+ if (*++p == 'M') {
+ int month = 0;
+ if ((p = ParseInt(p + 1, 1, 12, &month)) != nullptr && *p == '.') {
+ int week = 0;
+ if ((p = ParseInt(p + 1, 1, 5, &week)) != nullptr && *p == '.') {
+ int weekday = 0;
+ if ((p = ParseInt(p + 1, 0, 6, &weekday)) != nullptr) {
+ res->date.fmt = PosixTransition::M;
+ res->date.m.month = static_cast<std::int_fast8_t>(month);
+ res->date.m.week = static_cast<std::int_fast8_t>(week);
+ res->date.m.weekday = static_cast<std::int_fast8_t>(weekday);
+ }
+ }
+ }
+ } else if (*p == 'J') {
+ int day = 0;
+ if ((p = ParseInt(p + 1, 1, 365, &day)) != nullptr) {
+ res->date.fmt = PosixTransition::J;
+ res->date.j.day = static_cast<std::int_fast16_t>(day);
+ }
+ } else {
+ int day = 0;
+ if ((p = ParseInt(p, 0, 365, &day)) != nullptr) {
+ res->date.fmt = PosixTransition::N;
+ res->date.n.day = static_cast<std::int_fast16_t>(day);
+ }
+ }
+ }
+ if (p != nullptr) {
+ res->time.offset = 2 * 60 * 60; // default offset is 02:00:00
+ if (*p == '/') p = ParseOffset(p + 1, -167, 167, 1, &res->time.offset);
+ }
+ return p;
+}
+
+} // namespace
+
+// spec = std offset [ dst [ offset ] , datetime , datetime ]
+bool ParsePosixSpec(const TString& spec, PosixTimeZone* res) {
+ const char* p = spec.c_str();
+ if (*p == ':') return false;
+
+ p = ParseAbbr(p, &res->std_abbr);
+ p = ParseOffset(p, 0, 24, -1, &res->std_offset);
+ if (p == nullptr) return false;
+ if (*p == '\0') return true;
+
+ p = ParseAbbr(p, &res->dst_abbr);
+ if (p == nullptr) return false;
+ res->dst_offset = res->std_offset + (60 * 60); // default
+ if (*p != ',') p = ParseOffset(p, 0, 24, -1, &res->dst_offset);
+
+ p = ParseDateTime(p, &res->dst_start);
+ p = ParseDateTime(p, &res->dst_end);
+
+ return p != nullptr && *p == '\0';
+}
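+// Illustrative example (mirroring the breakdown in time_zone_posix.h):
+// parsing "PST8PDT,M3.2.0,M11.1.0" succeeds and yields std_abbr = "PST",
+// std_offset = -28800, dst_abbr = "PDT", dst_offset = -25200 (the
+// defaulted std_offset + 1h), with dst_start = M3.2.0 and dst_end =
+// M11.1.0, both at the default transition time of 02:00:00 (offset 7200).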
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.h
new file mode 100644
index 00000000000..e9de2b77add
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.h
@@ -0,0 +1,132 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parsing of a POSIX zone spec as described in the TZ part of section 8.3 in
+// http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html.
+//
+// The current POSIX spec for America/Los_Angeles is "PST8PDT,M3.2.0,M11.1.0",
+// which would be broken down as ...
+//
+// PosixTimeZone {
+// std_abbr = "PST"
+// std_offset = -28800
+// dst_abbr = "PDT"
+// dst_offset = -25200
+// dst_start = PosixTransition {
+// date {
+// m {
+// month = 3
+// week = 2
+// weekday = 0
+// }
+// }
+// time {
+// offset = 7200
+// }
+// }
+// dst_end = PosixTransition {
+// date {
+// m {
+// month = 11
+// week = 1
+// weekday = 0
+// }
+// }
+// time {
+// offset = 7200
+// }
+// }
+// }
+
+#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_POSIX_H_
+#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_POSIX_H_
+
+#include <cstdint>
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// The date/time of the transition. The date is specified as either:
+// (J) the Nth day of the year (1 <= N <= 365), excluding leap days, or
+// (N) the Nth day of the year (0 <= N <= 365), including leap days, or
+// (M) the Nth weekday of a month (e.g., the 2nd Sunday in March).
+// The time, specified as a day offset, identifies the particular moment
+// of the transition, and may be negative or >= 24h, in which case it
+// would take us to another day, and perhaps week, or even month.
+struct PosixTransition {
+ enum DateFormat { J, N, M };
+
+ struct Date {
+ struct NonLeapDay {
+ std::int_fast16_t day; // day of non-leap year [1:365]
+ };
+ struct Day {
+ std::int_fast16_t day; // day of year [0:365]
+ };
+ struct MonthWeekWeekday {
+ std::int_fast8_t month; // month of year [1:12]
+ std::int_fast8_t week; // week of month [1:5] (5==last)
+    std::int_fast8_t weekday;  // 0==Sun, ..., 6==Sat
+ };
+
+ DateFormat fmt;
+
+ union {
+ NonLeapDay j;
+ Day n;
+ MonthWeekWeekday m;
+ };
+ };
+
+ struct Time {
+ std::int_fast32_t offset; // seconds before/after 00:00:00
+ };
+
+ Date date;
+ Time time;
+};
+
+// The entirety of a POSIX-string specified time-zone rule. The standard
+// abbreviation and offset are always given. If the time zone includes
+// daylight saving, then the daylight abbreviation is non-empty and the
+// remaining fields are also valid. Note that the start/end transitions
+// are not ordered: in the southern hemisphere the transition to end
+// daylight time occurs first in any particular year.
+struct PosixTimeZone {
+ TString std_abbr;
+ std::int_fast32_t std_offset;
+
+ TString dst_abbr;
+ std::int_fast32_t dst_offset;
+ PosixTransition dst_start;
+ PosixTransition dst_end;
+};
+
+// Breaks down a POSIX time-zone specification into its constituent pieces,
+// filling in any missing values (DST offset, or start/end transition times)
+// with the standard-defined defaults. Returns false if the specification
+// could not be parsed (although some fields of *res may have been altered).
+bool ParsePosixSpec(const TString& spec, PosixTimeZone* res);
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_POSIX_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/tzfile.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/tzfile.h
new file mode 100644
index 00000000000..31e8598257f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/tzfile.h
@@ -0,0 +1,122 @@
+/* Layout and location of TZif files. */
+
+#ifndef TZFILE_H
+
+#define TZFILE_H
+
+/*
+** This file is in the public domain, so clarified as of
+** 1996-06-05 by Arthur David Olson.
+*/
+
+/*
+** This header is for use ONLY with the time conversion code.
+** There is no guarantee that it will remain unchanged,
+** or that it will remain at all.
+** Do NOT copy it to any system include directory.
+** Thank you!
+*/
+
+/*
+** Information about time zone files.
+*/
+
+#ifndef TZDIR
+#define TZDIR "/usr/share/zoneinfo" /* Time zone object file directory */
+#endif /* !defined TZDIR */
+
+#ifndef TZDEFAULT
+#define TZDEFAULT "/etc/localtime"
+#endif /* !defined TZDEFAULT */
+
+#ifndef TZDEFRULES
+#define TZDEFRULES "posixrules"
+#endif /* !defined TZDEFRULES */
+
+/* See Internet RFC 8536 for more details about the following format. */
+
+/*
+** Each file begins with. . .
+*/
+
+#define TZ_MAGIC "TZif"
+
+struct tzhead {
+ char tzh_magic[4]; /* TZ_MAGIC */
+ char tzh_version[1]; /* '\0' or '2'-'4' as of 2021 */
+ char tzh_reserved[15]; /* reserved; must be zero */
+ char tzh_ttisutcnt[4]; /* coded number of trans. time flags */
+ char tzh_ttisstdcnt[4]; /* coded number of trans. time flags */
+ char tzh_leapcnt[4]; /* coded number of leap seconds */
+ char tzh_timecnt[4]; /* coded number of transition times */
+ char tzh_typecnt[4]; /* coded number of local time types */
+ char tzh_charcnt[4]; /* coded number of abbr. chars */
+};
+
+/*
+** . . .followed by. . .
+**
+** tzh_timecnt (char [4])s coded transition times a la time(2)
+** tzh_timecnt (unsigned char)s types of local time starting at above
+** tzh_typecnt repetitions of
+** one (char [4]) coded UT offset in seconds
+** one (unsigned char) used to set tm_isdst
+** one (unsigned char) that's an abbreviation list index
+** tzh_charcnt (char)s '\0'-terminated zone abbreviations
+** tzh_leapcnt repetitions of
+** one (char [4]) coded leap second transition times
+** one (char [4]) total correction after above
+** tzh_ttisstdcnt (char)s indexed by type; if 1, transition
+** time is standard time, if 0,
+** transition time is local (wall clock)
+** time; if absent, transition times are
+** assumed to be local time
+** tzh_ttisutcnt (char)s indexed by type; if 1, transition
+** time is UT, if 0, transition time is
+** local time; if absent, transition
+** times are assumed to be local time.
+** When this is 1, the corresponding
+** std/wall indicator must also be 1.
+*/
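+/*
+** Illustrative decoding sketch (not part of the original header; decode32
+** is a hypothetical helper using <stdint.h> types). Per Internet RFC 8536,
+** the coded (char [4]) fields above are big-endian two's-complement
+** values, so a reader might widen one like this:
+**
+**   static int_fast32_t decode32(const char *p) {
+**     uint_fast32_t v = 0;
+**     for (int i = 0; i != 4; ++i) v = (v << 8) | (unsigned char)p[i];
+**     if (v <= 0x7fffffff) return (int_fast32_t)v;
+**     return (int_fast32_t)(v - 0x7fffffff - 1) - 0x7fffffff - 1;
+**   }
+*/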
+
+/*
+** If tzh_version is '2' or greater, the above is followed by a second instance
+** of tzhead and a second instance of the data in which each coded transition
+** time uses 8 rather than 4 chars,
+** then a POSIX-TZ-environment-variable-style string for use in handling
+** instants after the last transition time stored in the file
+** (with nothing between the newlines if there is no POSIX representation for
+** such instants).
+**
+** If tzh_version is '3' or greater, the above is extended as follows.
+** First, the POSIX TZ string's hour offset may range from -167
+** through 167 as compared to the POSIX-required 0 through 24.
+** Second, its DST start time may be January 1 at 00:00 and its stop
+** time December 31 at 24:00 plus the difference between DST and
+** standard time, indicating DST all year.
+*/
+
+/*
+** In the current implementation, "tzset()" refuses to deal with files that
+** exceed any of the limits below.
+*/
+
+#ifndef TZ_MAX_TIMES
+#define TZ_MAX_TIMES 2000
+#endif /* !defined TZ_MAX_TIMES */
+
+#ifndef TZ_MAX_TYPES
+/* This must be at least 17 for Europe/Samara and Europe/Vilnius. */
+#define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */
+#endif /* !defined TZ_MAX_TYPES */
+
+#ifndef TZ_MAX_CHARS
+#define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */
+ /* (limited by what unsigned chars can hold) */
+#endif /* !defined TZ_MAX_CHARS */
+
+#ifndef TZ_MAX_LEAPS
+#define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */
+#endif /* !defined TZ_MAX_LEAPS */
+
+#endif /* !defined TZFILE_H */
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/zone_info_source.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/zone_info_source.cc
new file mode 100644
index 00000000000..be58c20fb33
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/zone_info_source.cc
@@ -0,0 +1,115 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/time/internal/cctz/include/cctz/zone_info_source.h"
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz {
+
+// Defined out-of-line to avoid emitting a weak vtable in all TUs.
+ZoneInfoSource::~ZoneInfoSource() {}
+TString ZoneInfoSource::Version() const { return TString(); }
+
+} // namespace cctz
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+namespace cctz_extension {
+
+namespace {
+
+// A default for cctz_extension::zone_info_source_factory, which simply
+// defers to the fallback factory.
+std::unique_ptr<y_absl::time_internal::cctz::ZoneInfoSource> DefaultFactory(
+ const TString& name,
+ const std::function<
+ std::unique_ptr<y_absl::time_internal::cctz::ZoneInfoSource>(
+ const TString& name)>& fallback_factory) {
+ return fallback_factory(name);
+}
+
+} // namespace
+
+// A "weak" definition for cctz_extension::zone_info_source_factory.
+// The user may override this with their own "strong" definition (see
+// zone_info_source.h).
+#if !defined(__has_attribute)
+#define __has_attribute(x) 0
+#endif
+// MinGW is GCC on Windows, so while it asserts __has_attribute(weak), the
+// Windows linker cannot handle that. Nor does the MinGW compiler know how to
+// pass "#pragma comment(linker, ...)" to the Windows linker.
+#if (__has_attribute(weak) || defined(__GNUC__)) && !defined(__MINGW32__)
+ZoneInfoSourceFactory zone_info_source_factory __attribute__((weak)) =
+ DefaultFactory;
+#elif defined(_MSC_VER) && !defined(__MINGW32__) && !defined(_LIBCPP_VERSION)
+extern ZoneInfoSourceFactory zone_info_source_factory;
+extern ZoneInfoSourceFactory default_factory;
+ZoneInfoSourceFactory default_factory = DefaultFactory;
+#if defined(_M_IX86) || defined(_M_ARM)
+#pragma comment( \
+ linker, \
+ "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZA=?default_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZA")
+#elif defined(_M_IA_64) || defined(_M_AMD64) || defined(_M_ARM64)
+#pragma comment( \
+ linker, \
+ "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZEA=?default_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZEA")
+#else
+#error Unsupported MSVC platform
+#endif // _M_<PLATFORM>
+#else
+// Make it a "strong" definition if we have no other choice.
+ZoneInfoSourceFactory zone_info_source_factory = DefaultFactory;
+#endif
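+// Illustrative override (a sketch; MyFactory is hypothetical): a user
+// translation unit can provide the "strong" definition mentioned above by
+// mirroring DefaultFactory's signature, e.g.
+//
+//   std::unique_ptr<cctz::ZoneInfoSource> MyFactory(
+//       const TString& name,
+//       const std::function<std::unique_ptr<cctz::ZoneInfoSource>(
+//           const TString& name)>& fallback_factory) {
+//     // ... consult a custom data source, else defer:
+//     return fallback_factory(name);
+//   }
+//   ZoneInfoSourceFactory zone_info_source_factory = MyFactory;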
+
+} // namespace cctz_extension
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/get_current_time_chrono.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/get_current_time_chrono.inc
new file mode 100644
index 00000000000..ef28e97e5a1
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/get_current_time_chrono.inc
@@ -0,0 +1,31 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <chrono>
+#include <cstdint>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+
+static int64_t GetCurrentTimeNanosFromSystem() {
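+  // Note: subtracting from_time_t(0) anchors the count at time_t's epoch
+  // (the Unix epoch on POSIX systems); before C++20 the epoch of
+  // std::chrono::system_clock itself is not formally specified, which is
+  // presumably why time_since_epoch() is not used directly.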
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(
+ std::chrono::system_clock::now() -
+ std::chrono::system_clock::from_time_t(0))
+ .count();
+}
+
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/get_current_time_posix.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/get_current_time_posix.inc
new file mode 100644
index 00000000000..1068fea026d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/get_current_time_posix.inc
@@ -0,0 +1,24 @@
+#include "y_absl/time/clock.h"
+
+#include <sys/time.h>
+#include <ctime>
+#include <cstdint>
+
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+
+static int64_t GetCurrentTimeNanosFromSystem() {
+ const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
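+  // A signed 64-bit nanosecond count covers roughly +/-292 years around
+  // the epoch, so the arithmetic below cannot overflow for realistic
+  // CLOCK_REALTIME values.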
+ struct timespec ts;
+ ABSL_RAW_CHECK(clock_gettime(CLOCK_REALTIME, &ts) == 0,
+ "Failed to read real-time clock.");
+ return (int64_t{ts.tv_sec} * kNanosPerSecond +
+ int64_t{ts.tv_nsec});
+}
+
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/test_util.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/test_util.h
new file mode 100644
index 00000000000..22b991b8366
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/test_util.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_TIME_INTERNAL_TEST_UTIL_H_
+#define ABSL_TIME_INTERNAL_TEST_UTIL_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+
+// Loads the named timezone, but dies on any failure.
+y_absl::TimeZone LoadTimeZone(const TString& name);
+
+} // namespace time_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_INTERNAL_TEST_UTIL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/zoneinfo.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/zoneinfo.inc
new file mode 100644
index 00000000000..bfed82990dd
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/zoneinfo.inc
@@ -0,0 +1,729 @@
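+// Embedded TZif zone data (note the leading TZ_MAGIC bytes
+// 0x54 0x5a 0x69 0x66, i.e. "TZif"); see tzfile.h for the layout these
+// arrays follow.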
+unsigned char America_Los_Angeles[] = {
+ 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0x80, 0x00, 0x00, 0x00,
+ 0x9e, 0xa6, 0x48, 0xa0, 0x9f, 0xbb, 0x15, 0x90, 0xa0, 0x86, 0x2a, 0xa0,
+ 0xa1, 0x9a, 0xf7, 0x90, 0xcb, 0x89, 0x1a, 0xa0, 0xd2, 0x23, 0xf4, 0x70,
+ 0xd2, 0x61, 0x26, 0x10, 0xd6, 0xfe, 0x74, 0x5c, 0xd8, 0x80, 0xad, 0x90,
+ 0xda, 0xfe, 0xc3, 0x90, 0xdb, 0xc0, 0x90, 0x10, 0xdc, 0xde, 0xa5, 0x90,
+ 0xdd, 0xa9, 0xac, 0x90, 0xde, 0xbe, 0x87, 0x90, 0xdf, 0x89, 0x8e, 0x90,
+ 0xe0, 0x9e, 0x69, 0x90, 0xe1, 0x69, 0x70, 0x90, 0xe2, 0x7e, 0x4b, 0x90,
+ 0xe3, 0x49, 0x52, 0x90, 0xe4, 0x5e, 0x2d, 0x90, 0xe5, 0x29, 0x34, 0x90,
+ 0xe6, 0x47, 0x4a, 0x10, 0xe7, 0x12, 0x51, 0x10, 0xe8, 0x27, 0x2c, 0x10,
+ 0xe8, 0xf2, 0x33, 0x10, 0xea, 0x07, 0x0e, 0x10, 0xea, 0xd2, 0x15, 0x10,
+ 0xeb, 0xe6, 0xf0, 0x10, 0xec, 0xb1, 0xf7, 0x10, 0xed, 0xc6, 0xd2, 0x10,
+ 0xee, 0x91, 0xd9, 0x10, 0xef, 0xaf, 0xee, 0x90, 0xf0, 0x71, 0xbb, 0x10,
+ 0xf1, 0x8f, 0xd0, 0x90, 0xf2, 0x7f, 0xc1, 0x90, 0xf3, 0x6f, 0xb2, 0x90,
+ 0xf4, 0x5f, 0xa3, 0x90, 0xf5, 0x4f, 0x94, 0x90, 0xf6, 0x3f, 0x85, 0x90,
+ 0xf7, 0x2f, 0x76, 0x90, 0xf8, 0x28, 0xa2, 0x10, 0xf9, 0x0f, 0x58, 0x90,
+ 0xfa, 0x08, 0x84, 0x10, 0xfa, 0xf8, 0x83, 0x20, 0xfb, 0xe8, 0x66, 0x10,
+ 0xfc, 0xd8, 0x65, 0x20, 0xfd, 0xc8, 0x48, 0x10, 0xfe, 0xb8, 0x47, 0x20,
+ 0xff, 0xa8, 0x2a, 0x10, 0x00, 0x98, 0x29, 0x20, 0x01, 0x88, 0x0c, 0x10,
+ 0x02, 0x78, 0x0b, 0x20, 0x03, 0x71, 0x28, 0x90, 0x04, 0x61, 0x27, 0xa0,
+ 0x05, 0x51, 0x0a, 0x90, 0x06, 0x41, 0x09, 0xa0, 0x07, 0x30, 0xec, 0x90,
+ 0x07, 0x8d, 0x43, 0xa0, 0x09, 0x10, 0xce, 0x90, 0x09, 0xad, 0xbf, 0x20,
+ 0x0a, 0xf0, 0xb0, 0x90, 0x0b, 0xe0, 0xaf, 0xa0, 0x0c, 0xd9, 0xcd, 0x10,
+ 0x0d, 0xc0, 0x91, 0xa0, 0x0e, 0xb9, 0xaf, 0x10, 0x0f, 0xa9, 0xae, 0x20,
+ 0x10, 0x99, 0x91, 0x10, 0x11, 0x89, 0x90, 0x20, 0x12, 0x79, 0x73, 0x10,
+ 0x13, 0x69, 0x72, 0x20, 0x14, 0x59, 0x55, 0x10, 0x15, 0x49, 0x54, 0x20,
+ 0x16, 0x39, 0x37, 0x10, 0x17, 0x29, 0x36, 0x20, 0x18, 0x22, 0x53, 0x90,
+ 0x19, 0x09, 0x18, 0x20, 0x1a, 0x02, 0x35, 0x90, 0x1a, 0xf2, 0x34, 0xa0,
+ 0x1b, 0xe2, 0x17, 0x90, 0x1c, 0xd2, 0x16, 0xa0, 0x1d, 0xc1, 0xf9, 0x90,
+ 0x1e, 0xb1, 0xf8, 0xa0, 0x1f, 0xa1, 0xdb, 0x90, 0x20, 0x76, 0x2b, 0x20,
+ 0x21, 0x81, 0xbd, 0x90, 0x22, 0x56, 0x0d, 0x20, 0x23, 0x6a, 0xda, 0x10,
+ 0x24, 0x35, 0xef, 0x20, 0x25, 0x4a, 0xbc, 0x10, 0x26, 0x15, 0xd1, 0x20,
+ 0x27, 0x2a, 0x9e, 0x10, 0x27, 0xfe, 0xed, 0xa0, 0x29, 0x0a, 0x80, 0x10,
+ 0x29, 0xde, 0xcf, 0xa0, 0x2a, 0xea, 0x62, 0x10, 0x2b, 0xbe, 0xb1, 0xa0,
+ 0x2c, 0xd3, 0x7e, 0x90, 0x2d, 0x9e, 0x93, 0xa0, 0x2e, 0xb3, 0x60, 0x90,
+ 0x2f, 0x7e, 0x75, 0xa0, 0x30, 0x93, 0x42, 0x90, 0x31, 0x67, 0x92, 0x20,
+ 0x32, 0x73, 0x24, 0x90, 0x33, 0x47, 0x74, 0x20, 0x34, 0x53, 0x06, 0x90,
+ 0x35, 0x27, 0x56, 0x20, 0x36, 0x32, 0xe8, 0x90, 0x37, 0x07, 0x38, 0x20,
+ 0x38, 0x1c, 0x05, 0x10, 0x38, 0xe7, 0x1a, 0x20, 0x39, 0xfb, 0xe7, 0x10,
+ 0x3a, 0xc6, 0xfc, 0x20, 0x3b, 0xdb, 0xc9, 0x10, 0x3c, 0xb0, 0x18, 0xa0,
+ 0x3d, 0xbb, 0xab, 0x10, 0x3e, 0x8f, 0xfa, 0xa0, 0x3f, 0x9b, 0x8d, 0x10,
+ 0x40, 0x6f, 0xdc, 0xa0, 0x41, 0x84, 0xa9, 0x90, 0x42, 0x4f, 0xbe, 0xa0,
+ 0x43, 0x64, 0x8b, 0x90, 0x44, 0x2f, 0xa0, 0xa0, 0x45, 0x44, 0x6d, 0x90,
+ 0x45, 0xf3, 0xd3, 0x20, 0x47, 0x2d, 0x8a, 0x10, 0x47, 0xd3, 0xb5, 0x20,
+ 0x49, 0x0d, 0x6c, 0x10, 0x49, 0xb3, 0x97, 0x20, 0x4a, 0xed, 0x4e, 0x10,
+ 0x4b, 0x9c, 0xb3, 0xa0, 0x4c, 0xd6, 0x6a, 0x90, 0x4d, 0x7c, 0x95, 0xa0,
+ 0x4e, 0xb6, 0x4c, 0x90, 0x4f, 0x5c, 0x77, 0xa0, 0x50, 0x96, 0x2e, 0x90,
+ 0x51, 0x3c, 0x59, 0xa0, 0x52, 0x76, 0x10, 0x90, 0x53, 0x1c, 0x3b, 0xa0,
+ 0x54, 0x55, 0xf2, 0x90, 0x54, 0xfc, 0x1d, 0xa0, 0x56, 0x35, 0xd4, 0x90,
+ 0x56, 0xe5, 0x3a, 0x20, 0x58, 0x1e, 0xf1, 0x10, 0x58, 0xc5, 0x1c, 0x20,
+ 0x59, 0xfe, 0xd3, 0x10, 0x5a, 0xa4, 0xfe, 0x20, 0x5b, 0xde, 0xb5, 0x10,
+ 0x5c, 0x84, 0xe0, 0x20, 0x5d, 0xbe, 0x97, 0x10, 0x5e, 0x64, 0xc2, 0x20,
+ 0x5f, 0x9e, 0x79, 0x10, 0x60, 0x4d, 0xde, 0xa0, 0x61, 0x87, 0x95, 0x90,
+ 0x62, 0x2d, 0xc0, 0xa0, 0x63, 0x67, 0x77, 0x90, 0x64, 0x0d, 0xa2, 0xa0,
+ 0x65, 0x47, 0x59, 0x90, 0x65, 0xed, 0x84, 0xa0, 0x67, 0x27, 0x3b, 0x90,
+ 0x67, 0xcd, 0x66, 0xa0, 0x69, 0x07, 0x1d, 0x90, 0x69, 0xad, 0x48, 0xa0,
+ 0x6a, 0xe6, 0xff, 0x90, 0x6b, 0x96, 0x65, 0x20, 0x6c, 0xd0, 0x1c, 0x10,
+ 0x6d, 0x76, 0x47, 0x20, 0x6e, 0xaf, 0xfe, 0x10, 0x6f, 0x56, 0x29, 0x20,
+ 0x70, 0x8f, 0xe0, 0x10, 0x71, 0x36, 0x0b, 0x20, 0x72, 0x6f, 0xc2, 0x10,
+ 0x73, 0x15, 0xed, 0x20, 0x74, 0x4f, 0xa4, 0x10, 0x74, 0xff, 0x09, 0xa0,
+ 0x76, 0x38, 0xc0, 0x90, 0x76, 0xde, 0xeb, 0xa0, 0x78, 0x18, 0xa2, 0x90,
+ 0x78, 0xbe, 0xcd, 0xa0, 0x79, 0xf8, 0x84, 0x90, 0x7a, 0x9e, 0xaf, 0xa0,
+ 0x7b, 0xd8, 0x66, 0x90, 0x7c, 0x7e, 0x91, 0xa0, 0x7d, 0xb8, 0x48, 0x90,
+ 0x7e, 0x5e, 0x73, 0xa0, 0x7f, 0x98, 0x2a, 0x90, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0xff, 0xff, 0x91, 0x26, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x90,
+ 0x01, 0x04, 0xff, 0xff, 0x8f, 0x80, 0x00, 0x08, 0xff, 0xff, 0x9d, 0x90,
+ 0x01, 0x0c, 0xff, 0xff, 0x9d, 0x90, 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00,
+ 0x50, 0x44, 0x54, 0x00, 0x50, 0x53, 0x54, 0x00, 0x50, 0x57, 0x54, 0x00,
+ 0x50, 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xbb, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0xf8, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x04,
+ 0x1a, 0xc0, 0xff, 0xff, 0xff, 0xff, 0x9e, 0xa6, 0x48, 0xa0, 0xff, 0xff,
+ 0xff, 0xff, 0x9f, 0xbb, 0x15, 0x90, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x86,
+ 0x2a, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xa1, 0x9a, 0xf7, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xcb, 0x89, 0x1a, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x23,
+ 0xf4, 0x70, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x61, 0x26, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xd6, 0xfe, 0x74, 0x5c, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x80,
+ 0xad, 0x90, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfe, 0xc3, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xdb, 0xc0, 0x90, 0x10, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xde,
+ 0xa5, 0x90, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xa9, 0xac, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xde, 0xbe, 0x87, 0x90, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x89,
+ 0x8e, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9e, 0x69, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xe1, 0x69, 0x70, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x7e,
+ 0x4b, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x49, 0x52, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xe4, 0x5e, 0x2d, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe5, 0x29,
+ 0x34, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x47, 0x4a, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xe7, 0x12, 0x51, 0x10, 0xff, 0xff, 0xff, 0xff, 0xe8, 0x27,
+ 0x2c, 0x10, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xf2, 0x33, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xea, 0x07, 0x0e, 0x10, 0xff, 0xff, 0xff, 0xff, 0xea, 0xd2,
+ 0x15, 0x10, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe6, 0xf0, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xec, 0xb1, 0xf7, 0x10, 0xff, 0xff, 0xff, 0xff, 0xed, 0xc6,
+ 0xd2, 0x10, 0xff, 0xff, 0xff, 0xff, 0xee, 0x91, 0xd9, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xaf, 0xee, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x71,
+ 0xbb, 0x10, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x8f, 0xd0, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xf2, 0x7f, 0xc1, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x6f,
+ 0xb2, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5f, 0xa3, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xf5, 0x4f, 0x94, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf6, 0x3f,
+ 0x85, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x2f, 0x76, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xf8, 0x28, 0xa2, 0x10, 0xff, 0xff, 0xff, 0xff, 0xf9, 0x0f,
+ 0x58, 0x90, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x08, 0x84, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xfa, 0xf8, 0x83, 0x20, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe8,
+ 0x66, 0x10, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8, 0x65, 0x20, 0xff, 0xff,
+ 0xff, 0xff, 0xfd, 0xc8, 0x48, 0x10, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb8,
+ 0x47, 0x20, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa8, 0x2a, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x98, 0x29, 0x20, 0x00, 0x00, 0x00, 0x00, 0x01, 0x88,
+ 0x0c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x02, 0x78, 0x0b, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x71, 0x28, 0x90, 0x00, 0x00, 0x00, 0x00, 0x04, 0x61,
+ 0x27, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x05, 0x51, 0x0a, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x06, 0x41, 0x09, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x07, 0x30,
+ 0xec, 0x90, 0x00, 0x00, 0x00, 0x00, 0x07, 0x8d, 0x43, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x09, 0x10, 0xce, 0x90, 0x00, 0x00, 0x00, 0x00, 0x09, 0xad,
+ 0xbf, 0x20, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf0, 0xb0, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x0b, 0xe0, 0xaf, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xd9,
+ 0xcd, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xc0, 0x91, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x0e, 0xb9, 0xaf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xa9,
+ 0xae, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10, 0x99, 0x91, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x11, 0x89, 0x90, 0x20, 0x00, 0x00, 0x00, 0x00, 0x12, 0x79,
+ 0x73, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x69, 0x72, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x14, 0x59, 0x55, 0x10, 0x00, 0x00, 0x00, 0x00, 0x15, 0x49,
+ 0x54, 0x20, 0x00, 0x00, 0x00, 0x00, 0x16, 0x39, 0x37, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x17, 0x29, 0x36, 0x20, 0x00, 0x00, 0x00, 0x00, 0x18, 0x22,
+ 0x53, 0x90, 0x00, 0x00, 0x00, 0x00, 0x19, 0x09, 0x18, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x1a, 0x02, 0x35, 0x90, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xf2,
+ 0x34, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe2, 0x17, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x1c, 0xd2, 0x16, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xc1,
+ 0xf9, 0x90, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xb1, 0xf8, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x1f, 0xa1, 0xdb, 0x90, 0x00, 0x00, 0x00, 0x00, 0x20, 0x76,
+ 0x2b, 0x20, 0x00, 0x00, 0x00, 0x00, 0x21, 0x81, 0xbd, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x22, 0x56, 0x0d, 0x20, 0x00, 0x00, 0x00, 0x00, 0x23, 0x6a,
+ 0xda, 0x10, 0x00, 0x00, 0x00, 0x00, 0x24, 0x35, 0xef, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x25, 0x4a, 0xbc, 0x10, 0x00, 0x00, 0x00, 0x00, 0x26, 0x15,
+ 0xd1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x27, 0x2a, 0x9e, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x27, 0xfe, 0xed, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x29, 0x0a,
+ 0x80, 0x10, 0x00, 0x00, 0x00, 0x00, 0x29, 0xde, 0xcf, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x2a, 0xea, 0x62, 0x10, 0x00, 0x00, 0x00, 0x00, 0x2b, 0xbe,
+ 0xb1, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd3, 0x7e, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x2d, 0x9e, 0x93, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xb3,
+ 0x60, 0x90, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x7e, 0x75, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x30, 0x93, 0x42, 0x90, 0x00, 0x00, 0x00, 0x00, 0x31, 0x67,
+ 0x92, 0x20, 0x00, 0x00, 0x00, 0x00, 0x32, 0x73, 0x24, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x33, 0x47, 0x74, 0x20, 0x00, 0x00, 0x00, 0x00, 0x34, 0x53,
+ 0x06, 0x90, 0x00, 0x00, 0x00, 0x00, 0x35, 0x27, 0x56, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x36, 0x32, 0xe8, 0x90, 0x00, 0x00, 0x00, 0x00, 0x37, 0x07,
+ 0x38, 0x20, 0x00, 0x00, 0x00, 0x00, 0x38, 0x1c, 0x05, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x38, 0xe7, 0x1a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x39, 0xfb,
+ 0xe7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc6, 0xfc, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x3b, 0xdb, 0xc9, 0x10, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xb0,
+ 0x18, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbb, 0xab, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x3e, 0x8f, 0xfa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x9b,
+ 0x8d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x40, 0x6f, 0xdc, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x41, 0x84, 0xa9, 0x90, 0x00, 0x00, 0x00, 0x00, 0x42, 0x4f,
+ 0xbe, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x43, 0x64, 0x8b, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x44, 0x2f, 0xa0, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x45, 0x44,
+ 0x6d, 0x90, 0x00, 0x00, 0x00, 0x00, 0x45, 0xf3, 0xd3, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x47, 0x2d, 0x8a, 0x10, 0x00, 0x00, 0x00, 0x00, 0x47, 0xd3,
+ 0xb5, 0x20, 0x00, 0x00, 0x00, 0x00, 0x49, 0x0d, 0x6c, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x49, 0xb3, 0x97, 0x20, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xed,
+ 0x4e, 0x10, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x9c, 0xb3, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x4c, 0xd6, 0x6a, 0x90, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x7c,
+ 0x95, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xb6, 0x4c, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x4f, 0x5c, 0x77, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x50, 0x96,
+ 0x2e, 0x90, 0x00, 0x00, 0x00, 0x00, 0x51, 0x3c, 0x59, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x52, 0x76, 0x10, 0x90, 0x00, 0x00, 0x00, 0x00, 0x53, 0x1c,
+ 0x3b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0xf2, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x54, 0xfc, 0x1d, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x56, 0x35,
+ 0xd4, 0x90, 0x00, 0x00, 0x00, 0x00, 0x56, 0xe5, 0x3a, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x58, 0x1e, 0xf1, 0x10, 0x00, 0x00, 0x00, 0x00, 0x58, 0xc5,
+ 0x1c, 0x20, 0x00, 0x00, 0x00, 0x00, 0x59, 0xfe, 0xd3, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x5a, 0xa4, 0xfe, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xde,
+ 0xb5, 0x10, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x84, 0xe0, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x5d, 0xbe, 0x97, 0x10, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x64,
+ 0xc2, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9e, 0x79, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x60, 0x4d, 0xde, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x61, 0x87,
+ 0x95, 0x90, 0x00, 0x00, 0x00, 0x00, 0x62, 0x2d, 0xc0, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x63, 0x67, 0x77, 0x90, 0x00, 0x00, 0x00, 0x00, 0x64, 0x0d,
+ 0xa2, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x65, 0x47, 0x59, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x65, 0xed, 0x84, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x67, 0x27,
+ 0x3b, 0x90, 0x00, 0x00, 0x00, 0x00, 0x67, 0xcd, 0x66, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x69, 0x07, 0x1d, 0x90, 0x00, 0x00, 0x00, 0x00, 0x69, 0xad,
+ 0x48, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xe6, 0xff, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x6b, 0x96, 0x65, 0x20, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xd0,
+ 0x1c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x76, 0x47, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x6e, 0xaf, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x00, 0x6f, 0x56,
+ 0x29, 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x8f, 0xe0, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x71, 0x36, 0x0b, 0x20, 0x00, 0x00, 0x00, 0x00, 0x72, 0x6f,
+ 0xc2, 0x10, 0x00, 0x00, 0x00, 0x00, 0x73, 0x15, 0xed, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x74, 0x4f, 0xa4, 0x10, 0x00, 0x00, 0x00, 0x00, 0x74, 0xff,
+ 0x09, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x76, 0x38, 0xc0, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x76, 0xde, 0xeb, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x78, 0x18,
+ 0xa2, 0x90, 0x00, 0x00, 0x00, 0x00, 0x78, 0xbe, 0xcd, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x79, 0xf8, 0x84, 0x90, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x9e,
+ 0xaf, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xd8, 0x66, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x7c, 0x7e, 0x91, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x7d, 0xb8,
+ 0x48, 0x90, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5e, 0x73, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x7f, 0x98, 0x2a, 0x90, 0x00, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0xff, 0xff, 0x91, 0x26, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x90, 0x01,
+ 0x04, 0xff, 0xff, 0x8f, 0x80, 0x00, 0x08, 0xff, 0xff, 0x9d, 0x90, 0x01,
+ 0x0c, 0xff, 0xff, 0x9d, 0x90, 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00, 0x50,
+ 0x44, 0x54, 0x00, 0x50, 0x53, 0x54, 0x00, 0x50, 0x57, 0x54, 0x00, 0x50,
+ 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x0a, 0x50, 0x53, 0x54, 0x38, 0x50, 0x44, 0x54, 0x2c, 0x4d, 0x33,
+ 0x2e, 0x32, 0x2e, 0x30, 0x2c, 0x4d, 0x31, 0x31, 0x2e, 0x31, 0x2e, 0x30,
+ 0x0a
+};
+unsigned int America_Los_Angeles_len = 2845;
+unsigned char America_New_York[] = {
+ 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0x80, 0x00, 0x00, 0x00,
+ 0x9e, 0xa6, 0x1e, 0x70, 0x9f, 0xba, 0xeb, 0x60, 0xa0, 0x86, 0x00, 0x70,
+ 0xa1, 0x9a, 0xcd, 0x60, 0xa2, 0x65, 0xe2, 0x70, 0xa3, 0x83, 0xe9, 0xe0,
+ 0xa4, 0x6a, 0xae, 0x70, 0xa5, 0x35, 0xa7, 0x60, 0xa6, 0x53, 0xca, 0xf0,
+ 0xa7, 0x15, 0x89, 0x60, 0xa8, 0x33, 0xac, 0xf0, 0xa8, 0xfe, 0xa5, 0xe0,
+ 0xaa, 0x13, 0x8e, 0xf0, 0xaa, 0xde, 0x87, 0xe0, 0xab, 0xf3, 0x70, 0xf0,
+ 0xac, 0xbe, 0x69, 0xe0, 0xad, 0xd3, 0x52, 0xf0, 0xae, 0x9e, 0x4b, 0xe0,
+ 0xaf, 0xb3, 0x34, 0xf0, 0xb0, 0x7e, 0x2d, 0xe0, 0xb1, 0x9c, 0x51, 0x70,
+ 0xb2, 0x67, 0x4a, 0x60, 0xb3, 0x7c, 0x33, 0x70, 0xb4, 0x47, 0x2c, 0x60,
+ 0xb5, 0x5c, 0x15, 0x70, 0xb6, 0x27, 0x0e, 0x60, 0xb7, 0x3b, 0xf7, 0x70,
+ 0xb8, 0x06, 0xf0, 0x60, 0xb9, 0x1b, 0xd9, 0x70, 0xb9, 0xe6, 0xd2, 0x60,
+ 0xbb, 0x04, 0xf5, 0xf0, 0xbb, 0xc6, 0xb4, 0x60, 0xbc, 0xe4, 0xd7, 0xf0,
+ 0xbd, 0xaf, 0xd0, 0xe0, 0xbe, 0xc4, 0xb9, 0xf0, 0xbf, 0x8f, 0xb2, 0xe0,
+ 0xc0, 0xa4, 0x9b, 0xf0, 0xc1, 0x6f, 0x94, 0xe0, 0xc2, 0x84, 0x7d, 0xf0,
+ 0xc3, 0x4f, 0x76, 0xe0, 0xc4, 0x64, 0x5f, 0xf0, 0xc5, 0x2f, 0x58, 0xe0,
+ 0xc6, 0x4d, 0x7c, 0x70, 0xc7, 0x0f, 0x3a, 0xe0, 0xc8, 0x2d, 0x5e, 0x70,
+ 0xc8, 0xf8, 0x57, 0x60, 0xca, 0x0d, 0x40, 0x70, 0xca, 0xd8, 0x39, 0x60,
+ 0xcb, 0x88, 0xf0, 0x70, 0xd2, 0x23, 0xf4, 0x70, 0xd2, 0x60, 0xfb, 0xe0,
+ 0xd3, 0x75, 0xe4, 0xf0, 0xd4, 0x40, 0xdd, 0xe0, 0xd5, 0x55, 0xc6, 0xf0,
+ 0xd6, 0x20, 0xbf, 0xe0, 0xd7, 0x35, 0xa8, 0xf0, 0xd8, 0x00, 0xa1, 0xe0,
+ 0xd9, 0x15, 0x8a, 0xf0, 0xd9, 0xe0, 0x83, 0xe0, 0xda, 0xfe, 0xa7, 0x70,
+ 0xdb, 0xc0, 0x65, 0xe0, 0xdc, 0xde, 0x89, 0x70, 0xdd, 0xa9, 0x82, 0x60,
+ 0xde, 0xbe, 0x6b, 0x70, 0xdf, 0x89, 0x64, 0x60, 0xe0, 0x9e, 0x4d, 0x70,
+ 0xe1, 0x69, 0x46, 0x60, 0xe2, 0x7e, 0x2f, 0x70, 0xe3, 0x49, 0x28, 0x60,
+ 0xe4, 0x5e, 0x11, 0x70, 0xe5, 0x57, 0x2e, 0xe0, 0xe6, 0x47, 0x2d, 0xf0,
+ 0xe7, 0x37, 0x10, 0xe0, 0xe8, 0x27, 0x0f, 0xf0, 0xe9, 0x16, 0xf2, 0xe0,
+ 0xea, 0x06, 0xf1, 0xf0, 0xea, 0xf6, 0xd4, 0xe0, 0xeb, 0xe6, 0xd3, 0xf0,
+ 0xec, 0xd6, 0xb6, 0xe0, 0xed, 0xc6, 0xb5, 0xf0, 0xee, 0xbf, 0xd3, 0x60,
+ 0xef, 0xaf, 0xd2, 0x70, 0xf0, 0x9f, 0xb5, 0x60, 0xf1, 0x8f, 0xb4, 0x70,
+ 0xf2, 0x7f, 0x97, 0x60, 0xf3, 0x6f, 0x96, 0x70, 0xf4, 0x5f, 0x79, 0x60,
+ 0xf5, 0x4f, 0x78, 0x70, 0xf6, 0x3f, 0x5b, 0x60, 0xf7, 0x2f, 0x5a, 0x70,
+ 0xf8, 0x28, 0x77, 0xe0, 0xf9, 0x0f, 0x3c, 0x70, 0xfa, 0x08, 0x59, 0xe0,
+ 0xfa, 0xf8, 0x58, 0xf0, 0xfb, 0xe8, 0x3b, 0xe0, 0xfc, 0xd8, 0x3a, 0xf0,
+ 0xfd, 0xc8, 0x1d, 0xe0, 0xfe, 0xb8, 0x1c, 0xf0, 0xff, 0xa7, 0xff, 0xe0,
+ 0x00, 0x97, 0xfe, 0xf0, 0x01, 0x87, 0xe1, 0xe0, 0x02, 0x77, 0xe0, 0xf0,
+ 0x03, 0x70, 0xfe, 0x60, 0x04, 0x60, 0xfd, 0x70, 0x05, 0x50, 0xe0, 0x60,
+ 0x06, 0x40, 0xdf, 0x70, 0x07, 0x30, 0xc2, 0x60, 0x07, 0x8d, 0x19, 0x70,
+ 0x09, 0x10, 0xa4, 0x60, 0x09, 0xad, 0x94, 0xf0, 0x0a, 0xf0, 0x86, 0x60,
+ 0x0b, 0xe0, 0x85, 0x70, 0x0c, 0xd9, 0xa2, 0xe0, 0x0d, 0xc0, 0x67, 0x70,
+ 0x0e, 0xb9, 0x84, 0xe0, 0x0f, 0xa9, 0x83, 0xf0, 0x10, 0x99, 0x66, 0xe0,
+ 0x11, 0x89, 0x65, 0xf0, 0x12, 0x79, 0x48, 0xe0, 0x13, 0x69, 0x47, 0xf0,
+ 0x14, 0x59, 0x2a, 0xe0, 0x15, 0x49, 0x29, 0xf0, 0x16, 0x39, 0x0c, 0xe0,
+ 0x17, 0x29, 0x0b, 0xf0, 0x18, 0x22, 0x29, 0x60, 0x19, 0x08, 0xed, 0xf0,
+ 0x1a, 0x02, 0x0b, 0x60, 0x1a, 0xf2, 0x0a, 0x70, 0x1b, 0xe1, 0xed, 0x60,
+ 0x1c, 0xd1, 0xec, 0x70, 0x1d, 0xc1, 0xcf, 0x60, 0x1e, 0xb1, 0xce, 0x70,
+ 0x1f, 0xa1, 0xb1, 0x60, 0x20, 0x76, 0x00, 0xf0, 0x21, 0x81, 0x93, 0x60,
+ 0x22, 0x55, 0xe2, 0xf0, 0x23, 0x6a, 0xaf, 0xe0, 0x24, 0x35, 0xc4, 0xf0,
+ 0x25, 0x4a, 0x91, 0xe0, 0x26, 0x15, 0xa6, 0xf0, 0x27, 0x2a, 0x73, 0xe0,
+ 0x27, 0xfe, 0xc3, 0x70, 0x29, 0x0a, 0x55, 0xe0, 0x29, 0xde, 0xa5, 0x70,
+ 0x2a, 0xea, 0x37, 0xe0, 0x2b, 0xbe, 0x87, 0x70, 0x2c, 0xd3, 0x54, 0x60,
+ 0x2d, 0x9e, 0x69, 0x70, 0x2e, 0xb3, 0x36, 0x60, 0x2f, 0x7e, 0x4b, 0x70,
+ 0x30, 0x93, 0x18, 0x60, 0x31, 0x67, 0x67, 0xf0, 0x32, 0x72, 0xfa, 0x60,
+ 0x33, 0x47, 0x49, 0xf0, 0x34, 0x52, 0xdc, 0x60, 0x35, 0x27, 0x2b, 0xf0,
+ 0x36, 0x32, 0xbe, 0x60, 0x37, 0x07, 0x0d, 0xf0, 0x38, 0x1b, 0xda, 0xe0,
+ 0x38, 0xe6, 0xef, 0xf0, 0x39, 0xfb, 0xbc, 0xe0, 0x3a, 0xc6, 0xd1, 0xf0,
+ 0x3b, 0xdb, 0x9e, 0xe0, 0x3c, 0xaf, 0xee, 0x70, 0x3d, 0xbb, 0x80, 0xe0,
+ 0x3e, 0x8f, 0xd0, 0x70, 0x3f, 0x9b, 0x62, 0xe0, 0x40, 0x6f, 0xb2, 0x70,
+ 0x41, 0x84, 0x7f, 0x60, 0x42, 0x4f, 0x94, 0x70, 0x43, 0x64, 0x61, 0x60,
+ 0x44, 0x2f, 0x76, 0x70, 0x45, 0x44, 0x43, 0x60, 0x45, 0xf3, 0xa8, 0xf0,
+ 0x47, 0x2d, 0x5f, 0xe0, 0x47, 0xd3, 0x8a, 0xf0, 0x49, 0x0d, 0x41, 0xe0,
+ 0x49, 0xb3, 0x6c, 0xf0, 0x4a, 0xed, 0x23, 0xe0, 0x4b, 0x9c, 0x89, 0x70,
+ 0x4c, 0xd6, 0x40, 0x60, 0x4d, 0x7c, 0x6b, 0x70, 0x4e, 0xb6, 0x22, 0x60,
+ 0x4f, 0x5c, 0x4d, 0x70, 0x50, 0x96, 0x04, 0x60, 0x51, 0x3c, 0x2f, 0x70,
+ 0x52, 0x75, 0xe6, 0x60, 0x53, 0x1c, 0x11, 0x70, 0x54, 0x55, 0xc8, 0x60,
+ 0x54, 0xfb, 0xf3, 0x70, 0x56, 0x35, 0xaa, 0x60, 0x56, 0xe5, 0x0f, 0xf0,
+ 0x58, 0x1e, 0xc6, 0xe0, 0x58, 0xc4, 0xf1, 0xf0, 0x59, 0xfe, 0xa8, 0xe0,
+ 0x5a, 0xa4, 0xd3, 0xf0, 0x5b, 0xde, 0x8a, 0xe0, 0x5c, 0x84, 0xb5, 0xf0,
+ 0x5d, 0xbe, 0x6c, 0xe0, 0x5e, 0x64, 0x97, 0xf0, 0x5f, 0x9e, 0x4e, 0xe0,
+ 0x60, 0x4d, 0xb4, 0x70, 0x61, 0x87, 0x6b, 0x60, 0x62, 0x2d, 0x96, 0x70,
+ 0x63, 0x67, 0x4d, 0x60, 0x64, 0x0d, 0x78, 0x70, 0x65, 0x47, 0x2f, 0x60,
+ 0x65, 0xed, 0x5a, 0x70, 0x67, 0x27, 0x11, 0x60, 0x67, 0xcd, 0x3c, 0x70,
+ 0x69, 0x06, 0xf3, 0x60, 0x69, 0xad, 0x1e, 0x70, 0x6a, 0xe6, 0xd5, 0x60,
+ 0x6b, 0x96, 0x3a, 0xf0, 0x6c, 0xcf, 0xf1, 0xe0, 0x6d, 0x76, 0x1c, 0xf0,
+ 0x6e, 0xaf, 0xd3, 0xe0, 0x6f, 0x55, 0xfe, 0xf0, 0x70, 0x8f, 0xb5, 0xe0,
+ 0x71, 0x35, 0xe0, 0xf0, 0x72, 0x6f, 0x97, 0xe0, 0x73, 0x15, 0xc2, 0xf0,
+ 0x74, 0x4f, 0x79, 0xe0, 0x74, 0xfe, 0xdf, 0x70, 0x76, 0x38, 0x96, 0x60,
+ 0x76, 0xde, 0xc1, 0x70, 0x78, 0x18, 0x78, 0x60, 0x78, 0xbe, 0xa3, 0x70,
+ 0x79, 0xf8, 0x5a, 0x60, 0x7a, 0x9e, 0x85, 0x70, 0x7b, 0xd8, 0x3c, 0x60,
+ 0x7c, 0x7e, 0x67, 0x70, 0x7d, 0xb8, 0x1e, 0x60, 0x7e, 0x5e, 0x49, 0x70,
+ 0x7f, 0x98, 0x00, 0x60, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0xff, 0xff, 0xba, 0x9e, 0x00, 0x00, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x04,
+ 0xff, 0xff, 0xb9, 0xb0, 0x00, 0x08, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x0c,
+ 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00, 0x45, 0x44,
+ 0x54, 0x00, 0x45, 0x53, 0x54, 0x00, 0x45, 0x57, 0x54, 0x00, 0x45, 0x50,
+ 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xed,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0xf8, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x03, 0xf0, 0x90,
+ 0xff, 0xff, 0xff, 0xff, 0x9e, 0xa6, 0x1e, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0x9f, 0xba, 0xeb, 0x60, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x86, 0x00, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xa1, 0x9a, 0xcd, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xa2, 0x65, 0xe2, 0x70, 0xff, 0xff, 0xff, 0xff, 0xa3, 0x83, 0xe9, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xa4, 0x6a, 0xae, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xa5, 0x35, 0xa7, 0x60, 0xff, 0xff, 0xff, 0xff, 0xa6, 0x53, 0xca, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xa7, 0x15, 0x89, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xa8, 0x33, 0xac, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xa8, 0xfe, 0xa5, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xaa, 0x13, 0x8e, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xaa, 0xde, 0x87, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xab, 0xf3, 0x70, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xac, 0xbe, 0x69, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xad, 0xd3, 0x52, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xae, 0x9e, 0x4b, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xaf, 0xb3, 0x34, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xb0, 0x7e, 0x2d, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x9c, 0x51, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xb2, 0x67, 0x4a, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xb3, 0x7c, 0x33, 0x70, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x47, 0x2c, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xb5, 0x5c, 0x15, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xb6, 0x27, 0x0e, 0x60, 0xff, 0xff, 0xff, 0xff, 0xb7, 0x3b, 0xf7, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xb8, 0x06, 0xf0, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xb9, 0x1b, 0xd9, 0x70, 0xff, 0xff, 0xff, 0xff, 0xb9, 0xe6, 0xd2, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xbb, 0x04, 0xf5, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xbb, 0xc6, 0xb4, 0x60, 0xff, 0xff, 0xff, 0xff, 0xbc, 0xe4, 0xd7, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xbd, 0xaf, 0xd0, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xbe, 0xc4, 0xb9, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8f, 0xb2, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xc0, 0xa4, 0x9b, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xc1, 0x6f, 0x94, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xc2, 0x84, 0x7d, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xc3, 0x4f, 0x76, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xc4, 0x64, 0x5f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xc5, 0x2f, 0x58, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xc6, 0x4d, 0x7c, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xc7, 0x0f, 0x3a, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x2d, 0x5e, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xc8, 0xf8, 0x57, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xca, 0x0d, 0x40, 0x70, 0xff, 0xff, 0xff, 0xff, 0xca, 0xd8, 0x39, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xcb, 0x88, 0xf0, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xd2, 0x23, 0xf4, 0x70, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x60, 0xfb, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xd3, 0x75, 0xe4, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xd4, 0x40, 0xdd, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x55, 0xc6, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xd6, 0x20, 0xbf, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xd7, 0x35, 0xa8, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x00, 0xa1, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xd9, 0x15, 0x8a, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xd9, 0xe0, 0x83, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfe, 0xa7, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xdb, 0xc0, 0x65, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xdc, 0xde, 0x89, 0x70, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xa9, 0x82, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xde, 0xbe, 0x6b, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xdf, 0x89, 0x64, 0x60, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9e, 0x4d, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xe1, 0x69, 0x46, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xe2, 0x7e, 0x2f, 0x70, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x49, 0x28, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xe4, 0x5e, 0x11, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xe5, 0x57, 0x2e, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x47, 0x2d, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xe7, 0x37, 0x10, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xe8, 0x27, 0x0f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xe9, 0x16, 0xf2, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xea, 0x06, 0xf1, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xea, 0xf6, 0xd4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe6, 0xd3, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xec, 0xd6, 0xb6, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xed, 0xc6, 0xb5, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xee, 0xbf, 0xd3, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xaf, 0xd2, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xf0, 0x9f, 0xb5, 0x60, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x8f, 0xb4, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xf2, 0x7f, 0x97, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xf3, 0x6f, 0x96, 0x70, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5f, 0x79, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xf5, 0x4f, 0x78, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xf6, 0x3f, 0x5b, 0x60, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x2f, 0x5a, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xf8, 0x28, 0x77, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xf9, 0x0f, 0x3c, 0x70, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x08, 0x59, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf8, 0x58, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xfb, 0xe8, 0x3b, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8, 0x3a, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xfd, 0xc8, 0x1d, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xfe, 0xb8, 0x1c, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7, 0xff, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xfe, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x87, 0xe1, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x02, 0x77, 0xe0, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x70, 0xfe, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x60, 0xfd, 0x70, 0x00, 0x00, 0x00, 0x00, 0x05, 0x50, 0xe0, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x40, 0xdf, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0x30, 0xc2, 0x60, 0x00, 0x00, 0x00, 0x00, 0x07, 0x8d, 0x19, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0x10, 0xa4, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x09, 0xad, 0x94, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf0, 0x86, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x0b, 0xe0, 0x85, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x0c, 0xd9, 0xa2, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xc0, 0x67, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x0e, 0xb9, 0x84, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0xa9, 0x83, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x10, 0x99, 0x66, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x89, 0x65, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x12, 0x79, 0x48, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x13, 0x69, 0x47, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x59, 0x2a, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x15, 0x49, 0x29, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x16, 0x39, 0x0c, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x17, 0x29, 0x0b, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x18, 0x22, 0x29, 0x60, 0x00, 0x00, 0x00, 0x00, 0x19, 0x08, 0xed, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x1a, 0x02, 0x0b, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x1a, 0xf2, 0x0a, 0x70, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe1, 0xed, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0xd1, 0xec, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x1d, 0xc1, 0xcf, 0x60, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xb1, 0xce, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xa1, 0xb1, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x20, 0x76, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x21, 0x81, 0x93, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x22, 0x55, 0xe2, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x6a, 0xaf, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x24, 0x35, 0xc4, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x25, 0x4a, 0x91, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x26, 0x15, 0xa6, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x27, 0x2a, 0x73, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x27, 0xfe, 0xc3, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x29, 0x0a, 0x55, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x29, 0xde, 0xa5, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x2a, 0xea, 0x37, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x2b, 0xbe, 0x87, 0x70, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd3, 0x54, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x2d, 0x9e, 0x69, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x2e, 0xb3, 0x36, 0x60, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x7e, 0x4b, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x30, 0x93, 0x18, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x31, 0x67, 0x67, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x32, 0x72, 0xfa, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x33, 0x47, 0x49, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x34, 0x52, 0xdc, 0x60, 0x00, 0x00, 0x00, 0x00, 0x35, 0x27, 0x2b, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x36, 0x32, 0xbe, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x37, 0x07, 0x0d, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x38, 0x1b, 0xda, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0xe6, 0xef, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x39, 0xfb, 0xbc, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc6, 0xd1, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x3b, 0xdb, 0x9e, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x3c, 0xaf, 0xee, 0x70, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbb, 0x80, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x3e, 0x8f, 0xd0, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0x9b, 0x62, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x40, 0x6f, 0xb2, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x41, 0x84, 0x7f, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x42, 0x4f, 0x94, 0x70, 0x00, 0x00, 0x00, 0x00, 0x43, 0x64, 0x61, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x44, 0x2f, 0x76, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x45, 0x44, 0x43, 0x60, 0x00, 0x00, 0x00, 0x00, 0x45, 0xf3, 0xa8, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x2d, 0x5f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0xd3, 0x8a, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x49, 0x0d, 0x41, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x49, 0xb3, 0x6c, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x4a, 0xed, 0x23, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x9c, 0x89, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x4c, 0xd6, 0x40, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x4d, 0x7c, 0x6b, 0x70, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xb6, 0x22, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x4f, 0x5c, 0x4d, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x96, 0x04, 0x60, 0x00, 0x00, 0x00, 0x00, 0x51, 0x3c, 0x2f, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x52, 0x75, 0xe6, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x53, 0x1c, 0x11, 0x70, 0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0xc8, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x54, 0xfb, 0xf3, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x56, 0x35, 0xaa, 0x60, 0x00, 0x00, 0x00, 0x00, 0x56, 0xe5, 0x0f, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x58, 0x1e, 0xc6, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x58, 0xc4, 0xf1, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x59, 0xfe, 0xa8, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x5a, 0xa4, 0xd3, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x5b, 0xde, 0x8a, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x84, 0xb5, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbe, 0x6c, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x5e, 0x64, 0x97, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9e, 0x4e, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x60, 0x4d, 0xb4, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x61, 0x87, 0x6b, 0x60, 0x00, 0x00, 0x00, 0x00, 0x62, 0x2d, 0x96, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x63, 0x67, 0x4d, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x0d, 0x78, 0x70, 0x00, 0x00, 0x00, 0x00, 0x65, 0x47, 0x2f, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x65, 0xed, 0x5a, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x67, 0x27, 0x11, 0x60, 0x00, 0x00, 0x00, 0x00, 0x67, 0xcd, 0x3c, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x69, 0x06, 0xf3, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x69, 0xad, 0x1e, 0x70, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xe6, 0xd5, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x6b, 0x96, 0x3a, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x6c, 0xcf, 0xf1, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x76, 0x1c, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x6e, 0xaf, 0xd3, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x6f, 0x55, 0xfe, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x70, 0x8f, 0xb5, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x71, 0x35, 0xe0, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x72, 0x6f, 0x97, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x73, 0x15, 0xc2, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x74, 0x4f, 0x79, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x74, 0xfe, 0xdf, 0x70, 0x00, 0x00, 0x00, 0x00, 0x76, 0x38, 0x96, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x76, 0xde, 0xc1, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x78, 0x18, 0x78, 0x60, 0x00, 0x00, 0x00, 0x00, 0x78, 0xbe, 0xa3, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x79, 0xf8, 0x5a, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x7a, 0x9e, 0x85, 0x70, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xd8, 0x3c, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x7c, 0x7e, 0x67, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x7d, 0xb8, 0x1e, 0x60, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5e, 0x49, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0x98, 0x00, 0x60, 0x00, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0xff, 0xff, 0xba, 0x9e, 0x00, 0x00, 0xff,
+ 0xff, 0xc7, 0xc0, 0x01, 0x04, 0xff, 0xff, 0xb9, 0xb0, 0x00, 0x08, 0xff,
+ 0xff, 0xc7, 0xc0, 0x01, 0x0c, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x10, 0x4c,
+ 0x4d, 0x54, 0x00, 0x45, 0x44, 0x54, 0x00, 0x45, 0x53, 0x54, 0x00, 0x45,
+ 0x57, 0x54, 0x00, 0x45, 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x0a, 0x45, 0x53, 0x54, 0x35, 0x45, 0x44,
+ 0x54, 0x2c, 0x4d, 0x33, 0x2e, 0x32, 0x2e, 0x30, 0x2c, 0x4d, 0x31, 0x31,
+ 0x2e, 0x31, 0x2e, 0x30, 0x0a
+};
+unsigned int America_New_York_len = 3545;
+unsigned char Australia_Sydney[] = {
+ 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0e, 0x80, 0x00, 0x00, 0x00,
+ 0x9c, 0x4e, 0xa6, 0x9c, 0x9c, 0xbc, 0x20, 0xf0, 0xcb, 0x54, 0xb3, 0x00,
+ 0xcb, 0xc7, 0x57, 0x70, 0xcc, 0xb7, 0x56, 0x80, 0xcd, 0xa7, 0x39, 0x70,
+ 0xce, 0xa0, 0x73, 0x00, 0xcf, 0x87, 0x1b, 0x70, 0x03, 0x70, 0x39, 0x80,
+ 0x04, 0x0d, 0x1c, 0x00, 0x05, 0x50, 0x1b, 0x80, 0x05, 0xf6, 0x38, 0x80,
+ 0x07, 0x2f, 0xfd, 0x80, 0x07, 0xd6, 0x1a, 0x80, 0x09, 0x0f, 0xdf, 0x80,
+ 0x09, 0xb5, 0xfc, 0x80, 0x0a, 0xef, 0xc1, 0x80, 0x0b, 0x9f, 0x19, 0x00,
+ 0x0c, 0xd8, 0xde, 0x00, 0x0d, 0x7e, 0xfb, 0x00, 0x0e, 0xb8, 0xc0, 0x00,
+ 0x0f, 0x5e, 0xdd, 0x00, 0x10, 0x98, 0xa2, 0x00, 0x11, 0x3e, 0xbf, 0x00,
+ 0x12, 0x78, 0x84, 0x00, 0x13, 0x1e, 0xa1, 0x00, 0x14, 0x58, 0x66, 0x00,
+ 0x14, 0xfe, 0x83, 0x00, 0x16, 0x38, 0x48, 0x00, 0x17, 0x0c, 0x89, 0x80,
+ 0x18, 0x21, 0x64, 0x80, 0x18, 0xc7, 0x81, 0x80, 0x1a, 0x01, 0x46, 0x80,
+ 0x1a, 0xa7, 0x63, 0x80, 0x1b, 0xe1, 0x28, 0x80, 0x1c, 0x87, 0x45, 0x80,
+ 0x1d, 0xc1, 0x0a, 0x80, 0x1e, 0x79, 0x9c, 0x80, 0x1f, 0x97, 0xb2, 0x00,
+ 0x20, 0x59, 0x7e, 0x80, 0x21, 0x80, 0xce, 0x80, 0x22, 0x42, 0x9b, 0x00,
+ 0x23, 0x69, 0xeb, 0x00, 0x24, 0x22, 0x7d, 0x00, 0x25, 0x49, 0xcd, 0x00,
+ 0x25, 0xef, 0xea, 0x00, 0x27, 0x29, 0xaf, 0x00, 0x27, 0xcf, 0xcc, 0x00,
+ 0x29, 0x09, 0x91, 0x00, 0x29, 0xaf, 0xae, 0x00, 0x2a, 0xe9, 0x73, 0x00,
+ 0x2b, 0x98, 0xca, 0x80, 0x2c, 0xd2, 0x8f, 0x80, 0x2d, 0x78, 0xac, 0x80,
+ 0x2e, 0xb2, 0x71, 0x80, 0x2f, 0x58, 0x8e, 0x80, 0x30, 0x92, 0x53, 0x80,
+ 0x31, 0x5d, 0x5a, 0x80, 0x32, 0x72, 0x35, 0x80, 0x33, 0x3d, 0x3c, 0x80,
+ 0x34, 0x52, 0x17, 0x80, 0x35, 0x1d, 0x1e, 0x80, 0x36, 0x31, 0xf9, 0x80,
+ 0x36, 0xfd, 0x00, 0x80, 0x38, 0x1b, 0x16, 0x00, 0x38, 0xdc, 0xe2, 0x80,
+ 0x39, 0xa7, 0xe9, 0x80, 0x3a, 0xbc, 0xc4, 0x80, 0x3b, 0xda, 0xda, 0x00,
+ 0x3c, 0xa5, 0xe1, 0x00, 0x3d, 0xba, 0xbc, 0x00, 0x3e, 0x85, 0xc3, 0x00,
+ 0x3f, 0x9a, 0x9e, 0x00, 0x40, 0x65, 0xa5, 0x00, 0x41, 0x83, 0xba, 0x80,
+ 0x42, 0x45, 0x87, 0x00, 0x43, 0x63, 0x9c, 0x80, 0x44, 0x2e, 0xa3, 0x80,
+ 0x45, 0x43, 0x7e, 0x80, 0x46, 0x05, 0x4b, 0x00, 0x47, 0x23, 0x60, 0x80,
+ 0x47, 0xf7, 0xa2, 0x00, 0x48, 0xe7, 0x93, 0x00, 0x49, 0xd7, 0x84, 0x00,
+ 0x4a, 0xc7, 0x75, 0x00, 0x4b, 0xb7, 0x66, 0x00, 0x4c, 0xa7, 0x57, 0x00,
+ 0x4d, 0x97, 0x48, 0x00, 0x4e, 0x87, 0x39, 0x00, 0x4f, 0x77, 0x2a, 0x00,
+ 0x50, 0x70, 0x55, 0x80, 0x51, 0x60, 0x46, 0x80, 0x52, 0x50, 0x37, 0x80,
+ 0x53, 0x40, 0x28, 0x80, 0x54, 0x30, 0x19, 0x80, 0x55, 0x20, 0x0a, 0x80,
+ 0x56, 0x0f, 0xfb, 0x80, 0x56, 0xff, 0xec, 0x80, 0x57, 0xef, 0xdd, 0x80,
+ 0x58, 0xdf, 0xce, 0x80, 0x59, 0xcf, 0xbf, 0x80, 0x5a, 0xbf, 0xb0, 0x80,
+ 0x5b, 0xb8, 0xdc, 0x00, 0x5c, 0xa8, 0xcd, 0x00, 0x5d, 0x98, 0xbe, 0x00,
+ 0x5e, 0x88, 0xaf, 0x00, 0x5f, 0x78, 0xa0, 0x00, 0x60, 0x68, 0x91, 0x00,
+ 0x61, 0x58, 0x82, 0x00, 0x62, 0x48, 0x73, 0x00, 0x63, 0x38, 0x64, 0x00,
+ 0x64, 0x28, 0x55, 0x00, 0x65, 0x18, 0x46, 0x00, 0x66, 0x11, 0x71, 0x80,
+ 0x67, 0x01, 0x62, 0x80, 0x67, 0xf1, 0x53, 0x80, 0x68, 0xe1, 0x44, 0x80,
+ 0x69, 0xd1, 0x35, 0x80, 0x6a, 0xc1, 0x26, 0x80, 0x6b, 0xb1, 0x17, 0x80,
+ 0x6c, 0xa1, 0x08, 0x80, 0x6d, 0x90, 0xf9, 0x80, 0x6e, 0x80, 0xea, 0x80,
+ 0x6f, 0x70, 0xdb, 0x80, 0x70, 0x6a, 0x07, 0x00, 0x71, 0x59, 0xf8, 0x00,
+ 0x72, 0x49, 0xe9, 0x00, 0x73, 0x39, 0xda, 0x00, 0x74, 0x29, 0xcb, 0x00,
+ 0x75, 0x19, 0xbc, 0x00, 0x76, 0x09, 0xad, 0x00, 0x76, 0xf9, 0x9e, 0x00,
+ 0x77, 0xe9, 0x8f, 0x00, 0x78, 0xd9, 0x80, 0x00, 0x79, 0xc9, 0x71, 0x00,
+ 0x7a, 0xb9, 0x62, 0x00, 0x7b, 0xb2, 0x8d, 0x80, 0x7c, 0xa2, 0x7e, 0x80,
+ 0x7d, 0x92, 0x6f, 0x80, 0x7e, 0x82, 0x60, 0x80, 0x7f, 0x72, 0x51, 0x80,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
+ 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x00, 0x00,
+ 0x8d, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x9a, 0xb0, 0x01, 0x04, 0x00, 0x00,
+ 0x8c, 0xa0, 0x00, 0x09, 0x00, 0x00, 0x9a, 0xb0, 0x01, 0x04, 0x00, 0x00,
+ 0x8c, 0xa0, 0x00, 0x09, 0x4c, 0x4d, 0x54, 0x00, 0x41, 0x45, 0x44, 0x54,
+ 0x00, 0x41, 0x45, 0x53, 0x54, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x8f, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0e,
+ 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x73, 0x16, 0x7f, 0x3c, 0xff, 0xff, 0xff, 0xff, 0x9c, 0x4e, 0xa6, 0x9c,
+ 0xff, 0xff, 0xff, 0xff, 0x9c, 0xbc, 0x20, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xcb, 0x54, 0xb3, 0x00, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xc7, 0x57, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xcc, 0xb7, 0x56, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xcd, 0xa7, 0x39, 0x70, 0xff, 0xff, 0xff, 0xff, 0xce, 0xa0, 0x73, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xcf, 0x87, 0x1b, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x70, 0x39, 0x80, 0x00, 0x00, 0x00, 0x00, 0x04, 0x0d, 0x1c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x50, 0x1b, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0xf6, 0x38, 0x80, 0x00, 0x00, 0x00, 0x00, 0x07, 0x2f, 0xfd, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0xd6, 0x1a, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x09, 0x0f, 0xdf, 0x80, 0x00, 0x00, 0x00, 0x00, 0x09, 0xb5, 0xfc, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x0a, 0xef, 0xc1, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x0b, 0x9f, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xd8, 0xde, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0d, 0x7e, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0e, 0xb8, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x5e, 0xdd, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x10, 0x98, 0xa2, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x11, 0x3e, 0xbf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x78, 0x84, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x13, 0x1e, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x14, 0x58, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0xfe, 0x83, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x16, 0x38, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x17, 0x0c, 0x89, 0x80, 0x00, 0x00, 0x00, 0x00, 0x18, 0x21, 0x64, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x18, 0xc7, 0x81, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1a, 0x01, 0x46, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xa7, 0x63, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe1, 0x28, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1c, 0x87, 0x45, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xc1, 0x0a, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x1e, 0x79, 0x9c, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0x97, 0xb2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x59, 0x7e, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x21, 0x80, 0xce, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x22, 0x42, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x69, 0xeb, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x24, 0x22, 0x7d, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x25, 0x49, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0xef, 0xea, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x27, 0x29, 0xaf, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x27, 0xcf, 0xcc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x09, 0x91, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x29, 0xaf, 0xae, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2a, 0xe9, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x98, 0xca, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd2, 0x8f, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x2d, 0x78, 0xac, 0x80, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xb2, 0x71, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x2f, 0x58, 0x8e, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x30, 0x92, 0x53, 0x80, 0x00, 0x00, 0x00, 0x00, 0x31, 0x5d, 0x5a, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x32, 0x72, 0x35, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x33, 0x3d, 0x3c, 0x80, 0x00, 0x00, 0x00, 0x00, 0x34, 0x52, 0x17, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x35, 0x1d, 0x1e, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x36, 0x31, 0xf9, 0x80, 0x00, 0x00, 0x00, 0x00, 0x36, 0xfd, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0x1b, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x38, 0xdc, 0xe2, 0x80, 0x00, 0x00, 0x00, 0x00, 0x39, 0xa7, 0xe9, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x3a, 0xbc, 0xc4, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x3b, 0xda, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xa5, 0xe1, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3d, 0xba, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3e, 0x85, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x9a, 0x9e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x65, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x41, 0x83, 0xba, 0x80, 0x00, 0x00, 0x00, 0x00, 0x42, 0x45, 0x87, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x43, 0x63, 0x9c, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x44, 0x2e, 0xa3, 0x80, 0x00, 0x00, 0x00, 0x00, 0x45, 0x43, 0x7e, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x46, 0x05, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x23, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x47, 0xf7, 0xa2, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x48, 0xe7, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x49, 0xd7, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xc7, 0x75, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x4b, 0xb7, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x4c, 0xa7, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x97, 0x48, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x4e, 0x87, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x4f, 0x77, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x70, 0x55, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x51, 0x60, 0x46, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x52, 0x50, 0x37, 0x80, 0x00, 0x00, 0x00, 0x00, 0x53, 0x40, 0x28, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x54, 0x30, 0x19, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x55, 0x20, 0x0a, 0x80, 0x00, 0x00, 0x00, 0x00, 0x56, 0x0f, 0xfb, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x56, 0xff, 0xec, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x57, 0xef, 0xdd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x58, 0xdf, 0xce, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x59, 0xcf, 0xbf, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x5a, 0xbf, 0xb0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xb8, 0xdc, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x5c, 0xa8, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x5d, 0x98, 0xbe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x88, 0xaf, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x5f, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x60, 0x68, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x58, 0x82, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x62, 0x48, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x63, 0x38, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x28, 0x55, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x65, 0x18, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x66, 0x11, 0x71, 0x80, 0x00, 0x00, 0x00, 0x00, 0x67, 0x01, 0x62, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x67, 0xf1, 0x53, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x68, 0xe1, 0x44, 0x80, 0x00, 0x00, 0x00, 0x00, 0x69, 0xd1, 0x35, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x6a, 0xc1, 0x26, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x6b, 0xb1, 0x17, 0x80, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xa1, 0x08, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x6d, 0x90, 0xf9, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x6e, 0x80, 0xea, 0x80, 0x00, 0x00, 0x00, 0x00, 0x6f, 0x70, 0xdb, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x6a, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x71, 0x59, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x49, 0xe9, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x73, 0x39, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x74, 0x29, 0xcb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x19, 0xbc, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x76, 0x09, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x76, 0xf9, 0x9e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0xe9, 0x8f, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x78, 0xd9, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x79, 0xc9, 0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0xb9, 0x62, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7b, 0xb2, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x7c, 0xa2, 0x7e, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7d, 0x92, 0x6f, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x7e, 0x82, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0x72, 0x51, 0x80, 0x00, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x03, 0x04, 0x03, 0x00, 0x00, 0x8d, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x9a,
+ 0xb0, 0x01, 0x04, 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09, 0x00, 0x00, 0x9a,
+ 0xb0, 0x01, 0x04, 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09, 0x4c, 0x4d, 0x54,
+ 0x00, 0x41, 0x45, 0x44, 0x54, 0x00, 0x41, 0x45, 0x53, 0x54, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x41, 0x45,
+ 0x53, 0x54, 0x2d, 0x31, 0x30, 0x41, 0x45, 0x44, 0x54, 0x2c, 0x4d, 0x31,
+ 0x30, 0x2e, 0x31, 0x2e, 0x30, 0x2c, 0x4d, 0x34, 0x2e, 0x31, 0x2e, 0x30,
+ 0x2f, 0x33, 0x0a
+};
+unsigned int Australia_Sydney_len = 2223;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.cc
new file mode 100644
index 00000000000..441638b2569
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.cc
@@ -0,0 +1,500 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of the y_absl::Time class, which is declared in
+// //y_absl/time/time.h.
+//
+// The representation for an y_absl::Time is an y_absl::Duration offset from the
+// epoch. We use the traditional Unix epoch (1970-01-01 00:00:00 +0000)
+// for convenience, but this is not exposed in the API and could be changed.
+//
+// NOTE: To keep type verbosity to a minimum, the following variable naming
+// conventions are used throughout this file.
+//
+// tz: An y_absl::TimeZone
+// ci: An y_absl::TimeZone::CivilInfo
+// ti: An y_absl::TimeZone::TimeInfo
+// cd: An y_absl::CivilDay or a cctz::civil_day
+// cs: An y_absl::CivilSecond or a cctz::civil_second
+// bd: An y_absl::Time::Breakdown
+// cl: A cctz::time_zone::civil_lookup
+// al: A cctz::time_zone::absolute_lookup
+
+#include "y_absl/time/time.h"
+
+#if defined(_MSC_VER)
+#include <winsock2.h> // for timeval
+#endif
+
+#include <cstring>
+#include <ctime>
+#include <limits>
+
+#include "y_absl/time/internal/cctz/include/cctz/civil_time.h"
+#include "y_absl/time/internal/cctz/include/cctz/time_zone.h"
+
+namespace cctz = y_absl::time_internal::cctz;
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace {
+
+inline cctz::time_point<cctz::seconds> unix_epoch() {
+ return std::chrono::time_point_cast<cctz::seconds>(
+ std::chrono::system_clock::from_time_t(0));
+}
+
+// Floors d to the next unit boundary closer to negative infinity.
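+// For example (an illustrative note rather than upstream documentation):
+// with d == y_absl::Seconds(-7) / 2 (i.e., -3.5s) and unit ==
+// y_absl::Seconds(1), IDivDuration() truncates toward zero to -3, but the
+// adjustment below yields the floored quotient -4.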
+inline int64_t FloorToUnit(y_absl::Duration d, y_absl::Duration unit) {
+ y_absl::Duration rem;
+ int64_t q = y_absl::IDivDuration(d, unit, &rem);
+ return (q > 0 || rem >= ZeroDuration() ||
+ q == std::numeric_limits<int64_t>::min())
+ ? q
+ : q - 1;
+}
+
+inline y_absl::Time::Breakdown InfiniteFutureBreakdown() {
+ y_absl::Time::Breakdown bd;
+ bd.year = std::numeric_limits<int64_t>::max();
+ bd.month = 12;
+ bd.day = 31;
+ bd.hour = 23;
+ bd.minute = 59;
+ bd.second = 59;
+ bd.subsecond = y_absl::InfiniteDuration();
+ bd.weekday = 4;
+ bd.yearday = 365;
+ bd.offset = 0;
+ bd.is_dst = false;
+ bd.zone_abbr = "-00";
+ return bd;
+}
+
+inline y_absl::Time::Breakdown InfinitePastBreakdown() {
+ Time::Breakdown bd;
+ bd.year = std::numeric_limits<int64_t>::min();
+ bd.month = 1;
+ bd.day = 1;
+ bd.hour = 0;
+ bd.minute = 0;
+ bd.second = 0;
+ bd.subsecond = -y_absl::InfiniteDuration();
+ bd.weekday = 7;
+ bd.yearday = 1;
+ bd.offset = 0;
+ bd.is_dst = false;
+ bd.zone_abbr = "-00";
+ return bd;
+}
+
+inline y_absl::TimeZone::CivilInfo InfiniteFutureCivilInfo() {
+ TimeZone::CivilInfo ci;
+ ci.cs = CivilSecond::max();
+ ci.subsecond = InfiniteDuration();
+ ci.offset = 0;
+ ci.is_dst = false;
+ ci.zone_abbr = "-00";
+ return ci;
+}
+
+inline y_absl::TimeZone::CivilInfo InfinitePastCivilInfo() {
+ TimeZone::CivilInfo ci;
+ ci.cs = CivilSecond::min();
+ ci.subsecond = -InfiniteDuration();
+ ci.offset = 0;
+ ci.is_dst = false;
+ ci.zone_abbr = "-00";
+ return ci;
+}
+
+inline y_absl::TimeConversion InfiniteFutureTimeConversion() {
+ y_absl::TimeConversion tc;
+ tc.pre = tc.trans = tc.post = y_absl::InfiniteFuture();
+ tc.kind = y_absl::TimeConversion::UNIQUE;
+ tc.normalized = true;
+ return tc;
+}
+
+inline TimeConversion InfinitePastTimeConversion() {
+ y_absl::TimeConversion tc;
+ tc.pre = tc.trans = tc.post = y_absl::InfinitePast();
+ tc.kind = y_absl::TimeConversion::UNIQUE;
+ tc.normalized = true;
+ return tc;
+}
+
+// Makes a Time from sec, overflowing to InfiniteFuture/InfinitePast as
+// necessary. If sec is min/max, then consult cs+tz to check for overflow.
+Time MakeTimeWithOverflow(const cctz::time_point<cctz::seconds>& sec,
+ const cctz::civil_second& cs,
+ const cctz::time_zone& tz,
+ bool* normalized = nullptr) {
+ const auto max = cctz::time_point<cctz::seconds>::max();
+ const auto min = cctz::time_point<cctz::seconds>::min();
+ if (sec == max) {
+ const auto al = tz.lookup(max);
+ if (cs > al.cs) {
+ if (normalized) *normalized = true;
+ return y_absl::InfiniteFuture();
+ }
+ }
+ if (sec == min) {
+ const auto al = tz.lookup(min);
+ if (cs < al.cs) {
+ if (normalized) *normalized = true;
+ return y_absl::InfinitePast();
+ }
+ }
+ const auto hi = (sec - unix_epoch()).count();
+ return time_internal::FromUnixDuration(time_internal::MakeDuration(hi));
+}
+
+// Returns Mon=1..Sun=7.
+inline int MapWeekday(const cctz::weekday& wd) {
+ switch (wd) {
+ case cctz::weekday::monday:
+ return 1;
+ case cctz::weekday::tuesday:
+ return 2;
+ case cctz::weekday::wednesday:
+ return 3;
+ case cctz::weekday::thursday:
+ return 4;
+ case cctz::weekday::friday:
+ return 5;
+ case cctz::weekday::saturday:
+ return 6;
+ case cctz::weekday::sunday:
+ return 7;
+ }
+ return 1;
+}
+
+bool FindTransition(const cctz::time_zone& tz,
+ bool (cctz::time_zone::*find_transition)(
+ const cctz::time_point<cctz::seconds>& tp,
+ cctz::time_zone::civil_transition* trans) const,
+ Time t, TimeZone::CivilTransition* trans) {
+ // Transitions are second-aligned, so we can discard any fractional part.
+ const auto tp = unix_epoch() + cctz::seconds(ToUnixSeconds(t));
+ cctz::time_zone::civil_transition tr;
+ if (!(tz.*find_transition)(tp, &tr)) return false;
+ trans->from = CivilSecond(tr.from);
+ trans->to = CivilSecond(tr.to);
+ return true;
+}
+
+} // namespace
+
+//
+// Time
+//
+
+y_absl::Time::Breakdown Time::In(y_absl::TimeZone tz) const {
+ if (*this == y_absl::InfiniteFuture()) return InfiniteFutureBreakdown();
+ if (*this == y_absl::InfinitePast()) return InfinitePastBreakdown();
+
+ const auto tp = unix_epoch() + cctz::seconds(time_internal::GetRepHi(rep_));
+ const auto al = cctz::time_zone(tz).lookup(tp);
+ const auto cs = al.cs;
+ const auto cd = cctz::civil_day(cs);
+
+ y_absl::Time::Breakdown bd;
+ bd.year = cs.year();
+ bd.month = cs.month();
+ bd.day = cs.day();
+ bd.hour = cs.hour();
+ bd.minute = cs.minute();
+ bd.second = cs.second();
+ bd.subsecond = time_internal::MakeDuration(0, time_internal::GetRepLo(rep_));
+ bd.weekday = MapWeekday(cctz::get_weekday(cd));
+ bd.yearday = cctz::get_yearday(cd);
+ bd.offset = al.offset;
+ bd.is_dst = al.is_dst;
+ bd.zone_abbr = al.abbr;
+ return bd;
+}
+
+//
+// Conversions from/to other time types.
+//
+
+y_absl::Time FromUDate(double udate) {
+ return time_internal::FromUnixDuration(y_absl::Milliseconds(udate));
+}
+
+y_absl::Time FromUniversal(int64_t universal) {
+ return y_absl::UniversalEpoch() + 100 * y_absl::Nanoseconds(universal);
+}
+
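+// The ToUnix*() conversions below take a fast path when the whole-second
+// count is non-negative and small enough that scaling it to the target unit
+// cannot overflow int64_t (e.g., a second count below 2^33, times 10^9
+// nanoseconds, still fits). The low 32 bits of the representation count
+// quarter-nanosecond ticks (kTicksPerNanosecond == 4), hence the divisions
+// by 4, 4000, and 4000 * 1000.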
+int64_t ToUnixNanos(Time t) {
+ if (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >= 0 &&
+ time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >> 33 == 0) {
+ return (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) *
+ 1000 * 1000 * 1000) +
+ (time_internal::GetRepLo(time_internal::ToUnixDuration(t)) / 4);
+ }
+ return FloorToUnit(time_internal::ToUnixDuration(t), y_absl::Nanoseconds(1));
+}
+
+int64_t ToUnixMicros(Time t) {
+ if (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >= 0 &&
+ time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >> 43 == 0) {
+ return (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) *
+ 1000 * 1000) +
+ (time_internal::GetRepLo(time_internal::ToUnixDuration(t)) / 4000);
+ }
+ return FloorToUnit(time_internal::ToUnixDuration(t), y_absl::Microseconds(1));
+}
+
+int64_t ToUnixMillis(Time t) {
+ if (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >= 0 &&
+ time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >> 53 == 0) {
+ return (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) * 1000) +
+ (time_internal::GetRepLo(time_internal::ToUnixDuration(t)) /
+ (4000 * 1000));
+ }
+ return FloorToUnit(time_internal::ToUnixDuration(t), y_absl::Milliseconds(1));
+}
+
+int64_t ToUnixSeconds(Time t) {
+ return time_internal::GetRepHi(time_internal::ToUnixDuration(t));
+}
+
+time_t ToTimeT(Time t) { return y_absl::ToTimespec(t).tv_sec; }
+
+double ToUDate(Time t) {
+ return y_absl::FDivDuration(time_internal::ToUnixDuration(t),
+ y_absl::Milliseconds(1));
+}
+
+int64_t ToUniversal(y_absl::Time t) {
+ return y_absl::FloorToUnit(t - y_absl::UniversalEpoch(), y_absl::Nanoseconds(100));
+}
+
+y_absl::Time TimeFromTimespec(timespec ts) {
+ return time_internal::FromUnixDuration(y_absl::DurationFromTimespec(ts));
+}
+
+y_absl::Time TimeFromTimeval(timeval tv) {
+ return time_internal::FromUnixDuration(y_absl::DurationFromTimeval(tv));
+}
+
+timespec ToTimespec(Time t) {
+ timespec ts;
+ y_absl::Duration d = time_internal::ToUnixDuration(t);
+ if (!time_internal::IsInfiniteDuration(d)) {
+ ts.tv_sec = time_internal::GetRepHi(d);
+ if (ts.tv_sec == time_internal::GetRepHi(d)) { // no time_t narrowing
+ ts.tv_nsec = time_internal::GetRepLo(d) / 4; // floor
+ return ts;
+ }
+ }
+ if (d >= y_absl::ZeroDuration()) {
+ ts.tv_sec = std::numeric_limits<time_t>::max();
+ ts.tv_nsec = 1000 * 1000 * 1000 - 1;
+ } else {
+ ts.tv_sec = std::numeric_limits<time_t>::min();
+ ts.tv_nsec = 0;
+ }
+ return ts;
+}
+
+timeval ToTimeval(Time t) {
+ timeval tv;
+ timespec ts = y_absl::ToTimespec(t);
+ tv.tv_sec = ts.tv_sec;
+ if (tv.tv_sec != ts.tv_sec) { // narrowing
+ if (ts.tv_sec < 0) {
+ tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::min();
+ tv.tv_usec = 0;
+ } else {
+ tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::max();
+ tv.tv_usec = 1000 * 1000 - 1;
+ }
+ return tv;
+ }
+ tv.tv_usec = static_cast<int>(ts.tv_nsec / 1000); // suseconds_t
+ return tv;
+}
+
+Time FromChrono(const std::chrono::system_clock::time_point& tp) {
+ return time_internal::FromUnixDuration(time_internal::FromChrono(
+ tp - std::chrono::system_clock::from_time_t(0)));
+}
+
+std::chrono::system_clock::time_point ToChronoTime(y_absl::Time t) {
+ using D = std::chrono::system_clock::duration;
+ auto d = time_internal::ToUnixDuration(t);
+ if (d < ZeroDuration()) d = Floor(d, FromChrono(D{1}));
+ return std::chrono::system_clock::from_time_t(0) +
+ time_internal::ToChronoDuration<D>(d);
+}
+
+//
+// TimeZone
+//
+
+y_absl::TimeZone::CivilInfo TimeZone::At(Time t) const {
+ if (t == y_absl::InfiniteFuture()) return InfiniteFutureCivilInfo();
+ if (t == y_absl::InfinitePast()) return InfinitePastCivilInfo();
+
+ const auto ud = time_internal::ToUnixDuration(t);
+ const auto tp = unix_epoch() + cctz::seconds(time_internal::GetRepHi(ud));
+ const auto al = cz_.lookup(tp);
+
+ TimeZone::CivilInfo ci;
+ ci.cs = CivilSecond(al.cs);
+ ci.subsecond = time_internal::MakeDuration(0, time_internal::GetRepLo(ud));
+ ci.offset = al.offset;
+ ci.is_dst = al.is_dst;
+ ci.zone_abbr = al.abbr;
+ return ci;
+}
+
+y_absl::TimeZone::TimeInfo TimeZone::At(CivilSecond ct) const {
+ const cctz::civil_second cs(ct);
+ const auto cl = cz_.lookup(cs);
+
+ TimeZone::TimeInfo ti;
+ switch (cl.kind) {
+ case cctz::time_zone::civil_lookup::UNIQUE:
+ ti.kind = TimeZone::TimeInfo::UNIQUE;
+ break;
+ case cctz::time_zone::civil_lookup::SKIPPED:
+ ti.kind = TimeZone::TimeInfo::SKIPPED;
+ break;
+ case cctz::time_zone::civil_lookup::REPEATED:
+ ti.kind = TimeZone::TimeInfo::REPEATED;
+ break;
+ }
+ ti.pre = MakeTimeWithOverflow(cl.pre, cs, cz_);
+ ti.trans = MakeTimeWithOverflow(cl.trans, cs, cz_);
+ ti.post = MakeTimeWithOverflow(cl.post, cs, cz_);
+ return ti;
+}
+
+bool TimeZone::NextTransition(Time t, CivilTransition* trans) const {
+ return FindTransition(cz_, &cctz::time_zone::next_transition, t, trans);
+}
+
+bool TimeZone::PrevTransition(Time t, CivilTransition* trans) const {
+ return FindTransition(cz_, &cctz::time_zone::prev_transition, t, trans);
+}
+
+//
+// Conversions involving time zones.
+//
+
+y_absl::TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
+ int min, int sec, TimeZone tz) {
+ // Avoids years that are too extreme for CivilSecond to normalize.
+ if (year > 300000000000) return InfiniteFutureTimeConversion();
+ if (year < -300000000000) return InfinitePastTimeConversion();
+
+ const CivilSecond cs(year, mon, day, hour, min, sec);
+ const auto ti = tz.At(cs);
+
+ TimeConversion tc;
+ tc.pre = ti.pre;
+ tc.trans = ti.trans;
+ tc.post = ti.post;
+ switch (ti.kind) {
+ case TimeZone::TimeInfo::UNIQUE:
+ tc.kind = TimeConversion::UNIQUE;
+ break;
+ case TimeZone::TimeInfo::SKIPPED:
+ tc.kind = TimeConversion::SKIPPED;
+ break;
+ case TimeZone::TimeInfo::REPEATED:
+ tc.kind = TimeConversion::REPEATED;
+ break;
+ }
+ tc.normalized = false;
+ if (year != cs.year() || mon != cs.month() || day != cs.day() ||
+ hour != cs.hour() || min != cs.minute() || sec != cs.second()) {
+ tc.normalized = true;
+ }
+ return tc;
+}
+
+y_absl::Time FromTM(const struct tm& tm, y_absl::TimeZone tz) {
+ civil_year_t tm_year = tm.tm_year;
+ // Avoids years that are too extreme for CivilSecond to normalize.
+ if (tm_year > 300000000000ll) return InfiniteFuture();
+ if (tm_year < -300000000000ll) return InfinitePast();
+ int tm_mon = tm.tm_mon;
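+  // If tm_mon were INT_MAX, the tm_mon + 1 below would overflow int, so
+  // twelve months are shifted into one extra year; CivilSecond normalization
+  // keeps the resulting civil time unchanged.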
+ if (tm_mon == std::numeric_limits<int>::max()) {
+ tm_mon -= 12;
+ tm_year += 1;
+ }
+ const auto ti = tz.At(CivilSecond(tm_year + 1900, tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec));
+ return tm.tm_isdst == 0 ? ti.post : ti.pre;
+}
+
+struct tm ToTM(y_absl::Time t, y_absl::TimeZone tz) {
+ struct tm tm = {};
+
+ const auto ci = tz.At(t);
+ const auto& cs = ci.cs;
+ tm.tm_sec = cs.second();
+ tm.tm_min = cs.minute();
+ tm.tm_hour = cs.hour();
+ tm.tm_mday = cs.day();
+ tm.tm_mon = cs.month() - 1;
+
+ // Saturates tm.tm_year in cases of over/underflow, accounting for the fact
+ // that tm.tm_year is years since 1900.
+ if (cs.year() < std::numeric_limits<int>::min() + 1900) {
+ tm.tm_year = std::numeric_limits<int>::min();
+ } else if (cs.year() > std::numeric_limits<int>::max()) {
+ tm.tm_year = std::numeric_limits<int>::max() - 1900;
+ } else {
+ tm.tm_year = static_cast<int>(cs.year() - 1900);
+ }
+
+ switch (GetWeekday(cs)) {
+ case Weekday::sunday:
+ tm.tm_wday = 0;
+ break;
+ case Weekday::monday:
+ tm.tm_wday = 1;
+ break;
+ case Weekday::tuesday:
+ tm.tm_wday = 2;
+ break;
+ case Weekday::wednesday:
+ tm.tm_wday = 3;
+ break;
+ case Weekday::thursday:
+ tm.tm_wday = 4;
+ break;
+ case Weekday::friday:
+ tm.tm_wday = 5;
+ break;
+ case Weekday::saturday:
+ tm.tm_wday = 6;
+ break;
+ }
+ tm.tm_yday = GetYearDay(cs) - 1;
+ tm.tm_isdst = ci.is_dst ? 1 : 0;
+
+ return tm;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h
new file mode 100644
index 00000000000..16150ad3b63
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h
@@ -0,0 +1,1616 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: time.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines abstractions for computing with absolute points
+// in time, durations of time, and formatting and parsing time within a given
+// time zone. The following abstractions are defined:
+//
+// * `y_absl::Time` defines an absolute, specific instance in time
+// * `y_absl::Duration` defines a signed, fixed-length span of time
+// * `y_absl::TimeZone` defines geopolitical time zone regions (as collected
+// within the IANA Time Zone database (https://www.iana.org/time-zones)).
+//
+// Note: Absolute times are distinct from civil times, which refer to the
+// human-scale time commonly represented by `YYYY-MM-DD hh:mm:ss`. The mapping
+// between absolute and civil times can be specified by use of time zones
+// (`y_absl::TimeZone` within this API). That is:
+//
+// Civil Time = F(Absolute Time, Time Zone)
+// Absolute Time = G(Civil Time, Time Zone)
+//
+// See civil_time.h for abstractions related to constructing and manipulating
+// civil time.
+//
+// Example:
+//
+// y_absl::TimeZone nyc;
+// // LoadTimeZone() may fail so it's always better to check for success.
+// if (!y_absl::LoadTimeZone("America/New_York", &nyc)) {
+// // handle error case
+// }
+//
+// // My flight leaves NYC on Jan 2, 2017 at 03:04:05
+// y_absl::CivilSecond cs(2017, 1, 2, 3, 4, 5);
+// y_absl::Time takeoff = y_absl::FromCivil(cs, nyc);
+//
+// y_absl::Duration flight_duration = y_absl::Hours(21) + y_absl::Minutes(35);
+// y_absl::Time landing = takeoff + flight_duration;
+//
+// y_absl::TimeZone syd;
+// if (!y_absl::LoadTimeZone("Australia/Sydney", &syd)) {
+// // handle error case
+// }
+// TString s = y_absl::FormatTime(
+// "My flight will land in Sydney on %Y-%m-%d at %H:%M:%S",
+// landing, syd);
+
+#ifndef ABSL_TIME_TIME_H_
+#define ABSL_TIME_TIME_H_
+
+#if !defined(_MSC_VER)
+#include <sys/time.h>
+#else
+// We don't include `winsock2.h` because it drags in `windows.h` and friends,
+// and they define conflicting macros like OPAQUE, ERROR, and more. This has the
+// potential to break Abseil users.
+//
+// Instead we only forward declare `timeval` and require Windows users include
+// `winsock2.h` themselves. This is both inconsistent and troublesome, but so is
+// including 'windows.h', so we are picking the lesser of two evils here.
+struct timeval;
+#endif
+#include <chrono> // NOLINT(build/c++11)
+#include <cmath>
+#include <cstdint>
+#include <ctime>
+#include <ostream>
+#include <util/generic/string.h>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/time/civil_time.h"
+#include "y_absl/time/internal/cctz/include/cctz/time_zone.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+class Duration; // Defined below
+class Time; // Defined below
+class TimeZone; // Defined below
+
+namespace time_internal {
+int64_t IDivDuration(bool satq, Duration num, Duration den, Duration* rem);
+constexpr Time FromUnixDuration(Duration d);
+constexpr Duration ToUnixDuration(Time t);
+constexpr int64_t GetRepHi(Duration d);
+constexpr uint32_t GetRepLo(Duration d);
+constexpr Duration MakeDuration(int64_t hi, uint32_t lo);
+constexpr Duration MakeDuration(int64_t hi, int64_t lo);
+inline Duration MakePosDoubleDuration(double n);
+constexpr int64_t kTicksPerNanosecond = 4;
+constexpr int64_t kTicksPerSecond = 1000 * 1000 * 1000 * kTicksPerNanosecond;
+template <std::intmax_t N>
+constexpr Duration FromInt64(int64_t v, std::ratio<1, N>);
+constexpr Duration FromInt64(int64_t v, std::ratio<60>);
+constexpr Duration FromInt64(int64_t v, std::ratio<3600>);
+template <typename T>
+using EnableIfIntegral = typename std::enable_if<
+ std::is_integral<T>::value || std::is_enum<T>::value, int>::type;
+template <typename T>
+using EnableIfFloat =
+ typename std::enable_if<std::is_floating_point<T>::value, int>::type;
+} // namespace time_internal
+
+// Duration
+//
+// The `y_absl::Duration` class represents a signed, fixed-length span of time.
+// A `Duration` is generated using a unit-specific factory function, or is
+// the result of subtracting one `y_absl::Time` from another. Durations behave
+// like unit-safe integers and they support all the natural integer-like
+// arithmetic operations. Arithmetic overflows and saturates at +/- infinity.
+// `Duration` should be passed by value rather than const reference.
+//
+// Factory functions `Nanoseconds()`, `Microseconds()`, `Milliseconds()`,
+// `Seconds()`, `Minutes()`, `Hours()`, and `InfiniteDuration()` allow for
+// creation of constexpr `Duration` values.
+//
+// Examples:
+//
+// constexpr y_absl::Duration ten_ns = y_absl::Nanoseconds(10);
+// constexpr y_absl::Duration min = y_absl::Minutes(1);
+// constexpr y_absl::Duration hour = y_absl::Hours(1);
+// y_absl::Duration dur = 60 * min; // dur == hour
+// y_absl::Duration half_sec = y_absl::Milliseconds(500);
+// y_absl::Duration quarter_sec = 0.25 * y_absl::Seconds(1);
+//
+// `Duration` values can be easily converted to an integral number of units
+// using the division operator.
+//
+// Example:
+//
+// constexpr y_absl::Duration dur = y_absl::Milliseconds(1500);
+// int64_t ns = dur / y_absl::Nanoseconds(1); // ns == 1500000000
+// int64_t ms = dur / y_absl::Milliseconds(1); // ms == 1500
+// int64_t sec = dur / y_absl::Seconds(1); // sec == 1 (subseconds truncated)
+// int64_t min = dur / y_absl::Minutes(1); // min == 0
+//
+// See the `IDivDuration()` and `FDivDuration()` functions below for details on
+// how to access the fractional parts of the quotient.
+//
+// Alternatively, conversions can be performed using helpers such as
+// `ToInt64Microseconds()` and `ToDoubleSeconds()`.
+class Duration {
+ public:
+ // Value semantics.
+ constexpr Duration() : rep_hi_(0), rep_lo_(0) {} // zero-length duration
+
+ // Copyable.
+#if !defined(__clang__) && defined(_MSC_VER) && _MSC_VER < 1910
+ // Explicitly defining the constexpr copy constructor avoids an MSVC bug.
+ constexpr Duration(const Duration& d)
+ : rep_hi_(d.rep_hi_), rep_lo_(d.rep_lo_) {}
+#else
+ constexpr Duration(const Duration& d) = default;
+#endif
+ Duration& operator=(const Duration& d) = default;
+
+ // Compound assignment operators.
+ Duration& operator+=(Duration d);
+ Duration& operator-=(Duration d);
+ Duration& operator*=(int64_t r);
+ Duration& operator*=(double r);
+ Duration& operator/=(int64_t r);
+ Duration& operator/=(double r);
+ Duration& operator%=(Duration rhs);
+
+ // Overloads that forward to either the int64_t or double overloads above.
+ // Integer operands must be representable as int64_t.
+ template <typename T, time_internal::EnableIfIntegral<T> = 0>
+ Duration& operator*=(T r) {
+ int64_t x = r;
+ return *this *= x;
+ }
+
+ template <typename T, time_internal::EnableIfIntegral<T> = 0>
+ Duration& operator/=(T r) {
+ int64_t x = r;
+ return *this /= x;
+ }
+
+ template <typename T, time_internal::EnableIfFloat<T> = 0>
+ Duration& operator*=(T r) {
+ double x = r;
+ return *this *= x;
+ }
+
+ template <typename T, time_internal::EnableIfFloat<T> = 0>
+ Duration& operator/=(T r) {
+ double x = r;
+ return *this /= x;
+ }
+
+ template <typename H>
+ friend H AbslHashValue(H h, Duration d) {
+ return H::combine(std::move(h), d.rep_hi_, d.rep_lo_);
+ }
+
+ private:
+ friend constexpr int64_t time_internal::GetRepHi(Duration d);
+ friend constexpr uint32_t time_internal::GetRepLo(Duration d);
+ friend constexpr Duration time_internal::MakeDuration(int64_t hi,
+ uint32_t lo);
+ constexpr Duration(int64_t hi, uint32_t lo) : rep_hi_(hi), rep_lo_(lo) {}
+ int64_t rep_hi_;
+ uint32_t rep_lo_;
+};
+
+// Relational Operators
+constexpr bool operator<(Duration lhs, Duration rhs);
+constexpr bool operator>(Duration lhs, Duration rhs) { return rhs < lhs; }
+constexpr bool operator>=(Duration lhs, Duration rhs) { return !(lhs < rhs); }
+constexpr bool operator<=(Duration lhs, Duration rhs) { return !(rhs < lhs); }
+constexpr bool operator==(Duration lhs, Duration rhs);
+constexpr bool operator!=(Duration lhs, Duration rhs) { return !(lhs == rhs); }
+
+// Additive Operators
+constexpr Duration operator-(Duration d);
+inline Duration operator+(Duration lhs, Duration rhs) { return lhs += rhs; }
+inline Duration operator-(Duration lhs, Duration rhs) { return lhs -= rhs; }
+
+// Multiplicative Operators
+// Integer operands must be representable as int64_t.
+template <typename T>
+Duration operator*(Duration lhs, T rhs) {
+ return lhs *= rhs;
+}
+template <typename T>
+Duration operator*(T lhs, Duration rhs) {
+ return rhs *= lhs;
+}
+template <typename T>
+Duration operator/(Duration lhs, T rhs) {
+ return lhs /= rhs;
+}
+inline int64_t operator/(Duration lhs, Duration rhs) {
+ return time_internal::IDivDuration(true, lhs, rhs,
+ &lhs); // trunc towards zero
+}
+inline Duration operator%(Duration lhs, Duration rhs) { return lhs %= rhs; }
+
+// IDivDuration()
+//
+// Divides a numerator `Duration` by a denominator `Duration`, returning the
+// quotient and remainder. The remainder always has the same sign as the
+// numerator. The returned quotient and remainder respect the identity:
+//
+// numerator = denominator * quotient + remainder
+//
+// Returned quotients are capped to the range of `int64_t`, with the difference
+// spilling into the remainder to uphold the above identity. This means that the
+// remainder returned could differ from the remainder returned by
+// `Duration::operator%` for huge quotients.
+//
+// See also the notes on `InfiniteDuration()` below regarding the behavior of
+// division involving zero and infinite durations.
+//
+// Example:
+//
+// constexpr y_absl::Duration a =
+// y_absl::Seconds(std::numeric_limits<int64_t>::max()); // big
+// constexpr y_absl::Duration b = y_absl::Nanoseconds(1); // small
+//
+// y_absl::Duration rem = a % b;
+// // rem == y_absl::ZeroDuration()
+//
+// // Here, q would overflow int64_t, so rem accounts for the difference.
+// int64_t q = y_absl::IDivDuration(a, b, &rem);
+// // q == std::numeric_limits<int64_t>::max(), rem == a - b * q
+inline int64_t IDivDuration(Duration num, Duration den, Duration* rem) {
+ return time_internal::IDivDuration(true, num, den,
+ rem); // trunc towards zero
+}
+
+// FDivDuration()
+//
+// Divides a `Duration` numerator into a fractional number of units of a
+// `Duration` denominator.
+//
+// See also the notes on `InfiniteDuration()` below regarding the behavior of
+// division involving zero and infinite durations.
+//
+// Example:
+//
+// double d = y_absl::FDivDuration(y_absl::Milliseconds(1500), y_absl::Seconds(1));
+// // d == 1.5
+double FDivDuration(Duration num, Duration den);
+
+// ZeroDuration()
+//
+// Returns a zero-length duration. This function behaves just like the default
+// constructor, but the name helps make the semantics clear at call sites.
+constexpr Duration ZeroDuration() { return Duration(); }
+
+// AbsDuration()
+//
+// Returns the absolute value of a duration.
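+//
+// Example:
+//
+//   y_absl::Duration d = y_absl::AbsDuration(y_absl::Seconds(-5));
+//   // d == y_absl::Seconds(5)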
+inline Duration AbsDuration(Duration d) {
+ return (d < ZeroDuration()) ? -d : d;
+}
+
+// Trunc()
+//
+// Truncates a duration (toward zero) to a multiple of a non-zero unit.
+//
+// Example:
+//
+// y_absl::Duration d = y_absl::Nanoseconds(123456789);
+// y_absl::Duration a = y_absl::Trunc(d, y_absl::Microseconds(1)); // 123456us
+Duration Trunc(Duration d, Duration unit);
+
+// Floor()
+//
+// Floors a duration (toward negative infinity) to a multiple of a non-zero
+// unit; the result is the largest such multiple not greater than the duration.
+//
+// Example:
+//
+// y_absl::Duration d = y_absl::Nanoseconds(123456789);
+// y_absl::Duration b = y_absl::Floor(d, y_absl::Microseconds(1)); // 123456us
+Duration Floor(Duration d, Duration unit);
+
+// Ceil()
+//
+// Rounds a duration up (toward positive infinity) to a multiple of a non-zero
+// unit; the result is the smallest such multiple not less than the duration.
+//
+// Example:
+//
+// y_absl::Duration d = y_absl::Nanoseconds(123456789);
+// y_absl::Duration c = y_absl::Ceil(d, y_absl::Microseconds(1)); // 123457us
+Duration Ceil(Duration d, Duration unit);
+
+// InfiniteDuration()
+//
+// Returns an infinite `Duration`. To get a `Duration` representing negative
+// infinity, use `-InfiniteDuration()`.
+//
+// Duration arithmetic overflows to +/- infinity and saturates. In general,
+// arithmetic with `Duration` infinities is similar to IEEE 754 infinities
+// except where IEEE 754 NaN would be involved, in which case +/-
+// `InfiniteDuration()` is used in place of a "nan" Duration.
+//
+// Examples:
+//
+// constexpr y_absl::Duration inf = y_absl::InfiniteDuration();
+// const y_absl::Duration d = ... any finite duration ...
+//
+// inf == inf + inf
+// inf == inf + d
+// inf == inf - inf
+// -inf == d - inf
+//
+// inf == d * 1e100
+// inf == inf / 2
+// 0 == d / inf
+// INT64_MAX == inf / d
+//
+// d < inf
+// -inf < d
+//
+// // Division by zero returns infinity, or INT64_MIN/MAX where appropriate.
+// inf == d / 0
+// INT64_MAX == d / y_absl::ZeroDuration()
+//
+// The examples involving the `/` operator above also apply to `IDivDuration()`
+// and `FDivDuration()`.
+constexpr Duration InfiniteDuration();
+
+// Nanoseconds()
+// Microseconds()
+// Milliseconds()
+// Seconds()
+// Minutes()
+// Hours()
+//
+// Factory functions for constructing `Duration` values from an integral number
+// of the unit indicated by the factory function's name. The number must be
+// representable as int64_t.
+//
+// NOTE: no "Days()" factory function exists because "a day" is ambiguous.
+// Civil days are not always 24 hours long, and a 24-hour duration often does
+// not correspond with a civil day. If a 24-hour duration is needed, use
+// `y_absl::Hours(24)`. If you actually want a civil day, use y_absl::CivilDay
+// from civil_time.h.
+//
+// Example:
+//
+// y_absl::Duration a = y_absl::Seconds(60);
+// y_absl::Duration b = y_absl::Minutes(1); // b == a
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+constexpr Duration Nanoseconds(T n) {
+ return time_internal::FromInt64(n, std::nano{});
+}
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+constexpr Duration Microseconds(T n) {
+ return time_internal::FromInt64(n, std::micro{});
+}
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+constexpr Duration Milliseconds(T n) {
+ return time_internal::FromInt64(n, std::milli{});
+}
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+constexpr Duration Seconds(T n) {
+ return time_internal::FromInt64(n, std::ratio<1>{});
+}
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+constexpr Duration Minutes(T n) {
+ return time_internal::FromInt64(n, std::ratio<60>{});
+}
+template <typename T, time_internal::EnableIfIntegral<T> = 0>
+constexpr Duration Hours(T n) {
+ return time_internal::FromInt64(n, std::ratio<3600>{});
+}
+
+// Factory overloads for constructing `Duration` values from a floating-point
+// number of the unit indicated by the factory function's name. These functions
+// exist for convenience, but they are not as efficient as the integral
+// factories, which should be preferred.
+//
+// Example:
+//
+// auto a = y_absl::Seconds(1.5); // OK
+// auto b = y_absl::Milliseconds(1500); // BETTER
+template <typename T, time_internal::EnableIfFloat<T> = 0>
+Duration Nanoseconds(T n) {
+ return n * Nanoseconds(1);
+}
+template <typename T, time_internal::EnableIfFloat<T> = 0>
+Duration Microseconds(T n) {
+ return n * Microseconds(1);
+}
+template <typename T, time_internal::EnableIfFloat<T> = 0>
+Duration Milliseconds(T n) {
+ return n * Milliseconds(1);
+}
+template <typename T, time_internal::EnableIfFloat<T> = 0>
+Duration Seconds(T n) {
+ if (n >= 0) { // Note: `NaN >= 0` is false.
+ if (n >= static_cast<T>((std::numeric_limits<int64_t>::max)())) {
+ return InfiniteDuration();
+ }
+ return time_internal::MakePosDoubleDuration(n);
+ } else {
+ if (std::isnan(n))
+ return std::signbit(n) ? -InfiniteDuration() : InfiniteDuration();
+ if (n <= (std::numeric_limits<int64_t>::min)()) return -InfiniteDuration();
+ return -time_internal::MakePosDoubleDuration(-n);
+ }
+}
+template <typename T, time_internal::EnableIfFloat<T> = 0>
+Duration Minutes(T n) {
+ return n * Minutes(1);
+}
+template <typename T, time_internal::EnableIfFloat<T> = 0>
+Duration Hours(T n) {
+ return n * Hours(1);
+}
+
+// ToInt64Nanoseconds()
+// ToInt64Microseconds()
+// ToInt64Milliseconds()
+// ToInt64Seconds()
+// ToInt64Minutes()
+// ToInt64Hours()
+//
+// Helper functions that convert a Duration to an integral count of the
+// indicated unit. These return the same results as the `IDivDuration()`
+// function, though they usually do so more efficiently; see the
+// documentation of `IDivDuration()` for details about overflow, etc.
+//
+// Example:
+//
+// y_absl::Duration d = y_absl::Milliseconds(1500);
+// int64_t isec = y_absl::ToInt64Seconds(d); // isec == 1
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Nanoseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Microseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Seconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Minutes(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Hours(Duration d);
+
+// ToDoubleNanoseconds()
+// ToDoubleMicroseconds()
+// ToDoubleMilliseconds()
+// ToDoubleSeconds()
+// ToDoubleMinutes()
+// ToDoubleHours()
+//
+// Helper functions that convert a Duration to a floating point count of the
+// indicated unit. These functions are shorthand for the `FDivDuration()`
+// function above; see its documentation for details about overflow, etc.
+//
+// Example:
+//
+// y_absl::Duration d = y_absl::Milliseconds(1500);
+// double dsec = y_absl::ToDoubleSeconds(d); // dsec == 1.5
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleNanoseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMicroseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMilliseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleSeconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMinutes(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleHours(Duration d);
+
+// FromChrono()
+//
+// Converts any of the pre-defined std::chrono durations to an y_absl::Duration.
+//
+// Example:
+//
+// std::chrono::milliseconds ms(123);
+// y_absl::Duration d = y_absl::FromChrono(ms);
+constexpr Duration FromChrono(const std::chrono::nanoseconds& d);
+constexpr Duration FromChrono(const std::chrono::microseconds& d);
+constexpr Duration FromChrono(const std::chrono::milliseconds& d);
+constexpr Duration FromChrono(const std::chrono::seconds& d);
+constexpr Duration FromChrono(const std::chrono::minutes& d);
+constexpr Duration FromChrono(const std::chrono::hours& d);
+
+// ToChronoNanoseconds()
+// ToChronoMicroseconds()
+// ToChronoMilliseconds()
+// ToChronoSeconds()
+// ToChronoMinutes()
+// ToChronoHours()
+//
+// Converts an y_absl::Duration to any of the pre-defined std::chrono durations.
+// If overflow would occur, the returned value will saturate at the min/max
+// chrono duration value instead.
+//
+// Example:
+//
+// y_absl::Duration d = y_absl::Microseconds(123);
+// auto x = y_absl::ToChronoMicroseconds(d);
+// auto y = y_absl::ToChronoNanoseconds(d); // x == y
+// auto z = y_absl::ToChronoSeconds(y_absl::InfiniteDuration());
+// // z == std::chrono::seconds::max()
+std::chrono::nanoseconds ToChronoNanoseconds(Duration d);
+std::chrono::microseconds ToChronoMicroseconds(Duration d);
+std::chrono::milliseconds ToChronoMilliseconds(Duration d);
+std::chrono::seconds ToChronoSeconds(Duration d);
+std::chrono::minutes ToChronoMinutes(Duration d);
+std::chrono::hours ToChronoHours(Duration d);
+
+// FormatDuration()
+//
+// Returns a string representing the duration in the form "72h3m0.5s".
+// Returns "inf" or "-inf" for +/- `InfiniteDuration()`.
+TString FormatDuration(Duration d);
+
+// Output stream operator.
+inline std::ostream& operator<<(std::ostream& os, Duration d) {
+ return os << FormatDuration(d);
+}
+
+// ParseDuration()
+//
+// Parses a duration string consisting of a possibly signed sequence of
+// decimal numbers, each with an optional fractional part and a unit
+// suffix. The valid suffixes are "ns", "us", "ms", "s", "m", and "h".
+// Simple examples include "300ms", "-1.5h", and "2h45m". Parses "0" as
+// `ZeroDuration()`. Parses "inf" and "-inf" as +/- `InfiniteDuration()`.
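+//
+// Example:
+//
+//   y_absl::Duration d;
+//   if (y_absl::ParseDuration("2h45m", &d)) {
+//     // d == y_absl::Hours(2) + y_absl::Minutes(45)
+//   }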
+bool ParseDuration(y_absl::string_view dur_string, Duration* d);
+
+// AbslParseFlag()
+//
+// Parses a command-line flag string representation `text` into a Duration
+// value. Duration flags must be specified in a format that is valid input for
+// `y_absl::ParseDuration()`.
+bool AbslParseFlag(y_absl::string_view text, Duration* dst, TString* error);
+
+// AbslUnparseFlag()
+//
+// Unparses a Duration value into a command-line string representation using
+// the format specified by `y_absl::ParseDuration()`.
+TString AbslUnparseFlag(Duration d);
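+
+// Example (an illustrative round trip through the two flag helpers above):
+//
+//   y_absl::Duration timeout;
+//   TString err;
+//   if (y_absl::AbslParseFlag("1.5h", &timeout, &err)) {
+//     TString s = y_absl::AbslUnparseFlag(timeout);
+//     // s parses back to the same Duration via y_absl::ParseDuration().
+//   }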
+
+ABSL_DEPRECATED("Use AbslParseFlag() instead.")
+bool ParseFlag(const TString& text, Duration* dst, TString* error);
+ABSL_DEPRECATED("Use AbslUnparseFlag() instead.")
+TString UnparseFlag(Duration d);
+
+// Time
+//
+// An `y_absl::Time` represents a specific instant in time. Arithmetic operators
+// are provided for naturally expressing time calculations. Instances are
+// created using `y_absl::Now()` and the `y_absl::From*()` factory functions that
+// accept the gamut of other time representations. Formatting and parsing
+// functions are provided for conversion to and from strings. `y_absl::Time`
+// should be passed by value rather than const reference.
+//
+// `y_absl::Time` assumes there are 60 seconds in a minute, which means the
+// underlying time scales must be "smeared" to eliminate leap seconds.
+// See https://developers.google.com/time/smear.
+//
+// Even though `y_absl::Time` supports a wide range of timestamps, exercise
+// caution when using values in the distant past. `y_absl::Time` uses the
+// Proleptic Gregorian calendar, which extends the Gregorian calendar backward
+// to dates before its introduction in 1582.
+// See https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar
+// for more information. Use the ICU calendar classes to convert a date in
+// some other calendar (http://userguide.icu-project.org/datetime/calendar).
+//
+// Similarly, standardized time zones are a reasonably recent innovation, with
+// the Greenwich prime meridian being established in 1884. The TZ database
+// itself does not profess accurate offsets for timestamps prior to 1970. The
+// breakdown of future timestamps is subject to the whim of regional
+// governments.
+//
+// The `y_absl::Time` class represents an instant in time as a count of clock
+// ticks of some granularity (resolution) from some starting point (epoch).
+//
+// `y_absl::Time` uses a resolution that is high enough to avoid loss in
+// precision, and a range that is wide enough to avoid overflow, when
+// converting between tick counts in most Google time scales (i.e., resolution
+// of at least one nanosecond, and range +/-100 billion years). Conversions
+// between the time scales are performed by truncating (towards negative
+// infinity) to the nearest representable point.
+//
+// Examples:
+//
+// y_absl::Time t1 = ...;
+// y_absl::Time t2 = t1 + y_absl::Minutes(2);
+// y_absl::Duration d = t2 - t1; // == y_absl::Minutes(2)
+//
+class Time {
+ public:
+ // Value semantics.
+
+ // Returns the Unix epoch. However, those reading your code may not know
+ // or expect the Unix epoch as the default value, so make your code more
+ // readable by explicitly initializing all instances before use.
+ //
+ // Example:
+ // y_absl::Time t = y_absl::UnixEpoch();
+ // y_absl::Time t = y_absl::Now();
+ // y_absl::Time t = y_absl::TimeFromTimeval(tv);
+ // y_absl::Time t = y_absl::InfinitePast();
+ constexpr Time() = default;
+
+ // Copyable.
+ constexpr Time(const Time& t) = default;
+ Time& operator=(const Time& t) = default;
+
+ // Assignment operators.
+ Time& operator+=(Duration d) {
+ rep_ += d;
+ return *this;
+ }
+ Time& operator-=(Duration d) {
+ rep_ -= d;
+ return *this;
+ }
+
+ // Time::Breakdown
+ //
+ // The calendar and wall-clock (aka "civil time") components of an
+ // `y_absl::Time` in a certain `y_absl::TimeZone`. This struct is not
+ // intended to represent an instant in time. So, rather than passing
+ // a `Time::Breakdown` to a function, pass an `y_absl::Time` and an
+ // `y_absl::TimeZone`.
+ //
+ // Deprecated. Use `y_absl::TimeZone::CivilInfo`.
+  struct Breakdown {
+ int64_t year; // year (e.g., 2013)
+ int month; // month of year [1:12]
+ int day; // day of month [1:31]
+ int hour; // hour of day [0:23]
+ int minute; // minute of hour [0:59]
+ int second; // second of minute [0:59]
+ Duration subsecond; // [Seconds(0):Seconds(1)) if finite
+    int weekday;            // 1==Mon, ..., 7==Sun
+ int yearday; // day of year [1:366]
+
+ // Note: The following fields exist for backward compatibility
+ // with older APIs. Accessing these fields directly is a sign of
+ // imprudent logic in the calling code. Modern time-related code
+ // should only access this data indirectly by way of FormatTime().
+ // These fields are undefined for InfiniteFuture() and InfinitePast().
+ int offset; // seconds east of UTC
+ bool is_dst; // is offset non-standard?
+ const char* zone_abbr; // time-zone abbreviation (e.g., "PST")
+ };
+
+ // Time::In()
+ //
+ // Returns the breakdown of this instant in the given TimeZone.
+ //
+ // Deprecated. Use `y_absl::TimeZone::At(Time)`.
+ Breakdown In(TimeZone tz) const;
+
+ template <typename H>
+ friend H AbslHashValue(H h, Time t) {
+ return H::combine(std::move(h), t.rep_);
+ }
+
+ private:
+ friend constexpr Time time_internal::FromUnixDuration(Duration d);
+ friend constexpr Duration time_internal::ToUnixDuration(Time t);
+ friend constexpr bool operator<(Time lhs, Time rhs);
+ friend constexpr bool operator==(Time lhs, Time rhs);
+ friend Duration operator-(Time lhs, Time rhs);
+ friend constexpr Time UniversalEpoch();
+ friend constexpr Time InfiniteFuture();
+ friend constexpr Time InfinitePast();
+ constexpr explicit Time(Duration rep) : rep_(rep) {}
+ Duration rep_;
+};
+
+// Relational Operators
+constexpr bool operator<(Time lhs, Time rhs) { return lhs.rep_ < rhs.rep_; }
+constexpr bool operator>(Time lhs, Time rhs) { return rhs < lhs; }
+constexpr bool operator>=(Time lhs, Time rhs) { return !(lhs < rhs); }
+constexpr bool operator<=(Time lhs, Time rhs) { return !(rhs < lhs); }
+constexpr bool operator==(Time lhs, Time rhs) { return lhs.rep_ == rhs.rep_; }
+constexpr bool operator!=(Time lhs, Time rhs) { return !(lhs == rhs); }
+
+// Additive Operators
+inline Time operator+(Time lhs, Duration rhs) { return lhs += rhs; }
+inline Time operator+(Duration lhs, Time rhs) { return rhs += lhs; }
+inline Time operator-(Time lhs, Duration rhs) { return lhs -= rhs; }
+inline Duration operator-(Time lhs, Time rhs) { return lhs.rep_ - rhs.rep_; }
+
+// UnixEpoch()
+//
+// Returns the `y_absl::Time` representing "1970-01-01 00:00:00.0 +0000".
+constexpr Time UnixEpoch() { return Time(); }
+
+// UniversalEpoch()
+//
+// Returns the `y_absl::Time` representing "0001-01-01 00:00:00.0 +0000", the
+// epoch of the ICU Universal Time Scale.
+constexpr Time UniversalEpoch() {
+ // 719162 is the number of days from 0001-01-01 to 1970-01-01,
+ // assuming the Gregorian calendar.
+ return Time(time_internal::MakeDuration(-24 * 719162 * int64_t{3600}, 0U));
+}
+
+// InfiniteFuture()
+//
+// Returns an `y_absl::Time` that is infinitely far in the future.
+constexpr Time InfiniteFuture() {
+ return Time(
+ time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(), ~0U));
+}
+
+// InfinitePast()
+//
+// Returns an `y_absl::Time` that is infinitely far in the past.
+constexpr Time InfinitePast() {
+ return Time(
+ time_internal::MakeDuration((std::numeric_limits<int64_t>::min)(), ~0U));
+}
+
+// FromUnixNanos()
+// FromUnixMicros()
+// FromUnixMillis()
+// FromUnixSeconds()
+// FromTimeT()
+// FromUDate()
+// FromUniversal()
+//
+// Creates an `y_absl::Time` from a variety of other representations.
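+//
+// Example (illustrative):
+//
+//   y_absl::Time t = y_absl::FromUnixSeconds(10);
+//   // t == y_absl::UnixEpoch() + y_absl::Seconds(10)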
+constexpr Time FromUnixNanos(int64_t ns);
+constexpr Time FromUnixMicros(int64_t us);
+constexpr Time FromUnixMillis(int64_t ms);
+constexpr Time FromUnixSeconds(int64_t s);
+constexpr Time FromTimeT(time_t t);
+Time FromUDate(double udate);
+Time FromUniversal(int64_t universal);
+
+// ToUnixNanos()
+// ToUnixMicros()
+// ToUnixMillis()
+// ToUnixSeconds()
+// ToTimeT()
+// ToUDate()
+// ToUniversal()
+//
+// Converts an `y_absl::Time` to a variety of other representations. Note that
+// these operations round down toward negative infinity where necessary to
+// adjust to the resolution of the result type. Beware of possible time_t
+// over/underflow in ToTime{T,val,spec}() on 32-bit platforms.
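+//
+// Example of the round-down behavior (illustrative):
+//
+//   y_absl::Time t = y_absl::FromUnixMillis(-1);  // 1ms before the epoch
+//   int64_t s = y_absl::ToUnixSeconds(t);         // s == -1, not 0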
+int64_t ToUnixNanos(Time t);
+int64_t ToUnixMicros(Time t);
+int64_t ToUnixMillis(Time t);
+int64_t ToUnixSeconds(Time t);
+time_t ToTimeT(Time t);
+double ToUDate(Time t);
+int64_t ToUniversal(Time t);
+
+// DurationFromTimespec()
+// DurationFromTimeval()
+// ToTimespec()
+// ToTimeval()
+// TimeFromTimespec()
+// TimeFromTimeval()
+// ToTimespec()
+// ToTimeval()
+//
+// Some APIs use a timespec or a timeval as a Duration (e.g., nanosleep(2)
+// and select(2)), while others use them as a Time (e.g. clock_gettime(2)
+// and gettimeofday(2)), so conversion functions are provided for both cases.
+// The "to timespec/val" direction is easily handled via overloading, but
+// for "from timespec/val" the desired type is part of the function name.
+Duration DurationFromTimespec(timespec ts);
+Duration DurationFromTimeval(timeval tv);
+timespec ToTimespec(Duration d);
+timeval ToTimeval(Duration d);
+Time TimeFromTimespec(timespec ts);
+Time TimeFromTimeval(timeval tv);
+timespec ToTimespec(Time t);
+timeval ToTimeval(Time t);
+
+// FromChrono()
+//
+// Converts a std::chrono::system_clock::time_point to an y_absl::Time.
+//
+// Example:
+//
+// auto tp = std::chrono::system_clock::from_time_t(123);
+// y_absl::Time t = y_absl::FromChrono(tp);
+// // t == y_absl::FromTimeT(123)
+Time FromChrono(const std::chrono::system_clock::time_point& tp);
+
+// ToChronoTime()
+//
+// Converts an y_absl::Time to a std::chrono::system_clock::time_point. If
+// overflow would occur, the returned value will saturate at the min/max time
+// point value instead.
+//
+// Example:
+//
+// y_absl::Time t = y_absl::FromTimeT(123);
+// auto tp = y_absl::ToChronoTime(t);
+// // tp == std::chrono::system_clock::from_time_t(123);
+std::chrono::system_clock::time_point ToChronoTime(Time);
+
+// AbslParseFlag()
+//
+// Parses the command-line flag string representation `text` into a Time value.
+// Time flags must be specified in a format that matches y_absl::RFC3339_full.
+//
+// For example:
+//
+// --start_time=2016-01-02T03:04:05.678+08:00
+//
+// Note: A UTC offset (or 'Z' indicating a zero-offset from UTC) is required.
+//
+// Additionally, if you'd like to specify a time as a count of
+// seconds/milliseconds/etc from the Unix epoch, use an y_absl::Duration flag
+// and add that duration to y_absl::UnixEpoch() to get an y_absl::Time.
+bool AbslParseFlag(y_absl::string_view text, Time* t, TString* error);
+
+// AbslUnparseFlag()
+//
+// Unparses a Time value into a command-line string representation using
+// the format specified by `y_absl::ParseTime()`.
+TString AbslUnparseFlag(Time t);
+
+ABSL_DEPRECATED("Use AbslParseFlag() instead.")
+bool ParseFlag(const TString& text, Time* t, TString* error);
+ABSL_DEPRECATED("Use AbslUnparseFlag() instead.")
+TString UnparseFlag(Time t);
+
+// TimeZone
+//
+// The `y_absl::TimeZone` is an opaque, small, value-type class representing a
+// geo-political region within which particular rules are used for converting
+// between absolute and civil times (see https://git.io/v59Ly). `y_absl::TimeZone`
+// values are named using the TZ identifiers from the IANA Time Zone Database,
+// such as "America/Los_Angeles" or "Australia/Sydney". `y_absl::TimeZone` values
+// are created from factory functions such as `y_absl::LoadTimeZone()`. Note:
+// strings like "PST" and "EDT" are not valid TZ identifiers. Prefer to pass by
+// value rather than const reference.
+//
+// For more on the fundamental concepts of time zones, absolute times, and civil
+// times, see https://github.com/google/cctz#fundamental-concepts
+//
+// Examples:
+//
+// y_absl::TimeZone utc = y_absl::UTCTimeZone();
+// y_absl::TimeZone pst = y_absl::FixedTimeZone(-8 * 60 * 60);
+// y_absl::TimeZone loc = y_absl::LocalTimeZone();
+// y_absl::TimeZone lax;
+// if (!y_absl::LoadTimeZone("America/Los_Angeles", &lax)) {
+// // handle error case
+// }
+//
+// See also:
+// - https://github.com/google/cctz
+// - https://www.iana.org/time-zones
+// - https://en.wikipedia.org/wiki/Zoneinfo
+class TimeZone {
+ public:
+ explicit TimeZone(time_internal::cctz::time_zone tz) : cz_(tz) {}
+ TimeZone() = default; // UTC, but prefer UTCTimeZone() to be explicit.
+
+ // Copyable.
+ TimeZone(const TimeZone&) = default;
+ TimeZone& operator=(const TimeZone&) = default;
+
+ explicit operator time_internal::cctz::time_zone() const { return cz_; }
+
+ TString name() const { return cz_.name(); }
+
+ // TimeZone::CivilInfo
+ //
+ // Information about the civil time corresponding to an absolute time.
+ // This struct is not intended to represent an instant in time. So, rather
+ // than passing a `TimeZone::CivilInfo` to a function, pass an `y_absl::Time`
+ // and an `y_absl::TimeZone`.
+ struct CivilInfo {
+ CivilSecond cs;
+ Duration subsecond;
+
+ // Note: The following fields exist for backward compatibility
+ // with older APIs. Accessing these fields directly is a sign of
+ // imprudent logic in the calling code. Modern time-related code
+ // should only access this data indirectly by way of FormatTime().
+ // These fields are undefined for InfiniteFuture() and InfinitePast().
+ int offset; // seconds east of UTC
+ bool is_dst; // is offset non-standard?
+ const char* zone_abbr; // time-zone abbreviation (e.g., "PST")
+ };
+
+ // TimeZone::At(Time)
+ //
+ // Returns the civil time for this TimeZone at a certain `y_absl::Time`.
+ // If the input time is infinite, the output civil second will be set to
+ // CivilSecond::max() or min(), and the subsecond will be infinite.
+ //
+ // Example:
+ //
+ // const auto epoch = lax.At(y_absl::UnixEpoch());
+ // // epoch.cs == 1969-12-31 16:00:00
+ // // epoch.subsecond == y_absl::ZeroDuration()
+ // // epoch.offset == -28800
+ // // epoch.is_dst == false
+ // // epoch.abbr == "PST"
+ CivilInfo At(Time t) const;
+
+ // TimeZone::TimeInfo
+ //
+ // Information about the absolute times corresponding to a civil time.
+ // (Subseconds must be handled separately.)
+ //
+ // It is possible for a caller to pass a civil-time value that does
+ // not represent an actual or unique instant in time (due to a shift
+ // in UTC offset in the TimeZone, which results in a discontinuity in
+ // the civil-time components). For example, a daylight-saving-time
+ // transition skips or repeats civil times---in the United States,
+ // March 13, 2011 02:15 never occurred, while November 6, 2011 01:15
+ // occurred twice---so requests for such times are not well-defined.
+ // To account for these possibilities, `y_absl::TimeZone::TimeInfo` is
+ // richer than just a single `y_absl::Time`.
+ struct TimeInfo {
+ enum CivilKind {
+ UNIQUE, // the civil time was singular (pre == trans == post)
+ SKIPPED, // the civil time did not exist (pre >= trans > post)
+ REPEATED, // the civil time was ambiguous (pre < trans <= post)
+ } kind;
+ Time pre; // time calculated using the pre-transition offset
+ Time trans; // when the civil-time discontinuity occurred
+ Time post; // time calculated using the post-transition offset
+ };
+
+ // TimeZone::At(CivilSecond)
+ //
+  // Returns an `y_absl::TimeZone::TimeInfo` containing the absolute time(s)
+  // for this TimeZone at an `y_absl::CivilSecond`. When the civil time is
+  // skipped or repeated, returns times calculated using the pre-transition
+  // and post-transition UTC offsets, plus the transition time itself.
+ //
+ // Examples:
+ //
+ // // A unique civil time
+ // const auto jan01 = lax.At(y_absl::CivilSecond(2011, 1, 1, 0, 0, 0));
+ // // jan01.kind == TimeZone::TimeInfo::UNIQUE
+ // // jan01.pre is 2011-01-01 00:00:00 -0800
+ // // jan01.trans is 2011-01-01 00:00:00 -0800
+ // // jan01.post is 2011-01-01 00:00:00 -0800
+ //
+ // // A Spring DST transition, when there is a gap in civil time
+ // const auto mar13 = lax.At(y_absl::CivilSecond(2011, 3, 13, 2, 15, 0));
+ // // mar13.kind == TimeZone::TimeInfo::SKIPPED
+ // // mar13.pre is 2011-03-13 03:15:00 -0700
+ // // mar13.trans is 2011-03-13 03:00:00 -0700
+ // // mar13.post is 2011-03-13 01:15:00 -0800
+ //
+ // // A Fall DST transition, when civil times are repeated
+ // const auto nov06 = lax.At(y_absl::CivilSecond(2011, 11, 6, 1, 15, 0));
+ // // nov06.kind == TimeZone::TimeInfo::REPEATED
+ // // nov06.pre is 2011-11-06 01:15:00 -0700
+ // // nov06.trans is 2011-11-06 01:00:00 -0800
+ // // nov06.post is 2011-11-06 01:15:00 -0800
+ TimeInfo At(CivilSecond ct) const;
+
+ // TimeZone::NextTransition()
+ // TimeZone::PrevTransition()
+ //
+ // Finds the time of the next/previous offset change in this time zone.
+ //
+ // By definition, `NextTransition(t, &trans)` returns false when `t` is
+ // `InfiniteFuture()`, and `PrevTransition(t, &trans)` returns false
+ // when `t` is `InfinitePast()`. If the zone has no transitions, the
+ // result will also be false no matter what the argument.
+ //
+ // Otherwise, when `t` is `InfinitePast()`, `NextTransition(t, &trans)`
+ // returns true and sets `trans` to the first recorded transition. Chains
+ // of calls to `NextTransition()/PrevTransition()` will eventually return
+ // false, but it is unspecified exactly when `NextTransition(t, &trans)`
+ // jumps to false, or what time is set by `PrevTransition(t, &trans)` for
+ // a very distant `t`.
+ //
+ // Note: Enumeration of time-zone transitions is for informational purposes
+ // only. Modern time-related code should not care about when offset changes
+ // occur.
+ //
+ // Example:
+ // y_absl::TimeZone nyc;
+ // if (!y_absl::LoadTimeZone("America/New_York", &nyc)) { ... }
+ // const auto now = y_absl::Now();
+ // auto t = y_absl::InfinitePast();
+ // y_absl::TimeZone::CivilTransition trans;
+ // while (t <= now && nyc.NextTransition(t, &trans)) {
+ // // transition: trans.from -> trans.to
+ // t = nyc.At(trans.to).trans;
+ // }
+ struct CivilTransition {
+ CivilSecond from; // the civil time we jump from
+ CivilSecond to; // the civil time we jump to
+ };
+ bool NextTransition(Time t, CivilTransition* trans) const;
+ bool PrevTransition(Time t, CivilTransition* trans) const;
+
+ template <typename H>
+ friend H AbslHashValue(H h, TimeZone tz) {
+ return H::combine(std::move(h), tz.cz_);
+ }
+
+ private:
+ friend bool operator==(TimeZone a, TimeZone b) { return a.cz_ == b.cz_; }
+ friend bool operator!=(TimeZone a, TimeZone b) { return a.cz_ != b.cz_; }
+ friend std::ostream& operator<<(std::ostream& os, TimeZone tz) {
+ return os << tz.name();
+ }
+
+ time_internal::cctz::time_zone cz_;
+};
+
+// LoadTimeZone()
+//
+// Loads the named zone. May perform I/O on the initial load of the named
+// zone. If the name is invalid, or some other kind of error occurs, returns
+// `false` and `*tz` is set to the UTC time zone.
+inline bool LoadTimeZone(y_absl::string_view name, TimeZone* tz) {
+ if (name == "localtime") {
+ *tz = TimeZone(time_internal::cctz::local_time_zone());
+ return true;
+ }
+ time_internal::cctz::time_zone cz;
+ const bool b = time_internal::cctz::load_time_zone(TString(name), &cz);
+ *tz = TimeZone(cz);
+ return b;
+}
+
+// FixedTimeZone()
+//
+// Returns a TimeZone that is a fixed offset (seconds east) from UTC.
+// Note: If the absolute value of the offset is greater than 24 hours
+// you'll get UTC (i.e., no offset) instead.
+inline TimeZone FixedTimeZone(int seconds) {
+ return TimeZone(
+ time_internal::cctz::fixed_time_zone(std::chrono::seconds(seconds)));
+}
+
+// UTCTimeZone()
+//
+// Convenience method returning the UTC time zone.
+inline TimeZone UTCTimeZone() {
+ return TimeZone(time_internal::cctz::utc_time_zone());
+}
+
+// LocalTimeZone()
+//
+// Convenience method returning the local time zone, or UTC if there is
+// no configured local zone. Warning: Be wary of using LocalTimeZone(),
+// and particularly so in a server process, as the zone configured for the
+// local machine should be irrelevant. Prefer an explicit zone name.
+inline TimeZone LocalTimeZone() {
+ return TimeZone(time_internal::cctz::local_time_zone());
+}
+
+// ToCivilSecond()
+// ToCivilMinute()
+// ToCivilHour()
+// ToCivilDay()
+// ToCivilMonth()
+// ToCivilYear()
+//
+// Helpers for TimeZone::At(Time) to return particularly aligned civil times.
+//
+// Example:
+//
+// y_absl::Time t = ...;
+// y_absl::TimeZone tz = ...;
+// const auto cd = y_absl::ToCivilDay(t, tz);
+inline CivilSecond ToCivilSecond(Time t, TimeZone tz) {
+ return tz.At(t).cs; // already a CivilSecond
+}
+inline CivilMinute ToCivilMinute(Time t, TimeZone tz) {
+ return CivilMinute(tz.At(t).cs);
+}
+inline CivilHour ToCivilHour(Time t, TimeZone tz) {
+ return CivilHour(tz.At(t).cs);
+}
+inline CivilDay ToCivilDay(Time t, TimeZone tz) {
+ return CivilDay(tz.At(t).cs);
+}
+inline CivilMonth ToCivilMonth(Time t, TimeZone tz) {
+ return CivilMonth(tz.At(t).cs);
+}
+inline CivilYear ToCivilYear(Time t, TimeZone tz) {
+ return CivilYear(tz.At(t).cs);
+}
+
+// FromCivil()
+//
+// Helper for TimeZone::At(CivilSecond) that provides "order-preserving
+// semantics." If the civil time maps to a unique time, that time is
+// returned. If the civil time is repeated in the given time zone, the
+// time using the pre-transition offset is returned. Otherwise, the
+// civil time is skipped in the given time zone, and the transition time
+// is returned. This means that for any two civil times, ct1 and ct2,
+// (ct1 < ct2) => (FromCivil(ct1) <= FromCivil(ct2)), the equal case
+// being when two non-existent civil times map to the same transition time.
+//
+// Note: Accepts civil times of any alignment.
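+//
+// Example of the skipped case, assuming `lax` is the "America/Los_Angeles"
+// zone loaded in the examples above (illustrative):
+//
+//   // 02:15 on 2011-03-13 was skipped in `lax`, so the transition
+//   // time is returned.
+//   y_absl::Time t = y_absl::FromCivil(
+//       y_absl::CivilSecond(2011, 3, 13, 2, 15, 0), lax);
+//   // t == lax.At(y_absl::CivilSecond(2011, 3, 13, 2, 15, 0)).trans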
+inline Time FromCivil(CivilSecond ct, TimeZone tz) {
+ const auto ti = tz.At(ct);
+ if (ti.kind == TimeZone::TimeInfo::SKIPPED) return ti.trans;
+ return ti.pre;
+}
+
+// TimeConversion
+//
+// An `y_absl::TimeConversion` represents the conversion of year, month, day,
+// hour, minute, and second values (i.e., a civil time), in a particular
+// `y_absl::TimeZone`, to a time instant (an absolute time), as returned by
+// `y_absl::ConvertDateTime()`. Legacy version of `y_absl::TimeZone::TimeInfo`.
+//
+// Deprecated. Use `y_absl::TimeZone::TimeInfo`.
+struct TimeConversion {
+ Time pre; // time calculated using the pre-transition offset
+ Time trans; // when the civil-time discontinuity occurred
+ Time post; // time calculated using the post-transition offset
+
+ enum Kind {
+ UNIQUE, // the civil time was singular (pre == trans == post)
+ SKIPPED, // the civil time did not exist
+ REPEATED, // the civil time was ambiguous
+ };
+ Kind kind;
+
+ bool normalized; // input values were outside their valid ranges
+};
+
+// ConvertDateTime()
+//
+// Legacy version of `y_absl::TimeZone::At(y_absl::CivilSecond)` that takes
+// the civil time as six, separate values (YMDHMS).
+//
+// The input month, day, hour, minute, and second values can be outside
+// of their valid ranges, in which case they will be "normalized" during
+// the conversion.
+//
+// Example:
+//
+// // "October 32" normalizes to "November 1".
+// y_absl::TimeConversion tc =
+// y_absl::ConvertDateTime(2013, 10, 32, 8, 30, 0, lax);
+// // tc.kind == TimeConversion::UNIQUE && tc.normalized == true
+//   // y_absl::ToCivilDay(tc.pre, lax).month() == 11
+//   // y_absl::ToCivilDay(tc.pre, lax).day() == 1
+//
+// Deprecated. Use `y_absl::TimeZone::At(CivilSecond)`.
+TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
+ int min, int sec, TimeZone tz);
+
+// FromDateTime()
+//
+// A convenience wrapper for `y_absl::ConvertDateTime()` that simply returns
+// the "pre" `y_absl::Time`. That is, the unique result, or the instant that
+// is correct using the pre-transition offset (as if the transition never
+// happened).
+//
+// Example:
+//
+// y_absl::Time t = y_absl::FromDateTime(2017, 9, 26, 9, 30, 0, lax);
+// // t = 2017-09-26 09:30:00 -0700
+//
+// Deprecated. Use `y_absl::FromCivil(CivilSecond, TimeZone)`. Note that the
+// behavior of `FromCivil()` differs from `FromDateTime()` for skipped civil
+// times. If you care about that see `y_absl::TimeZone::At(y_absl::CivilSecond)`.
+inline Time FromDateTime(int64_t year, int mon, int day, int hour,
+ int min, int sec, TimeZone tz) {
+ return ConvertDateTime(year, mon, day, hour, min, sec, tz).pre;
+}
+
+// FromTM()
+//
+// Converts the `tm_year`, `tm_mon`, `tm_mday`, `tm_hour`, `tm_min`, and
+// `tm_sec` fields to an `y_absl::Time` using the given time zone. See ctime(3)
+// for a description of the expected values of the tm fields. If the civil time
+// is unique (see `y_absl::TimeZone::At(y_absl::CivilSecond)` above), the matching
+// time instant is returned. Otherwise, the `tm_isdst` field is consulted to
+// choose between the possible results. For a repeated civil time, `tm_isdst !=
+// 0` returns the matching DST instant, while `tm_isdst == 0` returns the
+// matching non-DST instant. For a skipped civil time there is no matching
+// instant, so `tm_isdst != 0` returns the DST instant, and `tm_isdst == 0`
+// returns the non-DST instant, that would have matched if the transition never
+// happened.
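+//
+// Example (an illustrative sketch, assuming `lax` is the zone loaded in the
+// examples above):
+//
+//   struct tm tm = {};
+//   tm.tm_year = 2017 - 1900;  // tm_year counts years since 1900
+//   tm.tm_mon = 9 - 1;         // tm_mon is zero-based
+//   tm.tm_mday = 26;
+//   tm.tm_hour = 9;
+//   tm.tm_min = 30;
+//   tm.tm_isdst = 0;           // prefer the non-DST instant if ambiguous
+//   y_absl::Time t = y_absl::FromTM(tm, lax);
+//   // t == y_absl::FromDateTime(2017, 9, 26, 9, 30, 0, lax)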
+Time FromTM(const struct tm& tm, TimeZone tz);
+
+// ToTM()
+//
+// Converts the given `y_absl::Time` to a struct tm using the given time zone.
+// See ctime(3) for a description of the values of the tm fields.
+struct tm ToTM(Time t, TimeZone tz);
+
+// RFC3339_full
+// RFC3339_sec
+//
+// FormatTime()/ParseTime() format specifiers for RFC3339 date/time strings,
+// with trailing zeros trimmed or with fractional seconds omitted altogether.
+//
+// Note that RFC3339_sec[] matches an ISO 8601 extended format for date and
+// time with UTC offset. Also note the use of "%Y": RFC3339 mandates that
+// years have exactly four digits, but we allow them to take their natural
+// width.
+ABSL_DLL extern const char RFC3339_full[]; // %Y-%m-%d%ET%H:%M:%E*S%Ez
+ABSL_DLL extern const char RFC3339_sec[]; // %Y-%m-%d%ET%H:%M:%S%Ez
+
+// RFC1123_full
+// RFC1123_no_wday
+//
+// FormatTime()/ParseTime() format specifiers for RFC1123 date/time strings.
+ABSL_DLL extern const char RFC1123_full[]; // %a, %d %b %E4Y %H:%M:%S %z
+ABSL_DLL extern const char RFC1123_no_wday[]; // %d %b %E4Y %H:%M:%S %z
+
+// FormatTime()
+//
+// Formats the given `y_absl::Time` in the `y_absl::TimeZone` according to the
+// provided format string. Uses strftime()-like formatting options, with
+// the following extensions:
+//
+// - %Ez - RFC3339-compatible numeric UTC offset (+hh:mm or -hh:mm)
+// - %E*z - Full-resolution numeric UTC offset (+hh:mm:ss or -hh:mm:ss)
+// - %E#S - Seconds with # digits of fractional precision
+// - %E*S - Seconds with full fractional precision (a literal '*')
+// - %E#f - Fractional seconds with # digits of precision
+// - %E*f - Fractional seconds with full precision (a literal '*')
+// - %E4Y - Four-character years (-999 ... -001, 0000, 0001 ... 9999)
+// - %ET - The RFC3339 "date-time" separator "T"
+//
+// Note that %E0S behaves like %S, and %E0f produces no characters. In
+// contrast %E*f always produces at least one digit, which may be '0'.
+//
+// Note that %Y produces as many characters as it takes to fully render the
+// year. A year outside of [-999:9999] when formatted with %E4Y will produce
+// more than four characters, just like %Y.
+//
+// We recommend that format strings include the UTC offset (%z, %Ez, or %E*z)
+// so that the result uniquely identifies a time instant.
+//
+// Example:
+//
+// y_absl::CivilSecond cs(2013, 1, 2, 3, 4, 5);
+// y_absl::Time t = y_absl::FromCivil(cs, lax);
+// TString f = y_absl::FormatTime("%H:%M:%S", t, lax); // "03:04:05"
+// f = y_absl::FormatTime("%H:%M:%E3S", t, lax); // "03:04:05.000"
+//
+// Note: If the given `y_absl::Time` is `y_absl::InfiniteFuture()`, the returned
+// string will be exactly "infinite-future". If the given `y_absl::Time` is
+// `y_absl::InfinitePast()`, the returned string will be exactly "infinite-past".
+// In both cases the given format string and `y_absl::TimeZone` are ignored.
+//
+TString FormatTime(y_absl::string_view format, Time t, TimeZone tz);
+
+// Convenience functions that format the given time using the RFC3339_full
+// format. The first overload uses the provided TimeZone, while the second
+// uses LocalTimeZone().
+TString FormatTime(Time t, TimeZone tz);
+TString FormatTime(Time t);
+
+// Output stream operator.
+inline std::ostream& operator<<(std::ostream& os, Time t) {
+ return os << FormatTime(t);
+}
+
+// ParseTime()
+//
+// Parses an input string according to the provided format string and
+// returns the corresponding `y_absl::Time`. Uses strftime()-like formatting
+// options, with the same extensions as FormatTime(), but with the
+// exceptions that %E#S is interpreted as %E*S, and %E#f as %E*f. %Ez
+// and %E*z also accept the same inputs, which (along with %z) includes
+// 'z' and 'Z' as synonyms for +00:00. %ET accepts either 'T' or 't'.
+//
+// %Y consumes as many numeric characters as it can, so the matching data
+// should always be terminated with a non-numeric. %E4Y always consumes
+// exactly four characters, including any sign.
+//
+// Unspecified fields are taken from the default date and time of ...
+//
+// "1970-01-01 00:00:00.0 +0000"
+//
+// For example, parsing a string of "15:45" (%H:%M) will return an y_absl::Time
+// that represents "1970-01-01 15:45:00.0 +0000".
+//
+// Note that since ParseTime() returns time instants, it makes the most sense
+// to parse fully-specified date/time strings that include a UTC offset (%z,
+// %Ez, or %E*z).
+//
+// Note also that `y_absl::ParseTime()` only heeds the fields year, month, day,
+// hour, minute, (fractional) second, and UTC offset. Other fields, like
+// weekday (%a or %A), while parsed for syntactic validity, are ignored
+// in the conversion.
+//
+// Date and time fields that are out-of-range will be treated as errors
+// rather than normalizing them like `y_absl::CivilSecond` does. For example,
+// it is an error to parse the date "Oct 32, 2013" because 32 is out of range.
+//
+// A leap second of ":60" is normalized to ":00" of the following minute
+// with fractional seconds discarded. The following table shows how the
+// given seconds and subseconds will be parsed:
+//
+// "59.x" -> 59.x // exact
+// "60.x" -> 00.0 // normalized
+// "00.x" -> 00.x // exact
+//
+// Errors are indicated by returning false and assigning an error message
+// to the "err" out param if it is non-null.
+//
+// Note: If the input string is exactly "infinite-future", the returned
+// `y_absl::Time` will be `y_absl::InfiniteFuture()` and `true` will be returned.
+// If the input string is "infinite-past", the returned `y_absl::Time` will be
+// `y_absl::InfinitePast()` and `true` will be returned.
+//
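+// Example (illustrative):
+//
+//   TString err;
+//   y_absl::Time t;
+//   if (!y_absl::ParseTime(y_absl::RFC3339_full,
+//                          "2016-01-02T03:04:05.678+08:00", &t, &err)) {
+//     // handle `err`
+//   }
+//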
+bool ParseTime(y_absl::string_view format, y_absl::string_view input, Time* time,
+ TString* err);
+
+// Like ParseTime() above, but if the format string does not contain a UTC
+// offset specification (%z/%Ez/%E*z) then the input is interpreted in the
+// given TimeZone. This means that the input, by itself, does not identify a
+// unique instant. Being time-zone dependent, it also admits the possibility
+// of ambiguity or non-existence, in which case the "pre" time (as defined
+// by TimeZone::TimeInfo) is returned. For these reasons we recommend that
+// all date/time strings include a UTC offset so they're context independent.
+bool ParseTime(y_absl::string_view format, y_absl::string_view input, TimeZone tz,
+ Time* time, TString* err);
+
+// ============================================================================
+// Implementation Details Follow
+// ============================================================================
+
+namespace time_internal {
+
+// Creates a Duration with a given representation.
+// REQUIRES: hi,lo is a valid representation of a Duration as specified
+// in time/duration.cc.
+constexpr Duration MakeDuration(int64_t hi, uint32_t lo = 0) {
+ return Duration(hi, lo);
+}
+
+constexpr Duration MakeDuration(int64_t hi, int64_t lo) {
+ return MakeDuration(hi, static_cast<uint32_t>(lo));
+}
+
+// Make a Duration value from a floating-point number, as long as that number
+// is in the range [ 0 .. numeric_limits<int64_t>::max ), that is, as long as
+// it's positive and can be converted to int64_t without risk of UB.
+inline Duration MakePosDoubleDuration(double n) {
+ const int64_t int_secs = static_cast<int64_t>(n);
+ const uint32_t ticks = static_cast<uint32_t>(
+ std::round((n - static_cast<double>(int_secs)) * kTicksPerSecond));
+ return ticks < kTicksPerSecond
+ ? MakeDuration(int_secs, ticks)
+ : MakeDuration(int_secs + 1, ticks - kTicksPerSecond);
+}
+
+// Creates a normalized Duration from an almost-normalized (sec,ticks)
+// pair. sec may be positive or negative. ticks must be in the range
+// -kTicksPerSecond < *ticks < kTicksPerSecond. If ticks is negative it
+// will be normalized to a positive value in the resulting Duration.
+constexpr Duration MakeNormalizedDuration(int64_t sec, int64_t ticks) {
+ return (ticks < 0) ? MakeDuration(sec - 1, ticks + kTicksPerSecond)
+ : MakeDuration(sec, ticks);
+}
+
+// Provide access to the Duration representation.
+constexpr int64_t GetRepHi(Duration d) { return d.rep_hi_; }
+constexpr uint32_t GetRepLo(Duration d) { return d.rep_lo_; }
+
+// Returns true iff d is positive or negative infinity.
+constexpr bool IsInfiniteDuration(Duration d) { return GetRepLo(d) == ~0U; }
+
+// Returns an infinite Duration with the opposite sign.
+// REQUIRES: IsInfiniteDuration(d)
+constexpr Duration OppositeInfinity(Duration d) {
+ return GetRepHi(d) < 0
+ ? MakeDuration((std::numeric_limits<int64_t>::max)(), ~0U)
+ : MakeDuration((std::numeric_limits<int64_t>::min)(), ~0U);
+}
+
+// Returns (-n)-1 (equivalently -(n+1)) without avoidable overflow.
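+// For example, NegateAndSubtractOne(5) == -6 and NegateAndSubtractOne(-5) == 4.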
+constexpr int64_t NegateAndSubtractOne(int64_t n) {
+ // Note: Good compilers will optimize this expression to ~n when using
+ // a two's-complement representation (which is required for int64_t).
+ return (n < 0) ? -(n + 1) : (-n) - 1;
+}
+
+// Map between a Time and a Duration since the Unix epoch. Note that these
+// functions depend on the above mentioned choice of the Unix epoch for the
+// Time representation (and both need to be Time friends). Without this
+// knowledge, we would need to add-in/subtract-out UnixEpoch() respectively.
+constexpr Time FromUnixDuration(Duration d) { return Time(d); }
+constexpr Duration ToUnixDuration(Time t) { return t.rep_; }
+
+template <std::intmax_t N>
+constexpr Duration FromInt64(int64_t v, std::ratio<1, N>) {
+ static_assert(0 < N && N <= 1000 * 1000 * 1000, "Unsupported ratio");
+ // Subsecond ratios cannot overflow.
+ return MakeNormalizedDuration(
+ v / N, v % N * kTicksPerNanosecond * 1000 * 1000 * 1000 / N);
+}
+constexpr Duration FromInt64(int64_t v, std::ratio<60>) {
+ return (v <= (std::numeric_limits<int64_t>::max)() / 60 &&
+ v >= (std::numeric_limits<int64_t>::min)() / 60)
+ ? MakeDuration(v * 60)
+ : v > 0 ? InfiniteDuration() : -InfiniteDuration();
+}
+constexpr Duration FromInt64(int64_t v, std::ratio<3600>) {
+ return (v <= (std::numeric_limits<int64_t>::max)() / 3600 &&
+ v >= (std::numeric_limits<int64_t>::min)() / 3600)
+ ? MakeDuration(v * 3600)
+ : v > 0 ? InfiniteDuration() : -InfiniteDuration();
+}
+
+// IsValidRep64<T>(0) is true if the expression `int64_t{std::declval<T>()}` is
+// valid. That is, if a T can be assigned to an int64_t without narrowing.
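+// For example (illustrative): IsValidRep64<int>(0) is true, while
+// IsValidRep64<uint64_t>(0) is false because the conversion would narrow.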
+template <typename T>
+constexpr auto IsValidRep64(int) -> decltype(int64_t{std::declval<T>()} == 0) {
+ return true;
+}
+template <typename T>
+constexpr auto IsValidRep64(char) -> bool {
+ return false;
+}
+
+// Converts a std::chrono::duration to an y_absl::Duration.
+template <typename Rep, typename Period>
+constexpr Duration FromChrono(const std::chrono::duration<Rep, Period>& d) {
+ static_assert(IsValidRep64<Rep>(0), "duration::rep is invalid");
+ return FromInt64(int64_t{d.count()}, Period{});
+}
+
+template <typename Ratio>
+int64_t ToInt64(Duration d, Ratio) {
+ // Note: This may be used on MSVC, which may have a system_clock period of
+ // std::ratio<1, 10 * 1000 * 1000>
+ return ToInt64Seconds(d * Ratio::den / Ratio::num);
+}
+// Fastpath implementations for the 6 common duration units.
+inline int64_t ToInt64(Duration d, std::nano) {
+ return ToInt64Nanoseconds(d);
+}
+inline int64_t ToInt64(Duration d, std::micro) {
+ return ToInt64Microseconds(d);
+}
+inline int64_t ToInt64(Duration d, std::milli) {
+ return ToInt64Milliseconds(d);
+}
+inline int64_t ToInt64(Duration d, std::ratio<1>) {
+ return ToInt64Seconds(d);
+}
+inline int64_t ToInt64(Duration d, std::ratio<60>) {
+ return ToInt64Minutes(d);
+}
+inline int64_t ToInt64(Duration d, std::ratio<3600>) {
+ return ToInt64Hours(d);
+}
+
+// Converts an y_absl::Duration to a chrono duration of type T.
+template <typename T>
+T ToChronoDuration(Duration d) {
+ using Rep = typename T::rep;
+ using Period = typename T::period;
+ static_assert(IsValidRep64<Rep>(0), "duration::rep is invalid");
+ if (time_internal::IsInfiniteDuration(d))
+ return d < ZeroDuration() ? (T::min)() : (T::max)();
+ const auto v = ToInt64(d, Period{});
+ if (v > (std::numeric_limits<Rep>::max)()) return (T::max)();
+ if (v < (std::numeric_limits<Rep>::min)()) return (T::min)();
+ return T{v};
+}
+
+} // namespace time_internal
+
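+// Explanatory note (assuming the rep encoding described in time/duration.cc,
+// where an infinite Duration stores ~0U in its low word): when both high
+// words equal the int64_t minimum, the `+ 1` below wraps a ~0U low word to 0,
+// so -InfiniteDuration() orders before any finite Duration sharing that high
+// word.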
+constexpr bool operator<(Duration lhs, Duration rhs) {
+ return time_internal::GetRepHi(lhs) != time_internal::GetRepHi(rhs)
+ ? time_internal::GetRepHi(lhs) < time_internal::GetRepHi(rhs)
+ : time_internal::GetRepHi(lhs) == (std::numeric_limits<int64_t>::min)()
+ ? time_internal::GetRepLo(lhs) + 1 <
+ time_internal::GetRepLo(rhs) + 1
+ : time_internal::GetRepLo(lhs) < time_internal::GetRepLo(rhs);
+}
+
+constexpr bool operator==(Duration lhs, Duration rhs) {
+ return time_internal::GetRepHi(lhs) == time_internal::GetRepHi(rhs) &&
+ time_internal::GetRepLo(lhs) == time_internal::GetRepLo(rhs);
+}
+
+constexpr Duration operator-(Duration d) {
+ // This is a little interesting because of the special cases.
+ //
+ // If rep_lo_ is zero, we have it easy; it's safe to negate rep_hi_, we're
+ // dealing with an integral number of seconds, and the only special case is
+ // the maximum negative finite duration, which can't be negated.
+ //
+ // Infinities stay infinite, and just change direction.
+ //
+ // Finally we're in the case where rep_lo_ is non-zero, and we can borrow
+ // a second's worth of ticks and avoid overflow (as negating int64_t-min + 1
+ // is safe).
+ return time_internal::GetRepLo(d) == 0
+ ? time_internal::GetRepHi(d) ==
+ (std::numeric_limits<int64_t>::min)()
+ ? InfiniteDuration()
+ : time_internal::MakeDuration(-time_internal::GetRepHi(d))
+ : time_internal::IsInfiniteDuration(d)
+ ? time_internal::OppositeInfinity(d)
+ : time_internal::MakeDuration(
+ time_internal::NegateAndSubtractOne(
+ time_internal::GetRepHi(d)),
+ time_internal::kTicksPerSecond -
+ time_internal::GetRepLo(d));
+}
+
+constexpr Duration InfiniteDuration() {
+ return time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(),
+ ~0U);
+}
+
+constexpr Duration FromChrono(const std::chrono::nanoseconds& d) {
+ return time_internal::FromChrono(d);
+}
+constexpr Duration FromChrono(const std::chrono::microseconds& d) {
+ return time_internal::FromChrono(d);
+}
+constexpr Duration FromChrono(const std::chrono::milliseconds& d) {
+ return time_internal::FromChrono(d);
+}
+constexpr Duration FromChrono(const std::chrono::seconds& d) {
+ return time_internal::FromChrono(d);
+}
+constexpr Duration FromChrono(const std::chrono::minutes& d) {
+ return time_internal::FromChrono(d);
+}
+constexpr Duration FromChrono(const std::chrono::hours& d) {
+ return time_internal::FromChrono(d);
+}
+
+constexpr Time FromUnixNanos(int64_t ns) {
+ return time_internal::FromUnixDuration(Nanoseconds(ns));
+}
+
+constexpr Time FromUnixMicros(int64_t us) {
+ return time_internal::FromUnixDuration(Microseconds(us));
+}
+
+constexpr Time FromUnixMillis(int64_t ms) {
+ return time_internal::FromUnixDuration(Milliseconds(ms));
+}
+
+constexpr Time FromUnixSeconds(int64_t s) {
+ return time_internal::FromUnixDuration(Seconds(s));
+}
+
+constexpr Time FromTimeT(time_t t) {
+ return time_internal::FromUnixDuration(Seconds(t));
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TIME_TIME_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone/ya.make
new file mode 100644
index 00000000000..d7e1dc0c26e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone/ya.make
@@ -0,0 +1,38 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+IF (OS_DARWIN)
+ EXTRALIBS("-framework CoreFoundation")
+ENDIF()
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src)
+
+SRCS(
+ time_zone_fixed.cc
+ time_zone_format.cc
+ time_zone_if.cc
+ time_zone_impl.cc
+ time_zone_info.cc
+ time_zone_libc.cc
+ time_zone_lookup.cc
+ time_zone_posix.cc
+ zone_info_source.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/time/ya.make
new file mode 100644
index 00000000000..37cc779265d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/ya.make
@@ -0,0 +1,44 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(
+ Apache-2.0 AND
+ Public-Domain
+)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ civil_time.cc
+ clock.cc
+ duration.cc
+ format.cc
+ time.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/types/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..2ff2fa2fd39
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/.yandex_meta/licenses.list.txt
@@ -0,0 +1,20 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2018 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/any.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/any.h
new file mode 100644
index 00000000000..f03d940ed90
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/any.h
@@ -0,0 +1,528 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// any.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the `y_absl::any` type for holding a type-safe value
+// of any type. The `y_absl::any` type is useful for providing a way to hold
+// something that is, as yet, unspecified. Such unspecified types
+// traditionally are passed between API boundaries until they are later cast to
+// their "destination" types. To cast to such a destination type, use
+// `y_absl::any_cast()`. Note that when casting an `y_absl::any`, you must cast it
+// to an explicit type; implicit conversions will throw.
+//
+// Example:
+//
+// auto a = y_absl::any(65);
+// y_absl::any_cast<int>(a); // 65
+// y_absl::any_cast<char>(a); // throws y_absl::bad_any_cast
+// y_absl::any_cast<TString>(a); // throws y_absl::bad_any_cast
+//
+// `y_absl::any` is a C++11 compatible version of the C++17 `std::any` abstraction
+// and is designed to be a drop-in replacement for code compliant with C++17.
+//
+// Traditionally, the behavior of casting to a temporary unspecified type has
+// been accomplished with the `void *` paradigm, where the pointer was to some
+// other unspecified type. `y_absl::any` provides an "owning" version of `void *`
+// that avoids issues of pointer management.
+//
+// Note: just as in the case of `void *`, use of `y_absl::any` (and its C++17
+// version `std::any`) is a code smell indicating that your API might not be
+// constructed correctly. We have seen that most uses of `any` are unwarranted,
+// and `y_absl::any`, like `std::any`, is difficult to use properly. Before using
+// this abstraction, make sure that you should not instead be rewriting your
+// code to be more specific.
+//
+// Abseil has also released an `y_absl::variant` type (a C++11 compatible version
+// of the C++17 `std::variant`), which is generally preferred for use over
+// `y_absl::any`.
+#ifndef ABSL_TYPES_ANY_H_
+#define ABSL_TYPES_ANY_H_
+
+#include "y_absl/base/config.h"
+#include "y_absl/utility/utility.h"
+
+#ifdef ABSL_USES_STD_ANY
+
+#include <any> // IWYU pragma: export
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+using std::any;
+using std::any_cast;
+using std::bad_any_cast;
+using std::make_any;
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else // ABSL_USES_STD_ANY
+
+#include <algorithm>
+#include <cstddef>
+#include <initializer_list>
+#include <memory>
+#include <stdexcept>
+#include <type_traits>
+#include <typeinfo>
+#include <utility>
+
+#include "y_absl/base/internal/fast_type_id.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/types/bad_any_cast.h"
+
+// NOTE: This macro is an implementation detail that is undefined at the bottom
+// of the file. It is not intended for expansion directly from user code.
+#ifdef ABSL_ANY_DETAIL_HAS_RTTI
+#error ABSL_ANY_DETAIL_HAS_RTTI cannot be directly set
+#elif !defined(__GNUC__) || defined(__GXX_RTTI)
+#define ABSL_ANY_DETAIL_HAS_RTTI 1
+#endif // !defined(__GNUC__) || defined(__GXX_RTTI)
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+class any;
+
+// swap()
+//
+// Swaps two `y_absl::any` values. Equivalent to `x.swap(y)` where `x` and `y` are
+// `y_absl::any` types.
+void swap(any& x, any& y) noexcept;
+
+// make_any()
+//
+// Constructs an `y_absl::any` of type `T` with the given arguments.
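+//
+// Example (illustrative):
+//
+//   y_absl::any a = y_absl::make_any<std::vector<int>>(10, 7);
+//   // a holds a std::vector<int> of ten elements, each equal to 7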
+template <typename T, typename... Args>
+any make_any(Args&&... args);
+
+// Overload of `y_absl::make_any()` for constructing an `y_absl::any` type from an
+// initializer list.
+template <typename T, typename U, typename... Args>
+any make_any(std::initializer_list<U> il, Args&&... args);
+
+// any_cast()
+//
+// Statically casts the value of a `const y_absl::any` type to the given type.
+// This function will throw `y_absl::bad_any_cast` if the stored value type of the
+// `y_absl::any` does not match the cast.
+//
+// `any_cast()` can also be used to get a reference to the internal storage iff
+// a reference type is passed as its `ValueType`:
+//
+// Example:
+//
+// y_absl::any my_any = std::vector<int>();
+// y_absl::any_cast<std::vector<int>&>(my_any).push_back(42);
+template <typename ValueType>
+ValueType any_cast(const any& operand);
+
+// Overload of `any_cast()` to statically cast the value of a non-const
+// `y_absl::any` type to the given type. This function will throw
+// `y_absl::bad_any_cast` if the stored value type of the `y_absl::any` does not
+// match the cast.
+template <typename ValueType>
+ValueType any_cast(any& operand); // NOLINT(runtime/references)
+
+// Overload of `any_cast()` to statically cast the rvalue of an `y_absl::any`
+// type. This function will throw `y_absl::bad_any_cast` if the stored value type
+// of the `y_absl::any` does not match the cast.
+template <typename ValueType>
+ValueType any_cast(any&& operand);
+
+// Overload of `any_cast()` to statically cast the value of a const pointer
+// `y_absl::any` type to the given pointer type, or `nullptr` if the stored value
+// type of the `y_absl::any` does not match the cast.
+template <typename ValueType>
+const ValueType* any_cast(const any* operand) noexcept;
+
+// Overload of `any_cast()` to statically cast the value of a pointer
+// `y_absl::any` type to the given pointer type, or `nullptr` if the stored value
+// type of the `y_absl::any` does not match the cast.
+template <typename ValueType>
+ValueType* any_cast(any* operand) noexcept;
+
+// -----------------------------------------------------------------------------
+// y_absl::any
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::any` object provides the facility to either store an instance of a
+// type, known as the "contained object", or no value. An `y_absl::any` is used to
+// store values of types that are unknown at compile time. The `y_absl::any`
+// object, when containing a value, must contain a value type; storing a
+// reference type is neither desired nor supported.
+//
+// An `y_absl::any` can only store a type that is copy-constructible; move-only
+// types are not allowed within an `any` object.
+//
+// Example:
+//
+// auto a = y_absl::any(65); // Literal, copyable
+// auto b = y_absl::any(std::vector<int>()); // Default-initialized, copyable
+// std::unique_ptr<Foo> my_foo;
+// auto c = y_absl::any(std::move(my_foo)); // Error, not copy-constructible
+//
+// Note that `y_absl::any` makes use of decayed types (`y_absl::decay_t` in this
+// context) to remove const-volatile qualifiers (known as "cv qualifiers"),
+// decay functions to function pointers, etc. We essentially "decay" a given
+// type into its essential type.
+//
+// `y_absl::any` makes use of decayed types when determining the basic type `T` of
+// the value to store in the any's contained object. In the documentation below,
+// we explicitly denote this by using the phrase "a decayed type of `T`".
+//
+// Example:
+//
+// const int a = 4;
+// y_absl::any foo(a); // Decay ensures we store an "int", not a "const int&".
+//
+// void my_function() {}
+// y_absl::any bar(my_function); // Decay ensures we store a function pointer.
+//
+// `y_absl::any` is a C++11 compatible version of the C++17 `std::any` abstraction
+// and is designed to be a drop-in replacement for code compliant with C++17.
+class any {
+ private:
+ template <typename T>
+ struct IsInPlaceType;
+
+ public:
+ // Constructors
+
+ // Constructs an empty `y_absl::any` object (`any::has_value()` will return
+ // `false`).
+ constexpr any() noexcept;
+
+ // Copy constructs an `y_absl::any` object with a "contained object" of the
+ // passed type of `other` (or an empty `y_absl::any` if `other.has_value()` is
+  // `false`).
+ any(const any& other)
+ : obj_(other.has_value() ? other.obj_->Clone()
+ : std::unique_ptr<ObjInterface>()) {}
+
+ // Move constructs an `y_absl::any` object with a "contained object" of the
+ // passed type of `other` (or an empty `y_absl::any` if `other.has_value()` is
+ // `false`).
+ any(any&& other) noexcept = default;
+
+ // Constructs an `y_absl::any` object with a "contained object" of the decayed
+ // type of `T`, which is initialized via `std::forward<T>(value)`.
+ //
+ // This constructor will not participate in overload resolution if the
+ // decayed type of `T` is not copy-constructible.
+ template <
+ typename T, typename VT = y_absl::decay_t<T>,
+ y_absl::enable_if_t<!y_absl::disjunction<
+ std::is_same<any, VT>, IsInPlaceType<VT>,
+ y_absl::negation<std::is_copy_constructible<VT> > >::value>* = nullptr>
+ any(T&& value) : obj_(new Obj<VT>(in_place, std::forward<T>(value))) {}
+
+ // Constructs an `y_absl::any` object with a "contained object" of the decayed
+ // type of `T`, which is initialized via `std::forward<T>(value)`.
+ template <typename T, typename... Args, typename VT = y_absl::decay_t<T>,
+ y_absl::enable_if_t<y_absl::conjunction<
+ std::is_copy_constructible<VT>,
+ std::is_constructible<VT, Args...>>::value>* = nullptr>
+ explicit any(in_place_type_t<T> /*tag*/, Args&&... args)
+ : obj_(new Obj<VT>(in_place, std::forward<Args>(args)...)) {}
+
+ // Constructs an `y_absl::any` object with a "contained object" of the passed
+ // type `VT` as a decayed type of `T`. `VT` is initialized as if
+ // direct-non-list-initializing an object of type `VT` with the arguments
+ // `initializer_list, std::forward<Args>(args)...`.
+ template <
+ typename T, typename U, typename... Args, typename VT = y_absl::decay_t<T>,
+ y_absl::enable_if_t<
+ y_absl::conjunction<std::is_copy_constructible<VT>,
+ std::is_constructible<VT, std::initializer_list<U>&,
+ Args...>>::value>* = nullptr>
+ explicit any(in_place_type_t<T> /*tag*/, std::initializer_list<U> ilist,
+ Args&&... args)
+ : obj_(new Obj<VT>(in_place, ilist, std::forward<Args>(args)...)) {}
+
+ // Assignment operators
+
+ // Copy assigns an `y_absl::any` object with a "contained object" of the
+ // passed type.
+ any& operator=(const any& rhs) {
+ any(rhs).swap(*this);
+ return *this;
+ }
+
+ // Move assigns an `y_absl::any` object with a "contained object" of the
+ // passed type. `rhs` is left in a valid but otherwise unspecified state.
+ any& operator=(any&& rhs) noexcept {
+ any(std::move(rhs)).swap(*this);
+ return *this;
+ }
+
+ // Assigns an `y_absl::any` object with a "contained object" of the passed type.
+ template <typename T, typename VT = y_absl::decay_t<T>,
+ y_absl::enable_if_t<y_absl::conjunction<
+ y_absl::negation<std::is_same<VT, any>>,
+ std::is_copy_constructible<VT>>::value>* = nullptr>
+ any& operator=(T&& rhs) {
+ any tmp(in_place_type_t<VT>(), std::forward<T>(rhs));
+ tmp.swap(*this);
+ return *this;
+ }
+
+ // Modifiers
+
+ // any::emplace()
+ //
+ // Emplaces a value within an `y_absl::any` object by calling `any::reset()`,
+ // initializing the contained value as if direct-non-list-initializing an
+ // object of type `VT` with the arguments `std::forward<Args>(args)...`, and
+ // returning a reference to the new contained value.
+ //
+ // Note: If an exception is thrown during the call to `VT`'s constructor,
+ // `*this` does not contain a value, and any previously contained value has
+ // been destroyed.
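+  //
+  // Example (illustrative):
+  //
+  //   y_absl::any a;
+  //   auto& v = a.emplace<std::vector<int>>(3, 1);  // v == {1, 1, 1}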
+ template <
+ typename T, typename... Args, typename VT = y_absl::decay_t<T>,
+ y_absl::enable_if_t<std::is_copy_constructible<VT>::value &&
+ std::is_constructible<VT, Args...>::value>* = nullptr>
+ VT& emplace(Args&&... args) {
+ reset(); // NOTE: reset() is required here even in the world of exceptions.
+ Obj<VT>* const object_ptr =
+ new Obj<VT>(in_place, std::forward<Args>(args)...);
+ obj_ = std::unique_ptr<ObjInterface>(object_ptr);
+ return object_ptr->value;
+ }
+
+ // Overload of `any::emplace()` to emplace a value within an `y_absl::any`
+ // object by calling `any::reset()`, initializing the contained value as if
+ // direct-non-list-initializing an object of type `VT` with the arguments
+ // `initializer_list, std::forward<Args>(args)...`, and returning a reference
+ // to the new contained value.
+ //
+ // Note: If an exception is thrown during the call to `VT`'s constructor,
+ // `*this` does not contain a value, and any previously contained value has
+ // been destroyed. The function shall not participate in overload resolution
+ // unless `is_copy_constructible_v<VT>` is `true` and
+ // `is_constructible_v<VT, initializer_list<U>&, Args...>` is `true`.
+ template <
+ typename T, typename U, typename... Args, typename VT = y_absl::decay_t<T>,
+ y_absl::enable_if_t<std::is_copy_constructible<VT>::value &&
+ std::is_constructible<VT, std::initializer_list<U>&,
+ Args...>::value>* = nullptr>
+ VT& emplace(std::initializer_list<U> ilist, Args&&... args) {
+ reset(); // NOTE: reset() is required here even in the world of exceptions.
+ Obj<VT>* const object_ptr =
+ new Obj<VT>(in_place, ilist, std::forward<Args>(args)...);
+ obj_ = std::unique_ptr<ObjInterface>(object_ptr);
+ return object_ptr->value;
+ }
+
+ // any::reset()
+ //
+ // Resets the state of the `y_absl::any` object, destroying the contained object
+ // if present.
+ void reset() noexcept { obj_ = nullptr; }
+
+ // any::swap()
+ //
+ // Swaps the passed value and the value of this `y_absl::any` object.
+ void swap(any& other) noexcept { obj_.swap(other.obj_); }
+
+ // Observers
+
+ // any::has_value()
+ //
+ // Returns `true` if the `any` object has a contained value, otherwise
+ // returns `false`.
+ bool has_value() const noexcept { return obj_ != nullptr; }
+
+#if ABSL_ANY_DETAIL_HAS_RTTI
+  // any::type()
+  //
+  // Returns: typeid(T) if *this has a contained object of type T, otherwise
+  // typeid(void).
+  const std::type_info& type() const noexcept {
+ if (has_value()) {
+ return obj_->Type();
+ }
+
+ return typeid(void);
+ }
+#endif // ABSL_ANY_DETAIL_HAS_RTTI
+
+ private:
+ // Tagged type-erased abstraction for holding a cloneable object.
+ class ObjInterface {
+ public:
+ virtual ~ObjInterface() = default;
+ virtual std::unique_ptr<ObjInterface> Clone() const = 0;
+ virtual const void* ObjTypeId() const noexcept = 0;
+#if ABSL_ANY_DETAIL_HAS_RTTI
+ virtual const std::type_info& Type() const noexcept = 0;
+#endif // ABSL_ANY_DETAIL_HAS_RTTI
+ };
+
+  // Holds a value of some queryable type, with the ability to Clone it.
+ template <typename T>
+ class Obj : public ObjInterface {
+ public:
+ template <typename... Args>
+ explicit Obj(in_place_t /*tag*/, Args&&... args)
+ : value(std::forward<Args>(args)...) {}
+
+ std::unique_ptr<ObjInterface> Clone() const final {
+ return std::unique_ptr<ObjInterface>(new Obj(in_place, value));
+ }
+
+ const void* ObjTypeId() const noexcept final { return IdForType<T>(); }
+
+#if ABSL_ANY_DETAIL_HAS_RTTI
+ const std::type_info& Type() const noexcept final { return typeid(T); }
+#endif // ABSL_ANY_DETAIL_HAS_RTTI
+
+ T value;
+ };
+
+ std::unique_ptr<ObjInterface> CloneObj() const {
+ if (!obj_) return nullptr;
+ return obj_->Clone();
+ }
+
+ template <typename T>
+ constexpr static const void* IdForType() {
+ // Note: This type dance is to make the behavior consistent with typeid.
+ using NormalizedType =
+ typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+
+ return base_internal::FastTypeId<NormalizedType>();
+ }
+
+ const void* GetObjTypeId() const {
+ return obj_ ? obj_->ObjTypeId() : base_internal::FastTypeId<void>();
+ }
+
+ // `y_absl::any` nonmember functions //
+
+ // Description at the declaration site (top of file).
+ template <typename ValueType>
+ friend ValueType any_cast(const any& operand);
+
+ // Description at the declaration site (top of file).
+ template <typename ValueType>
+ friend ValueType any_cast(any& operand); // NOLINT(runtime/references)
+
+ // Description at the declaration site (top of file).
+ template <typename T>
+ friend const T* any_cast(const any* operand) noexcept;
+
+ // Description at the declaration site (top of file).
+ template <typename T>
+ friend T* any_cast(any* operand) noexcept;
+
+ std::unique_ptr<ObjInterface> obj_;
+};
+
+// -----------------------------------------------------------------------------
+// Implementation Details
+// -----------------------------------------------------------------------------
+
+constexpr any::any() noexcept = default;
+
+template <typename T>
+struct any::IsInPlaceType : std::false_type {};
+
+template <typename T>
+struct any::IsInPlaceType<in_place_type_t<T>> : std::true_type {};
+
+inline void swap(any& x, any& y) noexcept { x.swap(y); }
+
+// Description at the declaration site (top of file).
+template <typename T, typename... Args>
+any make_any(Args&&... args) {
+ return any(in_place_type_t<T>(), std::forward<Args>(args)...);
+}
+
+// Description at the declaration site (top of file).
+template <typename T, typename U, typename... Args>
+any make_any(std::initializer_list<U> il, Args&&... args) {
+ return any(in_place_type_t<T>(), il, std::forward<Args>(args)...);
+}
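+
+// For example (an illustrative sketch):
+//
+//   auto a = y_absl::make_any<TString>("hello");             // forwards "hello"
+//   auto b = y_absl::make_any<std::vector<int>>({1, 2, 3});  // initializer_list
+//                                                            // overload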
+
+// Description at the declaration site (top of file).
+template <typename ValueType>
+ValueType any_cast(const any& operand) {
+ using U = typename std::remove_cv<
+ typename std::remove_reference<ValueType>::type>::type;
+ static_assert(std::is_constructible<ValueType, const U&>::value,
+ "Invalid ValueType");
+ auto* const result = (any_cast<U>)(&operand);
+ if (result == nullptr) {
+ any_internal::ThrowBadAnyCast();
+ }
+ return static_cast<ValueType>(*result);
+}
+
+// Description at the declaration site (top of file).
+template <typename ValueType>
+ValueType any_cast(any& operand) { // NOLINT(runtime/references)
+ using U = typename std::remove_cv<
+ typename std::remove_reference<ValueType>::type>::type;
+ static_assert(std::is_constructible<ValueType, U&>::value,
+ "Invalid ValueType");
+ auto* result = (any_cast<U>)(&operand);
+ if (result == nullptr) {
+ any_internal::ThrowBadAnyCast();
+ }
+ return static_cast<ValueType>(*result);
+}
+
+// Description at the declaration site (top of file).
+template <typename ValueType>
+ValueType any_cast(any&& operand) {
+ using U = typename std::remove_cv<
+ typename std::remove_reference<ValueType>::type>::type;
+ static_assert(std::is_constructible<ValueType, U>::value,
+ "Invalid ValueType");
+ return static_cast<ValueType>(std::move((any_cast<U&>)(operand)));
+}
+
+// Description at the declaration site (top of file).
+template <typename T>
+const T* any_cast(const any* operand) noexcept {
+ using U =
+ typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+ return operand && operand->GetObjTypeId() == any::IdForType<U>()
+ ? std::addressof(
+ static_cast<const any::Obj<U>*>(operand->obj_.get())->value)
+ : nullptr;
+}
+
+// Description at the declaration site (top of file).
+template <typename T>
+T* any_cast(any* operand) noexcept {
+ using U =
+ typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+ return operand && operand->GetObjTypeId() == any::IdForType<U>()
+ ? std::addressof(
+ static_cast<any::Obj<U>*>(operand->obj_.get())->value)
+ : nullptr;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#undef ABSL_ANY_DETAIL_HAS_RTTI
+
+#endif // ABSL_USES_STD_ANY
+
+#endif // ABSL_TYPES_ANY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast.cc
new file mode 100644
index 00000000000..29916186dad
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast.cc
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/types/bad_any_cast.h"
+
+#ifndef ABSL_USES_STD_ANY
+
+#include <cstdlib>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+bad_any_cast::~bad_any_cast() = default;
+
+const char* bad_any_cast::what() const noexcept { return "Bad any cast"; }
+
+namespace any_internal {
+
+void ThrowBadAnyCast() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ throw bad_any_cast();
+#else
+ ABSL_RAW_LOG(FATAL, "Bad any cast");
+ std::abort();
+#endif
+}
+
+} // namespace any_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_USES_STD_ANY
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast.h
new file mode 100644
index 00000000000..92c3bfe5225
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast.h
@@ -0,0 +1,75 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// bad_any_cast.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the `y_absl::bad_any_cast` type.
+
+#ifndef ABSL_TYPES_BAD_ANY_CAST_H_
+#define ABSL_TYPES_BAD_ANY_CAST_H_
+
+#include <typeinfo>
+
+#include "y_absl/base/config.h"
+
+#ifdef ABSL_USES_STD_ANY
+
+#include <any>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+using std::bad_any_cast;
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else // ABSL_USES_STD_ANY
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// -----------------------------------------------------------------------------
+// bad_any_cast
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::bad_any_cast` type is an exception type that is thrown when a
+// cast of an `y_absl::any` object fails because the requested type does not
+// match the type of the contained value.
+//
+// Example:
+//
+// auto a = y_absl::any(65);
+// y_absl::any_cast<int>(a); // 65
+// try {
+// y_absl::any_cast<char>(a);
+// } catch(const y_absl::bad_any_cast& e) {
+// std::cout << "Bad any cast: " << e.what() << '\n';
+// }
+class bad_any_cast : public std::bad_cast {
+ public:
+ ~bad_any_cast() override;
+ const char* what() const noexcept override;
+};
+
+namespace any_internal {
+
+[[noreturn]] void ThrowBadAnyCast();
+
+} // namespace any_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_USES_STD_ANY
+
+#endif // ABSL_TYPES_BAD_ANY_CAST_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast/ya.make
new file mode 100644
index 00000000000..a0081279374
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast/ya.make
@@ -0,0 +1,31 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/types)
+
+SRCS(
+ bad_any_cast.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access.cc
new file mode 100644
index 00000000000..e583756cd53
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access.cc
@@ -0,0 +1,48 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/types/bad_optional_access.h"
+
+#ifndef ABSL_USES_STD_OPTIONAL
+
+#include <cstdlib>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+bad_optional_access::~bad_optional_access() = default;
+
+const char* bad_optional_access::what() const noexcept {
+ return "optional has no value";
+}
+
+namespace optional_internal {
+
+void throw_bad_optional_access() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ throw bad_optional_access();
+#else
+ ABSL_RAW_LOG(FATAL, "Bad optional access");
+ abort();
+#endif
+}
+
+} // namespace optional_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_USES_STD_OPTIONAL
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access.h
new file mode 100644
index 00000000000..7420ade10fc
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access.h
@@ -0,0 +1,78 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// bad_optional_access.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the `y_absl::bad_optional_access` type.
+
+#ifndef ABSL_TYPES_BAD_OPTIONAL_ACCESS_H_
+#define ABSL_TYPES_BAD_OPTIONAL_ACCESS_H_
+
+#include <stdexcept>
+
+#include "y_absl/base/config.h"
+
+#ifdef ABSL_USES_STD_OPTIONAL
+
+#include <optional>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+using std::bad_optional_access;
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else // ABSL_USES_STD_OPTIONAL
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// -----------------------------------------------------------------------------
+// bad_optional_access
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::bad_optional_access` type is an exception type that is thrown when
+// attempting to access an `y_absl::optional` object that does not contain a
+// value.
+//
+// Example:
+//
+// y_absl::optional<int> o;
+//
+// try {
+// int n = o.value();
+// } catch(const y_absl::bad_optional_access& e) {
+// std::cout << "Bad optional access: " << e.what() << '\n';
+// }
+class bad_optional_access : public std::exception {
+ public:
+ bad_optional_access() = default;
+ ~bad_optional_access() override;
+ const char* what() const noexcept override;
+};
+
+namespace optional_internal {
+
+// Throw delegator: throws `bad_optional_access` or, when exceptions are
+// disabled, logs fatally and aborts.
+[[noreturn]] ABSL_DLL void throw_bad_optional_access();
+
+} // namespace optional_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_USES_STD_OPTIONAL
+
+#endif // ABSL_TYPES_BAD_OPTIONAL_ACCESS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access/ya.make
new file mode 100644
index 00000000000..fca9a916acf
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access/ya.make
@@ -0,0 +1,31 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/types)
+
+SRCS(
+ bad_optional_access.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access.cc
new file mode 100644
index 00000000000..77df7736a90
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access.cc
@@ -0,0 +1,64 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/types/bad_variant_access.h"
+
+#ifndef ABSL_USES_STD_VARIANT
+
+#include <cstdlib>
+#include <stdexcept>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+//////////////////////////
+// [variant.bad.access] //
+//////////////////////////
+
+bad_variant_access::~bad_variant_access() = default;
+
+const char* bad_variant_access::what() const noexcept {
+ return "Bad variant access";
+}
+
+namespace variant_internal {
+
+void ThrowBadVariantAccess() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ throw bad_variant_access();
+#else
+ ABSL_RAW_LOG(FATAL, "Bad variant access");
+ abort(); // TODO(calabrese) Remove once RAW_LOG FATAL is noreturn.
+#endif
+}
+
+void Rethrow() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ throw;
+#else
+ ABSL_RAW_LOG(FATAL,
+ "Internal error in y_absl::variant implementation. Attempted to "
+ "rethrow an exception when building with exceptions disabled.");
+ abort(); // TODO(calabrese) Remove once RAW_LOG FATAL is noreturn.
+#endif
+}
+
+} // namespace variant_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_USES_STD_VARIANT
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access.h
new file mode 100644
index 00000000000..6e41782e78a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access.h
@@ -0,0 +1,82 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// bad_variant_access.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the `y_absl::bad_variant_access` type.
+
+#ifndef ABSL_TYPES_BAD_VARIANT_ACCESS_H_
+#define ABSL_TYPES_BAD_VARIANT_ACCESS_H_
+
+#include <stdexcept>
+
+#include "y_absl/base/config.h"
+
+#ifdef ABSL_USES_STD_VARIANT
+
+#include <variant>
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+using std::bad_variant_access;
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else // ABSL_USES_STD_VARIANT
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// -----------------------------------------------------------------------------
+// bad_variant_access
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::bad_variant_access` type is an exception type that is thrown in
+// the following cases:
+//
+// * Calling `y_absl::get(y_absl::variant)` with an index or type that does
+//   not match the currently selected alternative type
+// * Calling `y_absl::visit` on an `y_absl::variant` that is in the
+//   `variant::valueless_by_exception` state.
+//
+// Example:
+//
+// y_absl::variant<int, TString> v;
+// v = 1;
+// try {
+// y_absl::get<TString>(v);
+// } catch(const y_absl::bad_variant_access& e) {
+// std::cout << "Bad variant access: " << e.what() << '\n';
+// }
+class bad_variant_access : public std::exception {
+ public:
+ bad_variant_access() noexcept = default;
+ ~bad_variant_access() override;
+ const char* what() const noexcept override;
+};
+
+namespace variant_internal {
+
+[[noreturn]] ABSL_DLL void ThrowBadVariantAccess();
+[[noreturn]] ABSL_DLL void Rethrow();
+
+} // namespace variant_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_USES_STD_VARIANT
+
+#endif // ABSL_TYPES_BAD_VARIANT_ACCESS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access/ya.make
new file mode 100644
index 00000000000..a80f8ed987f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access/ya.make
@@ -0,0 +1,31 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp-tstring
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(contrib/restricted/abseil-cpp-tstring/y_absl/types)
+
+SRCS(
+ bad_variant_access.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/compare.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/compare.h
new file mode 100644
index 00000000000..4c8cf2f59f9
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/compare.h
@@ -0,0 +1,600 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// compare.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the `y_absl::weak_equality`, `y_absl::strong_equality`,
+// `y_absl::partial_ordering`, `y_absl::weak_ordering`, and `y_absl::strong_ordering`
+// types for storing the results of three way comparisons.
+//
+// Example:
+// y_absl::weak_ordering compare(const TString& a, const TString& b);
+//
+// These are C++11-compatible versions of the corresponding C++20 types
+// (`std::weak_equality`, etc.) and are designed to be drop-in replacements
+// for code compliant with C++20.
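+//
+// For example (an illustrative sketch), a comparison result is inspected by
+// comparing it against the literal `0`:
+//
+//   if (compare(a, b) < 0) {
+//     // `a` orders before `b`.
+//   }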
+
+#ifndef ABSL_TYPES_COMPARE_H_
+#define ABSL_TYPES_COMPARE_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <type_traits>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace compare_internal {
+
+using value_type = int8_t;
+
+template <typename T>
+struct Fail {
+ static_assert(sizeof(T) < 0, "Only literal `0` is allowed.");
+};
+
+// We need the NullPtrT template to avoid triggering the modernize-use-nullptr
+// ClangTidy warning in user code.
+template <typename NullPtrT = std::nullptr_t>
+struct OnlyLiteralZero {
+ constexpr OnlyLiteralZero(NullPtrT) noexcept {} // NOLINT
+
+ // Fails compilation when `nullptr` or integral type arguments other than
+ // `int` are passed. This constructor doesn't accept `int` because literal `0`
+ // has type `int`. Literal `0` arguments will be implicitly converted to
+ // `std::nullptr_t` and accepted by the above constructor, while other `int`
+ // arguments will fail to be converted and cause compilation failure.
+ template <
+ typename T,
+ typename = typename std::enable_if<
+ std::is_same<T, std::nullptr_t>::value ||
+ (std::is_integral<T>::value && !std::is_same<T, int>::value)>::type,
+ typename = typename Fail<T>::type>
+ OnlyLiteralZero(T); // NOLINT
+};
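+
+// For example (an illustrative sketch): given `y_absl::weak_ordering o`, the
+// expression `o < 0` is well-formed because the literal `0` converts to
+// `std::nullptr_t`, whereas `int zero = 0; o < zero;` fails to compile.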
+
+enum class eq : value_type {
+ equal = 0,
+ equivalent = equal,
+ nonequal = 1,
+ nonequivalent = nonequal,
+};
+
+enum class ord : value_type { less = -1, greater = 1 };
+
+enum class ncmp : value_type { unordered = -127 };
+
+// Define macros to allow for creation or emulation of C++17 inline variables
+// based on whether the feature is supported. Note: we can't use
+// ABSL_INTERNAL_INLINE_CONSTEXPR here because the variables are of incomplete
+// types, so they need to be defined after the types are complete.
+#ifdef __cpp_inline_variables
+
+// A no-op expansion that can be followed by a semicolon at class level.
+#define ABSL_COMPARE_INLINE_BASECLASS_DECL(name) static_assert(true, "")
+
+#define ABSL_COMPARE_INLINE_SUBCLASS_DECL(type, name) \
+ static const type name
+
+#define ABSL_COMPARE_INLINE_INIT(type, name, init) \
+ inline constexpr type type::name(init)
+
+#else // __cpp_inline_variables
+
+#define ABSL_COMPARE_INLINE_BASECLASS_DECL(name) \
+ ABSL_CONST_INIT static const T name
+
+// A no-op expansion that can be followed by a semicolon at class level.
+#define ABSL_COMPARE_INLINE_SUBCLASS_DECL(type, name) static_assert(true, "")
+
+#define ABSL_COMPARE_INLINE_INIT(type, name, init) \
+ template <typename T> \
+ const T compare_internal::type##_base<T>::name(init)
+
+#endif // __cpp_inline_variables
+
+// These template base classes allow for defining the values of the constants
+// in the header file (for performance) without using inline variables (which
+// aren't available in C++11).
+template <typename T>
+struct weak_equality_base {
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(nonequivalent);
+};
+
+template <typename T>
+struct strong_equality_base {
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equal);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(nonequal);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(nonequivalent);
+};
+
+template <typename T>
+struct partial_ordering_base {
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(less);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(greater);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(unordered);
+};
+
+template <typename T>
+struct weak_ordering_base {
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(less);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(greater);
+};
+
+template <typename T>
+struct strong_ordering_base {
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(less);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equal);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(greater);
+};
+
+} // namespace compare_internal
+
+class weak_equality
+ : public compare_internal::weak_equality_base<weak_equality> {
+ explicit constexpr weak_equality(compare_internal::eq v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ friend struct compare_internal::weak_equality_base<weak_equality>;
+
+ public:
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_equality, equivalent);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_equality, nonequivalent);
+
+ // Comparisons
+ friend constexpr bool operator==(
+ weak_equality v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ == 0;
+ }
+ friend constexpr bool operator!=(
+ weak_equality v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ != 0;
+ }
+ friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>,
+ weak_equality v) noexcept {
+ return 0 == v.value_;
+ }
+ friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>,
+ weak_equality v) noexcept {
+ return 0 != v.value_;
+ }
+ friend constexpr bool operator==(weak_equality v1,
+ weak_equality v2) noexcept {
+ return v1.value_ == v2.value_;
+ }
+ friend constexpr bool operator!=(weak_equality v1,
+ weak_equality v2) noexcept {
+ return v1.value_ != v2.value_;
+ }
+
+ private:
+ compare_internal::value_type value_;
+};
+ABSL_COMPARE_INLINE_INIT(weak_equality, equivalent,
+ compare_internal::eq::equivalent);
+ABSL_COMPARE_INLINE_INIT(weak_equality, nonequivalent,
+ compare_internal::eq::nonequivalent);
+
+class strong_equality
+ : public compare_internal::strong_equality_base<strong_equality> {
+ explicit constexpr strong_equality(compare_internal::eq v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ friend struct compare_internal::strong_equality_base<strong_equality>;
+
+ public:
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, equal);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, nonequal);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, equivalent);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, nonequivalent);
+
+ // Conversion
+ constexpr operator weak_equality() const noexcept { // NOLINT
+ return value_ == 0 ? weak_equality::equivalent
+ : weak_equality::nonequivalent;
+ }
+ // Comparisons
+ friend constexpr bool operator==(
+ strong_equality v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ == 0;
+ }
+ friend constexpr bool operator!=(
+ strong_equality v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ != 0;
+ }
+ friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>,
+ strong_equality v) noexcept {
+ return 0 == v.value_;
+ }
+ friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>,
+ strong_equality v) noexcept {
+ return 0 != v.value_;
+ }
+ friend constexpr bool operator==(strong_equality v1,
+ strong_equality v2) noexcept {
+ return v1.value_ == v2.value_;
+ }
+ friend constexpr bool operator!=(strong_equality v1,
+ strong_equality v2) noexcept {
+ return v1.value_ != v2.value_;
+ }
+
+ private:
+ compare_internal::value_type value_;
+};
+ABSL_COMPARE_INLINE_INIT(strong_equality, equal, compare_internal::eq::equal);
+ABSL_COMPARE_INLINE_INIT(strong_equality, nonequal,
+ compare_internal::eq::nonequal);
+ABSL_COMPARE_INLINE_INIT(strong_equality, equivalent,
+ compare_internal::eq::equivalent);
+ABSL_COMPARE_INLINE_INIT(strong_equality, nonequivalent,
+ compare_internal::eq::nonequivalent);
+
+class partial_ordering
+ : public compare_internal::partial_ordering_base<partial_ordering> {
+ explicit constexpr partial_ordering(compare_internal::eq v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ explicit constexpr partial_ordering(compare_internal::ord v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ explicit constexpr partial_ordering(compare_internal::ncmp v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ friend struct compare_internal::partial_ordering_base<partial_ordering>;
+
+ constexpr bool is_ordered() const noexcept {
+ return value_ !=
+ compare_internal::value_type(compare_internal::ncmp::unordered);
+ }
+
+ public:
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, less);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, equivalent);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, greater);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, unordered);
+
+ // Conversion
+ constexpr operator weak_equality() const noexcept { // NOLINT
+ return value_ == 0 ? weak_equality::equivalent
+ : weak_equality::nonequivalent;
+ }
+ // Comparisons
+ friend constexpr bool operator==(
+ partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.is_ordered() && v.value_ == 0;
+ }
+ friend constexpr bool operator!=(
+ partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return !v.is_ordered() || v.value_ != 0;
+ }
+ friend constexpr bool operator<(
+ partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.is_ordered() && v.value_ < 0;
+ }
+ friend constexpr bool operator<=(
+ partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.is_ordered() && v.value_ <= 0;
+ }
+ friend constexpr bool operator>(
+ partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.is_ordered() && v.value_ > 0;
+ }
+ friend constexpr bool operator>=(
+ partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.is_ordered() && v.value_ >= 0;
+ }
+ friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>,
+ partial_ordering v) noexcept {
+ return v.is_ordered() && 0 == v.value_;
+ }
+ friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>,
+ partial_ordering v) noexcept {
+ return !v.is_ordered() || 0 != v.value_;
+ }
+ friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>,
+ partial_ordering v) noexcept {
+ return v.is_ordered() && 0 < v.value_;
+ }
+ friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>,
+ partial_ordering v) noexcept {
+ return v.is_ordered() && 0 <= v.value_;
+ }
+ friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>,
+ partial_ordering v) noexcept {
+ return v.is_ordered() && 0 > v.value_;
+ }
+ friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>,
+ partial_ordering v) noexcept {
+ return v.is_ordered() && 0 >= v.value_;
+ }
+ friend constexpr bool operator==(partial_ordering v1,
+ partial_ordering v2) noexcept {
+ return v1.value_ == v2.value_;
+ }
+ friend constexpr bool operator!=(partial_ordering v1,
+ partial_ordering v2) noexcept {
+ return v1.value_ != v2.value_;
+ }
+
+ private:
+ compare_internal::value_type value_;
+};
+ABSL_COMPARE_INLINE_INIT(partial_ordering, less, compare_internal::ord::less);
+ABSL_COMPARE_INLINE_INIT(partial_ordering, equivalent,
+ compare_internal::eq::equivalent);
+ABSL_COMPARE_INLINE_INIT(partial_ordering, greater,
+ compare_internal::ord::greater);
+ABSL_COMPARE_INLINE_INIT(partial_ordering, unordered,
+ compare_internal::ncmp::unordered);
+
+class weak_ordering
+ : public compare_internal::weak_ordering_base<weak_ordering> {
+ explicit constexpr weak_ordering(compare_internal::eq v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ explicit constexpr weak_ordering(compare_internal::ord v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ friend struct compare_internal::weak_ordering_base<weak_ordering>;
+
+ public:
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, less);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, equivalent);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, greater);
+
+ // Conversions
+ constexpr operator weak_equality() const noexcept { // NOLINT
+ return value_ == 0 ? weak_equality::equivalent
+ : weak_equality::nonequivalent;
+ }
+ constexpr operator partial_ordering() const noexcept { // NOLINT
+ return value_ == 0 ? partial_ordering::equivalent
+ : (value_ < 0 ? partial_ordering::less
+ : partial_ordering::greater);
+ }
+ // Comparisons
+ friend constexpr bool operator==(
+ weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ == 0;
+ }
+ friend constexpr bool operator!=(
+ weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ != 0;
+ }
+ friend constexpr bool operator<(
+ weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ < 0;
+ }
+ friend constexpr bool operator<=(
+ weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ <= 0;
+ }
+ friend constexpr bool operator>(
+ weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ > 0;
+ }
+ friend constexpr bool operator>=(
+ weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ >= 0;
+ }
+ friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>,
+ weak_ordering v) noexcept {
+ return 0 == v.value_;
+ }
+ friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>,
+ weak_ordering v) noexcept {
+ return 0 != v.value_;
+ }
+ friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>,
+ weak_ordering v) noexcept {
+ return 0 < v.value_;
+ }
+ friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>,
+ weak_ordering v) noexcept {
+ return 0 <= v.value_;
+ }
+ friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>,
+ weak_ordering v) noexcept {
+ return 0 > v.value_;
+ }
+ friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>,
+ weak_ordering v) noexcept {
+ return 0 >= v.value_;
+ }
+ friend constexpr bool operator==(weak_ordering v1,
+ weak_ordering v2) noexcept {
+ return v1.value_ == v2.value_;
+ }
+ friend constexpr bool operator!=(weak_ordering v1,
+ weak_ordering v2) noexcept {
+ return v1.value_ != v2.value_;
+ }
+
+ private:
+ compare_internal::value_type value_;
+};
+ABSL_COMPARE_INLINE_INIT(weak_ordering, less, compare_internal::ord::less);
+ABSL_COMPARE_INLINE_INIT(weak_ordering, equivalent,
+ compare_internal::eq::equivalent);
+ABSL_COMPARE_INLINE_INIT(weak_ordering, greater,
+ compare_internal::ord::greater);
+
+class strong_ordering
+ : public compare_internal::strong_ordering_base<strong_ordering> {
+ explicit constexpr strong_ordering(compare_internal::eq v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ explicit constexpr strong_ordering(compare_internal::ord v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ friend struct compare_internal::strong_ordering_base<strong_ordering>;
+
+ public:
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, less);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equal);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equivalent);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, greater);
+
+ // Conversions
+ constexpr operator weak_equality() const noexcept { // NOLINT
+ return value_ == 0 ? weak_equality::equivalent
+ : weak_equality::nonequivalent;
+ }
+ constexpr operator strong_equality() const noexcept { // NOLINT
+ return value_ == 0 ? strong_equality::equal : strong_equality::nonequal;
+ }
+ constexpr operator partial_ordering() const noexcept { // NOLINT
+ return value_ == 0 ? partial_ordering::equivalent
+ : (value_ < 0 ? partial_ordering::less
+ : partial_ordering::greater);
+ }
+ constexpr operator weak_ordering() const noexcept { // NOLINT
+ return value_ == 0
+ ? weak_ordering::equivalent
+ : (value_ < 0 ? weak_ordering::less : weak_ordering::greater);
+ }
+ // Comparisons
+ friend constexpr bool operator==(
+ strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ == 0;
+ }
+ friend constexpr bool operator!=(
+ strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ != 0;
+ }
+ friend constexpr bool operator<(
+ strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ < 0;
+ }
+ friend constexpr bool operator<=(
+ strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ <= 0;
+ }
+ friend constexpr bool operator>(
+ strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ > 0;
+ }
+ friend constexpr bool operator>=(
+ strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept {
+ return v.value_ >= 0;
+ }
+ friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>,
+ strong_ordering v) noexcept {
+ return 0 == v.value_;
+ }
+ friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>,
+ strong_ordering v) noexcept {
+ return 0 != v.value_;
+ }
+ friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>,
+ strong_ordering v) noexcept {
+ return 0 < v.value_;
+ }
+ friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>,
+ strong_ordering v) noexcept {
+ return 0 <= v.value_;
+ }
+ friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>,
+ strong_ordering v) noexcept {
+ return 0 > v.value_;
+ }
+ friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>,
+ strong_ordering v) noexcept {
+ return 0 >= v.value_;
+ }
+ friend constexpr bool operator==(strong_ordering v1,
+ strong_ordering v2) noexcept {
+ return v1.value_ == v2.value_;
+ }
+ friend constexpr bool operator!=(strong_ordering v1,
+ strong_ordering v2) noexcept {
+ return v1.value_ != v2.value_;
+ }
+
+ private:
+ compare_internal::value_type value_;
+};
+ABSL_COMPARE_INLINE_INIT(strong_ordering, less, compare_internal::ord::less);
+ABSL_COMPARE_INLINE_INIT(strong_ordering, equal, compare_internal::eq::equal);
+ABSL_COMPARE_INLINE_INIT(strong_ordering, equivalent,
+ compare_internal::eq::equivalent);
+ABSL_COMPARE_INLINE_INIT(strong_ordering, greater,
+ compare_internal::ord::greater);
+
+#undef ABSL_COMPARE_INLINE_BASECLASS_DECL
+#undef ABSL_COMPARE_INLINE_SUBCLASS_DECL
+#undef ABSL_COMPARE_INLINE_INIT
+
+namespace compare_internal {
+// We also provide these comparator adapter functions for internal y_absl use.
+
+// Helper functions to do a boolean comparison of two keys given a boolean
+// or three-way comparator.
+// SFINAE prevents implicit conversions to bool (such as from int).
+template <typename Bool,
+ y_absl::enable_if_t<std::is_same<bool, Bool>::value, int> = 0>
+constexpr bool compare_result_as_less_than(const Bool r) { return r; }
+constexpr bool compare_result_as_less_than(const y_absl::weak_ordering r) {
+ return r < 0;
+}
+
+template <typename Compare, typename K, typename LK>
+constexpr bool do_less_than_comparison(const Compare &compare, const K &x,
+ const LK &y) {
+ return compare_result_as_less_than(compare(x, y));
+}
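+
+// For example (an illustrative sketch):
+//
+//   do_less_than_comparison(std::less<int>(), 1, 2);  // true (bool comparator)
+//
+// With a comparator returning `y_absl::weak_ordering`, the result is true
+// exactly when the comparator's result compares `< 0`.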
+
+// Helper functions to do a three-way comparison of two keys given a boolean or
+// three-way comparator.
+// SFINAE prevents implicit conversions to int (such as from bool).
+template <typename Int,
+ y_absl::enable_if_t<std::is_same<int, Int>::value, int> = 0>
+constexpr y_absl::weak_ordering compare_result_as_ordering(const Int c) {
+ return c < 0 ? y_absl::weak_ordering::less
+ : c == 0 ? y_absl::weak_ordering::equivalent
+ : y_absl::weak_ordering::greater;
+}
+constexpr y_absl::weak_ordering compare_result_as_ordering(
+ const y_absl::weak_ordering c) {
+ return c;
+}
+
+template <
+ typename Compare, typename K, typename LK,
+ y_absl::enable_if_t<!std::is_same<bool, y_absl::result_of_t<Compare(
+ const K &, const LK &)>>::value,
+ int> = 0>
+constexpr y_absl::weak_ordering do_three_way_comparison(const Compare &compare,
+ const K &x, const LK &y) {
+ return compare_result_as_ordering(compare(x, y));
+}
+template <
+ typename Compare, typename K, typename LK,
+ y_absl::enable_if_t<std::is_same<bool, y_absl::result_of_t<Compare(
+ const K &, const LK &)>>::value,
+ int> = 0>
+constexpr y_absl::weak_ordering do_three_way_comparison(const Compare &compare,
+ const K &x, const LK &y) {
+ return compare(x, y) ? y_absl::weak_ordering::less
+ : compare(y, x) ? y_absl::weak_ordering::greater
+ : y_absl::weak_ordering::equivalent;
+}
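+
+// For example (an illustrative sketch), with a boolean comparator two calls
+// synthesize the ordering:
+//
+//   do_three_way_comparison(std::less<int>(), 1, 2);  // weak_ordering::less
+//   do_three_way_comparison(std::less<int>(), 2, 2);  // weak_ordering::equivalent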
+
+} // namespace compare_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TYPES_COMPARE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..f39e6835968
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/.yandex_meta/licenses.list.txt
@@ -0,0 +1,24 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2018 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2019 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_aliases.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_aliases.h
new file mode 100644
index 00000000000..ae9df53bf96
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_aliases.h
@@ -0,0 +1,447 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// conformance_aliases.h
+// -----------------------------------------------------------------------------
+//
+// This file contains type aliases of common ConformanceProfiles and Archetypes
+// so that they can be directly used by name without creating them from scratch.
+
+#ifndef ABSL_TYPES_INTERNAL_CONFORMANCE_ALIASES_H_
+#define ABSL_TYPES_INTERNAL_CONFORMANCE_ALIASES_H_
+
+#include "y_absl/types/internal/conformance_archetype.h"
+#include "y_absl/types/internal/conformance_profile.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace types_internal {
+
+// Creates both a Profile and a corresponding Archetype with root name "name".
+#define ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(name, ...) \
+ struct name##Profile : __VA_ARGS__ {}; \
+ \
+ using name##Archetype = ::y_absl::types_internal::Archetype<name##Profile>; \
+ \
+ template <class AbslInternalProfileTag> \
+ using name##Archetype##_ = ::y_absl::types_internal::Archetype< \
+ ::y_absl::types_internal::StrongProfileTypedef<name##Profile, \
+ AbslInternalProfileTag>>
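+
+// For example (an illustrative note), the first invocation below defines
+// `HasTrivialDefaultConstructorProfile`, the `HasTrivialDefaultConstructorArchetype`
+// alias, and the tagged `HasTrivialDefaultConstructorArchetype_` alias template.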
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasTrivialDefaultConstructor,
+ ConformanceProfile<default_constructible::trivial>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowDefaultConstructor,
+ ConformanceProfile<default_constructible::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasDefaultConstructor, ConformanceProfile<default_constructible::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasTrivialMoveConstructor, ConformanceProfile<default_constructible::maybe,
+ move_constructible::trivial>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowMoveConstructor, ConformanceProfile<default_constructible::maybe,
+ move_constructible::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasMoveConstructor,
+ ConformanceProfile<default_constructible::maybe, move_constructible::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasTrivialCopyConstructor,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::trivial>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowCopyConstructor,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasCopyConstructor,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasTrivialMoveAssign,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::trivial>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowMoveAssign,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasMoveAssign,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasTrivialCopyAssign,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::trivial>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowCopyAssign,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasCopyAssign,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasTrivialDestructor,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::trivial>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowDestructor,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasDestructor,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowEquality,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe,
+ equality_comparable::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasEquality,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe,
+ equality_comparable::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowInequality,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe,
+ equality_comparable::maybe,
+ inequality_comparable::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasInequality,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe,
+ equality_comparable::maybe, inequality_comparable::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowLessThan,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe,
+ equality_comparable::maybe, inequality_comparable::maybe,
+ less_than_comparable::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasLessThan,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe,
+ equality_comparable::maybe, inequality_comparable::maybe,
+ less_than_comparable::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowLessEqual,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe,
+ equality_comparable::maybe, inequality_comparable::maybe,
+ less_than_comparable::maybe,
+ less_equal_comparable::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasLessEqual,
+ ConformanceProfile<default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe,
+ equality_comparable::maybe, inequality_comparable::maybe,
+ less_than_comparable::maybe,
+ less_equal_comparable::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowGreaterEqual,
+ ConformanceProfile<
+ default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe, equality_comparable::maybe,
+ inequality_comparable::maybe, less_than_comparable::maybe,
+ less_equal_comparable::maybe, greater_equal_comparable::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasGreaterEqual,
+ ConformanceProfile<
+ default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe, equality_comparable::maybe,
+ inequality_comparable::maybe, less_than_comparable::maybe,
+ less_equal_comparable::maybe, greater_equal_comparable::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowGreaterThan,
+ ConformanceProfile<
+ default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe, equality_comparable::maybe,
+ inequality_comparable::maybe, less_than_comparable::maybe,
+ less_equal_comparable::maybe, greater_equal_comparable::maybe,
+ greater_than_comparable::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasGreaterThan,
+ ConformanceProfile<
+ default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe, equality_comparable::maybe,
+ inequality_comparable::maybe, less_than_comparable::maybe,
+ less_equal_comparable::maybe, greater_equal_comparable::maybe,
+ greater_than_comparable::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasNothrowSwap,
+ ConformanceProfile<
+ default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe, equality_comparable::maybe,
+ inequality_comparable::maybe, less_than_comparable::maybe,
+ less_equal_comparable::maybe, greater_equal_comparable::maybe,
+ greater_than_comparable::maybe, swappable::nothrow>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasSwap,
+ ConformanceProfile<
+ default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe, equality_comparable::maybe,
+ inequality_comparable::maybe, less_than_comparable::maybe,
+ less_equal_comparable::maybe, greater_equal_comparable::maybe,
+ greater_than_comparable::maybe, swappable::yes>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HasStdHashSpecialization,
+ ConformanceProfile<
+ default_constructible::maybe, move_constructible::maybe,
+ copy_constructible::maybe, move_assignable::maybe,
+ copy_assignable::maybe, destructible::maybe, equality_comparable::maybe,
+ inequality_comparable::maybe, less_than_comparable::maybe,
+ less_equal_comparable::maybe, greater_equal_comparable::maybe,
+ greater_than_comparable::maybe, swappable::maybe, hashable::yes>);
+
+////////////////////////////////////////////////////////////////////////////////
+//// The remaining aliases are combinations of the previous aliases. ////
+////////////////////////////////////////////////////////////////////////////////
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ Equatable, CombineProfiles<HasEqualityProfile, HasInequalityProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ Comparable,
+ CombineProfiles<EquatableProfile, HasLessThanProfile, HasLessEqualProfile,
+ HasGreaterEqualProfile, HasGreaterThanProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ NothrowEquatable,
+ CombineProfiles<HasNothrowEqualityProfile, HasNothrowInequalityProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ NothrowComparable,
+ CombineProfiles<NothrowEquatableProfile, HasNothrowLessThanProfile,
+ HasNothrowLessEqualProfile, HasNothrowGreaterEqualProfile,
+ HasNothrowGreaterThanProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ Value,
+ CombineProfiles<HasNothrowMoveConstructorProfile, HasCopyConstructorProfile,
+ HasNothrowMoveAssignProfile, HasCopyAssignProfile,
+ HasNothrowDestructorProfile, HasNothrowSwapProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ EquatableValue, CombineProfiles<EquatableProfile, ValueProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ ComparableValue, CombineProfiles<ComparableProfile, ValueProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ DefaultConstructibleValue,
+ CombineProfiles<HasDefaultConstructorProfile, ValueProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ NothrowMoveConstructible, CombineProfiles<HasNothrowMoveConstructorProfile,
+ HasNothrowDestructorProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ EquatableNothrowMoveConstructible,
+ CombineProfiles<EquatableProfile, NothrowMoveConstructibleProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ ComparableNothrowMoveConstructible,
+ CombineProfiles<ComparableProfile, NothrowMoveConstructibleProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ DefaultConstructibleNothrowMoveConstructible,
+ CombineProfiles<HasDefaultConstructorProfile,
+ NothrowMoveConstructibleProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ CopyConstructible,
+ CombineProfiles<HasNothrowMoveConstructorProfile, HasCopyConstructorProfile,
+ HasNothrowDestructorProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ EquatableCopyConstructible,
+ CombineProfiles<EquatableProfile, CopyConstructibleProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ ComparableCopyConstructible,
+ CombineProfiles<ComparableProfile, CopyConstructibleProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ DefaultConstructibleCopyConstructible,
+ CombineProfiles<HasDefaultConstructorProfile, CopyConstructibleProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ NothrowMovable,
+ CombineProfiles<HasNothrowMoveConstructorProfile,
+ HasNothrowMoveAssignProfile, HasNothrowDestructorProfile,
+ HasNothrowSwapProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ EquatableNothrowMovable,
+ CombineProfiles<EquatableProfile, NothrowMovableProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ ComparableNothrowMovable,
+ CombineProfiles<ComparableProfile, NothrowMovableProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ DefaultConstructibleNothrowMovable,
+ CombineProfiles<HasDefaultConstructorProfile, NothrowMovableProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ TrivialSpecialMemberFunctions,
+ CombineProfiles<HasTrivialDefaultConstructorProfile,
+ HasTrivialMoveConstructorProfile,
+ HasTrivialCopyConstructorProfile,
+ HasTrivialMoveAssignProfile, HasTrivialCopyAssignProfile,
+ HasTrivialDestructorProfile, HasNothrowSwapProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ TriviallyComplete,
+ CombineProfiles<TrivialSpecialMemberFunctionsProfile, ComparableProfile,
+ HasStdHashSpecializationProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HashableNothrowMoveConstructible,
+ CombineProfiles<HasStdHashSpecializationProfile,
+ NothrowMoveConstructibleProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HashableCopyConstructible,
+ CombineProfiles<HasStdHashSpecializationProfile, CopyConstructibleProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HashableNothrowMovable,
+ CombineProfiles<HasStdHashSpecializationProfile, NothrowMovableProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ HashableValue,
+ CombineProfiles<HasStdHashSpecializationProfile, ValueProfile>);
+
+ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(
+ ComparableHashableValue,
+ CombineProfiles<HashableValueProfile, ComparableProfile>);
+
+// The "preferred" profiles that we support in Abseil.
+template <template <class...> class Receiver>
+using ExpandBasicProfiles =
+ Receiver<NothrowMoveConstructibleProfile, CopyConstructibleProfile,
+ NothrowMovableProfile, ValueProfile>;
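+
+// A Receiver is any template that accepts a pack of profile types; these
+// "Expand" aliases apply it to the whole list. An illustrative (hypothetical)
+// receiver:
+//
+//   template <class... Profs>
+//   struct CountProfiles {
+//     static constexpr std::size_t value = sizeof...(Profs);
+//   };
+//
+//   static_assert(ExpandBasicProfiles<CountProfiles>::value == 4, "");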
+
+// The basic profiles except that they are also all Equatable.
+template <template <class...> class Receiver>
+using ExpandBasicEquatableProfiles =
+ Receiver<EquatableNothrowMoveConstructibleProfile,
+ EquatableCopyConstructibleProfile, EquatableNothrowMovableProfile,
+ EquatableValueProfile>;
+
+// The basic profiles except that they are also all Comparable.
+template <template <class...> class Receiver>
+using ExpandBasicComparableProfiles =
+ Receiver<ComparableNothrowMoveConstructibleProfile,
+ ComparableCopyConstructibleProfile,
+ ComparableNothrowMovableProfile, ComparableValueProfile>;
+
+// The basic profiles except that they are also all Hashable.
+template <template <class...> class Receiver>
+using ExpandBasicHashableProfiles =
+ Receiver<HashableNothrowMoveConstructibleProfile,
+ HashableCopyConstructibleProfile, HashableNothrowMovableProfile,
+ HashableValueProfile>;
+
+// The basic profiles except that they are also all DefaultConstructible.
+template <template <class...> class Receiver>
+using ExpandBasicDefaultConstructibleProfiles =
+ Receiver<DefaultConstructibleNothrowMoveConstructibleProfile,
+ DefaultConstructibleCopyConstructibleProfile,
+ DefaultConstructibleNothrowMovableProfile,
+ DefaultConstructibleValueProfile>;
+
+// The type profiles that we support in Abseil (all of the previous lists).
+template <template <class...> class Receiver>
+using ExpandSupportedProfiles = Receiver<
+ NothrowMoveConstructibleProfile, CopyConstructibleProfile,
+ NothrowMovableProfile, ValueProfile,
+ EquatableNothrowMoveConstructibleProfile, EquatableCopyConstructibleProfile,
+ EquatableNothrowMovableProfile, EquatableValueProfile,
+ ComparableNothrowMoveConstructibleProfile,
+ ComparableCopyConstructibleProfile, ComparableNothrowMovableProfile,
+ ComparableValueProfile, DefaultConstructibleNothrowMoveConstructibleProfile,
+ DefaultConstructibleCopyConstructibleProfile,
+ DefaultConstructibleNothrowMovableProfile, DefaultConstructibleValueProfile,
+ HashableNothrowMoveConstructibleProfile, HashableCopyConstructibleProfile,
+ HashableNothrowMovableProfile, HashableValueProfile>;
+
+// TODO(calabrese) Include types that have throwing move constructors, since in
+// practice we still need to support them because of standard library types with
+// (potentially) non-noexcept moves.
+
+} // namespace types_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#undef ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS
+
+#endif // ABSL_TYPES_INTERNAL_CONFORMANCE_ALIASES_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_archetype.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_archetype.h
new file mode 100644
index 00000000000..dc27d05df3d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_archetype.h
@@ -0,0 +1,978 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// conformance_archetype.h
+// -----------------------------------------------------------------------------
+//
+// This file contains a facility for generating "archetypes" out of
+// "Conformance Profiles" (see "conformance_profile.h" for more information
+// about Conformance Profiles). An archetype is a type that aims to support the
+// bare minimum requirements of a given Conformance Profile. For instance, an
+// archetype that corresponds to an ImmutableProfile has exactly a nothrow
+// move-constructor, a potentially-throwing copy constructor, a nothrow
+// destructor, and all other special member functions deleted. These
+// archetypes are useful for testing to make sure that templates are able to
+// work with the kinds of types that they claim to support (i.e. that they do
+// not accidentally under-constrain).
+//
+// The main type template in this file is the Archetype template, which takes
+// a Conformance Profile as a template argument and its instantiations are a
+// minimum-conforming model of that profile.
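+//
+// An illustrative (hypothetical) use in a test, with NothrowMovableProfile
+// coming from "conformance_aliases.h":
+//
+//   using Arch = Archetype<NothrowMovableProfile>;
+//   static_assert(std::is_nothrow_move_constructible<Arch>::value, "");
+//   static_assert(!std::is_copy_constructible<Arch>::value, "");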
+
+#ifndef ABSL_TYPES_INTERNAL_CONFORMANCE_ARCHETYPE_H_
+#define ABSL_TYPES_INTERNAL_CONFORMANCE_ARCHETYPE_H_
+
+#include <cstddef>
+#include <functional>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/types/internal/conformance_profile.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace types_internal {
+
+// A minimum-conforming implementation of a type with properties specified in
+// `Prof`, where `Prof` is a valid Conformance Profile.
+template <class Prof, class /*Enabler*/ = void>
+class Archetype;
+
+// Given an Archetype, obtain the properties of the profile associated with that
+// archetype.
+template <class Archetype>
+struct PropertiesOfArchetype;
+
+template <class Prof>
+struct PropertiesOfArchetype<Archetype<Prof>> {
+ using type = PropertiesOfT<Prof>;
+};
+
+template <class Archetype>
+using PropertiesOfArchetypeT = typename PropertiesOfArchetype<Archetype>::type;
+
+// A metafunction to determine if a type is an `Archetype`.
+template <class T>
+struct IsArchetype : std::false_type {};
+
+template <class Prof>
+struct IsArchetype<Archetype<Prof>> : std::true_type {};
+
+// A constructor tag type used when creating an Archetype with internal state.
+struct MakeArchetypeState {};
+
+// Data stored within an archetype that is copied/compared/hashed when the
+// corresponding operations are used.
+using ArchetypeState = std::size_t;
+
+////////////////////////////////////////////////////////////////////////////////
+// This section of the file defines a chain of base classes for Archetype, //
+// where each base defines a specific special member function with the //
+// appropriate properties (deleted, noexcept(false), noexcept, or trivial). //
+////////////////////////////////////////////////////////////////////////////////
+
+// The bottom-most base, which contains the state and the default constructor.
+template <default_constructible DefaultConstructibleValue>
+struct ArchetypeStateBase {
+ static_assert(DefaultConstructibleValue == default_constructible::yes ||
+ DefaultConstructibleValue == default_constructible::nothrow,
+ "");
+
+ ArchetypeStateBase() noexcept(
+ DefaultConstructibleValue ==
+ default_constructible::
+ nothrow) /*Vacuous archetype_state initialization*/ {}
+ explicit ArchetypeStateBase(MakeArchetypeState, ArchetypeState state) noexcept
+ : archetype_state(state) {}
+
+ ArchetypeState archetype_state;
+};
+
+template <>
+struct ArchetypeStateBase<default_constructible::maybe> {
+ explicit ArchetypeStateBase() = delete;
+ explicit ArchetypeStateBase(MakeArchetypeState, ArchetypeState state) noexcept
+ : archetype_state(state) {}
+
+ ArchetypeState archetype_state;
+};
+
+template <>
+struct ArchetypeStateBase<default_constructible::trivial> {
+ ArchetypeStateBase() = default;
+ explicit ArchetypeStateBase(MakeArchetypeState, ArchetypeState state) noexcept
+ : archetype_state(state) {}
+
+ ArchetypeState archetype_state;
+};
+
+// The move-constructor base
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue>
+struct ArchetypeMoveConstructor
+ : ArchetypeStateBase<DefaultConstructibleValue> {
+ static_assert(MoveConstructibleValue == move_constructible::yes ||
+ MoveConstructibleValue == move_constructible::nothrow,
+ "");
+
+ explicit ArchetypeMoveConstructor(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeStateBase<DefaultConstructibleValue>(MakeArchetypeState(),
+ state) {}
+
+ ArchetypeMoveConstructor() = default;
+ ArchetypeMoveConstructor(ArchetypeMoveConstructor&& other) noexcept(
+ MoveConstructibleValue == move_constructible::nothrow)
+ : ArchetypeStateBase<DefaultConstructibleValue>(MakeArchetypeState(),
+ other.archetype_state) {}
+ ArchetypeMoveConstructor(const ArchetypeMoveConstructor&) = default;
+ ArchetypeMoveConstructor& operator=(ArchetypeMoveConstructor&&) = default;
+ ArchetypeMoveConstructor& operator=(const ArchetypeMoveConstructor&) =
+ default;
+};
+
+template <default_constructible DefaultConstructibleValue>
+struct ArchetypeMoveConstructor<DefaultConstructibleValue,
+ move_constructible::trivial>
+ : ArchetypeStateBase<DefaultConstructibleValue> {
+ explicit ArchetypeMoveConstructor(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeStateBase<DefaultConstructibleValue>(MakeArchetypeState(),
+ state) {}
+
+ ArchetypeMoveConstructor() = default;
+};
+
+// The copy-constructor base
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue,
+ copy_constructible CopyConstructibleValue>
+struct ArchetypeCopyConstructor
+ : ArchetypeMoveConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue> {
+ static_assert(CopyConstructibleValue == copy_constructible::yes ||
+ CopyConstructibleValue == copy_constructible::nothrow,
+ "");
+ explicit ArchetypeCopyConstructor(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeMoveConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue>(MakeArchetypeState(),
+ state) {}
+
+ ArchetypeCopyConstructor() = default;
+ ArchetypeCopyConstructor(ArchetypeCopyConstructor&&) = default;
+ ArchetypeCopyConstructor(const ArchetypeCopyConstructor& other) noexcept(
+ CopyConstructibleValue == copy_constructible::nothrow)
+ : ArchetypeMoveConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue>(
+ MakeArchetypeState(), other.archetype_state) {}
+ ArchetypeCopyConstructor& operator=(ArchetypeCopyConstructor&&) = default;
+ ArchetypeCopyConstructor& operator=(const ArchetypeCopyConstructor&) =
+ default;
+};
+
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue>
+struct ArchetypeCopyConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue,
+ copy_constructible::maybe>
+ : ArchetypeMoveConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue> {
+ explicit ArchetypeCopyConstructor(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeMoveConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue>(MakeArchetypeState(),
+ state) {}
+
+ ArchetypeCopyConstructor() = default;
+ ArchetypeCopyConstructor(ArchetypeCopyConstructor&&) = default;
+ ArchetypeCopyConstructor(const ArchetypeCopyConstructor&) = delete;
+ ArchetypeCopyConstructor& operator=(ArchetypeCopyConstructor&&) = default;
+ ArchetypeCopyConstructor& operator=(const ArchetypeCopyConstructor&) =
+ default;
+};
+
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue>
+struct ArchetypeCopyConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue,
+ copy_constructible::trivial>
+ : ArchetypeMoveConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue> {
+ explicit ArchetypeCopyConstructor(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeMoveConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue>(MakeArchetypeState(),
+ state) {}
+
+ ArchetypeCopyConstructor() = default;
+};
+
+// The move-assign base
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue,
+ copy_constructible CopyConstructibleValue,
+ move_assignable MoveAssignableValue>
+struct ArchetypeMoveAssign
+ : ArchetypeCopyConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue, CopyConstructibleValue> {
+ static_assert(MoveAssignableValue == move_assignable::yes ||
+ MoveAssignableValue == move_assignable::nothrow,
+ "");
+ explicit ArchetypeMoveAssign(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeCopyConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue,
+ CopyConstructibleValue>(MakeArchetypeState(),
+ state) {}
+
+ ArchetypeMoveAssign() = default;
+ ArchetypeMoveAssign(ArchetypeMoveAssign&&) = default;
+ ArchetypeMoveAssign(const ArchetypeMoveAssign&) = default;
+ ArchetypeMoveAssign& operator=(ArchetypeMoveAssign&& other) noexcept(
+ MoveAssignableValue == move_assignable::nothrow) {
+ this->archetype_state = other.archetype_state;
+ return *this;
+ }
+
+ ArchetypeMoveAssign& operator=(const ArchetypeMoveAssign&) = default;
+};
+
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue,
+ copy_constructible CopyConstructibleValue>
+struct ArchetypeMoveAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, move_assignable::trivial>
+ : ArchetypeCopyConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue, CopyConstructibleValue> {
+ explicit ArchetypeMoveAssign(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeCopyConstructor<DefaultConstructibleValue,
+ MoveConstructibleValue,
+ CopyConstructibleValue>(MakeArchetypeState(),
+ state) {}
+
+ ArchetypeMoveAssign() = default;
+};
+
+// The copy-assign base
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue,
+ copy_constructible CopyConstructibleValue,
+ move_assignable MoveAssignableValue,
+ copy_assignable CopyAssignableValue>
+struct ArchetypeCopyAssign
+ : ArchetypeMoveAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue> {
+ static_assert(CopyAssignableValue == copy_assignable::yes ||
+ CopyAssignableValue == copy_assignable::nothrow,
+ "");
+ explicit ArchetypeCopyAssign(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeMoveAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue>(
+ MakeArchetypeState(), state) {}
+
+ ArchetypeCopyAssign() = default;
+ ArchetypeCopyAssign(ArchetypeCopyAssign&&) = default;
+ ArchetypeCopyAssign(const ArchetypeCopyAssign&) = default;
+ ArchetypeCopyAssign& operator=(ArchetypeCopyAssign&&) = default;
+
+ ArchetypeCopyAssign& operator=(const ArchetypeCopyAssign& other) noexcept(
+ CopyAssignableValue == copy_assignable::nothrow) {
+ this->archetype_state = other.archetype_state;
+ return *this;
+ }
+};
+
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue,
+ copy_constructible CopyConstructibleValue,
+ move_assignable MoveAssignableValue>
+struct ArchetypeCopyAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue,
+ copy_assignable::maybe>
+ : ArchetypeMoveAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue> {
+ explicit ArchetypeCopyAssign(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeMoveAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue>(
+ MakeArchetypeState(), state) {}
+
+ ArchetypeCopyAssign() = default;
+ ArchetypeCopyAssign(ArchetypeCopyAssign&&) = default;
+ ArchetypeCopyAssign(const ArchetypeCopyAssign&) = default;
+ ArchetypeCopyAssign& operator=(ArchetypeCopyAssign&&) = default;
+ ArchetypeCopyAssign& operator=(const ArchetypeCopyAssign&) = delete;
+};
+
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue,
+ copy_constructible CopyConstructibleValue,
+ move_assignable MoveAssignableValue>
+struct ArchetypeCopyAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue,
+ copy_assignable::trivial>
+ : ArchetypeMoveAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue> {
+ explicit ArchetypeCopyAssign(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeMoveAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue>(
+ MakeArchetypeState(), state) {}
+
+ ArchetypeCopyAssign() = default;
+};
+
+// The destructor base
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue,
+ copy_constructible CopyConstructibleValue,
+ move_assignable MoveAssignableValue,
+ copy_assignable CopyAssignableValue, destructible DestructibleValue>
+struct ArchetypeDestructor
+ : ArchetypeCopyAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue,
+ CopyAssignableValue> {
+ static_assert(DestructibleValue == destructible::yes ||
+ DestructibleValue == destructible::nothrow,
+ "");
+
+ explicit ArchetypeDestructor(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeCopyAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue,
+ CopyAssignableValue>(MakeArchetypeState(), state) {}
+
+ ArchetypeDestructor() = default;
+ ArchetypeDestructor(ArchetypeDestructor&&) = default;
+ ArchetypeDestructor(const ArchetypeDestructor&) = default;
+ ArchetypeDestructor& operator=(ArchetypeDestructor&&) = default;
+ ArchetypeDestructor& operator=(const ArchetypeDestructor&) = default;
+ ~ArchetypeDestructor() noexcept(DestructibleValue == destructible::nothrow) {}
+};
+
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue,
+ copy_constructible CopyConstructibleValue,
+ move_assignable MoveAssignableValue,
+ copy_assignable CopyAssignableValue>
+struct ArchetypeDestructor<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue,
+ CopyAssignableValue, destructible::trivial>
+ : ArchetypeCopyAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue,
+ CopyAssignableValue> {
+ explicit ArchetypeDestructor(MakeArchetypeState,
+ ArchetypeState state) noexcept
+ : ArchetypeCopyAssign<DefaultConstructibleValue, MoveConstructibleValue,
+ CopyConstructibleValue, MoveAssignableValue,
+ CopyAssignableValue>(MakeArchetypeState(), state) {}
+
+ ArchetypeDestructor() = default;
+};
+
+// An alias to the top of the chain of bases for special-member functions.
+// NOTE: move_constructible::maybe, move_assignable::maybe, and
+// destructible::maybe are handled in the top-level type by way of SFINAE.
+// Because of this, we never instantiate the base classes with
+// move_constructible::maybe, move_assignable::maybe, or destructible::maybe so
+// that we minimize the number of different possible type-template
+// instantiations.
+template <default_constructible DefaultConstructibleValue,
+ move_constructible MoveConstructibleValue,
+ copy_constructible CopyConstructibleValue,
+ move_assignable MoveAssignableValue,
+ copy_assignable CopyAssignableValue, destructible DestructibleValue>
+using ArchetypeSpecialMembersBase = ArchetypeDestructor<
+ DefaultConstructibleValue,
+ MoveConstructibleValue != move_constructible::maybe
+ ? MoveConstructibleValue
+ : move_constructible::nothrow,
+ CopyConstructibleValue,
+ MoveAssignableValue != move_assignable::maybe ? MoveAssignableValue
+ : move_assignable::nothrow,
+ CopyAssignableValue,
+ DestructibleValue != destructible::maybe ? DestructibleValue
+ : destructible::nothrow>;
+
+// A function that is used to create an archetype with some associated state.
+template <class Arch>
+Arch MakeArchetype(ArchetypeState state) noexcept {
+ static_assert(IsArchetype<Arch>::value,
+ "The explicit template argument to MakeArchetype is required "
+ "to be an Archetype.");
+ return Arch(MakeArchetypeState(), state);
+}
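+
+// For instance (illustrative, for any profile `SomeProf` whose archetype is
+// move-constructible):
+//
+//   using Arch = Archetype<SomeProf>;
+//   Arch arch = MakeArchetype<Arch>(42);  // arch.archetype_state == 42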
+
+// This is used to conditionally delete "copy" and "move" constructors in a
+// way that is consistent with what the ConformanceProfile requires, and that
+// also ensures the arguments to the copy/move do not come from implicit
+// conversions when dealing with the Archetype.
+template <class Prof, class T>
+constexpr bool ShouldDeleteConstructor() {
+ return !((PropertiesOfT<Prof>::move_constructible_support !=
+ move_constructible::maybe &&
+ std::is_same<T, Archetype<Prof>>::value) ||
+ (PropertiesOfT<Prof>::copy_constructible_support !=
+ copy_constructible::maybe &&
+ (std::is_same<T, const Archetype<Prof>&>::value ||
+ std::is_same<T, Archetype<Prof>&>::value ||
+ std::is_same<T, const Archetype<Prof>>::value)));
+}
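+
+// For example (illustrative), with a profile P that has
+// move_constructible::nothrow and copy_constructible::maybe:
+//
+//   ShouldDeleteConstructor<P, Archetype<P>>()         // false: moves allowed
+//   ShouldDeleteConstructor<P, const Archetype<P>&>()  // true: copies poisoned
+//   ShouldDeleteConstructor<P, int>()                  // true: implicit
+//                                                      // conversions poisoned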
+
+// This is used to conditionally delete "copy" and "move" assignments in a
+// way that is consistent with what the ConformanceProfile requires, and that
+// also ensures the arguments to the copy/move do not come from implicit
+// conversions when dealing with the Archetype.
+template <class Prof, class T>
+constexpr bool ShouldDeleteAssign() {
+ return !(
+ (PropertiesOfT<Prof>::move_assignable_support != move_assignable::maybe &&
+ std::is_same<T, Archetype<Prof>>::value) ||
+ (PropertiesOfT<Prof>::copy_assignable_support != copy_assignable::maybe &&
+ (std::is_same<T, const Archetype<Prof>&>::value ||
+ std::is_same<T, Archetype<Prof>&>::value ||
+ std::is_same<T, const Archetype<Prof>>::value)));
+}
+
+// TODO(calabrese) Inherit from a chain of secondary bases to pull in the
+// associated functions of other concepts.
+template <class Prof, class Enabler>
+class Archetype : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support> {
+ static_assert(std::is_same<Enabler, void>::value,
+ "An explicit type must not be passed as the second template "
+ "argument to 'Archetype`.");
+
+ // The cases mentioned in these static_asserts are expected to be handled in
+ // the partial template specializations of Archetype that follow this
+ // definition.
+ static_assert(PropertiesOfT<Prof>::destructible_support !=
+ destructible::maybe,
+ "");
+ static_assert(PropertiesOfT<Prof>::move_constructible_support !=
+ move_constructible::maybe ||
+ PropertiesOfT<Prof>::copy_constructible_support ==
+ copy_constructible::maybe,
+ "");
+ static_assert(PropertiesOfT<Prof>::move_assignable_support !=
+ move_assignable::maybe ||
+ PropertiesOfT<Prof>::copy_assignable_support ==
+ copy_assignable::maybe,
+ "");
+
+ public:
+ Archetype() = default;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteConstructor<Prof, T>()>::type* = nullptr>
+ Archetype(T&&) = delete;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteAssign<Prof, T>()>::type* = nullptr>
+ Archetype& operator=(T&&) = delete;
+
+ using ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>::archetype_state;
+
+ private:
+ explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>(MakeArchetypeState(),
+ state) {}
+
+ friend Archetype MakeArchetype<Archetype>(ArchetypeState) noexcept;
+};
+
+template <class Prof>
+class Archetype<Prof, typename std::enable_if<
+ PropertiesOfT<Prof>::move_constructible_support !=
+ move_constructible::maybe &&
+ PropertiesOfT<Prof>::move_assignable_support ==
+ move_assignable::maybe &&
+ PropertiesOfT<Prof>::destructible_support !=
+ destructible::maybe>::type>
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support> {
+ public:
+ Archetype() = default;
+ Archetype(Archetype&&) = default;
+ Archetype(const Archetype&) = default;
+ Archetype& operator=(Archetype&&) = delete;
+ Archetype& operator=(const Archetype&) = default;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteConstructor<Prof, T>()>::type* = nullptr>
+ Archetype(T&&) = delete;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteAssign<Prof, T>()>::type* = nullptr>
+ Archetype& operator=(T&&) = delete;
+
+ using ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>::archetype_state;
+
+ private:
+ explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>(MakeArchetypeState(),
+ state) {}
+
+ friend Archetype MakeArchetype<Archetype>(ArchetypeState) noexcept;
+};
+
+template <class Prof>
+class Archetype<Prof, typename std::enable_if<
+ PropertiesOfT<Prof>::move_constructible_support ==
+ move_constructible::maybe &&
+ PropertiesOfT<Prof>::move_assignable_support ==
+ move_assignable::maybe &&
+ PropertiesOfT<Prof>::destructible_support !=
+ destructible::maybe>::type>
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support> {
+ public:
+ Archetype() = default;
+ Archetype(Archetype&&) = delete;
+ Archetype(const Archetype&) = default;
+ Archetype& operator=(Archetype&&) = delete;
+ Archetype& operator=(const Archetype&) = default;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteConstructor<Prof, T>()>::type* = nullptr>
+ Archetype(T&&) = delete;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteAssign<Prof, T>()>::type* = nullptr>
+ Archetype& operator=(T&&) = delete;
+
+ using ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>::archetype_state;
+
+ private:
+ explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>(MakeArchetypeState(),
+ state) {}
+
+ friend Archetype MakeArchetype<Archetype>(ArchetypeState) noexcept;
+};
+
+template <class Prof>
+class Archetype<Prof, typename std::enable_if<
+ PropertiesOfT<Prof>::move_constructible_support ==
+ move_constructible::maybe &&
+ PropertiesOfT<Prof>::move_assignable_support !=
+ move_assignable::maybe &&
+ PropertiesOfT<Prof>::destructible_support !=
+ destructible::maybe>::type>
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support> {
+ public:
+ Archetype() = default;
+ Archetype(Archetype&&) = delete;
+ Archetype(const Archetype&) = default;
+ Archetype& operator=(Archetype&&) = default;
+ Archetype& operator=(const Archetype&) = default;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteConstructor<Prof, T>()>::type* = nullptr>
+ Archetype(T&&) = delete;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteAssign<Prof, T>()>::type* = nullptr>
+ Archetype& operator=(T&&) = delete;
+
+ using ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>::archetype_state;
+
+ private:
+ explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>(MakeArchetypeState(),
+ state) {}
+
+ friend Archetype MakeArchetype<Archetype>(ArchetypeState) noexcept;
+};
+
+template <class Prof>
+class Archetype<Prof, typename std::enable_if<
+ PropertiesOfT<Prof>::move_constructible_support !=
+ move_constructible::maybe &&
+ PropertiesOfT<Prof>::move_assignable_support ==
+ move_assignable::maybe &&
+ PropertiesOfT<Prof>::destructible_support ==
+ destructible::maybe>::type>
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support> {
+ public:
+ Archetype() = default;
+ Archetype(Archetype&&) = default;
+ Archetype(const Archetype&) = default;
+ Archetype& operator=(Archetype&&) = delete;
+ Archetype& operator=(const Archetype&) = default;
+ ~Archetype() = delete;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteConstructor<Prof, T>()>::type* = nullptr>
+ Archetype(T&&) = delete;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteAssign<Prof, T>()>::type* = nullptr>
+ Archetype& operator=(T&&) = delete;
+
+ using ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>::archetype_state;
+
+ private:
+ explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>(MakeArchetypeState(),
+ state) {}
+
+ friend Archetype MakeArchetype<Archetype>(ArchetypeState) noexcept;
+};
+
+template <class Prof>
+class Archetype<Prof, typename std::enable_if<
+ PropertiesOfT<Prof>::move_constructible_support ==
+ move_constructible::maybe &&
+ PropertiesOfT<Prof>::move_assignable_support ==
+ move_assignable::maybe &&
+ PropertiesOfT<Prof>::destructible_support ==
+ destructible::maybe>::type>
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support> {
+ public:
+ Archetype() = default;
+ Archetype(Archetype&&) = delete;
+ Archetype(const Archetype&) = default;
+ Archetype& operator=(Archetype&&) = delete;
+ Archetype& operator=(const Archetype&) = default;
+ ~Archetype() = delete;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteConstructor<Prof, T>()>::type* = nullptr>
+ Archetype(T&&) = delete;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteAssign<Prof, T>()>::type* = nullptr>
+ Archetype& operator=(T&&) = delete;
+
+ using ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>::archetype_state;
+
+ private:
+ explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>(MakeArchetypeState(),
+ state) {}
+
+ friend Archetype MakeArchetype<Archetype>(ArchetypeState) noexcept;
+};
+
+template <class Prof>
+class Archetype<Prof, typename std::enable_if<
+ PropertiesOfT<Prof>::move_constructible_support ==
+ move_constructible::maybe &&
+ PropertiesOfT<Prof>::move_assignable_support !=
+ move_assignable::maybe &&
+ PropertiesOfT<Prof>::destructible_support ==
+ destructible::maybe>::type>
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support> {
+ public:
+ Archetype() = default;
+ Archetype(Archetype&&) = delete;
+ Archetype(const Archetype&) = default;
+ Archetype& operator=(Archetype&&) = default;
+ Archetype& operator=(const Archetype&) = default;
+ ~Archetype() = delete;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteConstructor<Prof, T>()>::type* = nullptr>
+ Archetype(T&&) = delete;
+
+ // Disallow moves when requested, and disallow implicit conversions.
+ template <class T, typename std::enable_if<
+ ShouldDeleteAssign<Prof, T>()>::type* = nullptr>
+ Archetype& operator=(T&&) = delete;
+
+ using ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>::archetype_state;
+
+ private:
+ explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept
+ : ArchetypeSpecialMembersBase<
+ PropertiesOfT<Prof>::default_constructible_support,
+ PropertiesOfT<Prof>::move_constructible_support,
+ PropertiesOfT<Prof>::copy_constructible_support,
+ PropertiesOfT<Prof>::move_assignable_support,
+ PropertiesOfT<Prof>::copy_assignable_support,
+ PropertiesOfT<Prof>::destructible_support>(MakeArchetypeState(),
+ state) {}
+
+ friend Archetype MakeArchetype<Archetype>(ArchetypeState) noexcept;
+};
+
+// Explicitly deleted swap for Archetype if the profile does not require swap.
+// It is important to delete it rather than simply leave it out so that the
+// "using std::swap;" idiom will result in this deleted overload being picked.
+template <class Prof,
+ y_absl::enable_if_t<!PropertiesOfT<Prof>::is_swappable, int> = 0>
+void swap(Archetype<Prof>&, Archetype<Prof>&) = delete; // NOLINT
+
+// A conditionally-noexcept swap implementation for Archetype when the profile
+// supports swap.
+template <class Prof,
+ y_absl::enable_if_t<PropertiesOfT<Prof>::is_swappable, int> = 0>
+void swap(Archetype<Prof>& lhs, Archetype<Prof>& rhs) // NOLINT
+ noexcept(PropertiesOfT<Prof>::swappable_support != swappable::yes) {
+ std::swap(lhs.archetype_state, rhs.archetype_state);
+}
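+
+// With the standard idiom this means (illustrative):
+//
+//   using std::swap;
+//   swap(lhs, rhs);  // selects the deleted overload above, and is therefore
+//                    // ill-formed, when the profile does not support swap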
+
+// A convertible-to-bool type that is used as the return type of comparison
+// operators since the standard doesn't always require exactly bool.
+struct NothrowBool {
+ explicit NothrowBool() = delete;
+ ~NothrowBool() = default;
+
+ // TODO(calabrese) Delete the copy constructor in C++17 mode since guaranteed
+ // elision makes it not required when returning from a function.
+ // NothrowBool(NothrowBool const&) = delete;
+
+ NothrowBool& operator=(NothrowBool const&) = delete;
+
+ explicit operator bool() const noexcept { return value; }
+
+ static NothrowBool make(bool const value) noexcept {
+ return NothrowBool(value);
+ }
+
+ private:
+ explicit NothrowBool(bool const value) noexcept : value(value) {}
+
+ bool value;
+};
+
+// A convertible-to-bool type that is used as the return type of comparison
+// operators since the standard doesn't always require exactly bool.
+// Note: ExceptionalBool has a conversion operator that is not noexcept, so
+// that even when a comparison operator is noexcept, that operation may still
+// potentially throw when converted to bool.
+struct ExceptionalBool {
+ explicit ExceptionalBool() = delete;
+ ~ExceptionalBool() = default;
+
+ // TODO(calabrese) Delete the copy constructor in C++17 mode since guaranteed
+ // elision makes it not required when returning from a function.
+ // ExceptionalBool(ExceptionalBool const&) = delete;
+
+ ExceptionalBool& operator=(ExceptionalBool const&) = delete;
+
+ explicit operator bool() const { return value; } // NOLINT
+
+ static ExceptionalBool make(bool const value) noexcept {
+ return ExceptionalBool(value);
+ }
+
+ private:
+ explicit ExceptionalBool(bool const value) noexcept : value(value) {}
+
+ bool value;
+};
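+
+// Because the conversions to bool are explicit, both result types permit
+// contextual conversion but not implicit conversion (illustrative):
+//
+//   if (lhs == rhs) { /* OK: contextual conversion to bool */ }
+//   bool b = (lhs == rhs);                   // ill-formed
+//   bool c = static_cast<bool>(lhs == rhs);  // OK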
+
+// The following macro is only used as a helper in this file to stamp out
+// comparison operator definitions. It is undefined after usage.
+//
+// NOTE: Non-nothrow operators may throw via their result's conversion to bool
+// even though the comparison operation itself is noexcept.
+#define ABSL_TYPES_INTERNAL_OP(enum_name, op) \
+ template <class Prof> \
+ y_absl::enable_if_t<!PropertiesOfT<Prof>::is_##enum_name, bool> operator op( \
+ const Archetype<Prof>&, const Archetype<Prof>&) = delete; \
+ \
+ template <class Prof> \
+ typename y_absl::enable_if_t< \
+ PropertiesOfT<Prof>::is_##enum_name, \
+ std::conditional<PropertiesOfT<Prof>::enum_name##_support == \
+ enum_name::nothrow, \
+ NothrowBool, ExceptionalBool>>::type \
+ operator op(const Archetype<Prof>& lhs, \
+ const Archetype<Prof>& rhs) noexcept { \
+ return y_absl::conditional_t< \
+ PropertiesOfT<Prof>::enum_name##_support == enum_name::nothrow, \
+ NothrowBool, ExceptionalBool>::make(lhs.archetype_state op \
+ rhs.archetype_state); \
+ }
+
+ABSL_TYPES_INTERNAL_OP(equality_comparable, ==);
+ABSL_TYPES_INTERNAL_OP(inequality_comparable, !=);
+ABSL_TYPES_INTERNAL_OP(less_than_comparable, <);
+ABSL_TYPES_INTERNAL_OP(less_equal_comparable, <=);
+ABSL_TYPES_INTERNAL_OP(greater_equal_comparable, >=);
+ABSL_TYPES_INTERNAL_OP(greater_than_comparable, >);
+
+#undef ABSL_TYPES_INTERNAL_OP
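+
+// For a profile with equality_comparable::nothrow, the machinery above yields
+// (roughly) the signature below; the return type is ExceptionalBool instead
+// when the support level is equality_comparable::yes:
+//
+//   NothrowBool operator==(const Archetype<Prof>& lhs,
+//                          const Archetype<Prof>& rhs) noexcept;
+//
+// A profile with equality_comparable::maybe gets only a deleted operator==
+// returning bool.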
+
+// Base class for std::hash specializations when an Archetype doesn't support
+// hashing.
+struct PoisonedHash {
+ PoisonedHash() = delete;
+ PoisonedHash(const PoisonedHash&) = delete;
+ PoisonedHash& operator=(const PoisonedHash&) = delete;
+};
+
+// Base class for std::hash specializations when an Archetype supports hashing.
+template <class Prof>
+struct EnabledHash {
+ using argument_type = Archetype<Prof>;
+ using result_type = std::size_t;
+ result_type operator()(const argument_type& arg) const {
+ return std::hash<ArchetypeState>()(arg.archetype_state);
+ }
+};
+
+} // namespace types_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+namespace std {
+
+template <class Prof> // NOLINT
+struct hash<::y_absl::types_internal::Archetype<Prof>>
+ : conditional<::y_absl::types_internal::PropertiesOfT<Prof>::is_hashable,
+ ::y_absl::types_internal::EnabledHash<Prof>,
+ ::y_absl::types_internal::PoisonedHash>::type {};
+
+} // namespace std
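+
+// As a consequence of the specialization above (illustrative):
+//
+//   std::hash<Archetype<Prof>>{}(arch);  // OK iff the profile is hashable;
+//                                        // otherwise the base is PoisonedHash
+//                                        // and the call is ill-formed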
+
+#endif // ABSL_TYPES_INTERNAL_CONFORMANCE_ARCHETYPE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_profile.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_profile.h
new file mode 100644
index 00000000000..b19165a4b96
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_profile.h
@@ -0,0 +1,931 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// conformance_profile.h
+// -----------------------------------------------------------------------------
+//
+// This file contains templates for representing "Regularity Profiles" and
+// concisely-named versions of commonly used Regularity Profiles.
+//
+// A Regularity Profile is a compile-time description of the types of operations
+// that a given type supports, along with properties of those operations when
+// they do exist. For instance, a Regularity Profile may describe a type that
+// has a move-constructor that is noexcept and a copy constructor that is not
+// noexcept. This description can then be examined and passed around to other
+// templates for the purposes of asserting expectations on user-defined types
+// via a series of trait checks, or for determining what kinds of run-time
+// tests can be performed.
+//
+// Regularity Profiles are also used when creating "archetypes," which are
+// minimum-conforming types that meet all of the requirements of a given
+// Regularity Profile. For more information regarding archetypes, see
+// "conformance_archetypes.h".
+
+#ifndef ABSL_TYPES_INTERNAL_CONFORMANCE_PROFILE_H_
+#define ABSL_TYPES_INTERNAL_CONFORMANCE_PROFILE_H_
+
+#include <set>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "y_absl/algorithm/container.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/ascii.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/types/internal/conformance_testing_helpers.h"
+#include "y_absl/utility/utility.h"
+
+// TODO(calabrese) Add support for extending profiles.
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace types_internal {
+
+// Converts an enum to its underlying integral value.
+template <typename Enum>
+constexpr y_absl::underlying_type_t<Enum> UnderlyingValue(Enum value) {
+ return static_cast<y_absl::underlying_type_t<Enum>>(value);
+}
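+
+// For example, with an enum class whose enumerators start at zero (as with
+// `hashable`, defined later in this file):
+//
+//   static_assert(UnderlyingValue(hashable::yes) == 1, "");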
+
+// A tag type used in place of a matcher when checking that an assertion result
+// does not actually contain any errors.
+struct NoError {};
+
+// -----------------------------------------------------------------------------
+// ConformanceErrors
+// -----------------------------------------------------------------------------
+class ConformanceErrors {
+ public:
+  // Set up the error-reporting mechanism by seeding it with the name of the
+  // type that is being tested.
+ explicit ConformanceErrors(TString type_name)
+ : assertion_result_(false), type_name_(std::move(type_name)) {
+ assertion_result_ << "\n\n"
+ "Assuming the following type alias:\n"
+ "\n"
+ " using _T = "
+ << type_name_ << ";\n\n";
+ outputDivider();
+ }
+
+ // Adds the test name to the list of successfully run tests iff it was not
+ // previously reported as failing. This behavior is useful for tests that
+ // have multiple parts, where failures and successes are reported individually
+ // with the same test name.
+ void addTestSuccess(y_absl::string_view test_name) {
+ auto normalized_test_name = y_absl::AsciiStrToLower(test_name);
+
+ // If the test is already reported as failing, do not add it to the list of
+ // successes.
+ if (test_failures_.find(normalized_test_name) == test_failures_.end()) {
+ test_successes_.insert(std::move(normalized_test_name));
+ }
+ }
+
+ // Streams a single error description into the internal buffer (a visual
+ // divider is automatically inserted after the error so that multiple errors
+ // are visibly distinct).
+ //
+  // This function also marks the overall result as a failure.
+ //
+  // TODO(calabrese) Determine desired behavior if this function throws.
+ template <class... P>
+ void addTestFailure(y_absl::string_view test_name, const P&... args) {
+ // Output a message related to the test failure.
+ assertion_result_ << "\n\n"
+ "Failed test: "
+ << test_name << "\n\n";
+ addTestFailureImpl(args...);
+ assertion_result_ << "\n\n";
+ outputDivider();
+
+ auto normalized_test_name = y_absl::AsciiStrToLower(test_name);
+
+ // If previous parts of this test succeeded, remove it from that set.
+ test_successes_.erase(normalized_test_name);
+
+ // Add the test name to the list of failed tests.
+ test_failures_.insert(std::move(normalized_test_name));
+
+ has_error_ = true;
+ }
+
+ // Convert this object into a testing::AssertionResult instance such that it
+ // can be used with gtest.
+ ::testing::AssertionResult assertionResult() const {
+ return has_error_ ? assertion_result_ : ::testing::AssertionSuccess();
+ }
+
+ // Convert this object into a testing::AssertionResult instance such that it
+  // can be used with gtest. This overload expects failures: the specified
+  // test names are the tests that are expected to fail.
+ ::testing::AssertionResult expectFailedTests(
+ const std::set<TString>& test_names) const {
+ // Since we are expecting nonconformance, output an error message when the
+ // type actually conformed to the specified profile.
+ if (!has_error_) {
+ return ::testing::AssertionFailure()
+ << "Unexpected conformance of type:\n"
+ " "
+ << type_name_ << "\n\n";
+ }
+
+ // Get a list of all expected failures that did not actually fail
+ // (or that were not run).
+ std::vector<TString> nonfailing_tests;
+ y_absl::c_set_difference(test_names, test_failures_,
+ std::back_inserter(nonfailing_tests));
+
+ // Get a list of all "expected failures" that were never actually run.
+ std::vector<TString> unrun_tests;
+ y_absl::c_set_difference(nonfailing_tests, test_successes_,
+ std::back_inserter(unrun_tests));
+
+ // Report when the user specified tests that were not run.
+ if (!unrun_tests.empty()) {
+ const bool tests_were_run =
+ !(test_failures_.empty() && test_successes_.empty());
+
+      // Prepare an assertion result for the case in which expected-failure
+      // tests were never run.
+ ::testing::AssertionResult result = ::testing::AssertionFailure();
+ result << "When testing type:\n " << type_name_
+ << "\n\nThe following tests were expected to fail but were not "
+ "run";
+
+ if (tests_were_run) result << " (was the test name spelled correctly?)";
+
+ result << ":\n\n";
+
+      // List all of the expected-failure tests that were never run.
+ for (const auto& test_name : unrun_tests) {
+ result << " " << test_name << "\n";
+ }
+
+ if (!tests_were_run) result << "\nNo tests were run.";
+
+ if (!test_failures_.empty()) {
+ // List test failures
+ result << "\nThe tests that were run and failed are:\n\n";
+ for (const auto& test_name : test_failures_) {
+ result << " " << test_name << "\n";
+ }
+ }
+
+ if (!test_successes_.empty()) {
+ // List test successes
+ result << "\nThe tests that were run and succeeded are:\n\n";
+ for (const auto& test_name : test_successes_) {
+ result << " " << test_name << "\n";
+ }
+ }
+
+ return result;
+ }
+
+    // If every expected failure actually failed, the expectation is met;
+    // otherwise, alert the caller to the tests that unexpectedly passed.
+ if (nonfailing_tests.empty()) return ::testing::AssertionSuccess();
+
+ // Prepare an assertion result used in the case that tests pass that were
+ // expected to fail.
+ ::testing::AssertionResult unexpected_successes =
+ ::testing::AssertionFailure();
+ unexpected_successes << "When testing type:\n " << type_name_
+ << "\n\nThe following tests passed when they were "
+ "expected to fail:\n\n";
+
+ // List all of the tests that unexpectedly passed.
+ for (const auto& test_name : nonfailing_tests) {
+ unexpected_successes << " " << test_name << "\n";
+ }
+
+ return unexpected_successes;
+ }
+
+ private:
+ void outputDivider() {
+ assertion_result_ << "========================================";
+ }
+
+ void addTestFailureImpl() {}
+
+ template <class H, class... T>
+ void addTestFailureImpl(const H& head, const T&... tail) {
+ assertion_result_ << head;
+ addTestFailureImpl(tail...);
+ }
+
+ ::testing::AssertionResult assertion_result_;
+ std::set<TString> test_failures_;
+ std::set<TString> test_successes_;
+ TString type_name_;
+ bool has_error_ = false;
+};
+
+template <class T, class /*Enabler*/ = void>
+struct PropertiesOfImpl {};
+
+template <class T>
+struct PropertiesOfImpl<T, y_absl::void_t<typename T::properties>> {
+ using type = typename T::properties;
+};
+
+template <class T>
+struct PropertiesOfImpl<T, y_absl::void_t<typename T::profile_alias_of>> {
+ using type = typename PropertiesOfImpl<typename T::profile_alias_of>::type;
+};
+
+template <class T>
+struct PropertiesOf : PropertiesOfImpl<T> {};
+
+template <class T>
+using PropertiesOfT = typename PropertiesOf<T>::type;
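+
+// PropertiesOfT resolves through chains of profile aliases. An illustrative
+// (hypothetical) profile:
+//
+//   struct MyProfile { using profile_alias_of = SomeExistingProfile; };
+//   using Props = PropertiesOfT<MyProfile>;
+//   // Props is the properties type of SomeExistingProfile.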
+
+// NOTE: These enums use this naming convention to be consistent with the
+// standard trait names, which is useful since it allows us to match up each
+// enum name with a corresponding trait name in macro definitions.
+
+// An enum that describes the various expectations on an operation's existence.
+enum class function_support { maybe, yes, nothrow, trivial };
+
+constexpr const char* PessimisticPropertyDescription(function_support v) {
+ return v == function_support::maybe
+ ? "no"
+ : v == function_support::yes
+ ? "yes, potentially throwing"
+ : v == function_support::nothrow ? "yes, nothrow"
+ : "yes, trivial";
+}
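+
+// For example:
+//
+//   PessimisticPropertyDescription(function_support::maybe)    // "no"
+//   PessimisticPropertyDescription(function_support::trivial)  // "yes, trivial"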
+
+// Return a string that describes the kind of property support that was
+// expected.
+inline TString ExpectedFunctionKindList(function_support min,
+ function_support max) {
+ if (min == max) {
+ TString result =
+ y_absl::StrCat("Expected:\n ",
+ PessimisticPropertyDescription(
+ static_cast<function_support>(UnderlyingValue(min))),
+ "\n");
+ return result;
+ }
+
+ TString result = "Expected one of:\n";
+ for (auto curr_support = UnderlyingValue(min);
+ curr_support <= UnderlyingValue(max); ++curr_support) {
+ y_absl::StrAppend(&result, " ",
+ PessimisticPropertyDescription(
+ static_cast<function_support>(curr_support)),
+ "\n");
+ }
+
+ return result;
+}
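+
+// For example, ExpectedFunctionKindList(function_support::yes,
+// function_support::nothrow) produces:
+//
+//   Expected one of:
+//     yes, potentially throwing
+//     yes, nothrow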
+
+template <class Enum>
+void ExpectModelOfImpl(ConformanceErrors* errors, Enum min_support,
+ Enum max_support, Enum kind) {
+ const auto kind_value = UnderlyingValue(kind);
+ const auto min_support_value = UnderlyingValue(min_support);
+ const auto max_support_value = UnderlyingValue(max_support);
+
+ if (!(kind_value >= min_support_value && kind_value <= max_support_value)) {
+ errors->addTestFailure(
+ PropertyName(kind), "**Failed property expectation**\n\n",
+ ExpectedFunctionKindList(
+ static_cast<function_support>(min_support_value),
+ static_cast<function_support>(max_support_value)),
+ '\n', "Actual:\n ",
+ PessimisticPropertyDescription(
+ static_cast<function_support>(kind_value)));
+ } else {
+ errors->addTestSuccess(PropertyName(kind));
+ }
+}
+
+#define ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM(description, name) \
+ enum class name { maybe, yes, nothrow, trivial }; \
+ \
+ constexpr const char* PropertyName(name v) { return description; } \
+ static_assert(true, "") // Force a semicolon when using this macro.
+
+ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for default construction",
+ default_constructible);
+ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for move construction",
+ move_constructible);
+ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for copy construction",
+ copy_constructible);
+ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for move assignment",
+ move_assignable);
+ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for copy assignment",
+ copy_assignable);
+ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for destruction",
+ destructible);
+
+#undef ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM
+
+#define ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM(description, name) \
+ enum class name { maybe, yes, nothrow }; \
+ \
+ constexpr const char* PropertyName(name v) { return description; } \
+ static_assert(true, "") // Force a semicolon when using this macro.
+
+ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for ==", equality_comparable);
+ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for !=", inequality_comparable);
+ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for <", less_than_comparable);
+ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for <=", less_equal_comparable);
+ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for >=",
+ greater_equal_comparable);
+ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for >", greater_than_comparable);
+
+ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for swap", swappable);
+
+#undef ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM
+
+enum class hashable { maybe, yes };
+
+constexpr const char* PropertyName(hashable v) {
+ return "support for std::hash";
+}
+
+template <class T>
+using AlwaysFalse = std::false_type;
+
+#define ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(name, property) \
+ template <class T> \
+ constexpr property property##_support_of() { \
+ return std::is_##property<T>::value \
+ ? std::is_nothrow_##property<T>::value \
+ ? y_absl::is_trivially_##property<T>::value \
+ ? property::trivial \
+ : property::nothrow \
+ : property::yes \
+ : property::maybe; \
+ } \
+ \
+ template <class T, class MinProf, class MaxProf> \
+ void ExpectModelOf##name(ConformanceErrors* errors) { \
+ (ExpectModelOfImpl)(errors, PropertiesOfT<MinProf>::property##_support, \
+ PropertiesOfT<MaxProf>::property##_support, \
+ property##_support_of<T>()); \
+ }
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(DefaultConstructible,
+ default_constructible);
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(MoveConstructible,
+ move_constructible);
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(CopyConstructible,
+ copy_constructible);
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(MoveAssignable,
+ move_assignable);
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(CopyAssignable,
+ copy_assignable);
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(Destructible, destructible);
+
+#undef ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER
+
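+// Example (illustrative only): each generated `*_support_of` function
+// classifies a type as the strongest of the four enumerators that its traits
+// can prove. `int` is trivially default-constructible, so:
+//
+//   static_assert(default_constructible_support_of<int>() ==
+//                     default_constructible::trivial,
+//                 "");
+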
+void BoolFunction(bool) noexcept;
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction for checking if an operation exists through SFINAE.
+//
+// `T` is the type to test and `Op` is an alias containing the expression to test.
+template <class T, template <class...> class Op, class = void>
+struct IsOpableImpl : std::false_type {};
+
+template <class T, template <class...> class Op>
+struct IsOpableImpl<T, Op, y_absl::void_t<Op<T>>> : std::true_type {};
+
+template <template <class...> class Op>
+struct IsOpable {
+ template <class T>
+ using apply = typename IsOpableImpl<T, Op>::type;
+};
+//
+////////////////////////////////////////////////////////////////////////////////
+
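+// Example (illustrative only; `DerefExpr` is a hypothetical alias): detecting
+// whether unary `*` applies to a type:
+//
+//   template <class T>
+//   using DerefExpr = decltype(*std::declval<T&>());
+//
+//   static_assert(IsOpable<DerefExpr>::apply<int*>::value, "");
+//   static_assert(!IsOpable<DerefExpr>::apply<int>::value, "");
+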
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction for checking if an operation exists and is also noexcept
+// through SFINAE and the noexcept operator.
+//
+// `T` is the type to test and `Op` is an alias containing the expression to test.
+template <class T, template <class...> class Op, class = void>
+struct IsNothrowOpableImpl : std::false_type {};
+
+template <class T, template <class...> class Op>
+struct IsNothrowOpableImpl<T, Op, y_absl::enable_if_t<Op<T>::value>>
+ : std::true_type {};
+
+template <template <class...> class Op>
+struct IsNothrowOpable {
+ template <class T>
+ using apply = typename IsNothrowOpableImpl<T, Op>::type;
+};
+//
+////////////////////////////////////////////////////////////////////////////////
+
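+// Example (illustrative only; `EqExpr` is a hypothetical alias): unlike with
+// `IsOpable`, the alias passed to `IsNothrowOpable` must itself be a bool
+// constant whose value reflects the noexcept-ness of the expression:
+//
+//   template <class T, class Result = std::integral_constant<
+//                          bool, noexcept(std::declval<const T&>() ==
+//                                         std::declval<const T&>())>>
+//   using EqExpr = Result;
+//
+//   static_assert(IsNothrowOpable<EqExpr>::apply<int>::value, "");
+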
+////////////////////////////////////////////////////////////////////////////////
+//
+// A macro that produces the necessary function for reporting what kind of
+// support a specific comparison operation has and a function for reporting an
+// error if a given type's support for that operation does not meet the expected
+// requirements.
+#define ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(name, property, op) \
+ template <class T, \
+ class Result = std::integral_constant< \
+ bool, noexcept((BoolFunction)(std::declval<const T&>() op \
+ std::declval<const T&>()))>> \
+ using name = Result; \
+ \
+ template <class T> \
+ constexpr property property##_support_of() { \
+ return IsOpable<name>::apply<T>::value \
+ ? IsNothrowOpable<name>::apply<T>::value ? property::nothrow \
+ : property::yes \
+ : property::maybe; \
+ } \
+ \
+ template <class T, class MinProf, class MaxProf> \
+ void ExpectModelOf##name(ConformanceErrors* errors) { \
+ (ExpectModelOfImpl)(errors, PropertiesOfT<MinProf>::property##_support, \
+ PropertiesOfT<MaxProf>::property##_support, \
+ property##_support_of<T>()); \
+ }
+//
+////////////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Generate the necessary support-checking and error reporting functions for
+// each of the comparison operators.
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(EqualityComparable,
+ equality_comparable, ==);
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(InequalityComparable,
+ inequality_comparable, !=);
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(LessThanComparable,
+ less_than_comparable, <);
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(LessEqualComparable,
+ less_equal_comparable, <=);
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(GreaterEqualComparable,
+ greater_equal_comparable, >=);
+
+ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(GreaterThanComparable,
+ greater_than_comparable, >);
+
+#undef ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON
+//
+////////////////////////////////////////////////////////////////////////////////
+
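+// Example (illustrative only): built-in comparison of `int` is well-formed and
+// cannot throw, so the classifier generated for `==` reports:
+//
+//   static_assert(equality_comparable_support_of<int>() ==
+//                     equality_comparable::nothrow,
+//                 "");
+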
+////////////////////////////////////////////////////////////////////////////////
+//
+// The necessary support-checking and error-reporting functions for swap.
+template <class T>
+constexpr swappable swappable_support_of() {
+ return type_traits_internal::IsSwappable<T>::value
+ ? type_traits_internal::IsNothrowSwappable<T>::value
+ ? swappable::nothrow
+ : swappable::yes
+ : swappable::maybe;
+}
+
+template <class T, class MinProf, class MaxProf>
+void ExpectModelOfSwappable(ConformanceErrors* errors) {
+ (ExpectModelOfImpl)(errors, PropertiesOfT<MinProf>::swappable_support,
+ PropertiesOfT<MaxProf>::swappable_support,
+ swappable_support_of<T>());
+}
+//
+////////////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// The necessary support-checking and error-reporting functions for std::hash.
+template <class T>
+constexpr hashable hashable_support_of() {
+ return type_traits_internal::IsHashable<T>::value ? hashable::yes
+ : hashable::maybe;
+}
+
+template <class T, class MinProf, class MaxProf>
+void ExpectModelOfHashable(ConformanceErrors* errors) {
+ (ExpectModelOfImpl)(errors, PropertiesOfT<MinProf>::hashable_support,
+ PropertiesOfT<MaxProf>::hashable_support,
+ hashable_support_of<T>());
+}
+//
+////////////////////////////////////////////////////////////////////////////////
+
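+// Example (illustrative only): `std::hash<int>` is usable, so:
+//
+//   static_assert(hashable_support_of<int>() == hashable::yes, "");
+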
+template <
+ default_constructible DefaultConstructibleValue =
+ default_constructible::maybe,
+ move_constructible MoveConstructibleValue = move_constructible::maybe,
+ copy_constructible CopyConstructibleValue = copy_constructible::maybe,
+ move_assignable MoveAssignableValue = move_assignable::maybe,
+ copy_assignable CopyAssignableValue = copy_assignable::maybe,
+ destructible DestructibleValue = destructible::maybe,
+ equality_comparable EqualityComparableValue = equality_comparable::maybe,
+ inequality_comparable InequalityComparableValue =
+ inequality_comparable::maybe,
+ less_than_comparable LessThanComparableValue = less_than_comparable::maybe,
+ less_equal_comparable LessEqualComparableValue =
+ less_equal_comparable::maybe,
+ greater_equal_comparable GreaterEqualComparableValue =
+ greater_equal_comparable::maybe,
+ greater_than_comparable GreaterThanComparableValue =
+ greater_than_comparable::maybe,
+ swappable SwappableValue = swappable::maybe,
+ hashable HashableValue = hashable::maybe>
+struct ConformanceProfile {
+ using properties = ConformanceProfile;
+
+ static constexpr default_constructible
+ default_constructible_support = // NOLINT
+ DefaultConstructibleValue;
+
+ static constexpr move_constructible move_constructible_support = // NOLINT
+ MoveConstructibleValue;
+
+ static constexpr copy_constructible copy_constructible_support = // NOLINT
+ CopyConstructibleValue;
+
+ static constexpr move_assignable move_assignable_support = // NOLINT
+ MoveAssignableValue;
+
+ static constexpr copy_assignable copy_assignable_support = // NOLINT
+ CopyAssignableValue;
+
+ static constexpr destructible destructible_support = // NOLINT
+ DestructibleValue;
+
+ static constexpr equality_comparable equality_comparable_support = // NOLINT
+ EqualityComparableValue;
+
+ static constexpr inequality_comparable
+ inequality_comparable_support = // NOLINT
+ InequalityComparableValue;
+
+ static constexpr less_than_comparable
+ less_than_comparable_support = // NOLINT
+ LessThanComparableValue;
+
+ static constexpr less_equal_comparable
+ less_equal_comparable_support = // NOLINT
+ LessEqualComparableValue;
+
+ static constexpr greater_equal_comparable
+ greater_equal_comparable_support = // NOLINT
+ GreaterEqualComparableValue;
+
+ static constexpr greater_than_comparable
+ greater_than_comparable_support = // NOLINT
+ GreaterThanComparableValue;
+
+ static constexpr swappable swappable_support = SwappableValue; // NOLINT
+
+ static constexpr hashable hashable_support = HashableValue; // NOLINT
+
+ static constexpr bool is_default_constructible = // NOLINT
+ DefaultConstructibleValue != default_constructible::maybe;
+
+ static constexpr bool is_move_constructible = // NOLINT
+ MoveConstructibleValue != move_constructible::maybe;
+
+ static constexpr bool is_copy_constructible = // NOLINT
+ CopyConstructibleValue != copy_constructible::maybe;
+
+ static constexpr bool is_move_assignable = // NOLINT
+ MoveAssignableValue != move_assignable::maybe;
+
+ static constexpr bool is_copy_assignable = // NOLINT
+ CopyAssignableValue != copy_assignable::maybe;
+
+ static constexpr bool is_destructible = // NOLINT
+ DestructibleValue != destructible::maybe;
+
+ static constexpr bool is_equality_comparable = // NOLINT
+ EqualityComparableValue != equality_comparable::maybe;
+
+ static constexpr bool is_inequality_comparable = // NOLINT
+ InequalityComparableValue != inequality_comparable::maybe;
+
+ static constexpr bool is_less_than_comparable = // NOLINT
+ LessThanComparableValue != less_than_comparable::maybe;
+
+ static constexpr bool is_less_equal_comparable = // NOLINT
+ LessEqualComparableValue != less_equal_comparable::maybe;
+
+ static constexpr bool is_greater_equal_comparable = // NOLINT
+ GreaterEqualComparableValue != greater_equal_comparable::maybe;
+
+ static constexpr bool is_greater_than_comparable = // NOLINT
+ GreaterThanComparableValue != greater_than_comparable::maybe;
+
+ static constexpr bool is_swappable = // NOLINT
+ SwappableValue != swappable::maybe;
+
+ static constexpr bool is_hashable = // NOLINT
+ HashableValue != hashable::maybe;
+};
+
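+// Example (illustrative only; `MoveOnlyProf` is a hypothetical alias): a
+// profile for a type that must support nothrow move construction, with all
+// trailing properties left at their `maybe` defaults:
+//
+//   using MoveOnlyProf = ConformanceProfile<default_constructible::maybe,
+//                                           move_constructible::nothrow>;
+//
+//   static_assert(MoveOnlyProf::is_move_constructible, "");
+//   static_assert(!MoveOnlyProf::is_copy_constructible, "");
+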
+////////////////////////////////////////////////////////////////////////////////
+//
+// Compliant SFINAE-friendliness is not always present on the standard library
+// implementations that we support. This helper-struct (and associated enum) is
+// used as a means to conditionally check the hashability support of a type.
+enum class CheckHashability { no, yes };
+
+template <class T, CheckHashability ShouldCheckHashability>
+struct conservative_hashable_support_of;
+
+template <class T>
+struct conservative_hashable_support_of<T, CheckHashability::no> {
+ static constexpr hashable Invoke() { return hashable::maybe; }
+};
+
+template <class T>
+struct conservative_hashable_support_of<T, CheckHashability::yes> {
+ static constexpr hashable Invoke() { return hashable_support_of<T>(); }
+};
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// The ConformanceProfile that is expected based on introspection into the type
+// by way of trait checks.
+template <class T, CheckHashability ShouldCheckHashability>
+struct SyntacticConformanceProfileOf {
+ using properties = ConformanceProfile<
+ default_constructible_support_of<T>(), move_constructible_support_of<T>(),
+ copy_constructible_support_of<T>(), move_assignable_support_of<T>(),
+ copy_assignable_support_of<T>(), destructible_support_of<T>(),
+ equality_comparable_support_of<T>(),
+ inequality_comparable_support_of<T>(),
+ less_than_comparable_support_of<T>(),
+ less_equal_comparable_support_of<T>(),
+ greater_equal_comparable_support_of<T>(),
+ greater_than_comparable_support_of<T>(), swappable_support_of<T>(),
+ conservative_hashable_support_of<T, ShouldCheckHashability>::Invoke()>;
+};
+
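+// Example (illustrative only; `IntProf` is a hypothetical alias):
+// introspecting `int` with hashability checking enabled yields a profile whose
+// members mirror the trait results:
+//
+//   using IntProf =
+//       SyntacticConformanceProfileOf<int, CheckHashability::yes>::properties;
+//
+//   static_assert(IntProf::copy_constructible_support ==
+//                     copy_constructible::trivial,
+//                 "");
+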
+#define ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL(type, name) \
+ template <default_constructible DefaultConstructibleValue, \
+ move_constructible MoveConstructibleValue, \
+ copy_constructible CopyConstructibleValue, \
+ move_assignable MoveAssignableValue, \
+ copy_assignable CopyAssignableValue, \
+ destructible DestructibleValue, \
+ equality_comparable EqualityComparableValue, \
+ inequality_comparable InequalityComparableValue, \
+ less_than_comparable LessThanComparableValue, \
+ less_equal_comparable LessEqualComparableValue, \
+ greater_equal_comparable GreaterEqualComparableValue, \
+ greater_than_comparable GreaterThanComparableValue, \
+ swappable SwappableValue, hashable HashableValue> \
+ constexpr type ConformanceProfile< \
+ DefaultConstructibleValue, MoveConstructibleValue, \
+ CopyConstructibleValue, MoveAssignableValue, CopyAssignableValue, \
+ DestructibleValue, EqualityComparableValue, InequalityComparableValue, \
+ LessThanComparableValue, LessEqualComparableValue, \
+ GreaterEqualComparableValue, GreaterThanComparableValue, SwappableValue, \
+ HashableValue>::name
+
+#define ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(type) \
+ ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL(type, \
+ type##_support); \
+ ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL(bool, is_##type)
+
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(default_constructible);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(move_constructible);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(copy_constructible);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(move_assignable);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(copy_assignable);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(destructible);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(equality_comparable);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(inequality_comparable);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(less_than_comparable);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(less_equal_comparable);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(greater_equal_comparable);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(greater_than_comparable);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(swappable);
+ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(hashable);
+
+#undef ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF
+#undef ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL
+
+// Retrieve the enum with the minimum underlying value.
+// Note: std::min is not constexpr in C++11, which is why this is necessary.
+template <class H>
+constexpr H MinEnum(H head) {
+ return head;
+}
+
+template <class H, class N, class... T>
+constexpr H MinEnum(H head, N next, T... tail) {
+ return (UnderlyingValue)(head) < (UnderlyingValue)(next)
+ ? (MinEnum)(head, tail...)
+ : (MinEnum)(next, tail...);
+}
+
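+// Example (illustrative only): the fold keeps whichever argument has the
+// smallest underlying value, i.e. the weakest guarantee:
+//
+//   static_assert(MinEnum(function_support::trivial, function_support::yes,
+//                         function_support::nothrow) == function_support::yes,
+//                 "");
+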
+template <class... Profs>
+struct MinimalProfiles {
+ static constexpr default_constructible
+ default_constructible_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::default_constructible_support...);
+
+ static constexpr move_constructible move_constructible_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::move_constructible_support...);
+
+ static constexpr copy_constructible copy_constructible_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::copy_constructible_support...);
+
+ static constexpr move_assignable move_assignable_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::move_assignable_support...);
+
+ static constexpr copy_assignable copy_assignable_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::copy_assignable_support...);
+
+ static constexpr destructible destructible_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::destructible_support...);
+
+ static constexpr equality_comparable equality_comparable_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::equality_comparable_support...);
+
+ static constexpr inequality_comparable
+ inequality_comparable_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::inequality_comparable_support...);
+
+ static constexpr less_than_comparable
+ less_than_comparable_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::less_than_comparable_support...);
+
+ static constexpr less_equal_comparable
+ less_equal_comparable_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::less_equal_comparable_support...);
+
+ static constexpr greater_equal_comparable
+ greater_equal_comparable_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::greater_equal_comparable_support...);
+
+ static constexpr greater_than_comparable
+ greater_than_comparable_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::greater_than_comparable_support...);
+
+ static constexpr swappable swappable_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::swappable_support...);
+
+ static constexpr hashable hashable_support = // NOLINT
+ (MinEnum)(PropertiesOfT<Profs>::hashable_support...);
+
+ using properties = ConformanceProfile<
+ default_constructible_support, move_constructible_support,
+ copy_constructible_support, move_assignable_support,
+ copy_assignable_support, destructible_support,
+ equality_comparable_support, inequality_comparable_support,
+ less_than_comparable_support, less_equal_comparable_support,
+ greater_equal_comparable_support, greater_than_comparable_support,
+ swappable_support, hashable_support>;
+};
+
+// Retrieve the enum with the greatest underlying value.
+// Note: std::max is not constexpr in C++11, which is why this is necessary.
+template <class H>
+constexpr H MaxEnum(H head) {
+ return head;
+}
+
+template <class H, class N, class... T>
+constexpr H MaxEnum(H head, N next, T... tail) {
+ return (UnderlyingValue)(next) < (UnderlyingValue)(head)
+ ? (MaxEnum)(head, tail...)
+ : (MaxEnum)(next, tail...);
+}
+
+template <class... Profs>
+struct CombineProfilesImpl {
+ static constexpr default_constructible
+ default_constructible_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::default_constructible_support...);
+
+ static constexpr move_constructible move_constructible_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::move_constructible_support...);
+
+ static constexpr copy_constructible copy_constructible_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::copy_constructible_support...);
+
+ static constexpr move_assignable move_assignable_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::move_assignable_support...);
+
+ static constexpr copy_assignable copy_assignable_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::copy_assignable_support...);
+
+ static constexpr destructible destructible_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::destructible_support...);
+
+ static constexpr equality_comparable equality_comparable_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::equality_comparable_support...);
+
+ static constexpr inequality_comparable
+ inequality_comparable_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::inequality_comparable_support...);
+
+ static constexpr less_than_comparable
+ less_than_comparable_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::less_than_comparable_support...);
+
+ static constexpr less_equal_comparable
+ less_equal_comparable_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::less_equal_comparable_support...);
+
+ static constexpr greater_equal_comparable
+ greater_equal_comparable_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::greater_equal_comparable_support...);
+
+ static constexpr greater_than_comparable
+ greater_than_comparable_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::greater_than_comparable_support...);
+
+ static constexpr swappable swappable_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::swappable_support...);
+
+ static constexpr hashable hashable_support = // NOLINT
+ (MaxEnum)(PropertiesOfT<Profs>::hashable_support...);
+
+ using properties = ConformanceProfile<
+ default_constructible_support, move_constructible_support,
+ copy_constructible_support, move_assignable_support,
+ copy_assignable_support, destructible_support,
+ equality_comparable_support, inequality_comparable_support,
+ less_than_comparable_support, less_equal_comparable_support,
+ greater_equal_comparable_support, greater_than_comparable_support,
+ swappable_support, hashable_support>;
+};
+
+// NOTE: We use this as opposed to a direct alias of CombineProfilesImpl so that
+// when named aliases of CombineProfiles are created (such as in
+// conformance_aliases.h), we only pay for the combination algorithm on the
+// profiles that are actually used.
+template <class... Profs>
+struct CombineProfiles {
+ using profile_alias_of = CombineProfilesImpl<Profs...>;
+};
+
+template <>
+struct CombineProfiles<> {
+ using properties = ConformanceProfile<>;
+};
+
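+// Example (illustrative only; `A`, `B`, and `AB` are hypothetical aliases):
+// combining profiles keeps the strongest requirement for each operation:
+//
+//   using A = ConformanceProfile<default_constructible::maybe,
+//                                move_constructible::nothrow>;
+//   using B = ConformanceProfile<default_constructible::yes>;
+//   using AB = CombineProfiles<A, B>;
+//
+//   static_assert(PropertiesOfT<AB>::default_constructible_support ==
+//                     default_constructible::yes,
+//                 "");
+//   static_assert(PropertiesOfT<AB>::move_constructible_support ==
+//                     move_constructible::nothrow,
+//                 "");
+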
+template <class Profile, class Tag>
+struct StrongProfileTypedef {
+ using properties = PropertiesOfT<Profile>;
+};
+
+template <class T, class /*Enabler*/ = void>
+struct IsProfileImpl : std::false_type {};
+
+template <class T>
+struct IsProfileImpl<T, y_absl::void_t<PropertiesOfT<T>>> : std::true_type {};
+
+template <class T>
+struct IsProfile : IsProfileImpl<T>::type {};
+
+// A tag that describes which set of properties we will check when the user
+// requires a strict match in conformance (as opposed to a loose match which
+// allows more-refined support of any given operation).
+//
+// Currently only the RegularityDomain exists and it includes all operations
+// that the conformance testing suite knows about. The intent is that if the
+// suite is expanded to support extension, such as for checking conformance of
+// concepts like Iterators or Containers, additional corresponding domains can
+// be created.
+struct RegularityDomain {};
+
+} // namespace types_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TYPES_INTERNAL_CONFORMANCE_PROFILE_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_testing.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_testing.h
new file mode 100644
index 00000000000..799ba18e46d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_testing.h
@@ -0,0 +1,1386 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// conformance_testing.h
+// -----------------------------------------------------------------------------
+//
+
+#ifndef ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_H_
+#define ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_H_
+
+////////////////////////////////////////////////////////////////////////////////
+// //
+// Many templates in this file take a `T` and a `Prof` type as explicit //
+// template arguments. These are a type to be checked and a //
+// "Regularity Profile" that describes what operations that type `T` is //
+// expected to support. See "regularity_profiles.h" for more details //
+// regarding Regularity Profiles. //
+// //
+////////////////////////////////////////////////////////////////////////////////
+
+#include <cstddef>
+#include <set>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/ascii.h"
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/types/internal/conformance_aliases.h"
+#include "y_absl/types/internal/conformance_archetype.h"
+#include "y_absl/types/internal/conformance_profile.h"
+#include "y_absl/types/internal/conformance_testing_helpers.h"
+#include "y_absl/types/internal/parentheses.h"
+#include "y_absl/types/internal/transform_args.h"
+#include "y_absl/utility/utility.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace types_internal {
+
+// Returns true if the compiler incorrectly greedily instantiates constexpr
+// templates in any unevaluated context.
+constexpr bool constexpr_instantiation_when_unevaluated() {
+#if defined(__apple_build_version__) // TODO(calabrese) Make more specific
+ return true;
+#elif defined(__clang__)
+ return __clang_major__ < 4;
+#elif defined(__GNUC__)
+ // TODO(calabrese) Figure out why gcc 7 fails (seems like a different bug)
+ return __GNUC__ < 5 || (__GNUC__ == 5 && __GNUC_MINOR__ < 2) || __GNUC__ >= 7;
+#else
+ return false;
+#endif
+}
+
+// Returns true if the standard library being used incorrectly produces an error
+// when instantiating the definition of a poisoned std::hash specialization.
+constexpr bool poisoned_hash_fails_instantiation() {
+#if defined(_MSC_VER) && !defined(_LIBCPP_VERSION)
+ return _MSC_VER < 1914;
+#else
+ return false;
+#endif
+}
+
+template <class Fun>
+struct GeneratorType {
+ decltype(std::declval<const Fun&>()()) operator()() const
+ noexcept(noexcept(std::declval<const Fun&>()())) {
+ return fun();
+ }
+
+ Fun fun;
+ const char* description;
+};
+
+// A "make" function for the GeneratorType template that deduces the function
+// object type.
+template <class Fun,
+ y_absl::enable_if_t<IsNullaryCallable<Fun>::value>** = nullptr>
+GeneratorType<Fun> Generator(Fun fun, const char* description) {
+ return GeneratorType<Fun>{y_absl::move(fun), description};
+}
+
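+// Example (illustrative only): a generator pairs a nullary factory with the
+// source text used when reporting errors:
+//
+//   auto gen = Generator([] { return 42; }, "42");
+//   int value = gen();  // value == 42; "42" appears in failure reports.
+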
+// A type that contains a set of nullary function objects that each return an
+// instance of the same type and value (though possibly different
+// representations, such as +0 and -0 or two vectors with the same elements but
+// with different capacities).
+template <class... Funs>
+struct EquivalenceClassType {
+ std::tuple<GeneratorType<Funs>...> generators;
+};
+
+// A "make" function for the EquivalenceClassType template that deduces the
+// function object types and is constrained such that a user can only pass in
+// function objects that all have the same return type.
+template <class... Funs, y_absl::enable_if_t<AreGeneratorsWithTheSameReturnType<
+ Funs...>::value>** = nullptr>
+EquivalenceClassType<Funs...> EquivalenceClass(GeneratorType<Funs>... funs) {
+ return {std::make_tuple(y_absl::move(funs)...)};
+}
+
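+// Example (illustrative only): +0.0 and -0.0 compare equal, so generators for
+// the two representations belong in the same equivalence class:
+//
+//   auto eq_class =
+//       EquivalenceClass(Generator([] { return +0.0; }, "+0.0"),
+//                        Generator([] { return -0.0; }, "-0.0"));
+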
+// A type that contains an ordered series of EquivalenceClassTypes, from
+// smallest value to largest value.
+template <class... EqClasses>
+struct OrderedEquivalenceClasses {
+ std::tuple<EqClasses...> eq_classes;
+};
+
+// An object containing the parts of a given (its name and initialization
+// expression), capable of generating a string that describes that given.
+struct GivenDeclaration {
+ TString outputDeclaration(std::size_t width) const {
+ const std::size_t indent_size = 2;
+ TString result = y_absl::StrCat(" ", name);
+
+ if (!expression.empty()) {
+ // Indent
+ result.resize(indent_size + width, ' ');
+ y_absl::StrAppend(&result, " = ", expression, ";\n");
+ } else {
+ y_absl::StrAppend(&result, ";\n");
+ }
+
+ return result;
+ }
+
+ TString name;
+ TString expression;
+};
+
+// Produce a string that contains all of the givens of an error report.
+template <class... Decls>
+TString PrepareGivenContext(const Decls&... decls) {
+ const std::size_t width = (std::max)({decls.name.size()...});
+ return y_absl::StrCat("Given:\n", decls.outputDeclaration(width)..., "\n");
+}
+
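+// Example (illustrative only): given the declarations
+// {"const _T object", "+0.0"} and {"_T other", ""}, the resulting string is:
+//
+//   Given:
+//     const _T object = +0.0;
+//     _T other;
+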
+////////////////////////////////////////////////////////////////////////////////
+// Function objects that perform a check for each comparison operator //
+////////////////////////////////////////////////////////////////////////////////
+
+#define ABSL_INTERNAL_EXPECT_OP(name, op) \
+ struct Expect##name { \
+ template <class T> \
+ void operator()(y_absl::string_view test_name, y_absl::string_view context, \
+ const T& lhs, const T& rhs, y_absl::string_view lhs_name, \
+ y_absl::string_view rhs_name) const { \
+ if (!static_cast<bool>(lhs op rhs)) { \
+ errors->addTestFailure( \
+ test_name, y_absl::StrCat(context, \
+ "**Unexpected comparison result**\n" \
+ "\n" \
+ "Expression:\n" \
+ " ", \
+ lhs_name, " " #op " ", rhs_name, \
+ "\n" \
+ "\n" \
+ "Expected: true\n" \
+ " Actual: false")); \
+ } else { \
+ errors->addTestSuccess(test_name); \
+ } \
+ } \
+ \
+ ConformanceErrors* errors; \
+ }; \
+ \
+ struct ExpectNot##name { \
+ template <class T> \
+ void operator()(y_absl::string_view test_name, y_absl::string_view context, \
+ const T& lhs, const T& rhs, y_absl::string_view lhs_name, \
+ y_absl::string_view rhs_name) const { \
+ if (lhs op rhs) { \
+ errors->addTestFailure( \
+ test_name, y_absl::StrCat(context, \
+ "**Unexpected comparison result**\n" \
+ "\n" \
+ "Expression:\n" \
+ " ", \
+ lhs_name, " " #op " ", rhs_name, \
+ "\n" \
+ "\n" \
+ "Expected: false\n" \
+ " Actual: true")); \
+ } else { \
+ errors->addTestSuccess(test_name); \
+ } \
+ } \
+ \
+ ConformanceErrors* errors; \
+ }
+
+ABSL_INTERNAL_EXPECT_OP(Eq, ==);
+ABSL_INTERNAL_EXPECT_OP(Ne, !=);
+ABSL_INTERNAL_EXPECT_OP(Lt, <);
+ABSL_INTERNAL_EXPECT_OP(Le, <=);
+ABSL_INTERNAL_EXPECT_OP(Ge, >=);
+ABSL_INTERNAL_EXPECT_OP(Gt, >);
+
+#undef ABSL_INTERNAL_EXPECT_OP
+
+// A function object that verifies that two objects hash to the same value by
+// way of the std::hash specialization.
+struct ExpectSameHash {
+ template <class T>
+ void operator()(y_absl::string_view test_name, y_absl::string_view context,
+ const T& lhs, const T& rhs, y_absl::string_view lhs_name,
+ y_absl::string_view rhs_name) const {
+ if (std::hash<T>()(lhs) != std::hash<T>()(rhs)) {
+ errors->addTestFailure(
+ test_name, y_absl::StrCat(context,
+ "**Unexpected hash result**\n"
+ "\n"
+ "Expression:\n"
+ " std::hash<T>()(",
+ lhs_name, ") == std::hash<T>()(", rhs_name,
+ ")\n"
+ "\n"
+ "Expected: true\n"
+ " Actual: false"));
+ } else {
+ errors->addTestSuccess(test_name);
+ }
+ }
+
+ ConformanceErrors* errors;
+};
+
+// A function template that takes two objects and verifies that each comparison
+// operator behaves in a way that is consistent with equality. It has "OneWay"
+// in the name because the first argument will always be the left-hand operand
+// of the corresponding comparison operator and the second argument will
+// always be the right-hand operand. It will never switch that order.
+// At a higher level in the test suite, the one-way form is called once for each
+// of the two possible orders whenever lhs and rhs are not the same initializer.
+template <class T, class Prof>
+void ExpectOneWayEquality(ConformanceErrors* errors,
+ y_absl::string_view test_name,
+ y_absl::string_view context, const T& lhs, const T& rhs,
+ y_absl::string_view lhs_name,
+ y_absl::string_view rhs_name) {
+ If<PropertiesOfT<Prof>::is_equality_comparable>::Invoke(
+ ExpectEq{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name);
+
+ If<PropertiesOfT<Prof>::is_inequality_comparable>::Invoke(
+ ExpectNotNe{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name);
+
+ If<PropertiesOfT<Prof>::is_less_than_comparable>::Invoke(
+ ExpectNotLt{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name);
+
+ If<PropertiesOfT<Prof>::is_less_equal_comparable>::Invoke(
+ ExpectLe{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name);
+
+ If<PropertiesOfT<Prof>::is_greater_equal_comparable>::Invoke(
+ ExpectGe{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name);
+
+ If<PropertiesOfT<Prof>::is_greater_than_comparable>::Invoke(
+ ExpectNotGt{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name);
+
+ If<PropertiesOfT<Prof>::is_hashable>::Invoke(
+ ExpectSameHash{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name);
+}
+
+// A function template that takes two objects and verifies that each comparison
+// operator behaves in a way that is consistent with equality. This function
+// differs from ExpectOneWayEquality in that this will do checks with argument
+// order reversed in addition to in-order.
+template <class T, class Prof>
+void ExpectEquality(ConformanceErrors* errors, y_absl::string_view test_name,
+ y_absl::string_view context, const T& lhs, const T& rhs,
+ y_absl::string_view lhs_name, y_absl::string_view rhs_name) {
+ (ExpectOneWayEquality<T, Prof>)(errors, test_name, context, lhs, rhs,
+ lhs_name, rhs_name);
+ (ExpectOneWayEquality<T, Prof>)(errors, test_name, context, rhs, lhs,
+ rhs_name, lhs_name);
+}
+
+// Given a generator, makes sure that a generated value and a value
+// move-constructed from a generated value compare equal.
+template <class T, class Prof>
+struct ExpectMoveConstructOneGenerator {
+ template <class Fun>
+ void operator()(const Fun& generator) const {
+ const T object = generator();
+ const T moved_object = y_absl::move(generator()); // Force no elision.
+
+ (ExpectEquality<T, Prof>)(errors, "Move construction",
+ PrepareGivenContext(
+ GivenDeclaration{"const _T object",
+ generator.description},
+ GivenDeclaration{"const _T moved_object",
+ TString("std::move(") +
+ generator.description +
+ ")"}),
+ object, moved_object, "object", "moved_object");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Given a generator, makes sure that a generated value and a value
+// copy-constructed from a generated value compare equal.
+template <class T, class Prof>
+struct ExpectCopyConstructOneGenerator {
+ template <class Fun>
+ void operator()(const Fun& generator) const {
+ const T object = generator();
+ const T copied_object = static_cast<const T&>(generator());
+
+ (ExpectEquality<T, Prof>)(errors, "Copy construction",
+ PrepareGivenContext(
+ GivenDeclaration{"const _T object",
+ generator.description},
+ GivenDeclaration{
+ "const _T copied_object",
+ TString("static_cast<const _T&>(") +
+ generator.description + ")"}),
+ object, copied_object, "object", "copied_object");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Default-construct and do nothing before destruction.
+//
+// This is useful in exercising the codepath of default construction followed by
+// destruction, but does not explicitly test anything. An example of where this
+// might fail is a default constructor that default-initializes a scalar member
+// and a destructor that reads the value of that member. Sanitizers can catch
+// this as long
+// as our test attempts to execute such a case.
+template <class T>
+struct ExpectDefaultConstructWithDestruct {
+ void operator()() const {
+ // Scoped so that destructor gets called before reporting success.
+ {
+ T object;
+ static_cast<void>(object);
+ }
+
+ errors->addTestSuccess("Default construction");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Check move-assign into a default-constructed object.
+template <class T, class Prof>
+struct ExpectDefaultConstructWithMoveAssign {
+ template <class Fun>
+ void operator()(const Fun& generator) const {
+ const T source_of_truth = generator();
+ T object;
+ object = generator();
+
+ (ExpectEquality<T, Prof>)(errors, "Move assignment",
+ PrepareGivenContext(
+ GivenDeclaration{"const _T object",
+ generator.description},
+ GivenDeclaration{"_T object", ""},
+ GivenDeclaration{"object",
+ generator.description}),
+ object, source_of_truth, "std::as_const(object)",
+ "source_of_truth");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Check copy-assign into a default-constructed object.
+template <class T, class Prof>
+struct ExpectDefaultConstructWithCopyAssign {
+ template <class Fun>
+ void operator()(const Fun& generator) const {
+ const T source_of_truth = generator();
+ T object;
+ object = static_cast<const T&>(generator());
+
+ (ExpectEquality<T, Prof>)(errors, "Copy assignment",
+ PrepareGivenContext(
+ GivenDeclaration{"const _T source_of_truth",
+ generator.description},
+ GivenDeclaration{"_T object", ""},
+ GivenDeclaration{
+ "object",
+ TString("static_cast<const _T&>(") +
+ generator.description + ")"}),
+ object, source_of_truth, "std::as_const(object)",
+ "source_of_truth");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Perform a self move-assign.
+template <class T, class Prof>
+struct ExpectSelfMoveAssign {
+ template <class Fun>
+ void operator()(const Fun& generator) const {
+ T object = generator();
+ object = y_absl::move(object);
+
+ // NOTE: Self move-assign results in a valid-but-unspecified state.
+
+ (ExpectEquality<T, Prof>)(errors, "Move assignment",
+ PrepareGivenContext(
+ GivenDeclaration{"_T object",
+ generator.description},
+ GivenDeclaration{"object",
+ "std::move(object)"}),
+ object, object, "object", "object");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Perform a self copy-assign.
+template <class T, class Prof>
+struct ExpectSelfCopyAssign {
+ template <class Fun>
+ void operator()(const Fun& generator) const {
+ const T source_of_truth = generator();
+ T object = generator();
+ const T& const_object = object;
+ object = const_object;
+
+ (ExpectEquality<T, Prof>)(errors, "Copy assignment",
+ PrepareGivenContext(
+ GivenDeclaration{"const _T source_of_truth",
+ generator.description},
+ GivenDeclaration{"_T object",
+ generator.description},
+ GivenDeclaration{"object",
+ "std::as_const(object)"}),
+ const_object, source_of_truth,
+ "std::as_const(object)", "source_of_truth");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Perform a self-swap.
+template <class T, class Prof>
+struct ExpectSelfSwap {
+ template <class Fun>
+ void operator()(const Fun& generator) const {
+ const T source_of_truth = generator();
+ T object = generator();
+
+ type_traits_internal::Swap(object, object);
+
+ TString preliminary_info = y_absl::StrCat(
+ PrepareGivenContext(
+ GivenDeclaration{"const _T source_of_truth", generator.description},
+ GivenDeclaration{"_T object", generator.description}),
+ "After performing a self-swap:\n"
+ " using std::swap;\n"
+ " swap(object, object);\n"
+ "\n");
+
+ (ExpectEquality<T, Prof>)(errors, "Swap", std::move(preliminary_info),
+ object, source_of_truth, "std::as_const(object)",
+ "source_of_truth");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Compare a generated object with itself, using whichever comparison
+// operations the profile supports.
+template <class T, class Prof>
+struct ExpectSelfComparison {
+ template <class Fun>
+ void operator()(const Fun& generator) const {
+ const T object = generator();
+ (ExpectOneWayEquality<T, Prof>)(errors, "Comparison",
+ PrepareGivenContext(GivenDeclaration{
+ "const _T object",
+ generator.description}),
+ object, object, "object", "object");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Perform each of the single-generator checks when necessary operations are
+// supported.
+template <class T, class Prof>
+struct ExpectConsistency {
+ template <class Fun>
+ void operator()(const Fun& generator) const {
+ If<PropertiesOfT<Prof>::is_move_constructible>::Invoke(
+ ExpectMoveConstructOneGenerator<T, Prof>{errors}, generator);
+
+ If<PropertiesOfT<Prof>::is_copy_constructible>::Invoke(
+ ExpectCopyConstructOneGenerator<T, Prof>{errors}, generator);
+
+ If<PropertiesOfT<Prof>::is_default_constructible &&
+ PropertiesOfT<Prof>::is_move_assignable>::
+ Invoke(ExpectDefaultConstructWithMoveAssign<T, Prof>{errors},
+ generator);
+
+ If<PropertiesOfT<Prof>::is_default_constructible &&
+ PropertiesOfT<Prof>::is_copy_assignable>::
+ Invoke(ExpectDefaultConstructWithCopyAssign<T, Prof>{errors},
+ generator);
+
+ If<PropertiesOfT<Prof>::is_move_assignable>::Invoke(
+ ExpectSelfMoveAssign<T, Prof>{errors}, generator);
+
+ If<PropertiesOfT<Prof>::is_copy_assignable>::Invoke(
+ ExpectSelfCopyAssign<T, Prof>{errors}, generator);
+
+ If<PropertiesOfT<Prof>::is_swappable>::Invoke(
+ ExpectSelfSwap<T, Prof>{errors}, generator);
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Check move-assign with two different values.
+template <class T, class Prof>
+struct ExpectMoveAssign {
+ template <class Fun0, class Fun1>
+ void operator()(const Fun0& generator0, const Fun1& generator1) const {
+ const T source_of_truth1 = generator1();
+ T object = generator0();
+ object = generator1();
+
+ (ExpectEquality<T, Prof>)(errors, "Move assignment",
+ PrepareGivenContext(
+ GivenDeclaration{"const _T source_of_truth1",
+ generator1.description},
+ GivenDeclaration{"_T object",
+ generator0.description},
+ GivenDeclaration{"object",
+ generator1.description}),
+ object, source_of_truth1, "std::as_const(object)",
+ "source_of_truth1");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Check copy-assign with two different values.
+template <class T, class Prof>
+struct ExpectCopyAssign {
+ template <class Fun0, class Fun1>
+ void operator()(const Fun0& generator0, const Fun1& generator1) const {
+ const T source_of_truth1 = generator1();
+ T object = generator0();
+ object = static_cast<const T&>(generator1());
+
+ (ExpectEquality<T, Prof>)(errors, "Copy assignment",
+ PrepareGivenContext(
+ GivenDeclaration{"const _T source_of_truth1",
+ generator1.description},
+ GivenDeclaration{"_T object",
+ generator0.description},
+ GivenDeclaration{
+ "object",
+ TString("static_cast<const _T&>(") +
+ generator1.description + ")"}),
+ object, source_of_truth1, "std::as_const(object)",
+ "source_of_truth1");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Check swap with two different values.
+template <class T, class Prof>
+struct ExpectSwap {
+ template <class Fun0, class Fun1>
+ void operator()(const Fun0& generator0, const Fun1& generator1) const {
+ const T source_of_truth0 = generator0();
+ const T source_of_truth1 = generator1();
+ T object0 = generator0();
+ T object1 = generator1();
+
+ type_traits_internal::Swap(object0, object1);
+
+ const TString context =
+ PrepareGivenContext(
+ GivenDeclaration{"const _T source_of_truth0",
+ generator0.description},
+ GivenDeclaration{"const _T source_of_truth1",
+ generator1.description},
+ GivenDeclaration{"_T object0", generator0.description},
+ GivenDeclaration{"_T object1", generator1.description}) +
+ "After performing a swap:\n"
+ " using std::swap;\n"
+ " swap(object0, object1);\n"
+ "\n";
+
+ (ExpectEquality<T, Prof>)(errors, "Swap", context, object0,
+ source_of_truth1, "std::as_const(object0)",
+ "source_of_truth1");
+ (ExpectEquality<T, Prof>)(errors, "Swap", context, object1,
+ source_of_truth0, "std::as_const(object1)",
+ "source_of_truth0");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Validate that `generator0` and `generator1` produce values that are equal.
+template <class T, class Prof>
+struct ExpectEquivalenceClassComparison {
+ template <class Fun0, class Fun1>
+ void operator()(const Fun0& generator0, const Fun1& generator1) const {
+ const T object0 = generator0();
+ const T object1 = generator1();
+
+ (ExpectEquality<T, Prof>)(errors, "Comparison",
+ PrepareGivenContext(
+ GivenDeclaration{"const _T object0",
+ generator0.description},
+ GivenDeclaration{"const _T object1",
+ generator1.description}),
+ object0, object1, "object0", "object1");
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Validate that all objects in the same equivalence-class have the same value.
+template <class T, class Prof>
+struct ExpectEquivalenceClassConsistency {
+ template <class Fun0, class Fun1>
+ void operator()(const Fun0& generator0, const Fun1& generator1) const {
+ If<PropertiesOfT<Prof>::is_move_assignable>::Invoke(
+ ExpectMoveAssign<T, Prof>{errors}, generator0, generator1);
+
+ If<PropertiesOfT<Prof>::is_copy_assignable>::Invoke(
+ ExpectCopyAssign<T, Prof>{errors}, generator0, generator1);
+
+ If<PropertiesOfT<Prof>::is_swappable>::Invoke(ExpectSwap<T, Prof>{errors},
+ generator0, generator1);
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Given a "lesser" object and a "greater" object, perform every combination of
+// comparison operators supported for the type, expecting consistent results.
+template <class T, class Prof>
+void ExpectOrdered(ConformanceErrors* errors, y_absl::string_view context,
+ const T& small, const T& big, y_absl::string_view small_name,
+ y_absl::string_view big_name) {
+ const y_absl::string_view test_name = "Comparison";
+
+ If<PropertiesOfT<Prof>::is_equality_comparable>::Invoke(
+ ExpectNotEq{errors}, test_name, context, small, big, small_name,
+ big_name);
+ If<PropertiesOfT<Prof>::is_equality_comparable>::Invoke(
+ ExpectNotEq{errors}, test_name, context, big, small, big_name,
+ small_name);
+
+ If<PropertiesOfT<Prof>::is_inequality_comparable>::Invoke(
+ ExpectNe{errors}, test_name, context, small, big, small_name, big_name);
+ If<PropertiesOfT<Prof>::is_inequality_comparable>::Invoke(
+ ExpectNe{errors}, test_name, context, big, small, big_name, small_name);
+
+ If<PropertiesOfT<Prof>::is_less_than_comparable>::Invoke(
+ ExpectLt{errors}, test_name, context, small, big, small_name, big_name);
+ If<PropertiesOfT<Prof>::is_less_than_comparable>::Invoke(
+ ExpectNotLt{errors}, test_name, context, big, small, big_name,
+ small_name);
+
+ If<PropertiesOfT<Prof>::is_less_equal_comparable>::Invoke(
+ ExpectLe{errors}, test_name, context, small, big, small_name, big_name);
+ If<PropertiesOfT<Prof>::is_less_equal_comparable>::Invoke(
+ ExpectNotLe{errors}, test_name, context, big, small, big_name,
+ small_name);
+
+ If<PropertiesOfT<Prof>::is_greater_equal_comparable>::Invoke(
+ ExpectNotGe{errors}, test_name, context, small, big, small_name,
+ big_name);
+ If<PropertiesOfT<Prof>::is_greater_equal_comparable>::Invoke(
+ ExpectGe{errors}, test_name, context, big, small, big_name, small_name);
+
+ If<PropertiesOfT<Prof>::is_greater_than_comparable>::Invoke(
+ ExpectNotGt{errors}, test_name, context, small, big, small_name,
+ big_name);
+ If<PropertiesOfT<Prof>::is_greater_than_comparable>::Invoke(
+ ExpectGt{errors}, test_name, context, big, small, big_name, small_name);
+}
+
+// For every two elements of an equivalence class, makes sure that those two
+// elements compare equal, including checks with the same argument passed as
+// both operands.
+template <class T, class Prof>
+struct ExpectEquivalenceClassComparisons {
+ template <class... Funs>
+ void operator()(EquivalenceClassType<Funs...> eq_class) const {
+ (ForEachTupleElement)(ExpectSelfComparison<T, Prof>{errors},
+ eq_class.generators);
+
+ (ForEveryTwo)(ExpectEquivalenceClassComparison<T, Prof>{errors},
+ eq_class.generators);
+ }
+
+ ConformanceErrors* errors;
+};
+
+// For every element of an equivalence class, makes sure that the element is
+// self-consistent (in other words, if any of move/copy/swap are defined,
+// performs those operations and makes sure that results and operands still
+// compare equal to known values whenever that operation requires it).
+template <class T, class Prof>
+struct ExpectEquivalenceClass {
+ template <class... Funs>
+ void operator()(EquivalenceClassType<Funs...> eq_class) const {
+ (ForEachTupleElement)(ExpectConsistency<T, Prof>{errors},
+ eq_class.generators);
+
+ (ForEveryTwo)(ExpectEquivalenceClassConsistency<T, Prof>{errors},
+ eq_class.generators);
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Validate that the passed-in argument is a generator of a greater value than
+// the one produced by the "small_gen" data member, with respect to all of the
+// comparison operators that Prof requires, testing both argument orders.
+template <class T, class Prof, class SmallGenerator>
+struct ExpectBiggerGeneratorThanComparisons {
+ template <class BigGenerator>
+ void operator()(BigGenerator big_gen) const {
+ const T small = small_gen();
+ const T big = big_gen();
+
+ (ExpectOrdered<T, Prof>)(errors,
+ PrepareGivenContext(
+ GivenDeclaration{"const _T small",
+ small_gen.description},
+ GivenDeclaration{"const _T big",
+ big_gen.description}),
+ small, big, "small", "big");
+ }
+
+ SmallGenerator small_gen;
+ ConformanceErrors* errors;
+};
+
+// Perform all of the move, copy, and swap checks on the value generated by
+// `small_gen` and the value generated by `big_gen`.
+template <class T, class Prof, class SmallGenerator>
+struct ExpectBiggerGeneratorThan {
+ template <class BigGenerator>
+ void operator()(BigGenerator big_gen) const {
+ If<PropertiesOfT<Prof>::is_move_assignable>::Invoke(
+ ExpectMoveAssign<T, Prof>{errors}, small_gen, big_gen);
+ If<PropertiesOfT<Prof>::is_move_assignable>::Invoke(
+ ExpectMoveAssign<T, Prof>{errors}, big_gen, small_gen);
+
+ If<PropertiesOfT<Prof>::is_copy_assignable>::Invoke(
+ ExpectCopyAssign<T, Prof>{errors}, small_gen, big_gen);
+ If<PropertiesOfT<Prof>::is_copy_assignable>::Invoke(
+ ExpectCopyAssign<T, Prof>{errors}, big_gen, small_gen);
+
+ If<PropertiesOfT<Prof>::is_swappable>::Invoke(ExpectSwap<T, Prof>{errors},
+ small_gen, big_gen);
+ }
+
+ SmallGenerator small_gen;
+ ConformanceErrors* errors;
+};
+
+// Validate that the result of a generator is greater than the results of all
+// generators in an equivalence class with respect to comparisons.
+template <class T, class Prof, class SmallGenerator>
+struct ExpectBiggerGeneratorThanEqClassesComparisons {
+ template <class BigEqClass>
+ void operator()(BigEqClass big_eq_class) const {
+ (ForEachTupleElement)(
+ ExpectBiggerGeneratorThanComparisons<T, Prof, SmallGenerator>{small_gen,
+ errors},
+ big_eq_class.generators);
+ }
+
+ SmallGenerator small_gen;
+ ConformanceErrors* errors;
+};
+
+// Validate that the non-comparison binary operations required by Prof are
+// correct for the result of each generator of big_eq_class paired with the
+// logically smaller value produced by the small_gen generator.
+template <class T, class Prof, class SmallGenerator>
+struct ExpectBiggerGeneratorThanEqClasses {
+ template <class BigEqClass>
+ void operator()(BigEqClass big_eq_class) const {
+ (ForEachTupleElement)(
+ ExpectBiggerGeneratorThan<T, Prof, SmallGenerator>{small_gen, errors},
+ big_eq_class.generators);
+ }
+
+ SmallGenerator small_gen;
+ ConformanceErrors* errors;
+};
+
+// Validate that each equivalence class that is passed is logically less than
+// the equivalence classes that come later in the argument list.
+template <class T, class Prof>
+struct ExpectOrderedEquivalenceClassesComparisons {
+ template <class... BigEqClasses>
+ struct Impl {
+ // Validate that the value produced by `small_gen` is less than all of the
+ // values generated by those of the logically larger equivalence classes.
+ template <class SmallGenerator>
+ void operator()(SmallGenerator small_gen) const {
+ (ForEachTupleElement)(ExpectBiggerGeneratorThanEqClassesComparisons<
+ T, Prof, SmallGenerator>{small_gen, errors},
+ big_eq_classes);
+ }
+
+ std::tuple<BigEqClasses...> big_eq_classes;
+ ConformanceErrors* errors;
+ };
+
+ // When given no equivalence classes, no validation is necessary.
+ void operator()() const {}
+
+ template <class SmallEqClass, class... BigEqClasses>
+ void operator()(SmallEqClass small_eq_class,
+ BigEqClasses... big_eq_classes) const {
+ // For each generator in the first equivalence class, make sure that it is
+ // less than each of those in the logically greater equivalence classes.
+ (ForEachTupleElement)(
+ Impl<BigEqClasses...>{std::make_tuple(y_absl::move(big_eq_classes)...),
+ errors},
+ small_eq_class.generators);
+
+ // Recurse so that all equivalence class combinations are checked.
+ (*this)(y_absl::move(big_eq_classes)...);
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Validate that the non-comparison binary operations required by Prof are
+// correct for the result of each generator of big_eq_classes paired with the
+// logically smaller value produced by the small_gen generator.
+template <class T, class Prof>
+struct ExpectOrderedEquivalenceClasses {
+ template <class... BigEqClasses>
+ struct Impl {
+ template <class SmallGenerator>
+ void operator()(SmallGenerator small_gen) const {
+ (ForEachTupleElement)(
+ ExpectBiggerGeneratorThanEqClasses<T, Prof, SmallGenerator>{small_gen,
+ errors},
+ big_eq_classes);
+ }
+
+ std::tuple<BigEqClasses...> big_eq_classes;
+ ConformanceErrors* errors;
+ };
+
+ // Check that small_eq_class is logically consistent and also is logically
+ // less than all values in big_eq_classes.
+ template <class SmallEqClass, class... BigEqClasses>
+ void operator()(SmallEqClass small_eq_class,
+ BigEqClasses... big_eq_classes) const {
+ (ForEachTupleElement)(
+ Impl<BigEqClasses...>{std::make_tuple(y_absl::move(big_eq_classes)...),
+ errors},
+ small_eq_class.generators);
+
+ (*this)(y_absl::move(big_eq_classes)...);
+ }
+
+ // Terminating case of operator().
+ void operator()() const {}
+
+ ConformanceErrors* errors;
+};
+
+// Validate that a type meets the syntactic requirements of std::hash if the
+// range of profiles requires it.
+template <class T, class MinProf, class MaxProf>
+struct ExpectHashable {
+ void operator()() const {
+ ExpectModelOfHashable<T, MinProf, MaxProf>(errors);
+ }
+
+ ConformanceErrors* errors;
+};
+
+// Validate that the type `T` meets all of the requirements associated with
+// `MinProf` without going beyond the syntactic properties of `MaxProf`.
+template <class T, class MinProf, class MaxProf>
+struct ExpectModels {
+ void operator()(ConformanceErrors* errors) const {
+ ExpectModelOfDefaultConstructible<T, MinProf, MaxProf>(errors);
+ ExpectModelOfMoveConstructible<T, MinProf, MaxProf>(errors);
+ ExpectModelOfCopyConstructible<T, MinProf, MaxProf>(errors);
+ ExpectModelOfMoveAssignable<T, MinProf, MaxProf>(errors);
+ ExpectModelOfCopyAssignable<T, MinProf, MaxProf>(errors);
+ ExpectModelOfDestructible<T, MinProf, MaxProf>(errors);
+ ExpectModelOfEqualityComparable<T, MinProf, MaxProf>(errors);
+ ExpectModelOfInequalityComparable<T, MinProf, MaxProf>(errors);
+ ExpectModelOfLessThanComparable<T, MinProf, MaxProf>(errors);
+ ExpectModelOfLessEqualComparable<T, MinProf, MaxProf>(errors);
+ ExpectModelOfGreaterEqualComparable<T, MinProf, MaxProf>(errors);
+ ExpectModelOfGreaterThanComparable<T, MinProf, MaxProf>(errors);
+ ExpectModelOfSwappable<T, MinProf, MaxProf>(errors);
+
+ // Only check hashability on compilers that have a compliant default-hash.
+ If<!poisoned_hash_fails_instantiation()>::Invoke(
+ ExpectHashable<T, MinProf, MaxProf>{errors});
+ }
+};
+
+// A metafunction that yields a Profile matching the set of properties that are
+// safe to be checked (lack of hashability is only checked on standard library
+// implementations that are standards-compliant in that they provide a std::hash
+// primary template that is SFINAE-friendly).
+template <class LogicalProf, class T>
+struct MinimalCheckableProfile {
+ using type =
+ MinimalProfiles<PropertiesOfT<LogicalProf>,
+ PropertiesOfT<SyntacticConformanceProfileOf<
+ T, !PropertiesOfT<LogicalProf>::is_hashable &&
+ poisoned_hash_fails_instantiation()
+ ? CheckHashability::no
+ : CheckHashability::yes>>>;
+};
+
+// An identity metafunction
+template <class T>
+struct Always {
+ using type = T;
+};
+
+// Validate that T meets all of the necessary requirements of LogicalProf, with
+// syntactic requirements defined by the profile range [MinProf, MaxProf].
+template <class T, class LogicalProf, class MinProf, class MaxProf,
+ class... EqClasses>
+ConformanceErrors ExpectRegularityImpl(
+ OrderedEquivalenceClasses<EqClasses...> vals) {
+ ConformanceErrors errors((NameOf<T>()));
+
+ If<!constexpr_instantiation_when_unevaluated()>::Invoke(
+ ExpectModels<T, MinProf, MaxProf>(), &errors);
+
+ using minimal_profile = typename y_absl::conditional_t<
+ constexpr_instantiation_when_unevaluated(), Always<LogicalProf>,
+ MinimalCheckableProfile<LogicalProf, T>>::type;
+
+ If<PropertiesOfT<minimal_profile>::is_default_constructible>::Invoke(
+ ExpectDefaultConstructWithDestruct<T>{&errors});
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Perform all comparison checks first, since later checks depend on their
+ // correctness.
+ //
+ // Check all of the comparisons for all values in the same equivalence
+ // class (equal with respect to comparison operators and hash the same).
+ (ForEachTupleElement)(
+ ExpectEquivalenceClassComparisons<T, minimal_profile>{&errors},
+ vals.eq_classes);
+
+ // Check all of the comparisons for each combination of values that are in
+ // different equivalence classes (not equal with respect to comparison
+ // operators).
+ y_absl::apply(
+ ExpectOrderedEquivalenceClassesComparisons<T, minimal_profile>{&errors},
+ vals.eq_classes);
+ //
+ //////////////////////////////////////////////////////////////////////////////
+
+ // Perform remaining checks, relying on comparisons.
+ // TODO(calabrese) short circuit if any comparisons above failed.
+ (ForEachTupleElement)(ExpectEquivalenceClass<T, minimal_profile>{&errors},
+ vals.eq_classes);
+
+ y_absl::apply(ExpectOrderedEquivalenceClasses<T, minimal_profile>{&errors},
+ vals.eq_classes);
+
+ return errors;
+}
+
+// A type that represents a range of profiles that are acceptable to be matched.
+//
+// `MinProf` is the minimum set of syntactic requirements that must be met.
+//
+// `MaxProf` is the maximum set of syntactic requirements that may be met.
+// This maximum is particularly useful for certain "strictness" checking. Some
+// examples for when this is useful:
+//
+// * Making sure that a type is move-only (rather than simply movable)
+//
+// * Making sure that a member function is *not* noexcept in cases where it
+//   cannot be noexcept, such as if a dependent data member has certain
+// operations that are not noexcept.
+//
+// * Making sure that a type tightly matches a spec, such as the standard.
+//
+// `LogicalProf` is the Profile for which run-time testing is to take place.
+//
+// Note: `LogicalProf` exists because it is often the case, when dealing with
+// templates, that a declaration of a given operation is specified, but its
+// body would fail to instantiate. Examples include the
+// copy-constructor of a standard container when the element-type is move-only,
+// or the comparison operators of a standard container when the element-type
+// does not have the necessary comparison operations defined. The `LogicalProf`
+// parameter allows us to capture the intent of what should be tested at
+// run-time, even in the cases where syntactically it might otherwise appear as
+// though the type undergoing testing supports more than it actually does.
+template <class LogicalProf, class MinProf = LogicalProf,
+ class MaxProf = MinProf>
+struct ProfileRange {
+ using logical_profile = LogicalProf;
+ using min_profile = MinProf;
+ using max_profile = MaxProf;
+};
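+
+// A hypothetical sketch (the profile name `MyProfile` is an illustrative
+// placeholder): a range that runtime-tests `MyProfile` while syntactically
+// allowing anything up to the strongest requirements could be written as
+//
+//   using Range = ProfileRange</*LogicalProf=*/MyProfile,
+//                              /*MinProf=*/MyProfile,
+//                              /*MaxProf=*/MostStrictProfile>;
+//
+// (MostStrictProfile is defined later in this file.)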
+
+// Similar to ProfileRange except that it creates a profile range that is
+// coupled with a Domain and is used when testing that a type matches exactly
+// the "minimum" requirements of LogicalProf.
+template <class StrictnessDomain, class LogicalProf,
+ class MinProf = LogicalProf, class MaxProf = MinProf>
+struct StrictProfileRange {
+ // We do not yet support extension.
+ static_assert(
+ std::is_same<StrictnessDomain, RegularityDomain>::value,
+ "Currently, the only valid StrictnessDomain is RegularityDomain.");
+ using strictness_domain = StrictnessDomain;
+ using logical_profile = LogicalProf;
+ using min_profile = MinProf;
+ using max_profile = MaxProf;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction that creates a StrictProfileRange from a Domain and either a
+// Profile or ProfileRange.
+template <class StrictnessDomain, class ProfOrRange>
+struct MakeStrictProfileRange;
+
+template <class StrictnessDomain, class LogicalProf>
+struct MakeStrictProfileRange {
+ using type = StrictProfileRange<StrictnessDomain, LogicalProf>;
+};
+
+template <class StrictnessDomain, class LogicalProf, class MinProf,
+ class MaxProf>
+struct MakeStrictProfileRange<StrictnessDomain,
+ ProfileRange<LogicalProf, MinProf, MaxProf>> {
+ using type =
+ StrictProfileRange<StrictnessDomain, LogicalProf, MinProf, MaxProf>;
+};
+
+template <class StrictnessDomain, class ProfOrRange>
+using MakeStrictProfileRangeT =
+ typename MakeStrictProfileRange<StrictnessDomain, ProfOrRange>::type;
+//
+////////////////////////////////////////////////////////////////////////////////
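+
+// For illustration, with a hypothetical Profile `P` and max profile `MaxP`:
+//
+//   MakeStrictProfileRangeT<RegularityDomain, P>
+//       // -> StrictProfileRange<RegularityDomain, P>
+//   MakeStrictProfileRangeT<RegularityDomain, ProfileRange<P, P, MaxP>>
+//       // -> StrictProfileRange<RegularityDomain, P, P, MaxP>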
+
+// A profile in the RegularityDomain with the strongest possible requirements.
+using MostStrictProfile =
+ CombineProfiles<TriviallyCompleteProfile, NothrowComparableProfile>;
+
+// Forms a ProfileRange that treats the Profile as the bare minimum requirements
+// of a type.
+template <class LogicalProf, class MinProf = LogicalProf>
+using LooseProfileRange = StrictProfileRange<RegularityDomain, LogicalProf,
+ MinProf, MostStrictProfile>;
+
+template <class Prof>
+using MakeLooseProfileRangeT = Prof;
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// The following classes implement the metafunction ProfileRangeOfT<T> that
+// takes either a Profile or ProfileRange and yields the ProfileRange to be
+// used during testing.
+//
+template <class T, class /*Enabler*/ = void>
+struct ProfileRangeOfImpl;
+
+template <class T>
+struct ProfileRangeOfImpl<T, y_absl::void_t<PropertiesOfT<T>>> {
+ using type = LooseProfileRange<T>;
+};
+
+template <class T>
+struct ProfileRangeOf : ProfileRangeOfImpl<T> {};
+
+template <class StrictnessDomain, class LogicalProf, class MinProf,
+ class MaxProf>
+struct ProfileRangeOf<
+ StrictProfileRange<StrictnessDomain, LogicalProf, MinProf, MaxProf>> {
+ using type =
+ StrictProfileRange<StrictnessDomain, LogicalProf, MinProf, MaxProf>;
+};
+
+template <class T>
+using ProfileRangeOfT = typename ProfileRangeOf<T>::type;
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// Extract the logical profile of a range (what will be runtime tested).
+template <class T>
+using LogicalProfileOfT = typename ProfileRangeOfT<T>::logical_profile;
+
+// Extract the minimal syntactic profile of a range (error if not at least).
+template <class T>
+using MinProfileOfT = typename ProfileRangeOfT<T>::min_profile;
+
+// Extract the maximum syntactic profile of a range (error if more than).
+template <class T>
+using MaxProfileOfT = typename ProfileRangeOfT<T>::max_profile;
+
+////////////////////////////////////////////////////////////////////////////////
+//
+template <class T>
+struct IsProfileOrProfileRange : IsProfile<T>::type {};
+
+template <class StrictnessDomain, class LogicalProf, class MinProf,
+ class MaxProf>
+struct IsProfileOrProfileRange<
+ StrictProfileRange<StrictnessDomain, LogicalProf, MinProf, MaxProf>>
+ : std::true_type {};
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// TODO(calabrese): Consider naming the functions in this class the same as
+// the macros (defined later on) so that auto-complete leads to the correct name
+// and so that a user cannot accidentally call a function rather than the macro
+// form.
+template <bool ExpectSuccess, class T, class... EqClasses>
+struct ExpectConformanceOf {
+ // Add a value to be tested. Subsequent calls to this function on the same
+ // object must specify logically "larger" values with respect to the
+ // comparison operators of the type, if any.
+ //
+ // NOTE: This function should not be called directly. A stateless lambda is
+ // implicitly formed and passed when using the INITIALIZER macro at the bottom
+ // of this file.
+ template <class Fun,
+ y_absl::enable_if_t<std::is_same<
+ ResultOfGeneratorT<GeneratorType<Fun>>, T>::value>** = nullptr>
+ ABSL_MUST_USE_RESULT ExpectConformanceOf<ExpectSuccess, T, EqClasses...,
+ EquivalenceClassType<Fun>>
+ initializer(GeneratorType<Fun> fun) && {
+ return {
+ {std::tuple_cat(y_absl::move(ordered_vals.eq_classes),
+ std::make_tuple((EquivalenceClass)(y_absl::move(fun))))},
+ std::move(expected_failed_tests)};
+ }
+
+ template <class... TestNames,
+ y_absl::enable_if_t<!ExpectSuccess && sizeof...(EqClasses) == 0 &&
+ y_absl::conjunction<std::is_convertible<
+ TestNames, y_absl::string_view>...>::value>** =
+ nullptr>
+ ABSL_MUST_USE_RESULT ExpectConformanceOf<ExpectSuccess, T, EqClasses...>
+ due_to(TestNames&&... test_names) && {
+ (InsertEach)(&expected_failed_tests,
+ y_absl::AsciiStrToLower(y_absl::string_view(test_names))...);
+
+ return {y_absl::move(ordered_vals), std::move(expected_failed_tests)};
+ }
+
+ template <class... TestNames, int = 0, // MSVC disambiguator
+ y_absl::enable_if_t<ExpectSuccess && sizeof...(EqClasses) == 0 &&
+ y_absl::conjunction<std::is_convertible<
+ TestNames, y_absl::string_view>...>::value>** =
+ nullptr>
+ ABSL_MUST_USE_RESULT ExpectConformanceOf<ExpectSuccess, T, EqClasses...>
+ due_to(TestNames&&... test_names) && {
+ // TODO(calabrese) Instead have DUE_TO only exist via a CRTP base.
+    // This would produce better error messages than the static_assert.
+ static_assert(!ExpectSuccess,
+ "DUE_TO cannot be called when conformance is expected -- did "
+ "you mean to use ASSERT_NONCONFORMANCE_OF?");
+ }
+
+ // Add a value to be tested. Subsequent calls to this function on the same
+ // object must specify logically "larger" values with respect to the
+ // comparison operators of the type, if any.
+ //
+ // NOTE: This function should not be called directly. A stateful lambda is
+  // implicitly formed and passed when using the STATEFUL_INITIALIZER macro at
+  // the bottom of this file.
+ template <class Fun,
+ y_absl::enable_if_t<std::is_same<
+ ResultOfGeneratorT<GeneratorType<Fun>>, T>::value>** = nullptr>
+ ABSL_MUST_USE_RESULT ExpectConformanceOf<ExpectSuccess, T, EqClasses...,
+ EquivalenceClassType<Fun>>
+  stateful_initializer(GeneratorType<Fun> fun) && {
+ return {
+ {std::tuple_cat(y_absl::move(ordered_vals.eq_classes),
+ std::make_tuple((EquivalenceClass)(y_absl::move(fun))))},
+ std::move(expected_failed_tests)};
+ }
+
+  // Add a set of values to be tested, where each value is equal with respect
+ // the comparison operators and std::hash specialization, if defined.
+ template <
+ class... Funs,
+ y_absl::void_t<y_absl::enable_if_t<std::is_same<
+ ResultOfGeneratorT<GeneratorType<Funs>>, T>::value>...>** = nullptr>
+ ABSL_MUST_USE_RESULT ExpectConformanceOf<ExpectSuccess, T, EqClasses...,
+ EquivalenceClassType<Funs...>>
+ equivalence_class(GeneratorType<Funs>... funs) && {
+ return {{std::tuple_cat(
+ y_absl::move(ordered_vals.eq_classes),
+ std::make_tuple((EquivalenceClass)(y_absl::move(funs)...)))},
+ std::move(expected_failed_tests)};
+ }
+
+ // Execute the tests for the captured set of values, strictly matching a range
+ // of expected profiles in a given domain.
+ template <
+ class ProfRange,
+ y_absl::enable_if_t<IsProfileOrProfileRange<ProfRange>::value>** = nullptr>
+ ABSL_MUST_USE_RESULT ::testing::AssertionResult with_strict_profile(
+ ProfRange /*profile*/) {
+ ConformanceErrors test_result =
+ (ExpectRegularityImpl<
+ T, LogicalProfileOfT<ProfRange>, MinProfileOfT<ProfRange>,
+ MaxProfileOfT<ProfRange>>)(y_absl::move(ordered_vals));
+
+ return ExpectSuccess ? test_result.assertionResult()
+ : test_result.expectFailedTests(expected_failed_tests);
+ }
+
+ // Execute the tests for the captured set of values, loosely matching a range
+ // of expected profiles (loose in that an interface is allowed to be more
+  // refined than a profile suggests, such as a type having a noexcept copy
+ // constructor when all that is required is that the copy constructor exists).
+ template <class Prof, y_absl::enable_if_t<IsProfile<Prof>::value>** = nullptr>
+ ABSL_MUST_USE_RESULT ::testing::AssertionResult with_loose_profile(
+ Prof /*profile*/) {
+ ConformanceErrors test_result =
+ (ExpectRegularityImpl<
+ T, Prof, Prof,
+ CombineProfiles<TriviallyCompleteProfile,
+ NothrowComparableProfile>>)(y_absl::
+ move(ordered_vals));
+
+ return ExpectSuccess ? test_result.assertionResult()
+ : test_result.expectFailedTests(expected_failed_tests);
+ }
+
+ OrderedEquivalenceClasses<EqClasses...> ordered_vals;
+ std::set<TString> expected_failed_tests;
+};
+
+template <class T>
+using ExpectConformanceOfType = ExpectConformanceOf</*ExpectSuccess=*/true, T>;
+
+template <class T>
+using ExpectNonconformanceOfType =
+ ExpectConformanceOf</*ExpectSuccess=*/false, T>;
+
+struct EquivalenceClassMaker {
+ // TODO(calabrese) Constrain to callable
+ template <class Fun>
+ static GeneratorType<Fun> initializer(GeneratorType<Fun> fun) {
+ return fun;
+ }
+};
+
+// A top-level macro that begins the builder pattern.
+//
+// The argument is the datatype to be tested.
+#define ABSL_INTERNAL_ASSERT_CONFORMANCE_OF(...) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if ABSL_INTERNAL_LPAREN \
+ const ::testing::AssertionResult gtest_ar = \
+ ABSL_INTERNAL_LPAREN ::y_absl::types_internal::ExpectConformanceOfType< \
+ __VA_ARGS__>()
+
+// Akin to ASSERT_CONFORMANCE_OF except that it expects failure and matches
+// the expected failing tests by name (see DUE_TO).
+#define ABSL_INTERNAL_ASSERT_NONCONFORMANCE_OF(...) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if ABSL_INTERNAL_LPAREN \
+ const ::testing::AssertionResult gtest_ar = \
+ ABSL_INTERNAL_LPAREN ::y_absl::types_internal::ExpectNonconformanceOfType< \
+ __VA_ARGS__>()
+
+////////////////////////////////////////////////////////////////////////////////
+// NOTE: The following macros look like they are recursive, but are not (macros
+// cannot recurse). These actually refer to member functions of the same name.
+// This is done intentionally so that a user cannot accidentally invoke a
+// member function of the conformance-testing suite without going through the
+// macro.
+////////////////////////////////////////////////////////////////////////////////
+
+// Specify expected test failures as comma-separated strings.
+#define DUE_TO(...) due_to(__VA_ARGS__)
+
+// Specify a value to be tested.
+//
+// Note: Internally, this takes an expression and turns it into the return value
+// of a lambda that captures no data. The expression is stringized during
+// preprocessing so that it can be used in error reports.
+#define INITIALIZER(...) \
+ initializer(::y_absl::types_internal::Generator( \
+ [] { return __VA_ARGS__; }, ABSL_INTERNAL_STRINGIZE(__VA_ARGS__)))
+
+// Specify a value to be tested.
+//
+// Note: Internally, this takes an expression and turns it into the return value
+// of a lambda that captures data by reference. The expression is stringized
+// during preprocessing so that it can be used in error reports.
+#define STATEFUL_INITIALIZER(...) \
+ stateful_initializer(::y_absl::types_internal::Generator( \
+ [&] { return __VA_ARGS__; }, ABSL_INTERNAL_STRINGIZE(__VA_ARGS__)))
+
+// Used in the builder-pattern.
+//
+// Takes a series of INITIALIZER and/or STATEFUL_INITIALIZER invocations and
+// forwards them along to be tested, grouping them such that the testing suite
+// knows that they are supposed to represent the same logical value (the values
+// compare the same, hash the same, etc.).
+#define EQUIVALENCE_CLASS(...) \
+ equivalence_class(ABSL_INTERNAL_TRANSFORM_ARGS( \
+ ABSL_INTERNAL_PREPEND_EQ_MAKER, __VA_ARGS__))
+
+// An invocation of this or WITH_STRICT_PROFILE must end the builder-pattern.
+// It takes a Profile as its argument.
+//
+// This executes the tests and allows types that are "more refined" than the
+// profile specifies, but not less. For instance, if the Profile specifies
+// noexcept copy-constructibility, the test will fail if the copy constructor
+// is not noexcept; however, it will succeed if the copy constructor is
+// trivial.
+//
+// This is useful for testing that a type meets some minimum set of
+// requirements.
+#define WITH_LOOSE_PROFILE(...) \
+ with_loose_profile( \
+ ::y_absl::types_internal::MakeLooseProfileRangeT<__VA_ARGS__>()) \
+ ABSL_INTERNAL_RPAREN ABSL_INTERNAL_RPAREN; \
+ else GTEST_FATAL_FAILURE_(gtest_ar.failure_message()) // NOLINT
+
+// An invocation of this or WITH_STRICT_PROFILE must end the builder-pattern.
+// It takes a Domain and a Profile as its arguments.
+//
+// This executes the tests and disallows types that differ at all from the
+// properties of the Profile. For instance, if the Profile specifies noexcept
+// copy-constructibility, the test will fail if the copy constructor is trivial.
+//
+// This is useful for testing that a type does not do anything more than a
+// specification requires, such as to minimize things like Hyrum's Law, or more
+// commonly, to prevent a type from being "accidentally" copy-constructible in
+// a way that may produce incorrect results, simply because the user forgot to
+// delete that operation.
+#define WITH_STRICT_PROFILE(...) \
+ with_strict_profile( \
+ ::y_absl::types_internal::MakeStrictProfileRangeT<__VA_ARGS__>()) \
+ ABSL_INTERNAL_RPAREN ABSL_INTERNAL_RPAREN; \
+ else GTEST_FATAL_FAILURE_(gtest_ar.failure_message()) // NOLINT
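+
+// A hypothetical sketch of the full builder pattern (the type `MyInt` and the
+// profile `MyProfile` are illustrative placeholders, not names defined by this
+// library):
+//
+//   ABSL_INTERNAL_ASSERT_CONFORMANCE_OF(MyInt)
+//       .INITIALIZER(MyInt(0))
+//       .EQUIVALENCE_CLASS(INITIALIZER(MyInt(1)), INITIALIZER(MyInt(1)))
+//       .WITH_LOOSE_PROFILE(MyProfile);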
+
+// Internal macro that is used in the internals of the EDSL when forming
+// equivalence classes.
+#define ABSL_INTERNAL_PREPEND_EQ_MAKER(arg) \
+ ::y_absl::types_internal::EquivalenceClassMaker().arg
+
+} // namespace types_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_testing_helpers.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_testing_helpers.h
new file mode 100644
index 00000000000..920b5c23ea0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/conformance_testing_helpers.h
@@ -0,0 +1,391 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_HELPERS_H_
+#define ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_HELPERS_H_
+
+// Checks to determine whether or not we can use abi::__cxa_demangle
+#if (defined(__ANDROID__) || defined(ANDROID)) && !defined(OS_ANDROID)
+#define ABSL_INTERNAL_OS_ANDROID
+#endif
+
+// We support certain compilers only. See demangle.h for details.
+#if defined(ABSL_INTERNAL_OS_ANDROID) && (defined(__i386__) || defined(__x86_64__))
+#define ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE 0
+#elif (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && \
+ !defined(__mips__)
+#define ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE 1
+#elif defined(__clang__) && !defined(_MSC_VER)
+#define ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE 1
+#else
+#define ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE 0
+#endif
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/string_view.h"
+#include "y_absl/utility/utility.h"
+
+#if ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE
+#include <cxxabi.h>
+
+#include <cstdlib>
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace types_internal {
+
+// Return a readable name for type T.
+template <class T>
+y_absl::string_view NameOfImpl() {
+// TODO(calabrese) Investigate using debugging:internal_demangle as a fallback.
+#if ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE
+ int status = 0;
+ char* demangled_name = nullptr;
+
+ demangled_name =
+ abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
+
+ if (status == 0 && demangled_name != nullptr) {
+ return demangled_name;
+ } else {
+ return typeid(T).name();
+ }
+#else
+ return typeid(T).name();
+#endif
+ // NOTE: We intentionally leak demangled_name so that it remains valid
+ // throughout the remainder of the program.
+}
+
+// Given a type, returns as nice a type name as we can produce (demangled).
+//
+// Note: This currently strips cv-qualifiers and references, but that is okay
+// because we only use this internally with unqualified object types.
+template <class T>
+TString NameOf() {
+ static const y_absl::string_view result = NameOfImpl<T>();
+ return TString(result);
+}
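+
+// For example (illustrative): NameOf<int>() typically yields "int" when
+// abi::__cxa_demangle is available, and the raw typeid name otherwise.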
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Metafunction to check if a type is callable with no explicit arguments
+template <class Fun, class /*Enabler*/ = void>
+struct IsNullaryCallableImpl : std::false_type {};
+
+template <class Fun>
+struct IsNullaryCallableImpl<
+ Fun, y_absl::void_t<decltype(std::declval<const Fun&>()())>>
+ : std::true_type {
+ using result_type = decltype(std::declval<const Fun&>()());
+
+ template <class ValueType>
+ using for_type = std::is_same<ValueType, result_type>;
+
+ using void_if_true = void;
+};
+
+template <class Fun>
+struct IsNullaryCallable : IsNullaryCallableImpl<Fun> {};
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// A type that contains a function object that returns an instance of a type
+// that is undergoing conformance testing. This function is required to always
+// return the same value upon invocation.
+template <class Fun>
+struct GeneratorType;
+
+// A type that contains a tuple of GeneratorType<Fun> where each Fun has the
+// same return type. The results of the different generators should all be
+// equal values, though the underlying object representation may differ (such
+// as if one returns 0.0 and another returns -0.0, or if one returns an empty
+// vector and another returns an empty vector with a different capacity).
+template <class... Funs>
+struct EquivalenceClassType;
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction to check if a type is a specialization of EquivalenceClassType
+template <class T>
+struct IsEquivalenceClass : std::false_type {};
+
+template <>
+struct IsEquivalenceClass<EquivalenceClassType<>> : std::true_type {
+ using self = IsEquivalenceClass;
+
+ // A metafunction to check if this EquivalenceClassType is a valid
+ // EquivalenceClassType for a type `ValueType` that is undergoing testing
+ template <class ValueType>
+ using for_type = std::true_type;
+};
+
+template <class Head, class... Tail>
+struct IsEquivalenceClass<EquivalenceClassType<Head, Tail...>>
+ : std::true_type {
+ using self = IsEquivalenceClass;
+
+ // The type undergoing conformance testing that this EquivalenceClass
+ // corresponds to
+ using result_type = typename IsNullaryCallable<Head>::result_type;
+
+ // A metafunction to check if this EquivalenceClassType is a valid
+ // EquivalenceClassType for a type `ValueType` that is undergoing testing
+ template <class ValueType>
+ using for_type = std::is_same<ValueType, result_type>;
+};
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// A type that contains an ordered series of EquivalenceClassTypes, where the
+// function object of each underlying GeneratorType has the same return type.
+//
+// These equivalence classes are required to be in a logical ascending order
+// that is consistent with comparison operators that are defined for the return
+// type of each GeneratorType, if any.
+template <class... EqClasses>
+struct OrderedEquivalenceClasses;
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction to determine the return type of the function object contained
+// in a GeneratorType specialization.
+template <class T>
+struct ResultOfGenerator {};
+
+template <class Fun>
+struct ResultOfGenerator<GeneratorType<Fun>> {
+ using type = decltype(std::declval<const Fun&>()());
+};
+
+template <class Fun>
+using ResultOfGeneratorT = typename ResultOfGenerator<GeneratorType<Fun>>::type;
+//
+////////////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction that yields true iff each of Funs is a GeneratorType
+// specialization and they all contain functions with the same return type
+template <class /*Enabler*/, class... Funs>
+struct AreGeneratorsWithTheSameReturnTypeImpl : std::false_type {};
+
+template <>
+struct AreGeneratorsWithTheSameReturnTypeImpl<void> : std::true_type {};
+
+template <class Head, class... Tail>
+struct AreGeneratorsWithTheSameReturnTypeImpl<
+ typename std::enable_if<y_absl::conjunction<std::is_same<
+ ResultOfGeneratorT<Head>, ResultOfGeneratorT<Tail>>...>::value>::type,
+ Head, Tail...> : std::true_type {};
+
+template <class... Funs>
+struct AreGeneratorsWithTheSameReturnType
+ : AreGeneratorsWithTheSameReturnTypeImpl<void, Funs...>::type {};
+//
+////////////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction that yields true iff each of Funs is an EquivalenceClassType
+// specialization and they all contain GeneratorType specializations that have
+// the same return type
+template <class... EqClasses>
+struct AreEquivalenceClassesOfTheSameType {
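+  // Note: This dependent, always-false static_assert fires only if the
+  // primary template is instantiated; all supported combinations are handled
+  // by the specializations below.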
+ static_assert(sizeof...(EqClasses) != sizeof...(EqClasses), "");
+};
+
+template <>
+struct AreEquivalenceClassesOfTheSameType<> : std::true_type {
+ using self = AreEquivalenceClassesOfTheSameType;
+
+ // Metafunction to check that a type is the same as all of the equivalence
+ // classes, if any.
+ // Note: In this specialization there are no equivalence classes, so the
+ // value type is always compatible.
+ template <class /*ValueType*/>
+ using for_type = std::true_type;
+};
+
+template <class... Funs>
+struct AreEquivalenceClassesOfTheSameType<EquivalenceClassType<Funs...>>
+ : std::true_type {
+ using self = AreEquivalenceClassesOfTheSameType;
+
+ // Metafunction to check that a type is the same as all of the equivalence
+ // classes, if any.
+ template <class ValueType>
+ using for_type = typename IsEquivalenceClass<
+ EquivalenceClassType<Funs...>>::template for_type<ValueType>;
+};
+
+template <class... TailEqClasses>
+struct AreEquivalenceClassesOfTheSameType<
+ EquivalenceClassType<>, EquivalenceClassType<>, TailEqClasses...>
+ : AreEquivalenceClassesOfTheSameType<TailEqClasses...>::self {};
+
+template <class HeadNextFun, class... TailNextFuns, class... TailEqClasses>
+struct AreEquivalenceClassesOfTheSameType<
+ EquivalenceClassType<>, EquivalenceClassType<HeadNextFun, TailNextFuns...>,
+ TailEqClasses...>
+ : AreEquivalenceClassesOfTheSameType<
+ EquivalenceClassType<HeadNextFun, TailNextFuns...>,
+ TailEqClasses...>::self {};
+
+template <class HeadHeadFun, class... TailHeadFuns, class... TailEqClasses>
+struct AreEquivalenceClassesOfTheSameType<
+ EquivalenceClassType<HeadHeadFun, TailHeadFuns...>, EquivalenceClassType<>,
+ TailEqClasses...>
+ : AreEquivalenceClassesOfTheSameType<
+ EquivalenceClassType<HeadHeadFun, TailHeadFuns...>,
+ TailEqClasses...>::self {};
+
+template <class HeadHeadFun, class... TailHeadFuns, class HeadNextFun,
+ class... TailNextFuns, class... TailEqClasses>
+struct AreEquivalenceClassesOfTheSameType<
+ EquivalenceClassType<HeadHeadFun, TailHeadFuns...>,
+ EquivalenceClassType<HeadNextFun, TailNextFuns...>, TailEqClasses...>
+ : y_absl::conditional_t<
+ IsNullaryCallable<HeadNextFun>::template for_type<
+ typename IsNullaryCallable<HeadHeadFun>::result_type>::value,
+ AreEquivalenceClassesOfTheSameType<
+ EquivalenceClassType<HeadHeadFun, TailHeadFuns...>,
+ TailEqClasses...>,
+ std::false_type> {};
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// Execute a function for each passed-in parameter.
+template <class Fun, class... Cases>
+void ForEachParameter(const Fun& fun, const Cases&... cases) {
+ const std::initializer_list<bool> results = {
+ (static_cast<void>(fun(cases)), true)...};
+
+ (void)results;
+}
+
+// Execute a function on each passed-in parameter (using a bound function).
+template <class Fun>
+struct ForEachParameterFun {
+ template <class... T>
+ void operator()(const T&... cases) const {
+ (ForEachParameter)(fun, cases...);
+ }
+
+ Fun fun;
+};
+
+// Execute a function on each element of a tuple.
+template <class Fun, class Tup>
+void ForEachTupleElement(const Fun& fun, const Tup& tup) {
+ y_absl::apply(ForEachParameterFun<Fun>{fun}, tup);
+}
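+
+// For example (illustrative): ForEachTupleElement(fun, std::make_tuple(1, 2u))
+// invokes fun(1) and then fun(2u).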
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Execute a function for each combination of two elements of a tuple, including
+// combinations of an element with itself.
+template <class Fun, class... T>
+struct ForEveryTwoImpl {
+ template <class Lhs>
+ struct WithBoundLhs {
+ template <class Rhs>
+ void operator()(const Rhs& rhs) const {
+ fun(lhs, rhs);
+ }
+
+ Fun fun;
+ Lhs lhs;
+ };
+
+ template <class Lhs>
+ void operator()(const Lhs& lhs) const {
+ (ForEachTupleElement)(WithBoundLhs<Lhs>{fun, lhs}, args);
+ }
+
+ Fun fun;
+ std::tuple<T...> args;
+};
+
+template <class Fun, class... T>
+void ForEveryTwo(const Fun& fun, std::tuple<T...> args) {
+ (ForEachTupleElement)(ForEveryTwoImpl<Fun, T...>{fun, args}, args);
+}
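+//
+// For example (illustrative): ForEveryTwo(fun, std::make_tuple(a, b)) invokes
+// fun(a, a), fun(a, b), fun(b, a), and fun(b, b).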
+//
+////////////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Insert all values into an associative container
+template<class Container>
+void InsertEach(Container* cont) {
+}
+
+template<class Container, class H, class... T>
+void InsertEach(Container* cont, H&& head, T&&... tail) {
+ cont->insert(head);
+ (InsertEach)(cont, tail...);
+}
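+//
+// For example (illustrative): given std::set<int> s, InsertEach(&s, 3, 1, 3)
+// leaves s containing {1, 3}.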
+//
+////////////////////////////////////////////////////////////////////////////////
+// A template with a nested "Invoke" static-member-function that executes a
+// passed-in Callable when `Condition` is true; otherwise, it ignores the
+// Callable. This is useful for executing a function object with a condition
+// that corresponds to whether or not the Callable can be safely instantiated.
+// It has some overlapping uses with C++17 `if constexpr`.
+template <bool Condition>
+struct If;
+
+template <>
+struct If</*Condition =*/false> {
+ template <class Fun, class... P>
+ static void Invoke(const Fun& /*fun*/, P&&... /*args*/) {}
+};
+
+template <>
+struct If</*Condition =*/true> {
+ template <class Fun, class... P>
+ static void Invoke(const Fun& fun, P&&... args) {
+ // TODO(calabrese) Use std::invoke equivalent instead of function-call.
+ fun(y_absl::forward<P>(args)...);
+ }
+};
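+
+// For example (an illustrative sketch; `check_copy` and `value` are
+// hypothetical names):
+//
+//   If<std::is_copy_constructible<T>::value>::Invoke(check_copy, value);
+//
+// calls check_copy(value) when T is copy-constructible and does nothing
+// otherwise, without instantiating check_copy's call operator in the false
+// case.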
+
+//
+// ABSL_INTERNAL_STRINGIZE(...)
+//
+// This variadic macro transforms its arguments into a c-string literal after
+// expansion.
+//
+// Example:
+//
+// ABSL_INTERNAL_STRINGIZE(std::array<int, 10>)
+//
+// Results in:
+//
+// "std::array<int, 10>"
+#define ABSL_INTERNAL_STRINGIZE(...) ABSL_INTERNAL_STRINGIZE_IMPL((__VA_ARGS__))
+#define ABSL_INTERNAL_STRINGIZE_IMPL(arg) ABSL_INTERNAL_STRINGIZE_IMPL2 arg
+#define ABSL_INTERNAL_STRINGIZE_IMPL2(...) #__VA_ARGS__
+
+} // namespace types_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_HELPERS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h
new file mode 100644
index 00000000000..1004a6a68ac
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h
@@ -0,0 +1,396 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef ABSL_TYPES_INTERNAL_OPTIONAL_H_
+#define ABSL_TYPES_INTERNAL_OPTIONAL_H_
+
+#include <functional>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/internal/inline_variable.h"
+#include "y_absl/memory/memory.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/utility/utility.h"
+
+// ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
+//
+// Inheriting constructors are supported in GCC 4.8+, Clang 3.3+, and MSVC 2015.
+// __cpp_inheriting_constructors is a predefined macro and a recommended way to
+// check for this language feature, but GCC doesn't support it until 5.0 and
+// Clang doesn't support it until 3.6.
+// Also, MSVC 2015 has a bug: it doesn't inherit the constexpr template
+// constructor. For example, the following code won't work on MSVC 2015 Update3:
+// struct Base {
+// int t;
+// template <typename T>
+// constexpr Base(T t_) : t(t_) {}
+// };
+// struct Foo : Base {
+// using Base::Base;
+// };
+// constexpr Foo foo(0); // doesn't work on MSVC 2015
+#if defined(__clang__)
+#if __has_feature(cxx_inheriting_constructors)
+#define ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS 1
+#endif
+#elif (defined(__GNUC__) && \
+ (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 8)) || \
+ (__cpp_inheriting_constructors >= 200802) || \
+ (defined(_MSC_VER) && _MSC_VER >= 1910)
+#define ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS 1
+#endif
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// Forward declaration
+template <typename T>
+class optional;
+
+namespace optional_internal {
+
+// This tag type is used as a constructor parameter type for `nullopt_t`.
+struct init_t {
+ explicit init_t() = default;
+};
+
+struct empty_struct {};
+
+// This class stores the data in optional<T>.
+// It is specialized based on whether T is trivially destructible.
+// This is the specialization for non-trivially-destructible types.
+template <typename T, bool unused = std::is_trivially_destructible<T>::value>
+class optional_data_dtor_base {
+ struct dummy_type {
+ static_assert(sizeof(T) % sizeof(empty_struct) == 0, "");
+ // Use an array to avoid GCC 6 placement-new warning.
+ empty_struct data[sizeof(T) / sizeof(empty_struct)];
+ };
+
+ protected:
+ // Whether there is data or not.
+ bool engaged_;
+ // Data storage
+ union {
+ T data_;
+ dummy_type dummy_;
+ };
+
+ void destruct() noexcept {
+ if (engaged_) {
+ data_.~T();
+ engaged_ = false;
+ }
+ }
+
+ // dummy_ must be initialized for constexpr constructor.
+ constexpr optional_data_dtor_base() noexcept : engaged_(false), dummy_{{}} {}
+
+ template <typename... Args>
+ constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
+ : engaged_(true), data_(y_absl::forward<Args>(args)...) {}
+
+ ~optional_data_dtor_base() { destruct(); }
+};
+
+// Specialization for trivially destructible type.
+template <typename T>
+class optional_data_dtor_base<T, true> {
+ struct dummy_type {
+ static_assert(sizeof(T) % sizeof(empty_struct) == 0, "");
+ // Use array to avoid GCC 6 placement-new warning.
+ empty_struct data[sizeof(T) / sizeof(empty_struct)];
+ };
+
+ protected:
+ // Whether there is data or not.
+ bool engaged_;
+ // Data storage
+ union {
+ T data_;
+ dummy_type dummy_;
+ };
+ void destruct() noexcept { engaged_ = false; }
+
+ // dummy_ must be initialized for constexpr constructor.
+ constexpr optional_data_dtor_base() noexcept : engaged_(false), dummy_{{}} {}
+
+ template <typename... Args>
+ constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
+ : engaged_(true), data_(y_absl::forward<Args>(args)...) {}
+};
+
+template <typename T>
+class optional_data_base : public optional_data_dtor_base<T> {
+ protected:
+ using base = optional_data_dtor_base<T>;
+#ifdef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
+ using base::base;
+#else
+ optional_data_base() = default;
+
+ template <typename... Args>
+ constexpr explicit optional_data_base(in_place_t t, Args&&... args)
+ : base(t, y_absl::forward<Args>(args)...) {}
+#endif
+
+ template <typename... Args>
+ void construct(Args&&... args) {
+ // Use dummy_'s address to work around casting cv-qualified T* to void*.
+ ::new (static_cast<void*>(&this->dummy_)) T(std::forward<Args>(args)...);
+ this->engaged_ = true;
+ }
+
+ template <typename U>
+ void assign(U&& u) {
+ if (this->engaged_) {
+ this->data_ = std::forward<U>(u);
+ } else {
+ construct(std::forward<U>(u));
+ }
+ }
+};
+
+// TODO(y_absl-team): Add another class using
+// std::is_trivially_move_constructible trait when available to match
+// http://cplusplus.github.io/LWG/lwg-defects.html#2900, for types that
+// have trivial move but nontrivial copy.
+// Also, we should be checking is_trivially_copyable here, which is not
+// supported now, so we use is_trivially_* traits instead.
+template <typename T,
+ bool unused = y_absl::is_trivially_copy_constructible<T>::value&&
+ y_absl::is_trivially_copy_assignable<typename std::remove_cv<
+ T>::type>::value&& std::is_trivially_destructible<T>::value>
+class optional_data;
+
+// Trivially copyable types
+template <typename T>
+class optional_data<T, true> : public optional_data_base<T> {
+ protected:
+#ifdef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
+ using optional_data_base<T>::optional_data_base;
+#else
+ optional_data() = default;
+
+ template <typename... Args>
+ constexpr explicit optional_data(in_place_t t, Args&&... args)
+ : optional_data_base<T>(t, y_absl::forward<Args>(args)...) {}
+#endif
+};
+
+template <typename T>
+class optional_data<T, false> : public optional_data_base<T> {
+ protected:
+#ifdef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
+ using optional_data_base<T>::optional_data_base;
+#else
+ template <typename... Args>
+ constexpr explicit optional_data(in_place_t t, Args&&... args)
+ : optional_data_base<T>(t, y_absl::forward<Args>(args)...) {}
+#endif
+
+ optional_data() = default;
+
+ optional_data(const optional_data& rhs) : optional_data_base<T>() {
+ if (rhs.engaged_) {
+ this->construct(rhs.data_);
+ }
+ }
+
+ optional_data(optional_data&& rhs) noexcept(
+ y_absl::default_allocator_is_nothrow::value ||
+ std::is_nothrow_move_constructible<T>::value)
+ : optional_data_base<T>() {
+ if (rhs.engaged_) {
+ this->construct(std::move(rhs.data_));
+ }
+ }
+
+ optional_data& operator=(const optional_data& rhs) {
+ if (rhs.engaged_) {
+ this->assign(rhs.data_);
+ } else {
+ this->destruct();
+ }
+ return *this;
+ }
+
+ optional_data& operator=(optional_data&& rhs) noexcept(
+ std::is_nothrow_move_assignable<T>::value&&
+ std::is_nothrow_move_constructible<T>::value) {
+ if (rhs.engaged_) {
+ this->assign(std::move(rhs.data_));
+ } else {
+ this->destruct();
+ }
+ return *this;
+ }
+};
+
+// Ordered by level of restriction, from low to high.
+// Copyable implies movable.
+enum class copy_traits { copyable = 0, movable = 1, non_movable = 2 };
+
+// Base class for enabling/disabling copy/move constructor.
+template <copy_traits>
+class optional_ctor_base;
+
+template <>
+class optional_ctor_base<copy_traits::copyable> {
+ public:
+ constexpr optional_ctor_base() = default;
+ optional_ctor_base(const optional_ctor_base&) = default;
+ optional_ctor_base(optional_ctor_base&&) = default;
+ optional_ctor_base& operator=(const optional_ctor_base&) = default;
+ optional_ctor_base& operator=(optional_ctor_base&&) = default;
+};
+
+template <>
+class optional_ctor_base<copy_traits::movable> {
+ public:
+ constexpr optional_ctor_base() = default;
+ optional_ctor_base(const optional_ctor_base&) = delete;
+ optional_ctor_base(optional_ctor_base&&) = default;
+ optional_ctor_base& operator=(const optional_ctor_base&) = default;
+ optional_ctor_base& operator=(optional_ctor_base&&) = default;
+};
+
+template <>
+class optional_ctor_base<copy_traits::non_movable> {
+ public:
+ constexpr optional_ctor_base() = default;
+ optional_ctor_base(const optional_ctor_base&) = delete;
+ optional_ctor_base(optional_ctor_base&&) = delete;
+ optional_ctor_base& operator=(const optional_ctor_base&) = default;
+ optional_ctor_base& operator=(optional_ctor_base&&) = default;
+};
+
+// Base class for enabling/disabling copy/move assignment.
+template <copy_traits>
+class optional_assign_base;
+
+template <>
+class optional_assign_base<copy_traits::copyable> {
+ public:
+ constexpr optional_assign_base() = default;
+ optional_assign_base(const optional_assign_base&) = default;
+ optional_assign_base(optional_assign_base&&) = default;
+ optional_assign_base& operator=(const optional_assign_base&) = default;
+ optional_assign_base& operator=(optional_assign_base&&) = default;
+};
+
+template <>
+class optional_assign_base<copy_traits::movable> {
+ public:
+ constexpr optional_assign_base() = default;
+ optional_assign_base(const optional_assign_base&) = default;
+ optional_assign_base(optional_assign_base&&) = default;
+ optional_assign_base& operator=(const optional_assign_base&) = delete;
+ optional_assign_base& operator=(optional_assign_base&&) = default;
+};
+
+template <>
+class optional_assign_base<copy_traits::non_movable> {
+ public:
+ constexpr optional_assign_base() = default;
+ optional_assign_base(const optional_assign_base&) = default;
+ optional_assign_base(optional_assign_base&&) = default;
+ optional_assign_base& operator=(const optional_assign_base&) = delete;
+ optional_assign_base& operator=(optional_assign_base&&) = delete;
+};
+
+template <typename T>
+struct ctor_copy_traits {
+ static constexpr copy_traits traits =
+ std::is_copy_constructible<T>::value
+ ? copy_traits::copyable
+ : std::is_move_constructible<T>::value ? copy_traits::movable
+ : copy_traits::non_movable;
+};
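+
+// For instance (illustrative): ctor_copy_traits<std::unique_ptr<int>>::traits
+// is copy_traits::movable, since std::unique_ptr is move-constructible but not
+// copy-constructible.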
+
+template <typename T>
+struct assign_copy_traits {
+ static constexpr copy_traits traits =
+ y_absl::is_copy_assignable<T>::value && std::is_copy_constructible<T>::value
+ ? copy_traits::copyable
+ : y_absl::is_move_assignable<T>::value &&
+ std::is_move_constructible<T>::value
+ ? copy_traits::movable
+ : copy_traits::non_movable;
+};
+
+// Whether T is constructible or convertible from optional<U>.
+template <typename T, typename U>
+struct is_constructible_convertible_from_optional
+ : std::integral_constant<
+ bool, std::is_constructible<T, optional<U>&>::value ||
+ std::is_constructible<T, optional<U>&&>::value ||
+ std::is_constructible<T, const optional<U>&>::value ||
+ std::is_constructible<T, const optional<U>&&>::value ||
+ std::is_convertible<optional<U>&, T>::value ||
+ std::is_convertible<optional<U>&&, T>::value ||
+ std::is_convertible<const optional<U>&, T>::value ||
+ std::is_convertible<const optional<U>&&, T>::value> {};
+
+// Whether T is constructible or convertible or assignable from optional<U>.
+template <typename T, typename U>
+struct is_constructible_convertible_assignable_from_optional
+ : std::integral_constant<
+ bool, is_constructible_convertible_from_optional<T, U>::value ||
+ std::is_assignable<T&, optional<U>&>::value ||
+ std::is_assignable<T&, optional<U>&&>::value ||
+ std::is_assignable<T&, const optional<U>&>::value ||
+ std::is_assignable<T&, const optional<U>&&>::value> {};
+
+// Helper function used by [optional.relops], [optional.comp_with_t],
+// for checking whether an expression is convertible to bool.
+bool convertible_to_bool(bool);
+
+// Base class for std::hash<y_absl::optional<T>>:
+// If std::hash<std::remove_const_t<T>> is enabled, it provides operator() to
+// compute the hash; Otherwise, it is disabled.
+// Reference N4659 23.14.15 [unord.hash].
+template <typename T, typename = size_t>
+struct optional_hash_base {
+ optional_hash_base() = delete;
+ optional_hash_base(const optional_hash_base&) = delete;
+ optional_hash_base(optional_hash_base&&) = delete;
+ optional_hash_base& operator=(const optional_hash_base&) = delete;
+ optional_hash_base& operator=(optional_hash_base&&) = delete;
+};
+
+template <typename T>
+struct optional_hash_base<T, decltype(std::hash<y_absl::remove_const_t<T> >()(
+ std::declval<y_absl::remove_const_t<T> >()))> {
+ using argument_type = y_absl::optional<T>;
+ using result_type = size_t;
+ size_t operator()(const y_absl::optional<T>& opt) const {
+ y_absl::type_traits_internal::AssertHashEnabled<y_absl::remove_const_t<T>>();
+ if (opt) {
+ return std::hash<y_absl::remove_const_t<T> >()(*opt);
+ } else {
+ return static_cast<size_t>(0x297814aaad196e6dULL);
+ }
+ }
+};
+
+} // namespace optional_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#undef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
+
+#endif // ABSL_TYPES_INTERNAL_OPTIONAL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/parentheses.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/parentheses.h
new file mode 100644
index 00000000000..5aebee8fdeb
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/parentheses.h
@@ -0,0 +1,34 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// parentheses.h
+// -----------------------------------------------------------------------------
+//
+// This file contains macros that expand to a left parenthesis and a right
+// parenthesis. These are in their own file and are generated from macros
+// because otherwise clang-format gets confused and clang-format off directives
+// do not help.
+//
+// The parentheses macros are used when wanting to require a rescan before
+// expansion of parenthesized text appearing after a function-style macro name.
+
+#ifndef ABSL_TYPES_INTERNAL_PARENTHESES_H_
+#define ABSL_TYPES_INTERNAL_PARENTHESES_H_
+
+#define ABSL_INTERNAL_LPAREN (
+
+#define ABSL_INTERNAL_RPAREN )
+
+#endif // ABSL_TYPES_INTERNAL_PARENTHESES_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/span.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/span.h
new file mode 100644
index 00000000000..6b36c3b0d09
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/span.h
@@ -0,0 +1,128 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef ABSL_TYPES_INTERNAL_SPAN_H_
+#define ABSL_TYPES_INTERNAL_SPAN_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <util/generic/string.h>
+#include <type_traits>
+
+#include "y_absl/algorithm/algorithm.h"
+#include "y_absl/base/internal/throw_delegate.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace span_internal {
+// A constexpr min function
+constexpr size_t Min(size_t a, size_t b) noexcept { return a < b ? a : b; }
+
+// Wrappers for access to container data pointers.
+template <typename C>
+constexpr auto GetDataImpl(C& c, char) noexcept // NOLINT(runtime/references)
+ -> decltype(c.data()) {
+ return c.data();
+}
+
+// Before C++17, TString::data returns a const char* in all cases.
+inline char* GetDataImpl(TString& s, // NOLINT(runtime/references)
+ int) noexcept {
+ return &s[0];
+}
+
+template <typename C>
+constexpr auto GetData(C& c) noexcept // NOLINT(runtime/references)
+ -> decltype(GetDataImpl(c, 0)) {
+ return GetDataImpl(c, 0);
+}
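+
+// Note: The char/int parameters of GetDataImpl implement tag dispatch. GetData
+// passes the int literal 0, so the non-template TString overload is preferred
+// when it is viable; the generic template, which takes char, requires an
+// int-to-char conversion and is chosen only as a fallback.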
+
+// Detection idioms for size() and data().
+template <typename C>
+using HasSize =
+ std::is_integral<y_absl::decay_t<decltype(std::declval<C&>().size())>>;
+
+// We want to enable conversion from vector<T*> to Span<const T* const> but
+// disable conversion from vector<Derived> to Span<Base>. Here we use
+// the fact that U** is convertible to Q* const* if and only if Q is the same
+// type or a more cv-qualified version of U. We also decay the result type of
+// data() to avoid problems with classes that have a member function data()
+// that returns a reference.
+template <typename T, typename C>
+using HasData =
+ std::is_convertible<y_absl::decay_t<decltype(GetData(std::declval<C&>()))>*,
+ T* const*>;
+
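+// For example (illustrative): HasData<const int, std::vector<int>> is true,
+// since int** converts to const int* const* (a qualification conversion),
+// while HasData<Base, std::vector<Derived>> is false, since Derived** does not
+// convert to Base* const*.
+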
+// Extracts value type from a Container
+template <typename C>
+struct ElementType {
+ using type = typename y_absl::remove_reference_t<C>::value_type;
+};
+
+template <typename T, size_t N>
+struct ElementType<T (&)[N]> {
+ using type = T;
+};
+
+template <typename C>
+using ElementT = typename ElementType<C>::type;
+
+template <typename T>
+using EnableIfMutable =
+ typename std::enable_if<!std::is_const<T>::value, int>::type;
+
+template <template <typename> class SpanT, typename T>
+bool EqualImpl(SpanT<T> a, SpanT<T> b) {
+ static_assert(std::is_const<T>::value, "");
+ return y_absl::equal(a.begin(), a.end(), b.begin(), b.end());
+}
+
+template <template <typename> class SpanT, typename T>
+bool LessThanImpl(SpanT<T> a, SpanT<T> b) {
+ // We can't use value_type since that is remove_cv_t<T>, so we go the long way
+ // around.
+ static_assert(std::is_const<T>::value, "");
+ return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+}
+
+// The `IsConvertible` classes here are needed because of the
+// `std::is_convertible` bug in libcxx when compiled with GCC. This build
+// configuration is used by Android NDK toolchain. Reference link:
+// https://bugs.llvm.org/show_bug.cgi?id=27538.
+template <typename From, typename To>
+struct IsConvertibleHelper {
+ private:
+ static std::true_type testval(To);
+ static std::false_type testval(...);
+
+ public:
+ using type = decltype(testval(std::declval<From>()));
+};
+
+template <typename From, typename To>
+struct IsConvertible : IsConvertibleHelper<From, To>::type {};
+
+// TODO(zhangxy): replace `IsConvertible` with `std::is_convertible` once the
+// older version of libcxx is not supported.
+template <typename From, typename To>
+using EnableIfConvertibleTo =
+ typename std::enable_if<IsConvertible<From, To>::value>::type;
+} // namespace span_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TYPES_INTERNAL_SPAN_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/transform_args.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/transform_args.h
new file mode 100644
index 00000000000..4a0ab42ac49
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/transform_args.h
@@ -0,0 +1,246 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// transform_args.h
+// -----------------------------------------------------------------------------
+//
+// This file contains a higher-order macro that "transforms" each element of a
+// variadic argument list by a provided secondary macro.
+
+#ifndef ABSL_TYPES_INTERNAL_TRANSFORM_ARGS_H_
+#define ABSL_TYPES_INTERNAL_TRANSFORM_ARGS_H_
+
+//
+// ABSL_INTERNAL_CAT(a, b)
+//
+// This macro takes two arguments and concatenates them together via ## after
+// expansion.
+//
+// Example:
+//
+// ABSL_INTERNAL_CAT(foo_, bar)
+//
+// Results in:
+//
+// foo_bar
+#define ABSL_INTERNAL_CAT(a, b) ABSL_INTERNAL_CAT_IMPL(a, b)
+#define ABSL_INTERNAL_CAT_IMPL(a, b) a##b
+
+//
+// ABSL_INTERNAL_TRANSFORM_ARGS(m, ...)
+//
+// This macro takes another macro as an argument followed by a trailing series
+// of additional parameters (up to 32 additional arguments). It invokes the
+// passed-in macro once for each of the additional arguments, with the
+// expansions separated by commas.
+//
+// Example:
+//
+// ABSL_INTERNAL_TRANSFORM_ARGS(MY_MACRO, a, b, c)
+//
+// Results in:
+//
+// MY_MACRO(a), MY_MACRO(b), MY_MACRO(c)
+//
+// TODO(calabrese) Handle no arguments as a special case.
+#define ABSL_INTERNAL_TRANSFORM_ARGS(m, ...) \
+ ABSL_INTERNAL_CAT(ABSL_INTERNAL_TRANSFORM_ARGS, \
+ ABSL_INTERNAL_NUM_ARGS(__VA_ARGS__)) \
+ (m, __VA_ARGS__)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS1(m, a0) m(a0)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS2(m, a0, a1) m(a0), m(a1)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS3(m, a0, a1, a2) m(a0), m(a1), m(a2)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS4(m, a0, a1, a2, a3) \
+ m(a0), m(a1), m(a2), m(a3)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS5(m, a0, a1, a2, a3, a4) \
+ m(a0), m(a1), m(a2), m(a3), m(a4)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS6(m, a0, a1, a2, a3, a4, a5) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS7(m, a0, a1, a2, a3, a4, a5, a6) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS8(m, a0, a1, a2, a3, a4, a5, a6, a7) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS9(m, a0, a1, a2, a3, a4, a5, a6, a7, a8) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS10(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS11(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), m(a10)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS12(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS13(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS14(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS15(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS16(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS17(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15, a16) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS18(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15, a16, \
+ a17) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS19(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15, a16, \
+ a17, a18) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS20(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15, a16, \
+ a17, a18, a19) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS21(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15, a16, \
+ a17, a18, a19, a20) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS22(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15, a16, \
+ a17, a18, a19, a20, a21) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS23(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15, a16, \
+ a17, a18, a19, a20, a21, a22) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21), m(a22)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS24(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15, a16, \
+ a17, a18, a19, a20, a21, a22, a23) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21), m(a22), m(a23)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS25(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15, a16, \
+ a17, a18, a19, a20, a21, a22, a23, a24) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21), m(a22), m(a23), m(a24)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS26( \
+ m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, \
+ a16, a17, a18, a19, a20, a21, a22, a23, a24, a25) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS27( \
+ m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, \
+ a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS28( \
+ m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, \
+ a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26), m(a27)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS29( \
+ m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, \
+ a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26), m(a27), \
+ m(a28)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS30( \
+ m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, \
+ a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26), m(a27), \
+ m(a28), m(a29)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS31( \
+ m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, \
+ a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26), m(a27), \
+ m(a28), m(a29), m(a30)
+
+#define ABSL_INTERNAL_TRANSFORM_ARGS32(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, \
+ a9, a10, a11, a12, a13, a14, a15, a16, \
+ a17, a18, a19, a20, a21, a22, a23, a24, \
+ a25, a26, a27, a28, a29, a30, a31) \
+ m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \
+ m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \
+ m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26), m(a27), \
+ m(a28), m(a29), m(a30), m(a31)
+
+#define ABSL_INTERNAL_NUM_ARGS_IMPL(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, \
+ a10, a11, a12, a13, a14, a15, a16, a17, \
+ a18, a19, a20, a21, a22, a23, a24, a25, \
+ a26, a27, a28, a29, a30, a31, result, ...) \
+ result
+
+#define ABSL_INTERNAL_FORCE_EXPANSION(...) __VA_ARGS__
+
+#define ABSL_INTERNAL_NUM_ARGS(...) \
+ ABSL_INTERNAL_FORCE_EXPANSION(ABSL_INTERNAL_NUM_ARGS_IMPL( \
+ __VA_ARGS__, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, \
+ 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, ))
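+
+// For illustration, the counting above works by letting the caller's arguments
+// shift which literal lands in the `result` slot of
+// ABSL_INTERNAL_NUM_ARGS_IMPL. (`a`, `b`, and `c` below are arbitrary tokens.)
+//
+// Example:
+//
+//   ABSL_INTERNAL_NUM_ARGS(a, b, c)
+//
+// Results in:
+//
+//   3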
+
+#endif // ABSL_TYPES_INTERNAL_TRANSFORM_ARGS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h
new file mode 100644
index 00000000000..4495fb4de69
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h
@@ -0,0 +1,1646 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Implementation details of y_absl/types/variant.h, pulled into a
+// separate file so that they do not clutter the top of the API header.
+
+#ifndef ABSL_TYPES_variant_internal_H_
+#define ABSL_TYPES_variant_internal_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <memory>
+#include <stdexcept>
+#include <tuple>
+#include <type_traits>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/identity.h"
+#include "y_absl/base/internal/inline_variable.h"
+#include "y_absl/base/internal/invoke.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/types/bad_variant_access.h"
+#include "y_absl/utility/utility.h"
+
+#if !defined(ABSL_USES_STD_VARIANT)
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+template <class... Types>
+class variant;
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, static_cast<size_t>(-1));
+
+template <class T>
+struct variant_size;
+
+template <std::size_t I, class T>
+struct variant_alternative;
+
+namespace variant_internal {
+
+// NOTE: See specializations below for details.
+template <std::size_t I, class T>
+struct VariantAlternativeSfinae {};
+
+// Requires: I < variant_size_v<T>.
+//
+// Value: The Ith type of Types...
+template <std::size_t I, class T0, class... Tn>
+struct VariantAlternativeSfinae<I, variant<T0, Tn...>>
+ : VariantAlternativeSfinae<I - 1, variant<Tn...>> {};
+
+// Value: T0
+template <class T0, class... Ts>
+struct VariantAlternativeSfinae<0, variant<T0, Ts...>> {
+ using type = T0;
+};
+
+template <std::size_t I, class T>
+using VariantAlternativeSfinaeT = typename VariantAlternativeSfinae<I, T>::type;
+
+// NOTE: Requires T to be a reference type.
+template <class T, class U>
+struct GiveQualsTo;
+
+template <class T, class U>
+struct GiveQualsTo<T&, U> {
+ using type = U&;
+};
+
+template <class T, class U>
+struct GiveQualsTo<T&&, U> {
+ using type = U&&;
+};
+
+template <class T, class U>
+struct GiveQualsTo<const T&, U> {
+ using type = const U&;
+};
+
+template <class T, class U>
+struct GiveQualsTo<const T&&, U> {
+ using type = const U&&;
+};
+
+template <class T, class U>
+struct GiveQualsTo<volatile T&, U> {
+ using type = volatile U&;
+};
+
+template <class T, class U>
+struct GiveQualsTo<volatile T&&, U> {
+ using type = volatile U&&;
+};
+
+template <class T, class U>
+struct GiveQualsTo<volatile const T&, U> {
+ using type = volatile const U&;
+};
+
+template <class T, class U>
+struct GiveQualsTo<volatile const T&&, U> {
+ using type = volatile const U&&;
+};
+
+template <class T, class U>
+using GiveQualsToT = typename GiveQualsTo<T, U>::type;
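+
+// For illustration (`Foo` and `Bar` are arbitrary placeholder types, not part
+// of this header):
+//
+//   GiveQualsToT<const Foo&, Bar>      // -> const Bar&
+//   GiveQualsToT<volatile Foo&&, Bar>  // -> volatile Bar&&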
+
+// Convenience alias, since size_t integral_constant is used a lot in this file.
+template <std::size_t I>
+using SizeT = std::integral_constant<std::size_t, I>;
+
+using NPos = SizeT<variant_npos>;
+
+template <class Variant, class T, class = void>
+struct IndexOfConstructedType {};
+
+template <std::size_t I, class Variant>
+struct VariantAccessResultImpl;
+
+template <std::size_t I, template <class...> class VariantTemplate, class... T>
+struct VariantAccessResultImpl<I, VariantTemplate<T...>&> {
+  using type = typename y_absl::variant_alternative<I, variant<T...>>::type&;
+};
+
+template <std::size_t I, template <class...> class VariantTemplate, class... T>
+struct VariantAccessResultImpl<I, const VariantTemplate<T...>&> {
+  using type =
+      const typename y_absl::variant_alternative<I, variant<T...>>::type&;
+};
+
+template <std::size_t I, template <class...> class VariantTemplate, class... T>
+struct VariantAccessResultImpl<I, VariantTemplate<T...>&&> {
+  using type = typename y_absl::variant_alternative<I, variant<T...>>::type&&;
+};
+
+template <std::size_t I, template <class...> class VariantTemplate, class... T>
+struct VariantAccessResultImpl<I, const VariantTemplate<T...>&&> {
+  using type =
+      const typename y_absl::variant_alternative<I, variant<T...>>::type&&;
+};
+
+template <std::size_t I, class Variant>
+using VariantAccessResult =
+ typename VariantAccessResultImpl<I, Variant&&>::type;
+
+// NOTE: This is used instead of std::array to reduce instantiation overhead.
+template <class T, std::size_t Size>
+struct SimpleArray {
+ static_assert(Size != 0, "");
+ T value[Size];
+};
+
+template <class T>
+struct AccessedType {
+ using type = T;
+};
+
+template <class T>
+using AccessedTypeT = typename AccessedType<T>::type;
+
+template <class T, std::size_t Size>
+struct AccessedType<SimpleArray<T, Size>> {
+ using type = AccessedTypeT<T>;
+};
+
+template <class T>
+constexpr T AccessSimpleArray(const T& value) {
+ return value;
+}
+
+template <class T, std::size_t Size, class... SizeT>
+constexpr AccessedTypeT<T> AccessSimpleArray(const SimpleArray<T, Size>& table,
+ std::size_t head_index,
+ SizeT... tail_indices) {
+ return AccessSimpleArray(table.value[head_index], tail_indices...);
+}
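+
+// For illustration, a sketch of the recursive indexing above with a
+// hypothetical two-dimensional table:
+//
+//   SimpleArray<SimpleArray<int, 4>, 3> table = {...};
+//   AccessSimpleArray(table, 1, 2)  // -> table.value[1].value[2]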
+
+// Note: This is intentionally an alias rather than a class template, since
+// aliases are effectively free to instantiate.
+template <class T>
+using AlwaysZero = SizeT<0>;
+
+template <class Op, class... Vs>
+struct VisitIndicesResultImpl {
+ using type = y_absl::result_of_t<Op(AlwaysZero<Vs>...)>;
+};
+
+template <class Op, class... Vs>
+using VisitIndicesResultT = typename VisitIndicesResultImpl<Op, Vs...>::type;
+
+template <class ReturnType, class FunctionObject, class EndIndices,
+ class BoundIndices>
+struct MakeVisitationMatrix;
+
+template <class ReturnType, class FunctionObject, std::size_t... Indices>
+constexpr ReturnType call_with_indices(FunctionObject&& function) {
+ static_assert(
+ std::is_same<ReturnType, decltype(std::declval<FunctionObject>()(
+ SizeT<Indices>()...))>::value,
+ "Not all visitation overloads have the same return type.");
+ return y_absl::forward<FunctionObject>(function)(SizeT<Indices>()...);
+}
+
+template <class ReturnType, class FunctionObject, std::size_t... BoundIndices>
+struct MakeVisitationMatrix<ReturnType, FunctionObject, index_sequence<>,
+ index_sequence<BoundIndices...>> {
+ using ResultType = ReturnType (*)(FunctionObject&&);
+ static constexpr ResultType Run() {
+ return &call_with_indices<ReturnType, FunctionObject,
+ (BoundIndices - 1)...>;
+ }
+};
+
+template <typename Is, std::size_t J>
+struct AppendToIndexSequence;
+
+template <typename Is, std::size_t J>
+using AppendToIndexSequenceT = typename AppendToIndexSequence<Is, J>::type;
+
+template <std::size_t... Is, std::size_t J>
+struct AppendToIndexSequence<index_sequence<Is...>, J> {
+ using type = index_sequence<Is..., J>;
+};
+
+template <class ReturnType, class FunctionObject, class EndIndices,
+ class CurrIndices, class BoundIndices>
+struct MakeVisitationMatrixImpl;
+
+template <class ReturnType, class FunctionObject, class EndIndices,
+ std::size_t... CurrIndices, class BoundIndices>
+struct MakeVisitationMatrixImpl<ReturnType, FunctionObject, EndIndices,
+ index_sequence<CurrIndices...>, BoundIndices> {
+ using ResultType = SimpleArray<
+ typename MakeVisitationMatrix<ReturnType, FunctionObject, EndIndices,
+ index_sequence<>>::ResultType,
+ sizeof...(CurrIndices)>;
+
+ static constexpr ResultType Run() {
+ return {{MakeVisitationMatrix<
+ ReturnType, FunctionObject, EndIndices,
+ AppendToIndexSequenceT<BoundIndices, CurrIndices>>::Run()...}};
+ }
+};
+
+template <class ReturnType, class FunctionObject, std::size_t HeadEndIndex,
+ std::size_t... TailEndIndices, std::size_t... BoundIndices>
+struct MakeVisitationMatrix<ReturnType, FunctionObject,
+ index_sequence<HeadEndIndex, TailEndIndices...>,
+ index_sequence<BoundIndices...>>
+ : MakeVisitationMatrixImpl<ReturnType, FunctionObject,
+ index_sequence<TailEndIndices...>,
+ y_absl::make_index_sequence<HeadEndIndex>,
+ index_sequence<BoundIndices...>> {};
+
+struct UnreachableSwitchCase {
+ template <class Op>
+ [[noreturn]] static VisitIndicesResultT<Op, std::size_t> Run(
+      Op&& op) {  // NOTE: `op` is referenced only by the fallback branch.
+#if ABSL_HAVE_BUILTIN(__builtin_unreachable) || \
+ (defined(__GNUC__) && !defined(__clang__))
+ __builtin_unreachable();
+#elif defined(_MSC_VER)
+ __assume(false);
+#else
+    // Fall back on assert(false), hoping the compiler recognizes it as an
+    // unreachability hint. NOTE: We use assert directly to increase the
+    // chances of exploiting an assume intrinsic.
+ assert(false); // NOLINT
+
+ // Hack to silence potential no return warning -- cause an infinite loop.
+ return Run(y_absl::forward<Op>(op));
+#endif // Checks for __builtin_unreachable
+ }
+};
+
+template <class Op, std::size_t I>
+struct ReachableSwitchCase {
+ static VisitIndicesResultT<Op, std::size_t> Run(Op&& op) {
+ return y_absl::base_internal::invoke(y_absl::forward<Op>(op), SizeT<I>());
+ }
+};
+
+// The number 33 is just a guess at a reasonable maximum for our switch. It is
+// not based on any analysis. The reason it is a power of 2 plus 1 instead of a
+// power of 2 is that the number was picked to correspond to a power-of-2
+// number of "normal" alternatives, plus one for the possibility of the user
+// providing "monostate" in addition to the more natural alternatives.
+ABSL_INTERNAL_INLINE_CONSTEXPR(std::size_t, MaxUnrolledVisitCases, 33);
+
+// Note: The default definition is for unreachable cases.
+template <bool IsReachable>
+struct PickCaseImpl {
+ template <class Op, std::size_t I>
+ using Apply = UnreachableSwitchCase;
+};
+
+template <>
+struct PickCaseImpl</*IsReachable =*/true> {
+ template <class Op, std::size_t I>
+ using Apply = ReachableSwitchCase<Op, I>;
+};
+
+// Note: This form of dance with template aliases is to make sure that we
+// instantiate a number of templates proportional to the number of variant
+// alternatives rather than a number of templates proportional to our
+// maximum unrolled amount of visitation cases (aliases are effectively
+// "free" whereas other template instantiations are costly).
+template <class Op, std::size_t I, std::size_t EndIndex>
+using PickCase = typename PickCaseImpl<(I < EndIndex)>::template Apply<Op, I>;
+
+template <class ReturnType>
+[[noreturn]] ReturnType TypedThrowBadVariantAccess() {
+ y_absl::variant_internal::ThrowBadVariantAccess();
+}
+
+// Given N variant sizes, determine the number of cases there would need to be
+// in a single switch-statement that would cover every possibility in the
+// corresponding N-ary visit operation.
+template <std::size_t... NumAlternatives>
+struct NumCasesOfSwitch;
+
+template <std::size_t HeadNumAlternatives, std::size_t... TailNumAlternatives>
+struct NumCasesOfSwitch<HeadNumAlternatives, TailNumAlternatives...> {
+ static constexpr std::size_t value =
+ (HeadNumAlternatives + 1) *
+ NumCasesOfSwitch<TailNumAlternatives...>::value;
+};
+
+template <>
+struct NumCasesOfSwitch<> {
+ static constexpr std::size_t value = 1;
+};
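+
+// For illustration, each variant contributes its alternative count plus one
+// (the extra case covers the valueless-by-exception state):
+//
+//   NumCasesOfSwitch<2, 3>::value  // -> (2 + 1) * (3 + 1) == 12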
+
+// A switch statement typically optimizes better than a table of function
+// pointers.
+template <std::size_t EndIndex>
+struct VisitIndicesSwitch {
+ static_assert(EndIndex <= MaxUnrolledVisitCases,
+ "Maximum unrolled switch size exceeded.");
+
+ template <class Op>
+ static VisitIndicesResultT<Op, std::size_t> Run(Op&& op, std::size_t i) {
+ switch (i) {
+ case 0:
+ return PickCase<Op, 0, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 1:
+ return PickCase<Op, 1, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 2:
+ return PickCase<Op, 2, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 3:
+ return PickCase<Op, 3, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 4:
+ return PickCase<Op, 4, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 5:
+ return PickCase<Op, 5, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 6:
+ return PickCase<Op, 6, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 7:
+ return PickCase<Op, 7, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 8:
+ return PickCase<Op, 8, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 9:
+ return PickCase<Op, 9, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 10:
+ return PickCase<Op, 10, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 11:
+ return PickCase<Op, 11, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 12:
+ return PickCase<Op, 12, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 13:
+ return PickCase<Op, 13, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 14:
+ return PickCase<Op, 14, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 15:
+ return PickCase<Op, 15, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 16:
+ return PickCase<Op, 16, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 17:
+ return PickCase<Op, 17, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 18:
+ return PickCase<Op, 18, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 19:
+ return PickCase<Op, 19, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 20:
+ return PickCase<Op, 20, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 21:
+ return PickCase<Op, 21, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 22:
+ return PickCase<Op, 22, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 23:
+ return PickCase<Op, 23, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 24:
+ return PickCase<Op, 24, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 25:
+ return PickCase<Op, 25, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 26:
+ return PickCase<Op, 26, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 27:
+ return PickCase<Op, 27, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 28:
+ return PickCase<Op, 28, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 29:
+ return PickCase<Op, 29, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 30:
+ return PickCase<Op, 30, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 31:
+ return PickCase<Op, 31, EndIndex>::Run(y_absl::forward<Op>(op));
+ case 32:
+ return PickCase<Op, 32, EndIndex>::Run(y_absl::forward<Op>(op));
+ default:
+ ABSL_ASSERT(i == variant_npos);
+ return y_absl::base_internal::invoke(y_absl::forward<Op>(op), NPos());
+ }
+ }
+};
+
+template <std::size_t... EndIndices>
+struct VisitIndicesFallback {
+ template <class Op, class... SizeT>
+ static VisitIndicesResultT<Op, SizeT...> Run(Op&& op, SizeT... indices) {
+ return AccessSimpleArray(
+ MakeVisitationMatrix<VisitIndicesResultT<Op, SizeT...>, Op,
+ index_sequence<(EndIndices + 1)...>,
+ index_sequence<>>::Run(),
+ (indices + 1)...)(y_absl::forward<Op>(op));
+ }
+};
+
+// Take an N-dimensional series of indices and convert them into a single index
+// without loss of information. The purpose of this is to be able to convert an
+// N-ary visit operation into a single switch statement.
+template <std::size_t...>
+struct FlattenIndices;
+
+template <std::size_t HeadSize, std::size_t... TailSize>
+struct FlattenIndices<HeadSize, TailSize...> {
+ template<class... SizeType>
+ static constexpr std::size_t Run(std::size_t head, SizeType... tail) {
+ return head + HeadSize * FlattenIndices<TailSize...>::Run(tail...);
+ }
+};
+
+template <>
+struct FlattenIndices<> {
+ static constexpr std::size_t Run() { return 0; }
+};
+
+// Take a single "flattened" index (flattened by FlattenIndices) and determine
+// the value of the index of one of the logically represented dimensions.
+template <std::size_t I, std::size_t IndexToGet, std::size_t HeadSize,
+ std::size_t... TailSize>
+struct UnflattenIndex {
+ static constexpr std::size_t value =
+ UnflattenIndex<I / HeadSize, IndexToGet - 1, TailSize...>::value;
+};
+
+template <std::size_t I, std::size_t HeadSize, std::size_t... TailSize>
+struct UnflattenIndex<I, 0, HeadSize, TailSize...> {
+ static constexpr std::size_t value = (I % HeadSize);
+};
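+
+// For illustration, a sketch with two dimensions of sizes 3 and 4 (assuming
+// i < 3 and j < 4):
+//
+//   FlattenIndices<3, 4>::Run(i, j)     // -> i + 3 * j
+//   UnflattenIndex<i + 3 * j, 0, 3, 4>  // -> ::value == i
+//   UnflattenIndex<i + 3 * j, 1, 3, 4>  // -> ::value == j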
+
+// The backend for converting an N-ary visit operation into a unary visit.
+template <class IndexSequence, std::size_t... EndIndices>
+struct VisitIndicesVariadicImpl;
+
+template <std::size_t... N, std::size_t... EndIndices>
+struct VisitIndicesVariadicImpl<y_absl::index_sequence<N...>, EndIndices...> {
+  // A type that takes an N-ary function object and converts it to a unary
+  // function object that takes a single, flattened index and "unflattens" it
+  // into its individual dimensions when forwarding to the wrapped object.
+ template <class Op>
+ struct FlattenedOp {
+ template <std::size_t I>
+ VisitIndicesResultT<Op, decltype(EndIndices)...> operator()(
+ SizeT<I> /*index*/) && {
+ return base_internal::invoke(
+ y_absl::forward<Op>(op),
+ SizeT<UnflattenIndex<I, N, (EndIndices + 1)...>::value -
+ std::size_t{1}>()...);
+ }
+
+ Op&& op;
+ };
+
+ template <class Op, class... SizeType>
+ static VisitIndicesResultT<Op, decltype(EndIndices)...> Run(
+ Op&& op, SizeType... i) {
+ return VisitIndicesSwitch<NumCasesOfSwitch<EndIndices...>::value>::Run(
+ FlattenedOp<Op>{y_absl::forward<Op>(op)},
+ FlattenIndices<(EndIndices + std::size_t{1})...>::Run(
+ (i + std::size_t{1})...));
+ }
+};
+
+template <std::size_t... EndIndices>
+struct VisitIndicesVariadic
+ : VisitIndicesVariadicImpl<y_absl::make_index_sequence<sizeof...(EndIndices)>,
+ EndIndices...> {};
+
+// This implementation will flatten N-ary visit operations into a single switch
+// statement when the number of cases would not exceed our maximum specified
+// switch-statement size.
+// TODO(calabrese)
+// Based on benchmarks, determine whether the function table approach actually
+// does optimize better than a chain of switch statements and possibly update
+// the implementation accordingly. Also consider increasing the maximum switch
+// size.
+template <std::size_t... EndIndices>
+struct VisitIndices
+ : y_absl::conditional_t<(NumCasesOfSwitch<EndIndices...>::value <=
+ MaxUnrolledVisitCases),
+ VisitIndicesVariadic<EndIndices...>,
+ VisitIndicesFallback<EndIndices...>> {};
+
+template <std::size_t EndIndex>
+struct VisitIndices<EndIndex>
+ : y_absl::conditional_t<(EndIndex <= MaxUnrolledVisitCases),
+ VisitIndicesSwitch<EndIndex>,
+ VisitIndicesFallback<EndIndex>> {};
+
+// Suppress bogus warning on MSVC: MSVC complains that the `reinterpret_cast`
+// below is returning the address of a temporary or local object.
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4172)
+#endif // _MSC_VER
+
+// TODO(calabrese) std::launder
+// TODO(calabrese) constexpr
+// NOTE: DO NOT REMOVE the `inline` keyword, as it is necessary to work around
+// an MSVC bug. See https://github.com/abseil/abseil-cpp/issues/129 for details.
+template <class Self, std::size_t I>
+inline VariantAccessResult<I, Self> AccessUnion(Self&& self, SizeT<I> /*i*/) {
+ return reinterpret_cast<VariantAccessResult<I, Self>>(self);
+}
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER
+
+template <class T>
+void DeducedDestroy(T& self) { // NOLINT
+ self.~T();
+}
+
+// NOTE: This type exists as a single entity for variant and its bases to
+// befriend. It contains helper functionality that manipulates the state of the
+// variant, such as the implementation of assignment and emplace operations.
+struct VariantCoreAccess {
+ template <class VariantType>
+ static typename VariantType::Variant& Derived(VariantType& self) { // NOLINT
+ return static_cast<typename VariantType::Variant&>(self);
+ }
+
+ template <class VariantType>
+ static const typename VariantType::Variant& Derived(
+ const VariantType& self) { // NOLINT
+ return static_cast<const typename VariantType::Variant&>(self);
+ }
+
+ template <class VariantType>
+ static void Destroy(VariantType& self) { // NOLINT
+ Derived(self).destroy();
+ self.index_ = y_absl::variant_npos;
+ }
+
+ template <class Variant>
+ static void SetIndex(Variant& self, std::size_t i) { // NOLINT
+ self.index_ = i;
+ }
+
+ template <class Variant>
+ static void InitFrom(Variant& self, Variant&& other) { // NOLINT
+ VisitIndices<y_absl::variant_size<Variant>::value>::Run(
+ InitFromVisitor<Variant, Variant&&>{&self,
+ std::forward<Variant>(other)},
+ other.index());
+ self.index_ = other.index();
+ }
+
+ // Access a variant alternative, assuming the index is correct.
+ template <std::size_t I, class Variant>
+ static VariantAccessResult<I, Variant> Access(Variant&& self) {
+    // This cast, rather than an invocation of AccessUnion with an rvalue, is a
+    // workaround for MSVC. Without it there is a runtime failure when dealing
+    // with rvalues.
+ // TODO(calabrese) Reduce test case and find a simpler workaround.
+ return static_cast<VariantAccessResult<I, Variant>>(
+ variant_internal::AccessUnion(self.state_, SizeT<I>()));
+ }
+
+ // Access a variant alternative, throwing if the index is incorrect.
+ template <std::size_t I, class Variant>
+ static VariantAccessResult<I, Variant> CheckedAccess(Variant&& self) {
+ if (ABSL_PREDICT_FALSE(self.index_ != I)) {
+ TypedThrowBadVariantAccess<VariantAccessResult<I, Variant>>();
+ }
+
+ return Access<I>(y_absl::forward<Variant>(self));
+ }
+
+ // The implementation of the move-assignment operation for a variant.
+ template <class VType>
+ struct MoveAssignVisitor {
+ using DerivedType = typename VType::Variant;
+ template <std::size_t NewIndex>
+ void operator()(SizeT<NewIndex> /*new_i*/) const {
+ if (left->index_ == NewIndex) {
+ Access<NewIndex>(*left) = std::move(Access<NewIndex>(*right));
+ } else {
+ Derived(*left).template emplace<NewIndex>(
+ std::move(Access<NewIndex>(*right)));
+ }
+ }
+
+ void operator()(SizeT<y_absl::variant_npos> /*new_i*/) const {
+ Destroy(*left);
+ }
+
+ VType* left;
+ VType* right;
+ };
+
+ template <class VType>
+ static MoveAssignVisitor<VType> MakeMoveAssignVisitor(VType* left,
+ VType* other) {
+ return {left, other};
+ }
+
+ // The implementation of the assignment operation for a variant.
+ template <class VType>
+ struct CopyAssignVisitor {
+ using DerivedType = typename VType::Variant;
+ template <std::size_t NewIndex>
+ void operator()(SizeT<NewIndex> /*new_i*/) const {
+ using New =
+ typename y_absl::variant_alternative<NewIndex, DerivedType>::type;
+
+ if (left->index_ == NewIndex) {
+ Access<NewIndex>(*left) = Access<NewIndex>(*right);
+ } else if (std::is_nothrow_copy_constructible<New>::value ||
+ !std::is_nothrow_move_constructible<New>::value) {
+ Derived(*left).template emplace<NewIndex>(Access<NewIndex>(*right));
+ } else {
+ Derived(*left) = DerivedType(Derived(*right));
+ }
+ }
+
+ void operator()(SizeT<y_absl::variant_npos> /*new_i*/) const {
+ Destroy(*left);
+ }
+
+ VType* left;
+ const VType* right;
+ };
+
+ template <class VType>
+ static CopyAssignVisitor<VType> MakeCopyAssignVisitor(VType* left,
+ const VType& other) {
+ return {left, &other};
+ }
+
+ // The implementation of conversion-assignment operations for variant.
+ template <class Left, class QualifiedNew>
+ struct ConversionAssignVisitor {
+ using NewIndex =
+ variant_internal::IndexOfConstructedType<Left, QualifiedNew>;
+
+    void operator()(SizeT<NewIndex::value> /*old_i*/) const {
+ Access<NewIndex::value>(*left) = y_absl::forward<QualifiedNew>(other);
+ }
+
+    template <std::size_t OldIndex>
+    void operator()(SizeT<OldIndex> /*old_i*/) const {
+ using New =
+ typename y_absl::variant_alternative<NewIndex::value, Left>::type;
+ if (std::is_nothrow_constructible<New, QualifiedNew>::value ||
+ !std::is_nothrow_move_constructible<New>::value) {
+ left->template emplace<NewIndex::value>(
+ y_absl::forward<QualifiedNew>(other));
+ } else {
+      // The standard says "equivalent to
+      // operator=(variant(std::forward<T>(t)))", but we use `emplace` here
+      // because the variant's move assignment operator could be deleted.
+ left->template emplace<NewIndex::value>(
+ New(y_absl::forward<QualifiedNew>(other)));
+ }
+ }
+
+ Left* left;
+ QualifiedNew&& other;
+ };
+
+ template <class Left, class QualifiedNew>
+ static ConversionAssignVisitor<Left, QualifiedNew>
+ MakeConversionAssignVisitor(Left* left, QualifiedNew&& qual) {
+ return {left, y_absl::forward<QualifiedNew>(qual)};
+ }
+
+  // Backend for `emplace()` operations, which destroys `*self` and then
+  // constructs a new alternative with `Args...`.
+ template <std::size_t NewIndex, class Self, class... Args>
+ static typename y_absl::variant_alternative<NewIndex, Self>::type& Replace(
+ Self* self, Args&&... args) {
+ Destroy(*self);
+ using New = typename y_absl::variant_alternative<NewIndex, Self>::type;
+ New* const result = ::new (static_cast<void*>(&self->state_))
+ New(y_absl::forward<Args>(args)...);
+ self->index_ = NewIndex;
+ return *result;
+ }
+
+ template <class LeftVariant, class QualifiedRightVariant>
+ struct InitFromVisitor {
+ template <std::size_t NewIndex>
+ void operator()(SizeT<NewIndex> /*new_i*/) const {
+ using Alternative =
+ typename variant_alternative<NewIndex, LeftVariant>::type;
+ ::new (static_cast<void*>(&left->state_)) Alternative(
+ Access<NewIndex>(std::forward<QualifiedRightVariant>(right)));
+ }
+
+ void operator()(SizeT<y_absl::variant_npos> /*new_i*/) const {
+ // This space intentionally left blank.
+ }
+ LeftVariant* left;
+ QualifiedRightVariant&& right;
+ };
+};
+
+template <class Expected, class... T>
+struct IndexOfImpl;
+
+template <class Expected>
+struct IndexOfImpl<Expected> {
+ using IndexFromEnd = SizeT<0>;
+ using MatchedIndexFromEnd = IndexFromEnd;
+ using MultipleMatches = std::false_type;
+};
+
+template <class Expected, class Head, class... Tail>
+struct IndexOfImpl<Expected, Head, Tail...> : IndexOfImpl<Expected, Tail...> {
+ using IndexFromEnd =
+ SizeT<IndexOfImpl<Expected, Tail...>::IndexFromEnd::value + 1>;
+};
+
+template <class Expected, class... Tail>
+struct IndexOfImpl<Expected, Expected, Tail...>
+ : IndexOfImpl<Expected, Tail...> {
+ using IndexFromEnd =
+ SizeT<IndexOfImpl<Expected, Tail...>::IndexFromEnd::value + 1>;
+ using MatchedIndexFromEnd = IndexFromEnd;
+ using MultipleMatches = std::integral_constant<
+ bool, IndexOfImpl<Expected, Tail...>::MatchedIndexFromEnd::value != 0>;
+};
+
+template <class Expected, class... Types>
+struct IndexOfMeta {
+ using Results = IndexOfImpl<Expected, Types...>;
+ static_assert(!Results::MultipleMatches::value,
+ "Attempted to access a variant by specifying a type that "
+ "matches more than one alternative.");
+ static_assert(Results::MatchedIndexFromEnd::value != 0,
+ "Attempted to access a variant by specifying a type that does "
+ "not match any alternative.");
+ using type = SizeT<sizeof...(Types) - Results::MatchedIndexFromEnd::value>;
+};
+
+template <class Expected, class... Types>
+using IndexOf = typename IndexOfMeta<Expected, Types...>::type;
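+
+// For illustration:
+//
+//   IndexOf<int, char, int, double>  // -> SizeT<1>
+//   IndexOf<int, int, int>           // -> static_assert: multiple matches
+//   IndexOf<int, char, double>      // -> static_assert: no match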
+
+template <class Variant, class T, std::size_t CurrIndex>
+struct UnambiguousIndexOfImpl;
+
+// Terminating case encountered once we've checked all of the alternatives.
+template <class T, std::size_t CurrIndex>
+struct UnambiguousIndexOfImpl<variant<>, T, CurrIndex> : SizeT<CurrIndex> {};
+
+// Case where T is not Head
+template <class Head, class... Tail, class T, std::size_t CurrIndex>
+struct UnambiguousIndexOfImpl<variant<Head, Tail...>, T, CurrIndex>
+ : UnambiguousIndexOfImpl<variant<Tail...>, T, CurrIndex + 1>::type {};
+
+// Case where T is Head
+template <class Head, class... Tail, std::size_t CurrIndex>
+struct UnambiguousIndexOfImpl<variant<Head, Tail...>, Head, CurrIndex>
+ : SizeT<UnambiguousIndexOfImpl<variant<Tail...>, Head, 0>::value ==
+ sizeof...(Tail)
+ ? CurrIndex
+ : CurrIndex + sizeof...(Tail) + 1> {};
+
+template <class Variant, class T>
+struct UnambiguousIndexOf;
+
+struct NoMatch {
+ struct type {};
+};
+
+template <class... Alts, class T>
+struct UnambiguousIndexOf<variant<Alts...>, T>
+ : std::conditional<UnambiguousIndexOfImpl<variant<Alts...>, T, 0>::value !=
+ sizeof...(Alts),
+ UnambiguousIndexOfImpl<variant<Alts...>, T, 0>,
+ NoMatch>::type::type {};
+
+template <class T, std::size_t /*Dummy*/>
+using UnambiguousTypeOfImpl = T;
+
+template <class Variant, class T>
+using UnambiguousTypeOfT =
+ UnambiguousTypeOfImpl<T, UnambiguousIndexOf<Variant, T>::value>;
+
+template <class H, class... T>
+class VariantStateBase;
+
+// This is an implementation of the "imaginary function" described in
+// [variant.ctor]. It is used to determine which alternative to construct
+// during initialization from some type T.
+template <class Variant, std::size_t I = 0>
+struct ImaginaryFun;
+
+template <std::size_t I>
+struct ImaginaryFun<variant<>, I> {
+ static void Run() = delete;
+};
+
+template <class H, class... T, std::size_t I>
+struct ImaginaryFun<variant<H, T...>, I> : ImaginaryFun<variant<T...>, I + 1> {
+ using ImaginaryFun<variant<T...>, I + 1>::Run;
+
+  // NOTE: const& and && overloads are used instead of by-value because
+  // guaranteed move elision is only available as of C++17. This may have other
+  // minor differences, but tests pass.
+ static SizeT<I> Run(const H&, SizeT<I>);
+ static SizeT<I> Run(H&&, SizeT<I>);
+};
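+
+// For illustration, a sketch of the overload resolution this performs (double
+// is convertible to int but not to TString):
+//
+//   decltype(ImaginaryFun<variant<int, TString>>::Run(3.14, {}))  // SizeT<0>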
+
+// The following metafunctions are used in constructor and assignment
+// constraints.
+template <class Self, class T>
+struct IsNeitherSelfNorInPlace : std::true_type {};
+
+template <class Self>
+struct IsNeitherSelfNorInPlace<Self, Self> : std::false_type {};
+
+template <class Self, class T>
+struct IsNeitherSelfNorInPlace<Self, in_place_type_t<T>> : std::false_type {};
+
+template <class Self, std::size_t I>
+struct IsNeitherSelfNorInPlace<Self, in_place_index_t<I>> : std::false_type {};
+
+template <class Variant, class T, class = void>
+struct ConversionIsPossibleImpl : std::false_type {};
+
+template <class Variant, class T>
+struct ConversionIsPossibleImpl<
+ Variant, T,
+ void_t<decltype(ImaginaryFun<Variant>::Run(std::declval<T>(), {}))>>
+ : std::true_type {};
+
+template <class Variant, class T>
+struct ConversionIsPossible : ConversionIsPossibleImpl<Variant, T>::type {};
+
+template <class Variant, class T>
+struct IndexOfConstructedType<
+ Variant, T,
+ void_t<decltype(ImaginaryFun<Variant>::Run(std::declval<T>(), {}))>>
+ : decltype(ImaginaryFun<Variant>::Run(std::declval<T>(), {})) {};
+
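+// ContainsVariantNPos<Is...> is true iff at least one of the Is equals
+// variant_npos: the all-true sequence `(0 <= Is)...` differs from
+// `(Is != variant_npos)...` exactly when some index is npos. For illustration:
+//
+//   ContainsVariantNPos<0, 2>::value                     // -> false
+//   ContainsVariantNPos<0, y_absl::variant_npos>::value  // -> true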
+template <std::size_t... Is>
+struct ContainsVariantNPos
+ : y_absl::negation<std::is_same< // NOLINT
+ y_absl::integer_sequence<bool, 0 <= Is...>,
+ y_absl::integer_sequence<bool, Is != y_absl::variant_npos...>>> {};
+
+template <class Op, class... QualifiedVariants>
+using RawVisitResult =
+ y_absl::result_of_t<Op(VariantAccessResult<0, QualifiedVariants>...)>;
+
+// NOTE: The spec requires that all return paths yield the same type, and it is
+// not SFINAE-friendly, so we can deduce the return type by examining only the
+// first result. If the visitor is not callable, we get an error, but we remain
+// compliant and fast to compile.
+// TODO(calabrese) Possibly rewrite in a way that yields better compile errors
+// at the cost of longer compile-times.
+template <class Op, class... QualifiedVariants>
+struct VisitResultImpl {
+ using type =
+ y_absl::result_of_t<Op(VariantAccessResult<0, QualifiedVariants>...)>;
+};
+
+// Done in two steps intentionally so that we don't cause substitution to fail.
+template <class Op, class... QualifiedVariants>
+using VisitResult = typename VisitResultImpl<Op, QualifiedVariants...>::type;
+
+template <class Op, class... QualifiedVariants>
+struct PerformVisitation {
+ using ReturnType = VisitResult<Op, QualifiedVariants...>;
+
+ template <std::size_t... Is>
+ constexpr ReturnType operator()(SizeT<Is>... indices) const {
+ return Run(typename ContainsVariantNPos<Is...>::type{},
+ y_absl::index_sequence_for<QualifiedVariants...>(), indices...);
+ }
+
+ template <std::size_t... TupIs, std::size_t... Is>
+ constexpr ReturnType Run(std::false_type /*has_valueless*/,
+ index_sequence<TupIs...>, SizeT<Is>...) const {
+ static_assert(
+ std::is_same<ReturnType,
+ y_absl::result_of_t<Op(VariantAccessResult<
+ Is, QualifiedVariants>...)>>::value,
+ "All visitation overloads must have the same return type.");
+ return y_absl::base_internal::invoke(
+ y_absl::forward<Op>(op),
+ VariantCoreAccess::Access<Is>(
+ y_absl::forward<QualifiedVariants>(std::get<TupIs>(variant_tup)))...);
+ }
+
+ template <std::size_t... TupIs, std::size_t... Is>
+ [[noreturn]] ReturnType Run(std::true_type /*has_valueless*/,
+ index_sequence<TupIs...>, SizeT<Is>...) const {
+ y_absl::variant_internal::ThrowBadVariantAccess();
+ }
+
+ // TODO(calabrese) Avoid using a tuple, which causes lots of instantiations
+ // Attempts using lambda variadic captures fail on current GCC.
+ std::tuple<QualifiedVariants&&...> variant_tup;
+ Op&& op;
+};
+
+template <class... T>
+union Union;
+
+// We want to allow variant<> to be trivial. For that, we need the default
+// constructor to be trivial, which means we can't define it ourselves.
+// Instead, we use a non-default constructor that takes a NoopConstructorTag
+// and doesn't affect the triviality of the types.
+struct NoopConstructorTag {};
+
+template <std::size_t I>
+struct EmplaceTag {};
+
+template <>
+union Union<> {
+ constexpr explicit Union(NoopConstructorTag) noexcept {}
+};
+
+// Suppress bogus warning on MSVC: MSVC complains that Union<T...> has a defined
+// deleted destructor from the `std::is_destructible` check below.
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4624)
+#endif // _MSC_VER
+
+template <class Head, class... Tail>
+union Union<Head, Tail...> {
+ using TailUnion = Union<Tail...>;
+
+ explicit constexpr Union(NoopConstructorTag /*tag*/) noexcept
+ : tail(NoopConstructorTag()) {}
+
+ template <class... P>
+ explicit constexpr Union(EmplaceTag<0>, P&&... args)
+ : head(y_absl::forward<P>(args)...) {}
+
+ template <std::size_t I, class... P>
+ explicit constexpr Union(EmplaceTag<I>, P&&... args)
+ : tail(EmplaceTag<I - 1>{}, y_absl::forward<P>(args)...) {}
+
+ Head head;
+ TailUnion tail;
+};
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER
+
+// TODO(calabrese) Just contain a Union in this union (certain configs fail).
+template <class... T>
+union DestructibleUnionImpl;
+
+template <>
+union DestructibleUnionImpl<> {
+ constexpr explicit DestructibleUnionImpl(NoopConstructorTag) noexcept {}
+};
+
+template <class Head, class... Tail>
+union DestructibleUnionImpl<Head, Tail...> {
+ using TailUnion = DestructibleUnionImpl<Tail...>;
+
+ explicit constexpr DestructibleUnionImpl(NoopConstructorTag /*tag*/) noexcept
+ : tail(NoopConstructorTag()) {}
+
+ template <class... P>
+ explicit constexpr DestructibleUnionImpl(EmplaceTag<0>, P&&... args)
+ : head(y_absl::forward<P>(args)...) {}
+
+ template <std::size_t I, class... P>
+ explicit constexpr DestructibleUnionImpl(EmplaceTag<I>, P&&... args)
+ : tail(EmplaceTag<I - 1>{}, y_absl::forward<P>(args)...) {}
+
+ ~DestructibleUnionImpl() {}
+
+ Head head;
+ TailUnion tail;
+};
+
+// This union type is destructible even if one or more T are not trivially
+// destructible. If all T are trivially destructible, then so is this
+// resultant type.
+template <class... T>
+using DestructibleUnion =
+ y_absl::conditional_t<std::is_destructible<Union<T...>>::value, Union<T...>,
+ DestructibleUnionImpl<T...>>;
+
+// Deepest base, containing the actual union and the discriminator
+template <class H, class... T>
+class VariantStateBase {
+ protected:
+ using Variant = variant<H, T...>;
+
+ template <class LazyH = H,
+ class ConstructibleH = y_absl::enable_if_t<
+ std::is_default_constructible<LazyH>::value, LazyH>>
+ constexpr VariantStateBase() noexcept(
+ std::is_nothrow_default_constructible<ConstructibleH>::value)
+ : state_(EmplaceTag<0>()), index_(0) {}
+
+ template <std::size_t I, class... P>
+ explicit constexpr VariantStateBase(EmplaceTag<I> tag, P&&... args)
+ : state_(tag, y_absl::forward<P>(args)...), index_(I) {}
+
+ explicit constexpr VariantStateBase(NoopConstructorTag)
+ : state_(NoopConstructorTag()), index_(variant_npos) {}
+
+ void destroy() {} // Does nothing (shadowed in child if non-trivial)
+
+ DestructibleUnion<H, T...> state_;
+ std::size_t index_;
+};
+
+using y_absl::internal::identity;
+
+// OverloadSet::Overload() is a unary function which is overloaded to
+// take any of the element types of the variant, by reference-to-const.
+// The return type of the overload on T is identity<T>, so that you
+// can statically determine which overload was called.
+//
+// Overload() is not defined, so it can only be called in unevaluated
+// contexts.
+template <typename... Ts>
+struct OverloadSet;
+
+template <typename T, typename... Ts>
+struct OverloadSet<T, Ts...> : OverloadSet<Ts...> {
+ using Base = OverloadSet<Ts...>;
+ static identity<T> Overload(const T&);
+ using Base::Overload;
+};
+
+template <>
+struct OverloadSet<> {
+ // For any case not handled above.
+ static void Overload(...);
+};
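+
+// For illustration (int and TString are sample element types; void* is
+// assumed convertible to neither):
+//
+//   decltype(OverloadSet<int, TString>::Overload(std::declval<int>()))
+//       // -> identity<int>
+//   decltype(OverloadSet<int, TString>::Overload(std::declval<void*>()))
+//       // -> void (no element matches; the variadic catch-all is chosen)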
+
+template <class T>
+using LessThanResult = decltype(std::declval<T>() < std::declval<T>());
+
+template <class T>
+using GreaterThanResult = decltype(std::declval<T>() > std::declval<T>());
+
+template <class T>
+using LessThanOrEqualResult = decltype(std::declval<T>() <= std::declval<T>());
+
+template <class T>
+using GreaterThanOrEqualResult =
+ decltype(std::declval<T>() >= std::declval<T>());
+
+template <class T>
+using EqualResult = decltype(std::declval<T>() == std::declval<T>());
+
+template <class T>
+using NotEqualResult = decltype(std::declval<T>() != std::declval<T>());
+
+using type_traits_internal::is_detected_convertible;
+
+template <class... T>
+using RequireAllHaveEqualT = y_absl::enable_if_t<
+ y_absl::conjunction<is_detected_convertible<bool, EqualResult, T>...>::value,
+ bool>;
+
+template <class... T>
+using RequireAllHaveNotEqualT =
+ y_absl::enable_if_t<y_absl::conjunction<is_detected_convertible<
+ bool, NotEqualResult, T>...>::value,
+ bool>;
+
+template <class... T>
+using RequireAllHaveLessThanT =
+ y_absl::enable_if_t<y_absl::conjunction<is_detected_convertible<
+ bool, LessThanResult, T>...>::value,
+ bool>;
+
+template <class... T>
+using RequireAllHaveLessThanOrEqualT =
+ y_absl::enable_if_t<y_absl::conjunction<is_detected_convertible<
+ bool, LessThanOrEqualResult, T>...>::value,
+ bool>;
+
+template <class... T>
+using RequireAllHaveGreaterThanOrEqualT =
+ y_absl::enable_if_t<y_absl::conjunction<is_detected_convertible<
+ bool, GreaterThanOrEqualResult, T>...>::value,
+ bool>;
+
+template <class... T>
+using RequireAllHaveGreaterThanT =
+ y_absl::enable_if_t<y_absl::conjunction<is_detected_convertible<
+ bool, GreaterThanResult, T>...>::value,
+ bool>;
+
+// Helper template containing implementations details of variant that can't go
+// in the private section. For convenience, this takes the variant type as a
+// single template parameter.
+template <typename T>
+struct VariantHelper;
+
+template <typename... Ts>
+struct VariantHelper<variant<Ts...>> {
+ // Type metafunction which returns the element type selected if
+ // OverloadSet::Overload() is well-formed when called with argument type U.
+ template <typename U>
+ using BestMatch = decltype(
+ variant_internal::OverloadSet<Ts...>::Overload(std::declval<U>()));
+
+ // Type metafunction which returns true if OverloadSet::Overload() is
+ // well-formed when called with argument type U.
+  // CanAccept can't be just an alias because there is an MSVC bug on
+  // parameter pack expansion involving decltype.
+ template <typename U>
+ struct CanAccept :
+ std::integral_constant<bool, !std::is_void<BestMatch<U>>::value> {};
+
+ // Type metafunction which returns true if Other is an instantiation of
+  // variant, and variant's converting constructor from Other will be
+ // well-formed. We will use this to remove constructors that would be
+ // ill-formed from the overload set.
+ template <typename Other>
+ struct CanConvertFrom;
+
+ template <typename... Us>
+ struct CanConvertFrom<variant<Us...>>
+ : public y_absl::conjunction<CanAccept<Us>...> {};
+};
+
+// A type with a deleted copy ctor and a trivial move ctor (declaring the move
+// ctor suppresses the implicit copy ctor).
+struct TrivialMoveOnly {
+ TrivialMoveOnly(TrivialMoveOnly&&) = default;
+};
+
+// Trait class to detect whether a type is trivially move constructible.
+// A union's defaulted copy/move constructor is deleted if any variant member's
+// copy/move constructor is nontrivial.
+template <typename T>
+struct IsTriviallyMoveConstructible
+    : std::is_move_constructible<Union<T, TrivialMoveOnly>> {};
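+
+// For illustration, with a hypothetical type whose move constructor is
+// user-provided (and therefore nontrivial):
+//
+//   struct UserMove { UserMove(UserMove&&) noexcept {} };
+//
+//   IsTriviallyMoveConstructible<int>::value       // -> true
+//   IsTriviallyMoveConstructible<UserMove>::value  // -> false, because
+//       // Union<UserMove, TrivialMoveOnly>'s move constructor is deleted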
+
+// To guarantee triviality of all special-member functions that can be trivial,
+// we use a chain of conditional bases for each one.
+// The order of inheritance of bases from child to base are logically:
+//
+// variant
+// VariantCopyAssignBase
+// VariantMoveAssignBase
+// VariantCopyBase
+// VariantMoveBase
+// VariantStateBaseDestructor
+// VariantStateBase
+//
+// Note that there is a separate branch at each base, dependent on whether the
+// corresponding special member function can be trivial in the resultant
+// variant type.
+
+template <class... T>
+class VariantStateBaseDestructorNontrivial;
+
+template <class... T>
+class VariantMoveBaseNontrivial;
+
+template <class... T>
+class VariantCopyBaseNontrivial;
+
+template <class... T>
+class VariantMoveAssignBaseNontrivial;
+
+template <class... T>
+class VariantCopyAssignBaseNontrivial;
+
+// Base that is dependent on whether or not the destructor can be trivial.
+template <class... T>
+using VariantStateBaseDestructor =
+ y_absl::conditional_t<std::is_destructible<Union<T...>>::value,
+ VariantStateBase<T...>,
+ VariantStateBaseDestructorNontrivial<T...>>;
+
+// Base that is dependent on whether or not the move-constructor can be
+// implicitly generated by the compiler (trivial or deleted).
+// Previously we were using `std::is_move_constructible<Union<T...>>` to check
+// whether all Ts have trivial move constructor, but it ran into a GCC bug:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84866
+// So we have to use a different approach (i.e. `IsTriviallyMoveConstructible`
+// above) to work around the bug.
+template <class... T>
+using VariantMoveBase = y_absl::conditional_t<
+ y_absl::disjunction<
+ y_absl::negation<y_absl::conjunction<std::is_move_constructible<T>...>>,
+ y_absl::conjunction<IsTriviallyMoveConstructible<T>...>>::value,
+ VariantStateBaseDestructor<T...>, VariantMoveBaseNontrivial<T...>>;
+
+// Base that is dependent on whether or not the copy-constructor can be trivial.
+template <class... T>
+using VariantCopyBase = y_absl::conditional_t<
+ y_absl::disjunction<
+ y_absl::negation<y_absl::conjunction<std::is_copy_constructible<T>...>>,
+ std::is_copy_constructible<Union<T...>>>::value,
+ VariantMoveBase<T...>, VariantCopyBaseNontrivial<T...>>;
+
+// Base that is dependent on whether or not the move-assign can be trivial.
+template <class... T>
+using VariantMoveAssignBase = y_absl::conditional_t<
+ y_absl::disjunction<
+ y_absl::conjunction<y_absl::is_move_assignable<Union<T...>>,
+ std::is_move_constructible<Union<T...>>,
+ std::is_destructible<Union<T...>>>,
+ y_absl::negation<y_absl::conjunction<std::is_move_constructible<T>...,
+ // Note: We're not qualifying this with
+ // y_absl:: because it doesn't compile
+ // under MSVC.
+ is_move_assignable<T>...>>>::value,
+ VariantCopyBase<T...>, VariantMoveAssignBaseNontrivial<T...>>;
+
+// Base that is dependent on whether or not the copy-assign can be trivial.
+template <class... T>
+using VariantCopyAssignBase = y_absl::conditional_t<
+ y_absl::disjunction<
+ y_absl::conjunction<y_absl::is_copy_assignable<Union<T...>>,
+ std::is_copy_constructible<Union<T...>>,
+ std::is_destructible<Union<T...>>>,
+ y_absl::negation<y_absl::conjunction<std::is_copy_constructible<T>...,
+ // Note: We're not qualifying this with
+ // y_absl:: because it doesn't compile
+ // under MSVC.
+ is_copy_assignable<T>...>>>::value,
+ VariantMoveAssignBase<T...>, VariantCopyAssignBaseNontrivial<T...>>;
+
+template <class... T>
+using VariantBase = VariantCopyAssignBase<T...>;
+
+template <class... T>
+class VariantStateBaseDestructorNontrivial : protected VariantStateBase<T...> {
+ private:
+ using Base = VariantStateBase<T...>;
+
+ protected:
+ using Base::Base;
+
+ VariantStateBaseDestructorNontrivial() = default;
+ VariantStateBaseDestructorNontrivial(VariantStateBaseDestructorNontrivial&&) =
+ default;
+ VariantStateBaseDestructorNontrivial(
+ const VariantStateBaseDestructorNontrivial&) = default;
+ VariantStateBaseDestructorNontrivial& operator=(
+ VariantStateBaseDestructorNontrivial&&) = default;
+ VariantStateBaseDestructorNontrivial& operator=(
+ const VariantStateBaseDestructorNontrivial&) = default;
+
+ struct Destroyer {
+ template <std::size_t I>
+ void operator()(SizeT<I> i) const {
+ using Alternative =
+ typename y_absl::variant_alternative<I, variant<T...>>::type;
+ variant_internal::AccessUnion(self->state_, i).~Alternative();
+ }
+
+ void operator()(SizeT<y_absl::variant_npos> /*i*/) const {
+ // This space intentionally left blank
+ }
+
+ VariantStateBaseDestructorNontrivial* self;
+ };
+
+ void destroy() { VisitIndices<sizeof...(T)>::Run(Destroyer{this}, index_); }
+
+ ~VariantStateBaseDestructorNontrivial() { destroy(); }
+
+ protected:
+ using Base::index_;
+ using Base::state_;
+};
+
+template <class... T>
+class VariantMoveBaseNontrivial : protected VariantStateBaseDestructor<T...> {
+ private:
+ using Base = VariantStateBaseDestructor<T...>;
+
+ protected:
+ using Base::Base;
+
+ struct Construct {
+ template <std::size_t I>
+ void operator()(SizeT<I> i) const {
+ using Alternative =
+ typename y_absl::variant_alternative<I, variant<T...>>::type;
+ ::new (static_cast<void*>(&self->state_)) Alternative(
+ variant_internal::AccessUnion(y_absl::move(other->state_), i));
+ }
+
+ void operator()(SizeT<y_absl::variant_npos> /*i*/) const {}
+
+ VariantMoveBaseNontrivial* self;
+ VariantMoveBaseNontrivial* other;
+ };
+
+ VariantMoveBaseNontrivial() = default;
+ VariantMoveBaseNontrivial(VariantMoveBaseNontrivial&& other) noexcept(
+ y_absl::conjunction<std::is_nothrow_move_constructible<T>...>::value)
+ : Base(NoopConstructorTag()) {
+ VisitIndices<sizeof...(T)>::Run(Construct{this, &other}, other.index_);
+ index_ = other.index_;
+ }
+
+ VariantMoveBaseNontrivial(VariantMoveBaseNontrivial const&) = default;
+
+ VariantMoveBaseNontrivial& operator=(VariantMoveBaseNontrivial&&) = default;
+ VariantMoveBaseNontrivial& operator=(VariantMoveBaseNontrivial const&) =
+ default;
+
+ protected:
+ using Base::index_;
+ using Base::state_;
+};
+
+template <class... T>
+class VariantCopyBaseNontrivial : protected VariantMoveBase<T...> {
+ private:
+ using Base = VariantMoveBase<T...>;
+
+ protected:
+ using Base::Base;
+
+ VariantCopyBaseNontrivial() = default;
+ VariantCopyBaseNontrivial(VariantCopyBaseNontrivial&&) = default;
+
+ struct Construct {
+ template <std::size_t I>
+ void operator()(SizeT<I> i) const {
+ using Alternative =
+ typename y_absl::variant_alternative<I, variant<T...>>::type;
+ ::new (static_cast<void*>(&self->state_))
+ Alternative(variant_internal::AccessUnion(other->state_, i));
+ }
+
+ void operator()(SizeT<y_absl::variant_npos> /*i*/) const {}
+
+ VariantCopyBaseNontrivial* self;
+ const VariantCopyBaseNontrivial* other;
+ };
+
+ VariantCopyBaseNontrivial(VariantCopyBaseNontrivial const& other)
+ : Base(NoopConstructorTag()) {
+ VisitIndices<sizeof...(T)>::Run(Construct{this, &other}, other.index_);
+ index_ = other.index_;
+ }
+
+ VariantCopyBaseNontrivial& operator=(VariantCopyBaseNontrivial&&) = default;
+ VariantCopyBaseNontrivial& operator=(VariantCopyBaseNontrivial const&) =
+ default;
+
+ protected:
+ using Base::index_;
+ using Base::state_;
+};
+
+template <class... T>
+class VariantMoveAssignBaseNontrivial : protected VariantCopyBase<T...> {
+ friend struct VariantCoreAccess;
+
+ private:
+ using Base = VariantCopyBase<T...>;
+
+ protected:
+ using Base::Base;
+
+ VariantMoveAssignBaseNontrivial() = default;
+ VariantMoveAssignBaseNontrivial(VariantMoveAssignBaseNontrivial&&) = default;
+ VariantMoveAssignBaseNontrivial(const VariantMoveAssignBaseNontrivial&) =
+ default;
+ VariantMoveAssignBaseNontrivial& operator=(
+ VariantMoveAssignBaseNontrivial const&) = default;
+
+ VariantMoveAssignBaseNontrivial&
+ operator=(VariantMoveAssignBaseNontrivial&& other) noexcept(
+ y_absl::conjunction<std::is_nothrow_move_constructible<T>...,
+ std::is_nothrow_move_assignable<T>...>::value) {
+ VisitIndices<sizeof...(T)>::Run(
+ VariantCoreAccess::MakeMoveAssignVisitor(this, &other), other.index_);
+ return *this;
+ }
+
+ protected:
+ using Base::index_;
+ using Base::state_;
+};
+
+template <class... T>
+class VariantCopyAssignBaseNontrivial : protected VariantMoveAssignBase<T...> {
+ friend struct VariantCoreAccess;
+
+ private:
+ using Base = VariantMoveAssignBase<T...>;
+
+ protected:
+ using Base::Base;
+
+ VariantCopyAssignBaseNontrivial() = default;
+ VariantCopyAssignBaseNontrivial(VariantCopyAssignBaseNontrivial&&) = default;
+ VariantCopyAssignBaseNontrivial(const VariantCopyAssignBaseNontrivial&) =
+ default;
+ VariantCopyAssignBaseNontrivial& operator=(
+ VariantCopyAssignBaseNontrivial&&) = default;
+
+ VariantCopyAssignBaseNontrivial& operator=(
+ const VariantCopyAssignBaseNontrivial& other) {
+ VisitIndices<sizeof...(T)>::Run(
+ VariantCoreAccess::MakeCopyAssignVisitor(this, other), other.index_);
+ return *this;
+ }
+
+ protected:
+ using Base::index_;
+ using Base::state_;
+};
+
+////////////////////////////////////////
+// Visitors for Comparison Operations //
+////////////////////////////////////////
+
+template <class... Types>
+struct EqualsOp {
+ const variant<Types...>* v;
+ const variant<Types...>* w;
+
+ constexpr bool operator()(SizeT<y_absl::variant_npos> /*v_i*/) const {
+ return true;
+ }
+
+ template <std::size_t I>
+ constexpr bool operator()(SizeT<I> /*v_i*/) const {
+ return VariantCoreAccess::Access<I>(*v) == VariantCoreAccess::Access<I>(*w);
+ }
+};
+
+template <class... Types>
+struct NotEqualsOp {
+ const variant<Types...>* v;
+ const variant<Types...>* w;
+
+ constexpr bool operator()(SizeT<y_absl::variant_npos> /*v_i*/) const {
+ return false;
+ }
+
+ template <std::size_t I>
+ constexpr bool operator()(SizeT<I> /*v_i*/) const {
+ return VariantCoreAccess::Access<I>(*v) != VariantCoreAccess::Access<I>(*w);
+ }
+};
+
+template <class... Types>
+struct LessThanOp {
+ const variant<Types...>* v;
+ const variant<Types...>* w;
+
+ constexpr bool operator()(SizeT<y_absl::variant_npos> /*v_i*/) const {
+ return false;
+ }
+
+ template <std::size_t I>
+ constexpr bool operator()(SizeT<I> /*v_i*/) const {
+ return VariantCoreAccess::Access<I>(*v) < VariantCoreAccess::Access<I>(*w);
+ }
+};
+
+template <class... Types>
+struct GreaterThanOp {
+ const variant<Types...>* v;
+ const variant<Types...>* w;
+
+ constexpr bool operator()(SizeT<y_absl::variant_npos> /*v_i*/) const {
+ return false;
+ }
+
+ template <std::size_t I>
+ constexpr bool operator()(SizeT<I> /*v_i*/) const {
+ return VariantCoreAccess::Access<I>(*v) > VariantCoreAccess::Access<I>(*w);
+ }
+};
+
+template <class... Types>
+struct LessThanOrEqualsOp {
+ const variant<Types...>* v;
+ const variant<Types...>* w;
+
+ constexpr bool operator()(SizeT<y_absl::variant_npos> /*v_i*/) const {
+ return true;
+ }
+
+ template <std::size_t I>
+ constexpr bool operator()(SizeT<I> /*v_i*/) const {
+ return VariantCoreAccess::Access<I>(*v) <= VariantCoreAccess::Access<I>(*w);
+ }
+};
+
+template <class... Types>
+struct GreaterThanOrEqualsOp {
+ const variant<Types...>* v;
+ const variant<Types...>* w;
+
+ constexpr bool operator()(SizeT<y_absl::variant_npos> /*v_i*/) const {
+ return true;
+ }
+
+ template <std::size_t I>
+ constexpr bool operator()(SizeT<I> /*v_i*/) const {
+ return VariantCoreAccess::Access<I>(*v) >= VariantCoreAccess::Access<I>(*w);
+ }
+};
+
+// Precondition: v.index() == w.index().
+template <class... Types>
+struct SwapSameIndex {
+ variant<Types...>* v;
+ variant<Types...>* w;
+ template <std::size_t I>
+ void operator()(SizeT<I>) const {
+ type_traits_internal::Swap(VariantCoreAccess::Access<I>(*v),
+ VariantCoreAccess::Access<I>(*w));
+ }
+
+ void operator()(SizeT<variant_npos>) const {}
+};
+
+// TODO(calabrese): Do this from a different namespace for proper ADL usage.
+template <class... Types>
+struct Swap {
+ variant<Types...>* v;
+ variant<Types...>* w;
+
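+  // Fallback used when the operands hold different alternatives (or one is
+  // valueless): move `*w` into a temporary, move `*v` into `*w`, and then
+  // move the temporary into `*v`.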
+ void generic_swap() const {
+ variant<Types...> tmp(std::move(*w));
+ VariantCoreAccess::Destroy(*w);
+ VariantCoreAccess::InitFrom(*w, std::move(*v));
+ VariantCoreAccess::Destroy(*v);
+ VariantCoreAccess::InitFrom(*v, std::move(tmp));
+ }
+
+ void operator()(SizeT<y_absl::variant_npos> /*w_i*/) const {
+ if (!v->valueless_by_exception()) {
+ generic_swap();
+ }
+ }
+
+ template <std::size_t Wi>
+ void operator()(SizeT<Wi> /*w_i*/) {
+ if (v->index() == Wi) {
+ VisitIndices<sizeof...(Types)>::Run(SwapSameIndex<Types...>{v, w}, Wi);
+ } else {
+ generic_swap();
+ }
+ }
+};
+
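+// Base for the `std::hash` specialization of `variant`. This primary template
+// is selected when not every alternative type is hashable; all of its special
+// members are deleted, making the resulting hash object unusable. The partial
+// specialization below provides the actual hash implementation.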
+template <typename Variant, typename = void, typename... Ts>
+struct VariantHashBase {
+ VariantHashBase() = delete;
+ VariantHashBase(const VariantHashBase&) = delete;
+ VariantHashBase(VariantHashBase&&) = delete;
+ VariantHashBase& operator=(const VariantHashBase&) = delete;
+ VariantHashBase& operator=(VariantHashBase&&) = delete;
+};
+
+struct VariantHashVisitor {
+ template <typename T>
+ size_t operator()(const T& t) {
+ return std::hash<T>{}(t);
+ }
+};
+
+template <typename Variant, typename... Ts>
+struct VariantHashBase<Variant,
+ y_absl::enable_if_t<y_absl::conjunction<
+ type_traits_internal::IsHashable<Ts>...>::value>,
+ Ts...> {
+ using argument_type = Variant;
+ using result_type = size_t;
+ size_t operator()(const Variant& var) const {
+ type_traits_internal::AssertHashEnabled<Ts...>();
+ if (var.valueless_by_exception()) {
+ return 239799884;
+ }
+ size_t result = VisitIndices<variant_size<Variant>::value>::Run(
+ PerformVisitation<VariantHashVisitor, const Variant&>{
+ std::forward_as_tuple(var), VariantHashVisitor{}},
+ var.index());
+    // Combine the index and the hash result in order to distinguish
+    // `variant<int, int>` objects holding the same value in different
+    // alternatives.
+ }
+};
+
+} // namespace variant_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // !defined(ABSL_USES_STD_VARIANT)
+#endif // ABSL_TYPES_variant_internal_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/ya.make
new file mode 100644
index 00000000000..b5ead458565
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/ya.make
@@ -0,0 +1,14 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h
new file mode 100644
index 00000000000..d4ef0bb57b0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h
@@ -0,0 +1,776 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// optional.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the `y_absl::optional` type for holding a value which
+// may or may not be present. This type is useful for providing value semantics
+// for operations that need to return or hold "something-or-nothing".
+//
+// Example:
+//
+// // A common way to signal operation failure is to provide an output
+// // parameter and a bool return type:
+// bool AcquireResource(const Input&, Resource * out);
+//
+// // Providing an y_absl::optional return type provides a cleaner API:
+// y_absl::optional<Resource> AcquireResource(const Input&);
+//
+// `y_absl::optional` is a C++11 compatible version of the C++17 `std::optional`
+// abstraction and is designed to be a drop-in replacement for code compliant
+// with C++17.
+#ifndef ABSL_TYPES_OPTIONAL_H_
+#define ABSL_TYPES_OPTIONAL_H_
+
+#include "y_absl/base/config.h" // TODO(calabrese) IWYU removal?
+#include "y_absl/utility/utility.h"
+
+#ifdef ABSL_USES_STD_OPTIONAL
+
+#include <optional> // IWYU pragma: export
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+using std::bad_optional_access;
+using std::optional;
+using std::make_optional;
+using std::nullopt_t;
+using std::nullopt;
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else // ABSL_USES_STD_OPTIONAL
+
+#include <cassert>
+#include <functional>
+#include <initializer_list>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/inline_variable.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/types/bad_optional_access.h"
+#include "y_absl/types/internal/optional.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// nullopt_t
+//
+// Class type for `y_absl::nullopt` used to indicate an `y_absl::optional<T>` type
+// that does not contain a value.
+struct nullopt_t {
+ // It must not be default-constructible to avoid ambiguity for opt = {}.
+ explicit constexpr nullopt_t(optional_internal::init_t) noexcept {}
+};
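+
+// For example (an illustrative sketch):
+//
+//   y_absl::optional<int> opt = 1;
+//   opt = y_absl::nullopt;  // `opt` is now empty
+//   opt = {};               // equivalent: `{}` deduces an empty `optional`,
+//                           // not a value-initialized `int`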
+
+// nullopt
+//
+// A tag constant of type `y_absl::nullopt_t` used to indicate an empty
+// `y_absl::optional` in certain functions, such as construction or assignment.
+ABSL_INTERNAL_INLINE_CONSTEXPR(nullopt_t, nullopt,
+ nullopt_t(optional_internal::init_t()));
+
+// -----------------------------------------------------------------------------
+// y_absl::optional
+// -----------------------------------------------------------------------------
+//
+// A value of type `y_absl::optional<T>` holds either a value of `T` or an
+// "empty" value. When it holds a value of `T`, it stores it as a direct
+// sub-object, so `sizeof(optional<T>)` is approximately
+// `sizeof(T) + sizeof(bool)`.
+//
+// This implementation is based on the C++17 `std::optional` specification as
+// of its May 2017 draft (section 20.6).
+//
+// Differences between `y_absl::optional<T>` and `std::optional<T>` include:
+//
+// * `constexpr` is not used for non-const member functions.
+//   (This reflects differences between the C++11 and C++14 `constexpr` rules.)
+// * `y_absl::nullopt` and `y_absl::in_place` are not declared `constexpr`. We
+// need the inline variable support in C++17 for external linkage.
+// * Throws `y_absl::bad_optional_access` instead of
+// `std::bad_optional_access`.
+// * `make_optional()` cannot be declared `constexpr` due to the absence of
+// guaranteed copy elision.
+// * The move constructor's `noexcept` specification is stronger: if the
+//   default allocator is non-throwing (via setting `ABSL_ALLOCATOR_NOTHROW`),
+//   it evaluates to `noexcept(true)`, because we assume that
+//   a) move constructors should only throw due to allocation failure, and
+//   b) if T's move constructor allocates, it uses the same allocation
+//      function as the default allocator.
+//
+template <typename T>
+class optional : private optional_internal::optional_data<T>,
+ private optional_internal::optional_ctor_base<
+ optional_internal::ctor_copy_traits<T>::traits>,
+ private optional_internal::optional_assign_base<
+ optional_internal::assign_copy_traits<T>::traits> {
+ using data_base = optional_internal::optional_data<T>;
+
+ public:
+ typedef T value_type;
+
+ // Constructors
+
+ // Constructs an `optional` holding an empty value, NOT a default constructed
+ // `T`.
+ constexpr optional() noexcept {}
+
+ // Constructs an `optional` initialized with `nullopt` to hold an empty value.
+ constexpr optional(nullopt_t) noexcept {} // NOLINT(runtime/explicit)
+
+ // Copy constructor, standard semantics
+ optional(const optional&) = default;
+
+ // Move constructor, standard semantics
+ optional(optional&&) = default;
+
+  // Constructs a non-empty `optional` whose contained value of type `T` is
+  // direct-initialized from the forwarded arguments `args...`. (The
+  // `in_place_t` tag indicates that the contained object should be
+  // constructed in place.)
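+  //
+  // Example (a sketch):
+  //
+  //   y_absl::optional<std::pair<int, int>> opt(y_absl::in_place, 1, 2);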
+ template <typename InPlaceT, typename... Args,
+ y_absl::enable_if_t<y_absl::conjunction<
+ std::is_same<InPlaceT, in_place_t>,
+ std::is_constructible<T, Args&&...> >::value>* = nullptr>
+ constexpr explicit optional(InPlaceT, Args&&... args)
+ : data_base(in_place_t(), y_absl::forward<Args>(args)...) {}
+
+  // Constructs a non-empty `optional` whose contained value of type `T` is
+  // direct-initialized from an initializer list and the forwarded arguments
+  // `args...`. (The `in_place_t` tag indicates that the contained object
+  // should be constructed in place.)
+ template <typename U, typename... Args,
+ typename = typename std::enable_if<std::is_constructible<
+ T, std::initializer_list<U>&, Args&&...>::value>::type>
+ constexpr explicit optional(in_place_t, std::initializer_list<U> il,
+ Args&&... args)
+ : data_base(in_place_t(), il, y_absl::forward<Args>(args)...) {
+ }
+
+ // Value constructor (implicit)
+ template <
+ typename U = T,
+ typename std::enable_if<
+ y_absl::conjunction<y_absl::negation<std::is_same<
+ in_place_t, typename std::decay<U>::type> >,
+ y_absl::negation<std::is_same<
+ optional<T>, typename std::decay<U>::type> >,
+ std::is_convertible<U&&, T>,
+ std::is_constructible<T, U&&> >::value,
+ bool>::type = false>
+ constexpr optional(U&& v) : data_base(in_place_t(), y_absl::forward<U>(v)) {}
+
+ // Value constructor (explicit)
+ template <
+ typename U = T,
+ typename std::enable_if<
+ y_absl::conjunction<y_absl::negation<std::is_same<
+ in_place_t, typename std::decay<U>::type>>,
+ y_absl::negation<std::is_same<
+ optional<T>, typename std::decay<U>::type>>,
+ y_absl::negation<std::is_convertible<U&&, T>>,
+ std::is_constructible<T, U&&>>::value,
+ bool>::type = false>
+ explicit constexpr optional(U&& v)
+ : data_base(in_place_t(), y_absl::forward<U>(v)) {}
+
+ // Converting copy constructor (implicit)
+ template <typename U,
+ typename std::enable_if<
+ y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U> >,
+ std::is_constructible<T, const U&>,
+ y_absl::negation<
+ optional_internal::
+ is_constructible_convertible_from_optional<T, U> >,
+ std::is_convertible<const U&, T> >::value,
+ bool>::type = false>
+ optional(const optional<U>& rhs) {
+ if (rhs) {
+ this->construct(*rhs);
+ }
+ }
+
+ // Converting copy constructor (explicit)
+ template <typename U,
+ typename std::enable_if<
+ y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U>>,
+ std::is_constructible<T, const U&>,
+ y_absl::negation<
+ optional_internal::
+ is_constructible_convertible_from_optional<T, U>>,
+ y_absl::negation<std::is_convertible<const U&, T>>>::value,
+ bool>::type = false>
+ explicit optional(const optional<U>& rhs) {
+ if (rhs) {
+ this->construct(*rhs);
+ }
+ }
+
+ // Converting move constructor (implicit)
+ template <typename U,
+ typename std::enable_if<
+ y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U> >,
+ std::is_constructible<T, U&&>,
+ y_absl::negation<
+ optional_internal::
+ is_constructible_convertible_from_optional<T, U> >,
+ std::is_convertible<U&&, T> >::value,
+ bool>::type = false>
+ optional(optional<U>&& rhs) {
+ if (rhs) {
+ this->construct(std::move(*rhs));
+ }
+ }
+
+ // Converting move constructor (explicit)
+ template <
+ typename U,
+ typename std::enable_if<
+ y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
+ y_absl::negation<
+ optional_internal::is_constructible_convertible_from_optional<
+ T, U>>,
+ y_absl::negation<std::is_convertible<U&&, T>>>::value,
+ bool>::type = false>
+ explicit optional(optional<U>&& rhs) {
+ if (rhs) {
+ this->construct(std::move(*rhs));
+ }
+ }
+
+ // Destructor. Trivial if `T` is trivially destructible.
+ ~optional() = default;
+
+ // Assignment Operators
+
+ // Assignment from `nullopt`
+ //
+ // Example:
+ //
+ // struct S { int value; };
+ // optional<S> opt = y_absl::nullopt; // Could also use opt = { };
+ optional& operator=(nullopt_t) noexcept {
+ this->destruct();
+ return *this;
+ }
+
+ // Copy assignment operator, standard semantics
+ optional& operator=(const optional& src) = default;
+
+ // Move assignment operator, standard semantics
+ optional& operator=(optional&& src) = default;
+
+ // Value assignment operators
+ template <
+ typename U = T,
+ typename = typename std::enable_if<y_absl::conjunction<
+ y_absl::negation<
+ std::is_same<optional<T>, typename std::decay<U>::type>>,
+ y_absl::negation<
+ y_absl::conjunction<std::is_scalar<T>,
+ std::is_same<T, typename std::decay<U>::type>>>,
+ std::is_constructible<T, U>, std::is_assignable<T&, U>>::value>::type>
+ optional& operator=(U&& v) {
+ this->assign(std::forward<U>(v));
+ return *this;
+ }
+
+ template <
+ typename U,
+ typename = typename std::enable_if<y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U>>,
+ std::is_constructible<T, const U&>, std::is_assignable<T&, const U&>,
+ y_absl::negation<
+ optional_internal::
+ is_constructible_convertible_assignable_from_optional<
+ T, U>>>::value>::type>
+ optional& operator=(const optional<U>& rhs) {
+ if (rhs) {
+ this->assign(*rhs);
+ } else {
+ this->destruct();
+ }
+ return *this;
+ }
+
+ template <typename U,
+ typename = typename std::enable_if<y_absl::conjunction<
+ y_absl::negation<std::is_same<T, U>>, std::is_constructible<T, U>,
+ std::is_assignable<T&, U>,
+ y_absl::negation<
+ optional_internal::
+ is_constructible_convertible_assignable_from_optional<
+ T, U>>>::value>::type>
+ optional& operator=(optional<U>&& rhs) {
+ if (rhs) {
+ this->assign(std::move(*rhs));
+ } else {
+ this->destruct();
+ }
+ return *this;
+ }
+
+ // Modifiers
+
+ // optional::reset()
+ //
+ // Destroys the inner `T` value of an `y_absl::optional` if one is present.
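+  //
+  // Example (a sketch):
+  //
+  //   y_absl::optional<int> opt = 3;
+  //   opt.reset();  // `opt` no longer contains a value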
+ ABSL_ATTRIBUTE_REINITIALIZES void reset() noexcept { this->destruct(); }
+
+ // optional::emplace()
+ //
+ // (Re)constructs the underlying `T` in-place with the given forwarded
+ // arguments.
+ //
+ // Example:
+ //
+ // optional<Foo> opt;
+ // opt.emplace(arg1,arg2,arg3); // Constructs Foo(arg1,arg2,arg3)
+ //
+  // If the optional is non-empty, and the `args` refer to subobjects of the
+  // current object, then behavior is undefined, because the current object
+  // will be destroyed before the new object is constructed with `args`.
+ template <typename... Args,
+ typename = typename std::enable_if<
+ std::is_constructible<T, Args&&...>::value>::type>
+ T& emplace(Args&&... args) {
+ this->destruct();
+ this->construct(std::forward<Args>(args)...);
+ return reference();
+ }
+
+ // Emplace reconstruction overload for an initializer list and the given
+ // forwarded arguments.
+ //
+ // Example:
+ //
+ // struct Foo {
+ // Foo(std::initializer_list<int>);
+ // };
+ //
+ // optional<Foo> opt;
+ // opt.emplace({1,2,3}); // Constructs Foo({1,2,3})
+ template <typename U, typename... Args,
+ typename = typename std::enable_if<std::is_constructible<
+ T, std::initializer_list<U>&, Args&&...>::value>::type>
+ T& emplace(std::initializer_list<U> il, Args&&... args) {
+ this->destruct();
+ this->construct(il, std::forward<Args>(args)...);
+ return reference();
+ }
+
+ // Swaps
+
+ // Swap, standard semantics
+ void swap(optional& rhs) noexcept(
+ std::is_nothrow_move_constructible<T>::value&&
+ type_traits_internal::IsNothrowSwappable<T>::value) {
+ if (*this) {
+ if (rhs) {
+ type_traits_internal::Swap(**this, *rhs);
+ } else {
+ rhs.construct(std::move(**this));
+ this->destruct();
+ }
+ } else {
+ if (rhs) {
+ this->construct(std::move(*rhs));
+ rhs.destruct();
+ } else {
+ // No effect (swap(disengaged, disengaged)).
+ }
+ }
+ }
+
+ // Observers
+
+ // optional::operator->()
+ //
+ // Accesses the underlying `T` value's member `m` of an `optional`. If the
+ // `optional` is empty, behavior is undefined.
+ //
+ // If you need myOpt->foo in constexpr, use (*myOpt).foo instead.
+ const T* operator->() const {
+ ABSL_HARDENING_ASSERT(this->engaged_);
+ return std::addressof(this->data_);
+ }
+ T* operator->() {
+ ABSL_HARDENING_ASSERT(this->engaged_);
+ return std::addressof(this->data_);
+ }
+
+ // optional::operator*()
+ //
+ // Accesses the underlying `T` value of an `optional`. If the `optional` is
+ // empty, behavior is undefined.
+ constexpr const T& operator*() const& {
+ return ABSL_HARDENING_ASSERT(this->engaged_), reference();
+ }
+ T& operator*() & {
+ ABSL_HARDENING_ASSERT(this->engaged_);
+ return reference();
+ }
+ constexpr const T&& operator*() const && {
+ return ABSL_HARDENING_ASSERT(this->engaged_), y_absl::move(reference());
+ }
+ T&& operator*() && {
+ ABSL_HARDENING_ASSERT(this->engaged_);
+ return std::move(reference());
+ }
+
+ // optional::operator bool()
+ //
+ // Returns false if and only if the `optional` is empty.
+ //
+ // if (opt) {
+ // // do something with *opt or opt->;
+ // } else {
+ // // opt is empty.
+ // }
+ //
+ constexpr explicit operator bool() const noexcept { return this->engaged_; }
+
+ // optional::has_value()
+ //
+ // Determines whether the `optional` contains a value. Returns `false` if and
+ // only if `*this` is empty.
+ constexpr bool has_value() const noexcept { return this->engaged_; }
+
+// Suppress a bogus MSVC warning: MSVC complains that the call to reference()
+// after throw_bad_optional_access() is unreachable.
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4702)
+#endif // _MSC_VER
+ // optional::value()
+ //
+  // Returns a reference to an `optional`'s underlying value. The constness
+  // and lvalue/rvalue-ness of the `optional` is preserved to the view of
+  // the `T` sub-object. Throws `y_absl::bad_optional_access` when the
+  // `optional` is empty.
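+  //
+  // Example (a sketch):
+  //
+  //   y_absl::optional<int> opt;
+  //   opt.value();  // throws y_absl::bad_optional_access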
+ constexpr const T& value() const & {
+ return static_cast<bool>(*this)
+ ? reference()
+ : (optional_internal::throw_bad_optional_access(), reference());
+ }
+ T& value() & {
+ return static_cast<bool>(*this)
+ ? reference()
+ : (optional_internal::throw_bad_optional_access(), reference());
+ }
+ T&& value() && { // NOLINT(build/c++11)
+ return std::move(
+ static_cast<bool>(*this)
+ ? reference()
+ : (optional_internal::throw_bad_optional_access(), reference()));
+ }
+ constexpr const T&& value() const && { // NOLINT(build/c++11)
+ return y_absl::move(
+ static_cast<bool>(*this)
+ ? reference()
+ : (optional_internal::throw_bad_optional_access(), reference()));
+ }
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER
+
+ // optional::value_or()
+ //
+ // Returns either the value of `T` or a passed default `v` if the `optional`
+ // is empty.
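+  //
+  // Example (a sketch):
+  //
+  //   y_absl::optional<int> opt;
+  //   int x = opt.value_or(42);  // x == 42, since `opt` is empty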
+ template <typename U>
+ constexpr T value_or(U&& v) const& {
+ static_assert(std::is_copy_constructible<value_type>::value,
+ "optional<T>::value_or: T must be copy constructible");
+ static_assert(std::is_convertible<U&&, value_type>::value,
+ "optional<T>::value_or: U must be convertible to T");
+ return static_cast<bool>(*this)
+ ? **this
+ : static_cast<T>(y_absl::forward<U>(v));
+ }
+ template <typename U>
+ T value_or(U&& v) && { // NOLINT(build/c++11)
+ static_assert(std::is_move_constructible<value_type>::value,
+ "optional<T>::value_or: T must be move constructible");
+ static_assert(std::is_convertible<U&&, value_type>::value,
+ "optional<T>::value_or: U must be convertible to T");
+ return static_cast<bool>(*this) ? std::move(**this)
+ : static_cast<T>(std::forward<U>(v));
+ }
+
+ private:
+ // Private accessors for internal storage viewed as reference to T.
+ constexpr const T& reference() const { return this->data_; }
+ T& reference() { return this->data_; }
+
+ // T constraint checks. You can't have an optional of nullopt_t, in_place_t
+ // or a reference.
+ static_assert(
+ !std::is_same<nullopt_t, typename std::remove_cv<T>::type>::value,
+ "optional<nullopt_t> is not allowed.");
+ static_assert(
+ !std::is_same<in_place_t, typename std::remove_cv<T>::type>::value,
+ "optional<in_place_t> is not allowed.");
+ static_assert(!std::is_reference<T>::value,
+ "optional<reference> is not allowed.");
+};
+
+// Non-member functions
+
+// swap()
+//
+// Performs a swap between two `y_absl::optional` objects, using standard
+// semantics.
+template <typename T, typename std::enable_if<
+ std::is_move_constructible<T>::value &&
+ type_traits_internal::IsSwappable<T>::value,
+ bool>::type = false>
+void swap(optional<T>& a, optional<T>& b) noexcept(noexcept(a.swap(b))) {
+ a.swap(b);
+}
+
+// make_optional()
+//
+// Creates a non-empty `optional<T>` where the type of `T` is deduced. An
+// `y_absl::optional` can also be explicitly instantiated with
+// `make_optional<T>(v)`.
+//
+// Note: `make_optional()` constructions may be declared `constexpr` for
+// trivially copyable types `T`. Non-trivial types require the guaranteed copy
+// elision of C++17 for `make_optional` to be usable in `constexpr` contexts.
+//
+// Example:
+//
+// constexpr y_absl::optional<int> opt = y_absl::make_optional(1);
+// static_assert(opt.value() == 1, "");
+template <typename T>
+constexpr optional<typename std::decay<T>::type> make_optional(T&& v) {
+ return optional<typename std::decay<T>::type>(y_absl::forward<T>(v));
+}
+
+template <typename T, typename... Args>
+constexpr optional<T> make_optional(Args&&... args) {
+ return optional<T>(in_place_t(), y_absl::forward<Args>(args)...);
+}
+
+template <typename T, typename U, typename... Args>
+constexpr optional<T> make_optional(std::initializer_list<U> il,
+ Args&&... args) {
+ return optional<T>(in_place_t(), il,
+ y_absl::forward<Args>(args)...);
+}
+
+// Relational operators [optional.relops]
+
+// Empty optionals are considered equal to each other and less than non-empty
+// optionals. Supports relations between optional<T> and optional<U>, between
+// optional<T> and U, and between optional<T> and nullopt.
+//
+// Note: We're careful to support T having non-bool relationals.
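+//
+// For example (an illustrative sketch of these semantics):
+//
+//   y_absl::optional<int> empty, one = 1, two = 2;
+//   assert(empty == y_absl::nullopt);  // empty optionals compare equal
+//   assert(empty < one);               // ...and sort before engaged ones
+//   assert(one < two && one == 1 && two != 1);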
+
+// Requires: The expression, e.g. "*x == *y", shall be well-formed and its
+// result shall be convertible to bool.
+// The C++17 (N4606) "Returns:" statements are translated into code in an
+// obvious way here, and the original text is retained as function docs.
+// Returns: If bool(x) != bool(y), false; otherwise if bool(x) == false, true;
+// otherwise *x == *y.
+template <typename T, typename U>
+constexpr auto operator==(const optional<T>& x, const optional<U>& y)
+ -> decltype(optional_internal::convertible_to_bool(*x == *y)) {
+ return static_cast<bool>(x) != static_cast<bool>(y)
+ ? false
+ : static_cast<bool>(x) == false ? true
+ : static_cast<bool>(*x == *y);
+}
+
+// Returns: If bool(x) != bool(y), true; otherwise, if bool(x) == false, false;
+// otherwise *x != *y.
+template <typename T, typename U>
+constexpr auto operator!=(const optional<T>& x, const optional<U>& y)
+ -> decltype(optional_internal::convertible_to_bool(*x != *y)) {
+ return static_cast<bool>(x) != static_cast<bool>(y)
+ ? true
+ : static_cast<bool>(x) == false ? false
+ : static_cast<bool>(*x != *y);
+}
+// Returns: If !y, false; otherwise, if !x, true; otherwise *x < *y.
+template <typename T, typename U>
+constexpr auto operator<(const optional<T>& x, const optional<U>& y)
+ -> decltype(optional_internal::convertible_to_bool(*x < *y)) {
+ return !y ? false : !x ? true : static_cast<bool>(*x < *y);
+}
+// Returns: If !x, false; otherwise, if !y, true; otherwise *x > *y.
+template <typename T, typename U>
+constexpr auto operator>(const optional<T>& x, const optional<U>& y)
+ -> decltype(optional_internal::convertible_to_bool(*x > *y)) {
+ return !x ? false : !y ? true : static_cast<bool>(*x > *y);
+}
+// Returns: If !x, true; otherwise, if !y, false; otherwise *x <= *y.
+template <typename T, typename U>
+constexpr auto operator<=(const optional<T>& x, const optional<U>& y)
+ -> decltype(optional_internal::convertible_to_bool(*x <= *y)) {
+ return !x ? true : !y ? false : static_cast<bool>(*x <= *y);
+}
+// Returns: If !y, true; otherwise, if !x, false; otherwise *x >= *y.
+template <typename T, typename U>
+constexpr auto operator>=(const optional<T>& x, const optional<U>& y)
+ -> decltype(optional_internal::convertible_to_bool(*x >= *y)) {
+ return !y ? true : !x ? false : static_cast<bool>(*x >= *y);
+}
+
+// Comparison with nullopt [optional.nullops]
+// The C++17 (N4606) "Returns:" statements are used directly here.
+template <typename T>
+constexpr bool operator==(const optional<T>& x, nullopt_t) noexcept {
+ return !x;
+}
+template <typename T>
+constexpr bool operator==(nullopt_t, const optional<T>& x) noexcept {
+ return !x;
+}
+template <typename T>
+constexpr bool operator!=(const optional<T>& x, nullopt_t) noexcept {
+ return static_cast<bool>(x);
+}
+template <typename T>
+constexpr bool operator!=(nullopt_t, const optional<T>& x) noexcept {
+ return static_cast<bool>(x);
+}
+template <typename T>
+constexpr bool operator<(const optional<T>&, nullopt_t) noexcept {
+ return false;
+}
+template <typename T>
+constexpr bool operator<(nullopt_t, const optional<T>& x) noexcept {
+ return static_cast<bool>(x);
+}
+template <typename T>
+constexpr bool operator<=(const optional<T>& x, nullopt_t) noexcept {
+ return !x;
+}
+template <typename T>
+constexpr bool operator<=(nullopt_t, const optional<T>&) noexcept {
+ return true;
+}
+template <typename T>
+constexpr bool operator>(const optional<T>& x, nullopt_t) noexcept {
+ return static_cast<bool>(x);
+}
+template <typename T>
+constexpr bool operator>(nullopt_t, const optional<T>&) noexcept {
+ return false;
+}
+template <typename T>
+constexpr bool operator>=(const optional<T>&, nullopt_t) noexcept {
+ return true;
+}
+template <typename T>
+constexpr bool operator>=(nullopt_t, const optional<T>& x) noexcept {
+ return !x;
+}
+
+// Comparison with T [optional.comp_with_t]
+
+// Requires: The expression, e.g. "*x == v", shall be well-formed and its
+// result shall be convertible to bool.
+// The C++17 (N4606) "Equivalent to:" statements are used directly here.
+template <typename T, typename U>
+constexpr auto operator==(const optional<T>& x, const U& v)
+ -> decltype(optional_internal::convertible_to_bool(*x == v)) {
+ return static_cast<bool>(x) ? static_cast<bool>(*x == v) : false;
+}
+template <typename T, typename U>
+constexpr auto operator==(const U& v, const optional<T>& x)
+ -> decltype(optional_internal::convertible_to_bool(v == *x)) {
+ return static_cast<bool>(x) ? static_cast<bool>(v == *x) : false;
+}
+template <typename T, typename U>
+constexpr auto operator!=(const optional<T>& x, const U& v)
+ -> decltype(optional_internal::convertible_to_bool(*x != v)) {
+ return static_cast<bool>(x) ? static_cast<bool>(*x != v) : true;
+}
+template <typename T, typename U>
+constexpr auto operator!=(const U& v, const optional<T>& x)
+ -> decltype(optional_internal::convertible_to_bool(v != *x)) {
+ return static_cast<bool>(x) ? static_cast<bool>(v != *x) : true;
+}
+template <typename T, typename U>
+constexpr auto operator<(const optional<T>& x, const U& v)
+ -> decltype(optional_internal::convertible_to_bool(*x < v)) {
+ return static_cast<bool>(x) ? static_cast<bool>(*x < v) : true;
+}
+template <typename T, typename U>
+constexpr auto operator<(const U& v, const optional<T>& x)
+ -> decltype(optional_internal::convertible_to_bool(v < *x)) {
+ return static_cast<bool>(x) ? static_cast<bool>(v < *x) : false;
+}
+template <typename T, typename U>
+constexpr auto operator<=(const optional<T>& x, const U& v)
+ -> decltype(optional_internal::convertible_to_bool(*x <= v)) {
+ return static_cast<bool>(x) ? static_cast<bool>(*x <= v) : true;
+}
+template <typename T, typename U>
+constexpr auto operator<=(const U& v, const optional<T>& x)
+ -> decltype(optional_internal::convertible_to_bool(v <= *x)) {
+ return static_cast<bool>(x) ? static_cast<bool>(v <= *x) : false;
+}
+template <typename T, typename U>
+constexpr auto operator>(const optional<T>& x, const U& v)
+ -> decltype(optional_internal::convertible_to_bool(*x > v)) {
+ return static_cast<bool>(x) ? static_cast<bool>(*x > v) : false;
+}
+template <typename T, typename U>
+constexpr auto operator>(const U& v, const optional<T>& x)
+ -> decltype(optional_internal::convertible_to_bool(v > *x)) {
+ return static_cast<bool>(x) ? static_cast<bool>(v > *x) : true;
+}
+template <typename T, typename U>
+constexpr auto operator>=(const optional<T>& x, const U& v)
+ -> decltype(optional_internal::convertible_to_bool(*x >= v)) {
+ return static_cast<bool>(x) ? static_cast<bool>(*x >= v) : false;
+}
+template <typename T, typename U>
+constexpr auto operator>=(const U& v, const optional<T>& x)
+ -> decltype(optional_internal::convertible_to_bool(v >= *x)) {
+ return static_cast<bool>(x) ? static_cast<bool>(v >= *x) : true;
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+namespace std {
+
+// std::hash specialization for y_absl::optional.
+template <typename T>
+struct hash<y_absl::optional<T> >
+ : y_absl::optional_internal::optional_hash_base<T> {};
+
+} // namespace std
+
+#undef ABSL_MSVC_CONSTEXPR_BUG_IN_UNION_LIKE_CLASS
+
+#endif // ABSL_USES_STD_OPTIONAL
+
+#endif // ABSL_TYPES_OPTIONAL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h
new file mode 100644
index 00000000000..1490b2f1b24
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h
@@ -0,0 +1,726 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// span.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `Span<T>` type for holding a reference to existing
+// array data. The `Span` object, much like the `y_absl::string_view` object,
+// does not own such data itself, and the data being referenced by the span must
+// outlive the span itself. Unlike `view` type references, a span can hold a
+// reference to mutable data (and can mutate it for underlying types of
+// non-const T). A span provides a lightweight way to pass a reference to such
+// data.
+//
+// Additionally, this header file defines `MakeSpan()` and `MakeConstSpan()`
+// factory functions, for clearly creating spans of type `Span<T>` or read-only
+// `Span<const T>` when such types may be difficult to identify due to issues
+// with implicit conversion.
+//
+// The C++20 draft standard includes a `std::span` type. As of June 2020, the
+// differences between `y_absl::Span` and `std::span` are:
+// * `y_absl::Span` has `operator==` (which is likely a design bug,
+// per https://abseil.io/blog/20180531-regular-types)
+// * `y_absl::Span` has the factory functions `MakeSpan()` and
+// `MakeConstSpan()`
+// * bounds-checked access to `y_absl::Span` is accomplished with `at()`
+// * `y_absl::Span` has compiler-provided move and copy constructors and
+//   assignment. This is because they are specified as `constexpr`, and
+//   `constexpr` member functions imply `const` in C++11.
+// * A read-only `y_absl::Span<const T>` can be implicitly constructed from an
+// initializer list.
+// * `y_absl::Span` has no `bytes()`, `size_bytes()`, `as_bytes()`, or
+// `as_mutable_bytes()` methods
+// * `y_absl::Span` has no static extent template parameter, nor constructors
+// which exist only because of the static extent parameter.
+// * `y_absl::Span` has an explicit mutable-reference constructor
+//
+// For more information, see the class comments below.
+#ifndef ABSL_TYPES_SPAN_H_
+#define ABSL_TYPES_SPAN_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/internal/throw_delegate.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/base/port.h" // TODO(strel): remove this include
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/types/internal/span.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+//------------------------------------------------------------------------------
+// Span
+//------------------------------------------------------------------------------
+//
+// A `Span` is an "array reference" type for holding a reference to contiguous
+// array data; the `Span` object does not and cannot own such data itself. A
+// span provides an easy way to provide overloads for anything operating on
+// contiguous sequences without needing to manage pointers and array lengths
+// manually.
+//
+// A span is conceptually a pointer (ptr) and a length (size) into an already
+// existing array of contiguous memory; the array it represents references the
+// elements "ptr[0] .. ptr[size-1]". Passing a properly-constructed `Span`
+// instead of raw pointers avoids many issues related to out-of-bounds
+// indexing errors.
+//
+// Spans may also be constructed from containers holding contiguous sequences.
+// Such containers must supply `data()` and `size() const` methods (e.g.
+// `std::vector<T>`, `y_absl::InlinedVector<T, N>`). All implicit conversions to
+// `y_absl::Span` from such containers will create spans of type `const T`;
+// spans which can mutate their values (of type `T`) must use explicit
+// constructors.
+//
+// A `Span<T>` is somewhat analogous to an `y_absl::string_view`, but for an array
+// of elements of type `T`, and unlike an `y_absl::string_view`, a span can hold a
+// reference to mutable data. A user of `Span` must ensure that the data being
+// pointed to outlives the `Span` itself.
+//
+// You can construct a `Span<T>` in several ways:
+//
+// * Explicitly from a reference to a container type
+// * Explicitly from a pointer and size
+// * Implicitly from a container type (but only for spans of type `const T`)
+// * Using the `MakeSpan()` or `MakeConstSpan()` factory functions.
+//
+// Examples:
+//
+// // Construct a Span explicitly from a container:
+// std::vector<int> v = {1, 2, 3, 4, 5};
+// auto span = y_absl::Span<const int>(v);
+//
+// // Construct a Span explicitly from a C-style array:
+// int a[5] = {1, 2, 3, 4, 5};
+// auto span = y_absl::Span<const int>(a);
+//
+// // Construct a Span implicitly from a container
+// void MyRoutine(y_absl::Span<const int> a) {
+// ...
+// }
+//   std::vector<int> v = {1, 2, 3, 4, 5};
+//   MyRoutine(v);  // converts to Span<const int>
+//
+// Note that `Span` objects, in addition to requiring that the memory they
+// point to remains alive, must also ensure that such memory does not get
+// reallocated. Therefore, to avoid undefined behavior, containers with
+// associated spans should not invoke operations that may reallocate memory
+// (such as resizing) or invalidate iterators into the container.
+//
+// One common use for a `Span` is when passing arguments to a routine that can
+// accept a variety of array types (e.g. a `std::vector`, `y_absl::InlinedVector`,
+// a C-style array, etc.). Instead of creating overloads for each case, you
+// can simply specify a `Span` as the argument to such a routine.
+//
+// Example:
+//
+// void MyRoutine(y_absl::Span<const int> a) {
+// ...
+// }
+//
+//   std::vector<int> v = {1, 2, 3, 4, 5};
+// MyRoutine(v);
+//
+// y_absl::InlinedVector<int, 4> my_inline_vector;
+// MyRoutine(my_inline_vector);
+//
+// // Explicit constructor from pointer,size
+// int* my_array = new int[10];
+// MyRoutine(y_absl::Span<const int>(my_array, 10));
+template <typename T>
+class Span {
+ private:
+ // Used to determine whether a Span can be constructed from a container of
+ // type C.
+ template <typename C>
+ using EnableIfConvertibleFrom =
+ typename std::enable_if<span_internal::HasData<T, C>::value &&
+ span_internal::HasSize<C>::value>::type;
+
+ // Used to SFINAE-enable a function when the slice elements are const.
+ template <typename U>
+ using EnableIfConstView =
+ typename std::enable_if<std::is_const<T>::value, U>::type;
+
+ // Used to SFINAE-enable a function when the slice elements are mutable.
+ template <typename U>
+ using EnableIfMutableView =
+ typename std::enable_if<!std::is_const<T>::value, U>::type;
+
+ public:
+ using element_type = T;
+ using value_type = y_absl::remove_cv_t<T>;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using reference = T&;
+ using const_reference = const T&;
+ using iterator = pointer;
+ using const_iterator = const_pointer;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+
+ static const size_type npos = ~(size_type(0));
+
+ constexpr Span() noexcept : Span(nullptr, 0) {}
+ constexpr Span(pointer array, size_type length) noexcept
+ : ptr_(array), len_(length) {}
+
+ // Implicit conversion constructors
+ template <size_t N>
+ constexpr Span(T (&a)[N]) noexcept // NOLINT(runtime/explicit)
+ : Span(a, N) {}
+
+ // Explicit reference constructor for a mutable `Span<T>` type. Can be
+ // replaced with MakeSpan() to infer the type parameter.
+ template <typename V, typename = EnableIfConvertibleFrom<V>,
+ typename = EnableIfMutableView<V>>
+ explicit Span(V& v) noexcept // NOLINT(runtime/references)
+ : Span(span_internal::GetData(v), v.size()) {}
+
+ // Implicit reference constructor for a read-only `Span<const T>` type
+ template <typename V, typename = EnableIfConvertibleFrom<V>,
+ typename = EnableIfConstView<V>>
+ constexpr Span(const V& v) noexcept // NOLINT(runtime/explicit)
+ : Span(span_internal::GetData(v), v.size()) {}
+
+ // Implicit constructor from an initializer list, making it possible to pass a
+ // brace-enclosed initializer list to a function expecting a `Span`. Such
+ // spans constructed from an initializer list must be of type `Span<const T>`.
+ //
+ // void Process(y_absl::Span<const int> x);
+ // Process({1, 2, 3});
+ //
+ // Note that as always the array referenced by the span must outlive the span.
+ // Since an initializer list constructor acts as if it is fed a temporary
+ // array (cf. C++ standard [dcl.init.list]/5), it's safe to use this
+ // constructor only when the `std::initializer_list` itself outlives the span.
+ // In order to meet this requirement it's sufficient to ensure that neither
+ // the span nor a copy of it is used outside of the expression in which it's
+ // created:
+ //
+ // // Assume that this function uses the array directly, not retaining any
+ // // copy of the span or pointer to any of its elements.
+ // void Process(y_absl::Span<const int> ints);
+ //
+ // // Okay: the std::initializer_list<int> will reference a temporary array
+ // // that isn't destroyed until after the call to Process returns.
+ // Process({ 17, 19 });
+ //
+ // // Not okay: the storage used by the std::initializer_list<int> is not
+ // // allowed to be referenced after the first line.
+ // y_absl::Span<const int> ints = { 17, 19 };
+ // Process(ints);
+ //
+ // // Not okay for the same reason as above: even when the elements of the
+ // // initializer list expression are not temporaries the underlying array
+ // // is, so the initializer list must still outlive the span.
+ // const int foo = 17;
+ // y_absl::Span<const int> ints = { foo };
+ // Process(ints);
+ //
+ template <typename LazyT = T,
+ typename = EnableIfConstView<LazyT>>
+ Span(std::initializer_list<value_type> v
+ ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept // NOLINT(runtime/explicit)
+ : Span(v.begin(), v.size()) {}
+
+ // Accessors
+
+ // Span::data()
+ //
+ // Returns a pointer to the span's underlying array of data (which is held
+ // outside the span).
+ constexpr pointer data() const noexcept { return ptr_; }
+
+ // Span::size()
+ //
+ // Returns the size of this span.
+ constexpr size_type size() const noexcept { return len_; }
+
+ // Span::length()
+ //
+ // Returns the length (size) of this span.
+ constexpr size_type length() const noexcept { return size(); }
+
+ // Span::empty()
+ //
+  // Returns true if and only if this span is empty.
+ constexpr bool empty() const noexcept { return size() == 0; }
+
+ // Span::operator[]
+ //
+  // Returns a reference to the i'th element of this span. The behavior is
+  // undefined if `i` is out of range.
+ constexpr reference operator[](size_type i) const noexcept {
+ // MSVC 2015 accepts this as constexpr, but not ptr_[i]
+ return ABSL_HARDENING_ASSERT(i < size()), *(data() + i);
+ }
+
+ // Span::at()
+ //
+  // Returns a reference to the i'th element of this span, throwing
+  // `std::out_of_range` if `i` is not a valid index.
+ constexpr reference at(size_type i) const {
+ return ABSL_PREDICT_TRUE(i < size()) //
+ ? *(data() + i)
+ : (base_internal::ThrowStdOutOfRange(
+ "Span::at failed bounds check"),
+ *(data() + i));
+ }
+
+ // Span::front()
+ //
+ // Returns a reference to the first element of this span. The span must not
+ // be empty.
+ constexpr reference front() const noexcept {
+ return ABSL_HARDENING_ASSERT(size() > 0), *data();
+ }
+
+ // Span::back()
+ //
+ // Returns a reference to the last element of this span. The span must not
+ // be empty.
+ constexpr reference back() const noexcept {
+ return ABSL_HARDENING_ASSERT(size() > 0), *(data() + size() - 1);
+ }
+
+ // Span::begin()
+ //
+ // Returns an iterator pointing to the first element of this span, or `end()`
+ // if the span is empty.
+ constexpr iterator begin() const noexcept { return data(); }
+
+ // Span::cbegin()
+ //
+ // Returns a const iterator pointing to the first element of this span, or
+ // `end()` if the span is empty.
+ constexpr const_iterator cbegin() const noexcept { return begin(); }
+
+ // Span::end()
+ //
+ // Returns an iterator pointing just beyond the last element at the
+ // end of this span. This iterator acts as a placeholder; attempting to
+ // access it results in undefined behavior.
+ constexpr iterator end() const noexcept { return data() + size(); }
+
+ // Span::cend()
+ //
+ // Returns a const iterator pointing just beyond the last element at the
+ // end of this span. This iterator acts as a placeholder; attempting to
+ // access it results in undefined behavior.
+ constexpr const_iterator cend() const noexcept { return end(); }
+
+ // Span::rbegin()
+ //
+ // Returns a reverse iterator pointing to the last element at the end of this
+ // span, or `rend()` if the span is empty.
+ constexpr reverse_iterator rbegin() const noexcept {
+ return reverse_iterator(end());
+ }
+
+ // Span::crbegin()
+ //
+ // Returns a const reverse iterator pointing to the last element at the end of
+ // this span, or `crend()` if the span is empty.
+ constexpr const_reverse_iterator crbegin() const noexcept { return rbegin(); }
+
+ // Span::rend()
+ //
+ // Returns a reverse iterator pointing just before the first element
+ // at the beginning of this span. This pointer acts as a placeholder;
+ // attempting to access its element results in undefined behavior.
+ constexpr reverse_iterator rend() const noexcept {
+ return reverse_iterator(begin());
+ }
+
+ // Span::crend()
+ //
+ // Returns a reverse const iterator pointing just before the first element
+ // at the beginning of this span. This pointer acts as a placeholder;
+ // attempting to access its element results in undefined behavior.
+ constexpr const_reverse_iterator crend() const noexcept { return rend(); }
+
+ // Span mutations
+
+ // Span::remove_prefix()
+ //
+ // Removes the first `n` elements from the span.
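+  //
+  // Example (a sketch):
+  //
+  //   std::vector<int> v = {10, 11, 12, 13};
+  //   y_absl::Span<int> s = y_absl::MakeSpan(v);
+  //   s.remove_prefix(1);  // s == {11, 12, 13}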
+ void remove_prefix(size_type n) noexcept {
+ ABSL_HARDENING_ASSERT(size() >= n);
+ ptr_ += n;
+ len_ -= n;
+ }
+
+ // Span::remove_suffix()
+ //
+ // Removes the last `n` elements from the span.
+ void remove_suffix(size_type n) noexcept {
+ ABSL_HARDENING_ASSERT(size() >= n);
+ len_ -= n;
+ }
+
+ // Span::subspan()
+ //
+ // Returns a `Span` starting at element `pos` and of length `len`. Both `pos`
+ // and `len` are of type `size_type` and thus non-negative. Parameter `pos`
+ // must be <= size(). Any `len` value that points past the end of the span
+ // will be trimmed to at most size() - `pos`. A default `len` value of `npos`
+ // ensures the returned subspan continues until the end of the span.
+ //
+ // Examples:
+ //
+ // std::vector<int> vec = {10, 11, 12, 13};
+ // y_absl::MakeSpan(vec).subspan(1, 2); // {11, 12}
+ // y_absl::MakeSpan(vec).subspan(2, 8); // {12, 13}
+ // y_absl::MakeSpan(vec).subspan(1); // {11, 12, 13}
+ // y_absl::MakeSpan(vec).subspan(4); // {}
+ // y_absl::MakeSpan(vec).subspan(5); // throws std::out_of_range
+ constexpr Span subspan(size_type pos = 0, size_type len = npos) const {
+ return (pos <= size())
+ ? Span(data() + pos, span_internal::Min(size() - pos, len))
+ : (base_internal::ThrowStdOutOfRange("pos > size()"), Span());
+ }
+
+ // Span::first()
+ //
+ // Returns a `Span` containing first `len` elements. Parameter `len` is of
+ // type `size_type` and thus non-negative. `len` value must be <= size().
+ //
+ // Examples:
+ //
+ // std::vector<int> vec = {10, 11, 12, 13};
+ // y_absl::MakeSpan(vec).first(1); // {10}
+ // y_absl::MakeSpan(vec).first(3); // {10, 11, 12}
+ // y_absl::MakeSpan(vec).first(5); // throws std::out_of_range
+ constexpr Span first(size_type len) const {
+ return (len <= size())
+ ? Span(data(), len)
+ : (base_internal::ThrowStdOutOfRange("len > size()"), Span());
+ }
+
+ // Span::last()
+ //
+ // Returns a `Span` containing last `len` elements. Parameter `len` is of
+ // type `size_type` and thus non-negative. `len` value must be <= size().
+ //
+ // Examples:
+ //
+ // std::vector<int> vec = {10, 11, 12, 13};
+ // y_absl::MakeSpan(vec).last(1); // {13}
+ // y_absl::MakeSpan(vec).last(3); // {11, 12, 13}
+ // y_absl::MakeSpan(vec).last(5); // throws std::out_of_range
+ constexpr Span last(size_type len) const {
+ return (len <= size())
+ ? Span(size() - len + data(), len)
+ : (base_internal::ThrowStdOutOfRange("len > size()"), Span());
+ }
+
+ // Support for y_absl::Hash.
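+  //
+  // A sketch of intended use (assumes `y_absl::Hash` from the hash library):
+  //
+  //   std::vector<int> v = {1, 2, 3};
+  //   size_t h = y_absl::Hash<y_absl::Span<const int>>()(v);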
+ template <typename H>
+ friend H AbslHashValue(H h, Span v) {
+ return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()),
+ v.size());
+ }
+
+ private:
+ pointer ptr_;
+ size_type len_;
+};
+
+template <typename T>
+const typename Span<T>::size_type Span<T>::npos;
+
+// Span relationals
+
+// Equality is compared element-by-element, while ordering is lexicographical.
+// We provide three overloads for each operator to cover any combination on the
+// left or right hand side of mutable Span<T>, read-only Span<const T>, and
+// convertible-to-read-only Span<T>.
+// TODO(zhangxy): Due to MSVC overload resolution bug with partial ordering
+// template functions, 5 overloads per operator is needed as a workaround. We
+// should update them to 3 overloads per operator using non-deduced context like
+// string_view, i.e.
+// - (Span<T>, Span<T>)
+// - (Span<T>, non_deduced<Span<const T>>)
+// - (non_deduced<Span<const T>>, Span<T>)
+
+// operator==
+template <typename T>
+bool operator==(Span<T> a, Span<T> b) {
+ return span_internal::EqualImpl<Span, const T>(a, b);
+}
+template <typename T>
+bool operator==(Span<const T> a, Span<T> b) {
+ return span_internal::EqualImpl<Span, const T>(a, b);
+}
+template <typename T>
+bool operator==(Span<T> a, Span<const T> b) {
+ return span_internal::EqualImpl<Span, const T>(a, b);
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator==(const U& a, Span<T> b) {
+ return span_internal::EqualImpl<Span, const T>(a, b);
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator==(Span<T> a, const U& b) {
+ return span_internal::EqualImpl<Span, const T>(a, b);
+}
+
+// operator!=
+template <typename T>
+bool operator!=(Span<T> a, Span<T> b) {
+ return !(a == b);
+}
+template <typename T>
+bool operator!=(Span<const T> a, Span<T> b) {
+ return !(a == b);
+}
+template <typename T>
+bool operator!=(Span<T> a, Span<const T> b) {
+ return !(a == b);
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator!=(const U& a, Span<T> b) {
+ return !(a == b);
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator!=(Span<T> a, const U& b) {
+ return !(a == b);
+}
+
+// operator<
+template <typename T>
+bool operator<(Span<T> a, Span<T> b) {
+ return span_internal::LessThanImpl<Span, const T>(a, b);
+}
+template <typename T>
+bool operator<(Span<const T> a, Span<T> b) {
+ return span_internal::LessThanImpl<Span, const T>(a, b);
+}
+template <typename T>
+bool operator<(Span<T> a, Span<const T> b) {
+ return span_internal::LessThanImpl<Span, const T>(a, b);
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator<(const U& a, Span<T> b) {
+ return span_internal::LessThanImpl<Span, const T>(a, b);
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator<(Span<T> a, const U& b) {
+ return span_internal::LessThanImpl<Span, const T>(a, b);
+}
+
+// operator>
+template <typename T>
+bool operator>(Span<T> a, Span<T> b) {
+ return b < a;
+}
+template <typename T>
+bool operator>(Span<const T> a, Span<T> b) {
+ return b < a;
+}
+template <typename T>
+bool operator>(Span<T> a, Span<const T> b) {
+ return b < a;
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator>(const U& a, Span<T> b) {
+ return b < a;
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator>(Span<T> a, const U& b) {
+ return b < a;
+}
+
+// operator<=
+template <typename T>
+bool operator<=(Span<T> a, Span<T> b) {
+ return !(b < a);
+}
+template <typename T>
+bool operator<=(Span<const T> a, Span<T> b) {
+ return !(b < a);
+}
+template <typename T>
+bool operator<=(Span<T> a, Span<const T> b) {
+ return !(b < a);
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator<=(const U& a, Span<T> b) {
+ return !(b < a);
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator<=(Span<T> a, const U& b) {
+ return !(b < a);
+}
+
+// operator>=
+template <typename T>
+bool operator>=(Span<T> a, Span<T> b) {
+ return !(a < b);
+}
+template <typename T>
+bool operator>=(Span<const T> a, Span<T> b) {
+ return !(a < b);
+}
+template <typename T>
+bool operator>=(Span<T> a, Span<const T> b) {
+ return !(a < b);
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator>=(const U& a, Span<T> b) {
+ return !(a < b);
+}
+template <
+ typename T, typename U,
+ typename = span_internal::EnableIfConvertibleTo<U, y_absl::Span<const T>>>
+bool operator>=(Span<T> a, const U& b) {
+ return !(a < b);
+}
+
+// MakeSpan()
+//
+// Constructs a mutable `Span<T>`, deducing `T` automatically from either a
+// container or pointer+size.
+//
+// Because a read-only `Span<const T>` is implicitly constructed from container
+// types regardless of whether the container itself is a const container,
+// constructing mutable spans of type `Span<T>` from containers requires
+// explicit constructors. The container-accepting version of `MakeSpan()`
+// deduces the type of `T` by the constness of the pointer received from the
+// container's `data()` member. Similarly, the pointer-accepting version returns
+// a `Span<const T>` if `T` is `const`, and a `Span<T>` otherwise.
+//
+// Examples:
+//
+// void MyRoutine(y_absl::Span<MyComplicatedType> a) {
+// ...
+// };
+// // my_vector is a container of non-const types
+// std::vector<MyComplicatedType> my_vector;
+//
+// // Constructing a Span implicitly attempts to create a Span of type
+// // `Span<const T>`
+// MyRoutine(my_vector); // error, type mismatch
+//
+// // Explicitly constructing the Span is verbose
+// MyRoutine(y_absl::Span<MyComplicatedType>(my_vector));
+//
+// // Use MakeSpan() to make an y_absl::Span<T>
+// MyRoutine(y_absl::MakeSpan(my_vector));
+//
+// // Construct a span from an array ptr+size
+// y_absl::Span<T> my_span() {
+// return y_absl::MakeSpan(&array[0], num_elements_);
+// }
+//
+template <int&... ExplicitArgumentBarrier, typename T>
+constexpr Span<T> MakeSpan(T* ptr, size_t size) noexcept {
+ return Span<T>(ptr, size);
+}
+
+template <int&... ExplicitArgumentBarrier, typename T>
+Span<T> MakeSpan(T* begin, T* end) noexcept {
+ return ABSL_HARDENING_ASSERT(begin <= end), Span<T>(begin, end - begin);
+}
+
+template <int&... ExplicitArgumentBarrier, typename C>
+constexpr auto MakeSpan(C& c) noexcept // NOLINT(runtime/references)
+ -> decltype(y_absl::MakeSpan(span_internal::GetData(c), c.size())) {
+ return MakeSpan(span_internal::GetData(c), c.size());
+}
+
+template <int&... ExplicitArgumentBarrier, typename T, size_t N>
+constexpr Span<T> MakeSpan(T (&array)[N]) noexcept {
+ return Span<T>(array, N);
+}
+
+// MakeConstSpan()
+//
+// Constructs a `Span<const T>` as with `MakeSpan`, deducing `T` automatically,
+// but always returning a `Span<const T>`.
+//
+// Examples:
+//
+// void ProcessInts(y_absl::Span<const int> some_ints);
+//
+// // Call with a pointer and size.
+// int array[3] = { 0, 0, 0 };
+// ProcessInts(y_absl::MakeConstSpan(&array[0], 3));
+//
+// // Call with a [begin, end) pair.
+// ProcessInts(y_absl::MakeConstSpan(&array[0], &array[3]));
+//
+// // Call directly with an array.
+// ProcessInts(y_absl::MakeConstSpan(array));
+//
+// // Call with a contiguous container.
+// std::vector<int> some_ints = ...;
+// ProcessInts(y_absl::MakeConstSpan(some_ints));
+// ProcessInts(y_absl::MakeConstSpan(std::vector<int>{ 0, 0, 0 }));
+//
+template <int&... ExplicitArgumentBarrier, typename T>
+constexpr Span<const T> MakeConstSpan(T* ptr, size_t size) noexcept {
+ return Span<const T>(ptr, size);
+}
+
+template <int&... ExplicitArgumentBarrier, typename T>
+Span<const T> MakeConstSpan(T* begin, T* end) noexcept {
+ return ABSL_HARDENING_ASSERT(begin <= end), Span<const T>(begin, end - begin);
+}
+
+template <int&... ExplicitArgumentBarrier, typename C>
+constexpr auto MakeConstSpan(const C& c) noexcept -> decltype(MakeSpan(c)) {
+ return MakeSpan(c);
+}
+
+template <int&... ExplicitArgumentBarrier, typename T, size_t N>
+constexpr Span<const T> MakeConstSpan(const T (&array)[N]) noexcept {
+ return Span<const T>(array, N);
+}
+ABSL_NAMESPACE_END
+} // namespace y_absl
+#endif // ABSL_TYPES_SPAN_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/variant.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/variant.h
new file mode 100644
index 00000000000..b3d7235018b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/variant.h
@@ -0,0 +1,866 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// variant.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines an `y_absl::variant` type for holding a type-safe
+// value of one of a prescribed set of types (known as its alternative types),
+// and associated functions for managing variants.
+//
+// The `y_absl::variant` type is a form of type-safe union. An `y_absl::variant`
+// should always hold a value of one of its alternative types (except in the
+// "valueless by exception state" -- see below). A default-constructed
+// `y_absl::variant` will hold the value of its first alternative type, provided
+// it is default-constructible.
+//
+// In exceptional cases due to error, an `y_absl::variant` can hold no
+// value (known as a "valueless by exception" state), though this is not the
+// norm.
+//
+// As with `y_absl::optional`, an `y_absl::variant` -- when it holds a value --
+// allocates a value of that type directly within the `variant` itself; it
+// cannot hold a reference, array, or the type `void`; it can, however, hold a
+// pointer to externally managed memory.
+//
+// `y_absl::variant` is a C++11 compatible version of the C++17 `std::variant`
+// abstraction and is designed to be a drop-in replacement for code compliant
+// with C++17.
+
+#ifndef ABSL_TYPES_VARIANT_H_
+#define ABSL_TYPES_VARIANT_H_
+
+#include "y_absl/base/config.h"
+#include "y_absl/utility/utility.h"
+
+#ifdef ABSL_USES_STD_VARIANT
+
+#include <variant> // IWYU pragma: export
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+using std::bad_variant_access;
+using std::get;
+using std::get_if;
+using std::holds_alternative;
+using std::monostate;
+using std::variant;
+using std::variant_alternative;
+using std::variant_alternative_t;
+using std::variant_npos;
+using std::variant_size;
+using std::variant_size_v;
+using std::visit;
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else // ABSL_USES_STD_VARIANT
+
+#include <functional>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+#include "y_absl/meta/type_traits.h"
+#include "y_absl/types/internal/variant.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// -----------------------------------------------------------------------------
+// y_absl::variant
+// -----------------------------------------------------------------------------
+//
+// An `y_absl::variant` type is a form of type-safe union. An `y_absl::variant` --
+// except in exceptional cases -- always holds a value of one of its alternative
+// types.
+//
+// Example:
+//
+// // Construct a variant that holds either an integer or a TString and
+// // assign it to a TString.
+// y_absl::variant<int, TString> v = TString("abc");
+//
+// // A default-constructed variant will hold a value-initialized value of
+// // the first alternative type.
+// auto a = y_absl::variant<int, TString>(); // Holds an int of value '0'.
+//
+// // variants are assignable.
+//
+// // copy assignment
+// auto v1 = y_absl::variant<int, TString>("abc");
+// auto v2 = y_absl::variant<int, TString>(10);
+// v2 = v1; // copy assign
+//
+// // move assignment
+// auto v1 = y_absl::variant<int, TString>("abc");
+// v1 = y_absl::variant<int, TString>(10);
+//
+// // assignment through type conversion
+// a = 128; // variant contains int
+// a = "128"; // variant contains TString
+//
+// An `y_absl::variant` holding a value of one of its alternative types `T` holds
+// an allocation of `T` directly within the variant itself. An `y_absl::variant`
+// is not allowed to allocate additional storage, such as dynamic memory, to
+// allocate the contained value. The contained value shall be allocated in a
+// region of the variant storage suitably aligned for all alternative types.
+template <typename... Ts>
+class variant;
+
+// swap()
+//
+// Swaps two `y_absl::variant` values. This function is equivalent to `v.swap(w)`
+// where `v` and `w` are `y_absl::variant` types.
+//
+// Note that this function requires all alternative types to be both swappable
+// and move-constructible, because any two variants may refer to either the same
+// type (in which case, they will be swapped) or to two different types (in
+// which case the values will need to be moved).
+//
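+// Example (a minimal usage sketch):
+//
+//   y_absl::variant<int, TString> a = 98;
+//   y_absl::variant<int, TString> b = TString("abc");
+//   swap(a, b);  // `a` now holds the TString; `b` holds the int 98.
+//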
+template <
+ typename... Ts,
+ y_absl::enable_if_t<
+ y_absl::conjunction<std::is_move_constructible<Ts>...,
+ type_traits_internal::IsSwappable<Ts>...>::value,
+ int> = 0>
+void swap(variant<Ts...>& v, variant<Ts...>& w) noexcept(noexcept(v.swap(w))) {
+ v.swap(w);
+}
+
+// variant_size
+//
+// Returns the number of alternative types available for a given `y_absl::variant`
+// type as a compile-time constant expression. As this is a class template, it
+// is not generally useful for accessing the number of alternative types of
+// any given `y_absl::variant` instance.
+//
+// Example:
+//
+// constexpr int num_types =
+// y_absl::variant_size<y_absl::variant<int, TString>>();
+//
+// // You can also use the member constant `value`.
+// constexpr int num_types =
+// y_absl::variant_size<y_absl::variant<int, TString>>::value;
+//
+// // `y_absl::variant_size` is more valuable for use in generic code:
+// template <typename Variant>
+// constexpr bool IsVariantMultivalue() {
+// return y_absl::variant_size<Variant>() > 1;
+// }
+//
+// Note that the set of cv-qualified specializations of `variant_size` is
+// provided to ensure that those specializations compile (especially when
+// passed within template logic).
+template <class T>
+struct variant_size;
+
+template <class... Ts>
+struct variant_size<variant<Ts...>>
+ : std::integral_constant<std::size_t, sizeof...(Ts)> {};
+
+// Specialization of `variant_size` for const qualified variants.
+template <class T>
+struct variant_size<const T> : variant_size<T>::type {};
+
+// Specialization of `variant_size` for volatile qualified variants.
+template <class T>
+struct variant_size<volatile T> : variant_size<T>::type {};
+
+// Specialization of `variant_size` for const volatile qualified variants.
+template <class T>
+struct variant_size<const volatile T> : variant_size<T>::type {};
+
+// variant_alternative
+//
+// Returns the alternative type for a given `y_absl::variant` at the passed
+// index, computed at compile time. As this is a class template resulting in a
+// type, it is not useful for accessing the run-time value of any given
+// `y_absl::variant` variable.
+//
+// Example:
+//
+// // The type of the 0th alternative is "int".
+// using alternative_type_0
+// = y_absl::variant_alternative<0, y_absl::variant<int, TString>>::type;
+//
+// static_assert(std::is_same<alternative_type_0, int>::value, "");
+//
+// // `y_absl::variant_alternative` is more valuable for use in generic code:
+// template <typename Variant>
+// constexpr bool IsFirstElementTrivial() {
+// return std::is_trivial_v<variant_alternative<0, Variant>::type>;
+// }
+//
+// Note that the set of cv-qualified specializations of `variant_alternative`
+// is provided to ensure that those specializations compile (especially when
+// passed within template logic).
+template <std::size_t I, class T>
+struct variant_alternative;
+
+template <std::size_t I, class... Types>
+struct variant_alternative<I, variant<Types...>> {
+ using type =
+ variant_internal::VariantAlternativeSfinaeT<I, variant<Types...>>;
+};
+
+// Specialization of `variant_alternative` for const qualified variants.
+template <std::size_t I, class T>
+struct variant_alternative<I, const T> {
+ using type = const typename variant_alternative<I, T>::type;
+};
+
+// Specialization of `variant_alternative` for volatile qualified variants.
+template <std::size_t I, class T>
+struct variant_alternative<I, volatile T> {
+ using type = volatile typename variant_alternative<I, T>::type;
+};
+
+// Specialization of `variant_alternative` for const volatile qualified
+// variants.
+template <std::size_t I, class T>
+struct variant_alternative<I, const volatile T> {
+ using type = const volatile typename variant_alternative<I, T>::type;
+};
+
+// Template type alias for variant_alternative<I, T>::type.
+//
+// Example:
+//
+// using alternative_type_0
+// = y_absl::variant_alternative_t<0, y_absl::variant<int, TString>>;
+// static_assert(std::is_same<alternative_type_0, int>::value, "");
+template <std::size_t I, class T>
+using variant_alternative_t = typename variant_alternative<I, T>::type;
+
+// holds_alternative()
+//
+// Checks whether the given variant currently holds a given alternative type,
+// returning `true` if so.
+//
+// Example:
+//
+// y_absl::variant<int, TString> foo = 42;
+// if (y_absl::holds_alternative<int>(foo)) {
+// std::cout << "The variant holds an integer";
+// }
+template <class T, class... Types>
+constexpr bool holds_alternative(const variant<Types...>& v) noexcept {
+ static_assert(
+ variant_internal::UnambiguousIndexOfImpl<variant<Types...>, T,
+ 0>::value != sizeof...(Types),
+ "The type T must occur exactly once in Types...");
+ return v.index() ==
+ variant_internal::UnambiguousIndexOf<variant<Types...>, T>::value;
+}
+
+// get()
+//
+// Returns a reference to the value currently within a given variant, using
+// either a unique alternative type amongst the variant's set of alternative
+// types, or the variant's index value. Attempting to get a variant's value
+// using a type that is not unique within the variant's set of alternative types
+// is a compile-time error. If the index of the alternative being specified is
+// different from the index of the alternative that is currently stored, throws
+// `y_absl::bad_variant_access`.
+//
+// Example:
+//
+//   y_absl::variant<int, TString> a;
+//
+// // Get the value by type (if unique).
+// int i = y_absl::get<int>(a);
+//
+//   y_absl::variant<int, int> b;
+//
+// // Getting the value by a type that is not unique is ill-formed.
+// int j = y_absl::get<int>(b); // Compile Error!
+//
+//   // Getting the value by index is unambiguous and always allowed.
+// int k = y_absl::get<1>(b);
+
+// Overload for getting a variant's lvalue by type.
+template <class T, class... Types>
+constexpr T& get(variant<Types...>& v) { // NOLINT
+ return variant_internal::VariantCoreAccess::CheckedAccess<
+ variant_internal::IndexOf<T, Types...>::value>(v);
+}
+
+// Overload for getting a variant's rvalue by type.
+// Note: `y_absl::move()` is required to allow use of constexpr in C++11.
+template <class T, class... Types>
+constexpr T&& get(variant<Types...>&& v) {
+ return variant_internal::VariantCoreAccess::CheckedAccess<
+ variant_internal::IndexOf<T, Types...>::value>(y_absl::move(v));
+}
+
+// Overload for getting a variant's const lvalue by type.
+template <class T, class... Types>
+constexpr const T& get(const variant<Types...>& v) {
+ return variant_internal::VariantCoreAccess::CheckedAccess<
+ variant_internal::IndexOf<T, Types...>::value>(v);
+}
+
+// Overload for getting a variant's const rvalue by type.
+// Note: `y_absl::move()` is required to allow use of constexpr in C++11.
+template <class T, class... Types>
+constexpr const T&& get(const variant<Types...>&& v) {
+ return variant_internal::VariantCoreAccess::CheckedAccess<
+ variant_internal::IndexOf<T, Types...>::value>(y_absl::move(v));
+}
+
+// Overload for getting a variant's lvalue by index.
+template <std::size_t I, class... Types>
+constexpr variant_alternative_t<I, variant<Types...>>& get(
+ variant<Types...>& v) { // NOLINT
+ return variant_internal::VariantCoreAccess::CheckedAccess<I>(v);
+}
+
+// Overload for getting a variant's rvalue by index.
+// Note: `y_absl::move()` is required to allow use of constexpr in C++11.
+template <std::size_t I, class... Types>
+constexpr variant_alternative_t<I, variant<Types...>>&& get(
+ variant<Types...>&& v) {
+ return variant_internal::VariantCoreAccess::CheckedAccess<I>(y_absl::move(v));
+}
+
+// Overload for getting a variant's const lvalue by index.
+template <std::size_t I, class... Types>
+constexpr const variant_alternative_t<I, variant<Types...>>& get(
+ const variant<Types...>& v) {
+ return variant_internal::VariantCoreAccess::CheckedAccess<I>(v);
+}
+
+// Overload for getting a variant's const rvalue by index.
+// Note: `y_absl::move()` is required to allow use of constexpr in C++11.
+template <std::size_t I, class... Types>
+constexpr const variant_alternative_t<I, variant<Types...>>&& get(
+ const variant<Types...>&& v) {
+ return variant_internal::VariantCoreAccess::CheckedAccess<I>(y_absl::move(v));
+}
+
+// get_if()
+//
+// Returns a pointer to the value currently stored within a given variant, if
+// present, using either a unique alternative type amongst the variant's set of
+// alternative types, or the variant's index value. If such a value does not
+// exist, returns `nullptr`.
+//
+// As with `get`, attempting to get a variant's value using a type that is not
+// unique within the variant's set of alternative types is a compile-time error.
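+//
+// Example (a minimal usage sketch):
+//
+//   y_absl::variant<int, TString> foo = 42;
+//   if (int* p = y_absl::get_if<int>(&foo)) {
+//     std::cout << "foo holds the int " << *p;
+//   }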
+
+// Overload for getting a pointer to the value stored in the given variant by
+// index.
+template <std::size_t I, class... Types>
+constexpr y_absl::add_pointer_t<variant_alternative_t<I, variant<Types...>>>
+get_if(variant<Types...>* v) noexcept {
+ return (v != nullptr && v->index() == I)
+ ? std::addressof(
+ variant_internal::VariantCoreAccess::Access<I>(*v))
+ : nullptr;
+}
+
+// Overload for getting a pointer to the const value stored in the given
+// variant by index.
+template <std::size_t I, class... Types>
+constexpr y_absl::add_pointer_t<const variant_alternative_t<I, variant<Types...>>>
+get_if(const variant<Types...>* v) noexcept {
+ return (v != nullptr && v->index() == I)
+ ? std::addressof(
+ variant_internal::VariantCoreAccess::Access<I>(*v))
+ : nullptr;
+}
+
+// Overload for getting a pointer to the value stored in the given variant by
+// type.
+template <class T, class... Types>
+constexpr y_absl::add_pointer_t<T> get_if(variant<Types...>* v) noexcept {
+ return y_absl::get_if<variant_internal::IndexOf<T, Types...>::value>(v);
+}
+
+// Overload for getting a pointer to the const value stored in the given variant
+// by type.
+template <class T, class... Types>
+constexpr y_absl::add_pointer_t<const T> get_if(
+ const variant<Types...>* v) noexcept {
+ return y_absl::get_if<variant_internal::IndexOf<T, Types...>::value>(v);
+}
+
+// visit()
+//
+// Calls a provided functor on a given set of variants. `y_absl::visit()` is
+// commonly used to conditionally inspect the state of a given variant (or set
+// of variants).
+//
+// The functor must return the same type when called with any of the variants'
+// alternatives.
+//
+// Example:
+//
+// // Define a visitor functor
+// struct GetVariant {
+// template<typename T>
+// void operator()(const T& i) const {
+// std::cout << "The variant's value is: " << i;
+// }
+// };
+//
+// // Declare our variant, and call `y_absl::visit()` on it.
+// // Note that `GetVariant()` returns void in either case.
+// y_absl::variant<int, TString> foo = TString("foo");
+// GetVariant visitor;
+// y_absl::visit(visitor, foo); // Prints `The variant's value is: foo'
+template <typename Visitor, typename... Variants>
+variant_internal::VisitResult<Visitor, Variants...> visit(Visitor&& vis,
+ Variants&&... vars) {
+ return variant_internal::
+ VisitIndices<variant_size<y_absl::decay_t<Variants> >::value...>::Run(
+ variant_internal::PerformVisitation<Visitor, Variants...>{
+ std::forward_as_tuple(y_absl::forward<Variants>(vars)...),
+ y_absl::forward<Visitor>(vis)},
+ vars.index()...);
+}
+
+// monostate
+//
+// The monostate class serves as a first alternative type for a variant for
+// which the first variant type is otherwise not default-constructible.
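+//
+// Example (an illustrative sketch; `NoDefault` is a hypothetical type without
+// a default constructor):
+//
+//   struct NoDefault {
+//     explicit NoDefault(int) {}
+//   };
+//
+//   // Default-constructible thanks to the leading `monostate` alternative;
+//   // the variant starts out holding `y_absl::monostate`.
+//   y_absl::variant<y_absl::monostate, NoDefault> v;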
+struct monostate {};
+
+// `y_absl::monostate` Relational Operators
+
+constexpr bool operator<(monostate, monostate) noexcept { return false; }
+constexpr bool operator>(monostate, monostate) noexcept { return false; }
+constexpr bool operator<=(monostate, monostate) noexcept { return true; }
+constexpr bool operator>=(monostate, monostate) noexcept { return true; }
+constexpr bool operator==(monostate, monostate) noexcept { return true; }
+constexpr bool operator!=(monostate, monostate) noexcept { return false; }
+
+
+//------------------------------------------------------------------------------
+// `y_absl::variant` Template Definition
+//------------------------------------------------------------------------------
+template <typename T0, typename... Tn>
+class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
+ static_assert(y_absl::conjunction<std::is_object<T0>,
+ std::is_object<Tn>...>::value,
+ "Attempted to instantiate a variant containing a non-object "
+ "type.");
+ // Intentionally not qualifying `negation` with `y_absl::` to work around a bug
+ // in MSVC 2015 with inline namespace and variadic template.
+ static_assert(y_absl::conjunction<negation<std::is_array<T0> >,
+ negation<std::is_array<Tn> >...>::value,
+ "Attempted to instantiate a variant containing an array type.");
+ static_assert(y_absl::conjunction<std::is_nothrow_destructible<T0>,
+ std::is_nothrow_destructible<Tn>...>::value,
+ "Attempted to instantiate a variant containing a non-nothrow "
+ "destructible type.");
+
+ friend struct variant_internal::VariantCoreAccess;
+
+ private:
+ using Base = variant_internal::VariantBase<T0, Tn...>;
+
+ public:
+ // Constructors
+
+ // Constructs a variant holding a default-initialized value of the first
+ // alternative type.
+  constexpr variant() /*noexcept(see above)*/ = default;
+
+ // Copy constructor, standard semantics
+ variant(const variant& other) = default;
+
+ // Move constructor, standard semantics
+ variant(variant&& other) /*noexcept(see above)*/ = default;
+
+ // Constructs a variant of an alternative type specified by overload
+ // resolution of the provided forwarding arguments through
+ // direct-initialization.
+ //
+ // Note: If the selected constructor is a constexpr constructor, this
+ // constructor shall be a constexpr constructor.
+ //
+ // NOTE: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0608r1.html
+  // passed its design-phase vote at the March 2018 C++ standards committee
+  // meeting. It will eventually be implemented and integrated into
+  // `y_absl::variant`.
+ template <
+ class T,
+ std::size_t I = std::enable_if<
+ variant_internal::IsNeitherSelfNorInPlace<variant,
+ y_absl::decay_t<T>>::value,
+ variant_internal::IndexOfConstructedType<variant, T>>::type::value,
+ class Tj = y_absl::variant_alternative_t<I, variant>,
+ y_absl::enable_if_t<std::is_constructible<Tj, T>::value>* =
+ nullptr>
+ constexpr variant(T&& t) noexcept(std::is_nothrow_constructible<Tj, T>::value)
+ : Base(variant_internal::EmplaceTag<I>(), y_absl::forward<T>(t)) {}
+
+ // Constructs a variant of an alternative type from the arguments through
+ // direct-initialization.
+ //
+ // Note: If the selected constructor is a constexpr constructor, this
+ // constructor shall be a constexpr constructor.
+ template <class T, class... Args,
+ typename std::enable_if<std::is_constructible<
+ variant_internal::UnambiguousTypeOfT<variant, T>,
+ Args...>::value>::type* = nullptr>
+ constexpr explicit variant(in_place_type_t<T>, Args&&... args)
+ : Base(variant_internal::EmplaceTag<
+ variant_internal::UnambiguousIndexOf<variant, T>::value>(),
+ y_absl::forward<Args>(args)...) {}
+
+ // Constructs a variant of an alternative type from an initializer list
+ // and other arguments through direct-initialization.
+ //
+ // Note: If the selected constructor is a constexpr constructor, this
+ // constructor shall be a constexpr constructor.
+ template <class T, class U, class... Args,
+ typename std::enable_if<std::is_constructible<
+ variant_internal::UnambiguousTypeOfT<variant, T>,
+ std::initializer_list<U>&, Args...>::value>::type* = nullptr>
+ constexpr explicit variant(in_place_type_t<T>, std::initializer_list<U> il,
+ Args&&... args)
+ : Base(variant_internal::EmplaceTag<
+ variant_internal::UnambiguousIndexOf<variant, T>::value>(),
+ il, y_absl::forward<Args>(args)...) {}
+
+ // Constructs a variant of an alternative type from a provided index,
+ // through value-initialization using the provided forwarded arguments.
+ template <std::size_t I, class... Args,
+ typename std::enable_if<std::is_constructible<
+ variant_internal::VariantAlternativeSfinaeT<I, variant>,
+ Args...>::value>::type* = nullptr>
+ constexpr explicit variant(in_place_index_t<I>, Args&&... args)
+ : Base(variant_internal::EmplaceTag<I>(), y_absl::forward<Args>(args)...) {}
+
+ // Constructs a variant of an alternative type from a provided index,
+ // through value-initialization of an initializer list and the provided
+ // forwarded arguments.
+ template <std::size_t I, class U, class... Args,
+ typename std::enable_if<std::is_constructible<
+ variant_internal::VariantAlternativeSfinaeT<I, variant>,
+ std::initializer_list<U>&, Args...>::value>::type* = nullptr>
+ constexpr explicit variant(in_place_index_t<I>, std::initializer_list<U> il,
+ Args&&... args)
+ : Base(variant_internal::EmplaceTag<I>(), il,
+ y_absl::forward<Args>(args)...) {}
+
+ // Destructors
+
+ // Destroys the variant's currently contained value, provided that
+ // `y_absl::valueless_by_exception()` is false.
+ ~variant() = default;
+
+ // Assignment Operators
+
+ // Copy assignment operator
+ variant& operator=(const variant& other) = default;
+
+ // Move assignment operator
+ variant& operator=(variant&& other) /*noexcept(see above)*/ = default;
+
+ // Converting assignment operator
+ //
+ // NOTE: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0608r1.html
+  // passed its design-phase vote at the March 2018 C++ standards committee
+  // meeting. It will eventually be implemented and integrated into
+  // `y_absl::variant`.
+ template <
+ class T,
+ std::size_t I = std::enable_if<
+ !std::is_same<y_absl::decay_t<T>, variant>::value,
+ variant_internal::IndexOfConstructedType<variant, T>>::type::value,
+ class Tj = y_absl::variant_alternative_t<I, variant>,
+ typename std::enable_if<std::is_assignable<Tj&, T>::value &&
+ std::is_constructible<Tj, T>::value>::type* =
+ nullptr>
+ variant& operator=(T&& t) noexcept(
+ std::is_nothrow_assignable<Tj&, T>::value&&
+ std::is_nothrow_constructible<Tj, T>::value) {
+ variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
+ variant_internal::VariantCoreAccess::MakeConversionAssignVisitor(
+ this, y_absl::forward<T>(t)),
+ index());
+
+ return *this;
+ }
+
+
+ // emplace() Functions
+
+ // Constructs a value of the given alternative type T within the variant. The
+ // existing value of the variant is destroyed first (provided that
+ // `y_absl::valueless_by_exception()` is false). Requires that T is unambiguous
+ // in the variant.
+ //
+ // Example:
+ //
+ // y_absl::variant<std::vector<int>, int, TString> v;
+ // v.emplace<int>(99);
+ // v.emplace<TString>("abc");
+ template <
+ class T, class... Args,
+ typename std::enable_if<std::is_constructible<
+ y_absl::variant_alternative_t<
+ variant_internal::UnambiguousIndexOf<variant, T>::value, variant>,
+ Args...>::value>::type* = nullptr>
+ T& emplace(Args&&... args) {
+ return variant_internal::VariantCoreAccess::Replace<
+ variant_internal::UnambiguousIndexOf<variant, T>::value>(
+ this, y_absl::forward<Args>(args)...);
+ }
+
+ // Constructs a value of the given alternative type T within the variant using
+ // an initializer list. The existing value of the variant is destroyed first
+ // (provided that `y_absl::valueless_by_exception()` is false). Requires that T
+ // is unambiguous in the variant.
+ //
+ // Example:
+ //
+ // y_absl::variant<std::vector<int>, int, TString> v;
+ // v.emplace<std::vector<int>>({0, 1, 2});
+ template <
+ class T, class U, class... Args,
+ typename std::enable_if<std::is_constructible<
+ y_absl::variant_alternative_t<
+ variant_internal::UnambiguousIndexOf<variant, T>::value, variant>,
+ std::initializer_list<U>&, Args...>::value>::type* = nullptr>
+ T& emplace(std::initializer_list<U> il, Args&&... args) {
+ return variant_internal::VariantCoreAccess::Replace<
+ variant_internal::UnambiguousIndexOf<variant, T>::value>(
+ this, il, y_absl::forward<Args>(args)...);
+ }
+
+ // Destroys the current value of the variant (provided that
+ // `y_absl::valueless_by_exception()` is false) and constructs a new value at
+ // the given index.
+ //
+ // Example:
+ //
+ // y_absl::variant<std::vector<int>, int, int> v;
+ // v.emplace<1>(99);
+ // v.emplace<2>(98);
+ // v.emplace<int>(99); // Won't compile. 'int' isn't a unique type.
+ template <std::size_t I, class... Args,
+ typename std::enable_if<
+ std::is_constructible<y_absl::variant_alternative_t<I, variant>,
+ Args...>::value>::type* = nullptr>
+ y_absl::variant_alternative_t<I, variant>& emplace(Args&&... args) {
+ return variant_internal::VariantCoreAccess::Replace<I>(
+ this, y_absl::forward<Args>(args)...);
+ }
+
+ // Destroys the current value of the variant (provided that
+ // `y_absl::valueless_by_exception()` is false) and constructs a new value at
+ // the given index using an initializer list and the provided arguments.
+ //
+ // Example:
+ //
+ // y_absl::variant<std::vector<int>, int, int> v;
+ // v.emplace<0>({0, 1, 2});
+ template <std::size_t I, class U, class... Args,
+ typename std::enable_if<std::is_constructible<
+ y_absl::variant_alternative_t<I, variant>,
+ std::initializer_list<U>&, Args...>::value>::type* = nullptr>
+ y_absl::variant_alternative_t<I, variant>& emplace(std::initializer_list<U> il,
+ Args&&... args) {
+ return variant_internal::VariantCoreAccess::Replace<I>(
+ this, il, y_absl::forward<Args>(args)...);
+ }
+
+ // variant::valueless_by_exception()
+ //
+ // Returns false if and only if the variant currently holds a valid value.
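+  //
+  // A variant becomes valueless only if an exception is thrown while changing
+  // the contained alternative (for example, during a type-changing `emplace()`
+  // or assignment); ordinary use never observes this state.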
+ constexpr bool valueless_by_exception() const noexcept {
+ return this->index_ == y_absl::variant_npos;
+ }
+
+ // variant::index()
+ //
+ // Returns the index value of the variant's currently selected alternative
+ // type.
+ constexpr std::size_t index() const noexcept { return this->index_; }
+
+ // variant::swap()
+ //
+ // Swaps the values of two variant objects.
+ //
+ void swap(variant& rhs) noexcept(
+ y_absl::conjunction<
+ std::is_nothrow_move_constructible<T0>,
+ std::is_nothrow_move_constructible<Tn>...,
+ type_traits_internal::IsNothrowSwappable<T0>,
+ type_traits_internal::IsNothrowSwappable<Tn>...>::value) {
+ return variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
+ variant_internal::Swap<T0, Tn...>{this, &rhs}, rhs.index());
+ }
+};
+
+// We need a valid declaration of variant<> for SFINAE and overload resolution
+// to work properly above, but we don't need a full definition since this type
+// will never be constructed. This declaration, though incomplete, suffices.
+template <>
+class variant<>;
+
+//------------------------------------------------------------------------------
+// Relational Operators
+//------------------------------------------------------------------------------
+//
+// If neither operand is in the `variant::valueless_by_exception` state:
+//
+//   * If the index of both variants is the same, the relational operator
+//     returns the result of the corresponding relational operator for the
+//     corresponding alternative type.
+//   * If the index of the two variants is not the same, the relational
+//     operator returns the result of that operation applied to the value of
+//     the left operand's index and the value of the right operand's index.
+//
+// If at least one operand is in the valueless_by_exception state:
+//
+//   * A variant in the valueless_by_exception state is only considered equal
+//     to another variant in the valueless_by_exception state.
+//   * If exactly one operand is in the valueless_by_exception state, the
+//     variant in the valueless_by_exception state is less than the variant
+//     that is not in the valueless_by_exception state.
+//
+// Note: The value 1 is added to each index in the relational comparisons so
+// that the index corresponding to the valueless_by_exception state
+// (`y_absl::variant_npos`, i.e. `size_t(-1)`) wraps around to 0 (the lowest
+// value for the index type) while the remaining indices stay in the same
+// relative order.
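+//
+// Example (an illustrative sketch):
+//
+//   y_absl::variant<int, TString> a = 1;           // index 0
+//   y_absl::variant<int, TString> b = TString();   // index 1
+//   assert(a < b);  // Differing indices compare by index value: 0 < 1.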
+
+// Equal-to operator
+template <typename... Types>
+constexpr variant_internal::RequireAllHaveEqualT<Types...> operator==(
+ const variant<Types...>& a, const variant<Types...>& b) {
+ return (a.index() == b.index()) &&
+ variant_internal::VisitIndices<sizeof...(Types)>::Run(
+ variant_internal::EqualsOp<Types...>{&a, &b}, a.index());
+}
+
+// Not equal operator
+template <typename... Types>
+constexpr variant_internal::RequireAllHaveNotEqualT<Types...> operator!=(
+ const variant<Types...>& a, const variant<Types...>& b) {
+ return (a.index() != b.index()) ||
+ variant_internal::VisitIndices<sizeof...(Types)>::Run(
+ variant_internal::NotEqualsOp<Types...>{&a, &b}, a.index());
+}
+
+// Less-than operator
+template <typename... Types>
+constexpr variant_internal::RequireAllHaveLessThanT<Types...> operator<(
+ const variant<Types...>& a, const variant<Types...>& b) {
+ return (a.index() != b.index())
+ ? (a.index() + 1) < (b.index() + 1)
+ : variant_internal::VisitIndices<sizeof...(Types)>::Run(
+ variant_internal::LessThanOp<Types...>{&a, &b}, a.index());
+}
+
+// Greater-than operator
+template <typename... Types>
+constexpr variant_internal::RequireAllHaveGreaterThanT<Types...> operator>(
+ const variant<Types...>& a, const variant<Types...>& b) {
+ return (a.index() != b.index())
+ ? (a.index() + 1) > (b.index() + 1)
+ : variant_internal::VisitIndices<sizeof...(Types)>::Run(
+ variant_internal::GreaterThanOp<Types...>{&a, &b},
+ a.index());
+}
+
+// Less-than or equal-to operator
+template <typename... Types>
+constexpr variant_internal::RequireAllHaveLessThanOrEqualT<Types...> operator<=(
+ const variant<Types...>& a, const variant<Types...>& b) {
+ return (a.index() != b.index())
+ ? (a.index() + 1) < (b.index() + 1)
+ : variant_internal::VisitIndices<sizeof...(Types)>::Run(
+ variant_internal::LessThanOrEqualsOp<Types...>{&a, &b},
+ a.index());
+}
+
+// Greater-than or equal-to operator
+template <typename... Types>
+constexpr variant_internal::RequireAllHaveGreaterThanOrEqualT<Types...>
+operator>=(const variant<Types...>& a, const variant<Types...>& b) {
+ return (a.index() != b.index())
+ ? (a.index() + 1) > (b.index() + 1)
+ : variant_internal::VisitIndices<sizeof...(Types)>::Run(
+ variant_internal::GreaterThanOrEqualsOp<Types...>{&a, &b},
+ a.index());
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+namespace std {
+
+// hash()
+template <> // NOLINT
+struct hash<y_absl::monostate> {
+ std::size_t operator()(y_absl::monostate) const { return 0; }
+};
+
+template <class... T> // NOLINT
+struct hash<y_absl::variant<T...>>
+ : y_absl::variant_internal::VariantHashBase<y_absl::variant<T...>, void,
+ y_absl::remove_const_t<T>...> {};
+
+} // namespace std
+
+#endif // ABSL_USES_STD_VARIANT
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace variant_internal {
+
+// Helper visitor for converting a `variant<Ts...>` into another type (usually
+// another variant) that can be constructed from any of its alternatives.
+template <typename To>
+struct ConversionVisitor {
+ template <typename T>
+ To operator()(T&& v) const {
+ return To(std::forward<T>(v));
+ }
+};
+
+} // namespace variant_internal
+
+// ConvertVariantTo()
+//
+// A helper function to convert an `y_absl::variant` to a variant of another
+// set of types, provided that the new variant type can be constructed from
+// any type in the source variant.
+//
+// Example:
+//
+// y_absl::variant<name1, name2, float> InternalReq(const Req&);
+//
+// // name1 and name2 are convertible to name
+// y_absl::variant<name, float> ExternalReq(const Req& req) {
+// return y_absl::ConvertVariantTo<y_absl::variant<name, float>>(
+// InternalReq(req));
+// }
+template <typename To, typename Variant>
+To ConvertVariantTo(Variant&& variant) {
+ return y_absl::visit(variant_internal::ConversionVisitor<To>{},
+ std::forward<Variant>(variant));
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_TYPES_VARIANT_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/types/ya.make
new file mode 100644
index 00000000000..b5ead458565
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/ya.make
@@ -0,0 +1,14 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/utility/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/utility/.yandex_meta/licenses.list.txt
new file mode 100644
index 00000000000..7be6b428485
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/utility/.yandex_meta/licenses.list.txt
@@ -0,0 +1,16 @@
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/utility/utility.h b/contrib/restricted/abseil-cpp-tstring/y_absl/utility/utility.h
new file mode 100644
index 00000000000..c2e4d91d434
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/utility/utility.h
@@ -0,0 +1,350 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This header file contains C++11 versions of standard <utility> header
+// abstractions available within C++14 and C++17, designed to be drop-in
+// replacements for code compliant with C++14 and C++17.
+//
+// The following abstractions are defined:
+//
+// * integer_sequence<T, Ints...> == std::integer_sequence<T, Ints...>
+// * index_sequence<Ints...> == std::index_sequence<Ints...>
+// * make_integer_sequence<T, N> == std::make_integer_sequence<T, N>
+// * make_index_sequence<N> == std::make_index_sequence<N>
+// * index_sequence_for<Ts...> == std::index_sequence_for<Ts...>
+// * apply<Functor, Tuple> == std::apply<Functor, Tuple>
+// * exchange<T> == std::exchange<T>
+// * make_from_tuple<T> == std::make_from_tuple<T>
+//
+// This header file also provides the tag types `in_place_t`, `in_place_type_t`,
+// and `in_place_index_t`, as well as the constant `in_place`, and `constexpr`
+// implementations of `std::move()` and `std::forward()` for C++11.
+//
+// References:
+//
+// https://en.cppreference.com/w/cpp/utility/integer_sequence
+// https://en.cppreference.com/w/cpp/utility/apply
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3658.html
+
+#ifndef ABSL_UTILITY_UTILITY_H_
+#define ABSL_UTILITY_UTILITY_H_
+
+#include <cstddef>
+#include <cstdlib>
+#include <tuple>
+#include <utility>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/inline_variable.h"
+#include "y_absl/base/internal/invoke.h"
+#include "y_absl/meta/type_traits.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+
+// integer_sequence
+//
+// Class template representing a compile-time integer sequence. An instantiation
+// of `integer_sequence<T, Ints...>` has a sequence of integers encoded in its
+// type through its template arguments (which is a common need when
+// working with C++11 variadic templates). `y_absl::integer_sequence` is designed
+// to be a drop-in replacement for C++14's `std::integer_sequence`.
+//
+// Example:
+//
+// template< class T, T... Ints >
+// void user_function(integer_sequence<T, Ints...>);
+//
+// int main()
+// {
+// // user_function's `T` will be deduced to `int` and `Ints...`
+// // will be deduced to `0, 1, 2, 3, 4`.
+// user_function(make_integer_sequence<int, 5>());
+// }
+template <typename T, T... Ints>
+struct integer_sequence {
+ using value_type = T;
+ static constexpr size_t size() noexcept { return sizeof...(Ints); }
+};
+
+// index_sequence
+//
+// A helper template for an `integer_sequence` of `size_t`,
+// `y_absl::index_sequence` is designed to be a drop-in replacement for C++14's
+// `std::index_sequence`.
+template <size_t... Ints>
+using index_sequence = integer_sequence<size_t, Ints...>;
+
+namespace utility_internal {
+
+template <typename Seq, size_t SeqSize, size_t Rem>
+struct Extend;
+
+// Note that SeqSize == sizeof...(Ints). It's passed explicitly for efficiency.
+template <typename T, T... Ints, size_t SeqSize>
+struct Extend<integer_sequence<T, Ints...>, SeqSize, 0> {
+ using type = integer_sequence<T, Ints..., (Ints + SeqSize)...>;
+};
+
+template <typename T, T... Ints, size_t SeqSize>
+struct Extend<integer_sequence<T, Ints...>, SeqSize, 1> {
+ using type = integer_sequence<T, Ints..., (Ints + SeqSize)..., 2 * SeqSize>;
+};
+
+// Recursion helper for 'make_integer_sequence<T, N>'.
+// 'Gen<T, N>::type' is an alias for 'integer_sequence<T, 0, 1, ... N-1>'.
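+//
+// For instance, the expansion of 'Gen<int, 5>::type' proceeds as (an
+// illustrative trace):
+//
+//   Gen<int, 5>::type
+//     = Extend<Gen<int, 2>::type, 2, 1>::type
+//     = Extend<integer_sequence<int, 0, 1>, 2, 1>::type
+//     = integer_sequence<int, 0, 1, 0 + 2, 1 + 2, 2 * 2>
+//     = integer_sequence<int, 0, 1, 2, 3, 4>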
+template <typename T, size_t N>
+struct Gen {
+ using type =
+ typename Extend<typename Gen<T, N / 2>::type, N / 2, N % 2>::type;
+};
+
+template <typename T>
+struct Gen<T, 0> {
+ using type = integer_sequence<T>;
+};
+
+template <typename T>
+struct InPlaceTypeTag {
+ explicit InPlaceTypeTag() = delete;
+ InPlaceTypeTag(const InPlaceTypeTag&) = delete;
+ InPlaceTypeTag& operator=(const InPlaceTypeTag&) = delete;
+};
+
+template <size_t I>
+struct InPlaceIndexTag {
+ explicit InPlaceIndexTag() = delete;
+ InPlaceIndexTag(const InPlaceIndexTag&) = delete;
+ InPlaceIndexTag& operator=(const InPlaceIndexTag&) = delete;
+};
+
+} // namespace utility_internal
+
+// Compile-time sequences of integers
+
+// make_integer_sequence
+//
+// This template alias is equivalent to
+// `integer_sequence<T, 0, 1, ..., N-1>`, and is designed to be a drop-in
+// replacement for C++14's `std::make_integer_sequence`.
+template <typename T, T N>
+using make_integer_sequence = typename utility_internal::Gen<T, N>::type;
+
+// make_index_sequence
+//
+// This template alias is equivalent to `index_sequence<0, 1, ..., N-1>`,
+// and is designed to be a drop-in replacement for C++14's
+// `std::make_index_sequence`.
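+//
+// For instance (a compile-time sanity check):
+//
+//   static_assert(
+//       std::is_same<y_absl::make_index_sequence<3>,
+//                    y_absl::index_sequence<0, 1, 2>>::value, "");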
+template <size_t N>
+using make_index_sequence = make_integer_sequence<size_t, N>;
+
+// index_sequence_for
+//
+// Converts a typename pack into an index sequence of the same length. It is
+// designed to be a drop-in replacement for C++14's
+// `std::index_sequence_for`.
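+//
+// For instance (a compile-time sanity check):
+//
+//   static_assert(
+//       std::is_same<y_absl::index_sequence_for<int, char, double>,
+//                    y_absl::index_sequence<0, 1, 2>>::value, "");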
+template <typename... Ts>
+using index_sequence_for = make_index_sequence<sizeof...(Ts)>;
+
+// Tag types
+
+#ifdef ABSL_USES_STD_OPTIONAL
+
+using std::in_place_t;
+using std::in_place;
+
+#else // ABSL_USES_STD_OPTIONAL
+
+// in_place_t
+//
+// Tag type used to specify in-place construction, such as with
+// `y_absl::optional`, designed to be a drop-in replacement for C++17's
+// `std::in_place_t`.
+struct in_place_t {};
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(in_place_t, in_place, {});
+
+#endif // ABSL_USES_STD_OPTIONAL
+
+#if defined(ABSL_USES_STD_ANY) || defined(ABSL_USES_STD_VARIANT)
+using std::in_place_type;
+using std::in_place_type_t;
+#else
+
+// in_place_type_t
+//
+// Tag type used for in-place construction when the type to construct needs to
+// be specified, such as with `y_absl::any`, designed to be a drop-in replacement
+// for C++17's `std::in_place_type_t`.
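+//
+// A minimal usage sketch (with `y_absl::variant`, which accepts this tag):
+//
+//   y_absl::variant<int, TString> v(
+//       y_absl::in_place_type_t<TString>(), "abc");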
+template <typename T>
+using in_place_type_t = void (*)(utility_internal::InPlaceTypeTag<T>);
+
+template <typename T>
+void in_place_type(utility_internal::InPlaceTypeTag<T>) {}
+#endif // ABSL_USES_STD_ANY || ABSL_USES_STD_VARIANT
+
+#ifdef ABSL_USES_STD_VARIANT
+using std::in_place_index;
+using std::in_place_index_t;
+#else
+
+// in_place_index_t
+//
+// Tag type used for in-place construction when the index of the alternative to
+// construct needs to be specified, such as with `y_absl::variant`, designed to
+// be a drop-in replacement for C++17's `std::in_place_index_t`.
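+//
+// A minimal usage sketch (with `y_absl::variant`, which accepts this tag):
+//
+//   // Selects the second `int` alternative by index.
+//   y_absl::variant<int, int> v(y_absl::in_place_index_t<1>(), 42);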
+template <size_t I>
+using in_place_index_t = void (*)(utility_internal::InPlaceIndexTag<I>);
+
+template <size_t I>
+void in_place_index(utility_internal::InPlaceIndexTag<I>) {}
+#endif // ABSL_USES_STD_VARIANT
+
+// Constexpr move and forward
+
+// move()
+//
+// A constexpr version of `std::move()`, designed to be a drop-in replacement
+// for C++14's `std::move()`.
+template <typename T>
+constexpr y_absl::remove_reference_t<T>&& move(T&& t) noexcept {
+ return static_cast<y_absl::remove_reference_t<T>&&>(t);
+}
+
+// forward()
+//
+// A constexpr version of `std::forward()`, designed to be a drop-in replacement
+// for C++14's `std::forward()`.
+template <typename T>
+constexpr T&& forward(
+ y_absl::remove_reference_t<T>& t) noexcept { // NOLINT(runtime/references)
+ return static_cast<T&&>(t);
+}
+
+namespace utility_internal {
+// Helper for expanding a tuple into the argument list of an invocation.
+template <typename Functor, typename Tuple, std::size_t... Indexes>
+auto apply_helper(Functor&& functor, Tuple&& t, index_sequence<Indexes...>)
+ -> decltype(y_absl::base_internal::invoke(
+ y_absl::forward<Functor>(functor),
+ std::get<Indexes>(y_absl::forward<Tuple>(t))...)) {
+ return y_absl::base_internal::invoke(
+ y_absl::forward<Functor>(functor),
+ std::get<Indexes>(y_absl::forward<Tuple>(t))...);
+}
+
+} // namespace utility_internal
+
+// apply
+//
+// Invokes a Callable using elements of a tuple as its arguments.
+// Each element of the tuple corresponds to an argument of the call (in order).
+// Both the Callable argument and the tuple argument are perfect-forwarded.
+// For member-function Callables, the first tuple element acts as the `this`
+// pointer. `y_absl::apply` is designed to be a drop-in replacement for C++17's
+// `std::apply`. Unlike C++17's `std::apply`, this is not currently `constexpr`.
+//
+// Example:
+//
+// class Foo {
+// public:
+// void Bar(int);
+// };
+// void user_function1(int, TString);
+// void user_function2(std::unique_ptr<Foo>);
+// auto user_lambda = [](int, int) {};
+//
+// int main()
+// {
+// std::tuple<int, TString> tuple1(42, "bar");
+// // Invokes the first user function on int, TString.
+// y_absl::apply(&user_function1, tuple1);
+//
+// std::tuple<std::unique_ptr<Foo>> tuple2(y_absl::make_unique<Foo>());
+// // Invokes the user function that takes ownership of the unique
+// // pointer.
+// y_absl::apply(&user_function2, std::move(tuple2));
+//
+// auto foo = y_absl::make_unique<Foo>();
+// std::tuple<Foo*, int> tuple3(foo.get(), 42);
+// // Invokes the method Bar on foo with one argument, 42.
+// y_absl::apply(&Foo::Bar, tuple3);
+//
+// std::tuple<int, int> tuple4(8, 9);
+// // Invokes a lambda.
+// y_absl::apply(user_lambda, tuple4);
+// }
+template <typename Functor, typename Tuple>
+auto apply(Functor&& functor, Tuple&& t)
+ -> decltype(utility_internal::apply_helper(
+ y_absl::forward<Functor>(functor), y_absl::forward<Tuple>(t),
+ y_absl::make_index_sequence<std::tuple_size<
+ typename std::remove_reference<Tuple>::type>::value>{})) {
+ return utility_internal::apply_helper(
+ y_absl::forward<Functor>(functor), y_absl::forward<Tuple>(t),
+ y_absl::make_index_sequence<std::tuple_size<
+ typename std::remove_reference<Tuple>::type>::value>{});
+}
+
+// exchange
+//
+// Replaces the value of `obj` with `new_value` and returns the old value of
+// `obj`. `y_absl::exchange` is designed to be a drop-in replacement for C++14's
+// `std::exchange`.
+//
+// Example:
+//
+// Foo& operator=(Foo&& other) {
+// ptr1_ = y_absl::exchange(other.ptr1_, nullptr);
+// int1_ = y_absl::exchange(other.int1_, -1);
+// return *this;
+// }
+template <typename T, typename U = T>
+T exchange(T& obj, U&& new_value) {
+ T old_value = y_absl::move(obj);
+ obj = y_absl::forward<U>(new_value);
+ return old_value;
+}
+
+namespace utility_internal {
+template <typename T, typename Tuple, size_t... I>
+T make_from_tuple_impl(Tuple&& tup, y_absl::index_sequence<I...>) {
+ return T(std::get<I>(std::forward<Tuple>(tup))...);
+}
+} // namespace utility_internal
+
+// make_from_tuple
+//
+// Given the template parameter type `T` and a tuple of arguments
+// `std::tuple(arg0, arg1, ..., argN)`, constructs an object of type `T` as if by
+// calling `T(arg0, arg1, ..., argN)`.
+//
+// Example:
+//
+// std::tuple<const char*, size_t> args("hello world", 5);
+// auto s = y_absl::make_from_tuple<TString>(args);
+// assert(s == "hello");
+//
+template <typename T, typename Tuple>
+constexpr T make_from_tuple(Tuple&& tup) {
+ return utility_internal::make_from_tuple_impl<T>(
+ std::forward<Tuple>(tup),
+ y_absl::make_index_sequence<
+ std::tuple_size<y_absl::decay_t<Tuple>>::value>{});
+}
+
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_UTILITY_UTILITY_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/utility/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/utility/ya.make
new file mode 100644
index 00000000000..b5ead458565
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/utility/ya.make
@@ -0,0 +1,14 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+END()
diff --git a/contrib/restricted/abseil-cpp-tstring/ya.make b/contrib/restricted/abseil-cpp-tstring/ya.make
new file mode 100644
index 00000000000..67363a69307
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/ya.make
@@ -0,0 +1,172 @@
+# Generated by devtools/yamaker from nixpkgs 21.11.
+
+LIBRARY()
+
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
+VERSION(20211102.0)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp-tstring/y_absl/algorithm
+ contrib/restricted/abseil-cpp-tstring/y_absl/base
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/scoped_set_env
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity
+ contrib/restricted/abseil-cpp-tstring/y_absl/city
+ contrib/restricted/abseil-cpp-tstring/y_absl/container
+ contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/absl_hashtablez_sampler
+ contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize
+ contrib/restricted/abseil-cpp-tstring/y_absl/demangle
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/commandlineflag
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/commandlineflag
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/private_handle_accessor
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/program_name
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/reflection
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/usage
+ contrib/restricted/abseil-cpp-tstring/y_absl/flags/usage_config
+ contrib/restricted/abseil-cpp-tstring/y_absl/functional
+ contrib/restricted/abseil-cpp-tstring/y_absl/hash
+ contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/memory
+ contrib/restricted/abseil-cpp-tstring/y_absl/meta
+ contrib/restricted/abseil-cpp-tstring/y_absl/numeric
+ contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/exponential_biased
+ contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/periodic_sampler
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/absl_random_distributions
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/absl_random_internal_distribution_test_util
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/pool_urbg
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_detect
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_hwaes
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_round_keys
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_slow
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/seed_material
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/seed_gen_exception
+ contrib/restricted/abseil-cpp-tstring/y_absl/random/seed_sequences
+ contrib/restricted/abseil-cpp-tstring/y_absl/status
+ contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_cord_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token
+ contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization
+ contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/civil_time
+ contrib/restricted/abseil-cpp-tstring/y_absl/time/time_zone
+ contrib/restricted/abseil-cpp-tstring/y_absl/types
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_any_cast
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/bad_variant_access
+ contrib/restricted/abseil-cpp-tstring/y_absl/types/internal
+ contrib/restricted/abseil-cpp-tstring/y_absl/utility
+)
+
+NO_RUNTIME()
+
+END()
+
+RECURSE(
+ y_absl/algorithm
+ y_absl/base
+ y_absl/base/internal/low_level_alloc
+ y_absl/base/internal/raw_logging
+ y_absl/base/internal/scoped_set_env
+ y_absl/base/internal/spinlock_wait
+ y_absl/base/internal/strerror
+ y_absl/base/internal/throw_delegate
+ y_absl/base/log_severity
+ y_absl/city
+ y_absl/container
+ y_absl/container/internal/absl_hashtablez_sampler
+ y_absl/container/internal/raw_hash_set
+ y_absl/debugging
+ y_absl/debugging/failure_signal_handler
+ y_absl/debugging/internal
+ y_absl/debugging/leak_check
+ y_absl/debugging/leak_check_disable
+ y_absl/debugging/stacktrace
+ y_absl/debugging/symbolize
+ y_absl/demangle
+ y_absl/flags
+ y_absl/flags/commandlineflag
+ y_absl/flags/internal/commandlineflag
+ y_absl/flags/internal/flag
+ y_absl/flags/internal/private_handle_accessor
+ y_absl/flags/internal/program_name
+ y_absl/flags/internal/usage
+ y_absl/flags/marshalling
+ y_absl/flags/parse
+ y_absl/flags/reflection
+ y_absl/flags/usage
+ y_absl/flags/usage_config
+ y_absl/functional
+ y_absl/hash
+ y_absl/hash/internal
+ y_absl/memory
+ y_absl/meta
+ y_absl/numeric
+ y_absl/profiling/internal/exponential_biased
+ y_absl/profiling/internal/periodic_sampler
+ y_absl/random/absl_random_distributions
+ y_absl/random/internal/absl_random_internal_distribution_test_util
+ y_absl/random/internal/pool_urbg
+ y_absl/random/internal/randen
+ y_absl/random/internal/randen_detect
+ y_absl/random/internal/randen_hwaes
+ y_absl/random/internal/randen_round_keys
+ y_absl/random/internal/randen_slow
+ y_absl/random/internal/seed_material
+ y_absl/random/seed_gen_exception
+ y_absl/random/seed_sequences
+ y_absl/status
+ y_absl/status/statusor
+ y_absl/strings
+ y_absl/strings/cord
+ y_absl/strings/internal/absl_cord_internal
+ y_absl/strings/internal/absl_strings_internal
+ y_absl/strings/internal/cordz_functions
+ y_absl/strings/internal/cordz_handle
+ y_absl/strings/internal/cordz_info
+ y_absl/strings/internal/cordz_sample_token
+ y_absl/strings/internal/str_format
+ y_absl/synchronization
+ y_absl/synchronization/internal
+ y_absl/time
+ y_absl/time/civil_time
+ y_absl/time/time_zone
+ y_absl/types
+ y_absl/types/bad_any_cast
+ y_absl/types/bad_optional_access
+ y_absl/types/bad_variant_access
+ y_absl/types/internal
+ y_absl/utility
+)