summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorthegeorg <[email protected]>2025-06-18 00:36:34 +0300
committerthegeorg <[email protected]>2025-06-18 11:57:29 +0300
commit9d9c0304a08db64ad08584ae698127aef5d36c2a (patch)
tree0447e420e3ef180a285026b18fd719f53fb4aaa0
parent74b335270a63a46379d471b54fa509cce3ec9c1e (diff)
Update contrib/restricted/abseil-cpp to 20250512.0
Abseil now requires at least `C++17` and follows _Google's Foundational C++ Support Policy_. commit_hash:9209d4b467d10405bf5b082471dcd65c2546c6bc
-rw-r--r--contrib/restricted/abseil-cpp/.yandex_meta/__init__.py6
-rw-r--r--contrib/restricted/abseil-cpp/.yandex_meta/devtools.copyrights.report38
-rw-r--r--contrib/restricted/abseil-cpp/.yandex_meta/devtools.licenses.report45
-rw-r--r--contrib/restricted/abseil-cpp/.yandex_meta/licenses.list.txt9
-rw-r--r--contrib/restricted/abseil-cpp/.yandex_meta/override.nix4
-rw-r--r--contrib/restricted/abseil-cpp/README.md14
-rw-r--r--contrib/restricted/abseil-cpp/absl/algorithm/container.h21
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/attributes.h83
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/call_once.h23
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/config.h151
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/fast_type_id.h (renamed from contrib/restricted/abseil-cpp/absl/base/internal/fast_type_id.h)27
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.cc5
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/cycleclock_config.h14
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/endian.h72
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/inline_variable.h108
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/inline_variable_testing.h46
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/invoke.h241
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/iterator_traits.h71
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/iterator_traits_test_helper.h97
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.cc11
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/nullability_deprecated.h (renamed from contrib/restricted/abseil-cpp/absl/base/internal/nullability_impl.h)53
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc9
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h16
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/unaligned_access.h12
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.h11
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/no_destructor.h43
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/nullability.h156
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/options.h83
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/policy_checks.h14
-rw-r--r--contrib/restricted/abseil-cpp/absl/cleanup/cleanup.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/cleanup/internal/cleanup.h7
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/btree_map.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/btree_set.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/fixed_array.h21
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/flat_hash_map.h5
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h7
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/inlined_vector.h13
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/btree.h161
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h246
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/common.h43
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/common_policy_traits.h3
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h19
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h9
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h55
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_testing.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/hashtable_control_bytes.h527
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc24
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h43
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h9
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/layout.h68
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h267
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc1537
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h2305
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set_resize_impl.h80
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/node_hash_map.h5
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/node_hash_set.h5
-rw-r--r--contrib/restricted/abseil-cpp/absl/crc/crc32c.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/crc/crc32c.h12
-rw-r--r--contrib/restricted/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h22
-rw-r--r--contrib/restricted/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc119
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/addresses.h57
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.cc2
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.h10
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc43
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.cc32
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc77
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc23
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc19
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc14
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc43
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc17
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_unimplemented-inl.inc7
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc43
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc28
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/stacktrace.cc171
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/stacktrace.h78
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/symbolize_elf.inc28
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/symbolize_emscripten.inc5
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/symbolize_win32.inc31
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/commandlineflag.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/flag.h7
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc3
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/flag.h13
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/registry.h7
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/parse.cc12
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/reflection.cc5
-rw-r--r--contrib/restricted/abseil-cpp/absl/functional/any_invocable.h18
-rw-r--r--contrib/restricted/abseil-cpp/absl/functional/function_ref.h11
-rw-r--r--contrib/restricted/abseil-cpp/absl/functional/internal/any_invocable.h336
-rw-r--r--contrib/restricted/abseil-cpp/absl/functional/internal/front_binder.h22
-rw-r--r--contrib/restricted/abseil-cpp/absl/functional/internal/function_ref.h7
-rw-r--r--contrib/restricted/abseil-cpp/absl/functional/overload.h20
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/hash.h18
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/hash_testing.h40
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc6
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/hash.h147
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc93
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h8
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/spy_hash_state.h7
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/weakly_mixed_integer.h38
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/check.h3
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/die_if_null.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/globals.h9
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/append_truncated.h28
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/check_op.cc44
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/check_op.h127
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/conditions.cc8
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/conditions.h9
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/log_message.cc128
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/log_message.h143
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/nullstream.h1
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/proto.cc5
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/proto.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/strip.h16
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/structured.h10
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/vlog_config.h14
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/internal/voidify.h14
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/log.h83
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/log_entry.cc41
-rw-r--r--contrib/restricted/abseil-cpp/absl/log/log_sink_registry.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/meta/type_traits.h221
-rw-r--r--contrib/restricted/abseil-cpp/absl/numeric/bits.h70
-rw-r--r--contrib/restricted/abseil-cpp/absl/numeric/int128.cc52
-rw-r--r--contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h10
-rw-r--r--contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased.cc2
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/bit_gen_ref.h21
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/distributions.h14
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/gaussian_distribution.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/distribution_caller.h11
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/entropy_pool.cc (renamed from contrib/restricted/abseil-cpp/absl/random/internal/pool_urbg.cc)112
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/entropy_pool.h (renamed from contrib/restricted/abseil-cpp/absl/strings/cord_buffer.cc)19
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/mock_helpers.h13
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/nonsecure_base.h11
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/pool_urbg.h131
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/randen_detect.cc2
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/seed_material.cc32
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/seed_material.h10
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/mocking_bit_gen.h18
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/random.h141
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/seed_sequences.cc8
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/internal/status_internal.cc7
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/internal/status_internal.h7
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h7
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/status.cc12
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/status.h16
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/status_payload_printer.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/statusor.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/statusor.h12
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/ascii.cc18
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/ascii.h36
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/charconv.cc43
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/charconv.h10
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/cord.cc112
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/cord.h177
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/cord_analysis.cc22
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/cord_analysis.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/cordz_test_helpers.h9
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/escaping.cc279
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/escaping.h19
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/charconv_bigint.cc2
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h14
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_btree.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_format/arg.cc70
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_format/arg.h12
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_format/extension.cc22
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_format/output.cc8
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_format/parser.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_join_internal.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/string_constant.h5
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/utf8.cc97
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/utf8.h16
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/numbers.cc85
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/numbers.h145
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/str_cat.cc13
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/str_cat.h64
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/str_format.h36
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/str_replace.cc6
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/str_replace.h12
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/string_view.cc13
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/string_view.h59
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/strip.h8
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/substitute.cc9
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/substitute.h130
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/futex_waiter.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.cc5
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/pthread_waiter.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/sem_waiter.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter_base.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/win32_waiter.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc2
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/mutex.h166
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/notification.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/civil_time.cc1
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/duration.cc19
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc182
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/time.h26
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/any.h493
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/bad_any_cast.cc64
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/bad_any_cast.h75
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/bad_optional_access.cc66
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/bad_optional_access.h78
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/bad_variant_access.cc82
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/bad_variant_access.h82
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/internal/optional.h352
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/internal/variant.h1622
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/optional.h754
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/span.h24
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/variant.h789
-rw-r--r--contrib/restricted/abseil-cpp/absl/utility/internal/if_constexpr.h70
-rw-r--r--contrib/restricted/abseil-cpp/absl/utility/utility.h195
-rw-r--r--contrib/restricted/abseil-cpp/patches/no-icu-windows.patch34
-rw-r--r--contrib/restricted/abseil-cpp/ya.make11
217 files changed, 6862 insertions, 10280 deletions
diff --git a/contrib/restricted/abseil-cpp/.yandex_meta/__init__.py b/contrib/restricted/abseil-cpp/.yandex_meta/__init__.py
index 81483ed1998..e99812c41e8 100644
--- a/contrib/restricted/abseil-cpp/.yandex_meta/__init__.py
+++ b/contrib/restricted/abseil-cpp/.yandex_meta/__init__.py
@@ -54,9 +54,6 @@ abseil_cpp = CMakeNinjaNixProject(
},
put_with={
"absl_base": [
- "absl_bad_any_cast_impl",
- "absl_bad_optional_access",
- "absl_bad_variant_access",
"absl_city",
"absl_civil_time",
"absl_cord",
@@ -94,7 +91,6 @@ abseil_cpp = CMakeNinjaNixProject(
"absl_int128",
"absl_kernel_timeout_internal",
"absl_leak_check",
- "absl_log_entry",
"absl_log_flags",
"absl_log_globals",
"absl_log_initialize",
@@ -116,8 +112,8 @@ abseil_cpp = CMakeNinjaNixProject(
"absl_poison",
"absl_random_distributions",
"absl_random_internal_distribution_test_util",
+ "absl_random_internal_entropy_pool",
"absl_random_internal_platform",
- "absl_random_internal_pool_urbg",
"absl_random_internal_randen",
"absl_random_internal_randen_hwaes",
"absl_random_internal_randen_hwaes_impl",
diff --git a/contrib/restricted/abseil-cpp/.yandex_meta/devtools.copyrights.report b/contrib/restricted/abseil-cpp/.yandex_meta/devtools.copyrights.report
index 371eb874782..f5785ffdeb7 100644
--- a/contrib/restricted/abseil-cpp/.yandex_meta/devtools.copyrights.report
+++ b/contrib/restricted/abseil-cpp/.yandex_meta/devtools.copyrights.report
@@ -57,9 +57,6 @@ BELONGS ya.make
absl/base/internal/exception_safety_testing.h [1:1]
absl/base/internal/exception_testing.h [1:1]
absl/base/internal/identity.h [1:1]
- absl/base/internal/inline_variable.h [1:1]
- absl/base/internal/inline_variable_testing.h [1:1]
- absl/base/internal/invoke.h [1:1]
absl/base/internal/low_level_alloc.cc [1:1]
absl/base/internal/low_level_alloc.h [1:1]
absl/base/internal/low_level_scheduling.h [1:1]
@@ -125,6 +122,8 @@ BELONGS ya.make
absl/random/internal/chi_square.h [1:1]
absl/random/internal/distribution_test_util.cc [1:1]
absl/random/internal/distribution_test_util.h [1:1]
+ absl/random/internal/entropy_pool.cc [1:1]
+ absl/random/internal/entropy_pool.h [1:1]
absl/random/internal/explicit_seed_seq.h [1:1]
absl/random/internal/fast_uniform_bits.h [1:1]
absl/random/internal/fastmath.h [1:1]
@@ -132,8 +131,6 @@ BELONGS ya.make
absl/random/internal/iostream_state_saver.h [1:1]
absl/random/internal/nonsecure_base.h [1:1]
absl/random/internal/platform.h [1:1]
- absl/random/internal/pool_urbg.cc [1:1]
- absl/random/internal/pool_urbg.h [1:1]
absl/random/internal/randen.cc [1:1]
absl/random/internal/randen.h [1:1]
absl/random/internal/randen_detect.cc [1:1]
@@ -222,10 +219,6 @@ BELONGS ya.make
absl/time/time.cc [1:1]
absl/time/time.h [1:1]
absl/types/any.h [2:2]
- absl/types/bad_any_cast.cc [1:1]
- absl/types/bad_optional_access.cc [1:1]
- absl/types/bad_variant_access.cc [1:1]
- absl/types/internal/optional.h [1:1]
absl/types/optional.h [1:1]
absl/types/span.h [2:2]
absl/utility/utility.h [1:1]
@@ -269,7 +262,7 @@ BELONGS ya.make
Score : 100.00
Match type : COPYRIGHT
Files with this license:
- absl/base/internal/nullability_impl.h [1:1]
+ absl/base/internal/nullability_deprecated.h [1:1]
absl/base/no_destructor.h [1:1]
absl/base/nullability.h [1:1]
absl/base/prefetch.h [1:1]
@@ -293,7 +286,6 @@ BELONGS ya.make
absl/synchronization/internal/waiter_base.h [1:1]
absl/synchronization/internal/win32_waiter.cc [1:1]
absl/synchronization/internal/win32_waiter.h [1:1]
- absl/utility/internal/if_constexpr.h [1:1]
KEEP COPYRIGHT_SERVICE_LABEL 2277624a2da390a98ec17138cb6dc2a5
BELONGS ya.make
@@ -445,11 +437,7 @@ BELONGS ya.make
absl/time/civil_time.cc [1:1]
absl/time/civil_time.h [1:1]
absl/time/internal/get_current_time_chrono.inc [1:1]
- absl/types/bad_any_cast.h [1:1]
- absl/types/bad_optional_access.h [1:1]
- absl/types/bad_variant_access.h [1:1]
absl/types/compare.h [1:1]
- absl/types/internal/variant.h [1:1]
absl/types/variant.h [1:1]
KEEP COPYRIGHT_SERVICE_LABEL 58e60221a225d38384f3c66b2400cc91
@@ -568,7 +556,6 @@ BELONGS ya.make
absl/log/internal/vlog_config.h [1:1]
absl/log/internal/voidify.h [1:1]
absl/log/log.h [1:1]
- absl/log/log_entry.cc [2:2]
absl/log/log_entry.h [1:1]
absl/log/log_sink.cc [1:1]
absl/log/log_sink.h [1:1]
@@ -578,7 +565,6 @@ BELONGS ya.make
absl/log/structured.h [1:1]
absl/log/vlog_is_on.h [1:1]
absl/strings/charset.h [1:1]
- absl/strings/cord_buffer.cc [1:1]
absl/strings/has_absl_stringify.h [1:1]
absl/strings/internal/cord_data_edge.h [1:1]
absl/strings/internal/damerau_levenshtein_distance.cc [1:1]
@@ -623,6 +609,22 @@ BELONGS ya.make
Files with this license:
absl/debugging/internal/demangle_rust.cc [38:40]
+KEEP COPYRIGHT_SERVICE_LABEL cf1ca39881e3a11851beebfeb11f8493
+BELONGS ya.make
+ License text:
+ // Copyright 2025 The Abseil Authors
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ absl/base/internal/iterator_traits.h [1:1]
+ absl/base/internal/iterator_traits_test_helper.h [1:1]
+ absl/container/internal/hashtable_control_bytes.h [1:1]
+ absl/container/internal/raw_hash_set_resize_impl.h [1:1]
+ absl/debugging/internal/addresses.h [1:1]
+ absl/hash/internal/weakly_mixed_integer.h [1:1]
+
KEEP COPYRIGHT_SERVICE_LABEL d34864d3c7c7a5ffae3d414344aa54a8
BELONGS ya.make
License text:
@@ -632,7 +634,7 @@ BELONGS ya.make
Score : 100.00
Match type : COPYRIGHT
Files with this license:
- absl/base/internal/fast_type_id.h [2:2]
+ absl/base/fast_type_id.h [1:1]
absl/base/internal/strerror.cc [1:1]
absl/base/internal/strerror.h [1:1]
absl/debugging/symbolize_darwin.inc [1:1]
diff --git a/contrib/restricted/abseil-cpp/.yandex_meta/devtools.licenses.report b/contrib/restricted/abseil-cpp/.yandex_meta/devtools.licenses.report
index 86486511c12..4b3dea2542a 100644
--- a/contrib/restricted/abseil-cpp/.yandex_meta/devtools.licenses.report
+++ b/contrib/restricted/abseil-cpp/.yandex_meta/devtools.licenses.report
@@ -39,6 +39,8 @@ BELONGS ya.make
Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
Files with this license:
absl/base/internal/cycleclock_config.h [3:13]
+ absl/base/internal/iterator_traits.h [3:13]
+ absl/base/internal/iterator_traits_test_helper.h [3:13]
absl/base/internal/poison.cc [3:13]
absl/base/internal/poison.h [3:13]
absl/base/internal/tracing.cc [3:13]
@@ -46,6 +48,8 @@ BELONGS ya.make
absl/base/internal/unscaledcycleclock_config.h [3:13]
absl/base/prefetch.h [3:13]
absl/container/hash_container_defaults.h [3:13]
+ absl/container/internal/hashtable_control_bytes.h [3:13]
+ absl/container/internal/raw_hash_set_resize_impl.h [3:13]
absl/crc/crc32c.cc [3:13]
absl/crc/crc32c.h [3:13]
absl/crc/internal/cpu_detect.cc [3:13]
@@ -59,6 +63,7 @@ BELONGS ya.make
absl/crc/internal/crc_non_temporal_memcpy.cc [3:13]
absl/crc/internal/non_temporal_arm_intrinsics.h [3:13]
absl/crc/internal/non_temporal_memcpy.h [3:13]
+ absl/debugging/internal/addresses.h [3:13]
absl/debugging/internal/bounded_utf8_length_sequence.h [3:13]
absl/debugging/internal/decode_rust_punycode.cc [3:13]
absl/debugging/internal/decode_rust_punycode.h [3:13]
@@ -69,6 +74,7 @@ BELONGS ya.make
absl/debugging/internal/utf8_for_code_point.h [3:13]
absl/hash/internal/low_level_hash.cc [3:13]
absl/hash/internal/low_level_hash.h [3:13]
+ absl/hash/internal/weakly_mixed_integer.h [3:13]
absl/log/internal/append_truncated.h [3:13]
absl/log/internal/fnmatch.cc [3:13]
absl/log/internal/fnmatch.h [3:13]
@@ -88,7 +94,6 @@ BELONGS ya.make
absl/status/internal/status_matchers.h [3:13]
absl/strings/cord_analysis.cc [3:13]
absl/strings/cord_analysis.h [3:13]
- absl/strings/cord_buffer.cc [3:13]
absl/strings/cord_buffer.h [3:13]
absl/strings/cordz_test_helpers.h [3:13]
absl/strings/has_absl_stringify.h [3:13]
@@ -114,7 +119,6 @@ BELONGS ya.make
absl/strings/internal/stringify_sink.cc [3:13]
absl/strings/internal/stringify_sink.h [3:13]
absl/synchronization/internal/kernel_timeout.cc [3:13]
- absl/utility/internal/if_constexpr.h [3:13]
KEEP Apache-2.0 0f66a26c8211d9f8c21369fcb6702370
BELONGS ya.make
@@ -158,7 +162,7 @@ BELONGS ya.make
Match type : NOTICE
Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
Files with this license:
- README.md [145:148]
+ README.md [143:146]
KEEP Public-Domain 3a682fe6def1cddc889298ee2a043f6f
BELONGS ya.make
@@ -189,6 +193,7 @@ BELONGS ya.make
absl/base/config.h [4:14]
absl/base/const_init.h [3:13]
absl/base/dynamic_annotations.h [3:13]
+ absl/base/fast_type_id.h [3:13]
absl/base/internal/atomic_hook.h [3:13]
absl/base/internal/atomic_hook_test_helper.h [3:13]
absl/base/internal/cycleclock.cc [3:13]
@@ -199,16 +204,12 @@ BELONGS ya.make
absl/base/internal/errno_saver.h [3:13]
absl/base/internal/exception_safety_testing.h [3:13]
absl/base/internal/exception_testing.h [3:13]
- absl/base/internal/fast_type_id.h [4:14]
absl/base/internal/hide_ptr.h [3:13]
absl/base/internal/identity.h [3:13]
- absl/base/internal/inline_variable.h [3:13]
- absl/base/internal/inline_variable_testing.h [3:13]
- absl/base/internal/invoke.h [3:13]
absl/base/internal/low_level_alloc.cc [3:13]
absl/base/internal/low_level_alloc.h [3:13]
absl/base/internal/low_level_scheduling.h [3:13]
- absl/base/internal/nullability_impl.h [3:13]
+ absl/base/internal/nullability_deprecated.h [3:13]
absl/base/internal/per_thread_tls.h [3:13]
absl/base/internal/pretty_function.h [3:13]
absl/base/internal/raw_logging.cc [3:13]
@@ -409,7 +410,6 @@ BELONGS ya.make
absl/log/internal/test_matchers.h [3:13]
absl/log/internal/voidify.h [3:13]
absl/log/log.h [3:13]
- absl/log/log_entry.cc [4:14]
absl/log/log_entry.h [3:13]
absl/log/log_sink.h [3:13]
absl/log/log_sink_registry.h [3:13]
@@ -439,6 +439,8 @@ BELONGS ya.make
absl/random/internal/distribution_caller.h [4:14]
absl/random/internal/distribution_test_util.cc [3:13]
absl/random/internal/distribution_test_util.h [3:13]
+ absl/random/internal/entropy_pool.cc [3:13]
+ absl/random/internal/entropy_pool.h [3:13]
absl/random/internal/explicit_seed_seq.h [3:13]
absl/random/internal/fast_uniform_bits.h [3:13]
absl/random/internal/fastmath.h [3:13]
@@ -449,8 +451,6 @@ BELONGS ya.make
absl/random/internal/nonsecure_base.h [3:13]
absl/random/internal/pcg_engine.h [3:13]
absl/random/internal/platform.h [3:13]
- absl/random/internal/pool_urbg.cc [3:13]
- absl/random/internal/pool_urbg.h [3:13]
absl/random/internal/randen.cc [3:13]
absl/random/internal/randen.h [3:13]
absl/random/internal/randen_detect.cc [3:13]
@@ -600,21 +600,26 @@ BELONGS ya.make
absl/time/time.cc [3:13]
absl/time/time.h [3:13]
absl/types/any.h [4:14]
- absl/types/bad_any_cast.cc [3:13]
- absl/types/bad_any_cast.h [3:13]
- absl/types/bad_optional_access.cc [3:13]
- absl/types/bad_optional_access.h [3:13]
- absl/types/bad_variant_access.cc [3:13]
- absl/types/bad_variant_access.h [3:13]
absl/types/compare.h [3:13]
- absl/types/internal/optional.h [3:13]
absl/types/internal/span.h [4:14]
- absl/types/internal/variant.h [3:13]
absl/types/optional.h [3:13]
absl/types/span.h [4:14]
absl/types/variant.h [3:13]
absl/utility/utility.h [3:13]
+KEEP Apache-2.0 504cec32e5f5e837a345cb36cdffbdeb
+BELONGS ya.make
+ License text:
+ The Abseil C++ library is licensed under the terms of the Apache
+ license. See [LICENSE](LICENSE) for more information.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 90.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ README.md [145:146]
+
SKIP LicenseRef-scancode-warranty-disclaimer 5ba761db85e57267704f71a6bcf20c2a
BELONGS ya.make
License text:
@@ -650,7 +655,7 @@ BELONGS ya.make
Match type : REFERENCE
Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/unknown-license-reference.LICENSE
Files with this license:
- README.md [148:148]
+ README.md [146:146]
SKIP LicenseRef-scancode-generic-exception 99cf00730bf3973359b67cfa5b7ac051
BELONGS ya.make
diff --git a/contrib/restricted/abseil-cpp/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp/.yandex_meta/licenses.list.txt
index 48f71f2ff0b..ea402851990 100644
--- a/contrib/restricted/abseil-cpp/.yandex_meta/licenses.list.txt
+++ b/contrib/restricted/abseil-cpp/.yandex_meta/licenses.list.txt
@@ -265,6 +265,11 @@ license. See [LICENSE](LICENSE) for more information.
// limitations under the License.
+====================Apache-2.0====================
+The Abseil C++ library is licensed under the terms of the Apache
+license. See [LICENSE](LICENSE) for more information.
+
+
====================COPYRIGHT====================
// Copyright 2016 Google Inc. All Rights Reserved.
@@ -306,6 +311,10 @@ license. See [LICENSE](LICENSE) for more information.
====================COPYRIGHT====================
+// Copyright 2025 The Abseil Authors
+
+
+====================COPYRIGHT====================
bool IsAlpha(char c) { return IsLower(c) || IsUpper(c); }
bool IsIdentifierChar(char c) { return IsAlpha(c) || IsDigit(c) || c == '_'; }
bool IsLowerHexDigit(char c) { return IsDigit(c) || ('a' <= c && c <= 'f'); }
diff --git a/contrib/restricted/abseil-cpp/.yandex_meta/override.nix b/contrib/restricted/abseil-cpp/.yandex_meta/override.nix
index d68c09faac8..646caf14599 100644
--- a/contrib/restricted/abseil-cpp/.yandex_meta/override.nix
+++ b/contrib/restricted/abseil-cpp/.yandex_meta/override.nix
@@ -1,11 +1,11 @@
self: super: with self; rec {
- version = "20250127.1";
+ version = "20250512.0";
src = fetchFromGitHub {
owner = "abseil";
repo = "abseil-cpp";
rev = version;
- hash = "sha256-QTywqQCkyGFpdbtDBvUwz9bGXxbJs/qoFKF6zYAZUmQ=";
+ hash = "sha256-Tuw1Py+LQdXS+bizXsduPjjEU5YIAVFvL+iJ+w8JoSU=";
};
patches = [];
diff --git a/contrib/restricted/abseil-cpp/README.md b/contrib/restricted/abseil-cpp/README.md
index f834fcdec06..c2c851ec35b 100644
--- a/contrib/restricted/abseil-cpp/README.md
+++ b/contrib/restricted/abseil-cpp/README.md
@@ -1,7 +1,7 @@
# Abseil - C++ Common Libraries
The repository contains the Abseil C++ library code. Abseil is an open-source
-collection of C++ code (compliant to C++14) designed to augment the C++
+collection of C++ code (compliant to C++17) designed to augment the C++
standard library.
## Table of Contents
@@ -99,8 +99,8 @@ Abseil contains the following C++ library components:
<br /> The `memory` library contains memory management facilities that augment
C++'s `<memory>` library.
* [`meta`](absl/meta/)
- <br /> The `meta` library contains compatible versions of type checks
- available within C++14 and C++17 versions of the C++ `<type_traits>` library.
+ <br /> The `meta` library contains type checks
+ similar to those available in the C++ `<type_traits>` library.
* [`numeric`](absl/numeric/)
<br /> The `numeric` library contains 128-bit integer types as well as
implementations of C++20's bitwise math functions.
@@ -108,15 +108,14 @@ Abseil contains the following C++ library components:
<br /> The `profiling` library contains utility code for profiling C++
entities. It is currently a private dependency of other Abseil libraries.
* [`random`](absl/random/)
- <br /> The `random` library contains functions for generating psuedorandom
+ <br /> The `random` library contains functions for generating pseudorandom
values.
* [`status`](absl/status/)
<br /> The `status` library contains abstractions for error handling,
specifically `absl::Status` and `absl::StatusOr<T>`.
* [`strings`](absl/strings/)
<br /> The `strings` library contains a variety of strings routines and
- utilities, including a C++14-compatible version of the C++17
- `std::string_view` type.
+ utilities.
* [`synchronization`](absl/synchronization/)
<br /> The `synchronization` library contains concurrency primitives (Abseil's
`absl::Mutex` class, an alternative to `std::mutex`) and a variety of
@@ -126,8 +125,7 @@ Abseil contains the following C++ library components:
points in time, durations of time, and formatting and parsing time within
time zones.
* [`types`](absl/types/)
- <br /> The `types` library contains non-container utility types, like a
- C++14-compatible version of the C++17 `std::optional` type.
+ <br /> The `types` library contains non-container utility types.
* [`utility`](absl/utility/)
<br /> The `utility` library contains utility and helper code.
diff --git a/contrib/restricted/abseil-cpp/absl/algorithm/container.h b/contrib/restricted/abseil-cpp/absl/algorithm/container.h
index 3193656f889..6f9c1938fa8 100644
--- a/contrib/restricted/abseil-cpp/absl/algorithm/container.h
+++ b/contrib/restricted/abseil-cpp/absl/algorithm/container.h
@@ -44,7 +44,6 @@
#include <cassert>
#include <iterator>
#include <numeric>
-#include <random>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
@@ -76,8 +75,8 @@ using ContainerIter = decltype(begin(std::declval<C&>()));
// An MSVC bug involving template parameter substitution requires us to use
// decltype() here instead of just std::pair.
template <typename C1, typename C2>
-using ContainerIterPairType =
- decltype(std::make_pair(ContainerIter<C1>(), ContainerIter<C2>()));
+using ContainerIterPairType = decltype(std::make_pair(
+ std::declval<ContainerIter<C1>>(), std::declval<ContainerIter<C2>>()));
template <typename C>
using ContainerDifferenceType = decltype(std::distance(
@@ -847,25 +846,9 @@ template <typename C, typename OutputIterator, typename Distance,
typename UniformRandomBitGenerator>
OutputIterator c_sample(const C& c, OutputIterator result, Distance n,
UniformRandomBitGenerator&& gen) {
-#if defined(__cpp_lib_sample) && __cpp_lib_sample >= 201603L
return std::sample(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c), result, n,
std::forward<UniformRandomBitGenerator>(gen));
-#else
- // Fall back to a stable selection-sampling implementation.
- auto first = container_algorithm_internal::c_begin(c);
- Distance unsampled_elements = c_distance(c);
- n = (std::min)(n, unsampled_elements);
- for (; n != 0; ++first) {
- Distance r =
- std::uniform_int_distribution<Distance>(0, --unsampled_elements)(gen);
- if (r < n) {
- *result++ = *first;
- --n;
- }
- }
- return result;
-#endif
}
//------------------------------------------------------------------------------
diff --git a/contrib/restricted/abseil-cpp/absl/base/attributes.h b/contrib/restricted/abseil-cpp/absl/base/attributes.h
index 95b102e52bc..d009f6d4912 100644
--- a/contrib/restricted/abseil-cpp/absl/base/attributes.h
+++ b/contrib/restricted/abseil-cpp/absl/base/attributes.h
@@ -339,9 +339,9 @@
#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
#ifdef _AIX
// __attribute__((section(#name))) on AIX is achieved by using the `.csect`
-// psudo op which includes an additional integer as part of its syntax indcating
-// alignment. If data fall under different alignments then you might get a
-// compilation error indicating a `Section type conflict`.
+// pseudo op which includes an additional integer as part of its syntax
+// indicating alignment. If data fall under different alignments then you might
+// get a compilation error indicating a `Section type conflict`.
#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
#else
#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
@@ -553,12 +553,11 @@
//
// Prevents the compiler from complaining about variables that appear unused.
//
-// For code or headers that are assured to only build with C++17 and up, prefer
-// just using the standard '[[maybe_unused]]' directly over this macro.
+// Deprecated: Use the standard C++17 `[[maybe_unused]` instead.
//
// Due to differences in positioning requirements between the old, compiler
-// specific __attribute__ syntax and the now standard [[maybe_unused]], this
-// macro does not attempt to take advantage of '[[maybe_unused]]'.
+// specific __attribute__ syntax and the now standard `[[maybe_unused]]`, this
+// macro does not attempt to take advantage of `[[maybe_unused]]`.
#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__))
#undef ABSL_ATTRIBUTE_UNUSED
#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__))
@@ -759,6 +758,76 @@
#define ABSL_CONST_INIT
#endif
+// ABSL_REQUIRE_EXPLICIT_INIT
+//
+// ABSL_REQUIRE_EXPLICIT_INIT is placed *after* the data members of an aggregate
+// type to indicate that the annotated member must be explicitly initialized by
+// the user whenever the aggregate is constructed. For example:
+//
+// struct Coord {
+// int x ABSL_REQUIRE_EXPLICIT_INIT;
+// int y ABSL_REQUIRE_EXPLICIT_INIT;
+// };
+// Coord coord = {1}; // warning: field 'y' is not explicitly initialized
+//
+// Note that usage on C arrays is not supported in C++.
+// Use a struct (such as std::array) to wrap the array member instead.
+//
+// Avoid applying this attribute to the members of non-aggregate types.
+// The behavior within non-aggregates is unspecified and subject to change.
+//
+// Do NOT attempt to suppress or demote the error generated by this attribute.
+// Just like with a missing function argument, it is a hard error by design.
+//
+// See the upstream documentation for more details:
+// https://clang.llvm.org/docs/AttributeReference.html#require-explicit-initialization
+#ifdef __cplusplus
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_explicit_initialization)
+// clang-format off
+#define ABSL_REQUIRE_EXPLICIT_INIT \
+ [[clang::require_explicit_initialization]] = \
+ AbslInternal_YouForgotToExplicitlyInitializeAField::v
+#else
+#define ABSL_REQUIRE_EXPLICIT_INIT \
+ = AbslInternal_YouForgotToExplicitlyInitializeAField::v
+#endif
+// clang-format on
+#else
+// clang-format off
+#if ABSL_HAVE_ATTRIBUTE(require_explicit_initialization)
+#define ABSL_REQUIRE_EXPLICIT_INIT \
+ __attribute__((require_explicit_initialization))
+#else
+#define ABSL_REQUIRE_EXPLICIT_INIT \
+ /* No portable fallback for C is available */
+#endif
+// clang-format on
+#endif
+
+#ifdef __cplusplus
+struct AbslInternal_YouForgotToExplicitlyInitializeAField {
+ // A portable version of [[clang::require_explicit_initialization]] that
+ // never builds, as a last resort for all toolchains.
+ // The error messages are poor, so we don't rely on this unless we have to.
+ template <class T>
+#if !defined(SWIG)
+ constexpr
+#endif
+ operator T() const /* NOLINT */ {
+ const void *volatile deliberately_volatile_ptr = nullptr;
+ // Infinite loop to prevent constexpr compilation
+ for (;;) {
+ // This assignment ensures the 'this' pointer is not optimized away, so
+ // that linking always fails.
+ deliberately_volatile_ptr = this; // Deliberately not constexpr
+ (void)deliberately_volatile_ptr;
+ }
+ }
+ // This is deliberately left undefined to prevent linking
+ static AbslInternal_YouForgotToExplicitlyInitializeAField v;
+};
+#endif
+
// ABSL_ATTRIBUTE_PURE_FUNCTION
//
// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure"
diff --git a/contrib/restricted/abseil-cpp/absl/base/call_once.h b/contrib/restricted/abseil-cpp/absl/base/call_once.h
index b666d36f0e4..7bfd91610e5 100644
--- a/contrib/restricted/abseil-cpp/absl/base/call_once.h
+++ b/contrib/restricted/abseil-cpp/absl/base/call_once.h
@@ -28,12 +28,12 @@
#include <algorithm>
#include <atomic>
#include <cstdint>
+#include <functional>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
-#include "absl/base/internal/invoke.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
@@ -49,8 +49,8 @@ ABSL_NAMESPACE_BEGIN
class once_flag;
namespace base_internal {
-absl::Nonnull<std::atomic<uint32_t>*> ControlWord(
- absl::Nonnull<absl::once_flag*> flag);
+std::atomic<uint32_t>* absl_nonnull ControlWord(
+ absl::once_flag* absl_nonnull flag);
} // namespace base_internal
// call_once()
@@ -93,8 +93,8 @@ class once_flag {
once_flag& operator=(const once_flag&) = delete;
private:
- friend absl::Nonnull<std::atomic<uint32_t>*> base_internal::ControlWord(
- absl::Nonnull<once_flag*> flag);
+ friend std::atomic<uint32_t>* absl_nonnull base_internal::ControlWord(
+ once_flag* absl_nonnull flag);
std::atomic<uint32_t> control_;
};
@@ -108,7 +108,7 @@ namespace base_internal {
// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
// initialize entities used by the scheduler implementation.
template <typename Callable, typename... Args>
-void LowLevelCallOnce(absl::Nonnull<absl::once_flag*> flag, Callable&& fn,
+void LowLevelCallOnce(absl::once_flag* absl_nonnull flag, Callable&& fn,
Args&&... args);
// Disables scheduling while on stack when scheduling mode is non-cooperative.
@@ -150,7 +150,7 @@ enum {
template <typename Callable, typename... Args>
void
- CallOnceImpl(absl::Nonnull<std::atomic<uint32_t>*> control,
+ CallOnceImpl(std::atomic<uint32_t>* absl_nonnull control,
base_internal::SchedulingMode scheduling_mode, Callable&& fn,
Args&&... args) {
#ifndef NDEBUG
@@ -181,8 +181,7 @@ template <typename Callable, typename... Args>
std::memory_order_relaxed) ||
base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
scheduling_mode) == kOnceInit) {
- base_internal::invoke(std::forward<Callable>(fn),
- std::forward<Args>(args)...);
+ std::invoke(std::forward<Callable>(fn), std::forward<Args>(args)...);
old_control =
control->exchange(base_internal::kOnceDone, std::memory_order_release);
if (old_control == base_internal::kOnceWaiter) {
@@ -191,13 +190,13 @@ template <typename Callable, typename... Args>
} // else *control is already kOnceDone
}
-inline absl::Nonnull<std::atomic<uint32_t>*> ControlWord(
- absl::Nonnull<once_flag*> flag) {
+inline std::atomic<uint32_t>* absl_nonnull ControlWord(
+ once_flag* absl_nonnull flag) {
return &flag->control_;
}
template <typename Callable, typename... Args>
-void LowLevelCallOnce(absl::Nonnull<absl::once_flag*> flag, Callable&& fn,
+void LowLevelCallOnce(absl::once_flag* absl_nonnull flag, Callable&& fn,
Args&&... args) {
std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
uint32_t s = once->load(std::memory_order_acquire);
diff --git a/contrib/restricted/abseil-cpp/absl/base/config.h b/contrib/restricted/abseil-cpp/absl/base/config.h
index 63b9642d793..9170a6f62e1 100644
--- a/contrib/restricted/abseil-cpp/absl/base/config.h
+++ b/contrib/restricted/abseil-cpp/absl/base/config.h
@@ -117,8 +117,8 @@
//
// LTS releases can be obtained from
// https://github.com/abseil/abseil-cpp/releases.
-#define ABSL_LTS_RELEASE_VERSION 20250127
-#define ABSL_LTS_RELEASE_PATCH_LEVEL 1
+#define ABSL_LTS_RELEASE_VERSION 20250512
+#define ABSL_LTS_RELEASE_PATCH_LEVEL 0
// Helper macro to convert a CPP variable to a string literal.
#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
@@ -274,15 +274,12 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
#endif
-
// ABSL_HAVE_THREAD_LOCAL
//
-// DEPRECATED - `thread_local` is available on all supported platforms.
-// Checks whether C++11's `thread_local` storage duration specifier is
-// supported.
+// Checks whether the `thread_local` storage duration specifier is supported.
#ifdef ABSL_HAVE_THREAD_LOCAL
#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
-#else
+#elif !defined(__XTENSA__)
#define ABSL_HAVE_THREAD_LOCAL 1
#endif
@@ -469,6 +466,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
//
// Checks the endianness of the platform.
//
+// Prefer using `std::endian` in C++20, or `absl::endian` from
+// absl/numeric/bits.h prior to C++20.
+//
// Notes: uses the built in endian macros provided by GCC (since 4.6) and
// Clang (since 3.2); see
// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html.
@@ -520,54 +520,22 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
#endif
-// ABSL_HAVE_STD_ANY
-//
-// Checks whether C++17 std::any is available.
-#ifdef ABSL_HAVE_STD_ANY
-#error "ABSL_HAVE_STD_ANY cannot be directly set."
-#elif defined(__cpp_lib_any) && __cpp_lib_any >= 201606L
-#define ABSL_HAVE_STD_ANY 1
-#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
- ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
- !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+// Deprecated macros for polyfill detection.
#define ABSL_HAVE_STD_ANY 1
-#endif
-
-// ABSL_HAVE_STD_OPTIONAL
-//
-// Checks whether C++17 std::optional is available.
-#ifdef ABSL_HAVE_STD_OPTIONAL
-#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
-#elif defined(__cpp_lib_optional) && __cpp_lib_optional >= 202106L
-#define ABSL_HAVE_STD_OPTIONAL 1
-#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
- ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
- !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_USES_STD_ANY 1
#define ABSL_HAVE_STD_OPTIONAL 1
-#endif
-
-// ABSL_HAVE_STD_VARIANT
-//
-// Checks whether C++17 std::variant is available.
-#ifdef ABSL_HAVE_STD_VARIANT
-#error "ABSL_HAVE_STD_VARIANT cannot be directly set."
-#elif defined(__cpp_lib_variant) && __cpp_lib_variant >= 201606L
-#define ABSL_HAVE_STD_VARIANT 1
-#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
- ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
- !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_USES_STD_OPTIONAL 1
#define ABSL_HAVE_STD_VARIANT 1
-#endif
+#define ABSL_USES_STD_VARIANT 1
// ABSL_HAVE_STD_STRING_VIEW
//
-// Checks whether C++17 std::string_view is available.
+// Deprecated: always defined to 1.
+// std::string_view was added in C++17, which means all versions of C++
+// supported by Abseil have it.
#ifdef ABSL_HAVE_STD_STRING_VIEW
#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
-#elif defined(__cpp_lib_string_view) && __cpp_lib_string_view >= 201606L
-#define ABSL_HAVE_STD_STRING_VIEW 1
-#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
- ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#else
#define ABSL_HAVE_STD_STRING_VIEW 1
#endif
@@ -587,63 +555,15 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_STD_ORDERING 1
#endif
-// ABSL_USES_STD_ANY
-//
-// Indicates whether absl::any is an alias for std::any.
-#if !defined(ABSL_OPTION_USE_STD_ANY)
-#error options.h is misconfigured.
-#elif ABSL_OPTION_USE_STD_ANY == 0 || \
- (ABSL_OPTION_USE_STD_ANY == 2 && !defined(ABSL_HAVE_STD_ANY))
-#undef ABSL_USES_STD_ANY
-#elif ABSL_OPTION_USE_STD_ANY == 1 || \
- (ABSL_OPTION_USE_STD_ANY == 2 && defined(ABSL_HAVE_STD_ANY))
-#define ABSL_USES_STD_ANY 1
-#else
-#error options.h is misconfigured.
-#endif
-
-// ABSL_USES_STD_OPTIONAL
-//
-// Indicates whether absl::optional is an alias for std::optional.
-#if !defined(ABSL_OPTION_USE_STD_OPTIONAL)
-#error options.h is misconfigured.
-#elif ABSL_OPTION_USE_STD_OPTIONAL == 0 || \
- (ABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(ABSL_HAVE_STD_OPTIONAL))
-#undef ABSL_USES_STD_OPTIONAL
-#elif ABSL_OPTION_USE_STD_OPTIONAL == 1 || \
- (ABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(ABSL_HAVE_STD_OPTIONAL))
-#define ABSL_USES_STD_OPTIONAL 1
-#else
-#error options.h is misconfigured.
-#endif
-
-// ABSL_USES_STD_VARIANT
-//
-// Indicates whether absl::variant is an alias for std::variant.
-#if !defined(ABSL_OPTION_USE_STD_VARIANT)
-#error options.h is misconfigured.
-#elif ABSL_OPTION_USE_STD_VARIANT == 0 || \
- (ABSL_OPTION_USE_STD_VARIANT == 2 && !defined(ABSL_HAVE_STD_VARIANT))
-#undef ABSL_USES_STD_VARIANT
-#elif ABSL_OPTION_USE_STD_VARIANT == 1 || \
- (ABSL_OPTION_USE_STD_VARIANT == 2 && defined(ABSL_HAVE_STD_VARIANT))
-#define ABSL_USES_STD_VARIANT 1
-#else
-#error options.h is misconfigured.
-#endif
-
// ABSL_USES_STD_STRING_VIEW
//
// Indicates whether absl::string_view is an alias for std::string_view.
#if !defined(ABSL_OPTION_USE_STD_STRING_VIEW)
#error options.h is misconfigured.
-#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \
- (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \
- !defined(ABSL_HAVE_STD_STRING_VIEW))
+#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0
#undef ABSL_USES_STD_STRING_VIEW
#elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \
- (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \
- defined(ABSL_HAVE_STD_STRING_VIEW))
+ ABSL_OPTION_USE_STD_STRING_VIEW == 2
#define ABSL_USES_STD_STRING_VIEW 1
#else
#error options.h is misconfigured.
@@ -665,14 +585,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error options.h is misconfigured.
#endif
-// In debug mode, MSVC 2017's std::variant throws a EXCEPTION_ACCESS_VIOLATION
-// SEH exception from emplace for variant<SomeStruct> when constructing the
-// struct can throw. This defeats some of variant_test and
-// variant_exception_safety_test.
-#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG)
-#define ABSL_INTERNAL_MSVC_2017_DBG_MODE
-#endif
-
// ABSL_INTERNAL_MANGLED_NS
// ABSL_INTERNAL_MANGLED_BACKREFERENCE
//
@@ -813,36 +725,15 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
//
-// Class template argument deduction is a language feature added in C++17.
+// Deprecated: always defined to 1.
+// Class template argument deduction is a language feature added in C++17,
+// which means all versions of C++ supported by Abseil have it.
#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set."
-#elif defined(__cpp_deduction_guides)
+#else
#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
#endif
-// ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-//
-// Prior to C++17, static constexpr variables defined in classes required a
-// separate definition outside of the class body, for example:
-//
-// class Foo {
-// static constexpr int kBar = 0;
-// };
-// constexpr int Foo::kBar;
-//
-// In C++17, these variables defined in classes are considered inline variables,
-// and the extra declaration is redundant. Since some compilers warn on the
-// extra declarations, ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used
-// conditionally ignore them:
-//
-// #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-// constexpr int Foo::kBar;
-// #endif
-#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
- ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
-#define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1
-#endif
-
// `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with
// RTTI support.
#ifdef ABSL_INTERNAL_HAS_RTTI
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/fast_type_id.h b/contrib/restricted/abseil-cpp/absl/base/fast_type_id.h
index a547b3a8bc2..ff250276739 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/fast_type_id.h
+++ b/contrib/restricted/abseil-cpp/absl/base/fast_type_id.h
@@ -1,4 +1,3 @@
-//
// Copyright 2020 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,37 +13,33 @@
// limitations under the License.
//
-#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
-#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#ifndef ABSL_BASE_FAST_TYPE_ID_H_
+#define ABSL_BASE_FAST_TYPE_ID_H_
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
+namespace base_internal {
template <typename Type>
struct FastTypeTag {
- constexpr static char dummy_var = 0;
+ static constexpr char kDummyVar = 0;
};
+} // namespace base_internal
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-template <typename Type>
-constexpr char FastTypeTag<Type>::dummy_var;
-#endif
+// The type returned by `absl::FastTypeId<T>()`.
+using FastTypeIdType = const void*;
-// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
+// `absl::FastTypeId<Type>()` evaluates at compile-time to a unique id for the
// passed-in type. These are meant to be good match for keys into maps or
// straight up comparisons.
-using FastTypeIdType = const void*;
-
template <typename Type>
-constexpr inline FastTypeIdType FastTypeId() {
- return &FastTypeTag<Type>::dummy_var;
+constexpr FastTypeIdType FastTypeId() {
+ return &base_internal::FastTypeTag<Type>::kDummyVar;
}
-} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
-#endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#endif // ABSL_BASE_FAST_TYPE_ID_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.cc b/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.cc
index 902e3f5ef1b..99466015096 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.cc
@@ -35,11 +35,6 @@ namespace base_internal {
#if ABSL_USE_UNSCALED_CYCLECLOCK
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr int32_t CycleClock::kShift;
-constexpr double CycleClock::kFrequencyScale;
-#endif
-
ABSL_CONST_INIT std::atomic<CycleClockSourceFunc>
CycleClock::cycle_clock_source_{nullptr};
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock_config.h b/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock_config.h
index 191112b58ef..50a46978687 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock_config.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock_config.h
@@ -18,7 +18,6 @@
#include <cstdint>
#include "absl/base/config.h"
-#include "absl/base/internal/inline_variable.h"
#include "absl/base/internal/unscaledcycleclock_config.h"
namespace absl {
@@ -31,22 +30,23 @@ namespace base_internal {
// Not debug mode and the UnscaledCycleClock frequency is the CPU
// frequency. Scale the CycleClock to prevent overflow if someone
// tries to represent the time as cycles since the Unix epoch.
-ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 1);
+inline constexpr int32_t kCycleClockShift = 1;
#else
// Not debug mode and the UnscaledCycleClock isn't operating at the
// raw CPU frequency. There is no need to do any scaling, so don't
// needlessly sacrifice precision.
-ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 0);
+inline constexpr int32_t kCycleClockShift = 0;
#endif
#else // NDEBUG
// In debug mode use a different shift to discourage depending on a
// particular shift value.
-ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 2);
+inline constexpr int32_t kCycleClockShift = 2;
#endif // NDEBUG
-ABSL_INTERNAL_INLINE_CONSTEXPR(double, kCycleClockFrequencyScale,
- 1.0 / (1 << kCycleClockShift));
-#endif // ABSL_USE_UNSCALED_CYCLECLOC
+inline constexpr double kCycleClockFrequencyScale =
+ 1.0 / (1 << kCycleClockShift);
+
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
} // namespace base_internal
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/endian.h b/contrib/restricted/abseil-cpp/absl/base/internal/endian.h
index 943f3d97e79..fb38f602470 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/endian.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/endian.h
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
+// This file is for Abseil internal use only.
+// See //absl/numeric/bits.h for supported functions related to endian-ness.
#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
#define ABSL_BASE_INTERNAL_ENDIAN_H_
@@ -28,44 +30,38 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-inline uint64_t gbswap_64(uint64_t host_int) {
+constexpr uint64_t gbswap_64(uint64_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__)
- return __builtin_bswap64(host_int);
-#elif defined(_MSC_VER)
- return _byteswap_uint64(host_int);
+ return __builtin_bswap64(x);
#else
- return (((host_int & uint64_t{0xFF}) << 56) |
- ((host_int & uint64_t{0xFF00}) << 40) |
- ((host_int & uint64_t{0xFF0000}) << 24) |
- ((host_int & uint64_t{0xFF000000}) << 8) |
- ((host_int & uint64_t{0xFF00000000}) >> 8) |
- ((host_int & uint64_t{0xFF0000000000}) >> 24) |
- ((host_int & uint64_t{0xFF000000000000}) >> 40) |
- ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+ return (((x & uint64_t{0xFF}) << 56) |
+ ((x & uint64_t{0xFF00}) << 40) |
+ ((x & uint64_t{0xFF0000}) << 24) |
+ ((x & uint64_t{0xFF000000}) << 8) |
+ ((x & uint64_t{0xFF00000000}) >> 8) |
+ ((x & uint64_t{0xFF0000000000}) >> 24) |
+ ((x & uint64_t{0xFF000000000000}) >> 40) |
+ ((x & uint64_t{0xFF00000000000000}) >> 56));
#endif
}
-inline uint32_t gbswap_32(uint32_t host_int) {
+constexpr uint32_t gbswap_32(uint32_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__)
- return __builtin_bswap32(host_int);
-#elif defined(_MSC_VER)
- return _byteswap_ulong(host_int);
+ return __builtin_bswap32(x);
#else
- return (((host_int & uint32_t{0xFF}) << 24) |
- ((host_int & uint32_t{0xFF00}) << 8) |
- ((host_int & uint32_t{0xFF0000}) >> 8) |
- ((host_int & uint32_t{0xFF000000}) >> 24));
+ return (((x & uint32_t{0xFF}) << 24) |
+ ((x & uint32_t{0xFF00}) << 8) |
+ ((x & uint32_t{0xFF0000}) >> 8) |
+ ((x & uint32_t{0xFF000000}) >> 24));
#endif
}
-inline uint16_t gbswap_16(uint16_t host_int) {
+constexpr uint16_t gbswap_16(uint16_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__)
- return __builtin_bswap16(host_int);
-#elif defined(_MSC_VER)
- return _byteswap_ushort(host_int);
+ return __builtin_bswap16(x);
#else
- return (((host_int & uint16_t{0xFF}) << 8) |
- ((host_int & uint16_t{0xFF00}) >> 8));
+ return (((x & uint16_t{0xFF}) << 8) |
+ ((x & uint16_t{0xFF00}) >> 8));
#endif
}
@@ -161,27 +157,27 @@ inline int64_t ToHost(int64_t x) {
}
// Functions to do unaligned loads and stores in little-endian order.
-inline uint16_t Load16(absl::Nonnull<const void *> p) {
+inline uint16_t Load16(const void* absl_nonnull p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}
-inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
+inline void Store16(void* absl_nonnull p, uint16_t v) {
ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}
-inline uint32_t Load32(absl::Nonnull<const void *> p) {
+inline uint32_t Load32(const void* absl_nonnull p) {
return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}
-inline void Store32(absl::Nonnull<void *> p, uint32_t v) {
+inline void Store32(void* absl_nonnull p, uint32_t v) {
ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}
-inline uint64_t Load64(absl::Nonnull<const void *> p) {
+inline uint64_t Load64(const void* absl_nonnull p) {
return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}
-inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
+inline void Store64(void* absl_nonnull p, uint64_t v) {
ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}
@@ -251,27 +247,27 @@ inline int64_t ToHost(int64_t x) {
}
// Functions to do unaligned loads and stores in big-endian order.
-inline uint16_t Load16(absl::Nonnull<const void *> p) {
+inline uint16_t Load16(const void* absl_nonnull p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}
-inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
+inline void Store16(void* absl_nonnull p, uint16_t v) {
ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}
-inline uint32_t Load32(absl::Nonnull<const void *> p) {
+inline uint32_t Load32(const void* absl_nonnull p) {
return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}
-inline void Store32(absl::Nonnull<void *>p, uint32_t v) {
+inline void Store32(void* absl_nonnull p, uint32_t v) {
ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}
-inline uint64_t Load64(absl::Nonnull<const void *> p) {
+inline uint64_t Load64(const void* absl_nonnull p) {
return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}
-inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
+inline void Store64(void* absl_nonnull p, uint64_t v) {
ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable.h b/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable.h
deleted file mode 100644
index 09daf0f5b73..00000000000
--- a/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
-#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
-
-#include <type_traits>
-
-#include "absl/base/internal/identity.h"
-
-// File:
-// This file define a macro that allows the creation of or emulation of C++17
-// inline variables based on whether or not the feature is supported.
-
-////////////////////////////////////////////////////////////////////////////////
-// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init)
-//
-// Description:
-// Expands to the equivalent of an inline constexpr instance of the specified
-// `type` and `name`, initialized to the value `init`. If the compiler being
-// used is detected as supporting actual inline variables as a language
-// feature, then the macro expands to an actual inline variable definition.
-//
-// Requires:
-// `type` is a type that is usable in an extern variable declaration.
-//
-// Requires: `name` is a valid identifier
-//
-// Requires:
-// `init` is an expression that can be used in the following definition:
-// constexpr type name = init;
-//
-// Usage:
-//
-// // Equivalent to: `inline constexpr size_t variant_npos = -1;`
-// ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
-//
-// Differences in implementation:
-// For a direct, language-level inline variable, decltype(name) will be the
-// type that was specified along with const qualification, whereas for
-// emulated inline variables, decltype(name) may be different (in practice
-// it will likely be a reference type).
-////////////////////////////////////////////////////////////////////////////////
-
-#ifdef __cpp_inline_variables
-
-// Clang's -Wmissing-variable-declarations option erroneously warned that
-// inline constexpr objects need to be pre-declared. This has now been fixed,
-// but we will need to support this workaround for people building with older
-// versions of clang.
-//
-// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
-//
-// Note:
-// type_identity_t is used here so that the const and name are in the
-// appropriate place for pointer types, reference types, function pointer
-// types, etc..
-#if defined(__clang__)
-#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
- extern const ::absl::internal::type_identity_t<type> name;
-#else // Otherwise, just define the macro to do nothing.
-#define ABSL_INTERNAL_EXTERN_DECL(type, name)
-#endif // defined(__clang__)
-
-// See above comment at top of file for details.
-#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
- ABSL_INTERNAL_EXTERN_DECL(type, name) \
- inline constexpr ::absl::internal::type_identity_t<type> name = init
-
-#else
-
-// See above comment at top of file for details.
-//
-// Note:
-// type_identity_t is used here so that the const and name are in the
-// appropriate place for pointer types, reference types, function pointer
-// types, etc..
-#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \
- template <class /*AbslInternalDummy*/ = void> \
- struct AbslInternalInlineVariableHolder##name { \
- static constexpr ::absl::internal::type_identity_t<var_type> kInstance = \
- init; \
- }; \
- \
- template <class AbslInternalDummy> \
- constexpr ::absl::internal::type_identity_t<var_type> \
- AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \
- \
- static constexpr const ::absl::internal::type_identity_t<var_type>& \
- name = /* NOLINT */ \
- AbslInternalInlineVariableHolder##name<>::kInstance; \
- static_assert(sizeof(void (*)(decltype(name))) != 0, \
- "Silence unused variable warnings.")
-
-#endif // __cpp_inline_variables
-
-#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable_testing.h b/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable_testing.h
deleted file mode 100644
index f3c81459fa0..00000000000
--- a/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable_testing.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
-#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
-
-#include "absl/base/internal/inline_variable.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace inline_variable_testing_internal {
-
-struct Foo {
- int value = 5;
-};
-
-ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
-ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
-
-ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
-ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
-
-ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr);
-
-const Foo& get_foo_a();
-const Foo& get_foo_b();
-
-const int& get_int_a();
-const int& get_int_b();
-
-} // namespace inline_variable_testing_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/invoke.h b/contrib/restricted/abseil-cpp/absl/base/internal/invoke.h
deleted file mode 100644
index 643c2a42f08..00000000000
--- a/contrib/restricted/abseil-cpp/absl/base/internal/invoke.h
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// absl::base_internal::invoke(f, args...) is an implementation of
-// INVOKE(f, args...) from section [func.require] of the C++ standard.
-// When compiled as C++17 and later versions, it is implemented as an alias of
-// std::invoke.
-//
-// [func.require]
-// Define INVOKE (f, t1, t2, ..., tN) as follows:
-// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
-// and t1 is an object of type T or a reference to an object of type T or a
-// reference to an object of a type derived from T;
-// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
-// class T and t1 is not one of the types described in the previous item;
-// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
-// an object of type T or a reference to an object of type T or a reference
-// to an object of a type derived from T;
-// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
-// is not one of the types described in the previous item;
-// 5. f(t1, t2, ..., tN) in all other cases.
-//
-// The implementation is SFINAE-friendly: substitution failure within invoke()
-// isn't an error.
-
-#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
-#define ABSL_BASE_INTERNAL_INVOKE_H_
-
-#include "absl/base/config.h"
-
-#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
-
-#include <functional>
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-using std::invoke;
-using std::invoke_result_t;
-using std::is_invocable_r;
-
-} // namespace base_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#else // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
-
-#include <algorithm>
-#include <type_traits>
-#include <utility>
-
-#include "absl/meta/type_traits.h"
-
-// The following code is internal implementation detail. See the comment at the
-// top of this file for the API documentation.
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// The five classes below each implement one of the clauses from the definition
-// of INVOKE. The inner class template Accept<F, Args...> checks whether the
-// clause is applicable; static function template Invoke(f, args...) does the
-// invocation.
-//
-// By separating the clause selection logic from invocation we make sure that
-// Invoke() does exactly what the standard says.
-
-template <typename Derived>
-struct StrippedAccept {
- template <typename... Args>
- struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
- typename std::remove_reference<Args>::type>::type...> {};
-};
-
-// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
-// and t1 is an object of type T or a reference to an object of type T or a
-// reference to an object of a type derived from T.
-struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
- template <typename... Args>
- struct AcceptImpl : std::false_type {};
-
- template <typename MemFunType, typename C, typename Obj, typename... Args>
- struct AcceptImpl<MemFunType C::*, Obj, Args...>
- : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
- absl::is_function<MemFunType>::value> {
- };
-
- template <typename MemFun, typename Obj, typename... Args>
- static decltype((std::declval<Obj>().*
- std::declval<MemFun>())(std::declval<Args>()...))
- Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
-// Ignore bogus GCC warnings on this line.
-// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example.
-#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Warray-bounds"
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
- return (std::forward<Obj>(obj).*
- std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
-#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
-#pragma GCC diagnostic pop
-#endif
- }
-};
-
-// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
-// class T and t1 is not one of the types described in the previous item.
-struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
- template <typename... Args>
- struct AcceptImpl : std::false_type {};
-
- template <typename MemFunType, typename C, typename Ptr, typename... Args>
- struct AcceptImpl<MemFunType C::*, Ptr, Args...>
- : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
- absl::is_function<MemFunType>::value> {
- };
-
- template <typename MemFun, typename Ptr, typename... Args>
- static decltype(((*std::declval<Ptr>()).*
- std::declval<MemFun>())(std::declval<Args>()...))
- Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
- return ((*std::forward<Ptr>(ptr)).*
- std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
- }
-};
-
-// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
-// an object of type T or a reference to an object of type T or a reference
-// to an object of a type derived from T.
-struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
- template <typename... Args>
- struct AcceptImpl : std::false_type {};
-
- template <typename R, typename C, typename Obj>
- struct AcceptImpl<R C::*, Obj>
- : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
- !absl::is_function<R>::value> {};
-
- template <typename DataMem, typename Ref>
- static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
- DataMem&& data_mem, Ref&& ref) {
- return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
- }
-};
-
-// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
-// is not one of the types described in the previous item.
-struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
- template <typename... Args>
- struct AcceptImpl : std::false_type {};
-
- template <typename R, typename C, typename Ptr>
- struct AcceptImpl<R C::*, Ptr>
- : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
- !absl::is_function<R>::value> {};
-
- template <typename DataMem, typename Ptr>
- static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
- DataMem&& data_mem, Ptr&& ptr) {
- return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
- }
-};
-
-// f(t1, t2, ..., tN) in all other cases.
-struct Callable {
- // Callable doesn't have Accept because it's the last clause that gets picked
- // when none of the previous clauses are applicable.
- template <typename F, typename... Args>
- static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
- F&& f, Args&&... args) {
- return std::forward<F>(f)(std::forward<Args>(args)...);
- }
-};
-
-// Resolves to the first matching clause.
-template <typename... Args>
-struct Invoker {
- typedef typename std::conditional<
- MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
- typename std::conditional<
- MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
- typename std::conditional<
- DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
- typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
- DataMemAndPtr, Callable>::type>::type>::
- type>::type type;
-};
-
-// The result type of Invoke<F, Args...>.
-template <typename F, typename... Args>
-using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
- std::declval<F>(), std::declval<Args>()...));
-
-// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
-// [func.require] of the C++ standard.
-template <typename F, typename... Args>
-invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
- return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
- std::forward<Args>(args)...);
-}
-
-template <typename AlwaysVoid, typename, typename, typename...>
-struct IsInvocableRImpl : std::false_type {};
-
-template <typename R, typename F, typename... Args>
-struct IsInvocableRImpl<
- absl::void_t<absl::base_internal::invoke_result_t<F, Args...> >, R, F,
- Args...>
- : std::integral_constant<
- bool,
- std::is_convertible<absl::base_internal::invoke_result_t<F, Args...>,
- R>::value ||
- std::is_void<R>::value> {};
-
-// Type trait whose member `value` is true if invoking `F` with `Args` is valid,
-// and either the return type is convertible to `R`, or `R` is void.
-// C++11-compatible version of `std::is_invocable_r`.
-template <typename R, typename F, typename... Args>
-using is_invocable_r = IsInvocableRImpl<void, R, F, Args...>;
-
-} // namespace base_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
-
-#endif // ABSL_BASE_INTERNAL_INVOKE_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/iterator_traits.h b/contrib/restricted/abseil-cpp/absl/base/internal/iterator_traits.h
new file mode 100644
index 00000000000..472c43688af
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/iterator_traits.h
@@ -0,0 +1,71 @@
+// Copyright 2025 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: internal/iterator_traits.h
+// -----------------------------------------------------------------------------
+//
+// Helpers for querying traits of iterators, for implementing containers, etc.
+
+#ifndef ABSL_BASE_INTERNAL_ITERATOR_TRAITS_H_
+#define ABSL_BASE_INTERNAL_ITERATOR_TRAITS_H_
+
+#include <iterator>
+#include <type_traits>
+
+#include "absl/base/config.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename Iterator, typename = void>
+struct IteratorCategory {};
+
+template <typename Iterator>
+struct IteratorCategory<
+ Iterator,
+ absl::void_t<typename std::iterator_traits<Iterator>::iterator_category>> {
+ using type = typename std::iterator_traits<Iterator>::iterator_category;
+};
+
+template <typename Iterator, typename = void>
+struct IteratorConceptImpl : IteratorCategory<Iterator> {};
+
+template <typename Iterator>
+struct IteratorConceptImpl<
+ Iterator,
+ absl::void_t<typename std::iterator_traits<Iterator>::iterator_concept>> {
+ using type = typename std::iterator_traits<Iterator>::iterator_concept;
+};
+
+// The newer `std::iterator_traits<Iterator>::iterator_concept` if available,
+// else `std::iterator_traits<Iterator>::iterator_category`.
+template <typename Iterator>
+using IteratorConcept = typename IteratorConceptImpl<Iterator>::type;
+
+template <typename IteratorTag, typename Iterator>
+using IsAtLeastIterator =
+ std::is_convertible<IteratorConcept<Iterator>, IteratorTag>;
+
+template <typename Iterator>
+using IsAtLeastForwardIterator =
+ IsAtLeastIterator<std::forward_iterator_tag, Iterator>;
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_ITERATOR_TRAITS_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/iterator_traits_test_helper.h b/contrib/restricted/abseil-cpp/absl/base/internal/iterator_traits_test_helper.h
new file mode 100644
index 00000000000..707612d6621
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/iterator_traits_test_helper.h
@@ -0,0 +1,97 @@
+// Copyright 2025 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ITERATOR_TRAITS_TEST_HELPER_H_
+#define ABSL_BASE_INTERNAL_ITERATOR_TRAITS_TEST_HELPER_H_
+
+#include <iterator>
+#include <utility>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// This would be a forward_iterator in C++20, but it's only an input iterator
+// before that, since it has a non-reference `reference`.
+template <typename Iterator>
+class Cpp20ForwardZipIterator {
+ using IteratorReference = typename std::iterator_traits<Iterator>::reference;
+
+ public:
+ Cpp20ForwardZipIterator() = default;
+ explicit Cpp20ForwardZipIterator(Iterator left, Iterator right)
+ : left_(left), right_(right) {}
+
+ Cpp20ForwardZipIterator& operator++() {
+ ++left_;
+ ++right_;
+ return *this;
+ }
+
+ Cpp20ForwardZipIterator operator++(int) {
+ Cpp20ForwardZipIterator tmp(*this);
+ ++*this;
+ return *this;
+ }
+
+ std::pair<IteratorReference, IteratorReference> operator*() const {
+ return {*left_, *right_};
+ }
+
+ // C++17 input iterators require `operator->`, but this isn't possible to
+ // implement. C++20 dropped the requirement.
+
+ friend bool operator==(const Cpp20ForwardZipIterator& lhs,
+ const Cpp20ForwardZipIterator& rhs) {
+ return lhs.left_ == rhs.left_ && lhs.right_ == rhs.right_;
+ }
+
+ friend bool operator!=(const Cpp20ForwardZipIterator& lhs,
+ const Cpp20ForwardZipIterator& rhs) {
+ return !(lhs == rhs);
+ }
+
+ private:
+ Iterator left_{};
+ Iterator right_{};
+};
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+template <typename Iterator>
+struct std::iterator_traits<
+ absl::base_internal::Cpp20ForwardZipIterator<Iterator>> {
+ private:
+ using IteratorReference = typename std::iterator_traits<Iterator>::reference;
+
+ public:
+ using iterator_category = std::input_iterator_tag;
+ using iterator_concept = std::forward_iterator_tag;
+ using value_type = std::pair<IteratorReference, IteratorReference>;
+ using difference_type =
+ typename std::iterator_traits<Iterator>::difference_type;
+ using reference = value_type;
+ using pointer = void;
+};
+
+#if defined(__cpp_lib_concepts)
+static_assert(
+ std::forward_iterator<absl::base_internal::Cpp20ForwardZipIterator<int*>>);
+#endif // defined(__cpp_lib_concepts)
+
+#endif // ABSL_BASE_INTERNAL_ITERATOR_TRAITS_TEST_HELPER_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.cc b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.cc
index a563f7b9f9b..158b60982f1 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.cc
@@ -330,7 +330,7 @@ size_t GetPageSize() {
GetSystemInfo(&system_info);
return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__)
- return getpagesize();
+ return static_cast<size_t>(getpagesize());
#else
return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
@@ -448,8 +448,8 @@ static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
-// L >= arena->mu
-static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
+static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(arena->mu) {
ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
AllocList *next = prev->next[i];
if (next != nullptr) {
@@ -473,6 +473,7 @@ static void Coalesce(AllocList *a) {
if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
reinterpret_cast<char *>(n)) {
LowLevelAlloc::Arena *arena = a->header.arena;
+ arena->mu.AssertHeld();
a->header.size += n->header.size;
n->header.magic = 0;
n->header.arena = nullptr;
@@ -486,8 +487,8 @@ static void Coalesce(AllocList *a) {
}
// Adds block at location "v" to the free list
-// L >= arena->mu
-static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
+static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(arena->mu) {
AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
sizeof(f->header));
ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/nullability_impl.h b/contrib/restricted/abseil-cpp/absl/base/internal/nullability_deprecated.h
index b601fc4ce94..1174a96eaa3 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/nullability_impl.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/nullability_deprecated.h
@@ -11,16 +11,11 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
-#define ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
-
-#include <memory>
-#include <type_traits>
+#ifndef ABSL_BASE_INTERNAL_NULLABILITY_DEPRECATED_H_
+#define ABSL_BASE_INTERNAL_NULLABILITY_DEPRECATED_H_
#include "absl/base/attributes.h"
#include "absl/base/config.h"
-#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -63,7 +58,49 @@ using NullabilityUnknownImpl
#endif
} // namespace nullability_internal
+
+// The following template aliases are deprecated forms of nullability
+// annotations. They have some limitations, for example, an incompatibility with
+// `auto*` pointers, as `auto` cannot be used in a template argument.
+//
+// It is important to note that these annotations are not distinct strong
+// *types*. They are alias templates defined to be equal to the underlying
+// pointer type. A pointer annotated `Nonnull<T*>`, for example, is simply a
+// pointer of type `T*`.
+//
+// Prefer the macro style annotations in `absl/base/nullability.h` instead.
+
+// absl::Nonnull, analogous to absl_nonnull
+//
+// Example:
+// absl::Nonnull<int*> foo;
+// Is equivalent to:
+// int* absl_nonnull foo;
+template <typename T>
+using Nonnull [[deprecated("Use `absl_nonnull`.")]] =
+ nullability_internal::NonnullImpl<T>;
+
+// absl::Nullable, analogous to absl_nullable
+//
+// Example:
+// absl::Nullable<int*> foo;
+// Is equivalent to:
+// int* absl_nullable foo;
+template <typename T>
+using Nullable [[deprecated("Use `absl_nullable`.")]] =
+ nullability_internal::NullableImpl<T>;
+
+// absl::NullabilityUnknown, analogous to absl_nullability_unknown
+//
+// Example:
+// absl::NullabilityUnknown<int*> foo;
+// Is equivalent to:
+// int* absl_nullability_unknown foo;
+template <typename T>
+using NullabilityUnknown [[deprecated("Use `absl_nullability_unknown`.")]] =
+ nullability_internal::NullabilityUnknownImpl<T>;
+
ABSL_NAMESPACE_END
} // namespace absl
-#endif // ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+#endif // ABSL_BASE_INTERNAL_NULLABILITY_DEPRECATED_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc
index 381b913b29f..430f775bdf9 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc
@@ -67,15 +67,6 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
submit_profile_data.Store(fn);
}
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-// Static member variable definitions.
-constexpr uint32_t SpinLock::kSpinLockHeld;
-constexpr uint32_t SpinLock::kSpinLockCooperative;
-constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
-constexpr uint32_t SpinLock::kSpinLockSleeper;
-constexpr uint32_t SpinLock::kWaitTimeMask;
-#endif
-
// Uncommon constructors.
SpinLock::SpinLock(base_internal::SchedulingMode mode)
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
index 1bb260f46bf..2a108969767 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
@@ -89,8 +89,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
- ABSL_MUST_USE_RESULT inline bool TryLock()
- ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ [[nodiscard]] inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
bool res = TryLockImpl();
ABSL_TSAN_MUTEX_POST_LOCK(
@@ -121,7 +120,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
// Determine if the lock is held. When the lock is held by the invoking
// thread, true will always be returned. Intended to be used as
// CHECK(lock.IsHeld()).
- ABSL_MUST_USE_RESULT inline bool IsHeld() const {
+ [[nodiscard]] inline bool IsHeld() const {
return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
}
@@ -203,16 +202,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
-//
-// TODO(b/176172494): Use only [[nodiscard]] when baseline is raised.
-// TODO(b/6695610): Remove forward declaration when #ifdef is no longer needed.
-#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
-class [[nodiscard]] SpinLockHolder;
-#else
-class ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_TRIVIAL_ABI SpinLockHolder;
-#endif
-
-class ABSL_SCOPED_LOCKABLE SpinLockHolder {
+class ABSL_SCOPED_LOCKABLE [[nodiscard]] SpinLockHolder {
public:
inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
: lock_(l) {
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/unaligned_access.h b/contrib/restricted/abseil-cpp/absl/base/internal/unaligned_access.h
index 4fea457491c..3f5dd6f9ff9 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/unaligned_access.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/unaligned_access.h
@@ -36,33 +36,33 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
-inline uint16_t UnalignedLoad16(absl::Nonnull<const void *> p) {
+inline uint16_t UnalignedLoad16(const void* absl_nonnull p) {
uint16_t t;
memcpy(&t, p, sizeof t);
return t;
}
-inline uint32_t UnalignedLoad32(absl::Nonnull<const void *> p) {
+inline uint32_t UnalignedLoad32(const void* absl_nonnull p) {
uint32_t t;
memcpy(&t, p, sizeof t);
return t;
}
-inline uint64_t UnalignedLoad64(absl::Nonnull<const void *> p) {
+inline uint64_t UnalignedLoad64(const void* absl_nonnull p) {
uint64_t t;
memcpy(&t, p, sizeof t);
return t;
}
-inline void UnalignedStore16(absl::Nonnull<void *> p, uint16_t v) {
+inline void UnalignedStore16(void* absl_nonnull p, uint16_t v) {
memcpy(p, &v, sizeof v);
}
-inline void UnalignedStore32(absl::Nonnull<void *> p, uint32_t v) {
+inline void UnalignedStore32(void* absl_nonnull p, uint32_t v) {
memcpy(p, &v, sizeof v);
}
-inline void UnalignedStore64(absl::Nonnull<void *> p, uint64_t v) {
+inline void UnalignedStore64(void* absl_nonnull p, uint64_t v) {
memcpy(p, &v, sizeof v);
}
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.h b/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.h
index 965c42de4ea..bfd98873152 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.h
@@ -88,9 +88,14 @@ inline int64_t UnscaledCycleClock::Now() {
#elif defined(__aarch64__)
// System timer of ARMv8 runs at a different frequency than the CPU's.
-// The frequency is fixed, typically in the range 1-50MHz. It can be
-// read at CNTFRQ special register. We assume the OS has set up
-// the virtual timer properly.
+//
+// Frequency is fixed. From Armv8.6-A and Armv9.1-A on, the frequency is 1GHz.
+// Pre-Armv8.6-A, the frequency was a system design choice, typically in the
+// range of 1MHz to 50MHz. See also:
+// https://developer.arm.com/documentation/102379/0101/What-is-the-Generic-Timer-
+//
+// It can be read at CNTFRQ special register. We assume the OS has set up the
+// virtual timer properly.
inline int64_t UnscaledCycleClock::Now() {
int64_t virtual_timer_value;
asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
diff --git a/contrib/restricted/abseil-cpp/absl/base/no_destructor.h b/contrib/restricted/abseil-cpp/absl/base/no_destructor.h
index 43b3540aeba..9d960ee3348 100644
--- a/contrib/restricted/abseil-cpp/absl/base/no_destructor.h
+++ b/contrib/restricted/abseil-cpp/absl/base/no_destructor.h
@@ -135,11 +135,11 @@ class NoDestructor {
// Pretend to be a smart pointer to T with deep constness.
// Never returns a null pointer.
T& operator*() { return *get(); }
- absl::Nonnull<T*> operator->() { return get(); }
- absl::Nonnull<T*> get() { return impl_.get(); }
+ T* absl_nonnull operator->() { return get(); }
+ T* absl_nonnull get() { return impl_.get(); }
const T& operator*() const { return *get(); }
- absl::Nonnull<const T*> operator->() const { return get(); }
- absl::Nonnull<const T*> get() const { return impl_.get(); }
+ const T* absl_nonnull operator->() const { return get(); }
+ const T* absl_nonnull get() const { return impl_.get(); }
private:
class DirectImpl {
@@ -147,8 +147,8 @@ class NoDestructor {
template <typename... Args>
explicit constexpr DirectImpl(Args&&... args)
: value_(std::forward<Args>(args)...) {}
- absl::Nonnull<const T*> get() const { return &value_; }
- absl::Nonnull<T*> get() { return &value_; }
+ const T* absl_nonnull get() const { return &value_; }
+ T* absl_nonnull get() { return &value_; }
private:
T value_;
@@ -160,33 +160,14 @@ class NoDestructor {
explicit PlacementImpl(Args&&... args) {
new (&space_) T(std::forward<Args>(args)...);
}
- absl::Nonnull<const T*> get() const {
- return Launder(reinterpret_cast<const T*>(&space_));
+ const T* absl_nonnull get() const {
+ return std::launder(reinterpret_cast<const T*>(&space_));
}
- absl::Nonnull<T*> get() { return Launder(reinterpret_cast<T*>(&space_)); }
-
- private:
- template <typename P>
- static absl::Nonnull<P*> Launder(absl::Nonnull<P*> p) {
-#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
- return std::launder(p);
-#elif ABSL_HAVE_BUILTIN(__builtin_launder)
- return __builtin_launder(p);
-#else
- // When `std::launder` or equivalent are not available, we rely on
- // undefined behavior, which works as intended on Abseil's officially
- // supported platforms as of Q3 2023.
-#if defined(__GNUC__) && !defined(__clang__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wstrict-aliasing"
-#endif
- return p;
-#if defined(__GNUC__) && !defined(__clang__)
-#pragma GCC diagnostic pop
-#endif
-#endif
+ T* absl_nonnull get() {
+ return std::launder(reinterpret_cast<T*>(&space_));
}
+ private:
alignas(T) unsigned char space_[sizeof(T)];
};
@@ -199,12 +180,10 @@ class NoDestructor {
impl_;
};
-#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
// Provide 'Class Template Argument Deduction': the type of NoDestructor's T
// will be the same type as the argument passed to NoDestructor's constructor.
template <typename T>
NoDestructor(T) -> NoDestructor<T>;
-#endif // ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/base/nullability.h b/contrib/restricted/abseil-cpp/absl/base/nullability.h
index 241c65ac25d..3a5d6e83e20 100644
--- a/contrib/restricted/abseil-cpp/absl/base/nullability.h
+++ b/contrib/restricted/abseil-cpp/absl/base/nullability.h
@@ -16,21 +16,21 @@
// File: nullability.h
// -----------------------------------------------------------------------------
//
-// This header file defines a set of "templated annotations" for designating the
-// expected nullability of pointers. These annotations allow you to designate
-// pointers in one of three classification states:
+// This header file defines a set of annotations for designating the expected
+// nullability of pointers. These annotations allow you to designate pointers in
+// one of three classification states:
//
-// * "Non-null" (for pointers annotated `Nonnull<T>`), indicating that it is
+// * "Non-null" (for pointers annotated `absl_nonnull`), indicating that it is
// invalid for the given pointer to ever be null.
-// * "Nullable" (for pointers annotated `Nullable<T>`), indicating that it is
+// * "Nullable" (for pointers annotated `absl_nullable`), indicating that it is
// valid for the given pointer to be null.
-// * "Unknown" (for pointers annotated `NullabilityUnknown<T>`), indicating
-// that the given pointer has not been yet classified as either nullable or
+// * "Unknown" (for pointers annotated `absl_nullability_unknown`), indicating
+// that the given pointer has not yet been classified as either nullable or
// non-null. This is the default state of unannotated pointers.
//
-// NOTE: unannotated pointers implicitly bear the annotation
-// `NullabilityUnknown<T>`; you should rarely, if ever, see this annotation used
-// in the codebase explicitly.
+// NOTE: Unannotated pointers implicitly bear the annotation
+// `absl_nullability_unknown`; you should rarely, if ever, see this annotation
+// used in the codebase explicitly.
//
// -----------------------------------------------------------------------------
// Nullability and Contracts
@@ -64,16 +64,49 @@
// formalize those contracts within the codebase.
//
// -----------------------------------------------------------------------------
+// Annotation Syntax
+// -----------------------------------------------------------------------------
+//
+// The annotations should be positioned as a qualifier for the pointer type. For
+// example, the position of `const` when declaring a const pointer (not a
+// pointer to a const type) is the position you should also use for these
+// annotations.
+//
+// Example:
+//
+// // A const non-null pointer to an `Employee`.
+// Employee* absl_nonnull const e;
+//
+// // A non-null pointer to a const `Employee`.
+// const Employee* absl_nonnull e;
+//
+// // A non-null pointer to a const nullable pointer to an `Employee`.
+// Employee* absl_nullable const* absl_nonnull e = nullptr;
+//
+// // A non-null function pointer.
+// void (*absl_nonnull func)(int, double);
+//
+// // A non-null array of `Employee`s as a parameter.
+// void func(Employee employees[absl_nonnull]);
+//
+// // A non-null std::unique_ptr to an `Employee`.
+// // As with `const`, it is possible to place the annotation on either side of
+// // a named type not ending in `*`, but placing it before the type it
+// // describes is preferred, unless inconsistent with surrounding code.
+// absl_nonnull std::unique_ptr<Employee> employee;
+//
+// // Invalid annotation usage – this attempts to declare a pointer to a
+// // nullable `Employee`, which is meaningless.
+// absl_nullable Employee* e;
+//
+// -----------------------------------------------------------------------------
// Using Nullability Annotations
// -----------------------------------------------------------------------------
//
-// It is important to note that these annotations are not distinct strong
-// *types*. They are alias templates defined to be equal to the underlying
-// pointer type. A pointer annotated `Nonnull<T*>`, for example, is simply a
-// pointer of type `T*`. Each annotation acts as a form of documentation about
-// the contract for the given pointer. Each annotation requires providers or
-// consumers of these pointers across API boundaries to take appropriate steps
-// when setting or using these pointers:
+// Each annotation acts as a form of documentation about the contract for the
+// given pointer. Each annotation requires providers or consumers of these
+// pointers across API boundaries to take appropriate steps when setting or
+// using these pointers:
//
// * "Non-null" pointers should never be null. It is the responsibility of the
// provider of this pointer to ensure that the pointer may never be set to
@@ -91,20 +124,20 @@
// Example:
//
// // PaySalary() requires the passed pointer to an `Employee` to be non-null.
-// void PaySalary(absl::Nonnull<Employee *> e) {
+// void PaySalary(Employee* absl_nonnull e) {
// pay(e->salary); // OK to dereference
// }
//
// // CompleteTransaction() guarantees the returned pointer to an `Account` to
// // be non-null.
-// absl::Nonnull<Account *> balance CompleteTransaction(double fee) {
+// Account* absl_nonnull balance CompleteTransaction(double fee) {
// ...
// }
//
// // Note that specifying a nullability annotation does not prevent someone
// // from violating the contract:
//
-// Nullable<Employee *> find(Map& employees, std::string_view name);
+// Employee* absl_nullable find(Map& employees, std::string_view name);
//
// void g(Map& employees) {
// Employee *e = find(employees, "Pat");
@@ -144,14 +177,14 @@
// These nullability annotations are primarily a human readable signal about the
// intended contract of the pointer. They are not *types* and do not currently
// provide any correctness guarantees. For example, a pointer annotated as
-// `Nonnull<T*>` is *not guaranteed* to be non-null, and the compiler won't
-// alert or prevent assignment of a `Nullable<T*>` to a `Nonnull<T*>`.
+// `absl_nonnull` is *not guaranteed* to be non-null, and the compiler won't
+// alert or prevent assignment of a `T* absl_nullable` to a `T* absl_nonnull`.
// ===========================================================================
#ifndef ABSL_BASE_NULLABILITY_H_
#define ABSL_BASE_NULLABILITY_H_
#include "absl/base/config.h"
-#include "absl/base/internal/nullability_impl.h"
+#include "absl/base/internal/nullability_deprecated.h"
// ABSL_POINTERS_DEFAULT_NONNULL
//
@@ -168,14 +201,14 @@
// ABSL_POINTERS_DEFAULT_NONNULL
//
// void FillMessage(Message *m); // implicitly non-null
-// absl::Nullable<T*> GetNullablePtr(); // explicitly nullable
-// absl::NullabilityUnknown<T*> GetUnknownPtr(); // explicitly unknown
+// T* absl_nullable GetNullablePtr(); // explicitly nullable
+// T* absl_nullability_unknown GetUnknownPtr(); // explicitly unknown
//
-// The macro can be safely used in header files -- it will not affect any files
+// The macro can be safely used in header files – it will not affect any files
// that include it.
//
-// In files with the macro, plain `T*` syntax means `absl::Nonnull<T*>`, and the
-// exceptions (`Nullable` and `NullabilityUnknown`) must be marked
+// In files with the macro, plain `T*` syntax means `T* absl_nonnull`, and the
+// exceptions (`absl_nullable` and `absl_nullability_unknown`) must be marked
// explicitly. The same holds, correspondingly, for smart pointer types.
//
// For comparison, without the macro, all unannotated pointers would default to
@@ -183,17 +216,16 @@
//
// #include "absl/base/nullability.h"
//
-// void FillMessage(absl::Nonnull<Message*> m); // explicitly non-null
-// absl::Nullable<T*> GetNullablePtr(); // explicitly nullable
+// void FillMessage(Message* absl_nonnull m); // explicitly non-null
+// T* absl_nullable GetNullablePtr(); // explicitly nullable
// T* GetUnknownPtr(); // implicitly unknown
//
// No-op except for being a human readable signal.
#define ABSL_POINTERS_DEFAULT_NONNULL
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// absl::Nonnull (default with `ABSL_POINTERS_DEFAULT_NONNULL`)
+#if defined(__clang__) && !defined(__OBJC__) && \
+ ABSL_HAVE_FEATURE(nullability_on_classes)
+// absl_nonnull (default with `ABSL_POINTERS_DEFAULT_NONNULL`)
//
// The indicated pointer is never null. It is the responsibility of the provider
// of this pointer across an API boundary to ensure that the pointer is never
@@ -203,13 +235,12 @@ ABSL_NAMESPACE_BEGIN
// Example:
//
// // `employee` is designated as not null.
-// void PaySalary(absl::Nonnull<Employee *> employee) {
+// void PaySalary(Employee* absl_nonnull employee) {
// pay(*employee); // OK to dereference
// }
-template <typename T>
-using Nonnull = nullability_internal::NonnullImpl<T>;
+#define absl_nonnull _Nonnull
-// absl::Nullable
+// absl_nullable
//
// The indicated pointer may, by design, be either null or non-null. Consumers
// of this pointer across an API boundary should perform a `nullptr` check
@@ -218,15 +249,14 @@ using Nonnull = nullability_internal::NonnullImpl<T>;
// Example:
//
// // `employee` may be null.
-// void PaySalary(absl::Nullable<Employee *> employee) {
+// void PaySalary(Employee* absl_nullable employee) {
// if (employee != nullptr) {
// Pay(*employee); // OK to dereference
// }
// }
-template <typename T>
-using Nullable = nullability_internal::NullableImpl<T>;
+#define absl_nullable _Nullable
-// absl::NullabilityUnknown (default without `ABSL_POINTERS_DEFAULT_NONNULL`)
+// absl_nullability_unknown (default without `ABSL_POINTERS_DEFAULT_NONNULL`)
//
// The indicated pointer has not yet been determined to be definitively
// "non-null" or "nullable." Providers of such pointers across API boundaries
@@ -234,8 +264,8 @@ using Nullable = nullability_internal::NullableImpl<T>;
// Consumers of these pointers across an API boundary should treat such pointers
// with the same caution they treat currently unannotated pointers. Most
// existing code will have "unknown" pointers, which should eventually be
-// migrated into one of the above two nullability states: `Nonnull<T>` or
-// `Nullable<T>`.
+// migrated into one of the above two nullability states: `absl_nonnull` or
+// `absl_nullable`.
//
// NOTE: For files that do not specify `ABSL_POINTERS_DEFAULT_NONNULL`,
// because this annotation is the global default state, unannotated pointers are
@@ -245,7 +275,7 @@ using Nullable = nullability_internal::NullableImpl<T>;
// Example:
//
// // `employee`s nullability state is unknown.
-// void PaySalary(absl::NullabilityUnknown<Employee *> employee) {
+// void PaySalary(Employee* absl_nullability_unknown employee) {
// Pay(*employee); // Potentially dangerous. API provider should investigate.
// }
//
@@ -256,11 +286,15 @@ using Nullable = nullability_internal::NullableImpl<T>;
// void PaySalary(Employee* employee) {
// Pay(*employee); // Potentially dangerous. API provider should investigate.
// }
-template <typename T>
-using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl<T>;
-
-ABSL_NAMESPACE_END
-} // namespace absl
+#define absl_nullability_unknown _Null_unspecified
+#else
+// No-op for non-Clang compilers or Objective-C.
+#define absl_nonnull
+// No-op for non-Clang compilers or Objective-C.
+#define absl_nullable
+// No-op for non-Clang compilers or Objective-C.
+#define absl_nullability_unknown
+#endif
// ABSL_NULLABILITY_COMPATIBLE
//
@@ -281,26 +315,4 @@ ABSL_NAMESPACE_END
#define ABSL_NULLABILITY_COMPATIBLE
#endif
-// ABSL_NONNULL
-// ABSL_NULLABLE
-// ABSL_NULLABILITY_UNKNOWN
-//
-// These macros are analogues of the alias template nullability annotations
-// above.
-//
-// Example:
-// int* ABSL_NULLABLE foo;
-// Is equivalent to:
-// absl::Nullable<int*> foo;
-#if defined(__clang__) && !defined(__OBJC__) && \
- ABSL_HAVE_FEATURE(nullability_on_classes)
-#define ABSL_NONNULL _Nonnull
-#define ABSL_NULLABLE _Nullable
-#define ABSL_NULLABILITY_UNKNOWN _Null_unspecified
-#else
-#define ABSL_NONNULL
-#define ABSL_NULLABLE
-#define ABSL_NULLABILITY_UNKNOWN
-#endif
-
#endif // ABSL_BASE_NULLABILITY_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/options.h b/contrib/restricted/abseil-cpp/absl/base/options.h
index 5caa58f6e3b..f904f64465a 100644
--- a/contrib/restricted/abseil-cpp/absl/base/options.h
+++ b/contrib/restricted/abseil-cpp/absl/base/options.h
@@ -64,65 +64,14 @@
// proper Abseil implementation at compile-time, which will not be sufficient
// to guarantee ABI stability to package managers.
+// SKIP_ABSL_INLINE_NAMESPACE_CHECK
+
#ifndef ABSL_BASE_OPTIONS_H_
#define ABSL_BASE_OPTIONS_H_
// -----------------------------------------------------------------------------
// Type Compatibility Options
// -----------------------------------------------------------------------------
-//
-// ABSL_OPTION_USE_STD_ANY
-//
-// This option controls whether absl::any is implemented as an alias to
-// std::any, or as an independent implementation.
-//
-// A value of 0 means to use Abseil's implementation. This requires only C++11
-// support, and is expected to work on every toolchain we support.
-//
-// A value of 1 means to use an alias to std::any. This requires that all code
-// using Abseil is built in C++17 mode or later.
-//
-// A value of 2 means to detect the C++ version being used to compile Abseil,
-// and use an alias only if a working std::any is available. This option is
-// useful when you are building your entire program, including all of its
-// dependencies, from source. It should not be used otherwise -- for example,
-// if you are distributing Abseil in a binary package manager -- since in
-// mode 2, absl::any will name a different type, with a different mangled name
-// and binary layout, depending on the compiler flags passed by the end user.
-// For more info, see https://abseil.io/about/design/dropin-types.
-//
-// User code should not inspect this macro. To check in the preprocessor if
-// absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY.
-
-#define ABSL_OPTION_USE_STD_ANY 2
-
-
-// ABSL_OPTION_USE_STD_OPTIONAL
-//
-// This option controls whether absl::optional is implemented as an alias to
-// std::optional, or as an independent implementation.
-//
-// A value of 0 means to use Abseil's implementation. This requires only C++11
-// support, and is expected to work on every toolchain we support.
-//
-// A value of 1 means to use an alias to std::optional. This requires that all
-// code using Abseil is built in C++17 mode or later.
-//
-// A value of 2 means to detect the C++ version being used to compile Abseil,
-// and use an alias only if a working std::optional is available. This option
-// is useful when you are building your program from source. It should not be
-// used otherwise -- for example, if you are distributing Abseil in a binary
-// package manager -- since in mode 2, absl::optional will name a different
-// type, with a different mangled name and binary layout, depending on the
-// compiler flags passed by the end user. For more info, see
-// https://abseil.io/about/design/dropin-types.
-
-// User code should not inspect this macro. To check in the preprocessor if
-// absl::optional is a typedef of std::optional, use the feature macro
-// ABSL_USES_STD_OPTIONAL.
-
-#define ABSL_OPTION_USE_STD_OPTIONAL 2
-
// ABSL_OPTION_USE_STD_STRING_VIEW
//
@@ -150,32 +99,6 @@
#define ABSL_OPTION_USE_STD_STRING_VIEW 2
-// ABSL_OPTION_USE_STD_VARIANT
-//
-// This option controls whether absl::variant is implemented as an alias to
-// std::variant, or as an independent implementation.
-//
-// A value of 0 means to use Abseil's implementation. This requires only C++11
-// support, and is expected to work on every toolchain we support.
-//
-// A value of 1 means to use an alias to std::variant. This requires that all
-// code using Abseil is built in C++17 mode or later.
-//
-// A value of 2 means to detect the C++ version being used to compile Abseil,
-// and use an alias only if a working std::variant is available. This option
-// is useful when you are building your program from source. It should not be
-// used otherwise -- for example, if you are distributing Abseil in a binary
-// package manager -- since in mode 2, absl::variant will name a different
-// type, with a different mangled name and binary layout, depending on the
-// compiler flags passed by the end user. For more info, see
-// https://abseil.io/about/design/dropin-types.
-//
-// User code should not inspect this macro. To check in the preprocessor if
-// absl::variant is a typedef of std::variant, use the feature macro
-// ABSL_USES_STD_VARIANT.
-
-#define ABSL_OPTION_USE_STD_VARIANT 2
-
// ABSL_OPTION_USE_STD_ORDERING
//
// This option controls whether absl::{partial,weak,strong}_ordering are
@@ -226,7 +149,7 @@
// allowed.
#define ABSL_OPTION_USE_INLINE_NAMESPACE 1
-#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20250127
+#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20250512
// ABSL_OPTION_HARDENED
//
diff --git a/contrib/restricted/abseil-cpp/absl/base/policy_checks.h b/contrib/restricted/abseil-cpp/absl/base/policy_checks.h
index 7538166bed2..f84944cfe22 100644
--- a/contrib/restricted/abseil-cpp/absl/base/policy_checks.h
+++ b/contrib/restricted/abseil-cpp/absl/base/policy_checks.h
@@ -71,15 +71,15 @@
// C++ Version Check
// -----------------------------------------------------------------------------
-// Enforce C++14 as the minimum.
+// Enforce C++17 as the minimum.
#if defined(_MSVC_LANG)
-#if _MSVC_LANG < 201402L
-#error "C++ versions less than C++14 are not supported."
-#endif // _MSVC_LANG < 201402L
+#if _MSVC_LANG < 201703L
+#error "C++ versions less than C++17 are not supported."
+#endif // _MSVC_LANG < 201703L
#elif defined(__cplusplus)
-#if __cplusplus < 201402L
-#error "C++ versions less than C++14 are not supported."
-#endif // __cplusplus < 201402L
+#if __cplusplus < 201703L
+#error "C++ versions less than C++17 are not supported."
+#endif // __cplusplus < 201703L
#endif
// -----------------------------------------------------------------------------
diff --git a/contrib/restricted/abseil-cpp/absl/cleanup/cleanup.h b/contrib/restricted/abseil-cpp/absl/cleanup/cleanup.h
index 960ccd080e0..311e4828cd3 100644
--- a/contrib/restricted/abseil-cpp/absl/cleanup/cleanup.h
+++ b/contrib/restricted/abseil-cpp/absl/cleanup/cleanup.h
@@ -78,7 +78,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
template <typename Arg, typename Callback = void()>
-class ABSL_MUST_USE_RESULT Cleanup final {
+class [[nodiscard]] Cleanup final {
static_assert(cleanup_internal::WasDeduced<Arg>(),
"Explicit template parameters are not supported.");
@@ -115,10 +115,8 @@ class ABSL_MUST_USE_RESULT Cleanup final {
// `absl::Cleanup c = /* callback */;`
//
// C++17 type deduction API for creating an instance of `absl::Cleanup`
-#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
template <typename Callback>
Cleanup(Callback callback) -> Cleanup<cleanup_internal::Tag, Callback>;
-#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
// `auto c = absl::MakeCleanup(/* callback */);`
//
diff --git a/contrib/restricted/abseil-cpp/absl/cleanup/internal/cleanup.h b/contrib/restricted/abseil-cpp/absl/cleanup/internal/cleanup.h
index 2783fcb7c16..4dd6f9132b9 100644
--- a/contrib/restricted/abseil-cpp/absl/cleanup/internal/cleanup.h
+++ b/contrib/restricted/abseil-cpp/absl/cleanup/internal/cleanup.h
@@ -19,7 +19,6 @@
#include <type_traits>
#include <utility>
-#include "absl/base/internal/invoke.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"
#include "absl/utility/utility.h"
@@ -39,7 +38,7 @@ constexpr bool WasDeduced() {
template <typename Callback>
constexpr bool ReturnsVoid() {
- return (std::is_same<base_internal::invoke_result_t<Callback>, void>::value);
+ return (std::is_same<std::invoke_result_t<Callback>, void>::value);
}
template <typename Callback>
@@ -70,7 +69,7 @@ class Storage {
Storage& operator=(const Storage& other) = delete;
- void* GetCallbackBuffer() { return static_cast<void*>(+callback_buffer_); }
+ void* GetCallbackBuffer() { return static_cast<void*>(callback_buffer_); }
Callback& GetCallback() {
return *reinterpret_cast<Callback*>(GetCallbackBuffer());
@@ -89,7 +88,7 @@ class Storage {
private:
bool is_callback_engaged_;
- alignas(Callback) char callback_buffer_[sizeof(Callback)];
+ alignas(Callback) unsigned char callback_buffer_[sizeof(Callback)];
};
} // namespace cleanup_internal
diff --git a/contrib/restricted/abseil-cpp/absl/container/btree_map.h b/contrib/restricted/abseil-cpp/absl/container/btree_map.h
index 470de2a1992..32a82ef062c 100644
--- a/contrib/restricted/abseil-cpp/absl/container/btree_map.h
+++ b/contrib/restricted/abseil-cpp/absl/container/btree_map.h
@@ -47,8 +47,10 @@
// iterator at the current position. Another important difference is that
// key-types must be copy-constructible.
//
-// Another API difference is that btree iterators can be subtracted, and this
-// is faster than using std::distance.
+// There are other API differences: first, btree iterators can be subtracted,
+// and this is faster than using `std::distance`. Additionally, btree
+// iterators can be advanced via `operator+=` and `operator-=`, which is faster
+// than using `std::advance`.
//
// B-tree maps are not exception-safe.
diff --git a/contrib/restricted/abseil-cpp/absl/container/btree_set.h b/contrib/restricted/abseil-cpp/absl/container/btree_set.h
index e57d6d9b68f..16181de577f 100644
--- a/contrib/restricted/abseil-cpp/absl/container/btree_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/btree_set.h
@@ -46,8 +46,10 @@
// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid
// iterator at the current position.
//
-// Another API difference is that btree iterators can be subtracted, and this
-// is faster than using std::distance.
+// There are other API differences: first, btree iterators can be subtracted,
+// and this is faster than using `std::distance`. Additionally, btree
+// iterators can be advanced via `operator+=` and `operator-=`, which is faster
+// than using `std::advance`.
//
// B-tree sets are not exception-safe.
diff --git a/contrib/restricted/abseil-cpp/absl/container/fixed_array.h b/contrib/restricted/abseil-cpp/absl/container/fixed_array.h
index 95abb0a59dd..6c238fc381f 100644
--- a/contrib/restricted/abseil-cpp/absl/container/fixed_array.h
+++ b/contrib/restricted/abseil-cpp/absl/container/fixed_array.h
@@ -44,11 +44,13 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/iterator_traits.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/compressed_tuple.h"
+#include "absl/hash/internal/weakly_mixed_integer.h"
#include "absl/memory/memory.h"
namespace absl {
@@ -85,9 +87,8 @@ class ABSL_ATTRIBUTE_WARN_UNUSED FixedArray {
// std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
// but this seems to be mostly pedantic.
template <typename Iterator>
- using EnableIfForwardIterator = absl::enable_if_t<std::is_convertible<
- typename std::iterator_traits<Iterator>::iterator_category,
- std::forward_iterator_tag>::value>;
+ using EnableIfForwardIterator = std::enable_if_t<
+ base_internal::IsAtLeastForwardIterator<Iterator>::value>;
static constexpr bool NoexceptCopyable() {
return std::is_nothrow_copy_constructible<StorageElement>::value &&
absl::allocator_is_nothrow<allocator_type>::value;
@@ -392,7 +393,7 @@ class ABSL_ATTRIBUTE_WARN_UNUSED FixedArray {
template <typename H>
friend H AbslHashValue(H h, const FixedArray& v) {
return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()),
- v.size());
+ hash_internal::WeaklyMixedInteger{v.size()});
}
private:
@@ -447,7 +448,8 @@ class ABSL_ATTRIBUTE_WARN_UNUSED FixedArray {
private:
ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_);
- alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
+ alignas(StorageElement) unsigned char buff_[sizeof(
+ StorageElement[inline_elements])];
ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_);
};
@@ -517,15 +519,6 @@ class ABSL_ATTRIBUTE_WARN_UNUSED FixedArray {
Storage storage_;
};
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-template <typename T, size_t N, typename A>
-constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
-
-template <typename T, size_t N, typename A>
-constexpr typename FixedArray<T, N, A>::size_type
- FixedArray<T, N, A>::inline_elements;
-#endif
-
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
typename FixedArray<T, N, A>::size_type n) {
diff --git a/contrib/restricted/abseil-cpp/absl/container/flat_hash_map.h b/contrib/restricted/abseil-cpp/absl/container/flat_hash_map.h
index 735ee3424df..bc86ced9971 100644
--- a/contrib/restricted/abseil-cpp/absl/container/flat_hash_map.h
+++ b/contrib/restricted/abseil-cpp/absl/container/flat_hash_map.h
@@ -104,6 +104,11 @@ struct FlatHashMapPolicy;
// If your types are not moveable or you require pointer stability for keys,
// consider `absl::node_hash_map`.
//
+// PERFORMANCE WARNING: Erasure & sparsity can negatively affect performance:
+// * Iteration takes O(capacity) time, not O(size).
+// * erase() slows down begin() and ++iterator.
+// * Capacity only shrinks on rehash() or clear() -- not on erase().
+//
// Example:
//
// // Create a flat hash map of three strings (that map to strings)
diff --git a/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h
index b5d0f7f9559..bf63eb59867 100644
--- a/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h
@@ -62,7 +62,7 @@ struct FlatHashSetPolicy;
// Its interface is similar to that of `std::unordered_set<T>` with the
// following notable differences:
//
-// * Requires keys that are CopyConstructible
+// * Requires keys that are MoveConstructible
// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
// that the set is provided a compatible heterogeneous hashing function and
// equality operator. See below for details.
@@ -103,6 +103,11 @@ struct FlatHashSetPolicy;
// `absl::flat_hash_set<std::unique_ptr<T>>`. If your type is not moveable and
// you require pointer stability, consider `absl::node_hash_set` instead.
//
+// PERFORMANCE WARNING: Erasure & sparsity can negatively affect performance:
+// * Iteration takes O(capacity) time, not O(size).
+// * erase() slows down begin() and ++iterator.
+// * Capacity only shrinks on rehash() or clear() -- not on erase().
+//
// Example:
//
// // Create a flat hash set of three strings
diff --git a/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h b/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h
index cbf8bc2c1fc..f871b349134 100644
--- a/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h
@@ -47,11 +47,13 @@
#include "absl/algorithm/algorithm.h"
#include "absl/base/attributes.h"
+#include "absl/base/internal/iterator_traits.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/inlined_vector.h"
+#include "absl/hash/internal/weakly_mixed_integer.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
@@ -90,11 +92,11 @@ class ABSL_ATTRIBUTE_WARN_UNUSED InlinedVector {
inlined_vector_internal::DefaultValueAdapter<TheA>;
template <typename Iterator>
- using EnableIfAtLeastForwardIterator = absl::enable_if_t<
- inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
+ using EnableIfAtLeastForwardIterator = std::enable_if_t<
+ base_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
template <typename Iterator>
- using DisableIfAtLeastForwardIterator = absl::enable_if_t<
- !inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
+ using DisableIfAtLeastForwardIterator = std::enable_if_t<
+ !base_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
using MemcpyPolicy = typename Storage::MemcpyPolicy;
using ElementwiseAssignPolicy = typename Storage::ElementwiseAssignPolicy;
@@ -1007,7 +1009,8 @@ bool operator>=(const absl::InlinedVector<T, N, A>& a,
template <typename H, typename T, size_t N, typename A>
H AbslHashValue(H h, const absl::InlinedVector<T, N, A>& a) {
auto size = a.size();
- return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size);
+ return H::combine(H::combine_contiguous(std::move(h), a.data(), size),
+ hash_internal::WeaklyMixedInteger{size});
}
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/btree.h b/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
index 689e71a5ce3..ed541e758a6 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
@@ -60,6 +60,7 @@
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/common_policy_traits.h"
#include "absl/container/internal/compressed_tuple.h"
@@ -224,7 +225,7 @@ struct key_compare_adapter {
public:
using Base::Base;
- checked_compare(Compare comp) : Base(std::move(comp)) {} // NOLINT
+ checked_compare(Compare cmp) : Base(std::move(cmp)) {} // NOLINT
// Allow converting to Compare for use in key_comp()/value_comp().
explicit operator Compare() const { return comp(); }
@@ -708,6 +709,8 @@ class btree_node {
}
// Getter for the parent of this node.
+ // TODO(ezb): assert that the child of the returned node at position
+ // `node_->position()` maps to the current node.
btree_node *parent() const { return *GetField<0>(); }
// Getter for whether the node is the root of the tree. The parent of the
// root of the tree is the leftmost node in the tree which is guaranteed to
@@ -1175,6 +1178,26 @@ class btree_iterator : private btree_iterator_generation_info {
return distance_slow(other);
}
+ // Advances the iterator by `n`. Values of `n` must not result in going past
+ // the `end` iterator (for a positive `n`) or before the `begin` iterator (for
+ // a negative `n`).
+ btree_iterator &operator+=(difference_type n) {
+ assert_valid_generation(node_);
+ if (n == 0) return *this;
+ if (n < 0) return decrement_n_slow(-n);
+ return increment_n_slow(n);
+ }
+
+ // Moves the iterator by `n` positions backwards. Values of `n` must not
+ // result in going before the `begin` iterator (for a positive `n`) or past
+ // the `end` iterator (for a negative `n`).
+ btree_iterator &operator-=(difference_type n) {
+ assert_valid_generation(node_);
+ if (n == 0) return *this;
+ if (n < 0) return increment_n_slow(-n);
+ return decrement_n_slow(n);
+ }
+
// Accessors for the key/value the iterator is pointing at.
reference operator*() const {
ABSL_HARDENING_ASSERT(node_ != nullptr);
@@ -1277,6 +1300,7 @@ class btree_iterator : private btree_iterator_generation_info {
increment_slow();
}
void increment_slow();
+ btree_iterator &increment_n_slow(difference_type n);
void decrement() {
assert_valid_generation(node_);
@@ -1286,6 +1310,7 @@ class btree_iterator : private btree_iterator_generation_info {
decrement_slow();
}
void decrement_slow();
+ btree_iterator &decrement_n_slow(difference_type n);
const key_type &key() const {
return node_->key(static_cast<size_type>(position_));
@@ -2126,50 +2151,128 @@ auto btree_iterator<N, R, P>::distance_slow(const_iterator other) const
template <typename N, typename R, typename P>
void btree_iterator<N, R, P>::increment_slow() {
- if (node_->is_leaf()) {
- assert(position_ >= node_->finish());
- btree_iterator save(*this);
- while (position_ == node_->finish() && !node_->is_root()) {
- assert(node_->parent()->child(node_->position()) == node_);
- position_ = node_->position();
- node_ = node_->parent();
+ N* node = node_;
+ int position = position_;
+ if (node->is_leaf()) {
+ assert(position >= node->finish());
+ while (position == node->finish() && !node->is_root()) {
+ assert(node->parent()->child(node->position()) == node);
+ position = node->position();
+ node = node->parent();
}
// TODO(ezb): assert we aren't incrementing end() instead of handling.
- if (position_ == node_->finish()) {
- *this = save;
+ if (position == node->finish()) {
+ return;
}
} else {
- assert(position_ < node_->finish());
- node_ = node_->child(static_cast<field_type>(position_ + 1));
- while (node_->is_internal()) {
- node_ = node_->start_child();
+ assert(position < node->finish());
+ node = node->child(static_cast<field_type>(position + 1));
+ while (node->is_internal()) {
+ node = node->start_child();
}
- position_ = node_->start();
+ position = node->start();
}
+ *this = {node, position};
}
template <typename N, typename R, typename P>
void btree_iterator<N, R, P>::decrement_slow() {
- if (node_->is_leaf()) {
- assert(position_ <= -1);
- btree_iterator save(*this);
- while (position_ < node_->start() && !node_->is_root()) {
- assert(node_->parent()->child(node_->position()) == node_);
- position_ = node_->position() - 1;
- node_ = node_->parent();
+ N* node = node_;
+ int position = position_;
+ if (node->is_leaf()) {
+ assert(position <= -1);
+ while (position < node->start() && !node->is_root()) {
+ assert(node->parent()->child(node->position()) == node);
+ position = node->position() - 1;
+ node = node->parent();
}
// TODO(ezb): assert we aren't decrementing begin() instead of handling.
- if (position_ < node_->start()) {
- *this = save;
+ if (position < node->start()) {
+ return;
}
} else {
- assert(position_ >= node_->start());
- node_ = node_->child(static_cast<field_type>(position_));
- while (node_->is_internal()) {
- node_ = node_->child(node_->finish());
+ assert(position >= node->start());
+ node = node->child(static_cast<field_type>(position));
+ while (node->is_internal()) {
+ node = node->child(node->finish());
}
- position_ = node_->finish() - 1;
+ position = node->finish() - 1;
}
+ *this = {node, position};
+}
+
+template <typename N, typename R, typename P>
+btree_iterator<N, R, P> &btree_iterator<N, R, P>::increment_n_slow(
+ difference_type n) {
+ N *node = node_;
+ int position = position_;
+ ABSL_ASSUME(n > 0);
+ while (n > 0) {
+ if (node->is_leaf()) {
+ if (position + n < node->finish()) {
+ position += n;
+ break;
+ } else {
+ n -= node->finish() - position;
+ position = node->finish();
+ btree_iterator save = {node, position};
+ while (position == node->finish() && !node->is_root()) {
+ position = node->position();
+ node = node->parent();
+ }
+ if (position == node->finish()) {
+ ABSL_HARDENING_ASSERT(n == 0);
+ return *this = save;
+ }
+ }
+ } else {
+ --n;
+ assert(position < node->finish());
+ node = node->child(static_cast<field_type>(position + 1));
+ while (node->is_internal()) {
+ node = node->start_child();
+ }
+ position = node->start();
+ }
+ }
+ node_ = node;
+ position_ = position;
+ return *this;
+}
+
+template <typename N, typename R, typename P>
+btree_iterator<N, R, P> &btree_iterator<N, R, P>::decrement_n_slow(
+ difference_type n) {
+ N *node = node_;
+ int position = position_;
+ ABSL_ASSUME(n > 0);
+ while (n > 0) {
+ if (node->is_leaf()) {
+ if (position - n >= node->start()) {
+ position -= n;
+ break;
+ } else {
+ n -= 1 + position - node->start();
+ position = node->start() - 1;
+ while (position < node->start() && !node->is_root()) {
+ position = node->position() - 1;
+ node = node->parent();
+ }
+ ABSL_HARDENING_ASSERT(position >= node->start());
+ }
+ } else {
+ --n;
+ assert(position >= node->start());
+ node = node->child(static_cast<field_type>(position));
+ while (node->is_internal()) {
+ node = node->child(node->finish());
+ }
+ position = node->finish() - 1;
+ }
+ }
+ node_ = node;
+ position_ = position;
+ return *this;
}
////
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h b/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
index a68ce445542..21f00ae414c 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
@@ -18,12 +18,14 @@
#include <algorithm>
#include <initializer_list>
#include <iterator>
+#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/btree.h" // IWYU pragma: export
#include "absl/container/internal/common.h"
+#include "absl/hash/internal/weakly_mixed_integer.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
@@ -266,7 +268,8 @@ class btree_container {
for (const auto &v : b) {
h = State::combine(std::move(h), v);
}
- return State::combine(std::move(h), b.size());
+ return State::combine(std::move(h),
+ hash_internal::WeaklyMixedInteger{b.size()});
}
protected:
@@ -451,6 +454,29 @@ class btree_map_container : public btree_set_container<Tree> {
template <class K>
using key_arg = typename super_type::template key_arg<K>;
+ // NOTE: The mess here is to shorten the code for the (very repetitive)
+ // function overloads, and to allow the lifetime-bound overloads to dispatch
+ // to the non-lifetime-bound overloads, to ensure there is a single source of
+ // truth for each overload set.
+ //
+ // Enabled if an assignment from the given type would require the
+ // source object to remain alive for the life of the element.
+ //
+ // TODO(b/402804213): Remove these traits and simplify the overloads whenever
+ // we have a better mechanism available to handle lifetime analysis.
+ template <class K, bool Value, typename = void>
+ using LifetimeBoundK =
+ HasValue<Value, type_traits_internal::IsLifetimeBoundAssignment<
+ typename Tree::key_type, K>>;
+ template <class M, bool Value, typename = void>
+ using LifetimeBoundV =
+ HasValue<Value, type_traits_internal::IsLifetimeBoundAssignment<
+ typename Tree::params_type::mapped_type, M>>;
+ template <class K, bool KValue, class M, bool MValue, typename... Dummy>
+ using LifetimeBoundKV =
+ absl::conjunction<LifetimeBoundK<K, KValue, absl::void_t<Dummy...>>,
+ LifetimeBoundV<M, MValue>>;
+
public:
using key_type = typename Tree::key_type;
using mapped_type = typename params_type::mapped_type;
@@ -464,85 +490,163 @@ class btree_map_container : public btree_set_container<Tree> {
using super_type::super_type;
btree_map_container() {}
+ // TODO(b/402804213): Remove these macros whenever we have a better mechanism
+ // available to handle lifetime analysis.
+#define ABSL_INTERNAL_X(Func, Callee, KQual, MQual, KValue, MValue, ...) \
+ template < \
+ typename K = key_type, class M, \
+ ABSL_INTERNAL_IF_##KValue##_NOR_##MValue( \
+ int = (EnableIf<LifetimeBoundKV<K, KValue, M, MValue, \
+ IfRRef<int KQual>::AddPtr<K>, \
+ IfRRef<int MQual>::AddPtr<M>>>()), \
+ ABSL_INTERNAL_SINGLE_ARG( \
+ int &..., \
+ decltype(EnableIf<LifetimeBoundKV<K, KValue, M, MValue>>()) = \
+ 0))> \
+ decltype(auto) Func( \
+ __VA_ARGS__ key_arg<K> KQual k ABSL_INTERNAL_IF_##KValue( \
+ ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)), \
+ M MQual obj ABSL_INTERNAL_IF_##MValue( \
+ ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))) \
+ ABSL_ATTRIBUTE_LIFETIME_BOUND { \
+ return ABSL_INTERNAL_IF_##KValue##_OR_##MValue( \
+ (this->template Func<K, M, 0>), Callee)( \
+ __VA_ARGS__ std::forward<decltype(k)>(k), \
+ std::forward<decltype(obj)>(obj)); \
+ } \
+ friend struct std::enable_if<false> /* just to force a semicolon */
// Insertion routines.
// Note: the nullptr template arguments and extra `const M&` overloads allow
// for supporting bitfield arguments.
- template <typename K = key_type, class M>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, const M &obj)
- ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_impl(k, obj);
- }
- template <typename K = key_type, class M, K * = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj)
- ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_impl(std::forward<K>(k), obj);
- }
- template <typename K = key_type, class M, M * = nullptr>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj)
- ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_impl(k, std::forward<M>(obj));
- }
- template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj)
- ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
- }
- template <typename K = key_type, class M>
- iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
- const M &obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_hint_impl(hint, k, obj);
- }
- template <typename K = key_type, class M, K * = nullptr>
- iterator insert_or_assign(const_iterator hint, key_arg<K> &&k,
- const M &obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
- }
- template <typename K = key_type, class M, M * = nullptr>
- iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
- M &&obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
- }
- template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
- iterator insert_or_assign(const_iterator hint, key_arg<K> &&k,
- M &&obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_hint_impl(hint, std::forward<K>(k),
- std::forward<M>(obj));
- }
-
- template <typename K = key_type, typename... Args,
- typename absl::enable_if_t<
- !std::is_convertible<K, const_iterator>::value, int> = 0>
- std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&...args)
- ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return try_emplace_impl(k, std::forward<Args>(args)...);
- }
- template <typename K = key_type, typename... Args,
- typename absl::enable_if_t<
- !std::is_convertible<K, const_iterator>::value, int> = 0>
- std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&...args)
- ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
- }
- template <typename K = key_type, typename... Args>
- iterator try_emplace(const_iterator hint, const key_arg<K> &k,
- Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
- }
- template <typename K = key_type, typename... Args>
- iterator try_emplace(const_iterator hint, key_arg<K> &&k,
- Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return try_emplace_hint_impl(hint, std::forward<K>(k),
- std::forward<Args>(args)...);
- }
-
- template <typename K = key_type>
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ false, false);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ false, true);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ true, false);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ true, true);
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, false,
+ false);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, false,
+ true);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, true,
+ false);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, true,
+ true);
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, false,
+ false);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, false,
+ true);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, true,
+ false);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, true,
+ true);
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, false,
+ false);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, false, true);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, true, false);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, true, true);
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, const &,
+ const &, false, false,
+ const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, const &,
+ const &, false, true,
+ const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, const &,
+ const &, true, false,
+ const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, const &,
+ const &, true, true,
+ const_iterator(hint) ABSL_INTERNAL_COMMA);
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, const &, &&,
+ false, false, const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, const &, &&,
+ false, true, const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, const &, &&,
+ true, false, const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, const &, &&,
+ true, true, const_iterator(hint) ABSL_INTERNAL_COMMA);
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, &&, const &,
+ false, false, const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, &&, const &,
+ false, true, const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, &&, const &,
+ true, false, const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, &&, const &,
+ true, true, const_iterator(hint) ABSL_INTERNAL_COMMA);
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, &&, &&, false,
+ false, const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, &&, &&, false,
+ true, const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, &&, &&, true,
+ false, const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_hint_impl, &&, &&, true,
+ true, const_iterator(hint) ABSL_INTERNAL_COMMA);
+#undef ABSL_INTERNAL_X
+
+#define ABSL_INTERNAL_X(Func, Callee, KQual, KValue, ...) \
+ template < \
+ class K = key_type, \
+ ABSL_INTERNAL_IF_##KValue( \
+ class... Args, \
+ int = (EnableIf< \
+ LifetimeBoundK<K, KValue, IfRRef<int KQual>::AddPtr<K>>>())), \
+ ABSL_INTERNAL_IF_##KValue( \
+ decltype(EnableIf<LifetimeBoundK< \
+ K, KValue, IfRRef<int KQual>::AddPtr<K>>>()) = 0, \
+ class... Args), \
+ std::enable_if_t<!std::is_convertible<K, const_iterator>::value, int> = \
+ 0> \
+ decltype(auto) Func( \
+ __VA_ARGS__ key_arg<K> KQual k ABSL_INTERNAL_IF_##KValue( \
+ ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)), \
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND { \
+ return ABSL_INTERNAL_IF_##KValue((this->template Func<K, 0>), Callee)( \
+ __VA_ARGS__ std::forward<decltype(k)>(k), \
+ std::forward<decltype(args)>(args)...); \
+ } \
+ friend struct std::enable_if<false> /* just to force a semicolon */
+ ABSL_INTERNAL_X(try_emplace, try_emplace_impl, const &, false);
+ ABSL_INTERNAL_X(try_emplace, try_emplace_impl, const &, true);
+ ABSL_INTERNAL_X(try_emplace, try_emplace_impl, &&, false);
+ ABSL_INTERNAL_X(try_emplace, try_emplace_impl, &&, true);
+ ABSL_INTERNAL_X(try_emplace, try_emplace_hint_impl, const &, false,
+ const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(try_emplace, try_emplace_hint_impl, const &, true,
+ const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(try_emplace, try_emplace_hint_impl, &&, false,
+ const_iterator(hint) ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(try_emplace, try_emplace_hint_impl, &&, true,
+ const_iterator(hint) ABSL_INTERNAL_COMMA);
+#undef ABSL_INTERNAL_X
+
+ template <class K = key_type, int = EnableIf<LifetimeBoundK<K, false>>()>
mapped_type &operator[](const key_arg<K> &k) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace(k).first->second;
}
- template <typename K = key_type>
+ template <class K = key_type, int &..., EnableIf<LifetimeBoundK<K, true>> = 0>
+ mapped_type &operator[](
+ const key_arg<K> &k ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return this->template operator[]<K, 0>(k);
+ }
+ template <class K = key_type, int = EnableIf<LifetimeBoundK<K, false>>()>
mapped_type &operator[](key_arg<K> &&k) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace(std::forward<K>(k)).first->second;
}
+ template <class K = key_type, int &..., EnableIf<LifetimeBoundK<K, true>> = 0>
+ mapped_type &operator[](key_arg<K> &&k ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(
+ this)) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return this->template operator[]<K, 0>(std::forward<K>(k));
+ }
template <typename K = key_type>
mapped_type &at(const key_arg<K> &key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/common.h b/contrib/restricted/abseil-cpp/absl/container/internal/common.h
index 9239bb4d093..5ef6c569a7d 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/common.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/common.h
@@ -21,10 +21,53 @@
#include "absl/meta/type_traits.h"
#include "absl/types/optional.h"
+// TODO(b/402804213): Clean up these macros when no longer needed.
+#define ABSL_INTERNAL_SINGLE_ARG(...) __VA_ARGS__
+
+#define ABSL_INTERNAL_IF_true(if_satisfied, ...) if_satisfied
+#define ABSL_INTERNAL_IF_false(if_satisfied, ...) __VA_ARGS__
+
+#define ABSL_INTERNAL_IF_true_AND_true ABSL_INTERNAL_IF_true
+#define ABSL_INTERNAL_IF_false_AND_false ABSL_INTERNAL_IF_false
+#define ABSL_INTERNAL_IF_true_AND_false ABSL_INTERNAL_IF_false_AND_false
+#define ABSL_INTERNAL_IF_false_AND_true ABSL_INTERNAL_IF_false_AND_false
+
+#define ABSL_INTERNAL_IF_true_OR_true ABSL_INTERNAL_IF_true
+#define ABSL_INTERNAL_IF_false_OR_false ABSL_INTERNAL_IF_false
+#define ABSL_INTERNAL_IF_true_OR_false ABSL_INTERNAL_IF_true_OR_true
+#define ABSL_INTERNAL_IF_false_OR_true ABSL_INTERNAL_IF_true_OR_true
+
+#define ABSL_INTERNAL_IF_true_NOR_true ABSL_INTERNAL_IF_false_AND_false
+#define ABSL_INTERNAL_IF_false_NOR_false ABSL_INTERNAL_IF_true_AND_true
+#define ABSL_INTERNAL_IF_true_NOR_false ABSL_INTERNAL_IF_false_AND_true
+#define ABSL_INTERNAL_IF_false_NOR_true ABSL_INTERNAL_IF_true_AND_false
+
+#define ABSL_INTERNAL_COMMA ,
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+// TODO(b/402804213): Clean up these traits when no longer needed or
+// deduplicate them with absl::functional_internal::EnableIf.
+template <class Cond>
+using EnableIf = std::enable_if_t<Cond::value, int>;
+
+template <bool Value, class T>
+using HasValue = std::conditional_t<Value, T, absl::negation<T>>;
+
+template <class T>
+struct IfRRef {
+ template <class Other>
+ using AddPtr = Other;
+};
+
+template <class T>
+struct IfRRef<T&&> {
+ template <class Other>
+ using AddPtr = Other*;
+};
+
template <class, class = void>
struct IsTransparent : std::false_type {};
template <class T>
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/common_policy_traits.h b/contrib/restricted/abseil-cpp/absl/container/internal/common_policy_traits.h
index bbf54750f94..86e038e178e 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/common_policy_traits.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/common_policy_traits.h
@@ -119,7 +119,7 @@ struct common_policy_traits {
old_slot)) {
return P::transfer(alloc, new_slot, old_slot);
}
-#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+
// This overload returns true_type for the trait below.
// The conditional_t is to make the enabler type dependent.
template <class Alloc,
@@ -135,7 +135,6 @@ struct common_policy_traits {
static_cast<const void*>(&element(old_slot)), sizeof(value_type));
return {};
}
-#endif
template <class Alloc>
static void transfer_impl(Alloc* alloc, slot_type* new_slot,
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h b/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h
index e7031797018..e7ac1dba43b 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h
@@ -374,9 +374,6 @@ struct map_slot_policy {
return slot->value;
}
- // When C++17 is available, we can use std::launder to provide mutable
- // access to the key for use in node handle.
-#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
static K& mutable_key(slot_type* slot) {
// Still check for kMutableKeys so that we can avoid calling std::launder
// unless necessary because it can interfere with optimizations.
@@ -384,9 +381,6 @@ struct map_slot_policy {
: *std::launder(const_cast<K*>(
std::addressof(slot->value.first)));
}
-#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
- static const K& mutable_key(slot_type* slot) { return key(slot); }
-#endif
static const K& key(const slot_type* slot) {
return kMutableKeys::value ? slot->key : slot->value.first;
@@ -439,11 +433,17 @@ struct map_slot_policy {
template <class Allocator>
static auto transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
- auto is_relocatable =
- typename absl::is_trivially_relocatable<value_type>::type();
+ // This should really just be
+ // typename absl::is_trivially_relocatable<value_type>::type()
+ // but std::pair is not trivially copyable in C++23 in some standard
+ // library versions.
+ // See https://github.com/llvm/llvm-project/pull/95444 for instance.
+ auto is_relocatable = typename std::conjunction<
+ absl::is_trivially_relocatable<typename value_type::first_type>,
+ absl::is_trivially_relocatable<typename value_type::second_type>>::
+ type();
emplace(new_slot);
-#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
if (is_relocatable) {
// TODO(b/247130232,b/251814870): remove casts after fixing warnings.
std::memcpy(static_cast<void*>(std::launder(&new_slot->value)),
@@ -451,7 +451,6 @@ struct map_slot_policy {
sizeof(value_type));
return is_relocatable;
}
-#endif
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::construct(
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
index 0f07bcfe294..c2a757b53f6 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
@@ -49,6 +49,7 @@
#include <functional>
#include <memory>
#include <string>
+#include <string_view>
#include <type_traits>
#include "absl/base/config.h"
@@ -58,10 +59,6 @@
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
-#ifdef ABSL_HAVE_STD_STRING_VIEW
-#include <string_view>
-#endif
-
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -113,8 +110,6 @@ struct HashEq<absl::string_view> : StringHashEq {};
template <>
struct HashEq<absl::Cord> : StringHashEq {};
-#ifdef ABSL_HAVE_STD_STRING_VIEW
-
template <typename TChar>
struct BasicStringHash {
using is_transparent = void;
@@ -153,8 +148,6 @@ struct HashEq<std::u32string> : BasicStringHashEq<char32_t> {};
template <>
struct HashEq<std::u32string_view> : BasicStringHashEq<char32_t> {};
-#endif // ABSL_HAVE_STD_STRING_VIEW
-
// Supports heterogeneous lookup for pointers and smart pointers.
template <class T>
struct HashEq<T*> {
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h
index f1f555a5c14..14c878e4c23 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h
@@ -23,7 +23,9 @@
#include <algorithm>
#include <cassert>
#include <iosfwd>
+#include <memory>
#include <random>
+#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
@@ -32,6 +34,7 @@
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
+#include "absl/random/random.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -48,9 +51,7 @@ struct IsMap<Map, absl::void_t<typename Map::mapped_type>> : std::true_type {};
} // namespace generator_internal
-std::mt19937_64* GetSharedRng();
-
-enum Enum {
+enum Enum : uint64_t {
kEnumEmpty,
kEnumDeleted,
};
@@ -69,37 +70,27 @@ struct Generator;
template <class T>
struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type> {
- T operator()() const {
- std::uniform_int_distribution<T> dist;
- return dist(*GetSharedRng());
- }
+ T operator()() const { return dist(gen); }
+ mutable absl::InsecureBitGen gen;
+ mutable std::uniform_int_distribution<T> dist;
};
template <>
struct Generator<Enum> {
- Enum operator()() const {
- std::uniform_int_distribution<typename std::underlying_type<Enum>::type>
- dist;
- while (true) {
- auto variate = dist(*GetSharedRng());
- if (variate != kEnumEmpty && variate != kEnumDeleted)
- return static_cast<Enum>(variate);
- }
- }
+ Enum operator()() const { return static_cast<Enum>(dist(gen)); }
+ mutable absl::InsecureBitGen gen;
+ mutable std::uniform_int_distribution<
+ typename std::underlying_type<Enum>::type>
+ dist;
};
template <>
struct Generator<EnumClass> {
- EnumClass operator()() const {
- std::uniform_int_distribution<
- typename std::underlying_type<EnumClass>::type>
- dist;
- while (true) {
- EnumClass variate = static_cast<EnumClass>(dist(*GetSharedRng()));
- if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted)
- return static_cast<EnumClass>(variate);
- }
- }
+ EnumClass operator()() const { return static_cast<EnumClass>(dist(gen)); }
+ mutable absl::InsecureBitGen gen;
+ mutable std::uniform_int_distribution<
+ typename std::underlying_type<EnumClass>::type>
+ dist;
};
template <>
@@ -143,17 +134,17 @@ struct Generator<std::unique_ptr<T>> {
template <class U>
struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()),
- decltype(std::declval<U&>().value())>>
+ decltype(std::declval<U&>().value())>>
: Generator<std::pair<
typename std::decay<decltype(std::declval<U&>().key())>::type,
typename std::decay<decltype(std::declval<U&>().value())>::type>> {};
template <class Container>
-using GeneratedType = decltype(
- std::declval<const Generator<
- typename std::conditional<generator_internal::IsMap<Container>::value,
- typename Container::value_type,
- typename Container::key_type>::type>&>()());
+using GeneratedType =
+ decltype(std::declval<const Generator<typename std::conditional<
+ generator_internal::IsMap<Container>::value,
+ typename Container::value_type,
+ typename Container::key_type>::type>&>()());
// Naive wrapper that performs a linear search of previous values.
// Beware this is O(SQR), which is reasonable for smaller kMaxValues.
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_testing.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_testing.h
index 66bb12ec457..e9f57579ed7 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_testing.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_testing.h
@@ -119,7 +119,11 @@ struct Alloc : std::allocator<T> {
using propagate_on_container_swap = std::true_type;
// Using old paradigm for this to ensure compatibility.
- explicit Alloc(size_t id = 0) : id_(id) {}
+ //
+ // NOTE: As of 2025-05, this constructor cannot be explicit in order to work
+ // with the libstdc++ that ships with GCC15.
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ Alloc(size_t id = 0) : id_(id) {}
Alloc(const Alloc&) = default;
Alloc& operator=(const Alloc&) = default;
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h
index ad835d6fcde..cd6b42f9ec6 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h
@@ -36,16 +36,12 @@ struct hash_policy_traits : common_policy_traits<Policy> {
private:
struct ReturnKey {
- // When C++17 is available, we can use std::launder to provide mutable
- // access to the key for use in node handle.
-#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
template <class Key,
absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
static key_type& Impl(Key&& k, int) {
return *std::launder(
const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
}
-#endif
template <class Key>
static Key Impl(Key&& k, char) {
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_control_bytes.h b/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_control_bytes.h
new file mode 100644
index 00000000000..abaadc3bae2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_control_bytes.h
@@ -0,0 +1,527 @@
+// Copyright 2025 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file contains the implementation of the hashtable control bytes
+// manipulation.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
+#include <arm_neon.h>
+#endif
+
+#include "absl/base/optimization.h"
+#include "absl/numeric/bits.h"
+#include "absl/base/internal/endian.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+#ifdef ABSL_SWISSTABLE_ASSERT
+#error ABSL_SWISSTABLE_ASSERT cannot be directly set
+#else
+// We use this macro for assertions that users may see when the table is in an
+// invalid state that sanitizers may help diagnose.
+#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
+ assert((CONDITION) && "Try enabling sanitizers.")
+#endif
+
+
+template <typename T>
+uint32_t TrailingZeros(T x) {
+ ABSL_ASSUME(x != 0);
+ return static_cast<uint32_t>(countr_zero(x));
+}
+
+// 8 bytes bitmask with most significant bit set for every byte.
+constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
+// 8 kEmpty bytes that is useful for small table initialization.
+constexpr uint64_t k8EmptyBytes = kMsbs8Bytes;
+
+// An abstract bitmask, such as that emitted by a SIMD instruction.
+//
+// Specifically, this type implements a simple bitset whose representation is
+// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
+// of abstract bits in the bitset, while `Shift` is the log-base-two of the
+// width of an abstract bit in the representation.
+// This mask provides operations for any number of real bits set in an abstract
+// bit. To add iteration on top of that, implementation must guarantee no more
+// than the most significant real bit is set in a set abstract bit.
+template <class T, int SignificantBits, int Shift = 0>
+class NonIterableBitMask {
+ public:
+ explicit NonIterableBitMask(T mask) : mask_(mask) {}
+
+ explicit operator bool() const { return this->mask_ != 0; }
+
+ // Returns the index of the lowest *abstract* bit set in `self`.
+ uint32_t LowestBitSet() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+
+ // Returns the index of the highest *abstract* bit set in `self`.
+ uint32_t HighestBitSet() const {
+ return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
+ }
+
+ // Returns the number of trailing zero *abstract* bits.
+ uint32_t TrailingZeros() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+
+ // Returns the number of leading zero *abstract* bits.
+ uint32_t LeadingZeros() const {
+ constexpr int total_significant_bits = SignificantBits << Shift;
+ constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
+ return static_cast<uint32_t>(
+ countl_zero(static_cast<T>(mask_ << extra_bits))) >>
+ Shift;
+ }
+
+ T mask_;
+};
+
+// Mask that can be iterated over.
+//
+// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
+// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
+// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
+// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
+// If NullifyBitsOnIteration is true (only allowed for Shift == 3),
+// non zero abstract bit is allowed to have additional bits
+// (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
+//
+// For example:
+// for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
+// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0,
+ bool NullifyBitsOnIteration = false>
+class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
+ using Base = NonIterableBitMask<T, SignificantBits, Shift>;
+ static_assert(std::is_unsigned<T>::value, "");
+ static_assert(Shift == 0 || Shift == 3, "");
+ static_assert(!NullifyBitsOnIteration || Shift == 3, "");
+
+ public:
+ explicit BitMask(T mask) : Base(mask) {
+ if (Shift == 3 && !NullifyBitsOnIteration) {
+ ABSL_SWISSTABLE_ASSERT(this->mask_ == (this->mask_ & kMsbs8Bytes));
+ }
+ }
+ // BitMask is an iterator over the indices of its abstract bits.
+ using value_type = int;
+ using iterator = BitMask;
+ using const_iterator = BitMask;
+
+ BitMask& operator++() {
+ if (Shift == 3 && NullifyBitsOnIteration) {
+ this->mask_ &= kMsbs8Bytes;
+ }
+ this->mask_ &= (this->mask_ - 1);
+ return *this;
+ }
+
+ uint32_t operator*() const { return Base::LowestBitSet(); }
+
+ BitMask begin() const { return *this; }
+ BitMask end() const { return BitMask(0); }
+
+ private:
+ friend bool operator==(const BitMask& a, const BitMask& b) {
+ return a.mask_ == b.mask_;
+ }
+ friend bool operator!=(const BitMask& a, const BitMask& b) {
+ return a.mask_ != b.mask_;
+ }
+};
+
+using h2_t = uint8_t;
+
+// The values here are selected for maximum performance. See the static asserts
+// below for details.
+
+// A `ctrl_t` is a single control byte, which can have one of four
+// states: empty, deleted, full (which has an associated seven-bit h2_t value)
+// and the sentinel. They have the following bit patterns:
+//
+// empty: 1 0 0 0 0 0 0 0
+// deleted: 1 1 1 1 1 1 1 0
+// full: 0 h h h h h h h // h represents the hash bits.
+// sentinel: 1 1 1 1 1 1 1 1
+//
+// These values are specifically tuned for SSE-flavored SIMD.
+// The static_asserts below detail the source of these choices.
+//
+// We use an enum class so that when strict aliasing is enabled, the compiler
+// knows ctrl_t doesn't alias other types.
+enum class ctrl_t : int8_t {
+ kEmpty = -128, // 0b10000000
+ kDeleted = -2, // 0b11111110
+ kSentinel = -1, // 0b11111111
+};
+static_assert(
+ (static_cast<int8_t>(ctrl_t::kEmpty) &
+ static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
+ "Special markers need to have the MSB to make checking for them efficient");
+static_assert(
+ ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
+ "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(
+ ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
+ "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
+ "registers (pcmpeqd xmm, xmm)");
+static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
+ "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
+ "existence efficient (psignb xmm, xmm)");
+static_assert(
+ (~static_cast<int8_t>(ctrl_t::kEmpty) &
+ ~static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
+ "shared by ctrl_t::kSentinel to make the scalar test for "
+ "MaskEmptyOrDeleted() efficient");
+static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
+ "ctrl_t::kDeleted must be -2 to make the implementation of "
+ "ConvertSpecialToEmptyAndFullToDeleted efficient");
+
+// Helpers for checking the state of a control byte.
+inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
+inline bool IsFull(ctrl_t c) {
+ // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
+ // is not a value in the enum. Both ways are equivalent, but this way makes
+ // linters happier.
+ return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
+}
+inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
+
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+// Quick reference guide for intrinsics used below:
+//
+// * __m128i: An XMM (128-bit) word.
+//
+// * _mm_setzero_si128: Returns a zero vector.
+// * _mm_set1_epi8: Returns a vector with the same i8 in each lane.
+//
+// * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
+// * _mm_and_si128: Ands two i128s together.
+// * _mm_or_si128: Ors two i128s together.
+// * _mm_andnot_si128: And-nots two i128s together.
+//
+// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
+// filling each lane with 0x00 or 0xff.
+// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
+//
+// * _mm_loadu_si128: Performs an unaligned load of an i128.
+// * _mm_storeu_si128: Performs an unaligned store of an i128.
+//
+// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
+// argument if the corresponding lane of the second
+// argument is positive, negative, or zero, respectively.
+// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
+// bitmask consisting of those bits.
+// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
+// four bits of each i8 lane in the second argument as
+// indices.
+
+// https://github.com/abseil/abseil-cpp/issues/209
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
+// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
+// Work around this by using the portable implementation of Group
+// when using -funsigned-char under GCC.
+inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
+#if defined(__GNUC__) && !defined(__clang__)
+ if (std::is_unsigned<char>::value) {
+ const __m128i mask = _mm_set1_epi8(0x80);
+ const __m128i diff = _mm_subs_epi8(b, a);
+ return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
+ }
+#endif
+ return _mm_cmpgt_epi8(a, b);
+}
+
+struct GroupSse2Impl {
+ static constexpr size_t kWidth = 16; // the number of slots per group
+ using BitMaskType = BitMask<uint16_t, kWidth>;
+ using NonIterableBitMaskType = NonIterableBitMask<uint16_t, kWidth>;
+
+ explicit GroupSse2Impl(const ctrl_t* pos) {
+ ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
+ }
+
+ // Returns a bitmask representing the positions of slots that match hash.
+ BitMaskType Match(h2_t hash) const {
+ auto match = _mm_set1_epi8(static_cast<char>(hash));
+ return BitMaskType(
+ static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
+ }
+
+ // Returns a bitmask representing the positions of empty slots.
+ NonIterableBitMaskType MaskEmpty() const {
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+ // This only works because ctrl_t::kEmpty is -128.
+ return NonIterableBitMaskType(
+ static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
+#else
+ auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
+ return NonIterableBitMaskType(
+ static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
+#endif
+ }
+
+ // Returns a bitmask representing the positions of full slots.
+ // Note: for `is_small()` tables group may contain the "same" slot twice:
+ // original and mirrored.
+ BitMaskType MaskFull() const {
+ return BitMaskType(static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
+ }
+
+ // Returns a bitmask representing the positions of non full slots.
+ // Note: this includes: kEmpty, kDeleted, kSentinel.
+ // It is useful in contexts when kSentinel is not present.
+ auto MaskNonFull() const {
+ return BitMaskType(static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
+ }
+
+ // Returns a bitmask representing the positions of empty or deleted slots.
+ NonIterableBitMaskType MaskEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
+ return NonIterableBitMaskType(static_cast<uint16_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
+ }
+
+ // Returns the number of trailing empty or deleted elements in the group.
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
+ return TrailingZeros(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ auto msbs = _mm_set1_epi8(static_cast<char>(-128));
+ auto x126 = _mm_set1_epi8(126);
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+ auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
+#else
+ auto zero = _mm_setzero_si128();
+ auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
+ auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
+#endif
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
+ }
+
+ __m128i ctrl;
+};
+#endif // ABSL_INTERNAL_HAVE_SSE2
+
+#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
+struct GroupAArch64Impl {
+ static constexpr size_t kWidth = 8;
+ using BitMaskType = BitMask<uint64_t, kWidth, /*Shift=*/3,
+ /*NullifyBitsOnIteration=*/true>;
+ using NonIterableBitMaskType =
+ NonIterableBitMask<uint64_t, kWidth, /*Shift=*/3>;
+
+ explicit GroupAArch64Impl(const ctrl_t* pos) {
+ ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
+ }
+
+ auto Match(h2_t hash) const {
+ uint8x8_t dup = vdup_n_u8(hash);
+ auto mask = vceq_u8(ctrl, dup);
+ return BitMaskType(vget_lane_u64(vreinterpret_u64_u8(mask), 0));
+ }
+
+ auto MaskEmpty() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
+ vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+ return NonIterableBitMaskType(mask);
+ }
+
+ // Returns a bitmask representing the positions of full slots.
+ // Note: for `is_small()` tables group may contain the "same" slot twice:
+ // original and mirrored.
+ auto MaskFull() const {
+ uint64_t mask = vget_lane_u64(
+ vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
+ vdup_n_s8(static_cast<int8_t>(0)))),
+ 0);
+ return BitMaskType(mask);
+ }
+
+ // Returns a bitmask representing the positions of non full slots.
+ // Note: this includes: kEmpty, kDeleted, kSentinel.
+ // It is useful in contexts when kSentinel is not present.
+ auto MaskNonFull() const {
+ uint64_t mask = vget_lane_u64(
+ vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
+ vdup_n_s8(static_cast<int8_t>(0)))),
+ 0);
+ return BitMaskType(mask);
+ }
+
+ auto MaskEmptyOrDeleted() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
+ vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+ return NonIterableBitMaskType(mask);
+ }
+
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
+ vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+    // Similar to MaskEmptyOrDeleted() but we invert the logic to invert the
+ // produced bitfield. We then count number of trailing zeros.
+ // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
+ // so we should be fine.
+ return static_cast<uint32_t>(countr_zero(mask)) >> 3;
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
+ constexpr uint64_t slsbs = 0x0202020202020202ULL;
+ constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
+ auto x = slsbs & (mask >> 6);
+ auto res = (x + midbs) | kMsbs8Bytes;
+ little_endian::Store64(dst, res);
+ }
+
+ uint8x8_t ctrl;
+};
+#endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
+
+struct GroupPortableImpl {
+ static constexpr size_t kWidth = 8;
+ using BitMaskType = BitMask<uint64_t, kWidth, /*Shift=*/3,
+ /*NullifyBitsOnIteration=*/false>;
+ using NonIterableBitMaskType =
+ NonIterableBitMask<uint64_t, kWidth, /*Shift=*/3>;
+
+ explicit GroupPortableImpl(const ctrl_t* pos)
+ : ctrl(little_endian::Load64(pos)) {}
+
+ BitMaskType Match(h2_t hash) const {
+ // For the technique, see:
+    // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
+ // (Determine if a word has a byte equal to n).
+ //
+ // Caveat: there are false positives but:
+ // - they only occur if there is a real match
+ // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
+ // - they will be handled gracefully by subsequent checks in code
+ //
+ // Example:
+ // v = 0x1716151413121110
+ // hash = 0x12
+ // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl ^ (lsbs * hash);
+ return BitMaskType((x - lsbs) & ~x & kMsbs8Bytes);
+ }
+
+ auto MaskEmpty() const {
+ return NonIterableBitMaskType((ctrl & ~(ctrl << 6)) & kMsbs8Bytes);
+ }
+
+ // Returns a bitmask representing the positions of full slots.
+ // Note: for `is_small()` tables group may contain the "same" slot twice:
+ // original and mirrored.
+ auto MaskFull() const {
+ return BitMaskType((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
+ }
+
+ // Returns a bitmask representing the positions of non full slots.
+ // Note: this includes: kEmpty, kDeleted, kSentinel.
+ // It is useful in contexts when kSentinel is not present.
+ auto MaskNonFull() const { return BitMaskType(ctrl & kMsbs8Bytes); }
+
+ auto MaskEmptyOrDeleted() const {
+ return NonIterableBitMaskType((ctrl & ~(ctrl << 7)) & kMsbs8Bytes);
+ }
+
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
+ // kDeleted. We lower all other bits and count number of trailing zeros.
+ constexpr uint64_t bits = 0x0101010101010101ULL;
+ return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
+ 3);
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl & kMsbs8Bytes;
+ auto res = (~x + (x >> 7)) & ~lsbs;
+ little_endian::Store64(dst, res);
+ }
+
+ uint64_t ctrl;
+};
+
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+using Group = GroupSse2Impl;
+using GroupFullEmptyOrDeleted = GroupSse2Impl;
+#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
+using Group = GroupAArch64Impl;
+// For Aarch64, we use the portable implementation for counting and masking
+// full, empty or deleted group elements. This is to avoid the latency of moving
+// between data GPRs and Neon registers when it does not provide a benefit.
+// Using Neon is profitable when we call Match(), but is not when we don't,
+// which is the case when we do *EmptyOrDeleted and MaskFull operations.
+// It is difficult to make a similar approach beneficial on other architectures
+// such as x86 since they have much lower GPR <-> vector register transfer
+// latency and 16-wide Groups.
+using GroupFullEmptyOrDeleted = GroupPortableImpl;
+#else
+using Group = GroupPortableImpl;
+using GroupFullEmptyOrDeleted = GroupPortableImpl;
+#endif
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#undef ABSL_SWISSTABLE_ASSERT
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
index fd21d966b70..c0fce8794d4 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
@@ -42,10 +42,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr int HashtablezInfo::kMaxStackDepth;
-#endif
-
namespace {
ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
false
@@ -126,6 +122,26 @@ static bool ShouldForceSampling() {
return state == kForce;
}
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+HashtablezInfoHandle ForcedTrySample(size_t inline_element_size,
+ size_t key_size, size_t value_size,
+ uint16_t soo_capacity) {
+ return HashtablezInfoHandle(SampleSlow(global_next_sample,
+ inline_element_size, key_size,
+ value_size, soo_capacity));
+}
+void TestOnlyRefreshSamplingStateForCurrentThread() {
+ global_next_sample.next_sample =
+ g_hashtablez_sample_parameter.load(std::memory_order_relaxed);
+ global_next_sample.sample_stride = global_next_sample.next_sample;
+}
+#else
+HashtablezInfoHandle ForcedTrySample(size_t, size_t, size_t, uint16_t) {
+ return HashtablezInfoHandle{nullptr};
+}
+void TestOnlyRefreshSamplingStateForCurrentThread() {}
+#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
+
HashtablezInfo* SampleSlow(SamplingState& next_sample,
size_t inline_element_size, size_t key_size,
size_t value_size, uint16_t soo_capacity) {
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
index d74acf8c6e2..305dc855b82 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
@@ -219,22 +219,41 @@ class HashtablezInfoHandle {
extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-// Returns a sampling handle.
-inline HashtablezInfoHandle Sample(
- ABSL_ATTRIBUTE_UNUSED size_t inline_element_size,
- ABSL_ATTRIBUTE_UNUSED size_t key_size,
- ABSL_ATTRIBUTE_UNUSED size_t value_size,
- ABSL_ATTRIBUTE_UNUSED uint16_t soo_capacity) {
+// Returns true if the next table should be sampled.
+// This function updates the global state.
+// If the function returns true, actual sampling should be done by calling
+// ForcedTrySample().
+inline bool ShouldSampleNextTable() {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
- return HashtablezInfoHandle(nullptr);
+ return false;
}
- return HashtablezInfoHandle(SampleSlow(global_next_sample,
- inline_element_size, key_size,
- value_size, soo_capacity));
+ return true;
#else
- return HashtablezInfoHandle(nullptr);
-#endif // !ABSL_PER_THREAD_TLS
+ return false;
+#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
+}
+
+// Returns a sampling handle.
+// Must be called only if HashSetShouldBeSampled() returned true.
+// Returned handle still can be unsampled if sampling is not possible.
+HashtablezInfoHandle ForcedTrySample(size_t inline_element_size,
+ size_t key_size, size_t value_size,
+ uint16_t soo_capacity);
+
+// In case sampling needs to be disabled and re-enabled in tests, this function
+// can be used to reset the sampling state for the current thread.
+// It is useful to avoid sampling attempts and sampling delays in tests.
+void TestOnlyRefreshSamplingStateForCurrentThread();
+
+// Returns a sampling handle.
+inline HashtablezInfoHandle Sample(size_t inline_element_size, size_t key_size,
+ size_t value_size, uint16_t soo_capacity) {
+ if (ABSL_PREDICT_TRUE(!ShouldSampleNextTable())) {
+ return HashtablezInfoHandle(nullptr);
+ }
+ return ForcedTrySample(inline_element_size, key_size, value_size,
+ soo_capacity);
}
using HashtablezSampler =
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
index 0bd0a1c4a42..b0d3f077e6c 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
@@ -73,11 +73,6 @@ using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
template <typename A>
using MoveIterator = typename std::move_iterator<Iterator<A>>;
-template <typename Iterator>
-using IsAtLeastForwardIterator = std::is_convertible<
- typename std::iterator_traits<Iterator>::iterator_category,
- std::forward_iterator_tag>;
-
template <typename A>
using IsMoveAssignOk = std::is_move_assignable<ValueType<A>>;
template <typename A>
@@ -234,7 +229,7 @@ class AllocationTransaction {
return result.data;
}
- ABSL_MUST_USE_RESULT Allocation<A> Release() && {
+ [[nodiscard]] Allocation<A> Release() && {
Allocation<A> result = {GetData(), GetCapacity()};
Reset();
return result;
@@ -548,7 +543,7 @@ class Storage {
(std::max)(N, sizeof(Allocated) / sizeof(ValueType<A>));
struct Inlined {
- alignas(ValueType<A>) char inlined_data[sizeof(
+ alignas(ValueType<A>) unsigned char inlined_data[sizeof(
ValueType<A>[kOptimalInlinedSize])];
};
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/layout.h b/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
index f8b425c5776..58c8d4f141b 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
@@ -192,7 +192,6 @@
#include <typeinfo>
#include <utility>
-#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/debugging/internal/demangle.h"
#include "absl/meta/type_traits.h"
@@ -316,9 +315,6 @@ std::string TypeName() {
} // namespace adl_barrier
-template <bool C>
-using EnableIf = typename std::enable_if<C, int>::type;
-
// Can `T` be a template argument of `Layout`?
template <class T>
using IsLegalElementType = std::integral_constant<
@@ -418,17 +414,16 @@ class LayoutImpl<
// assert(x.Offset<1>() == 16); // The doubles starts from 16.
//
// Requires: `N <= NumSizes && N < sizeof...(Ts)`.
- template <size_t N, EnableIf<N == 0> = 0>
- constexpr size_t Offset() const {
- return 0;
- }
-
- template <size_t N, EnableIf<N != 0> = 0>
+ template <size_t N>
constexpr size_t Offset() const {
- static_assert(N < NumOffsets, "Index out of bounds");
- return adl_barrier::Align(
- Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>(),
- ElementAlignment<N>::value);
+ if constexpr (N == 0) {
+ return 0;
+ } else {
+ static_assert(N < NumOffsets, "Index out of bounds");
+ return adl_barrier::Align(
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>(),
+ ElementAlignment<N>::value);
+ }
}
// Offset in bytes of the array with the specified element type. There must
@@ -457,15 +452,14 @@ class LayoutImpl<
// assert(x.Size<1>() == 4);
//
// Requires: `N < NumSizes`.
- template <size_t N, EnableIf<(N < NumStaticSizes)> = 0>
- constexpr size_t Size() const {
- return kStaticSizes[N];
- }
-
- template <size_t N, EnableIf<(N >= NumStaticSizes)> = 0>
+ template <size_t N>
constexpr size_t Size() const {
- static_assert(N < NumSizes, "Index out of bounds");
- return size_[N - NumStaticSizes];
+ if constexpr (N < NumStaticSizes) {
+ return kStaticSizes[N];
+ } else {
+ static_assert(N < NumSizes, "Index out of bounds");
+ return size_[N - NumStaticSizes];
+ }
}
// The number of elements in the array with the specified element type.
@@ -596,10 +590,10 @@ class LayoutImpl<
//
// Requires: `p` is aligned to `Alignment()`.
//
- // Note: We mark the parameter as unused because GCC detects it is not used
- // when `SizeSeq` is empty [-Werror=unused-but-set-parameter].
+ // Note: We mark the parameter as maybe_unused because GCC detects it is not
+ // used when `SizeSeq` is empty [-Werror=unused-but-set-parameter].
template <class Char>
- auto Slices(ABSL_ATTRIBUTE_UNUSED Char* p) const {
+ auto Slices([[maybe_unused]] Char* p) const {
return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
Slice<SizeSeq>(p)...);
}
@@ -624,15 +618,13 @@ class LayoutImpl<
// `Char` must be `[const] [signed|unsigned] char`.
//
// Requires: `p` is aligned to `Alignment()`.
- template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
- void PoisonPadding(const Char* p) const {
- Pointer<0>(p); // verify the requirements on `Char` and `p`
- }
-
- template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
+ template <class Char, size_t N = NumOffsets - 1>
void PoisonPadding(const Char* p) const {
- static_assert(N < NumOffsets, "Index out of bounds");
- (void)p;
+ if constexpr (N == 0) {
+ Pointer<0>(p); // verify the requirements on `Char` and `p`
+ } else {
+ static_assert(N < NumOffsets, "Index out of bounds");
+ (void)p;
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
PoisonPadding<Char, N - 1>(p);
// The `if` is an optimization. It doesn't affect the observable behaviour.
@@ -642,6 +634,7 @@ class LayoutImpl<
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
}
#endif
+ }
}
// Human-readable description of the memory layout. Useful for debugging.
@@ -692,15 +685,6 @@ class LayoutImpl<
size_t size_[NumRuntimeSizes > 0 ? NumRuntimeSizes : 1];
};
-// Defining a constexpr static class member variable is redundant and deprecated
-// in C++17, but required in C++14.
-template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
- size_t... SizeSeq, size_t... OffsetSeq>
-constexpr std::array<size_t, sizeof...(StaticSizeSeq)> LayoutImpl<
- std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
- absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
- absl::index_sequence<OffsetSeq...>>::kStaticSizes;
-
template <class StaticSizeSeq, size_t NumRuntimeSizes, class... Ts>
using LayoutType = LayoutImpl<
std::tuple<Ts...>, StaticSizeSeq,
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
index 464bf23bd31..b42a4f22f67 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
@@ -22,8 +22,10 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/throw_delegate.h"
+#include "absl/container/internal/common_policy_traits.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
+#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -43,14 +45,39 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
using MappedConstReference = decltype(P::value(
std::addressof(std::declval<typename raw_hash_map::const_reference>())));
- using KeyArgImpl =
- KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
+ template <class K>
+ using key_arg =
+ typename KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>::
+ template type<K, typename Policy::key_type>;
+
+ // NOTE: The mess here is to shorten the code for the (very repetitive)
+ // function overloads, and to allow the lifetime-bound overloads to dispatch
+ // to the non-lifetime-bound overloads, to ensure there is a single source of
+ // truth for each overload set.
+ //
+ // Enabled if an assignment from the given type would require the
+ // source object to remain alive for the life of the element.
+ //
+ // TODO(b/402804213): Remove these traits and simplify the overloads whenever
+ // we have a better mechanism available to handle lifetime analysis.
+ template <class K, bool Value, typename = void>
+ using LifetimeBoundK = HasValue<
+ Value, std::conditional_t<policy_trait_element_is_owner<Policy>::value,
+ std::false_type,
+ type_traits_internal::IsLifetimeBoundAssignment<
+ typename Policy::key_type, K>>>;
+ template <class V, bool Value, typename = void>
+ using LifetimeBoundV =
+ HasValue<Value, type_traits_internal::IsLifetimeBoundAssignment<
+ typename Policy::mapped_type, V>>;
+ template <class K, bool KValue, class V, bool VValue, typename... Dummy>
+ using LifetimeBoundKV =
+ absl::conjunction<LifetimeBoundK<K, KValue, absl::void_t<Dummy...>>,
+ LifetimeBoundV<V, VValue>>;
public:
using key_type = typename Policy::key_type;
using mapped_type = typename Policy::mapped_type;
- template <class K>
- using key_arg = typename KeyArgImpl::template type<K, key_type>;
static_assert(!std::is_reference<key_type>::value, "");
@@ -71,87 +98,175 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
// union { int n : 1; };
// flat_hash_map<int, int> m;
// m.insert_or_assign(n, n);
- template <class K = key_type, class V = mapped_type, K* = nullptr,
- V* = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v)
- ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
- }
-
- template <class K = key_type, class V = mapped_type, K* = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v)
- ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_impl(std::forward<K>(k), v);
- }
-
- template <class K = key_type, class V = mapped_type, V* = nullptr>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v)
- ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_impl(k, std::forward<V>(v));
- }
-
- template <class K = key_type, class V = mapped_type>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v)
- ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign_impl(k, v);
- }
-
- template <class K = key_type, class V = mapped_type, K* = nullptr,
- V* = nullptr>
- iterator insert_or_assign(const_iterator, key_arg<K>&& k,
- V&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
- }
-
- template <class K = key_type, class V = mapped_type, K* = nullptr>
- iterator insert_or_assign(const_iterator, key_arg<K>&& k,
- const V& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign(std::forward<K>(k), v).first;
- }
-
- template <class K = key_type, class V = mapped_type, V* = nullptr>
- iterator insert_or_assign(const_iterator, const key_arg<K>& k,
- V&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign(k, std::forward<V>(v)).first;
- }
-
- template <class K = key_type, class V = mapped_type>
- iterator insert_or_assign(const_iterator, const key_arg<K>& k,
- const V& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert_or_assign(k, v).first;
- }
+ //
+ // TODO(b/402804213): Remove these macros whenever we have a better mechanism
+ // available to handle lifetime analysis.
+#define ABSL_INTERNAL_X(Func, Callee, KQual, VQual, KValue, VValue, Tail, ...) \
+ template < \
+ typename K = key_type, class V = mapped_type, \
+ ABSL_INTERNAL_IF_##KValue##_NOR_##VValue( \
+ int = (EnableIf<LifetimeBoundKV<K, KValue, V, VValue, \
+ IfRRef<int KQual>::AddPtr<K>, \
+ IfRRef<int VQual>::AddPtr<V>>>()), \
+ ABSL_INTERNAL_SINGLE_ARG( \
+ int &..., \
+ decltype(EnableIf<LifetimeBoundKV<K, KValue, V, VValue>>()) = \
+ 0))> \
+ decltype(auto) Func( \
+ __VA_ARGS__ key_arg<K> KQual k ABSL_INTERNAL_IF_##KValue( \
+ ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)), \
+ V VQual v ABSL_INTERNAL_IF_##VValue(ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY( \
+ this))) ABSL_ATTRIBUTE_LIFETIME_BOUND { \
+ return ABSL_INTERNAL_IF_##KValue##_OR_##VValue( \
+ (this->template Func<K, V, 0>), Callee)( \
+ std::forward<decltype(k)>(k), std::forward<decltype(v)>(v)) Tail; \
+ } \
+ static_assert(true, "This is to force a semicolon.")
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ false, false, ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ false, true, ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ true, false, ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ true, true, ABSL_INTERNAL_SINGLE_ARG());
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, false,
+ false, ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, false,
+ true, ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, true,
+ false, ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, true,
+ true, ABSL_INTERNAL_SINGLE_ARG());
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, false,
+ false, ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, false,
+ true, ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, true,
+ false, ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, true,
+ true, ABSL_INTERNAL_SINGLE_ARG());
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, false, false,
+ ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, false, true,
+ ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, true, false,
+ ABSL_INTERNAL_SINGLE_ARG());
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, true, true,
+ ABSL_INTERNAL_SINGLE_ARG());
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ false, false, .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ false, true, .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ true, false, .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, const &,
+ true, true, .first, const_iterator ABSL_INTERNAL_COMMA);
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, false,
+ false, .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, false,
+ true, .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, true,
+ false, .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, const &, &&, true,
+ true, .first, const_iterator ABSL_INTERNAL_COMMA);
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, false,
+ false, .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, false,
+ true, .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, true,
+ false, .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, const &, true,
+ true, .first, const_iterator ABSL_INTERNAL_COMMA);
+
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, false, false,
+ .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, false, true,
+ .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, true, false,
+ .first, const_iterator ABSL_INTERNAL_COMMA);
+ ABSL_INTERNAL_X(insert_or_assign, insert_or_assign_impl, &&, &&, true, true,
+ .first, const_iterator ABSL_INTERNAL_COMMA);
+#undef ABSL_INTERNAL_X
// All `try_emplace()` overloads make the same guarantees regarding rvalue
// arguments as `std::unordered_map::try_emplace()`, namely that these
// functions will not move from rvalue arguments if insertions do not happen.
- template <class K = key_type, class... Args,
+ template <class K = key_type, int = EnableIf<LifetimeBoundK<K, false, K *>>(),
+ class... Args,
typename std::enable_if<
- !std::is_convertible<K, const_iterator>::value, int>::type = 0,
- K* = nullptr>
- std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args)
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0>
+ std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&...args)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}
template <class K = key_type, class... Args,
+ EnableIf<LifetimeBoundK<K, true, K *>> = 0,
typename std::enable_if<
!std::is_convertible<K, const_iterator>::value, int>::type = 0>
- std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args)
+ std::pair<iterator, bool> try_emplace(
+ key_arg<K> &&k ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return this->template try_emplace<K, 0>(std::forward<K>(k),
+ std::forward<Args>(args)...);
+ }
+
+ template <class K = key_type, int = EnableIf<LifetimeBoundK<K, false>>(),
+ class... Args,
+ typename std::enable_if<
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0>
+ std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&...args)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace_impl(k, std::forward<Args>(args)...);
}
+ template <class K = key_type, class... Args,
+ EnableIf<LifetimeBoundK<K, true>> = 0,
+ typename std::enable_if<
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0>
+ std::pair<iterator, bool> try_emplace(
+ const key_arg<K> &k ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return this->template try_emplace<K, 0>(k, std::forward<Args>(args)...);
+ }
- template <class K = key_type, class... Args, K* = nullptr>
- iterator try_emplace(const_iterator, key_arg<K>&& k,
- Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ template <class K = key_type, int = EnableIf<LifetimeBoundK<K, false, K *>>(),
+ class... Args>
+ iterator try_emplace(const_iterator, key_arg<K> &&k,
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
}
+ template <class K = key_type, class... Args,
+ EnableIf<LifetimeBoundK<K, true, K *>> = 0>
+ iterator try_emplace(const_iterator hint,
+ key_arg<K> &&k ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return this->template try_emplace<K, 0>(hint, std::forward<K>(k),
+ std::forward<Args>(args)...);
+ }
- template <class K = key_type, class... Args>
- iterator try_emplace(const_iterator, const key_arg<K>& k,
- Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ template <class K = key_type, int = EnableIf<LifetimeBoundK<K, false>>(),
+ class... Args>
+ iterator try_emplace(const_iterator, const key_arg<K> &k,
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace(k, std::forward<Args>(args)...).first;
}
+ template <class K = key_type, class... Args,
+ EnableIf<LifetimeBoundK<K, true>> = 0>
+ iterator try_emplace(const_iterator hint,
+ const key_arg<K> &k
+ ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this),
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return this->template try_emplace<K, 0>(hint, std::forward<K>(k),
+ std::forward<Args>(args)...);
+ }
template <class K = key_type, class P = Policy>
MappedReference<P> at(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
@@ -174,8 +289,9 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
return Policy::value(&*it);
}
- template <class K = key_type, class P = Policy, K* = nullptr>
- MappedReference<P> operator[](key_arg<K>&& key)
+ template <class K = key_type, class P = Policy,
+ int = EnableIf<LifetimeBoundK<K, false, K *>>()>
+ MappedReference<P> operator[](key_arg<K> &&key)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
// It is safe to use unchecked_deref here because try_emplace
// will always return an iterator pointing to a valid item in the table,
@@ -183,15 +299,30 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
return Policy::value(
&this->unchecked_deref(try_emplace(std::forward<K>(key)).first));
}
+ template <class K = key_type, class P = Policy, int &...,
+ EnableIf<LifetimeBoundK<K, true, K *>> = 0>
+ MappedReference<P> operator[](
+ key_arg<K> &&key ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return this->template operator[]<K, P, 0>(std::forward<K>(key));
+ }
- template <class K = key_type, class P = Policy>
- MappedReference<P> operator[](const key_arg<K>& key)
+ template <class K = key_type, class P = Policy,
+ int = EnableIf<LifetimeBoundK<K, false>>()>
+ MappedReference<P> operator[](const key_arg<K> &key)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
// It is safe to use unchecked_deref here because try_emplace
// will always return an iterator pointing to a valid item in the table,
// since it inserts if nothing is found for the given key.
return Policy::value(&this->unchecked_deref(try_emplace(key).first));
}
+ template <class K = key_type, class P = Policy, int &...,
+ EnableIf<LifetimeBoundK<K, true>> = 0>
+ MappedReference<P> operator[](
+ const key_arg<K> &key ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return this->template operator[]<K, P, 0>(key);
+ }
private:
template <class K, class V>
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
index 8911aa3dc82..339e662d012 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
@@ -27,9 +27,11 @@
#include "absl/base/internal/raw_logging.h"
#include "absl/base/optimization.h"
#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hashtable_control_bytes.h"
#include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/container/internal/raw_hash_set_resize_impl.h"
+#include "absl/functional/function_ref.h"
#include "absl/hash/hash.h"
-#include "absl/numeric/bits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -65,21 +67,31 @@ ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[17] = {
static_assert(NumControlBytes(SooCapacity()) <= 17,
"kSooControl capacity too small");
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr size_t Group::kWidth;
+namespace {
+
+#ifdef ABSL_SWISSTABLE_ASSERT
+#error ABSL_SWISSTABLE_ASSERT cannot be directly set
+#else
+// We use this macro for assertions that users may see when the table is in an
+// invalid state that sanitizers may help diagnose.
+#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
+ assert((CONDITION) && "Try enabling sanitizers.")
#endif
-namespace {
+[[noreturn]] ABSL_ATTRIBUTE_NOINLINE void HashTableSizeOverflow() {
+ ABSL_RAW_LOG(FATAL, "Hash table size overflow");
+}
+
+void ValidateMaxSize(size_t size, size_t slot_size) {
+ if (IsAboveValidSize(size, slot_size)) {
+ HashTableSizeOverflow();
+ }
+}
// Returns "random" seed.
inline size_t RandomSeed() {
#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local size_t counter = 0;
- // On Linux kernels >= 5.4 the MSAN runtime has a false-positive when
- // accessing thread local storage data from loaded libraries
- // (https://github.com/google/sanitizers/issues/1265), for this reason counter
- // needs to be annotated as initialized.
- ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(&counter, sizeof(size_t));
size_t value = ++counter;
#else // ABSL_HAVE_THREAD_LOCAL
static std::atomic<size_t> counter(0);
@@ -88,24 +100,35 @@ inline size_t RandomSeed() {
return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}
-bool ShouldRehashForBugDetection(const ctrl_t* ctrl, size_t capacity) {
+bool ShouldRehashForBugDetection(PerTableSeed seed, size_t capacity) {
// Note: we can't use the abseil-random library because abseil-random
// depends on swisstable. We want to return true with probability
// `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
// we probe based on a random hash and see if the offset is less than
// RehashProbabilityConstant().
- return probe(ctrl, capacity, absl::HashOf(RandomSeed())).offset() <
+ return probe(seed, capacity, absl::HashOf(RandomSeed())).offset() <
RehashProbabilityConstant();
}
// Find a non-deterministic hash for single group table.
// Last two bits are used to find a position for a newly inserted element after
// resize.
-// This function is mixing all bits of hash and control pointer to maximize
-// entropy.
-size_t SingleGroupTableH1(size_t hash, ctrl_t* control) {
- return static_cast<size_t>(absl::popcount(
- hash ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(control))));
+// This function basically using H2 last bits to save on shift operation.
+size_t SingleGroupTableH1(size_t hash, PerTableSeed seed) {
+ return hash ^ seed.seed();
+}
+
+// Returns the address of the slot `i` iterations after `slot` assuming each
+// slot has the specified size.
+inline void* NextSlot(void* slot, size_t slot_size, size_t i = 1) {
+ return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) +
+ slot_size * i);
+}
+
+// Returns the address of the slot just before `slot` assuming each slot has the
+// specified size.
+inline void* PrevSlot(void* slot, size_t slot_size) {
+ return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
}
} // namespace
@@ -121,42 +144,104 @@ GenerationType* EmptyGeneration() {
}
bool CommonFieldsGenerationInfoEnabled::
- should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+ should_rehash_for_bug_detection_on_insert(PerTableSeed seed,
size_t capacity) const {
if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
if (reserved_growth_ > 0) return false;
- return ShouldRehashForBugDetection(ctrl, capacity);
+ return ShouldRehashForBugDetection(seed, capacity);
}
bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(
- const ctrl_t* ctrl, size_t capacity) const {
- return ShouldRehashForBugDetection(ctrl, capacity);
+ PerTableSeed seed, size_t capacity) const {
+ return ShouldRehashForBugDetection(seed, capacity);
+}
+
+namespace {
+
+FindInfo find_first_non_full_from_h1(const ctrl_t* ctrl, size_t h1,
+ size_t capacity) {
+ auto seq = probe(h1, capacity);
+ if (IsEmptyOrDeleted(ctrl[seq.offset()])) {
+ return {seq.offset(), /*probe_length=*/0};
+ }
+ while (true) {
+ GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
+ auto mask = g.MaskEmptyOrDeleted();
+ if (mask) {
+ return {seq.offset(mask.LowestBitSet()), seq.index()};
+ }
+ seq.next();
+ ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity && "full table!");
+ }
}
-bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
- const ctrl_t* ctrl) {
- // To avoid problems with weak hashes and single bit tests, we use % 13.
- // TODO(kfm,sbenza): revisit after we do unconditional mixing
- return !is_small(capacity) && (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+// Whether a table is "small". A small table fits entirely into a probing
+// group, i.e., has a capacity < `Group::kWidth`.
+//
+// In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
+//
+// In small mode only the first `capacity` control bytes after the sentinel
+// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// represent a real slot.
+constexpr bool is_small(size_t capacity) {
+ return capacity < Group::kWidth - 1;
}
-size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
- CommonFields& common) {
- assert(common.capacity() == NextCapacity(SooCapacity()));
- // After resize from capacity 1 to 3, we always have exactly the slot with
- // index 1 occupied, so we need to insert either at index 0 or index 2.
- assert(HashSetResizeHelper::SooSlotIndex() == 1);
- PrepareInsertCommon(common);
- const size_t offset = SingleGroupTableH1(hash, common.control()) & 2;
- common.growth_info().OverwriteEmptyAsFull();
- SetCtrlInSingleGroupTable(common, offset, H2(hash), slot_size);
- common.infoz().RecordInsert(hash, /*distance_from_desired=*/0);
- return offset;
+template <class Fn>
+void IterateOverFullSlotsImpl(const CommonFields& c, size_t slot_size, Fn cb) {
+ const size_t cap = c.capacity();
+ const ctrl_t* ctrl = c.control();
+ void* slot = c.slot_array();
+ if (is_small(cap)) {
+ // Mirrored/cloned control bytes in small table are also located in the
+ // first group (starting from position 0). We are taking group from position
+ // `capacity` in order to avoid duplicates.
+
+ // Small tables capacity fits into portable group, where
+ // GroupPortableImpl::MaskFull is more efficient for the
+ // capacity <= GroupPortableImpl::kWidth.
+ ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth &&
+ "unexpectedly large small capacity");
+ static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
+ "unexpected group width");
+ // Group starts from kSentinel slot, so indices in the mask will
+ // be increased by 1.
+ const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
+ --ctrl;
+ slot = PrevSlot(slot, slot_size);
+ for (uint32_t i : mask) {
+ cb(ctrl + i, SlotAddress(slot, i, slot_size));
+ }
+ return;
+ }
+ size_t remaining = c.size();
+ ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
+ while (remaining != 0) {
+ for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
+ ABSL_SWISSTABLE_ASSERT(IsFull(ctrl[i]) &&
+ "hash table was modified unexpectedly");
+ cb(ctrl + i, SlotAddress(slot, i, slot_size));
+ --remaining;
+ }
+ ctrl += Group::kWidth;
+ slot = NextSlot(slot, slot_size, Group::kWidth);
+ ABSL_SWISSTABLE_ASSERT(
+ (remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
+ "hash table was modified unexpectedly");
+ }
+ // NOTE: erasure of the current element is allowed in callback for
+ // absl::erase_if specialization. So we use `>=`.
+ ABSL_SWISSTABLE_ASSERT(original_size_for_assert >= c.size() &&
+ "hash table was modified unexpectedly");
}
+} // namespace
+
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
- assert(ctrl[capacity] == ctrl_t::kSentinel);
- assert(IsValidCapacity(capacity));
+ ABSL_SWISSTABLE_ASSERT(ctrl[capacity] == ctrl_t::kSentinel);
+ ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
}
@@ -164,26 +249,25 @@ void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
ctrl[capacity] = ctrl_t::kSentinel;
}
-// Extern template instantiation for inline function.
-template FindInfo find_first_non_full(const CommonFields&, size_t);
-FindInfo find_first_non_full_outofline(const CommonFields& common,
- size_t hash) {
- return find_first_non_full(common, hash);
+FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
+ return find_first_non_full_from_h1(common.control(), H1(hash, common.seed()),
+ common.capacity());
+}
+
+void IterateOverFullSlots(const CommonFields& c, size_t slot_size,
+ absl::FunctionRef<void(const ctrl_t*, void*)> cb) {
+ IterateOverFullSlotsImpl(c, slot_size, cb);
}
namespace {
-// Returns the address of the slot just after slot assuming each slot has the
-// specified size.
-static inline void* NextSlot(void* slot, size_t slot_size) {
- return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) + slot_size);
+void ResetGrowthLeft(GrowthInfo& growth_info, size_t capacity, size_t size) {
+ growth_info.InitGrowthLeftNoDeleted(CapacityToGrowth(capacity) - size);
}
-// Returns the address of the slot just before slot assuming each slot has the
-// specified size.
-static inline void* PrevSlot(void* slot, size_t slot_size) {
- return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
+void ResetGrowthLeft(CommonFields& common) {
+ ResetGrowthLeft(common.growth_info(), common.capacity(), common.size());
}
// Finds guaranteed to exists empty slot from the given position.
@@ -196,17 +280,34 @@ size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) {
return i;
}
}
- assert(false && "no empty slot");
- return ~size_t{};
+ ABSL_UNREACHABLE();
}
-void DropDeletesWithoutResize(CommonFields& common,
- const PolicyFunctions& policy) {
+// Finds guaranteed to exist full slot starting from the given position.
+// NOTE: this function is only triggered for rehash(0), when we need to
+// go back to SOO state, so we keep it simple.
+size_t FindFirstFullSlot(size_t start, size_t end, const ctrl_t* ctrl) {
+ for (size_t i = start; i < end; ++i) {
+ if (IsFull(ctrl[i])) {
+ return i;
+ }
+ }
+ ABSL_UNREACHABLE();
+}
+
+void PrepareInsertCommon(CommonFields& common) {
+ common.increment_size();
+ common.maybe_increment_generation_on_insert();
+}
+
+size_t DropDeletesWithoutResizeAndPrepareInsert(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ size_t new_hash) {
void* set = &common;
void* slot_array = common.slot_array();
const size_t capacity = common.capacity();
- assert(IsValidCapacity(capacity));
- assert(!is_small(capacity));
+ ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
+ ABSL_SWISSTABLE_ASSERT(!is_single_group(capacity));
// Algorithm:
// - mark all DELETED slots as EMPTY
// - mark all FULL slots as DELETED
@@ -227,7 +328,7 @@ void DropDeletesWithoutResize(CommonFields& common,
ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
const void* hash_fn = policy.hash_fn(common);
auto hasher = policy.hash_slot;
- auto transfer = policy.transfer;
+ auto transfer_n = policy.transfer_n;
const size_t slot_size = policy.slot_size;
size_t total_probe_length = 0;
@@ -240,7 +341,7 @@ void DropDeletesWithoutResize(CommonFields& common,
for (size_t i = 0; i != capacity;
++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
- assert(slot_ptr == SlotAddress(slot_array, i, slot_size));
+ ABSL_SWISSTABLE_ASSERT(slot_ptr == SlotAddress(slot_array, i, slot_size));
if (IsEmpty(ctrl[i])) {
tmp_space_id = i;
continue;
@@ -255,13 +356,14 @@ void DropDeletesWithoutResize(CommonFields& common,
// If they do, we don't need to move the object as it falls already in the
// best probe we can.
const size_t probe_offset = probe(common, hash).offset();
+ const h2_t h2 = H2(hash);
const auto probe_index = [probe_offset, capacity](size_t pos) {
return ((pos - probe_offset) & capacity) / Group::kWidth;
};
// Element doesn't move.
if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
- SetCtrl(common, i, H2(hash), slot_size);
+ SetCtrlInLargeTable(common, i, h2, slot_size);
continue;
}
@@ -270,14 +372,14 @@ void DropDeletesWithoutResize(CommonFields& common,
// Transfer element to the empty spot.
// SetCtrl poisons/unpoisons the slots so we have to call it at the
// right time.
- SetCtrl(common, new_i, H2(hash), slot_size);
- (*transfer)(set, new_slot_ptr, slot_ptr);
- SetCtrl(common, i, ctrl_t::kEmpty, slot_size);
+ SetCtrlInLargeTable(common, new_i, h2, slot_size);
+ (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);
+ SetCtrlInLargeTable(common, i, ctrl_t::kEmpty, slot_size);
// Initialize or change empty space id.
tmp_space_id = i;
} else {
- assert(IsDeleted(ctrl[new_i]));
- SetCtrl(common, new_i, H2(hash), slot_size);
+ ABSL_SWISSTABLE_ASSERT(IsDeleted(ctrl[new_i]));
+ SetCtrlInLargeTable(common, new_i, h2, slot_size);
// Until we are done rehashing, DELETED marks previously FULL slots.
if (tmp_space_id == kUnknownId) {
@@ -287,9 +389,9 @@ void DropDeletesWithoutResize(CommonFields& common,
SanitizerUnpoisonMemoryRegion(tmp_space, slot_size);
// Swap i and new_i elements.
- (*transfer)(set, tmp_space, new_slot_ptr);
- (*transfer)(set, new_slot_ptr, slot_ptr);
- (*transfer)(set, slot_ptr, tmp_space);
+ (*transfer_n)(set, tmp_space, new_slot_ptr, 1);
+ (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);
+ (*transfer_n)(set, slot_ptr, tmp_space, 1);
SanitizerPoisonMemoryRegion(tmp_space, slot_size);
@@ -298,8 +400,14 @@ void DropDeletesWithoutResize(CommonFields& common,
slot_ptr = PrevSlot(slot_ptr, slot_size);
}
}
+ // Prepare insert for the new element.
+ PrepareInsertCommon(common);
ResetGrowthLeft(common);
+ FindInfo find_info = find_first_non_full(common, new_hash);
+ SetCtrlInLargeTable(common, find_info.offset, H2(new_hash), slot_size);
+ common.infoz().RecordInsert(new_hash, find_info.probe_length);
common.infoz().RecordRehash(total_probe_length);
+ return find_info.offset;
}
static bool WasNeverFull(CommonFields& c, size_t index) {
@@ -319,10 +427,126 @@ static bool WasNeverFull(CommonFields& c, size_t index) {
Group::kWidth;
}
+// Updates the control bytes to indicate a completely empty table such that all
+// control bytes are kEmpty except for the kSentinel byte.
+void ResetCtrl(CommonFields& common, size_t slot_size) {
+ const size_t capacity = common.capacity();
+ ctrl_t* ctrl = common.control();
+ static constexpr size_t kTwoGroupCapacity = 2 * Group::kWidth - 1;
+ if (ABSL_PREDICT_TRUE(capacity <= kTwoGroupCapacity)) {
+ std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
+ std::memset(ctrl + capacity, static_cast<int8_t>(ctrl_t::kEmpty),
+ Group::kWidth);
+ if (capacity == kTwoGroupCapacity) {
+ std::memset(ctrl + Group::kWidth, static_cast<int8_t>(ctrl_t::kEmpty),
+ Group::kWidth);
+ }
+ } else {
+ std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
+ capacity + 1 + NumClonedBytes());
+ }
+ ctrl[capacity] = ctrl_t::kSentinel;
+ SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
+}
+
+// Initializes control bytes for single element table.
+// Capacity of the table must be 1.
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline void InitializeSingleElementControlBytes(
+ uint64_t h2, ctrl_t* new_ctrl) {
+ static constexpr uint64_t kEmptyXorSentinel =
+ static_cast<uint8_t>(ctrl_t::kEmpty) ^
+ static_cast<uint8_t>(ctrl_t::kSentinel);
+ static constexpr uint64_t kEmpty64 = static_cast<uint8_t>(ctrl_t::kEmpty);
+ // The first 8 bytes, where present slot positions are replaced with 0.
+ static constexpr uint64_t kFirstCtrlBytesWithZeroes =
+ k8EmptyBytes ^ kEmpty64 ^ (kEmptyXorSentinel << 8) ^ (kEmpty64 << 16);
+
+ // Fill the original 0th and mirrored 2nd bytes with the hash.
+ // Result will look like:
+ // HSHEEEEE
+ // Where H = h2, E = kEmpty, S = kSentinel.
+ const uint64_t first_ctrl_bytes =
+ (h2 | kFirstCtrlBytesWithZeroes) | (h2 << 16);
+ // Fill last bytes with kEmpty.
+ std::memset(new_ctrl + 1, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
+ // Overwrite the first 3 bytes with HSH. Other bytes will not be changed.
+ absl::little_endian::Store64(new_ctrl, first_ctrl_bytes);
+}
+
+// Initializes control bytes for growing after SOO to the next capacity.
+// `soo_ctrl` is placed in the position `SooSlotIndex()`.
+// `new_hash` is placed in the position `new_offset`.
+// The table must be non-empty SOO.
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline void
+InitializeThreeElementsControlBytesAfterSoo(ctrl_t soo_ctrl, size_t new_hash,
+ size_t new_offset,
+ ctrl_t* new_ctrl) {
+ static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
+ static_assert(kNewCapacity == 3);
+ static_assert(is_single_group(kNewCapacity));
+ static_assert(SooSlotIndex() == 1);
+ ABSL_SWISSTABLE_ASSERT(new_offset == 0 || new_offset == 2);
+
+ static constexpr uint64_t kEmptyXorSentinel =
+ static_cast<uint8_t>(ctrl_t::kEmpty) ^
+ static_cast<uint8_t>(ctrl_t::kSentinel);
+ static constexpr uint64_t kEmpty64 = static_cast<uint8_t>(ctrl_t::kEmpty);
+ static constexpr size_t kMirroredSooSlotIndex =
+ SooSlotIndex() + kNewCapacity + 1;
+ // The first 8 bytes, where SOO slot original and mirrored positions are
+ // replaced with 0.
+ // Result will look like: E0ESE0EE
+ static constexpr uint64_t kFirstCtrlBytesWithZeroes =
+ k8EmptyBytes ^ (kEmpty64 << (8 * SooSlotIndex())) ^
+ (kEmptyXorSentinel << (8 * kNewCapacity)) ^
+ (kEmpty64 << (8 * kMirroredSooSlotIndex));
+
+ const uint64_t soo_h2 = static_cast<uint64_t>(soo_ctrl);
+ const uint64_t new_h2_xor_empty = static_cast<uint64_t>(
+ H2(new_hash) ^ static_cast<uint8_t>(ctrl_t::kEmpty));
+ // Fill the original and mirrored bytes for SOO slot.
+ // Result will look like:
+ // EHESEHEE
+ // Where H = soo_h2, E = kEmpty, S = kSentinel.
+ uint64_t first_ctrl_bytes =
+ ((soo_h2 << (8 * SooSlotIndex())) | kFirstCtrlBytesWithZeroes) |
+ (soo_h2 << (8 * kMirroredSooSlotIndex));
+ // Replace original and mirrored empty bytes for the new position.
+ // Result for new_offset 0 will look like:
+ // NHESNHEE
+ // Where H = soo_h2, N = H2(new_hash), E = kEmpty, S = kSentinel.
+ // Result for new_offset 2 will look like:
+ // EHNSEHNE
+ first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_offset));
+ size_t new_mirrored_offset = new_offset + kNewCapacity + 1;
+ first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_mirrored_offset));
+
+ // Fill last bytes with kEmpty.
+ std::memset(new_ctrl + kNewCapacity, static_cast<int8_t>(ctrl_t::kEmpty),
+ Group::kWidth);
+ // Overwrite the first 8 bytes with first_ctrl_bytes.
+ absl::little_endian::Store64(new_ctrl, first_ctrl_bytes);
+
+ // Example for group size 16:
+ // new_ctrl after 1st memset = ???EEEEEEEEEEEEEEEE
+ // new_offset 0:
+ // new_ctrl after 2nd store = NHESNHEEEEEEEEEEEEE
+ // new_offset 2:
+ // new_ctrl after 2nd store = EHNSEHNEEEEEEEEEEEE
+
+ // Example for group size 8:
+ // new_ctrl after 1st memset = ???EEEEEEEE
+ // new_offset 0:
+ // new_ctrl after 2nd store = NHESNHEEEEE
+ // new_offset 2:
+ // new_ctrl after 2nd store = EHNSEHNEEEE
+}
+
} // namespace
void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
- assert(IsFull(c.control()[index]) && "erasing a dangling iterator");
+ ABSL_SWISSTABLE_ASSERT(IsFull(c.control()[index]) &&
+ "erasing a dangling iterator");
c.decrement_size();
c.infoz().RecordErase();
@@ -333,14 +557,15 @@ void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
}
c.growth_info().OverwriteFullAsDeleted();
- SetCtrl(c, index, ctrl_t::kDeleted, slot_size);
+ SetCtrlInLargeTable(c, index, ctrl_t::kDeleted, slot_size);
}
-void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
+void ClearBackingArray(CommonFields& c,
+ const PolicyFunctions& __restrict policy, void* alloc,
bool reuse, bool soo_enabled) {
- c.set_size(0);
if (reuse) {
- assert(!soo_enabled || c.capacity() > SooCapacity());
+ c.set_size_to_zero();
+ ABSL_SWISSTABLE_ASSERT(!soo_enabled || c.capacity() > SooCapacity());
ResetCtrl(c, policy.slot_size);
ResetGrowthLeft(c);
c.infoz().RecordStorageChanged(0, c.capacity());
@@ -349,17 +574,224 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
// infoz.
c.infoz().RecordClearedReservation();
c.infoz().RecordStorageChanged(0, soo_enabled ? SooCapacity() : 0);
- (*policy.dealloc)(c, policy);
+ c.infoz().Unregister();
+ (*policy.dealloc)(alloc, c.capacity(), c.control(), policy.slot_size,
+ policy.slot_align, c.has_infoz());
c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{non_soo_tag_t{}};
}
}
-void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
- ctrl_t* __restrict new_ctrl, size_t new_capacity) const {
- assert(is_single_group(new_capacity));
+namespace {
+
+enum class ResizeNonSooMode {
+ kGuaranteedEmpty,
+ kGuaranteedAllocated,
+};
+
+// Iterates over full slots in old table, finds new positions for them and
+// transfers the slots.
+// This function is used for reserving or rehashing non-empty tables.
+// This use case is rare so the function is type erased.
+// Returns the total probe length.
+size_t FindNewPositionsAndTransferSlots(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ ctrl_t* old_ctrl, void* old_slots, size_t old_capacity) {
+ void* new_slots = common.slot_array();
+ const void* hash_fn = policy.hash_fn(common);
+ const size_t slot_size = policy.slot_size;
+
+ const auto insert_slot = [&](void* slot) {
+ size_t hash = policy.hash_slot(hash_fn, slot);
+ auto target = find_first_non_full(common, hash);
+ SetCtrl(common, target.offset, H2(hash), slot_size);
+ policy.transfer_n(&common, SlotAddress(new_slots, target.offset, slot_size),
+ slot, 1);
+ return target.probe_length;
+ };
+ size_t total_probe_length = 0;
+ for (size_t i = 0; i < old_capacity; ++i) {
+ if (IsFull(old_ctrl[i])) {
+ total_probe_length += insert_slot(old_slots);
+ }
+ old_slots = NextSlot(old_slots, slot_size);
+ }
+ return total_probe_length;
+}
+
+template <ResizeNonSooMode kMode>
+void ResizeNonSooImpl(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ size_t new_capacity, HashtablezInfoHandle infoz) {
+ ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
+ ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
+
+ const size_t old_capacity = common.capacity();
+ [[maybe_unused]] ctrl_t* old_ctrl = common.control();
+ [[maybe_unused]] void* old_slots = common.slot_array();
+
+ const size_t slot_size = policy.slot_size;
+ const size_t slot_align = policy.slot_align;
+ const bool has_infoz = infoz.IsSampled();
+
+ common.set_capacity(new_capacity);
+ RawHashSetLayout layout(new_capacity, slot_size, slot_align, has_infoz);
+ void* alloc = policy.get_char_alloc(common);
+ char* mem = static_cast<char*>(policy.alloc(alloc, layout.alloc_size()));
+ const GenerationType old_generation = common.generation();
+ common.set_generation_ptr(
+ reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
+ common.set_generation(NextGeneration(old_generation));
+
+ ctrl_t* new_ctrl = reinterpret_cast<ctrl_t*>(mem + layout.control_offset());
+ common.set_control</*kGenerateSeed=*/true>(new_ctrl);
+ common.set_slots(mem + layout.slot_offset());
+
+ size_t total_probe_length = 0;
+ ResetCtrl(common, slot_size);
+ ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedEmpty ||
+ old_capacity == policy.soo_capacity());
+ ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedAllocated ||
+ old_capacity > 0);
+ if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
+ total_probe_length = FindNewPositionsAndTransferSlots(
+ common, policy, old_ctrl, old_slots, old_capacity);
+ (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
+ has_infoz);
+ ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
+ common.size());
+ } else {
+ GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(
+ CapacityToGrowth(new_capacity));
+ }
+
+ if (has_infoz) {
+ common.set_has_infoz();
+ infoz.RecordStorageChanged(common.size(), new_capacity);
+ infoz.RecordRehash(total_probe_length);
+ common.set_infoz(infoz);
+ }
+}
+
+void ResizeEmptyNonAllocatedTableImpl(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ size_t new_capacity, bool force_infoz) {
+ ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
+ ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
+ ABSL_SWISSTABLE_ASSERT(!force_infoz || policy.soo_enabled);
+ ABSL_SWISSTABLE_ASSERT(common.capacity() <= policy.soo_capacity());
+ ABSL_SWISSTABLE_ASSERT(common.empty());
+ const size_t slot_size = policy.slot_size;
+ HashtablezInfoHandle infoz;
+ const bool should_sample =
+ policy.is_hashtablez_eligible && (force_infoz || ShouldSampleNextTable());
+ if (ABSL_PREDICT_FALSE(should_sample)) {
+ infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
+ policy.soo_capacity());
+ }
+ ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedEmpty>(common, policy,
+ new_capacity, infoz);
+}
+
+// If the table was SOO, initializes new control bytes and transfers slot.
+// After transferring the slot, sets control and slots in CommonFields.
+// It is rare to resize an SOO table with one element to a large size.
+// Requires: `c` contains SOO data.
+void InsertOldSooSlotAndInitializeControlBytes(
+ CommonFields& c, const PolicyFunctions& __restrict policy, size_t hash,
+ ctrl_t* new_ctrl, void* new_slots) {
+ ABSL_SWISSTABLE_ASSERT(c.size() == policy.soo_capacity());
+ ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);
+ size_t new_capacity = c.capacity();
+
+ c.generate_new_seed();
+ size_t offset = probe(c.seed(), new_capacity, hash).offset();
+ offset = offset == new_capacity ? 0 : offset;
+ SanitizerPoisonMemoryRegion(new_slots, policy.slot_size * new_capacity);
+ void* target_slot = SlotAddress(new_slots, offset, policy.slot_size);
+ SanitizerUnpoisonMemoryRegion(target_slot, policy.slot_size);
+ policy.transfer_n(&c, target_slot, c.soo_data(), 1);
+ c.set_control</*kGenerateSeed=*/false>(new_ctrl);
+ c.set_slots(new_slots);
+ ResetCtrl(c, policy.slot_size);
+ SetCtrl(c, offset, H2(hash), policy.slot_size);
+}
+
+enum class ResizeFullSooTableSamplingMode {
+ kNoSampling,
+ // Force sampling. If the table was still not sampled, do not resize.
+ kForceSampleNoResizeIfUnsampled,
+};
+
+void AssertSoo([[maybe_unused]] CommonFields& common,
+ [[maybe_unused]] const PolicyFunctions& policy) {
+ ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);
+ ABSL_SWISSTABLE_ASSERT(common.capacity() == policy.soo_capacity());
+}
+void AssertFullSoo([[maybe_unused]] CommonFields& common,
+ [[maybe_unused]] const PolicyFunctions& policy) {
+ AssertSoo(common, policy);
+ ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
+}
+
+void ResizeFullSooTable(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ size_t new_capacity,
+ ResizeFullSooTableSamplingMode sampling_mode) {
+ AssertFullSoo(common, policy);
+ const size_t slot_size = policy.slot_size;
+ const size_t slot_align = policy.slot_align;
+
+ HashtablezInfoHandle infoz;
+ if (sampling_mode ==
+ ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled) {
+ if (ABSL_PREDICT_FALSE(policy.is_hashtablez_eligible)) {
+ infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
+ policy.soo_capacity());
+ }
+
+ if (!infoz.IsSampled()) {
+ return;
+ }
+ }
+
+ const bool has_infoz = infoz.IsSampled();
+
+ common.set_capacity(new_capacity);
+
+ RawHashSetLayout layout(new_capacity, slot_size, slot_align, has_infoz);
+ void* alloc = policy.get_char_alloc(common);
+ char* mem = static_cast<char*>(policy.alloc(alloc, layout.alloc_size()));
+ const GenerationType old_generation = common.generation();
+ common.set_generation_ptr(
+ reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
+ common.set_generation(NextGeneration(old_generation));
+
+ // We do not set control and slots in CommonFields yet to avoid overriding
+ // SOO data.
+ ctrl_t* new_ctrl = reinterpret_cast<ctrl_t*>(mem + layout.control_offset());
+ void* new_slots = mem + layout.slot_offset();
+
+ const size_t soo_slot_hash =
+ policy.hash_slot(policy.hash_fn(common), common.soo_data());
+
+ InsertOldSooSlotAndInitializeControlBytes(common, policy, soo_slot_hash,
+ new_ctrl, new_slots);
+ ResetGrowthLeft(common);
+ if (has_infoz) {
+ common.set_has_infoz();
+ common.set_infoz(infoz);
+ infoz.RecordStorageChanged(common.size(), new_capacity);
+ }
+}
+
+void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* __restrict old_ctrl,
+ size_t old_capacity,
+ ctrl_t* __restrict new_ctrl,
+ size_t new_capacity) {
+ ABSL_SWISSTABLE_ASSERT(is_single_group(new_capacity));
constexpr size_t kHalfWidth = Group::kWidth / 2;
- ABSL_ASSUME(old_capacity_ < kHalfWidth);
- ABSL_ASSUME(old_capacity_ > 0);
+ ABSL_ASSUME(old_capacity < kHalfWidth);
+ ABSL_ASSUME(old_capacity > 0);
static_assert(Group::kWidth == 8 || Group::kWidth == 16,
"Group size is not supported.");
@@ -373,8 +805,7 @@ void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
// Example:
// old_ctrl = 012S012EEEEEEEEE...
// copied_bytes = S012EEEE
- uint64_t copied_bytes =
- absl::little_endian::Load64(old_ctrl() + old_capacity_);
+ uint64_t copied_bytes = absl::little_endian::Load64(old_ctrl + old_capacity);
// We change the sentinel byte to kEmpty before storing to both the start of
// the new_ctrl, and past the end of the new_ctrl later for the new cloned
@@ -395,7 +826,8 @@ void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
if (Group::kWidth == 8) {
// With group size 8, we can grow with two write operations.
- assert(old_capacity_ < 8 && "old_capacity_ is too large for group size 8");
+ ABSL_SWISSTABLE_ASSERT(old_capacity < 8 &&
+ "old_capacity is too large for group size 8");
absl::little_endian::Store64(new_ctrl, copied_bytes);
static constexpr uint64_t kSentinal64 =
@@ -421,7 +853,7 @@ void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
return;
}
- assert(Group::kWidth == 16);
+ ABSL_SWISSTABLE_ASSERT(Group::kWidth == 16);
// Fill the second half of the main control bytes with kEmpty.
// For small capacity that may write into mirrored control bytes.
@@ -463,7 +895,6 @@ void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
// >!
// new_ctrl after 2nd store = E012EEESE012EEEEEEEEEEE
-
// Example for growth capacity 7->15:
// old_ctrl = 0123456S0123456EEEEEEEE
// new_ctrl at the end = E0123456EEEEEEESE0123456EEEEEEE
@@ -478,58 +909,428 @@ void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
// new_ctrl after 2nd store = E0123456EEEEEEESE0123456EEEEEEE
}
-void HashSetResizeHelper::InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
- size_t new_capacity) {
- assert(is_single_group(new_capacity));
- std::memset(new_ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
- NumControlBytes(new_capacity));
- assert(HashSetResizeHelper::SooSlotIndex() == 1);
- // This allows us to avoid branching on had_soo_slot_.
- assert(had_soo_slot_ || h2 == ctrl_t::kEmpty);
- new_ctrl[1] = new_ctrl[new_capacity + 2] = h2;
+// Size of the buffer we allocate on stack for storing probed elements in
+// GrowToNextCapacity algorithm.
+constexpr size_t kProbedElementsBufferSize = 512;
+
+// Decodes information about probed elements from contiguous memory.
+// Finds new position for each element and transfers it to the new slots.
+// Returns the total probe length.
+template <typename ProbedItem>
+ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertImpl(
+ CommonFields& c, const PolicyFunctions& __restrict policy,
+ const ProbedItem* start, const ProbedItem* end, void* old_slots) {
+ const size_t new_capacity = c.capacity();
+
+ void* new_slots = c.slot_array();
+ ctrl_t* new_ctrl = c.control();
+ size_t total_probe_length = 0;
+
+ const size_t slot_size = policy.slot_size;
+ auto transfer_n = policy.transfer_n;
+
+ for (; start < end; ++start) {
+ const FindInfo target = find_first_non_full_from_h1(
+ new_ctrl, static_cast<size_t>(start->h1), new_capacity);
+ total_probe_length += target.probe_length;
+ const size_t old_index = static_cast<size_t>(start->source_offset);
+ const size_t new_i = target.offset;
+ ABSL_SWISSTABLE_ASSERT(old_index < new_capacity / 2);
+ ABSL_SWISSTABLE_ASSERT(new_i < new_capacity);
+ ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_i]));
+ void* src_slot = SlotAddress(old_slots, old_index, slot_size);
+ void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
+ SanitizerUnpoisonMemoryRegion(dst_slot, slot_size);
+ transfer_n(&c, dst_slot, src_slot, 1);
+ SetCtrlInLargeTable(c, new_i, static_cast<h2_t>(start->h2), slot_size);
+ }
+ return total_probe_length;
+}
+
+// Sentinel value for the start of marked elements.
+// Signals that there are no marked elements.
+constexpr size_t kNoMarkedElementsSentinel = ~size_t{};
+
+// Process probed elements that did not fit into available buffers.
+// We marked them in control bytes as kSentinel.
+// Hash recomputation and full probing is done here.
+// This use case should be extremely rare.
+ABSL_ATTRIBUTE_NOINLINE size_t ProcessProbedMarkedElements(
+ CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl,
+ void* old_slots, size_t start) {
+ size_t old_capacity = PreviousCapacity(c.capacity());
+ const size_t slot_size = policy.slot_size;
+ void* new_slots = c.slot_array();
+ size_t total_probe_length = 0;
+ const void* hash_fn = policy.hash_fn(c);
+ auto hash_slot = policy.hash_slot;
+ auto transfer_n = policy.transfer_n;
+ for (size_t old_index = start; old_index < old_capacity; ++old_index) {
+ if (old_ctrl[old_index] != ctrl_t::kSentinel) {
+ continue;
+ }
+ void* src_slot = SlotAddress(old_slots, old_index, slot_size);
+ const size_t hash = hash_slot(hash_fn, src_slot);
+ const FindInfo target = find_first_non_full(c, hash);
+ total_probe_length += target.probe_length;
+ const size_t new_i = target.offset;
+ void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
+ SetCtrlInLargeTable(c, new_i, H2(hash), slot_size);
+ transfer_n(&c, dst_slot, src_slot, 1);
+ }
+ return total_probe_length;
+}
+
+// The largest old capacity for which it is guaranteed that all probed elements
+// fit in ProbedItemEncoder's local buffer.
+// For such tables, `encode_probed_element` is trivial.
+constexpr size_t kMaxLocalBufferOldCapacity =
+ kProbedElementsBufferSize / sizeof(ProbedItem4Bytes) - 1;
+static_assert(IsValidCapacity(kMaxLocalBufferOldCapacity));
+constexpr size_t kMaxLocalBufferNewCapacity =
+ NextCapacity(kMaxLocalBufferOldCapacity);
+static_assert(kMaxLocalBufferNewCapacity <= ProbedItem4Bytes::kMaxNewCapacity);
+static_assert(NextCapacity(kMaxLocalBufferNewCapacity) <=
+ ProbedItem4Bytes::kMaxNewCapacity);
+
+// Initializes mirrored control bytes after
+// transfer_unprobed_elements_to_next_capacity.
+void InitializeMirroredControlBytes(ctrl_t* new_ctrl, size_t new_capacity) {
+ std::memcpy(new_ctrl + new_capacity,
+ // We own GrowthInfo just before control bytes. So it is ok
+ // to read one byte from it.
+ new_ctrl - 1, Group::kWidth);
new_ctrl[new_capacity] = ctrl_t::kSentinel;
}
-void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots(
- void* new_slots, size_t slot_size) const {
- ABSL_ASSUME(old_capacity_ > 0);
- SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
- std::memcpy(SlotAddress(new_slots, 1, slot_size), old_slots(),
- slot_size * old_capacity_);
+// Encodes probed elements into available memory.
+// At first, a local (on stack) buffer is used. The size of the buffer is
+// kProbedElementsBufferSize bytes.
+// When the local buffer is full, we switch to `control_` buffer. We are allowed
+// to overwrite `control_` buffer till the `source_offset` byte. In case we have
+// no space in `control_` buffer, we fallback to a naive algorithm for all the
+// rest of the probed elements. We mark elements as kSentinel in control bytes
+// and later process them fully. See ProcessMarkedElements for details. It
+// should be extremely rare.
+template <typename ProbedItemType,
+ // If true, we only use the local buffer and never switch to the
+ // control buffer.
+ bool kGuaranteedFitToBuffer = false>
+class ProbedItemEncoder {
+ public:
+ using ProbedItem = ProbedItemType;
+ explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}
+
+ // Encode item into the best available location.
+ void EncodeItem(ProbedItem item) {
+ if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) {
+ return ProcessEncodeWithOverflow(item);
+ }
+ ABSL_SWISSTABLE_ASSERT(pos_ < end_);
+ *pos_ = item;
+ ++pos_;
+ }
+
+ // Decodes information about probed elements from all available sources.
+ // Finds new position for each element and transfers it to the new slots.
+ // Returns the total probe length.
+ size_t DecodeAndInsertToTable(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ void* old_slots) const {
+ if (pos_ == buffer_) {
+ return 0;
+ }
+ if constexpr (kGuaranteedFitToBuffer) {
+ return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);
+ }
+ size_t total_probe_length = DecodeAndInsertImpl(
+ common, policy, buffer_,
+ local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);
+ if (!local_buffer_full_) {
+ return total_probe_length;
+ }
+ total_probe_length +=
+ DecodeAndInsertToTableOverflow(common, policy, old_slots);
+ return total_probe_length;
+ }
+
+ private:
+ static ProbedItem* AlignToNextItem(void* ptr) {
+ return reinterpret_cast<ProbedItem*>(AlignUpTo(
+ reinterpret_cast<uintptr_t>(ptr), alignof(ProbedItem)));
+ }
+
+ ProbedItem* OverflowBufferStart() const {
+ // We reuse GrowthInfo memory as well.
+ return AlignToNextItem(control_ - ControlOffset(/*has_infoz=*/false));
+ }
+
+ // Encodes item when previously allocated buffer is full.
+ // At first that happens when local buffer is full.
+ // We switch from the local buffer to the control buffer.
+ // Every time this function is called, the available buffer is extended till
+ // `item.source_offset` byte in the control buffer.
+ // After the buffer is extended, this function wouldn't be called till the
+ // buffer is exhausted.
+ //
+ // If there's no space in the control buffer, we fallback to naive algorithm
+ // and mark probed elements as kSentinel in the control buffer. In this case,
+ // we will call this function for every subsequent probed element.
+ ABSL_ATTRIBUTE_NOINLINE void ProcessEncodeWithOverflow(ProbedItem item) {
+ if (!local_buffer_full_) {
+ local_buffer_full_ = true;
+ pos_ = OverflowBufferStart();
+ }
+ const size_t source_offset = static_cast<size_t>(item.source_offset);
+ // We are in fallback mode so we can't reuse control buffer anymore.
+ // Probed elements are marked as kSentinel in the control buffer.
+ if (ABSL_PREDICT_FALSE(marked_elements_starting_position_ !=
+ kNoMarkedElementsSentinel)) {
+ control_[source_offset] = ctrl_t::kSentinel;
+ return;
+ }
+ // Refresh the end pointer to the new available position.
+ // Invariant: if pos < end, then we have at least sizeof(ProbedItem) bytes
+ // to write.
+ end_ = control_ + source_offset + 1 - sizeof(ProbedItem);
+ if (ABSL_PREDICT_TRUE(pos_ < end_)) {
+ *pos_ = item;
+ ++pos_;
+ return;
+ }
+ control_[source_offset] = ctrl_t::kSentinel;
+ marked_elements_starting_position_ = source_offset;
+ // Now we will always fall down to `ProcessEncodeWithOverflow`.
+ ABSL_SWISSTABLE_ASSERT(pos_ >= end_);
+ }
+
+ // Decodes information about probed elements from control buffer and processes
+ // marked elements.
+ // Finds new position for each element and transfers it to the new slots.
+ // Returns the total probe length.
+ ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertToTableOverflow(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ void* old_slots) const {
+ ABSL_SWISSTABLE_ASSERT(local_buffer_full_ &&
+ "must not be called when local buffer is not full");
+ size_t total_probe_length = DecodeAndInsertImpl(
+ common, policy, OverflowBufferStart(), pos_, old_slots);
+ if (ABSL_PREDICT_TRUE(marked_elements_starting_position_ ==
+ kNoMarkedElementsSentinel)) {
+ return total_probe_length;
+ }
+ total_probe_length +=
+ ProcessProbedMarkedElements(common, policy, control_, old_slots,
+ marked_elements_starting_position_);
+ return total_probe_length;
+ }
+
+ static constexpr size_t kBufferSize =
+ kProbedElementsBufferSize / sizeof(ProbedItem);
+ ProbedItem buffer_[kBufferSize];
+ // If local_buffer_full_ is false, then pos_/end_ are in the local buffer,
+ // otherwise, they're in the overflow buffer.
+ ProbedItem* pos_ = buffer_;
+ const void* end_ = buffer_ + kBufferSize;
+ ctrl_t* const control_;
+ size_t marked_elements_starting_position_ = kNoMarkedElementsSentinel;
+ bool local_buffer_full_ = false;
+};
+
+// Grows to next capacity with specified encoder type.
+// Encoder is used to store probed elements that are processed later.
+// Different encoder is used depending on the capacity of the table.
+// Returns total probe length.
+template <typename Encoder>
+size_t GrowToNextCapacity(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ ctrl_t* old_ctrl, void* old_slots) {
+ using ProbedItem = typename Encoder::ProbedItem;
+ ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);
+ Encoder encoder(old_ctrl);
+ policy.transfer_unprobed_elements_to_next_capacity(
+ common, old_ctrl, old_slots, &encoder,
+ [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
+ auto encoder_ptr = static_cast<Encoder*>(probed_storage);
+ encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
+ });
+ InitializeMirroredControlBytes(common.control(), common.capacity());
+ return encoder.DecodeAndInsertToTable(common, policy, old_slots);
}
-void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable(
- CommonFields& c, size_t slot_size) {
- assert(old_capacity_ < Group::kWidth / 2);
- assert(is_single_group(c.capacity()));
- assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
+// Grows to next capacity for relatively small tables so that even if all
+// elements are probed, we don't need to overflow the local buffer.
+// Returns total probe length.
+size_t GrowToNextCapacityThatFitsInLocalBuffer(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ ctrl_t* old_ctrl, void* old_slots) {
+ ABSL_SWISSTABLE_ASSERT(common.capacity() <= kMaxLocalBufferNewCapacity);
+ return GrowToNextCapacity<
+ ProbedItemEncoder<ProbedItem4Bytes, /*kGuaranteedFitToBuffer=*/true>>(
+ common, policy, old_ctrl, old_slots);
+}
- GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
- GrowIntoSingleGroupShuffleTransferableSlots(c.slot_array(), slot_size);
+// Grows to next capacity with different encodings. Returns total probe length.
+// These functions are useful to simplify profile analysis.
+size_t GrowToNextCapacity4BytesEncoder(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ ctrl_t* old_ctrl, void* old_slots) {
+ return GrowToNextCapacity<ProbedItemEncoder<ProbedItem4Bytes>>(
+ common, policy, old_ctrl, old_slots);
+}
+size_t GrowToNextCapacity8BytesEncoder(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ ctrl_t* old_ctrl, void* old_slots) {
+ return GrowToNextCapacity<ProbedItemEncoder<ProbedItem8Bytes>>(
+ common, policy, old_ctrl, old_slots);
+}
+size_t GrowToNextCapacity16BytesEncoder(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ ctrl_t* old_ctrl, void* old_slots) {
+ return GrowToNextCapacity<ProbedItemEncoder<ProbedItem16Bytes>>(
+ common, policy, old_ctrl, old_slots);
+}
- // We poison since GrowIntoSingleGroupShuffleTransferableSlots
- // may leave empty slots unpoisoned.
- PoisonSingleGroupEmptySlots(c, slot_size);
+// Grows to next capacity for tables with relatively large capacity so that we
+// can't guarantee that all probed elements fit in the local buffer. Returns
+// total probe length.
+size_t GrowToNextCapacityOverflowLocalBuffer(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ ctrl_t* old_ctrl, void* old_slots) {
+ const size_t new_capacity = common.capacity();
+ if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem4Bytes::kMaxNewCapacity)) {
+ return GrowToNextCapacity4BytesEncoder(common, policy, old_ctrl, old_slots);
+ }
+ if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem8Bytes::kMaxNewCapacity)) {
+ return GrowToNextCapacity8BytesEncoder(common, policy, old_ctrl, old_slots);
+ }
+ // 16 bytes encoding supports the maximum swisstable capacity.
+ return GrowToNextCapacity16BytesEncoder(common, policy, old_ctrl, old_slots);
}
-void HashSetResizeHelper::TransferSlotAfterSoo(CommonFields& c,
- size_t slot_size) {
- assert(was_soo_);
- assert(had_soo_slot_);
- assert(is_single_group(c.capacity()));
- std::memcpy(SlotAddress(c.slot_array(), SooSlotIndex(), slot_size),
- old_soo_data(), slot_size);
- PoisonSingleGroupEmptySlots(c, slot_size);
+// Dispatches to the appropriate `GrowToNextCapacity*` function based on the
+// capacity of the table. Returns total probe length.
+ABSL_ATTRIBUTE_NOINLINE
+size_t GrowToNextCapacityDispatch(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ ctrl_t* old_ctrl, void* old_slots) {
+ const size_t new_capacity = common.capacity();
+ if (ABSL_PREDICT_TRUE(new_capacity <= kMaxLocalBufferNewCapacity)) {
+ return GrowToNextCapacityThatFitsInLocalBuffer(common, policy, old_ctrl,
+ old_slots);
+ } else {
+ return GrowToNextCapacityOverflowLocalBuffer(common, policy, old_ctrl,
+ old_slots);
+ }
}
-namespace {
+// Grows to next capacity and prepares insert for the given new_hash.
+// Returns the offset of the new element.
+size_t GrowToNextCapacityAndPrepareInsert(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ size_t new_hash) {
+ ABSL_SWISSTABLE_ASSERT(common.growth_left() == 0);
+ const size_t old_capacity = common.capacity();
+ ABSL_SWISSTABLE_ASSERT(old_capacity == 0 ||
+ old_capacity > policy.soo_capacity());
+
+ const size_t new_capacity = NextCapacity(old_capacity);
+ ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
+ ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
+
+ ctrl_t* old_ctrl = common.control();
+ void* old_slots = common.slot_array();
+
+ common.set_capacity(new_capacity);
+ const size_t slot_size = policy.slot_size;
+ const size_t slot_align = policy.slot_align;
+ HashtablezInfoHandle infoz;
+ if (old_capacity > 0) {
+ infoz = common.infoz();
+ } else {
+ const bool should_sample =
+ policy.is_hashtablez_eligible && ShouldSampleNextTable();
+ if (ABSL_PREDICT_FALSE(should_sample)) {
+ infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
+ policy.soo_capacity());
+ }
+ }
+ const bool has_infoz = infoz.IsSampled();
+
+ RawHashSetLayout layout(new_capacity, slot_size, slot_align, has_infoz);
+ void* alloc = policy.get_char_alloc(common);
+ char* mem = static_cast<char*>(policy.alloc(alloc, layout.alloc_size()));
+ const GenerationType old_generation = common.generation();
+ common.set_generation_ptr(
+ reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
+ common.set_generation(NextGeneration(old_generation));
+
+ ctrl_t* new_ctrl = reinterpret_cast<ctrl_t*>(mem + layout.control_offset());
+ void* new_slots = mem + layout.slot_offset();
+ common.set_control</*kGenerateSeed=*/false>(new_ctrl);
+ common.set_slots(new_slots);
+ SanitizerPoisonMemoryRegion(new_slots, new_capacity * slot_size);
+
+ h2_t new_h2 = H2(new_hash);
+ size_t total_probe_length = 0;
+ FindInfo find_info;
+ if (old_capacity == 0) {
+ static_assert(NextCapacity(0) == 1);
+ InitializeSingleElementControlBytes(new_h2, new_ctrl);
+ common.generate_new_seed();
+ find_info = FindInfo{0, 0};
+ SanitizerUnpoisonMemoryRegion(new_slots, slot_size);
+ } else {
+ if (ABSL_PREDICT_TRUE(is_single_group(new_capacity))) {
+ GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl,
+ new_capacity);
+ // Single group tables have all slots full on resize. So we can transfer
+ // all slots without checking the control bytes.
+ ABSL_SWISSTABLE_ASSERT(common.size() == old_capacity);
+ auto* target = NextSlot(new_slots, slot_size);
+ SanitizerUnpoisonMemoryRegion(target, old_capacity * slot_size);
+ policy.transfer_n(&common, target, old_slots, old_capacity);
+ // We put the new element either at the beginning or at the end of the
+ // table with approximately equal probability.
+ size_t offset = SingleGroupTableH1(new_hash, common.seed()) & 1
+ ? 0
+ : new_capacity - 1;
+
+ ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset]));
+ SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size);
+ find_info = FindInfo{offset, 0};
+ } else {
+ total_probe_length =
+ GrowToNextCapacityDispatch(common, policy, old_ctrl, old_slots);
+ find_info = find_first_non_full(common, new_hash);
+ SetCtrlInLargeTable(common, find_info.offset, new_h2, policy.slot_size);
+ }
+ ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());
+ (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
+ has_infoz);
+ }
+ PrepareInsertCommon(common);
+ ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
+ common.size());
+
+ if (ABSL_PREDICT_FALSE(has_infoz)) {
+ common.set_has_infoz();
+ infoz.RecordStorageChanged(common.size() - 1, new_capacity);
+ infoz.RecordRehash(total_probe_length);
+ infoz.RecordInsert(new_hash, find_info.probe_length);
+ common.set_infoz(infoz);
+ }
+ return find_info.offset;
+}
// Called whenever the table needs to vacate empty slots either by removing
-// tombstones via rehash or growth.
+// tombstones via rehash or growth to next capacity.
ABSL_ATTRIBUTE_NOINLINE
-FindInfo FindInsertPositionWithGrowthOrRehash(CommonFields& common, size_t hash,
- const PolicyFunctions& policy) {
+size_t RehashOrGrowToNextCapacityAndPrepareInsert(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ size_t new_hash) {
const size_t cap = common.capacity();
+ ABSL_ASSUME(cap > 0);
if (cap > Group::kWidth &&
// Do these calculations in 64-bit to avoid overflow.
common.size() * uint64_t{32} <= cap * uint64_t{25}) {
@@ -574,97 +1375,467 @@ FindInfo FindInsertPositionWithGrowthOrRehash(CommonFields& common, size_t hash,
// 762 | 149836 0.37 13 | 148559 0.74 190
// 807 | 149736 0.39 14 | 151107 0.39 14
// 852 | 150204 0.42 15 | 151019 0.42 15
- DropDeletesWithoutResize(common, policy);
+ return DropDeletesWithoutResizeAndPrepareInsert(common, policy, new_hash);
} else {
// Otherwise grow the container.
- policy.resize(common, NextCapacity(cap), HashtablezInfoHandle{});
+ return GrowToNextCapacityAndPrepareInsert(common, policy, new_hash);
+ }
+}
+
+// Slow path for PrepareInsertNonSoo that is called when the table has deleted
+// slots or need to be resized or rehashed.
+size_t PrepareInsertNonSooSlow(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ size_t hash) {
+ const GrowthInfo growth_info = common.growth_info();
+ ABSL_SWISSTABLE_ASSERT(!growth_info.HasNoDeletedAndGrowthLeft());
+ if (ABSL_PREDICT_TRUE(growth_info.HasNoGrowthLeftAndNoDeleted())) {
+ // Table without deleted slots (>95% cases) that needs to be resized.
+ ABSL_SWISSTABLE_ASSERT(growth_info.HasNoDeleted() &&
+ growth_info.GetGrowthLeft() == 0);
+ return GrowToNextCapacityAndPrepareInsert(common, policy, hash);
+ }
+ if (ABSL_PREDICT_FALSE(growth_info.HasNoGrowthLeftAssumingMayHaveDeleted())) {
+ // Table with deleted slots that needs to be rehashed or resized.
+ return RehashOrGrowToNextCapacityAndPrepareInsert(common, policy, hash);
+ }
+ // Table with deleted slots that has space for the inserting element.
+ FindInfo target = find_first_non_full(common, hash);
+ PrepareInsertCommon(common);
+ common.growth_info().OverwriteControlAsFull(common.control()[target.offset]);
+ SetCtrlInLargeTable(common, target.offset, H2(hash), policy.slot_size);
+ common.infoz().RecordInsert(hash, target.probe_length);
+ return target.offset;
+}
+
+// Resizes empty non-allocated SOO table to NextCapacity(SooCapacity()),
+// forces the table to be sampled and prepares the insert.
+// SOO tables need to switch from SOO to heap in order to store the infoz.
+// Requires:
+// 1. `c.capacity() == SooCapacity()`.
+// 2. `c.empty()`.
+ABSL_ATTRIBUTE_NOINLINE size_t
+GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ size_t new_hash) {
+ ResizeEmptyNonAllocatedTableImpl(common, policy, NextCapacity(SooCapacity()),
+ /*force_infoz=*/true);
+ PrepareInsertCommon(common);
+ common.growth_info().OverwriteEmptyAsFull();
+ SetCtrlInSingleGroupTable(common, SooSlotIndex(), H2(new_hash),
+ policy.slot_size);
+ common.infoz().RecordInsert(new_hash, /*distance_from_desired=*/0);
+ return SooSlotIndex();
+}
+
+// Resizes empty non-allocated table to the capacity to fit new_size elements.
+// Requires:
+// 1. `c.capacity() == policy.soo_capacity()`.
+// 2. `c.empty()`.
+// 3. `new_size > policy.soo_capacity()`.
+// The table will be attempted to be sampled.
+void ReserveEmptyNonAllocatedTableToFitNewSize(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ size_t new_size) {
+ ValidateMaxSize(new_size, policy.slot_size);
+ ABSL_ASSUME(new_size > 0);
+ ResizeEmptyNonAllocatedTableImpl(common, policy, SizeToCapacity(new_size),
+ /*force_infoz=*/false);
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ common.infoz().RecordReservation(new_size);
+}
+
+// Type erased version of raw_hash_set::reserve for tables that have an
+// allocated backing array.
+//
+// Requires:
+// 1. `c.capacity() > policy.soo_capacity()` OR `!c.empty()`.
+// Reserving already allocated tables is considered to be a rare case.
+ABSL_ATTRIBUTE_NOINLINE void ReserveAllocatedTable(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ size_t new_size) {
+ const size_t cap = common.capacity();
+ ValidateMaxSize(new_size, policy.slot_size);
+ ABSL_ASSUME(new_size > 0);
+ const size_t new_capacity = SizeToCapacity(new_size);
+ if (cap == policy.soo_capacity()) {
+ ABSL_SWISSTABLE_ASSERT(!common.empty());
+ ResizeFullSooTable(common, policy, new_capacity,
+ ResizeFullSooTableSamplingMode::kNoSampling);
+ } else {
+ ABSL_SWISSTABLE_ASSERT(cap > policy.soo_capacity());
+ // TODO(b/382423690): consider using GrowToNextCapacity, when applicable.
+ ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
}
- // This function is typically called with tables containing deleted slots.
- // The table will be big and `FindFirstNonFullAfterResize` will always
- // fallback to `find_first_non_full`. So using `find_first_non_full` directly.
- return find_first_non_full(common, hash);
+ common.infoz().RecordReservation(new_size);
}
} // namespace
-const void* GetHashRefForEmptyHasher(const CommonFields& common) {
+void* GetRefForEmptyClass(CommonFields& common) {
// Empty base optimization typically make the empty base class address to be
// the same as the first address of the derived class object.
- // But we generally assume that for empty hasher we can return any valid
+ // But we generally assume that for empty classes we can return any valid
// pointer.
return &common;
}
-FindInfo HashSetResizeHelper::FindFirstNonFullAfterResize(const CommonFields& c,
- size_t old_capacity,
- size_t hash) {
- size_t new_capacity = c.capacity();
- if (!IsGrowingIntoSingleGroupApplicable(old_capacity, new_capacity)) {
- return find_first_non_full(c, hash);
+void ResizeAllocatedTableWithSeedChange(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ size_t new_capacity) {
+ ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedAllocated>(
+ common, policy, new_capacity, common.infoz());
+}
+
+void ReserveEmptyNonAllocatedTableToFitBucketCount(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ size_t bucket_count) {
+ size_t new_capacity = NormalizeCapacity(bucket_count);
+ ValidateMaxSize(CapacityToGrowth(new_capacity), policy.slot_size);
+ ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,
+ /*force_infoz=*/false);
+}
+
+// Resizes a full SOO table to the NextCapacity(SooCapacity()).
+template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>
+size_t GrowSooTableToNextCapacityAndPrepareInsert(
+ CommonFields& common, const PolicyFunctions& __restrict policy,
+ size_t new_hash, ctrl_t soo_slot_ctrl) {
+ AssertSoo(common, policy);
+ if (ABSL_PREDICT_FALSE(soo_slot_ctrl == ctrl_t::kEmpty)) {
+ // The table is empty, it is only used for forced sampling of SOO tables.
+ return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
+ common, policy, new_hash);
}
+ ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
+ static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
+ const size_t slot_size = policy.slot_size;
+ const size_t slot_align = policy.slot_align;
+ common.set_capacity(kNewCapacity);
+
+ // Since the table is not empty, it will not be sampled.
+ // The decision to sample was already made during the first insertion.
+ RawHashSetLayout layout(kNewCapacity, slot_size, slot_align,
+ /*has_infoz=*/false);
+ void* alloc = policy.get_char_alloc(common);
+ char* mem = static_cast<char*>(policy.alloc(alloc, layout.alloc_size()));
+ const GenerationType old_generation = common.generation();
+ common.set_generation_ptr(
+ reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
+ common.set_generation(NextGeneration(old_generation));
+
+ // We do not set control and slots in CommonFields yet to avoid overriding
+ // SOO data.
+ ctrl_t* new_ctrl = reinterpret_cast<ctrl_t*>(mem + layout.control_offset());
+ void* new_slots = mem + layout.slot_offset();
- // We put the new element either at the beginning or at the end of the table
- // with approximately equal probability.
- size_t offset =
- SingleGroupTableH1(hash, c.control()) & 1 ? 0 : new_capacity - 1;
+ PrepareInsertCommon(common);
+ ABSL_SWISSTABLE_ASSERT(common.size() == 2);
+ GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
+ common.generate_new_seed();
- assert(IsEmpty(c.control()[offset]));
- return FindInfo{offset, 0};
+ // After resize from capacity 1 to 3, we always have exactly the slot with
+ // index 1 occupied, so we need to insert either at index 0 or index 2.
+ static_assert(SooSlotIndex() == 1);
+ const size_t offset = SingleGroupTableH1(new_hash, common.seed()) & 2;
+ InitializeThreeElementsControlBytesAfterSoo(soo_slot_ctrl, new_hash, offset,
+ new_ctrl);
+
+ SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);
+ void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);
+ SanitizerUnpoisonMemoryRegion(target_slot, slot_size);
+ if constexpr (TransferUsesMemcpy) {
+ // Target slot is placed at index 1, but capacity is at
+ // minimum 3. So we are allowed to copy at least twice as much
+ // memory.
+ static_assert(SooSlotIndex() == 1);
+ static_assert(SooSlotMemcpySize > 0);
+ static_assert(SooSlotMemcpySize <= MaxSooSlotSize());
+ ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);
+ ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);
+ void* next_slot = SlotAddress(target_slot, 1, slot_size);
+ SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
+ std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);
+ SanitizerPoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
+ } else {
+ static_assert(SooSlotMemcpySize == 0);
+ policy.transfer_n(&common, target_slot, common.soo_data(), 1);
+ }
+ // Seed was already generated above.
+ common.set_control</*kGenerateSeed=*/false>(new_ctrl);
+ common.set_slots(new_slots);
+
+ common.infoz().RecordInsert(new_hash, /*distance_from_desired=*/0);
+ SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
+ slot_size);
+ return offset;
}
-size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
- const PolicyFunctions& policy) {
- // When there are no deleted slots in the table
- // and growth_left is positive, we can insert at the first
- // empty slot in the probe sequence (target).
- const bool use_target_hint =
- // Optimization is disabled when generations are enabled.
- // We have to rehash even sparse tables randomly in such mode.
- !SwisstableGenerationsEnabled() &&
- common.growth_info().HasNoDeletedAndGrowthLeft();
- if (ABSL_PREDICT_FALSE(!use_target_hint)) {
- // Notes about optimized mode when generations are disabled:
- // We do not enter this branch if table has no deleted slots
- // and growth_left is positive.
- // We enter this branch in the following cases listed in decreasing
- // frequency:
- // 1. Table without deleted slots (>95% cases) that needs to be resized.
- // 2. Table with deleted slots that has space for the inserting element.
- // 3. Table with deleted slots that needs to be rehashed or resized.
- if (ABSL_PREDICT_TRUE(common.growth_info().HasNoGrowthLeftAndNoDeleted())) {
- const size_t old_capacity = common.capacity();
- policy.resize(common, NextCapacity(old_capacity), HashtablezInfoHandle{});
- target = HashSetResizeHelper::FindFirstNonFullAfterResize(
- common, old_capacity, hash);
- } else {
- // Note: the table may have no deleted slots here when generations
- // are enabled.
- const bool rehash_for_bug_detection =
- common.should_rehash_for_bug_detection_on_insert();
- if (rehash_for_bug_detection) {
- // Move to a different heap allocation in order to detect bugs.
- const size_t cap = common.capacity();
- policy.resize(common,
- common.growth_left() > 0 ? cap : NextCapacity(cap),
- HashtablezInfoHandle{});
+void GrowFullSooTableToNextCapacityForceSampling(
+ CommonFields& common, const PolicyFunctions& __restrict policy) {
+ AssertFullSoo(common, policy);
+ ResizeFullSooTable(
+ common, policy, NextCapacity(SooCapacity()),
+ ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled);
+}
+
+void Rehash(CommonFields& common, const PolicyFunctions& __restrict policy,
+ size_t n) {
+ const size_t cap = common.capacity();
+
+ auto clear_backing_array = [&]() {
+ ClearBackingArray(common, policy, policy.get_char_alloc(common),
+ /*reuse=*/false, policy.soo_enabled);
+ };
+
+ const size_t slot_size = policy.slot_size;
+
+ if (n == 0) {
+ if (cap <= policy.soo_capacity()) return;
+ if (common.empty()) {
+ clear_backing_array();
+ return;
+ }
+ if (common.size() <= policy.soo_capacity()) {
+ // When the table is already sampled, we keep it sampled.
+ if (common.infoz().IsSampled()) {
+ static constexpr size_t kInitialSampledCapacity =
+ NextCapacity(SooCapacity());
+ if (cap > kInitialSampledCapacity) {
+ ResizeAllocatedTableWithSeedChange(common, policy,
+ kInitialSampledCapacity);
+ }
+ // This asserts that we didn't lose sampling coverage in `resize`.
+ ABSL_SWISSTABLE_ASSERT(common.infoz().IsSampled());
+ return;
}
- if (ABSL_PREDICT_TRUE(common.growth_left() > 0)) {
- target = find_first_non_full(common, hash);
+ ABSL_SWISSTABLE_ASSERT(slot_size <= sizeof(HeapOrSoo));
+ ABSL_SWISSTABLE_ASSERT(policy.slot_align <= alignof(HeapOrSoo));
+ HeapOrSoo tmp_slot(uninitialized_tag_t{});
+ size_t begin_offset = FindFirstFullSlot(0, cap, common.control());
+ policy.transfer_n(
+ &common, &tmp_slot,
+ SlotAddress(common.slot_array(), begin_offset, slot_size), 1);
+ clear_backing_array();
+ policy.transfer_n(&common, common.soo_data(), &tmp_slot, 1);
+ common.set_full_soo();
+ return;
+ }
+ }
+
+ ValidateMaxSize(n, policy.slot_size);
+ // bitor is a faster way of doing `max` here. We will round up to the next
+ // power-of-2-minus-1, so bitor is good enough.
+ const size_t new_capacity =
+ NormalizeCapacity(n | SizeToCapacity(common.size()));
+ // n == 0 unconditionally rehashes as per the standard.
+ if (n == 0 || new_capacity > cap) {
+ if (cap == policy.soo_capacity()) {
+ if (common.empty()) {
+ ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,
+ /*force_infoz=*/false);
} else {
- target = FindInsertPositionWithGrowthOrRehash(common, hash, policy);
+ ResizeFullSooTable(common, policy, new_capacity,
+ ResizeFullSooTableSamplingMode::kNoSampling);
}
+ } else {
+ ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
+ }
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ common.infoz().RecordReservation(n);
+ }
+}
+
+void Copy(CommonFields& common, const PolicyFunctions& __restrict policy,
+ const CommonFields& other,
+ absl::FunctionRef<void(void*, const void*)> copy_fn) {
+ const size_t size = other.size();
+ ABSL_SWISSTABLE_ASSERT(size > 0);
+ const size_t soo_capacity = policy.soo_capacity();
+ const size_t slot_size = policy.slot_size;
+ if (size <= soo_capacity) {
+ ABSL_SWISSTABLE_ASSERT(size == 1);
+ common.set_full_soo();
+ const void* other_slot =
+ other.capacity() <= soo_capacity
+ ? other.soo_data()
+ : SlotAddress(
+ other.slot_array(),
+ FindFirstFullSlot(0, other.capacity(), other.control()),
+ slot_size);
+ copy_fn(common.soo_data(), other_slot);
+
+ if (policy.is_hashtablez_eligible && ShouldSampleNextTable()) {
+ GrowFullSooTableToNextCapacityForceSampling(common, policy);
}
+ return;
+ }
+
+ ReserveTableToFitNewSize(common, policy, size);
+ auto infoz = common.infoz();
+ ABSL_SWISSTABLE_ASSERT(other.capacity() > soo_capacity);
+ const size_t cap = common.capacity();
+ ABSL_SWISSTABLE_ASSERT(cap > soo_capacity);
+ // Note about single group tables:
+ // 1. It is correct to have any order of elements.
+ // 2. Order has to be non deterministic.
+ // 3. We are assigning elements with arbitrary `shift` starting from
+ // `capacity + shift` position.
+ // 4. `shift` must be coprime with `capacity + 1` in order to be able to use
+ // modular arithmetic to traverse all positions, instead of cycling
+ // through a subset of positions. Odd numbers are coprime with any
+ // `capacity + 1` (2^N).
+ size_t offset = cap;
+ const size_t shift = is_single_group(cap) ? (common.seed().seed() | 1) : 0;
+ const void* hash_fn = policy.hash_fn(common);
+ auto hasher = policy.hash_slot;
+ IterateOverFullSlotsImpl(
+ other, slot_size, [&](const ctrl_t* that_ctrl, void* that_slot) {
+ if (shift == 0) {
+ // Big tables case. Position must be searched via probing.
+ // The table is guaranteed to be empty, so we can do faster than
+ // a full `insert`.
+ const size_t hash = (*hasher)(hash_fn, that_slot);
+ FindInfo target = find_first_non_full(common, hash);
+ infoz.RecordInsert(hash, target.probe_length);
+ offset = target.offset;
+ } else {
+ // Small tables case. Next position is computed via shift.
+ offset = (offset + shift) & cap;
+ }
+ const h2_t h2 = static_cast<h2_t>(*that_ctrl);
+ // We rely on the hash not changing for small tables.
+ ABSL_SWISSTABLE_ASSERT(
+ H2((*hasher)(hash_fn, that_slot)) == h2 &&
+ "hash function value changed unexpectedly during the copy");
+ SetCtrl(common, offset, h2, slot_size);
+ copy_fn(SlotAddress(common.slot_array(), offset, slot_size), that_slot);
+ common.maybe_increment_generation_on_insert();
+ });
+ if (shift != 0) {
+ // On small table copy we do not record individual inserts.
+ // RecordInsert requires hash, but it is unknown for small tables.
+ infoz.RecordStorageChanged(size, cap);
+ }
+ common.increment_size(size);
+ common.growth_info().OverwriteManyEmptyAsFull(size);
+}
+
+void ReserveTableToFitNewSize(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ size_t new_size) {
+ common.reset_reserved_growth(new_size);
+ common.set_reservation_size(new_size);
+ ABSL_SWISSTABLE_ASSERT(new_size > policy.soo_capacity());
+ const size_t cap = common.capacity();
+ if (ABSL_PREDICT_TRUE(common.empty() && cap <= policy.soo_capacity())) {
+ return ReserveEmptyNonAllocatedTableToFitNewSize(common, policy, new_size);
+ }
+
+ ABSL_SWISSTABLE_ASSERT(!common.empty() || cap > policy.soo_capacity());
+ ABSL_SWISSTABLE_ASSERT(cap > 0);
+ const size_t max_size_before_growth =
+ cap <= policy.soo_capacity() ? policy.soo_capacity()
+ : common.size() + common.growth_left();
+ if (new_size <= max_size_before_growth) {
+ return;
+ }
+ ReserveAllocatedTable(common, policy, new_size);
+}
+
+size_t PrepareInsertNonSoo(CommonFields& common,
+ const PolicyFunctions& __restrict policy,
+ size_t hash, FindInfo target) {
+ const bool rehash_for_bug_detection =
+ common.should_rehash_for_bug_detection_on_insert() &&
+ // Required to allow use of ResizeAllocatedTable.
+ common.capacity() > 0;
+ if (rehash_for_bug_detection) {
+ // Move to a different heap allocation in order to detect bugs.
+ const size_t cap = common.capacity();
+ ResizeAllocatedTableWithSeedChange(
+ common, policy, common.growth_left() > 0 ? cap : NextCapacity(cap));
+ target = find_first_non_full(common, hash);
+ }
+
+ const GrowthInfo growth_info = common.growth_info();
+ // When there are no deleted slots in the table
+ // and growth_left is positive, we can insert at the first
+ // empty slot in the probe sequence (target).
+ if (ABSL_PREDICT_FALSE(!growth_info.HasNoDeletedAndGrowthLeft())) {
+ return PrepareInsertNonSooSlow(common, policy, hash);
}
PrepareInsertCommon(common);
- common.growth_info().OverwriteControlAsFull(common.control()[target.offset]);
+ common.growth_info().OverwriteEmptyAsFull();
SetCtrl(common, target.offset, H2(hash), policy.slot_size);
common.infoz().RecordInsert(hash, target.probe_length);
return target.offset;
}
-void HashTableSizeOverflow() {
- ABSL_RAW_LOG(FATAL, "Hash table size overflow");
+namespace {
+// Returns true if the following is true
+// 1. OptimalMemcpySizeForSooSlotTransfer(left) >
+// OptimalMemcpySizeForSooSlotTransfer(left - 1)
+// 2. OptimalMemcpySizeForSooSlotTransfer(left) are equal for all i in [left,
+// right].
+// This function is used to verify that we have all the possible template
+// instantiations for GrowFullSooTableToNextCapacity.
+// With this verification the problem may be detected at compile time instead of
+// link time.
+constexpr bool VerifyOptimalMemcpySizeForSooSlotTransferRange(size_t left,
+ size_t right) {
+ size_t optimal_size_for_range = OptimalMemcpySizeForSooSlotTransfer(left);
+ if (optimal_size_for_range <= OptimalMemcpySizeForSooSlotTransfer(left - 1)) {
+ return false;
+ }
+ for (size_t i = left + 1; i <= right; ++i) {
+ if (OptimalMemcpySizeForSooSlotTransfer(i) != optimal_size_for_range) {
+ return false;
+ }
+ }
+ return true;
}
+} // namespace
+
+// Extern template instantiation for inline function.
+template size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,
+ size_t old_capacity,
+ ctrl_t* new_ctrl,
+ size_t new_capacity);
+
+// We need to instantiate ALL possible template combinations because we define
+// the function in the cc file.
+template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(
+ CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+template size_t GrowSooTableToNextCapacityAndPrepareInsert<
+ OptimalMemcpySizeForSooSlotTransfer(1), true>(CommonFields&,
+ const PolicyFunctions&,
+ size_t, ctrl_t);
+
+static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(2, 3));
+template size_t GrowSooTableToNextCapacityAndPrepareInsert<
+ OptimalMemcpySizeForSooSlotTransfer(3), true>(CommonFields&,
+ const PolicyFunctions&,
+ size_t, ctrl_t);
+
+static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(4, 8));
+template size_t GrowSooTableToNextCapacityAndPrepareInsert<
+ OptimalMemcpySizeForSooSlotTransfer(8), true>(CommonFields&,
+ const PolicyFunctions&,
+ size_t, ctrl_t);
+
+#if UINTPTR_MAX == UINT32_MAX
+static_assert(MaxSooSlotSize() == 8);
+#else
+static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(9, 16));
+template size_t GrowSooTableToNextCapacityAndPrepareInsert<
+ OptimalMemcpySizeForSooSlotTransfer(16), true>(CommonFields&,
+ const PolicyFunctions&,
+ size_t, ctrl_t);
+static_assert(MaxSooSlotSize() == 16);
+#endif
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
index 79ccb596b74..3effc441ae1 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -196,6 +196,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
+#include "absl/base/internal/iterator_traits.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
@@ -208,30 +209,17 @@
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/hash_policy_traits.h"
+#include "absl/container/internal/hashtable_control_bytes.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/functional/function_ref.h"
#include "absl/hash/hash.h"
+#include "absl/hash/internal/weakly_mixed_integer.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"
-#ifdef ABSL_INTERNAL_HAVE_SSE2
-#include <emmintrin.h>
-#endif
-
-#ifdef ABSL_INTERNAL_HAVE_SSSE3
-#include <tmmintrin.h>
-#endif
-
-#ifdef _MSC_VER
-#include <intrin.h>
-#endif
-
-#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
-#include <arm_neon.h>
-#endif
-
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -278,6 +266,15 @@ constexpr bool SwisstableGenerationsEnabled() { return false; }
constexpr size_t NumGenerationBytes() { return 0; }
#endif
+// Returns true if we should assert that the table is not accessed after it has
+// been destroyed or during the destruction of the table.
+constexpr bool SwisstableAssertAccessToDestroyedTable() {
+#ifndef NDEBUG
+ return true;
+#endif
+ return SwisstableGenerationsEnabled();
+}
+
template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
std::true_type /* propagate_on_container_swap */) {
@@ -383,163 +380,6 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
return false;
}
-template <typename T>
-uint32_t TrailingZeros(T x) {
- ABSL_ASSUME(x != 0);
- return static_cast<uint32_t>(countr_zero(x));
-}
-
-// 8 bytes bitmask with most significant bit set for every byte.
-constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
-
-// An abstract bitmask, such as that emitted by a SIMD instruction.
-//
-// Specifically, this type implements a simple bitset whose representation is
-// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
-// of abstract bits in the bitset, while `Shift` is the log-base-two of the
-// width of an abstract bit in the representation.
-// This mask provides operations for any number of real bits set in an abstract
-// bit. To add iteration on top of that, implementation must guarantee no more
-// than the most significant real bit is set in a set abstract bit.
-template <class T, int SignificantBits, int Shift = 0>
-class NonIterableBitMask {
- public:
- explicit NonIterableBitMask(T mask) : mask_(mask) {}
-
- explicit operator bool() const { return this->mask_ != 0; }
-
- // Returns the index of the lowest *abstract* bit set in `self`.
- uint32_t LowestBitSet() const {
- return container_internal::TrailingZeros(mask_) >> Shift;
- }
-
- // Returns the index of the highest *abstract* bit set in `self`.
- uint32_t HighestBitSet() const {
- return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
- }
-
- // Returns the number of trailing zero *abstract* bits.
- uint32_t TrailingZeros() const {
- return container_internal::TrailingZeros(mask_) >> Shift;
- }
-
- // Returns the number of leading zero *abstract* bits.
- uint32_t LeadingZeros() const {
- constexpr int total_significant_bits = SignificantBits << Shift;
- constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return static_cast<uint32_t>(
- countl_zero(static_cast<T>(mask_ << extra_bits))) >>
- Shift;
- }
-
- T mask_;
-};
-
-// Mask that can be iterable
-//
-// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
-// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
-// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
-// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
-// If NullifyBitsOnIteration is true (only allowed for Shift == 3),
-// non zero abstract bit is allowed to have additional bits
-// (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
-//
-// For example:
-// for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
-// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
-template <class T, int SignificantBits, int Shift = 0,
- bool NullifyBitsOnIteration = false>
-class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
- using Base = NonIterableBitMask<T, SignificantBits, Shift>;
- static_assert(std::is_unsigned<T>::value, "");
- static_assert(Shift == 0 || Shift == 3, "");
- static_assert(!NullifyBitsOnIteration || Shift == 3, "");
-
- public:
- explicit BitMask(T mask) : Base(mask) {
- if (Shift == 3 && !NullifyBitsOnIteration) {
- ABSL_SWISSTABLE_ASSERT(this->mask_ == (this->mask_ & kMsbs8Bytes));
- }
- }
- // BitMask is an iterator over the indices of its abstract bits.
- using value_type = int;
- using iterator = BitMask;
- using const_iterator = BitMask;
-
- BitMask& operator++() {
- if (Shift == 3 && NullifyBitsOnIteration) {
- this->mask_ &= kMsbs8Bytes;
- }
- this->mask_ &= (this->mask_ - 1);
- return *this;
- }
-
- uint32_t operator*() const { return Base::LowestBitSet(); }
-
- BitMask begin() const { return *this; }
- BitMask end() const { return BitMask(0); }
-
- private:
- friend bool operator==(const BitMask& a, const BitMask& b) {
- return a.mask_ == b.mask_;
- }
- friend bool operator!=(const BitMask& a, const BitMask& b) {
- return a.mask_ != b.mask_;
- }
-};
-
-using h2_t = uint8_t;
-
-// The values here are selected for maximum performance. See the static asserts
-// below for details.
-
-// A `ctrl_t` is a single control byte, which can have one of four
-// states: empty, deleted, full (which has an associated seven-bit h2_t value)
-// and the sentinel. They have the following bit patterns:
-//
-// empty: 1 0 0 0 0 0 0 0
-// deleted: 1 1 1 1 1 1 1 0
-// full: 0 h h h h h h h // h represents the hash bits.
-// sentinel: 1 1 1 1 1 1 1 1
-//
-// These values are specifically tuned for SSE-flavored SIMD.
-// The static_asserts below detail the source of these choices.
-//
-// We use an enum class so that when strict aliasing is enabled, the compiler
-// knows ctrl_t doesn't alias other types.
-enum class ctrl_t : int8_t {
- kEmpty = -128, // 0b10000000
- kDeleted = -2, // 0b11111110
- kSentinel = -1, // 0b11111111
-};
-static_assert(
- (static_cast<int8_t>(ctrl_t::kEmpty) &
- static_cast<int8_t>(ctrl_t::kDeleted) &
- static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
- "Special markers need to have the MSB to make checking for them efficient");
-static_assert(
- ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
- "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
- "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
-static_assert(
- ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
- "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
- "registers (pcmpeqd xmm, xmm)");
-static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
- "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
- "existence efficient (psignb xmm, xmm)");
-static_assert(
- (~static_cast<int8_t>(ctrl_t::kEmpty) &
- ~static_cast<int8_t>(ctrl_t::kDeleted) &
- static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
- "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
- "shared by ctrl_t::kSentinel to make the scalar test for "
- "MaskEmptyOrDeleted() efficient");
-static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
- "ctrl_t::kDeleted must be -2 to make the implementation of "
- "ConvertSpecialToEmptyAndFullToDeleted efficient");
-
// See definition comment for why this is size 32.
ABSL_DLL extern const ctrl_t kEmptyGroup[32];
@@ -585,360 +425,117 @@ inline bool IsEmptyGeneration(const GenerationType* generation) {
return *generation == SentinelEmptyGeneration();
}
-// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
-// randomize insertion order within groups.
-bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
- const ctrl_t* ctrl);
-
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards(
- ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash,
- ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
-#if defined(NDEBUG)
- return false;
-#else
- return ShouldInsertBackwardsForDebug(capacity, hash, ctrl);
-#endif
-}
+// We only allow a maximum of 1 SOO element, which makes the implementation
+// much simpler. Complications with multiple SOO elements include:
+// - Satisfying the guarantee that erasing one element doesn't invalidate
+// iterators to other elements means we would probably need actual SOO
+// control bytes.
+// - In order to prevent user code from depending on iteration order for small
+// tables, we would need to randomize the iteration order somehow.
+constexpr size_t SooCapacity() { return 1; }
+// Sentinel type to indicate SOO CommonFields construction.
+struct soo_tag_t {};
+// Sentinel type to indicate SOO CommonFields construction with full size.
+struct full_soo_tag_t {};
+// Sentinel type to indicate non-SOO CommonFields construction.
+struct non_soo_tag_t {};
+// Sentinel value to indicate an uninitialized value explicitly.
+struct uninitialized_tag_t {};
+// Sentinel value to indicate creation of an empty table without a seed.
+struct no_seed_empty_tag_t {};
-// Returns insert position for the given mask.
-// We want to add entropy even when ASLR is not enabled.
-// In debug build we will randomly insert in either the front or back of
-// the group.
-// TODO(kfm,sbenza): revisit after we do unconditional mixing
-template <class Mask>
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset(
- Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity,
- ABSL_ATTRIBUTE_UNUSED size_t hash,
- ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
-#if defined(NDEBUG)
- return mask.LowestBitSet();
-#else
- return ShouldInsertBackwardsForDebug(capacity, hash, ctrl)
- ? mask.HighestBitSet()
- : mask.LowestBitSet();
-#endif
-}
+// Per table hash salt. This gets mixed into H1 to randomize iteration order
+// per-table.
+// The seed is needed to ensure non-determinism of iteration order.
+class PerTableSeed {
+ public:
+ // The number of bits in the seed.
+ // It is big enough to ensure non-determinism of iteration order.
+ // We store the seed inside a uint64_t together with size and other metadata.
+ // Using 16 bits allows us to save one `and` instruction in H1 (we use movzwl
+ // instead of movq+and).
+ static constexpr size_t kBitCount = 16;
-// Returns a per-table, hash salt, which changes on resize. This gets mixed into
-// H1 to randomize iteration order per-table.
-//
-// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
-// non-determinism of iteration order in most cases.
-inline size_t PerTableSalt(const ctrl_t* ctrl) {
- // The low bits of the pointer have little or no entropy because of
- // alignment. We shift the pointer to try to use higher entropy bits. A
- // good number seems to be 12 bits, because that aligns with page size.
- return reinterpret_cast<uintptr_t>(ctrl) >> 12;
-}
-// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
-inline size_t H1(size_t hash, const ctrl_t* ctrl) {
- return (hash >> 7) ^ PerTableSalt(ctrl);
-}
+ // Returns the seed for the table. Only the lowest kBitCount bits are non-zero.
+ size_t seed() const { return seed_; }
-// Extracts the H2 portion of a hash: the 7 bits not used for H1.
-//
-// These are used as an occupied control byte.
-inline h2_t H2(size_t hash) { return hash & 0x7F; }
+ private:
+ friend class HashtableSize;
+ explicit PerTableSeed(size_t seed) : seed_(seed) {}
-// Helpers for checking the state of a control byte.
-inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
-inline bool IsFull(ctrl_t c) {
- // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
- // is not a value in the enum. Both ways are equivalent, but this way makes
- // linters happier.
- return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
-}
-inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
-inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
+ const size_t seed_;
+};
-#ifdef ABSL_INTERNAL_HAVE_SSE2
-// Quick reference guide for intrinsics used below:
-//
-// * __m128i: An XMM (128-bit) word.
-//
-// * _mm_setzero_si128: Returns a zero vector.
-// * _mm_set1_epi8: Returns a vector with the same i8 in each lane.
-//
-// * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
-// * _mm_and_si128: Ands two i128s together.
-// * _mm_or_si128: Ors two i128s together.
-// * _mm_andnot_si128: And-nots two i128s together.
-//
-// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
-// filling each lane with 0x00 or 0xff.
-// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
-//
-// * _mm_loadu_si128: Performs an unaligned load of an i128.
-// * _mm_storeu_si128: Performs an unaligned store of an i128.
-//
-// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
-// argument if the corresponding lane of the second
-// argument is positive, negative, or zero, respectively.
-// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
-// bitmask consisting of those bits.
-// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
-// four bits of each i8 lane in the second argument as
-// indices.
-
-// https://github.com/abseil/abseil-cpp/issues/209
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
-// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
-// Work around this by using the portable implementation of Group
-// when using -funsigned-char under GCC.
-inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
-#if defined(__GNUC__) && !defined(__clang__)
- if (std::is_unsigned<char>::value) {
- const __m128i mask = _mm_set1_epi8(0x80);
- const __m128i diff = _mm_subs_epi8(b, a);
- return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
- }
-#endif
- return _mm_cmpgt_epi8(a, b);
+// Returns next per-table seed.
+inline uint16_t NextSeed() {
+ static_assert(PerTableSeed::kBitCount == 16);
+ thread_local uint16_t seed =
+ static_cast<uint16_t>(reinterpret_cast<uintptr_t>(&seed));
+ seed += uint16_t{0xad53};
+ return seed;
}
-struct GroupSse2Impl {
- static constexpr size_t kWidth = 16; // the number of slots per group
+// Stores the size and additionally has
+// 1) one bit that stores whether we have infoz.
+// 2) PerTableSeed::kBitCount bits for the seed.
+class HashtableSize {
+ public:
+ static constexpr size_t kSizeBitCount = 64 - PerTableSeed::kBitCount - 1;
- explicit GroupSse2Impl(const ctrl_t* pos) {
- ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
- }
+ explicit HashtableSize(uninitialized_tag_t) {}
+ explicit HashtableSize(no_seed_empty_tag_t) : data_(0) {}
+ explicit HashtableSize(full_soo_tag_t) : data_(kSizeOneNoMetadata) {}
- // Returns a bitmask representing the positions of slots that match hash.
- BitMask<uint16_t, kWidth> Match(h2_t hash) const {
- auto match = _mm_set1_epi8(static_cast<char>(hash));
- return BitMask<uint16_t, kWidth>(
- static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
+ // Returns actual size of the table.
+ size_t size() const { return static_cast<size_t>(data_ >> kSizeShift); }
+ void increment_size() { data_ += kSizeOneNoMetadata; }
+ void increment_size(size_t size) {
+ data_ += static_cast<uint64_t>(size) * kSizeOneNoMetadata;
}
+ void decrement_size() { data_ -= kSizeOneNoMetadata; }
+ // Returns true if the table is empty.
+ bool empty() const { return data_ < kSizeOneNoMetadata; }
+ // Sets the size to zero, but keeps all the metadata bits.
+ void set_size_to_zero_keep_metadata() { data_ = data_ & kMetadataMask; }
- // Returns a bitmask representing the positions of empty slots.
- NonIterableBitMask<uint16_t, kWidth> MaskEmpty() const {
-#ifdef ABSL_INTERNAL_HAVE_SSSE3
- // This only works because ctrl_t::kEmpty is -128.
- return NonIterableBitMask<uint16_t, kWidth>(
- static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
-#else
- auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
- return NonIterableBitMask<uint16_t, kWidth>(
- static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
-#endif
+ PerTableSeed seed() const {
+ return PerTableSeed(static_cast<size_t>(data_) & kSeedMask);
}
- // Returns a bitmask representing the positions of full slots.
- // Note: for `is_small()` tables group may contain the "same" slot twice:
- // original and mirrored.
- BitMask<uint16_t, kWidth> MaskFull() const {
- return BitMask<uint16_t, kWidth>(
- static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
+ void generate_new_seed() {
+ data_ = (data_ & ~kSeedMask) ^ uint64_t{NextSeed()};
}
- // Returns a bitmask representing the positions of non full slots.
- // Note: this includes: kEmpty, kDeleted, kSentinel.
- // It is useful in contexts when kSentinel is not present.
- auto MaskNonFull() const {
- return BitMask<uint16_t, kWidth>(
- static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
- }
-
- // Returns a bitmask representing the positions of empty or deleted slots.
- NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
- return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
+ // Returns true if the table has infoz.
+ bool has_infoz() const {
+ return ABSL_PREDICT_FALSE((data_ & kHasInfozMask) != 0);
}
- // Returns the number of trailing empty or deleted elements in the group.
- uint32_t CountLeadingEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
- return TrailingZeros(static_cast<uint32_t>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
- }
+ // Sets the has_infoz bit.
+ void set_has_infoz() { data_ |= kHasInfozMask; }
- void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
- auto msbs = _mm_set1_epi8(static_cast<char>(-128));
- auto x126 = _mm_set1_epi8(126);
-#ifdef ABSL_INTERNAL_HAVE_SSSE3
- auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
-#else
- auto zero = _mm_setzero_si128();
- auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
- auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
-#endif
- _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
- }
+ void set_no_seed_for_testing() { data_ &= ~kSeedMask; }
- __m128i ctrl;
-};
-#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-
-#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
-struct GroupAArch64Impl {
- static constexpr size_t kWidth = 8;
-
- explicit GroupAArch64Impl(const ctrl_t* pos) {
- ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
- }
-
- auto Match(h2_t hash) const {
- uint8x8_t dup = vdup_n_u8(hash);
- auto mask = vceq_u8(ctrl, dup);
- return BitMask<uint64_t, kWidth, /*Shift=*/3,
- /*NullifyBitsOnIteration=*/true>(
- vget_lane_u64(vreinterpret_u64_u8(mask), 0));
- }
-
- NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
- uint64_t mask =
- vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
- vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
- vreinterpret_s8_u8(ctrl))),
- 0);
- return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
- }
-
- // Returns a bitmask representing the positions of full slots.
- // Note: for `is_small()` tables group may contain the "same" slot twice:
- // original and mirrored.
- auto MaskFull() const {
- uint64_t mask = vget_lane_u64(
- vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
- vdup_n_s8(static_cast<int8_t>(0)))),
- 0);
- return BitMask<uint64_t, kWidth, /*Shift=*/3,
- /*NullifyBitsOnIteration=*/true>(mask);
- }
-
- // Returns a bitmask representing the positions of non full slots.
- // Note: this includes: kEmpty, kDeleted, kSentinel.
- // It is useful in contexts when kSentinel is not present.
- auto MaskNonFull() const {
- uint64_t mask = vget_lane_u64(
- vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
- vdup_n_s8(static_cast<int8_t>(0)))),
- 0);
- return BitMask<uint64_t, kWidth, /*Shift=*/3,
- /*NullifyBitsOnIteration=*/true>(mask);
- }
-
- NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
- uint64_t mask =
- vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
- vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
- vreinterpret_s8_u8(ctrl))),
- 0);
- return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
- }
-
- uint32_t CountLeadingEmptyOrDeleted() const {
- uint64_t mask =
- vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
- vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
- vreinterpret_s8_u8(ctrl))),
- 0);
- // Similar to MaskEmptyorDeleted() but we invert the logic to invert the
- // produced bitfield. We then count number of trailing zeros.
- // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
- // so we should be fine.
- return static_cast<uint32_t>(countr_zero(mask)) >> 3;
- }
-
- void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
- uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
- constexpr uint64_t slsbs = 0x0202020202020202ULL;
- constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
- auto x = slsbs & (mask >> 6);
- auto res = (x + midbs) | kMsbs8Bytes;
- little_endian::Store64(dst, res);
- }
-
- uint8x8_t ctrl;
+ private:
+ static constexpr size_t kSizeShift = 64 - kSizeBitCount;
+ static constexpr uint64_t kSizeOneNoMetadata = uint64_t{1} << kSizeShift;
+ static constexpr uint64_t kMetadataMask = kSizeOneNoMetadata - 1;
+ static constexpr uint64_t kSeedMask =
+ (uint64_t{1} << PerTableSeed::kBitCount) - 1;
+ // The next bit after the seed.
+ static constexpr uint64_t kHasInfozMask = kSeedMask + 1;
+ uint64_t data_;
};
-#endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
-
-struct GroupPortableImpl {
- static constexpr size_t kWidth = 8;
-
- explicit GroupPortableImpl(const ctrl_t* pos)
- : ctrl(little_endian::Load64(pos)) {}
-
- BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
- // For the technique, see:
- // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
- // (Determine if a word has a byte equal to n).
- //
- // Caveat: there are false positives but:
- // - they only occur if there is a real match
- // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
- // - they will be handled gracefully by subsequent checks in code
- //
- // Example:
- // v = 0x1716151413121110
- // hash = 0x12
- // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
- constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = ctrl ^ (lsbs * hash);
- return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & kMsbs8Bytes);
- }
-
- NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
- return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
- kMsbs8Bytes);
- }
-
- // Returns a bitmask representing the positions of full slots.
- // Note: for `is_small()` tables group may contain the "same" slot twice:
- // original and mirrored.
- BitMask<uint64_t, kWidth, 3> MaskFull() const {
- return BitMask<uint64_t, kWidth, 3>((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
- }
-
- // Returns a bitmask representing the positions of non full slots.
- // Note: this includes: kEmpty, kDeleted, kSentinel.
- // It is useful in contexts when kSentinel is not present.
- auto MaskNonFull() const {
- return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes);
- }
-
- NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
- return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
- kMsbs8Bytes);
- }
-
- uint32_t CountLeadingEmptyOrDeleted() const {
- // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
- // kDeleted. We lower all other bits and count number of trailing zeros.
- constexpr uint64_t bits = 0x0101010101010101ULL;
- return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
- 3);
- }
-
- void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
- constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = ctrl & kMsbs8Bytes;
- auto res = (~x + (x >> 7)) & ~lsbs;
- little_endian::Store64(dst, res);
- }
- uint64_t ctrl;
-};
+// Extracts the H1 portion of a hash: 57 bits mixed with a per-table seed.
+inline size_t H1(size_t hash, PerTableSeed seed) {
+ return (hash >> 7) ^ seed.seed();
+}
-#ifdef ABSL_INTERNAL_HAVE_SSE2
-using Group = GroupSse2Impl;
-using GroupFullEmptyOrDeleted = GroupSse2Impl;
-#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
-using Group = GroupAArch64Impl;
-// For Aarch64, we use the portable implementation for counting and masking
-// full, empty or deleted group elements. This is to avoid the latency of moving
-// between data GPRs and Neon registers when it does not provide a benefit.
-// Using Neon is profitable when we call Match(), but is not when we don't,
-// which is the case when we do *EmptyOrDeleted and MaskFull operations.
-// It is difficult to make a similar approach beneficial on other architectures
-// such as x86 since they have much lower GPR <-> vector register transfer
-// latency and 16-wide Groups.
-using GroupFullEmptyOrDeleted = GroupPortableImpl;
-#else
-using Group = GroupPortableImpl;
-using GroupFullEmptyOrDeleted = GroupPortableImpl;
-#endif
+// Extracts the H2 portion of a hash: the 7 bits not used for H1.
+//
+// These are used as an occupied control byte.
+inline h2_t H2(size_t hash) { return hash & 0x7F; }
// When there is an insertion with no reserved growth, we rehash with
// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
@@ -974,10 +571,10 @@ class CommonFieldsGenerationInfoEnabled {
// references. We rehash on the first insertion after reserved_growth_ reaches
// 0 after a call to reserve. We also do a rehash with low probability
// whenever reserved_growth_ is zero.
- bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+ bool should_rehash_for_bug_detection_on_insert(PerTableSeed seed,
size_t capacity) const;
// Similar to above, except that we don't depend on reserved_growth_.
- bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
+ bool should_rehash_for_bug_detection_on_move(PerTableSeed seed,
size_t capacity) const;
void maybe_increment_generation_on_insert() {
if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
@@ -1031,10 +628,10 @@ class CommonFieldsGenerationInfoDisabled {
CommonFieldsGenerationInfoDisabled& operator=(
CommonFieldsGenerationInfoDisabled&&) = default;
- bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
+ bool should_rehash_for_bug_detection_on_insert(PerTableSeed, size_t) const {
return false;
}
- bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
+ bool should_rehash_for_bug_detection_on_move(PerTableSeed, size_t) const {
return false;
}
void maybe_increment_generation_on_insert() {}
@@ -1127,9 +724,9 @@ class GrowthInfo {
}
// Overwrites several empty slots with full slots.
- void OverwriteManyEmptyAsFull(size_t cnt) {
- ABSL_SWISSTABLE_ASSERT(GetGrowthLeft() >= cnt);
- growth_left_info_ -= cnt;
+ void OverwriteManyEmptyAsFull(size_t count) {
+ ABSL_SWISSTABLE_ASSERT(GetGrowthLeft() >= count);
+ growth_left_info_ -= count;
}
// Overwrites specified control element with full slot.
@@ -1154,7 +751,14 @@ class GrowthInfo {
// 2. There is no growth left.
bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; }
- // Returns true if table guaranteed to have no k
+ // Returns true if GetGrowthLeft() == 0, but must be called only if
+ // HasNoDeleted() is false. It is slightly more efficient.
+ bool HasNoGrowthLeftAssumingMayHaveDeleted() const {
+ ABSL_SWISSTABLE_ASSERT(!HasNoDeleted());
+ return growth_left_info_ == kDeletedBit;
+ }
+
+ // Returns true if the table is guaranteed to have no kDeleted slots.
bool HasNoDeleted() const {
return static_cast<std::make_signed_t<size_t>>(growth_left_info_) >= 0;
}
@@ -1175,7 +779,7 @@ static_assert(alignof(GrowthInfo) == alignof(size_t), "");
// Returns whether `n` is a valid capacity (i.e., number of slots).
//
// A valid capacity is a non-zero integer `2^m - 1`.
-inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+constexpr bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// Returns the number of "cloned control bytes".
//
@@ -1191,26 +795,32 @@ constexpr size_t NumControlBytes(size_t capacity) {
// Computes the offset from the start of the backing allocation of control.
// infoz and growth_info are stored at the beginning of the backing array.
-inline static size_t ControlOffset(bool has_infoz) {
+constexpr size_t ControlOffset(bool has_infoz) {
return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo);
}
+// Returns the offset of the next item after `offset` that is aligned to `align`
+// bytes. `align` must be a power of two.
+constexpr size_t AlignUpTo(size_t offset, size_t align) {
+ return (offset + align - 1) & (~align + 1);
+}
+
// Helper class for computing offsets and allocation size of hash set fields.
class RawHashSetLayout {
public:
- explicit RawHashSetLayout(size_t capacity, size_t slot_align, bool has_infoz)
- : capacity_(capacity),
- control_offset_(ControlOffset(has_infoz)),
+ explicit RawHashSetLayout(size_t capacity, size_t slot_size,
+ size_t slot_align, bool has_infoz)
+ : control_offset_(ControlOffset(has_infoz)),
generation_offset_(control_offset_ + NumControlBytes(capacity)),
slot_offset_(
- (generation_offset_ + NumGenerationBytes() + slot_align - 1) &
- (~slot_align + 1)) {
+ AlignUpTo(generation_offset_ + NumGenerationBytes(), slot_align)),
+ alloc_size_(slot_offset_ + capacity * slot_size) {
ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
+ ABSL_SWISSTABLE_ASSERT(
+ slot_size <=
+ ((std::numeric_limits<size_t>::max)() - slot_offset_) / capacity);
}
- // Returns the capacity of a table.
- size_t capacity() const { return capacity_; }
-
// Returns precomputed offset from the start of the backing allocation of
// control.
size_t control_offset() const { return control_offset_; }
@@ -1225,39 +835,17 @@ class RawHashSetLayout {
// Given the capacity of a table, computes the total size of the backing
// array.
- size_t alloc_size(size_t slot_size) const {
- ABSL_SWISSTABLE_ASSERT(
- slot_size <=
- ((std::numeric_limits<size_t>::max)() - slot_offset_) / capacity_);
- return slot_offset_ + capacity_ * slot_size;
- }
+ size_t alloc_size() const { return alloc_size_; }
private:
- size_t capacity_;
size_t control_offset_;
size_t generation_offset_;
size_t slot_offset_;
+ size_t alloc_size_;
};
struct HashtableFreeFunctionsAccess;
-// We only allow a maximum of 1 SOO element, which makes the implementation
-// much simpler. Complications with multiple SOO elements include:
-// - Satisfying the guarantee that erasing one element doesn't invalidate
-// iterators to other elements means we would probably need actual SOO
-// control bytes.
-// - In order to prevent user code from depending on iteration order for small
-// tables, we would need to randomize the iteration order somehow.
-constexpr size_t SooCapacity() { return 1; }
-// Sentinel type to indicate SOO CommonFields construction.
-struct soo_tag_t {};
-// Sentinel type to indicate SOO CommonFields construction with full size.
-struct full_soo_tag_t {};
-// Sentinel type to indicate non-SOO CommonFields construction.
-struct non_soo_tag_t {};
-// Sentinel value to indicate an uninitialized CommonFields for use in swapping.
-struct uninitialized_tag_t {};
-
// Suppress erroneous uninitialized memory errors on GCC. For example, GCC
// thinks that the call to slot_array() in find_or_prepare_insert() is reading
// uninitialized memory, but slot_array is only called there when the table is
@@ -1285,7 +873,7 @@ union MaybeInitializedPtr {
};
struct HeapPtrs {
- HeapPtrs() = default;
+ explicit HeapPtrs(uninitialized_tag_t) {}
explicit HeapPtrs(ctrl_t* c) : control(c) {}
// The control bytes (and, also, a pointer near to the base of the backing
@@ -1304,10 +892,13 @@ struct HeapPtrs {
MaybeInitializedPtr slot_array;
};
+// Returns the maximum size of the SOO slot.
+constexpr size_t MaxSooSlotSize() { return sizeof(HeapPtrs); }
+
// Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
// is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
union HeapOrSoo {
- HeapOrSoo() = default;
+ explicit HeapOrSoo(uninitialized_tag_t) : heap(uninitialized_tag_t{}) {}
explicit HeapOrSoo(ctrl_t* c) : heap(c) {}
ctrl_t*& control() {
@@ -1330,26 +921,50 @@ union HeapOrSoo {
}
HeapPtrs heap;
- unsigned char soo_data[sizeof(HeapPtrs)];
+ unsigned char soo_data[MaxSooSlotSize()];
};
+// Returns a reference to the GrowthInfo object stored immediately before
+// `control`.
+inline GrowthInfo& GetGrowthInfoFromControl(ctrl_t* control) {
+ auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control) - 1;
+ ABSL_SWISSTABLE_ASSERT(
+ reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
+ return *gl_ptr;
+}
+
// CommonFields hold the fields in raw_hash_set that do not depend
// on template parameters. This allows us to conveniently pass all
// of this state to helper functions as a single argument.
class CommonFields : public CommonFieldsGenerationInfo {
public:
- explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {}
+ explicit CommonFields(soo_tag_t)
+ : capacity_(SooCapacity()),
+ size_(no_seed_empty_tag_t{}),
+ heap_or_soo_(uninitialized_tag_t{}) {}
explicit CommonFields(full_soo_tag_t)
- : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {}
+ : capacity_(SooCapacity()),
+ size_(full_soo_tag_t{}),
+ heap_or_soo_(uninitialized_tag_t{}) {}
explicit CommonFields(non_soo_tag_t)
- : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {}
+ : capacity_(0),
+ size_(no_seed_empty_tag_t{}),
+ heap_or_soo_(EmptyGroup()) {}
// For use in swapping.
- explicit CommonFields(uninitialized_tag_t) {}
+ explicit CommonFields(uninitialized_tag_t)
+ : size_(uninitialized_tag_t{}), heap_or_soo_(uninitialized_tag_t{}) {}
// Not copyable
CommonFields(const CommonFields&) = delete;
CommonFields& operator=(const CommonFields&) = delete;
+ // Copy with guarantee that it is not SOO.
+ CommonFields(non_soo_tag_t, const CommonFields& that)
+ : capacity_(that.capacity_),
+ size_(that.size_),
+ heap_or_soo_(that.heap_or_soo_) {
+ }
+
// Movable
CommonFields(CommonFields&& that) = default;
CommonFields& operator=(CommonFields&&) = default;
@@ -1364,11 +979,21 @@ class CommonFields : public CommonFieldsGenerationInfo {
const void* soo_data() const { return heap_or_soo_.get_soo_data(); }
void* soo_data() { return heap_or_soo_.get_soo_data(); }
- HeapOrSoo heap_or_soo() const { return heap_or_soo_; }
- const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; }
-
ctrl_t* control() const { return heap_or_soo_.control(); }
- void set_control(ctrl_t* c) { heap_or_soo_.control() = c; }
+
+ // When we set the control bytes, we also often want to generate a new seed.
+ // So we bundle these two operations together to make sure we don't forget to
+ // generate a new seed.
+ // The table will be invalidated if
+ // `kGenerateSeed && !empty() && !is_single_group(capacity())` because H1 is
+ // being changed. In such cases, we will need to rehash the table.
+ template <bool kGenerateSeed>
+ void set_control(ctrl_t* c) {
+ heap_or_soo_.control() = c;
+ if constexpr (kGenerateSeed) {
+ generate_new_seed();
+ }
+ }
void* backing_array_start() const {
// growth_info (and maybe infoz) is stored before control bytes.
ABSL_SWISSTABLE_ASSERT(
@@ -1382,26 +1007,39 @@ class CommonFields : public CommonFieldsGenerationInfo {
void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
// The number of filled slots.
- size_t size() const { return size_ >> HasInfozShift(); }
- void set_size(size_t s) {
- size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
- }
+ size_t size() const { return size_.size(); }
+ // Sets the size to zero, but keeps the has_infoz bit and seed.
+ void set_size_to_zero() { size_.set_size_to_zero_keep_metadata(); }
void set_empty_soo() {
AssertInSooMode();
- size_ = 0;
+ size_ = HashtableSize(no_seed_empty_tag_t{});
}
void set_full_soo() {
AssertInSooMode();
- size_ = size_t{1} << HasInfozShift();
+ size_ = HashtableSize(full_soo_tag_t{});
}
void increment_size() {
ABSL_SWISSTABLE_ASSERT(size() < capacity());
- size_ += size_t{1} << HasInfozShift();
+ size_.increment_size();
+ }
+ void increment_size(size_t n) {
+ ABSL_SWISSTABLE_ASSERT(size() + n <= capacity());
+ size_.increment_size(n);
}
void decrement_size() {
- ABSL_SWISSTABLE_ASSERT(size() > 0);
- size_ -= size_t{1} << HasInfozShift();
+ ABSL_SWISSTABLE_ASSERT(!empty());
+ size_.decrement_size();
}
+ bool empty() const { return size_.empty(); }
+
+ // The seed used for the H1 part of the hash function.
+ PerTableSeed seed() const { return size_.seed(); }
+ // Generates a new seed for the H1 part of the hash function.
+ // The table will be invalidated if
+ // `kGenerateSeed && !empty() && !is_single_group(capacity())` because H1 is
+ // being changed. In such cases, we will need to rehash the table.
+ void generate_new_seed() { size_.generate_new_seed(); }
+ void set_no_seed_for_testing() { size_.set_no_seed_for_testing(); }
// The total number of available slots.
size_t capacity() const { return capacity_; }
@@ -1419,21 +1057,14 @@ class CommonFields : public CommonFieldsGenerationInfo {
size_t growth_left() const { return growth_info().GetGrowthLeft(); }
GrowthInfo& growth_info() {
- auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control()) - 1;
- ABSL_SWISSTABLE_ASSERT(
- reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
- return *gl_ptr;
+ return GetGrowthInfoFromControl(control());
}
GrowthInfo growth_info() const {
return const_cast<CommonFields*>(this)->growth_info();
}
- bool has_infoz() const {
- return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
- }
- void set_has_infoz(bool has_infoz) {
- size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
- }
+ bool has_infoz() const { return size_.has_infoz(); }
+ void set_has_infoz() { size_.set_has_infoz(); }
HashtablezInfoHandle infoz() {
return has_infoz()
@@ -1446,12 +1077,18 @@ class CommonFields : public CommonFieldsGenerationInfo {
}
bool should_rehash_for_bug_detection_on_insert() const {
+ if constexpr (!SwisstableGenerationsEnabled()) {
+ return false;
+ }
+ // As an optimization, we avoid calling ShouldRehashForBugDetection if we
+ // will end up rehashing anyways.
+ if (growth_left() == 0) return false;
return CommonFieldsGenerationInfo::
- should_rehash_for_bug_detection_on_insert(control(), capacity());
+ should_rehash_for_bug_detection_on_insert(seed(), capacity());
}
bool should_rehash_for_bug_detection_on_move() const {
return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
- control(), capacity());
+ seed(), capacity());
}
void reset_reserved_growth(size_t reservation) {
CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
@@ -1459,8 +1096,8 @@ class CommonFields : public CommonFieldsGenerationInfo {
// The size of the backing array allocation.
size_t alloc_size(size_t slot_size, size_t slot_align) const {
- return RawHashSetLayout(capacity(), slot_align, has_infoz())
- .alloc_size(slot_size);
+ return RawHashSetLayout(capacity(), slot_size, slot_align, has_infoz())
+ .alloc_size();
}
// Move fields other than heap_or_soo_.
@@ -1513,11 +1150,10 @@ class CommonFields : public CommonFieldsGenerationInfo {
// regressions, presumably because we need capacity to do find operations.
size_t capacity_;
- // The size and also has one bit that stores whether we have infoz.
// TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
// encode the size in SOO case. We would be making size()/capacity() more
// expensive in order to have more SOO space.
- size_t size_;
+ HashtableSize size_;
// Either the control/slots pointers or the SOO slot.
HeapOrSoo heap_or_soo_;
@@ -1527,11 +1163,17 @@ template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
// Returns the next valid capacity after `n`.
-inline size_t NextCapacity(size_t n) {
+constexpr size_t NextCapacity(size_t n) {
ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n) || n == 0);
return n * 2 + 1;
}
+// Returns the previous valid capacity before `n`.
+constexpr size_t PreviousCapacity(size_t n) {
+ ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n));
+ return n / 2;
+}
+
// Applies the following mapping to every byte in the control array:
// * kDeleted -> kEmpty
// * kEmpty -> kEmpty
@@ -1543,19 +1185,10 @@ inline size_t NextCapacity(size_t n) {
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
// Converts `n` into the next valid capacity, per `IsValidCapacity`.
-inline size_t NormalizeCapacity(size_t n) {
+constexpr size_t NormalizeCapacity(size_t n) {
return n ? ~size_t{} >> countl_zero(n) : 1;
}
-template <size_t kSlotSize>
-size_t MaxValidCapacity() {
- return NormalizeCapacity((std::numeric_limits<size_t>::max)() / 4 /
- kSlotSize);
-}
-
-// Use a non-inlined function to avoid code bloat.
-[[noreturn]] void HashTableSizeOverflow();
-
// General notes on capacity/growth methods below:
// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
// average of two empty slots per group.
@@ -1566,7 +1199,7 @@ size_t MaxValidCapacity() {
// Given `capacity`, applies the load factor; i.e., it returns the maximum
// number of values we should put into the table before a resizing rehash.
-inline size_t CapacityToGrowth(size_t capacity) {
+constexpr size_t CapacityToGrowth(size_t capacity) {
ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
// `capacity*7/8`
if (Group::kWidth == 8 && capacity == 7) {
@@ -1576,18 +1209,28 @@ inline size_t CapacityToGrowth(size_t capacity) {
return capacity - capacity / 8;
}
-// Given `growth`, "unapplies" the load factor to find how large the capacity
+// Given `size`, "unapplies" the load factor to find how large the capacity
// should be to stay within the load factor.
//
-// This might not be a valid capacity and `NormalizeCapacity()` should be
-// called on this.
-inline size_t GrowthToLowerboundCapacity(size_t growth) {
- // `growth*8/7`
- if (Group::kWidth == 8 && growth == 7) {
- // x+(x-1)/7 does not work when x==7.
- return 8;
- }
- return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
+// For size == 0, returns 0.
+// For other values, returns the same as `NormalizeCapacity(size*8/7)`.
+constexpr size_t SizeToCapacity(size_t size) {
+ if (size == 0) {
+ return 0;
+ }
+ // The minimum possible capacity is NormalizeCapacity(size).
+ // Shifting right `~size_t{}` by `leading_zeros` yields
+ // NormalizeCapacity(size).
+ int leading_zeros = absl::countl_zero(size);
+ constexpr size_t kLast3Bits = size_t{7} << (sizeof(size_t) * 8 - 3);
+ size_t max_size_for_next_capacity = kLast3Bits >> leading_zeros;
+ // Decrease shift if size is too big for the minimum capacity.
+ leading_zeros -= static_cast<int>(size > max_size_for_next_capacity);
+ if constexpr (Group::kWidth == 8) {
+ // Formula doesn't work when size==7 for 8-wide groups.
+ leading_zeros -= (size == 7);
+ }
+ return (~size_t{}) >> leading_zeros;
}
template <class InputIter>
@@ -1596,12 +1239,9 @@ size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
if (bucket_count != 0) {
return bucket_count;
}
- using InputIterCategory =
- typename std::iterator_traits<InputIter>::iterator_category;
- if (std::is_base_of<std::random_access_iterator_tag,
- InputIterCategory>::value) {
- return GrowthToLowerboundCapacity(
- static_cast<size_t>(std::distance(first, last)));
+ if (base_internal::IsAtLeastIterator<std::random_access_iterator_tag,
+ InputIter>()) {
+ return SizeToCapacity(static_cast<size_t>(std::distance(first, last)));
}
return 0;
}
@@ -1674,7 +1314,7 @@ inline void AssertIsValidForComparison(const ctrl_t* ctrl,
FATAL, "Invalid iterator comparison. The element was likely erased.");
}
} else {
- ABSL_HARDENING_ASSERT(
+ ABSL_HARDENING_ASSERT_SLOW(
ctrl_is_valid_for_comparison &&
"Invalid iterator comparison. The element might have been erased or "
"the table might have rehashed. Consider running with --config=asan to "
@@ -1772,33 +1412,22 @@ struct FindInfo {
size_t probe_length;
};
-// Whether a table is "small". A small table fits entirely into a probing
-// group, i.e., has a capacity < `Group::kWidth`.
-//
-// In small mode we are able to use the whole capacity. The extra control
-// bytes give us at least one "empty" control byte to stop the iteration.
-// This is important to make 1 a valid capacity.
-//
-// In small mode only the first `capacity` control bytes after the sentinel
-// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
-// represent a real slot. This is important to take into account on
-// `find_first_non_full()`, where we never try
-// `ShouldInsertBackwards()` for small tables.
-inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
-
// Whether a table fits entirely into a probing group.
// Arbitrary order of elements in such tables is correct.
-inline bool is_single_group(size_t capacity) {
+constexpr bool is_single_group(size_t capacity) {
return capacity <= Group::kWidth;
}
// Begins a probing operation on `common.control`, using `hash`.
-inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
+inline probe_seq<Group::kWidth> probe(size_t h1, size_t capacity) {
+ return probe_seq<Group::kWidth>(h1, capacity);
+}
+inline probe_seq<Group::kWidth> probe(PerTableSeed seed, size_t capacity,
size_t hash) {
- return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+ return probe(H1(hash, seed), capacity);
}
inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
- return probe(common.control(), common.capacity(), hash);
+ return probe(common.seed(), common.capacity(), hash);
}
// Probes an array of control bits using a probe sequence derived from `hash`,
@@ -1808,51 +1437,70 @@ inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
//
// NOTE: this function must work with tables having both empty and deleted
// slots in the same group. Such tables appear during `erase()`.
+FindInfo find_first_non_full(const CommonFields& common, size_t hash);
+
+constexpr size_t kProbedElementIndexSentinel = ~size_t{};
+
+// Implementation detail of transfer_unprobed_elements_to_next_capacity_fn.
+// Tries to find the new index for an element whose hash corresponds to
+// `h1` for growth to the next capacity.
+// Returns kProbedElementIndexSentinel if full probing is required.
+//
+// If element is located in the first probing group in the table before growth,
+// returns one of two positions: `old_index` or `old_index + old_capacity + 1`.
+//
+// Otherwise, we will try to insert it into the first probe group of the new
+// table. We only attempt to do so if the first probe group is already
+// initialized.
template <typename = void>
-inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
- auto seq = probe(common, hash);
- const ctrl_t* ctrl = common.control();
- if (IsEmptyOrDeleted(ctrl[seq.offset()]) &&
- !ShouldInsertBackwards(common.capacity(), hash, ctrl)) {
- return {seq.offset(), /*probe_length=*/0};
- }
- while (true) {
- GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
- auto mask = g.MaskEmptyOrDeleted();
- if (mask) {
- return {
- seq.offset(GetInsertionOffset(mask, common.capacity(), hash, ctrl)),
- seq.index()};
- }
- seq.next();
- ABSL_SWISSTABLE_ASSERT(seq.index() <= common.capacity() && "full table!");
- }
+inline size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,
+ size_t old_capacity,
+ ctrl_t* new_ctrl,
+ size_t new_capacity) {
+ size_t index_diff = old_index - h1;
+ // The first probe group starts with h1 & capacity.
+ // All following groups start at (h1 + Group::kWidth * K) & capacity.
+ // We can find an index within the floating group as index_diff modulo
+ // Group::kWidth.
+ // Both old and new capacity are larger than Group::kWidth so we can avoid
+ // computing `& capacity`.
+ size_t in_floating_group_index = index_diff & (Group::kWidth - 1);
+ // By subtracting we will get the difference between the first probe group
+ // and the probe group corresponding to old_index.
+ index_diff -= in_floating_group_index;
+ if (ABSL_PREDICT_TRUE((index_diff & old_capacity) == 0)) {
+ size_t new_index = (h1 + in_floating_group_index) & new_capacity;
+ ABSL_ASSUME(new_index != kProbedElementIndexSentinel);
+ return new_index;
+ }
+ ABSL_SWISSTABLE_ASSERT(((old_index - h1) & old_capacity) >= Group::kWidth);
+ // Try to insert element into the first probe group.
+ // new_ctrl is not yet fully initialized so we can't use regular search via
+ // find_first_non_full.
+
+  // We can search in the first probe group only if it is located in the
+  // already-initialized part of the table.
+ if (ABSL_PREDICT_FALSE((h1 & old_capacity) >= old_index)) {
+ return kProbedElementIndexSentinel;
+ }
+ size_t offset = h1 & new_capacity;
+ Group new_g(new_ctrl + offset);
+ if (auto mask = new_g.MaskNonFull(); ABSL_PREDICT_TRUE(mask)) {
+ size_t result = offset + mask.LowestBitSet();
+ ABSL_ASSUME(result != kProbedElementIndexSentinel);
+ return result;
+ }
+ return kProbedElementIndexSentinel;
}
-// Extern template for inline function keep possibility of inlining.
+// Extern template for inline function keeps possibility of inlining.
// When compiler decided to not inline, no symbols will be added to the
// corresponding translation unit.
-extern template FindInfo find_first_non_full(const CommonFields&, size_t);
-
-// Non-inlined version of find_first_non_full for use in less
-// performance critical routines.
-FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
-
-inline void ResetGrowthLeft(CommonFields& common) {
- common.growth_info().InitGrowthLeftNoDeleted(
- CapacityToGrowth(common.capacity()) - common.size());
-}
-
-// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
-// array as marked as empty.
-inline void ResetCtrl(CommonFields& common, size_t slot_size) {
- const size_t capacity = common.capacity();
- ctrl_t* ctrl = common.control();
- std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
- capacity + 1 + NumClonedBytes());
- ctrl[capacity] = ctrl_t::kSentinel;
- SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
-}
+extern template size_t TryFindNewIndexWithoutProbing(size_t h1,
+ size_t old_index,
+ size_t old_capacity,
+ ctrl_t* new_ctrl,
+ size_t new_capacity);
// Sets sanitizer poisoning for slot corresponding to control byte being set.
inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
@@ -1899,6 +1547,22 @@ inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
}
+// Like SetCtrl, but in a table with capacity >= Group::kWidth - 1,
+// we can save some operations when setting the cloned control byte.
+inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, ctrl_t h,
+ size_t slot_size) {
+ ABSL_SWISSTABLE_ASSERT(c.capacity() >= Group::kWidth - 1);
+ DoSanitizeOnSetCtrl(c, i, h, slot_size);
+ ctrl_t* ctrl = c.control();
+ ctrl[i] = h;
+ ctrl[((i - NumClonedBytes()) & c.capacity()) + NumClonedBytes()] = h;
+}
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
+inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, h2_t h,
+ size_t slot_size) {
+ SetCtrlInLargeTable(c, i, static_cast<ctrl_t>(h), slot_size);
+}
+
// growth_info (which is a size_t) is stored with the backing array.
constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
return (std::max)(align_of_slot, alignof(GrowthInfo));
@@ -1911,423 +1575,283 @@ inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
(slot * slot_size));
}
-// Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
-// No insertion to the table allowed during Callback call.
+// Iterates over all full slots and calls `cb(const ctrl_t*, void*)`.
+// No insertion to the table is allowed during `cb` call.
// Erasure is allowed only for the element passed to the callback.
-template <class SlotType, class Callback>
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
- const CommonFields& c, SlotType* slot, Callback cb) {
- const size_t cap = c.capacity();
- const ctrl_t* ctrl = c.control();
- if (is_small(cap)) {
- // Mirrored/cloned control bytes in small table are also located in the
- // first group (starting from position 0). We are taking group from position
- // `capacity` in order to avoid duplicates.
-
- // Small tables capacity fits into portable group, where
- // GroupPortableImpl::MaskFull is more efficient for the
- // capacity <= GroupPortableImpl::kWidth.
- ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth &&
- "unexpectedly large small capacity");
- static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
- "unexpected group width");
- // Group starts from kSentinel slot, so indices in the mask will
- // be increased by 1.
- const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
- --ctrl;
- --slot;
- for (uint32_t i : mask) {
- cb(ctrl + i, slot + i);
- }
- return;
- }
- size_t remaining = c.size();
- ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
- while (remaining != 0) {
- for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
- ABSL_SWISSTABLE_ASSERT(IsFull(ctrl[i]) &&
- "hash table was modified unexpectedly");
- cb(ctrl + i, slot + i);
- --remaining;
- }
- ctrl += Group::kWidth;
- slot += Group::kWidth;
- ABSL_SWISSTABLE_ASSERT(
- (remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
- "hash table was modified unexpectedly");
- }
- // NOTE: erasure of the current element is allowed in callback for
- // absl::erase_if specialization. So we use `>=`.
- ABSL_SWISSTABLE_ASSERT(original_size_for_assert >= c.size() &&
- "hash table was modified unexpectedly");
-}
+// The table must not be in SOO mode.
+void IterateOverFullSlots(const CommonFields& c, size_t slot_size,
+ absl::FunctionRef<void(const ctrl_t*, void*)> cb);
template <typename CharAlloc>
-constexpr bool ShouldSampleHashtablezInfo() {
+constexpr bool ShouldSampleHashtablezInfoForAlloc() {
// Folks with custom allocators often make unwarranted assumptions about the
// behavior of their classes vis-a-vis trivial destructability and what
// calls they will or won't make. Avoid sampling for people with custom
// allocators to get us out of this mess. This is not a hard guarantee but
// a workaround while we plan the exact guarantee we want to provide.
- return std::is_same<CharAlloc, std::allocator<char>>::value;
+ return std::is_same_v<CharAlloc, std::allocator<char>>;
}
template <bool kSooEnabled>
-HashtablezInfoHandle SampleHashtablezInfo(size_t sizeof_slot, size_t sizeof_key,
- size_t sizeof_value,
- size_t old_capacity, bool was_soo,
- HashtablezInfoHandle forced_infoz,
- CommonFields& c) {
- if (forced_infoz.IsSampled()) return forced_infoz;
+bool ShouldSampleHashtablezInfoOnResize(bool force_sampling,
+ bool is_hashtablez_eligible,
+ size_t old_capacity, CommonFields& c) {
+ if (!is_hashtablez_eligible) return false;
+ // Force sampling is only allowed for SOO tables.
+ ABSL_SWISSTABLE_ASSERT(kSooEnabled || !force_sampling);
+ if (kSooEnabled && force_sampling) {
+ return true;
+ }
// In SOO, we sample on the first insertion so if this is an empty SOO case
// (e.g. when reserve is called), then we still need to sample.
- if (kSooEnabled && was_soo && c.size() == 0) {
- return Sample(sizeof_slot, sizeof_key, sizeof_value, SooCapacity());
+ if (kSooEnabled && old_capacity == SooCapacity() && c.empty()) {
+ return ShouldSampleNextTable();
}
- // For non-SOO cases, we sample whenever the capacity is increasing from zero
- // to non-zero.
if (!kSooEnabled && old_capacity == 0) {
- return Sample(sizeof_slot, sizeof_key, sizeof_value, 0);
+ return ShouldSampleNextTable();
}
- return c.infoz();
+ return false;
}
-// Helper class to perform resize of the hash set.
-//
-// It contains special optimizations for small group resizes.
-// See GrowIntoSingleGroupShuffleControlBytes for details.
-class HashSetResizeHelper {
- public:
- explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot,
- HashtablezInfoHandle forced_infoz)
- : old_capacity_(c.capacity()),
- had_infoz_(c.has_infoz()),
- was_soo_(was_soo),
- had_soo_slot_(had_soo_slot),
- forced_infoz_(forced_infoz) {}
-
- // Optimized for small groups version of `find_first_non_full`.
- // Beneficial only right after calling `raw_hash_set::resize`.
- // It is safe to call in case capacity is big or was not changed, but there
- // will be no performance benefit.
- // It has implicit assumption that `resize` will call
- // `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`.
- // Falls back to `find_first_non_full` in case of big groups.
- static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
- size_t old_capacity, size_t hash);
-
- HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; }
- void* old_soo_data() { return old_heap_or_soo_.get_soo_data(); }
- ctrl_t* old_ctrl() const {
- ABSL_SWISSTABLE_ASSERT(!was_soo_);
- return old_heap_or_soo_.control();
- }
- void* old_slots() const {
- ABSL_SWISSTABLE_ASSERT(!was_soo_);
- return old_heap_or_soo_.slot_array().get();
- }
- size_t old_capacity() const { return old_capacity_; }
-
- // Returns the index of the SOO slot when growing from SOO to non-SOO in a
- // single group. See also InitControlBytesAfterSoo(). It's important to use
- // index 1 so that when resizing from capacity 1 to 3, we can still have
- // random iteration order between the first two inserted elements.
- // I.e. it allows inserting the second element at either index 0 or 2.
- static size_t SooSlotIndex() { return 1; }
-
- // Allocates a backing array for the hashtable.
- // Reads `capacity` and updates all other fields based on the result of
- // the allocation.
- //
- // It also may do the following actions:
- // 1. initialize control bytes
- // 2. initialize slots
- // 3. deallocate old slots.
- //
- // We are bundling a lot of functionality
- // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code
- // duplication in raw_hash_set<>::resize.
- //
- // `c.capacity()` must be nonzero.
- // POSTCONDITIONS:
- // 1. CommonFields is initialized.
- //
- // if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy
- // Both control bytes and slots are fully initialized.
- // old_slots are deallocated.
- // infoz.RecordRehash is called.
- //
- // if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy
- // Control bytes are fully initialized.
- // infoz.RecordRehash is called.
- // GrowSizeIntoSingleGroup must be called to finish slots initialization.
- //
- // if !IsGrowingIntoSingleGroupApplicable
- // Control bytes are initialized to empty table via ResetCtrl.
- // raw_hash_set<>::resize must insert elements regularly.
- // infoz.RecordRehash is called if old_capacity == 0.
- //
- // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
- template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
- bool SooEnabled, size_t AlignOfSlot>
- ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc,
- ctrl_t soo_slot_h2,
- size_t key_size,
- size_t value_size) {
- ABSL_SWISSTABLE_ASSERT(c.capacity());
- HashtablezInfoHandle infoz =
- ShouldSampleHashtablezInfo<Alloc>()
- ? SampleHashtablezInfo<SooEnabled>(SizeOfSlot, key_size, value_size,
- old_capacity_, was_soo_,
- forced_infoz_, c)
- : HashtablezInfoHandle{};
-
- const bool has_infoz = infoz.IsSampled();
- RawHashSetLayout layout(c.capacity(), AlignOfSlot, has_infoz);
- char* mem = static_cast<char*>(Allocate<BackingArrayAlignment(AlignOfSlot)>(
- &alloc, layout.alloc_size(SizeOfSlot)));
- const GenerationType old_generation = c.generation();
- c.set_generation_ptr(
- reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
- c.set_generation(NextGeneration(old_generation));
- c.set_control(reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
- c.set_slots(mem + layout.slot_offset());
- ResetGrowthLeft(c);
-
- const bool grow_single_group =
- IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity());
- if (SooEnabled && was_soo_ && grow_single_group) {
- InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity());
- if (TransferUsesMemcpy && had_soo_slot_) {
- TransferSlotAfterSoo(c, SizeOfSlot);
- }
- // SooEnabled implies that old_capacity_ != 0.
- } else if ((SooEnabled || old_capacity_ != 0) && grow_single_group) {
- if (TransferUsesMemcpy) {
- GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot);
- DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot);
- } else {
- GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity());
- }
- } else {
- ResetCtrl(c, SizeOfSlot);
- }
-
- c.set_has_infoz(has_infoz);
- if (has_infoz) {
- infoz.RecordStorageChanged(c.size(), layout.capacity());
- if ((SooEnabled && was_soo_) || grow_single_group || old_capacity_ == 0) {
- infoz.RecordRehash(0);
- }
- c.set_infoz(infoz);
- }
- return grow_single_group;
- }
-
- // Relocates slots into new single group consistent with
- // GrowIntoSingleGroupShuffleControlBytes.
- //
- // PRECONDITIONS:
- // 1. GrowIntoSingleGroupShuffleControlBytes was already called.
- template <class PolicyTraits, class Alloc>
- void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) {
- ABSL_SWISSTABLE_ASSERT(old_capacity_ < Group::kWidth / 2);
- ABSL_SWISSTABLE_ASSERT(
- IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
- using slot_type = typename PolicyTraits::slot_type;
- ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity()));
-
- auto* new_slots = static_cast<slot_type*>(c.slot_array()) + 1;
- auto* old_slots_ptr = static_cast<slot_type*>(old_slots());
- auto* old_ctrl_ptr = old_ctrl();
-
- for (size_t i = 0; i < old_capacity_; ++i, ++new_slots) {
- if (IsFull(old_ctrl_ptr[i])) {
- SanitizerUnpoisonMemoryRegion(new_slots, sizeof(slot_type));
- PolicyTraits::transfer(&alloc_ref, new_slots, old_slots_ptr + i);
- }
- }
- PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
- }
-
- // Deallocates old backing array.
- template <size_t AlignOfSlot, class CharAlloc>
- void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) {
- SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
- auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_);
- Deallocate<BackingArrayAlignment(AlignOfSlot)>(
- &alloc_ref, old_ctrl() - layout.control_offset(),
- layout.alloc_size(slot_size));
- }
-
- private:
- // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing.
- static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity,
- size_t new_capacity) {
- // NOTE that `old_capacity < new_capacity` in order to have
- // `old_capacity < Group::kWidth / 2` to make faster copies of 8 bytes.
- return is_single_group(new_capacity) && old_capacity < new_capacity;
- }
-
- // Relocates control bytes and slots into new single group for
- // transferable objects.
- // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
- void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size);
-
- // If there was an SOO slot and slots are transferable, transfers the SOO slot
- // into the new heap allocation. Must be called only if
- // IsGrowingIntoSingleGroupApplicable returned true.
- void TransferSlotAfterSoo(CommonFields& c, size_t slot_size);
-
- // Shuffle control bits deterministically to the next capacity.
- // Returns offset for newly added element with given hash.
- //
- // PRECONDITIONs:
- // 1. new_ctrl is allocated for new_capacity,
- // but not initialized.
- // 2. new_capacity is a single group.
- // 3. old_capacity > 0.
- //
- // All elements are transferred into the first `old_capacity + 1` positions
- // of the new_ctrl. Elements are shifted by 1 in order to keep a space at the
- // beginning for the new element.
- // Position of the new added element will be based on `H1` and is not
- // deterministic.
- //
- // Examples:
- // S = kSentinel, E = kEmpty
- //
- // old_ctrl = 0SEEEEEEE...
- // new_ctrl = E0ESE0EEE...
- //
- // old_ctrl = 012S012EEEEEEEEE...
- // new_ctrl = E012EEESE012EEE...
- //
- // old_ctrl = 0123456S0123456EEEEEEEEEEE...
- // new_ctrl = E0123456EEEEEESE0123456EEE...
- void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
- size_t new_capacity) const;
-
- // If the table was SOO, initializes new control bytes. `h2` is the control
- // byte corresponding to the full slot. Must be called only if
- // IsGrowingIntoSingleGroupApplicable returned true.
- // Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`.
- void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
- size_t new_capacity);
-
- // Shuffle trivially transferable slots in the way consistent with
- // GrowIntoSingleGroupShuffleControlBytes.
- //
- // PRECONDITIONs:
- // 1. old_capacity must be non-zero.
- // 2. new_ctrl is fully initialized using
- // GrowIntoSingleGroupShuffleControlBytes.
- // 3. new_slots is allocated and *not* poisoned.
- //
- // POSTCONDITIONS:
- // 1. new_slots are transferred from old_slots_ consistent with
- // GrowIntoSingleGroupShuffleControlBytes.
- // 2. Empty new_slots are *not* poisoned.
- void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots,
- size_t slot_size) const;
-
- // Poison empty slots that were transferred using the deterministic algorithm
- // described above.
- // PRECONDITIONs:
- // 1. new_ctrl is fully initialized using
- // GrowIntoSingleGroupShuffleControlBytes.
- // 2. new_slots is fully initialized consistent with
- // GrowIntoSingleGroupShuffleControlBytes.
- void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const {
- // poison non full items
- for (size_t i = 0; i < c.capacity(); ++i) {
- if (!IsFull(c.control()[i])) {
- SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
- slot_size);
- }
- }
- }
-
- HeapOrSoo old_heap_or_soo_;
- size_t old_capacity_;
- bool had_infoz_;
- bool was_soo_;
- bool had_soo_slot_;
- // Either null infoz or a pre-sampled forced infoz for SOO tables.
- HashtablezInfoHandle forced_infoz_;
-};
-
-inline void PrepareInsertCommon(CommonFields& common) {
- common.increment_size();
- common.maybe_increment_generation_on_insert();
+// Allocates `n` bytes for a backing array.
+template <size_t AlignOfBackingArray, typename Alloc>
+ABSL_ATTRIBUTE_NOINLINE void* AllocateBackingArray(void* alloc, size_t n) {
+ return Allocate<AlignOfBackingArray>(static_cast<Alloc*>(alloc), n);
}
-// Like prepare_insert, but for the case of inserting into a full SOO table.
-size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
- CommonFields& common);
+template <size_t AlignOfBackingArray, typename Alloc>
+ABSL_ATTRIBUTE_NOINLINE void DeallocateBackingArray(
+ void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size,
+ size_t slot_align, bool had_infoz) {
+ RawHashSetLayout layout(capacity, slot_size, slot_align, had_infoz);
+ void* backing_array = ctrl - layout.control_offset();
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(backing_array, layout.alloc_size());
+ Deallocate<AlignOfBackingArray>(static_cast<Alloc*>(alloc), backing_array,
+ layout.alloc_size());
+}
// PolicyFunctions bundles together some information for a particular
// raw_hash_set<T, ...> instantiation. This information is passed to
// type-erased functions that want to do small amounts of type-specific
// work.
struct PolicyFunctions {
- size_t slot_size;
+ uint32_t key_size;
+ uint32_t value_size;
+ uint32_t slot_size;
+ uint16_t slot_align;
+ bool soo_enabled;
+ bool is_hashtablez_eligible;
// Returns the pointer to the hash function stored in the set.
- const void* (*hash_fn)(const CommonFields& common);
+ void* (*hash_fn)(CommonFields& common);
// Returns the hash of the pointed-to slot.
size_t (*hash_slot)(const void* hash_fn, void* slot);
- // Transfers the contents of src_slot to dst_slot.
- void (*transfer)(void* set, void* dst_slot, void* src_slot);
+ // Transfers the contents of `count` slots from src_slot to dst_slot.
+  // We use the ability to transfer several slots during single-group table growth.
+ void (*transfer_n)(void* set, void* dst_slot, void* src_slot, size_t count);
- // Deallocates the backing store from common.
- void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
+ // Returns the pointer to the CharAlloc stored in the set.
+ void* (*get_char_alloc)(CommonFields& common);
+
+ // Allocates n bytes for the backing store for common.
+ void* (*alloc)(void* alloc, size_t n);
- // Resizes set to the new capacity.
- // Arguments are used as in raw_hash_set::resize_impl.
- void (*resize)(CommonFields& common, size_t new_capacity,
- HashtablezInfoHandle forced_infoz);
+ // Deallocates the backing store from common.
+ void (*dealloc)(void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size,
+ size_t slot_align, bool had_infoz);
+
+ // Implementation detail of GrowToNextCapacity.
+ // Iterates over all full slots and transfers unprobed elements.
+ // Initializes the new control bytes except mirrored bytes and kSentinel.
+ // Caller must finish the initialization.
+ // All slots corresponding to the full control bytes are transferred.
+ // Probed elements are reported by `encode_probed_element` callback.
+  // encode_probed_element may overwrite the old_ctrl buffer up to source_offset.
+ // Different encoding is used depending on the capacity of the table.
+ // See ProbedItem*Bytes classes for details.
+ void (*transfer_unprobed_elements_to_next_capacity)(
+ CommonFields& common, const ctrl_t* old_ctrl, void* old_slots,
+ // TODO(b/382423690): Try to use absl::FunctionRef here.
+ void* probed_storage,
+ void (*encode_probed_element)(void* probed_storage, h2_t h2,
+ size_t source_offset, size_t h1));
+
+ uint8_t soo_capacity() const {
+ return static_cast<uint8_t>(soo_enabled ? SooCapacity() : 0);
+ }
};
+// Returns the maximum valid size for a table with 1-byte slots.
+// This function is a utility shared by MaxValidSize and IsAboveValidSize.
+// Template parameter is only used to enable testing.
+template <size_t kSizeOfSizeT = sizeof(size_t)>
+constexpr size_t MaxValidSizeFor1ByteSlot() {
+ if constexpr (kSizeOfSizeT == 8) {
+ return CapacityToGrowth(
+ static_cast<size_t>(uint64_t{1} << HashtableSize::kSizeBitCount) - 1);
+ } else {
+ static_assert(kSizeOfSizeT == 4);
+ return CapacityToGrowth((size_t{1} << (kSizeOfSizeT * 8 - 2)) - 1);
+ }
+}
+
+// Returns the maximum valid size for a table with provided slot size.
+// Template parameter is only used to enable testing.
+template <size_t kSizeOfSizeT = sizeof(size_t)>
+constexpr size_t MaxValidSize(size_t slot_size) {
+ if constexpr (kSizeOfSizeT == 8) {
+ // For small slot sizes we are limited by HashtableSize::kSizeBitCount.
+ if (slot_size < size_t{1} << (64 - HashtableSize::kSizeBitCount)) {
+ return MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
+ }
+ return (size_t{1} << (kSizeOfSizeT * 8 - 2)) / slot_size;
+ } else {
+ return MaxValidSizeFor1ByteSlot<kSizeOfSizeT>() / slot_size;
+ }
+}
+
+// Returns true if size is larger than the maximum valid size.
+// It is an optimization to avoid the division operation in the common case.
+// Template parameter is only used to enable testing.
+template <size_t kSizeOfSizeT = sizeof(size_t)>
+constexpr bool IsAboveValidSize(size_t size, size_t slot_size) {
+ if constexpr (kSizeOfSizeT == 8) {
+ // For small slot sizes we are limited by HashtableSize::kSizeBitCount.
+ if (ABSL_PREDICT_TRUE(slot_size <
+ (size_t{1} << (64 - HashtableSize::kSizeBitCount)))) {
+ return size > MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
+ }
+ return size > MaxValidSize<kSizeOfSizeT>(slot_size);
+ } else {
+ return uint64_t{size} * slot_size >
+ MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
+ }
+}
+
+// Returns the index of the SOO slot when growing from SOO to non-SOO in a
+// single group. See also InitializeSmallControlBytesAfterSoo(). It's important
+// to use index 1 so that when resizing from capacity 1 to 3, we can still have
+// random iteration order between the first two inserted elements.
+// I.e. it allows inserting the second element at either index 0 or 2.
+constexpr size_t SooSlotIndex() { return 1; }
+
+// Maximum capacity for the algorithm for small table after SOO.
+// Note that typical size after SOO is 3, but we allow up to 7.
+// Allowing up to 16 would require an additional store that can be avoided.
+constexpr size_t MaxSmallAfterSooCapacity() { return 7; }
+
+// Type erased version of raw_hash_set::reserve.
+// Requires: `new_size > policy.soo_capacity`.
+void ReserveTableToFitNewSize(CommonFields& common,
+ const PolicyFunctions& policy, size_t new_size);
+
+// Resizes empty non-allocated table to the next valid capacity after
+// `bucket_count`. Requires:
+// 1. `c.capacity() == policy.soo_capacity`.
+// 2. `c.empty()`.
+// 3. `new_size > policy.soo_capacity`.
+// The table will be attempted to be sampled.
+void ReserveEmptyNonAllocatedTableToFitBucketCount(
+ CommonFields& common, const PolicyFunctions& policy, size_t bucket_count);
+
+// Type erased version of raw_hash_set::rehash.
+void Rehash(CommonFields& common, const PolicyFunctions& policy, size_t n);
+
+// Type erased version of copy constructor.
+void Copy(CommonFields& common, const PolicyFunctions& policy,
+ const CommonFields& other,
+ absl::FunctionRef<void(void*, const void*)> copy_fn);
+
+// Returns the optimal size for memcpy when transferring the SOO slot.
+// Otherwise, returns the optimal size for memcpying the SOO slot
+// to SooSlotIndex().
+// At the destination we are allowed to copy up to twice as many bytes,
+// because there is at least one more slot after SooSlotIndex().
+// The result must not exceed MaxSooSlotSize().
+// Some of the cases are merged to minimize the number of function
+// instantiations.
+constexpr size_t OptimalMemcpySizeForSooSlotTransfer(
+ size_t slot_size, size_t max_soo_slot_size = MaxSooSlotSize()) {
+ static_assert(MaxSooSlotSize() >= 8, "unexpectedly small SOO slot size");
+ if (slot_size == 1) {
+ return 1;
+ }
+ if (slot_size <= 3) {
+ return 4;
+ }
+ // We are merging 4 and 8 into one case because we expect them to be the
+  // hottest cases. Copying 8 bytes is just as fast on common architectures.
+ if (slot_size <= 8) {
+ return 8;
+ }
+ if (max_soo_slot_size <= 16) {
+ return max_soo_slot_size;
+ }
+ if (slot_size <= 16) {
+ return 16;
+ }
+ if (max_soo_slot_size <= 24) {
+ return max_soo_slot_size;
+ }
+ static_assert(MaxSooSlotSize() <= 24, "unexpectedly large SOO slot size");
+ return 24;
+}
+
+// Resizes SOO table to the NextCapacity(SooCapacity()) and prepares insert for
+// the given new_hash. Returns the offset of the new element.
+// `soo_slot_ctrl` is the control byte of the SOO slot.
+// If soo_slot_ctrl is kEmpty
+// 1. The table must be empty.
+// 2. Table will be forced to be sampled.
+// All possible template combinations are defined in cc file to improve
+// compilation time.
+template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>
+size_t GrowSooTableToNextCapacityAndPrepareInsert(CommonFields& common,
+ const PolicyFunctions& policy,
+ size_t new_hash,
+ ctrl_t soo_slot_ctrl);
+
+// As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO
+// table to be sampled. SOO tables need to switch from SOO to heap in order to
+// store the infoz. No-op if sampling is disabled or not possible.
+void GrowFullSooTableToNextCapacityForceSampling(CommonFields& common,
+ const PolicyFunctions& policy);
+
+// Resizes table with allocated slots and change the table seed.
+// Tables with SOO enabled must have capacity > policy.soo_capacity.
+// No sampling will be performed since table is already allocated.
+void ResizeAllocatedTableWithSeedChange(CommonFields& common,
+ const PolicyFunctions& policy,
+ size_t new_capacity);
+
// ClearBackingArray clears the backing array, either modifying it in place,
// or creating a new one based on the value of "reuse".
// REQUIRES: c.capacity > 0
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
- bool reuse, bool soo_enabled);
+ void* alloc, bool reuse, bool soo_enabled);
// Type-erased version of raw_hash_set::erase_meta_only.
void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
-// Function to place in PolicyFunctions::dealloc for raw_hash_sets
-// that are using std::allocator. This allows us to share the same
-// function body for raw_hash_set instantiations that have the
-// same slot alignment.
-template <size_t AlignOfSlot>
-ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
- const PolicyFunctions& policy) {
- // Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(common.slot_array(),
- policy.slot_size * common.capacity());
-
- std::allocator<char> alloc;
- common.infoz().Unregister();
- Deallocate<BackingArrayAlignment(AlignOfSlot)>(
- &alloc, common.backing_array_start(),
- common.alloc_size(policy.slot_size, AlignOfSlot));
-}
-
// For trivially relocatable types we use memcpy directly. This allows us to
// share the same function body for raw_hash_set instantiations that have the
// same slot size as long as they are relocatable.
+// A separate function for relocating a single slot causes significant binary bloat.
template <size_t SizeOfSlot>
-ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
- memcpy(dst, src, SizeOfSlot);
+ABSL_ATTRIBUTE_NOINLINE void TransferNRelocatable(void*, void* dst, void* src,
+ size_t count) {
+ // TODO(b/382423690): Experiment with making specialization for power of 2 and
+ // non power of 2. This would require passing the size of the slot.
+ memcpy(dst, src, SizeOfSlot * count);
}
-// Type erased raw_hash_set::get_hash_ref_fn for the empty hash function case.
-const void* GetHashRefForEmptyHasher(const CommonFields& common);
+// Returns a pointer to `common`. This is used to implement type erased
+// raw_hash_set::get_hash_ref_fn and raw_hash_set::get_alloc_ref_fn for the
+// empty class cases.
+void* GetRefForEmptyClass(CommonFields& common);
// Given the hash of a value not currently in the table and the first empty
// slot in the probe sequence, finds a viable slot index to insert it at.
@@ -2344,8 +1868,8 @@ const void* GetHashRefForEmptyHasher(const CommonFields& common);
// REQUIRES: Table is not SOO.
// REQUIRES: At least one non-full slot available.
// REQUIRES: `target` is a valid empty position to insert.
-size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
- const PolicyFunctions& policy);
+size_t PrepareInsertNonSoo(CommonFields& common, const PolicyFunctions& policy,
+ size_t hash, FindInfo target);
// A SwissTable.
//
@@ -2376,9 +1900,6 @@ class raw_hash_set {
public:
using init_type = typename PolicyTraits::init_type;
using key_type = typename PolicyTraits::key_type;
- // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
- // code fixes!
- using slot_type = typename PolicyTraits::slot_type;
using allocator_type = Alloc;
using size_type = size_t;
using difference_type = ptrdiff_t;
@@ -2393,6 +1914,7 @@ class raw_hash_set {
using const_pointer = typename absl::allocator_traits<
allocator_type>::template rebind_traits<value_type>::const_pointer;
+ private:
// Alias used for heterogeneous lookup functions.
// `key_arg<K>` evaluates to `K` when the functors are transparent and to
// `key_type` otherwise. It permits template argument deduction on `K` for the
@@ -2400,7 +1922,8 @@ class raw_hash_set {
template <class K>
using key_arg = typename KeyArgImpl::template type<K, key_type>;
- private:
+ using slot_type = typename PolicyTraits::slot_type;
+
// TODO(b/289225379): we could add extra SOO space inside raw_hash_set
// after CommonFields to allow inlining larger slot_types (e.g. std::string),
// but it's a bit complicated if we want to support incomplete mapped_type in
@@ -2650,18 +2173,15 @@ class raw_hash_set {
std::is_nothrow_default_constructible<key_equal>::value &&
std::is_nothrow_default_constructible<allocator_type>::value) {}
- ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
+ explicit raw_hash_set(
size_t bucket_count, const hasher& hash = hasher(),
const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
: settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
alloc) {
if (bucket_count > DefaultCapacity()) {
- if (ABSL_PREDICT_FALSE(bucket_count >
- MaxValidCapacity<sizeof(slot_type)>())) {
- HashTableSizeOverflow();
- }
- resize(NormalizeCapacity(bucket_count));
+ ReserveEmptyNonAllocatedTableToFitBucketCount(
+ common(), GetPolicyFunctions(), bucket_count);
}
}
@@ -2762,74 +2282,20 @@ class raw_hash_set {
raw_hash_set(const raw_hash_set& that)
: raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
- that.alloc_ref())) {}
+ allocator_type(that.char_alloc_ref()))) {}
raw_hash_set(const raw_hash_set& that, const allocator_type& a)
- : raw_hash_set(GrowthToLowerboundCapacity(that.size()), that.hash_ref(),
- that.eq_ref(), a) {
+ : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
that.AssertNotDebugCapacity();
- const size_t size = that.size();
- if (size == 0) {
- return;
- }
- // We don't use `that.is_soo()` here because `that` can have non-SOO
- // capacity but have a size that fits into SOO capacity.
- if (fits_in_soo(size)) {
- ABSL_SWISSTABLE_ASSERT(size == 1);
- common().set_full_soo();
- emplace_at(soo_iterator(), *that.begin());
- const HashtablezInfoHandle infoz = try_sample_soo();
- if (infoz.IsSampled()) resize_with_soo_infoz(infoz);
- return;
- }
- ABSL_SWISSTABLE_ASSERT(!that.is_soo());
- const size_t cap = capacity();
- // Note about single group tables:
- // 1. It is correct to have any order of elements.
- // 2. Order has to be non deterministic.
- // 3. We are assigning elements with arbitrary `shift` starting from
- // `capacity + shift` position.
- // 4. `shift` must be coprime with `capacity + 1` in order to be able to use
- // modular arithmetic to traverse all positions, instead if cycling
- // through a subset of positions. Odd numbers are coprime with any
- // `capacity + 1` (2^N).
- size_t offset = cap;
- const size_t shift =
- is_single_group(cap) ? (PerTableSalt(control()) | 1) : 0;
- IterateOverFullSlots(
- that.common(), that.slot_array(),
- [&](const ctrl_t* that_ctrl,
- slot_type* that_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE {
- if (shift == 0) {
- // Big tables case. Position must be searched via probing.
- // The table is guaranteed to be empty, so we can do faster than
- // a full `insert`.
- const size_t hash = PolicyTraits::apply(
- HashElement{hash_ref()}, PolicyTraits::element(that_slot));
- FindInfo target = find_first_non_full_outofline(common(), hash);
- infoz().RecordInsert(hash, target.probe_length);
- offset = target.offset;
- } else {
- // Small tables case. Next position is computed via shift.
- offset = (offset + shift) & cap;
- }
- const h2_t h2 = static_cast<h2_t>(*that_ctrl);
- ABSL_SWISSTABLE_ASSERT( // We rely that hash is not changed for small
- // tables.
- H2(PolicyTraits::apply(HashElement{hash_ref()},
- PolicyTraits::element(that_slot))) == h2 &&
- "hash function value changed unexpectedly during the copy");
- SetCtrl(common(), offset, h2, sizeof(slot_type));
- emplace_at(iterator_at(offset), PolicyTraits::element(that_slot));
- common().maybe_increment_generation_on_insert();
- });
- if (shift != 0) {
- // On small table copy we do not record individual inserts.
- // RecordInsert requires hash, but it is unknown for small tables.
- infoz().RecordStorageChanged(size, cap);
- }
- common().set_size(size);
- growth_info().OverwriteManyEmptyAsFull(size);
+ if (that.empty()) return;
+ Copy(common(), GetPolicyFunctions(), that.common(),
+ [this](void* dst, const void* src) {
+ // TODO(b/413598253): type erase for trivially copyable types via
+ // PolicyTraits.
+ construct(to_slot(dst),
+ PolicyTraits::element(
+ static_cast<slot_type*>(const_cast<void*>(src))));
+ });
}
ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
@@ -2843,7 +2309,7 @@ class raw_hash_set {
settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo()
? std::move(that.common())
: CommonFields{full_soo_tag_t{}},
- that.hash_ref(), that.eq_ref(), that.alloc_ref()) {
+ that.hash_ref(), that.eq_ref(), that.char_alloc_ref()) {
if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) {
transfer(soo_slot(), that.soo_slot());
}
@@ -2854,7 +2320,7 @@ class raw_hash_set {
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(),
that.eq_ref(), a) {
- if (a == that.alloc_ref()) {
+ if (CharAlloc(a) == that.char_alloc_ref()) {
swap_common(that);
annotate_for_bug_detection_on_move(that);
} else {
@@ -2871,7 +2337,9 @@ class raw_hash_set {
// is an exact match for that.size(). If this->capacity() is too big, then
// it would make iteration very slow to reuse the allocation. Maybe we can
// do the same heuristic as clear() and reuse if it's small enough.
- raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
+ allocator_type alloc(propagate_alloc ? that.char_alloc_ref()
+ : char_alloc_ref());
+ raw_hash_set tmp(that, alloc);
// NOLINTNEXTLINE: not returning *this for performance.
return assign_impl<propagate_alloc>(std::move(tmp));
}
@@ -2890,14 +2358,14 @@ class raw_hash_set {
~raw_hash_set() {
destructor_impl();
-#ifndef NDEBUG
- common().set_capacity(InvalidCapacity::kDestroyed);
-#endif
+ if constexpr (SwisstableAssertAccessToDestroyedTable()) {
+ common().set_capacity(InvalidCapacity::kDestroyed);
+ }
}
iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ABSL_PREDICT_FALSE(empty())) return end();
- if (is_soo()) return soo_iterator();
+ if (capacity() == 1) return single_iterator();
iterator it = {control(), common().slots_union(),
common().generation_ptr()};
it.skip_empty_or_deleted();
@@ -2933,9 +2401,7 @@ class raw_hash_set {
ABSL_ASSUME(cap >= kDefaultCapacity);
return cap;
}
- size_t max_size() const {
- return CapacityToGrowth(MaxValidCapacity<sizeof(slot_type)>());
- }
+ size_t max_size() const { return MaxValidSize(sizeof(slot_type)); }
ABSL_ATTRIBUTE_REINITIALIZES void clear() {
if (SwisstableGenerationsEnabled() &&
@@ -2958,8 +2424,7 @@ class raw_hash_set {
common().set_empty_soo();
} else {
destroy_slots();
- ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128,
- SooEnabled());
+ clear_backing_array(/*reuse=*/cap < 128);
}
common().set_reserved_growth(0);
common().set_reservation_size(0);
@@ -2971,15 +2436,15 @@ class raw_hash_set {
// flat_hash_map<std::string, int> m;
// m.insert(std::make_pair("abc", 42));
template <class T,
- std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
- IsNotBitField<T>::value &&
- !IsLifetimeBoundAssignmentFrom<T>::value,
- int> = 0>
+ int = std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
+ IsNotBitField<T>::value &&
+ !IsLifetimeBoundAssignmentFrom<T>::value,
+ int>()>
std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(std::forward<T>(value));
}
- template <class T,
+ template <class T, int&...,
std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
IsNotBitField<T>::value &&
IsLifetimeBoundAssignmentFrom<T>::value,
@@ -2987,7 +2452,7 @@ class raw_hash_set {
std::pair<iterator, bool> insert(
T&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return emplace(std::forward<T>(value));
+ return this->template insert<T, 0>(std::forward<T>(value));
}
// This overload kicks in when the argument is a bitfield or an lvalue of
@@ -3001,22 +2466,22 @@ class raw_hash_set {
// const char* p = "hello";
// s.insert(p);
//
- template <class T, std::enable_if_t<
+ template <class T, int = std::enable_if_t<
IsDecomposableAndInsertable<const T&>::value &&
!IsLifetimeBoundAssignmentFrom<const T&>::value,
- int> = 0>
+ int>()>
std::pair<iterator, bool> insert(const T& value)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(value);
}
- template <class T,
+ template <class T, int&...,
std::enable_if_t<IsDecomposableAndInsertable<const T&>::value &&
IsLifetimeBoundAssignmentFrom<const T&>::value,
int> = 0>
std::pair<iterator, bool> insert(
const T& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return emplace(value);
+ return this->template insert<T, 0>(value);
}
// This overload kicks in when the argument is an rvalue of init_type. Its
@@ -3043,21 +2508,22 @@ class raw_hash_set {
#endif
template <class T,
- std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
- IsNotBitField<T>::value &&
- !IsLifetimeBoundAssignmentFrom<T>::value,
- int> = 0>
+ int = std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
+ IsNotBitField<T>::value &&
+ !IsLifetimeBoundAssignmentFrom<T>::value,
+ int>()>
iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(std::forward<T>(value)).first;
}
- template <class T,
+ template <class T, int&...,
std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
IsNotBitField<T>::value &&
IsLifetimeBoundAssignmentFrom<T>::value,
int> = 0>
- iterator insert(const_iterator, T&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(
- this)) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert(std::forward<T>(value)).first;
+ iterator insert(const_iterator hint,
+ T&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return this->template insert<T, 0>(hint, std::forward<T>(value));
}
template <class T, std::enable_if_t<
@@ -3198,7 +2664,8 @@ class raw_hash_set {
auto res = find_or_prepare_insert(key);
if (res.second) {
slot_type* slot = res.first.slot();
- std::forward<F>(f)(constructor(&alloc_ref(), &slot));
+ allocator_type alloc(char_alloc_ref());
+ std::forward<F>(f)(constructor(&alloc, &slot));
ABSL_SWISSTABLE_ASSERT(!slot);
}
return res.first;
@@ -3243,7 +2710,7 @@ class raw_hash_set {
iterator erase(const_iterator first,
const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
AssertNotDebugCapacity();
- // We check for empty first because ClearBackingArray requires that
+ // We check for empty first because clear_backing_array requires that
// capacity() > 0 as a precondition.
if (empty()) return end();
if (first == last) return last.inner_;
@@ -3254,11 +2721,10 @@ class raw_hash_set {
}
if (first == begin() && last == end()) {
// TODO(ezb): we access control bytes in destroy_slots so it could make
- // sense to combine destroy_slots and ClearBackingArray to avoid cache
+ // sense to combine destroy_slots and clear_backing_array to avoid cache
// misses when the table is large. Note that we also do this in clear().
destroy_slots();
- ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true,
- SooEnabled());
+ clear_backing_array(/*reuse=*/true);
common().set_reserved_growth(common().reservation_size());
return end();
}
@@ -3303,7 +2769,8 @@ class raw_hash_set {
AssertNotDebugCapacity();
AssertIsFull(position.control(), position.inner_.generation(),
position.inner_.generation_ptr(), "extract()");
- auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
+ allocator_type alloc(char_alloc_ref());
+ auto node = CommonAccess::Transfer<node_type>(alloc, position.slot());
if (is_soo()) {
common().set_empty_soo();
} else {
@@ -3329,73 +2796,16 @@ class raw_hash_set {
swap_common(that);
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
- SwapAlloc(alloc_ref(), that.alloc_ref(),
+ SwapAlloc(char_alloc_ref(), that.char_alloc_ref(),
typename AllocTraits::propagate_on_container_swap{});
}
- void rehash(size_t n) {
- const size_t cap = capacity();
- if (n == 0) {
- if (cap == 0 || is_soo()) return;
- if (empty()) {
- ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
- SooEnabled());
- return;
- }
- if (fits_in_soo(size())) {
- // When the table is already sampled, we keep it sampled.
- if (infoz().IsSampled()) {
- const size_t kInitialSampledCapacity = NextCapacity(SooCapacity());
- if (capacity() > kInitialSampledCapacity) {
- resize(kInitialSampledCapacity);
- }
- // This asserts that we didn't lose sampling coverage in `resize`.
- ABSL_SWISSTABLE_ASSERT(infoz().IsSampled());
- return;
- }
- alignas(slot_type) unsigned char slot_space[sizeof(slot_type)];
- slot_type* tmp_slot = to_slot(slot_space);
- transfer(tmp_slot, begin().slot());
- ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
- SooEnabled());
- transfer(soo_slot(), tmp_slot);
- common().set_full_soo();
- return;
- }
- }
-
- // bitor is a faster way of doing `max` here. We will round up to the next
- // power-of-2-minus-1, so bitor is good enough.
- auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
- // n == 0 unconditionally rehashes as per the standard.
- if (n == 0 || m > cap) {
- if (ABSL_PREDICT_FALSE(m > MaxValidCapacity<sizeof(slot_type)>())) {
- HashTableSizeOverflow();
- }
- resize(m);
-
- // This is after resize, to ensure that we have completed the allocation
- // and have potentially sampled the hashtable.
- infoz().RecordReservation(n);
- }
- }
+ void rehash(size_t n) { Rehash(common(), GetPolicyFunctions(), n); }
void reserve(size_t n) {
- const size_t max_size_before_growth =
- is_soo() ? SooCapacity() : size() + growth_left();
- if (n > max_size_before_growth) {
- if (ABSL_PREDICT_FALSE(n > max_size())) {
- HashTableSizeOverflow();
- }
- size_t m = GrowthToLowerboundCapacity(n);
- resize(NormalizeCapacity(m));
-
- // This is after resize, to ensure that we have completed the allocation
- // and have potentially sampled the hashtable.
- infoz().RecordReservation(n);
+ if (ABSL_PREDICT_TRUE(n > DefaultCapacity())) {
+ ReserveTableToFitNewSize(common(), GetPolicyFunctions(), n);
}
- common().reset_reserved_growth(n);
- common().set_reservation_size(n);
}
// Extension API: support for heterogeneous keys.
@@ -3424,7 +2834,7 @@ class raw_hash_set {
// Avoid probing if we won't be able to prefetch the addresses received.
#ifdef ABSL_HAVE_PREFETCH
prefetch_heap_block();
- auto seq = probe(common(), hash_ref()(key));
+ auto seq = probe(common(), hash_of(key));
PrefetchToLocalCache(control() + seq.offset());
PrefetchToLocalCache(slot_array() + seq.offset());
#endif // ABSL_HAVE_PREFETCH
@@ -3441,9 +2851,9 @@ class raw_hash_set {
template <class K = key_type>
iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
AssertOnFind(key);
- if (is_soo()) return find_soo(key);
+ if (capacity() <= 1) return find_small(key);
prefetch_heap_block();
- return find_non_soo(key, hash_ref()(key));
+ return find_large(key, hash_of(key));
}
template <class K = key_type>
@@ -3493,7 +2903,9 @@ class raw_hash_set {
hasher hash_function() const { return hash_ref(); }
key_equal key_eq() const { return eq_ref(); }
- allocator_type get_allocator() const { return alloc_ref(); }
+ allocator_type get_allocator() const {
+ return allocator_type(char_alloc_ref());
+ }
friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
if (a.size() != b.size()) return false;
@@ -3525,7 +2937,7 @@ class raw_hash_set {
H>::type
AbslHashValue(H h, const raw_hash_set& s) {
return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
- s.size());
+ hash_internal::WeaklyMixedInteger{s.size()});
}
friend void swap(raw_hash_set& a,
@@ -3560,7 +2972,7 @@ class raw_hash_set {
struct EqualElement {
template <class K2, class... Args>
bool operator()(const K2& lhs, Args&&...) const {
- return eq(lhs, rhs);
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(eq(lhs, rhs));
}
const K1& rhs;
const key_equal& eq;
@@ -3598,37 +3010,48 @@ class raw_hash_set {
template <typename... Args>
inline void construct(slot_type* slot, Args&&... args) {
common().RunWithReentrancyGuard([&] {
- PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+ allocator_type alloc(char_alloc_ref());
+ PolicyTraits::construct(&alloc, slot, std::forward<Args>(args)...);
});
}
inline void destroy(slot_type* slot) {
- common().RunWithReentrancyGuard(
- [&] { PolicyTraits::destroy(&alloc_ref(), slot); });
+ common().RunWithReentrancyGuard([&] {
+ allocator_type alloc(char_alloc_ref());
+ PolicyTraits::destroy(&alloc, slot);
+ });
}
inline void transfer(slot_type* to, slot_type* from) {
- common().RunWithReentrancyGuard(
- [&] { PolicyTraits::transfer(&alloc_ref(), to, from); });
+ common().RunWithReentrancyGuard([&] {
+ allocator_type alloc(char_alloc_ref());
+ PolicyTraits::transfer(&alloc, to, from);
+ });
}
// TODO(b/289225379): consider having a helper class that has the impls for
// SOO functionality.
template <class K = key_type>
- iterator find_soo(const key_arg<K>& key) {
- ABSL_SWISSTABLE_ASSERT(is_soo());
- return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
- PolicyTraits::element(soo_slot()))
+ iterator find_small(const key_arg<K>& key) {
+ ABSL_SWISSTABLE_ASSERT(capacity() <= 1);
+ return empty() || !PolicyTraits::apply(
+ EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(single_slot()))
? end()
- : soo_iterator();
+ : single_iterator();
}
template <class K = key_type>
- iterator find_non_soo(const key_arg<K>& key, size_t hash) {
+ iterator find_large(const key_arg<K>& key, size_t hash) {
+ ABSL_SWISSTABLE_ASSERT(capacity() > 1);
ABSL_SWISSTABLE_ASSERT(!is_soo());
auto seq = probe(common(), hash);
+ const h2_t h2 = H2(hash);
const ctrl_t* ctrl = control();
while (true) {
+#ifndef ABSL_HAVE_MEMORY_SANITIZER
+ absl::PrefetchToLocalCache(slot_array() + seq.offset());
+#endif
Group g{ctrl + seq.offset()};
- for (uint32_t i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(h2)) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slot_array() + seq.offset(i)))))
@@ -3640,36 +3063,49 @@ class raw_hash_set {
}
}
- // Conditionally samples hashtablez for SOO tables. This should be called on
- // insertion into an empty SOO table and in copy construction when the size
- // can fit in SOO capacity.
- inline HashtablezInfoHandle try_sample_soo() {
+ // Returns true if the table needs to be sampled.
+ // This should be called on insertion into an empty SOO table and in copy
+ // construction when the size can fit in SOO capacity.
+ bool should_sample_soo() const {
ABSL_SWISSTABLE_ASSERT(is_soo());
- if (!ShouldSampleHashtablezInfo<CharAlloc>()) return HashtablezInfoHandle{};
- return Sample(sizeof(slot_type), sizeof(key_type), sizeof(value_type),
- SooCapacity());
+ if (!ShouldSampleHashtablezInfoForAlloc<CharAlloc>()) return false;
+ return ABSL_PREDICT_FALSE(ShouldSampleNextTable());
+ }
+
+ void clear_backing_array(bool reuse) {
+ ABSL_SWISSTABLE_ASSERT(capacity() > DefaultCapacity());
+ ClearBackingArray(common(), GetPolicyFunctions(), &char_alloc_ref(), reuse,
+ SooEnabled());
}
- inline void destroy_slots() {
+ void destroy_slots() {
ABSL_SWISSTABLE_ASSERT(!is_soo());
if (PolicyTraits::template destroy_is_trivial<Alloc>()) return;
- IterateOverFullSlots(
- common(), slot_array(),
- [&](const ctrl_t*, slot_type* slot)
- ABSL_ATTRIBUTE_ALWAYS_INLINE { this->destroy(slot); });
+ auto destroy_slot = [&](const ctrl_t*, void* slot) {
+ this->destroy(static_cast<slot_type*>(slot));
+ };
+ if constexpr (SwisstableAssertAccessToDestroyedTable()) {
+ CommonFields common_copy(non_soo_tag_t{}, this->common());
+ common().set_capacity(InvalidCapacity::kDestroyed);
+ IterateOverFullSlots(common_copy, sizeof(slot_type), destroy_slot);
+ common().set_capacity(common_copy.capacity());
+ } else {
+ IterateOverFullSlots(common(), sizeof(slot_type), destroy_slot);
+ }
}
- inline void dealloc() {
- ABSL_SWISSTABLE_ASSERT(capacity() != 0);
+ void dealloc() {
+ ABSL_SWISSTABLE_ASSERT(capacity() > DefaultCapacity());
// Unpoison before returning the memory to the allocator.
SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
infoz().Unregister();
- Deallocate<BackingArrayAlignment(alignof(slot_type))>(
- &alloc_ref(), common().backing_array_start(),
- common().alloc_size(sizeof(slot_type), alignof(slot_type)));
+ DeallocateBackingArray<BackingArrayAlignment(alignof(slot_type)),
+ CharAlloc>(&char_alloc_ref(), capacity(), control(),
+ sizeof(slot_type), alignof(slot_type),
+ common().has_infoz());
}
- inline void destructor_impl() {
+ void destructor_impl() {
if (SwisstableGenerationsEnabled() &&
capacity() >= InvalidCapacity::kMovedFrom) {
return;
@@ -3695,128 +3131,21 @@ class raw_hash_set {
sizeof(slot_type));
}
+ template <class K>
+ size_t hash_of(const K& key) const {
+ return hash_ref()(key);
+ }
size_t hash_of(slot_type* slot) const {
return PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(slot));
}
- // Resizes table to the new capacity and move all elements to the new
- // positions accordingly.
- //
- // Note that for better performance instead of
- // find_first_non_full(common(), hash),
- // HashSetResizeHelper::FindFirstNonFullAfterResize(
- // common(), old_capacity, hash)
- // can be called right after `resize`.
- void resize(size_t new_capacity) {
- raw_hash_set::resize_impl(common(), new_capacity, HashtablezInfoHandle{});
- }
-
- // As above, except that we also accept a pre-sampled, forced infoz for
- // SOO tables, since they need to switch from SOO to heap in order to
- // store the infoz.
- void resize_with_soo_infoz(HashtablezInfoHandle forced_infoz) {
- ABSL_SWISSTABLE_ASSERT(forced_infoz.IsSampled());
- raw_hash_set::resize_impl(common(), NextCapacity(SooCapacity()),
- forced_infoz);
- }
-
- // Resizes set to the new capacity.
- // It is a static function in order to use its pointer in GetPolicyFunctions.
- ABSL_ATTRIBUTE_NOINLINE static void resize_impl(
- CommonFields& common, size_t new_capacity,
- HashtablezInfoHandle forced_infoz) {
- raw_hash_set* set = reinterpret_cast<raw_hash_set*>(&common);
- ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
- ABSL_SWISSTABLE_ASSERT(!set->fits_in_soo(new_capacity));
- const bool was_soo = set->is_soo();
- const bool had_soo_slot = was_soo && !set->empty();
- const ctrl_t soo_slot_h2 =
- had_soo_slot ? static_cast<ctrl_t>(H2(set->hash_of(set->soo_slot())))
- : ctrl_t::kEmpty;
- HashSetResizeHelper resize_helper(common, was_soo, had_soo_slot,
- forced_infoz);
- // Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in
- // HashSetResizeHelper constructor because it can't transfer slots when
- // transfer_uses_memcpy is false.
- // TODO(b/289225379): try to handle more of the SOO cases inside
- // InitializeSlots. See comment on cl/555990034 snapshot #63.
- if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) {
- resize_helper.old_heap_or_soo() = common.heap_or_soo();
- } else {
- set->transfer(set->to_slot(resize_helper.old_soo_data()),
- set->soo_slot());
- }
- common.set_capacity(new_capacity);
- // Note that `InitializeSlots` does different number initialization steps
- // depending on the values of `transfer_uses_memcpy` and capacities.
- // Refer to the comment in `InitializeSlots` for more details.
- const bool grow_single_group =
- resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
- PolicyTraits::transfer_uses_memcpy(),
- SooEnabled(), alignof(slot_type)>(
- common, CharAlloc(set->alloc_ref()), soo_slot_h2, sizeof(key_type),
- sizeof(value_type));
-
- // In the SooEnabled() case, capacity is never 0 so we don't check.
- if (!SooEnabled() && resize_helper.old_capacity() == 0) {
- // InitializeSlots did all the work including infoz().RecordRehash().
- return;
- }
- ABSL_SWISSTABLE_ASSERT(resize_helper.old_capacity() > 0);
- // Nothing more to do in this case.
- if (was_soo && !had_soo_slot) return;
-
- slot_type* new_slots = set->slot_array();
- if (grow_single_group) {
- if (PolicyTraits::transfer_uses_memcpy()) {
- // InitializeSlots did all the work.
- return;
- }
- if (was_soo) {
- set->transfer(new_slots + resize_helper.SooSlotIndex(),
- to_slot(resize_helper.old_soo_data()));
- return;
- } else {
- // We want GrowSizeIntoSingleGroup to be called here in order to make
- // InitializeSlots not depend on PolicyTraits.
- resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common,
- set->alloc_ref());
- }
- } else {
- // InitializeSlots prepares control bytes to correspond to empty table.
- const auto insert_slot = [&](slot_type* slot) {
- size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()},
- PolicyTraits::element(slot));
- auto target = find_first_non_full(common, hash);
- SetCtrl(common, target.offset, H2(hash), sizeof(slot_type));
- set->transfer(new_slots + target.offset, slot);
- return target.probe_length;
- };
- if (was_soo) {
- insert_slot(to_slot(resize_helper.old_soo_data()));
- return;
- } else {
- auto* old_slots = static_cast<slot_type*>(resize_helper.old_slots());
- size_t total_probe_length = 0;
- for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
- if (IsFull(resize_helper.old_ctrl()[i])) {
- total_probe_length += insert_slot(old_slots + i);
- }
- }
- common.infoz().RecordRehash(total_probe_length);
- }
- }
- resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(set->alloc_ref()),
- sizeof(slot_type));
- }
-
// Casting directly from e.g. char* to slot_type* can cause compilation errors
// on objective-C. This function converts to void* first, avoiding the issue.
static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }
// Requires that lhs does not have a full SOO slot.
- static void move_common(bool rhs_is_full_soo, allocator_type& rhs_alloc,
+ static void move_common(bool rhs_is_full_soo, CharAlloc& rhs_alloc,
CommonFields& lhs, CommonFields&& rhs) {
if (PolicyTraits::transfer_uses_memcpy() || !rhs_is_full_soo) {
lhs = std::move(rhs);
@@ -3841,10 +3170,12 @@ class raw_hash_set {
}
CommonFields tmp = CommonFields(uninitialized_tag_t{});
const bool that_is_full_soo = that.is_full_soo();
- move_common(that_is_full_soo, that.alloc_ref(), tmp,
+ move_common(that_is_full_soo, that.char_alloc_ref(), tmp,
std::move(that.common()));
- move_common(is_full_soo(), alloc_ref(), that.common(), std::move(common()));
- move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp));
+ move_common(is_full_soo(), char_alloc_ref(), that.common(),
+ std::move(common()));
+ move_common(that_is_full_soo, that.char_alloc_ref(), common(),
+ std::move(tmp));
}
void annotate_for_bug_detection_on_move(
@@ -3862,7 +3193,8 @@ class raw_hash_set {
}
common().increment_generation();
if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
- resize(capacity());
+ ResizeAllocatedTableWithSeedChange(common(), GetPolicyFunctions(),
+ capacity());
}
}
@@ -3871,11 +3203,11 @@ class raw_hash_set {
// We don't bother checking for this/that aliasing. We just need to avoid
// breaking the invariants in that case.
destructor_impl();
- move_common(that.is_full_soo(), that.alloc_ref(), common(),
+ move_common(that.is_full_soo(), that.char_alloc_ref(), common(),
std::move(that.common()));
hash_ref() = that.hash_ref();
eq_ref() = that.eq_ref();
- CopyAlloc(alloc_ref(), that.alloc_ref(),
+ CopyAlloc(char_alloc_ref(), that.char_alloc_ref(),
std::integral_constant<bool, propagate_alloc>());
that.common() = CommonFields::CreateDefault<SooEnabled()>();
annotate_for_bug_detection_on_move(that);
@@ -3902,7 +3234,7 @@ class raw_hash_set {
}
raw_hash_set& move_assign(raw_hash_set&& that,
std::false_type /*propagate_alloc*/) {
- if (alloc_ref() == that.alloc_ref()) {
+ if (char_alloc_ref() == that.char_alloc_ref()) {
return assign_impl<false>(std::move(that));
}
// Aliasing can't happen here because allocs would compare equal above.
@@ -3918,22 +3250,25 @@ class raw_hash_set {
template <class K>
std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
+ ctrl_t soo_slot_ctrl;
if (empty()) {
- const HashtablezInfoHandle infoz = try_sample_soo();
- if (infoz.IsSampled()) {
- resize_with_soo_infoz(infoz);
- } else {
+ if (!should_sample_soo()) {
common().set_full_soo();
return {soo_iterator(), true};
}
+ soo_slot_ctrl = ctrl_t::kEmpty;
} else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
PolicyTraits::element(soo_slot()))) {
return {soo_iterator(), false};
} else {
- resize(NextCapacity(SooCapacity()));
- }
- const size_t index =
- PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common());
+ soo_slot_ctrl = static_cast<ctrl_t>(H2(hash_of(soo_slot())));
+ }
+ constexpr bool kUseMemcpy =
+ PolicyTraits::transfer_uses_memcpy() && SooEnabled();
+ size_t index = GrowSooTableToNextCapacityAndPrepareInsert<
+ kUseMemcpy ? OptimalMemcpySizeForSooSlotTransfer(sizeof(slot_type)) : 0,
+ kUseMemcpy>(common(), GetPolicyFunctions(), hash_of(key),
+ soo_slot_ctrl);
return {iterator_at(index), true};
}
@@ -3941,12 +3276,16 @@ class raw_hash_set {
std::pair<iterator, bool> find_or_prepare_insert_non_soo(const K& key) {
ABSL_SWISSTABLE_ASSERT(!is_soo());
prefetch_heap_block();
- auto hash = hash_ref()(key);
+ const size_t hash = hash_of(key);
auto seq = probe(common(), hash);
+ const h2_t h2 = H2(hash);
const ctrl_t* ctrl = control();
while (true) {
+#ifndef ABSL_HAVE_MEMORY_SANITIZER
+ absl::PrefetchToLocalCache(slot_array() + seq.offset());
+#endif
Group g{ctrl + seq.offset()};
- for (uint32_t i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(h2)) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slot_array() + seq.offset(i)))))
@@ -3954,11 +3293,10 @@ class raw_hash_set {
}
auto mask_empty = g.MaskEmpty();
if (ABSL_PREDICT_TRUE(mask_empty)) {
- size_t target = seq.offset(
- GetInsertionOffset(mask_empty, capacity(), hash, control()));
- return {iterator_at(PrepareInsertNonSoo(common(), hash,
- FindInfo{target, seq.index()},
- GetPolicyFunctions())),
+ size_t target = seq.offset(mask_empty.LowestBitSet());
+ return {iterator_at(PrepareInsertNonSoo(common(), GetPolicyFunctions(),
+ hash,
+ FindInfo{target, seq.index()})),
true};
}
seq.next();
@@ -3976,6 +3314,11 @@ class raw_hash_set {
// Asserts that the capacity is not a sentinel invalid value.
void AssertNotDebugCapacity() const {
+#ifdef NDEBUG
+ if (!SwisstableGenerationsEnabled()) {
+ return;
+ }
+#endif
if (ABSL_PREDICT_TRUE(capacity() <
InvalidCapacity::kAboveMaxValidCapacity)) {
return;
@@ -3983,8 +3326,11 @@ class raw_hash_set {
assert(capacity() != InvalidCapacity::kReentrance &&
"Reentrant container access during element construction/destruction "
"is not allowed.");
- assert(capacity() != InvalidCapacity::kDestroyed &&
- "Use of destroyed hash table.");
+ if constexpr (SwisstableAssertAccessToDestroyedTable()) {
+ if (capacity() == InvalidCapacity::kDestroyed) {
+ ABSL_RAW_LOG(FATAL, "Use of destroyed hash table.");
+ }
+ }
if (SwisstableGenerationsEnabled() &&
ABSL_PREDICT_FALSE(capacity() >= InvalidCapacity::kMovedFrom)) {
if (capacity() == InvalidCapacity::kSelfMovedFrom) {
@@ -4015,9 +3361,10 @@ class raw_hash_set {
}
if (empty()) return;
- const size_t hash_of_arg = hash_ref()(key);
- const auto assert_consistent = [&](const ctrl_t*, slot_type* slot) {
- const value_type& element = PolicyTraits::element(slot);
+ const size_t hash_of_arg = hash_of(key);
+ const auto assert_consistent = [&](const ctrl_t*, void* slot) {
+ const value_type& element =
+ PolicyTraits::element(static_cast<slot_type*>(slot));
const bool is_key_equal =
PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
if (!is_key_equal) return;
@@ -4037,7 +3384,7 @@ class raw_hash_set {
}
// We only do validation for small tables so that it's constant time.
if (capacity() > 16) return;
- IterateOverFullSlots(common(), slot_array(), assert_consistent);
+ IterateOverFullSlots(common(), sizeof(slot_type), assert_consistent);
}
// Attempts to find `key` in the table; if it isn't found, returns an iterator
@@ -4062,7 +3409,10 @@ class raw_hash_set {
void emplace_at(iterator iter, Args&&... args) {
construct(iter.slot(), std::forward<Args>(args)...);
- assert(PolicyTraits::apply(FindElement{*this}, *iter) == iter &&
+ // When capacity is 1, find calls find_small and if size is 0, then it will
+ // return an end iterator. This can happen in the raw_hash_set copy ctor.
+ assert((capacity() == 1 ||
+ PolicyTraits::apply(FindElement{*this}, *iter) == iter) &&
"constructed value does not match the lookup key");
}
@@ -4125,10 +3475,12 @@ class raw_hash_set {
}
slot_type* soo_slot() {
ABSL_SWISSTABLE_ASSERT(is_soo());
- return static_cast<slot_type*>(common().soo_data());
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(
+ static_cast<slot_type*>(common().soo_data()));
}
const slot_type* soo_slot() const {
- return const_cast<raw_hash_set*>(this)->soo_slot();
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(
+ const_cast<raw_hash_set*>(this)->soo_slot());
}
iterator soo_iterator() {
return {SooControl(), soo_slot(), common().generation_ptr()};
@@ -4136,6 +3488,20 @@ class raw_hash_set {
const_iterator soo_iterator() const {
return const_cast<raw_hash_set*>(this)->soo_iterator();
}
+ slot_type* single_slot() {
+ ABSL_SWISSTABLE_ASSERT(capacity() <= 1);
+ return SooEnabled() ? soo_slot() : slot_array();
+ }
+ const slot_type* single_slot() const {
+ return const_cast<raw_hash_set*>(this)->single_slot();
+ }
+ iterator single_iterator() {
+ return {SooEnabled() ? SooControl() : control(), single_slot(),
+ common().generation_ptr()};
+ }
+ const_iterator single_iterator() const {
+ return const_cast<raw_hash_set*>(this)->single_iterator();
+ }
HashtablezInfoHandle infoz() {
ABSL_SWISSTABLE_ASSERT(!is_soo());
return common().infoz();
@@ -4145,49 +3511,118 @@ class raw_hash_set {
const hasher& hash_ref() const { return settings_.template get<1>(); }
key_equal& eq_ref() { return settings_.template get<2>(); }
const key_equal& eq_ref() const { return settings_.template get<2>(); }
- allocator_type& alloc_ref() { return settings_.template get<3>(); }
- const allocator_type& alloc_ref() const {
+ CharAlloc& char_alloc_ref() { return settings_.template get<3>(); }
+ const CharAlloc& char_alloc_ref() const {
return settings_.template get<3>();
}
- static const void* get_hash_ref_fn(const CommonFields& common) {
- auto* h = reinterpret_cast<const raw_hash_set*>(&common);
- return &h->hash_ref();
+ static void* get_char_alloc_ref_fn(CommonFields& common) {
+ auto* h = reinterpret_cast<raw_hash_set*>(&common);
+ return &h->char_alloc_ref();
}
- static void transfer_slot_fn(void* set, void* dst, void* src) {
+ static void* get_hash_ref_fn(CommonFields& common) {
+ auto* h = reinterpret_cast<raw_hash_set*>(&common);
+ // TODO(b/397453582): Remove support for const hasher.
+ return const_cast<std::remove_const_t<hasher>*>(&h->hash_ref());
+ }
+ static void transfer_n_slots_fn(void* set, void* dst, void* src,
+ size_t count) {
+ auto* src_slot = to_slot(src);
+ auto* dst_slot = to_slot(dst);
+
auto* h = static_cast<raw_hash_set*>(set);
- h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src));
+ for (; count > 0; --count, ++src_slot, ++dst_slot) {
+ h->transfer(dst_slot, src_slot);
+ }
}
- // Note: dealloc_fn will only be used if we have a non-standard allocator.
- static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
- auto* set = reinterpret_cast<raw_hash_set*>(&common);
- // Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(common.slot_array(),
- sizeof(slot_type) * common.capacity());
+ // TODO(b/382423690): Try to type erase entire function or at least type erase
+ // by GetKey + Hash for memcpyable types.
+ // TODO(b/382423690): Try to type erase for big slots: sizeof(slot_type) > 16.
+ static void transfer_unprobed_elements_to_next_capacity_fn(
+ CommonFields& common, const ctrl_t* old_ctrl, void* old_slots,
+ void* probed_storage,
+ void (*encode_probed_element)(void* probed_storage, h2_t h2,
+ size_t source_offset, size_t h1)) {
+ const size_t new_capacity = common.capacity();
+ const size_t old_capacity = PreviousCapacity(new_capacity);
+ ABSL_ASSUME(old_capacity + 1 >= Group::kWidth);
+ ABSL_ASSUME((old_capacity + 1) % Group::kWidth == 0);
+
+ auto* set = reinterpret_cast<raw_hash_set*>(&common);
+ slot_type* old_slots_ptr = to_slot(old_slots);
+ ctrl_t* new_ctrl = common.control();
+ slot_type* new_slots = set->slot_array();
- common.infoz().Unregister();
- Deallocate<BackingArrayAlignment(alignof(slot_type))>(
- &set->alloc_ref(), common.backing_array_start(),
- common.alloc_size(sizeof(slot_type), alignof(slot_type)));
+ const PerTableSeed seed = common.seed();
+
+ for (size_t group_index = 0; group_index < old_capacity;
+ group_index += Group::kWidth) {
+ GroupFullEmptyOrDeleted old_g(old_ctrl + group_index);
+ std::memset(new_ctrl + group_index, static_cast<int8_t>(ctrl_t::kEmpty),
+ Group::kWidth);
+ std::memset(new_ctrl + group_index + old_capacity + 1,
+ static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
+ // TODO(b/382423690): try to type erase everything outside of the loop.
+ // We will share a lot of code in expense of one function call per group.
+ for (auto in_fixed_group_index : old_g.MaskFull()) {
+ size_t old_index = group_index + in_fixed_group_index;
+ slot_type* old_slot = old_slots_ptr + old_index;
+ // TODO(b/382423690): try to avoid entire hash calculation since we need
+ // only one new bit of h1.
+ size_t hash = set->hash_of(old_slot);
+ size_t h1 = H1(hash, seed);
+ h2_t h2 = H2(hash);
+ size_t new_index = TryFindNewIndexWithoutProbing(
+ h1, old_index, old_capacity, new_ctrl, new_capacity);
+ // Note that encode_probed_element is allowed to use old_ctrl buffer
+ // till and included the old_index.
+ if (ABSL_PREDICT_FALSE(new_index == kProbedElementIndexSentinel)) {
+ encode_probed_element(probed_storage, h2, old_index, h1);
+ continue;
+ }
+ ABSL_SWISSTABLE_ASSERT((new_index & old_capacity) <= old_index);
+ ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_index]));
+ new_ctrl[new_index] = static_cast<ctrl_t>(h2);
+ auto* new_slot = new_slots + new_index;
+ SanitizerUnpoisonMemoryRegion(new_slot, sizeof(slot_type));
+ set->transfer(new_slot, old_slot);
+ SanitizerPoisonMemoryRegion(old_slot, sizeof(slot_type));
+ }
+ }
}
static const PolicyFunctions& GetPolicyFunctions() {
+ static_assert(sizeof(slot_type) <= (std::numeric_limits<uint32_t>::max)(),
+ "Slot size is too large. Use std::unique_ptr for value type "
+ "or use absl::node_hash_{map,set}.");
+ static_assert(alignof(slot_type) <=
+ size_t{(std::numeric_limits<uint16_t>::max)()});
+ static_assert(sizeof(key_type) <=
+ size_t{(std::numeric_limits<uint32_t>::max)()});
+ static_assert(sizeof(value_type) <=
+ size_t{(std::numeric_limits<uint32_t>::max)()});
+ static constexpr size_t kBackingArrayAlignment =
+ BackingArrayAlignment(alignof(slot_type));
static constexpr PolicyFunctions value = {
- sizeof(slot_type),
+ static_cast<uint32_t>(sizeof(key_type)),
+ static_cast<uint32_t>(sizeof(value_type)),
+ static_cast<uint32_t>(sizeof(slot_type)),
+ static_cast<uint16_t>(alignof(slot_type)), SooEnabled(),
+ ShouldSampleHashtablezInfoForAlloc<CharAlloc>(),
// TODO(b/328722020): try to type erase
// for standard layout and alignof(Hash) <= alignof(CommonFields).
- std::is_empty<hasher>::value ? &GetHashRefForEmptyHasher
- : &raw_hash_set::get_hash_ref_fn,
+ std::is_empty_v<hasher> ? &GetRefForEmptyClass
+ : &raw_hash_set::get_hash_ref_fn,
PolicyTraits::template get_hash_slot_fn<hasher>(),
PolicyTraits::transfer_uses_memcpy()
- ? TransferRelocatable<sizeof(slot_type)>
- : &raw_hash_set::transfer_slot_fn,
- (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
- ? &DeallocateStandard<alignof(slot_type)>
- : &raw_hash_set::dealloc_fn),
- &raw_hash_set::resize_impl
- };
+ ? TransferNRelocatable<sizeof(slot_type)>
+ : &raw_hash_set::transfer_n_slots_fn,
+ std::is_empty_v<Alloc> ? &GetRefForEmptyClass
+ : &raw_hash_set::get_char_alloc_ref_fn,
+ &AllocateBackingArray<kBackingArrayAlignment, CharAlloc>,
+ &DeallocateBackingArray<kBackingArrayAlignment, CharAlloc>,
+ &raw_hash_set::transfer_unprobed_elements_to_next_capacity_fn};
return value;
}
@@ -4195,9 +3630,9 @@ class raw_hash_set {
// CompressedTuple will ensure that sizeof is not affected by any of the empty
// fields that occur after CommonFields.
absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
- allocator_type>
+ CharAlloc>
settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{},
- key_equal{}, allocator_type{}};
+ key_equal{}, CharAlloc{}};
};
// Friend access for free functions in raw_hash_set.h.
@@ -4220,8 +3655,11 @@ struct HashtableFreeFunctionsAccess {
}
ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size();
size_t num_deleted = 0;
+ using SlotType = typename Set::slot_type;
IterateOverFullSlots(
- c->common(), c->slot_array(), [&](const ctrl_t* ctrl, auto* slot) {
+ c->common(), sizeof(SlotType),
+ [&](const ctrl_t* ctrl, void* slot_void) {
+ auto* slot = static_cast<SlotType*>(slot_void);
if (pred(Set::PolicyTraits::element(slot))) {
c->destroy(slot);
EraseMetaOnly(c->common(), static_cast<size_t>(ctrl - c->control()),
@@ -4246,10 +3684,12 @@ struct HashtableFreeFunctionsAccess {
cb(*c->soo_iterator());
return;
}
+ using SlotType = typename Set::slot_type;
using ElementTypeWithConstness = decltype(*c->begin());
IterateOverFullSlots(
- c->common(), c->slot_array(), [&cb](const ctrl_t*, auto* slot) {
- ElementTypeWithConstness& element = Set::PolicyTraits::element(slot);
+ c->common(), sizeof(SlotType), [&cb](const ctrl_t*, void* slot) {
+ ElementTypeWithConstness& element =
+ Set::PolicyTraits::element(static_cast<SlotType*>(slot));
cb(element);
});
}
@@ -4282,12 +3722,13 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
const typename Set::key_type& key) {
if (set.is_soo()) return 0;
size_t num_probes = 0;
- size_t hash = set.hash_ref()(key);
+ const size_t hash = set.hash_of(key);
auto seq = probe(set.common(), hash);
+ const h2_t h2 = H2(hash);
const ctrl_t* ctrl = set.control();
while (true) {
container_internal::Group g{ctrl + seq.offset()};
- for (uint32_t i : g.Match(container_internal::H2(hash))) {
+ for (uint32_t i : g.Match(h2)) {
if (Traits::apply(
typename Set::template EqualElement<typename Set::key_type>{
key, set.eq_ref()},
@@ -4320,6 +3761,22 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
};
} // namespace hashtable_debug_internal
+
+// Extern template instantiations reduce binary size and linker input size.
+// Function definition is in raw_hash_set.cc.
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(
+ CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<1, true>(
+ CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<4, true>(
+ CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<8, true>(
+ CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+#if UINTPTR_MAX == UINT64_MAX
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<16, true>(
+ CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+#endif
+
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set_resize_impl.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set_resize_impl.h
new file mode 100644
index 00000000000..149d9e825e5
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set_resize_impl.h
@@ -0,0 +1,80 @@
+// Copyright 2025 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This is a private implementation detail of resize algorithm of
+// raw_hash_set. It is exposed in a separate file for testing purposes.
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_RESIZE_IMPL_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_RESIZE_IMPL_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Encoding for probed elements used for smaller tables.
+// Data is encoded into single integer.
+// Storage format for 4 bytes:
+// - 7 bits for h2
+// - 12 bits for source_offset
+// - 13 bits for h1
+// Storage format for 8 bytes:
+// - 7 bits for h2
+// - 28 bits for source_offset
+// - 29 bits for h1
+// Storage format for 16 bytes:
+// - 7 bits for h2
+// - 57 bits for source_offset
+// - 58 bits for h1
+template <typename IntType, size_t kTotalBits>
+struct ProbedItemImpl {
+ static constexpr IntType kH2Bits = 7;
+
+ static constexpr IntType kMaxOldBits = (kTotalBits - kH2Bits) / 2;
+ static constexpr IntType kMaxOldCapacity = (IntType{1} << kMaxOldBits) - 1;
+
+ // We always have one bit more for h1.
+ static constexpr IntType kMaxNewBits = kMaxOldBits + 1;
+ static constexpr IntType kMaxNewCapacity = (IntType{1} << kMaxNewBits) - 1;
+
+ static constexpr IntType kH2Shift = (kTotalBits - kH2Bits);
+ static_assert(kMaxNewBits + kMaxOldBits + kH2Bits == kTotalBits);
+
+ ProbedItemImpl() = default;
+ ProbedItemImpl(uint8_t h2_arg, size_t source_offset_arg, size_t h1_arg)
+ : h2(h2_arg),
+ source_offset(static_cast<IntType>(source_offset_arg)),
+ h1(static_cast<IntType>(h1_arg)) {}
+
+ IntType h2 : kH2Bits;
+ IntType source_offset : kMaxOldBits;
+ IntType h1 : kMaxNewBits;
+};
+
+using ProbedItem4Bytes = ProbedItemImpl<uint32_t, 32>;
+static_assert(sizeof(ProbedItem4Bytes) == 4);
+using ProbedItem8Bytes = ProbedItemImpl<uint64_t, 64>;
+static_assert(sizeof(ProbedItem8Bytes) == 8);
+using ProbedItem16Bytes = ProbedItemImpl<uint64_t, 7 + 57 + 58>;
+static_assert(sizeof(ProbedItem16Bytes) == 16);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_RESIZE_IMPL_H_
diff --git a/contrib/restricted/abseil-cpp/absl/container/node_hash_map.h b/contrib/restricted/abseil-cpp/absl/container/node_hash_map.h
index 127c8937390..8aed18b2e61 100644
--- a/contrib/restricted/abseil-cpp/absl/container/node_hash_map.h
+++ b/contrib/restricted/abseil-cpp/absl/container/node_hash_map.h
@@ -99,6 +99,11 @@ class NodeHashMapPolicy;
// In most cases `T` needs only to provide the `absl_container_hash`. In this
// case `std::equal_to<void>` will be used instead of `eq` part.
//
+// PERFORMANCE WARNING: Erasure & sparsity can negatively affect performance:
+// * Iteration takes O(capacity) time, not O(size).
+// * erase() slows down begin() and ++iterator.
+// * Capacity only shrinks on rehash() or clear() -- not on erase().
+//
// Example:
//
// // Create a node hash map of three strings (that map to strings)
diff --git a/contrib/restricted/abseil-cpp/absl/container/node_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/node_hash_set.h
index cffa50ec776..6240e2d9154 100644
--- a/contrib/restricted/abseil-cpp/absl/container/node_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/node_hash_set.h
@@ -97,6 +97,11 @@ struct NodeHashSetPolicy;
// In most cases `T` needs only to provide the `absl_container_hash`. In this
// case `std::equal_to<void>` will be used instead of `eq` part.
//
+// PERFORMANCE WARNING: Erasure & sparsity can negatively affect performance:
+// * Iteration takes O(capacity) time, not O(size).
+// * erase() slows down begin() and ++iterator.
+// * Capacity only shrinks on rehash() or clear() -- not on erase().
+//
// Example:
//
// // Create a node hash set of three strings
diff --git a/contrib/restricted/abseil-cpp/absl/crc/crc32c.cc b/contrib/restricted/abseil-cpp/absl/crc/crc32c.cc
index 468c1b3b3e3..9b1ef7e2efc 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/crc32c.cc
+++ b/contrib/restricted/abseil-cpp/absl/crc/crc32c.cc
@@ -54,10 +54,6 @@ crc32c_t ExtendCrc32cInternal(crc32c_t initial_crc,
} // namespace crc_internal
-crc32c_t ComputeCrc32c(absl::string_view buf) {
- return ExtendCrc32c(crc32c_t{0}, buf);
-}
-
crc32c_t ExtendCrc32cByZeroes(crc32c_t initial_crc, size_t length) {
uint32_t crc = static_cast<uint32_t>(initial_crc) ^ kCRC32Xor;
CrcEngine()->ExtendByZeroes(&crc, length);
diff --git a/contrib/restricted/abseil-cpp/absl/crc/crc32c.h b/contrib/restricted/abseil-cpp/absl/crc/crc32c.h
index 362861e4a6d..5ecc6b35464 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/crc32c.h
+++ b/contrib/restricted/abseil-cpp/absl/crc/crc32c.h
@@ -83,11 +83,6 @@ crc32c_t ExtendCrc32cInternal(crc32c_t initial_crc,
// CRC32C Computation Functions
// -----------------------------------------------------------------------------
-// ComputeCrc32c()
-//
-// Returns the CRC32C value of the provided string.
-crc32c_t ComputeCrc32c(absl::string_view buf);
-
// ExtendCrc32c()
//
// Computes a CRC32C value from an `initial_crc` CRC32C value including the
@@ -112,6 +107,13 @@ inline crc32c_t ExtendCrc32c(crc32c_t initial_crc,
return crc_internal::ExtendCrc32cInternal(initial_crc, buf_to_add);
}
+// ComputeCrc32c()
+//
+// Returns the CRC32C value of the provided string.
+inline crc32c_t ComputeCrc32c(absl::string_view buf) {
+ return ExtendCrc32c(crc32c_t{0}, buf);
+}
+
// ExtendCrc32cByZeroes()
//
// Computes a CRC32C value for a buffer with an `initial_crc` CRC32C value,
diff --git a/contrib/restricted/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h b/contrib/restricted/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h
index 0f6e3479e67..5a9b61a5faf 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h
+++ b/contrib/restricted/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h
@@ -99,19 +99,12 @@ V128 V128_PMul10(const V128 l, const V128 r);
// Produces a XOR operation of |l| and |r|.
V128 V128_Xor(const V128 l, const V128 r);
-// Produces an AND operation of |l| and |r|.
-V128 V128_And(const V128 l, const V128 r);
-
// Sets the lower half of a 128 bit register to the given 64-bit value and
// zeroes the upper half.
// dst[63:0] := |r|
// dst[127:64] := |0|
V128 V128_From64WithZeroFill(const uint64_t r);
-// Shift |l| right by |imm| bytes while shifting in zeros.
-template <int imm>
-V128 V128_ShiftRight(const V128 l);
-
// Extracts a 32-bit integer from |l|, selected with |imm|.
template <int imm>
int V128_Extract32(const V128 l);
@@ -170,18 +163,11 @@ inline V128 V128_PMul10(const V128 l, const V128 r) {
inline V128 V128_Xor(const V128 l, const V128 r) { return _mm_xor_si128(l, r); }
-inline V128 V128_And(const V128 l, const V128 r) { return _mm_and_si128(l, r); }
-
inline V128 V128_From64WithZeroFill(const uint64_t r) {
return _mm_set_epi64x(static_cast<int64_t>(0), static_cast<int64_t>(r));
}
template <int imm>
-inline V128 V128_ShiftRight(const V128 l) {
- return _mm_srli_si128(l, imm);
-}
-
-template <int imm>
inline int V128_Extract32(const V128 l) {
return _mm_extract_epi32(l, imm);
}
@@ -261,8 +247,6 @@ inline V128 V128_PMul10(const V128 l, const V128 r) {
inline V128 V128_Xor(const V128 l, const V128 r) { return veorq_u64(l, r); }
-inline V128 V128_And(const V128 l, const V128 r) { return vandq_u64(l, r); }
-
inline V128 V128_From64WithZeroFill(const uint64_t r){
constexpr uint64x2_t kZero = {0, 0};
return vsetq_lane_u64(r, kZero, 0);
@@ -270,12 +254,6 @@ inline V128 V128_From64WithZeroFill(const uint64_t r){
template <int imm>
-inline V128 V128_ShiftRight(const V128 l) {
- return vreinterpretq_u64_s8(
- vextq_s8(vreinterpretq_s8_u64(l), vdupq_n_s8(0), imm));
-}
-
-template <int imm>
inline int V128_Extract32(const V128 l) {
return vgetq_lane_s32(vreinterpretq_s32_u64(l), imm);
}
diff --git a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc
index 79dace34f44..3194bec468e 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc
+++ b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc
@@ -64,27 +64,27 @@ class CRC32AcceleratedX86ARMCombined : public CRC32 {
constexpr size_t kSmallCutoff = 256;
constexpr size_t kMediumCutoff = 2048;
-#define ABSL_INTERNAL_STEP1(crc) \
+#define ABSL_INTERNAL_STEP1(crc, p) \
do { \
crc = CRC32_u8(static_cast<uint32_t>(crc), *p++); \
} while (0)
-#define ABSL_INTERNAL_STEP2(crc) \
+#define ABSL_INTERNAL_STEP2(crc, p) \
do { \
crc = \
CRC32_u16(static_cast<uint32_t>(crc), absl::little_endian::Load16(p)); \
p += 2; \
} while (0)
-#define ABSL_INTERNAL_STEP4(crc) \
+#define ABSL_INTERNAL_STEP4(crc, p) \
do { \
crc = \
CRC32_u32(static_cast<uint32_t>(crc), absl::little_endian::Load32(p)); \
p += 4; \
} while (0)
-#define ABSL_INTERNAL_STEP8(crc, data) \
- do { \
- crc = CRC32_u64(static_cast<uint32_t>(crc), \
- absl::little_endian::Load64(data)); \
- data += 8; \
+#define ABSL_INTERNAL_STEP8(crc, p) \
+ do { \
+ crc = \
+ CRC32_u64(static_cast<uint32_t>(crc), absl::little_endian::Load64(p)); \
+ p += 8; \
} while (0)
#define ABSL_INTERNAL_STEP8BY2(crc0, crc1, p0, p1) \
do { \
@@ -221,7 +221,8 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreamsBase
// We are applying it to CRC32C polynomial.
ABSL_ATTRIBUTE_ALWAYS_INLINE void Process64BytesPclmul(
const uint8_t* p, V128* partialCRC) const {
- V128 loopMultiplicands = V128_Load(reinterpret_cast<const V128*>(k1k2));
+ V128 loopMultiplicands =
+ V128_Load(reinterpret_cast<const V128*>(kFoldAcross512Bits));
V128 partialCRC1 = partialCRC[0];
V128 partialCRC2 = partialCRC[1];
@@ -265,53 +266,33 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreamsBase
// Combine 4 vectors of partial crc into a single vector.
V128 reductionMultiplicands =
- V128_Load(reinterpret_cast<const V128*>(k5k6));
+ V128_Load(reinterpret_cast<const V128*>(kFoldAcross256Bits));
V128 low = V128_PMulLow(reductionMultiplicands, partialCRC1);
V128 high = V128_PMulHi(reductionMultiplicands, partialCRC1);
partialCRC1 = V128_Xor(low, high);
- partialCRC1 = V128_Xor(partialCRC1, partialCRC2);
+ partialCRC1 = V128_Xor(partialCRC1, partialCRC3);
- low = V128_PMulLow(reductionMultiplicands, partialCRC3);
- high = V128_PMulHi(reductionMultiplicands, partialCRC3);
+ low = V128_PMulLow(reductionMultiplicands, partialCRC2);
+ high = V128_PMulHi(reductionMultiplicands, partialCRC2);
- partialCRC3 = V128_Xor(low, high);
- partialCRC3 = V128_Xor(partialCRC3, partialCRC4);
+ partialCRC2 = V128_Xor(low, high);
+ partialCRC2 = V128_Xor(partialCRC2, partialCRC4);
- reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(k3k4));
+ reductionMultiplicands =
+ V128_Load(reinterpret_cast<const V128*>(kFoldAcross128Bits));
low = V128_PMulLow(reductionMultiplicands, partialCRC1);
high = V128_PMulHi(reductionMultiplicands, partialCRC1);
V128 fullCRC = V128_Xor(low, high);
- fullCRC = V128_Xor(fullCRC, partialCRC3);
+ fullCRC = V128_Xor(fullCRC, partialCRC2);
// Reduce fullCRC into scalar value.
- reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(k5k6));
-
- V128 mask = V128_Load(reinterpret_cast<const V128*>(kMask));
-
- V128 tmp = V128_PMul01(reductionMultiplicands, fullCRC);
- fullCRC = V128_ShiftRight<8>(fullCRC);
- fullCRC = V128_Xor(fullCRC, tmp);
-
- reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(k7k0));
-
- tmp = V128_ShiftRight<4>(fullCRC);
- fullCRC = V128_And(fullCRC, mask);
- fullCRC = V128_PMulLow(reductionMultiplicands, fullCRC);
- fullCRC = V128_Xor(tmp, fullCRC);
-
- reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(kPoly));
-
- tmp = V128_And(fullCRC, mask);
- tmp = V128_PMul01(reductionMultiplicands, tmp);
- tmp = V128_And(tmp, mask);
- tmp = V128_PMulLow(reductionMultiplicands, tmp);
-
- fullCRC = V128_Xor(tmp, fullCRC);
-
- return static_cast<uint64_t>(V128_Extract32<1>(fullCRC));
+ uint32_t crc = 0;
+ crc = CRC32_u64(crc, V128_Extract64<0>(fullCRC));
+ crc = CRC32_u64(crc, V128_Extract64<1>(fullCRC));
+ return crc;
}
// Update crc with 64 bytes of data from p.
@@ -325,15 +306,23 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreamsBase
return crc;
}
- // Generated by crc32c_x86_test --crc32c_generate_constants=true
- // and verified against constants in linux kernel for S390:
- // https://github.com/torvalds/linux/blob/master/arch/s390/crypto/crc32le-vx.S
- alignas(16) static constexpr uint64_t k1k2[2] = {0x0740eef02, 0x09e4addf8};
- alignas(16) static constexpr uint64_t k3k4[2] = {0x1384aa63a, 0x0ba4fc28e};
- alignas(16) static constexpr uint64_t k5k6[2] = {0x0f20c0dfe, 0x14cd00bd6};
- alignas(16) static constexpr uint64_t k7k0[2] = {0x0dd45aab8, 0x000000000};
- alignas(16) static constexpr uint64_t kPoly[2] = {0x105ec76f0, 0x0dea713f1};
- alignas(16) static constexpr uint32_t kMask[4] = {~0u, 0u, ~0u, 0u};
+ // Constants generated by './scripts/gen-crc-consts.py x86_pclmul
+ // crc32_lsb_0x82f63b78' from the Linux kernel.
+ alignas(16) static constexpr uint64_t kFoldAcross512Bits[2] = {
+ // (x^543 mod G) * x^32
+ 0x00000000740eef02,
+ // (x^479 mod G) * x^32
+ 0x000000009e4addf8};
+ alignas(16) static constexpr uint64_t kFoldAcross256Bits[2] = {
+ // (x^287 mod G) * x^32
+ 0x000000003da6d0cb,
+ // (x^223 mod G) * x^32
+ 0x00000000ba4fc28e};
+ alignas(16) static constexpr uint64_t kFoldAcross128Bits[2] = {
+ // (x^159 mod G) * x^32
+ 0x00000000f20c0dfe,
+ // (x^95 mod G) * x^32
+ 0x00000000493c7d27};
// Medium runs of bytes are broken into groups of kGroupsSmall blocks of same
// size. Each group is CRCed in parallel then combined at the end of the
@@ -345,24 +334,6 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreamsBase
static constexpr size_t kMaxStreams = 3;
};
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-alignas(16) constexpr uint64_t
- CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k1k2[2];
-alignas(16) constexpr uint64_t
- CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k3k4[2];
-alignas(16) constexpr uint64_t
- CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k5k6[2];
-alignas(16) constexpr uint64_t
- CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k7k0[2];
-alignas(16) constexpr uint64_t
- CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kPoly[2];
-alignas(16) constexpr uint32_t
- CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kMask[4];
-constexpr size_t
- CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kGroupsSmall;
-constexpr size_t CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kMaxStreams;
-#endif // ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-
template <size_t num_crc_streams, size_t num_pclmul_streams,
CutoffStrategy strategy>
class CRC32AcceleratedX86ARMCombinedMultipleStreams
@@ -384,15 +355,15 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams
length &= ~size_t{8};
}
if (length & 4) {
- ABSL_INTERNAL_STEP4(l);
+ ABSL_INTERNAL_STEP4(l, p);
length &= ~size_t{4};
}
if (length & 2) {
- ABSL_INTERNAL_STEP2(l);
+ ABSL_INTERNAL_STEP2(l, p);
length &= ~size_t{2};
}
if (length & 1) {
- ABSL_INTERNAL_STEP1(l);
+ ABSL_INTERNAL_STEP1(l, p);
length &= ~size_t{1};
}
if (length == 0) {
@@ -478,7 +449,7 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams
const uint8_t* x = RoundUp<8>(p);
// Process bytes until p is 8-byte aligned, if that isn't past the end.
while (p != x) {
- ABSL_INTERNAL_STEP1(l);
+ ABSL_INTERNAL_STEP1(l, p);
}
size_t bs = static_cast<size_t>(e - p) /
@@ -597,7 +568,7 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams
}
// Process the last few bytes
while (p != e) {
- ABSL_INTERNAL_STEP1(l);
+ ABSL_INTERNAL_STEP1(l, p);
}
#undef ABSL_INTERNAL_STEP8BY3
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/addresses.h b/contrib/restricted/abseil-cpp/absl/debugging/internal/addresses.h
new file mode 100644
index 00000000000..504fd6f5d1d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/addresses.h
@@ -0,0 +1,57 @@
+// Copyright 2025 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_ADDRESSES_H_
+#define ABSL_DEBUGGING_INTERNAL_ADDRESSES_H_
+
+#include <stdint.h>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Removes any metadata (tag bits) from the given pointer, converting it into a
+// user-readable address.
+inline uintptr_t StripPointerMetadata(uintptr_t ptr) {
+#if defined(__aarch64__)
+ // When PAC-RET (-mbranch-protection=pac-ret) is enabled, return addresses
+ // stored on the stack will be signed, which means that pointer bits outside
+ // of the virtual address range are potentially set. Since the stacktrace code
+ // is expected to return normal code pointers, this function clears those
+ // bits.
+ register uintptr_t x30 __asm__("x30") = ptr;
+ // The normal instruction for clearing PAC bits is XPACI, but for
+ // compatibility with ARM platforms that do not support pointer
+ // authentication, we use the hint space instruction XPACLRI instead. Hint
+ // space instructions behave as NOPs on unsupported platforms.
+#define ABSL_XPACLRI_HINT "hint #0x7;"
+ asm(ABSL_XPACLRI_HINT : "+r"(x30)); // asm("xpaclri" : "+r"(x30));
+#undef ABSL_XPACLRI_HINT
+ return x30;
+#else
+ return ptr;
+#endif
+}
+
+inline uintptr_t StripPointerMetadata(void* ptr) {
+ return StripPointerMetadata(reinterpret_cast<uintptr_t>(ptr));
+}
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_ADDRESSES_H_
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.cc
index 43b46bf940b..6652dc221a3 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.cc
@@ -172,7 +172,7 @@ bool ScanNextDelta(const char*& punycode_begin, const char* const punycode_end,
} // namespace
-absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options) {
+char* absl_nullable DecodeRustPunycode(DecodeRustPunycodeOptions options) {
const char* punycode_begin = options.punycode_begin;
const char* const punycode_end = options.punycode_end;
char* const out_begin = options.out_begin;
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.h b/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.h
index 0ae53ff31ec..44aad8adb2b 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.h
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.h
@@ -23,10 +23,10 @@ ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
struct DecodeRustPunycodeOptions {
- const char* punycode_begin;
- const char* punycode_end;
- char* out_begin;
- char* out_end;
+ const char* absl_nonnull punycode_begin;
+ const char* absl_nonnull punycode_end;
+ char* absl_nonnull out_begin;
+ char* absl_nonnull out_end;
};
// Given Rust Punycode in `punycode_begin .. punycode_end`, writes the
@@ -46,7 +46,7 @@ struct DecodeRustPunycodeOptions {
// DecodeRustPunycode is async-signal-safe with bounded runtime and a small
// stack footprint, making it suitable for use in demangling Rust symbol names
// from a signal handler.
-absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options);
+char* absl_nullable DecodeRustPunycode(DecodeRustPunycodeOptions options);
} // namespace debugging_internal
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc
index f7de117b201..5f62ebb8978 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc
@@ -484,36 +484,6 @@ static bool IsAlpha(char c) {
static bool IsDigit(char c) { return c >= '0' && c <= '9'; }
-// Returns true if "str" is a function clone suffix. These suffixes are used
-// by GCC 4.5.x and later versions (and our locally-modified version of GCC
-// 4.4.x) to indicate functions which have been cloned during optimization.
-// We treat any sequence (.<alpha>+.<digit>+)+ as a function clone suffix.
-// Additionally, '_' is allowed along with the alphanumeric sequence.
-static bool IsFunctionCloneSuffix(const char *str) {
- size_t i = 0;
- while (str[i] != '\0') {
- bool parsed = false;
- // Consume a single [.<alpha> | _]*[.<digit>]* sequence.
- if (str[i] == '.' && (IsAlpha(str[i + 1]) || str[i + 1] == '_')) {
- parsed = true;
- i += 2;
- while (IsAlpha(str[i]) || str[i] == '_') {
- ++i;
- }
- }
- if (str[i] == '.' && IsDigit(str[i + 1])) {
- parsed = true;
- i += 2;
- while (IsDigit(str[i])) {
- ++i;
- }
- }
- if (!parsed)
- return false;
- }
- return true; // Consumed everything in "str".
-}
-
static bool EndsWith(State *state, const char chr) {
return state->parse_state.out_cur_idx > 0 &&
state->parse_state.out_cur_idx < state->out_end_idx &&
@@ -1039,7 +1009,8 @@ static bool ParseNumber(State *state, int *number_out) {
number = ~number + 1;
}
if (p != RemainingInput(state)) { // Conversion succeeded.
- state->parse_state.mangled_idx += p - RemainingInput(state);
+ state->parse_state.mangled_idx +=
+ static_cast<int>(p - RemainingInput(state));
UpdateHighWaterMark(state);
if (number_out != nullptr) {
// Note: possibly truncate "number".
@@ -1062,7 +1033,8 @@ static bool ParseFloatNumber(State *state) {
}
}
if (p != RemainingInput(state)) { // Conversion succeeded.
- state->parse_state.mangled_idx += p - RemainingInput(state);
+ state->parse_state.mangled_idx +=
+ static_cast<int>(p - RemainingInput(state));
UpdateHighWaterMark(state);
return true;
}
@@ -1081,7 +1053,8 @@ static bool ParseSeqId(State *state) {
}
}
if (p != RemainingInput(state)) { // Conversion succeeded.
- state->parse_state.mangled_idx += p - RemainingInput(state);
+ state->parse_state.mangled_idx +=
+ static_cast<int>(p - RemainingInput(state));
UpdateHighWaterMark(state);
return true;
}
@@ -1100,7 +1073,7 @@ static bool ParseIdentifier(State *state, size_t length) {
} else {
MaybeAppendWithLength(state, RemainingInput(state), length);
}
- state->parse_state.mangled_idx += length;
+ state->parse_state.mangled_idx += static_cast<int>(length);
UpdateHighWaterMark(state);
return true;
}
@@ -2929,7 +2902,7 @@ static bool ParseTopLevelMangledName(State *state) {
if (ParseMangledName(state)) {
if (RemainingInput(state)[0] != '\0') {
// Drop trailing function clone suffix, if any.
- if (IsFunctionCloneSuffix(RemainingInput(state))) {
+ if (RemainingInput(state)[0] == '.') {
return true;
}
// Append trailing version suffix if any.
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.cc
index 4309bd849a1..f7f671341f4 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.cc
@@ -84,7 +84,7 @@ class RustSymbolParser {
// structure was not recognized or exceeded implementation limits, such as by
// nesting structures too deep. In either case *this should not be used
// again.
- ABSL_MUST_USE_RESULT bool Parse() && {
+ [[nodiscard]] bool Parse() && {
// Recursively parses the grammar production named by callee, then resumes
// execution at the next statement.
//
@@ -564,7 +564,7 @@ class RustSymbolParser {
// If the next input character is the given character, consumes it and returns
// true; otherwise returns false without consuming a character.
- ABSL_MUST_USE_RESULT bool Eat(char want) {
+ [[nodiscard]] bool Eat(char want) {
if (encoding_[pos_] != want) return false;
++pos_;
return true;
@@ -573,7 +573,7 @@ class RustSymbolParser {
// Provided there is enough remaining output space, appends c to the output,
// writing a fresh NUL terminator afterward, and returns true. Returns false
// if the output buffer had less than two bytes free.
- ABSL_MUST_USE_RESULT bool EmitChar(char c) {
+ [[nodiscard]] bool EmitChar(char c) {
if (silence_depth_ > 0) return true;
if (out_end_ - out_ < 2) return false;
*out_++ = c;
@@ -584,7 +584,7 @@ class RustSymbolParser {
// Provided there is enough remaining output space, appends the C string token
// to the output, followed by a NUL character, and returns true. Returns
// false if not everything fit into the output buffer.
- ABSL_MUST_USE_RESULT bool Emit(const char* token) {
+ [[nodiscard]] bool Emit(const char* token) {
if (silence_depth_ > 0) return true;
const size_t token_length = std::strlen(token);
const size_t bytes_to_copy = token_length + 1; // token and final NUL
@@ -598,7 +598,7 @@ class RustSymbolParser {
// of disambiguator (if it's nonnegative) or "?" (if it's negative) to the
// output, followed by a NUL character, and returns true. Returns false if
// not everything fit into the output buffer.
- ABSL_MUST_USE_RESULT bool EmitDisambiguator(int disambiguator) {
+ [[nodiscard]] bool EmitDisambiguator(int disambiguator) {
if (disambiguator < 0) return EmitChar('?'); // parsed but too large
if (disambiguator == 0) return EmitChar('0');
// Convert disambiguator to decimal text. Three digits per byte is enough
@@ -618,7 +618,7 @@ class RustSymbolParser {
// On success returns true and fills value with the encoded value if it was
// not too big, otherwise with -1. If the optional disambiguator was omitted,
// value is 0. On parse failure returns false and sets value to -1.
- ABSL_MUST_USE_RESULT bool ParseDisambiguator(int& value) {
+ [[nodiscard]] bool ParseDisambiguator(int& value) {
value = -1;
// disambiguator = s base-62-number
@@ -639,7 +639,7 @@ class RustSymbolParser {
// On success returns true and fills value with the encoded value if it was
// not too big, otherwise with -1. On parse failure returns false and sets
// value to -1.
- ABSL_MUST_USE_RESULT bool ParseBase62Number(int& value) {
+ [[nodiscard]] bool ParseBase62Number(int& value) {
value = -1;
// base-62-number = (digit | lower | upper)* _
@@ -686,7 +686,7 @@ class RustSymbolParser {
// A nonzero uppercase_namespace specifies the character after the N in a
// nested-identifier, e.g., 'C' for a closure, allowing ParseIdentifier to
// write out the name with the conventional decoration for that namespace.
- ABSL_MUST_USE_RESULT bool ParseIdentifier(char uppercase_namespace = '\0') {
+ [[nodiscard]] bool ParseIdentifier(char uppercase_namespace = '\0') {
// identifier -> disambiguator? undisambiguated-identifier
int disambiguator = 0;
if (!ParseDisambiguator(disambiguator)) return false;
@@ -703,7 +703,7 @@ class RustSymbolParser {
//
// At other appearances of undisambiguated-identifier in the grammar, this
// treatment is not applicable, and the call site omits both arguments.
- ABSL_MUST_USE_RESULT bool ParseUndisambiguatedIdentifier(
+ [[nodiscard]] bool ParseUndisambiguatedIdentifier(
char uppercase_namespace = '\0', int disambiguator = 0) {
// undisambiguated-identifier -> u? decimal-number _? bytes
const bool is_punycoded = Eat('u');
@@ -766,7 +766,7 @@ class RustSymbolParser {
// Consumes a decimal number like 0 or 123 from the input. On success returns
// true and fills value with the encoded value. If the encoded value is too
// large or otherwise unparsable, returns false and sets value to -1.
- ABSL_MUST_USE_RESULT bool ParseDecimalNumber(int& value) {
+ [[nodiscard]] bool ParseDecimalNumber(int& value) {
value = -1;
if (!IsDigit(Peek())) return false;
int encoded_number = Take() - '0';
@@ -788,7 +788,7 @@ class RustSymbolParser {
// Consumes a binder of higher-ranked lifetimes if one is present. On success
// returns true and discards the encoded lifetime count. On parse failure
// returns false.
- ABSL_MUST_USE_RESULT bool ParseOptionalBinder() {
+ [[nodiscard]] bool ParseOptionalBinder() {
// binder -> G base-62-number
if (!Eat('G')) return true;
int ignored_binding_count;
@@ -802,7 +802,7 @@ class RustSymbolParser {
// things we omit from output, such as the entire contents of generic-args.
//
// On parse failure returns false.
- ABSL_MUST_USE_RESULT bool ParseOptionalLifetime() {
+ [[nodiscard]] bool ParseOptionalLifetime() {
// lifetime -> L base-62-number
if (!Eat('L')) return true;
int ignored_de_bruijn_index;
@@ -811,14 +811,14 @@ class RustSymbolParser {
// Consumes a lifetime just like ParseOptionalLifetime, but returns false if
// there is no lifetime here.
- ABSL_MUST_USE_RESULT bool ParseRequiredLifetime() {
+ [[nodiscard]] bool ParseRequiredLifetime() {
if (Peek() != 'L') return false;
return ParseOptionalLifetime();
}
// Pushes ns onto the namespace stack and returns true if the stack is not
// full, else returns false.
- ABSL_MUST_USE_RESULT bool PushNamespace(char ns) {
+ [[nodiscard]] bool PushNamespace(char ns) {
if (namespace_depth_ == kNamespaceStackSize) return false;
namespace_stack_[namespace_depth_++] = ns;
return true;
@@ -830,7 +830,7 @@ class RustSymbolParser {
// Pushes position onto the position stack and returns true if the stack is
// not full, else returns false.
- ABSL_MUST_USE_RESULT bool PushPosition(int position) {
+ [[nodiscard]] bool PushPosition(int position) {
if (position_depth_ == kPositionStackSize) return false;
position_stack_[position_depth_++] = position;
return true;
@@ -845,7 +845,7 @@ class RustSymbolParser {
// beginning of the backref target. Returns true on success. Returns false
// if parsing failed, the stack is exhausted, or the backref target position
// is out of range.
- ABSL_MUST_USE_RESULT bool BeginBackref() {
+ [[nodiscard]] bool BeginBackref() {
// backref = B base-62-number (B already consumed)
//
// Reject backrefs that don't parse, overflow int, or don't point backward.
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
index dccadaeb7c2..1746b5d4c34 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -18,6 +18,7 @@
#include "absl/base/attributes.h"
#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/addresses.h"
#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
#include "absl/debugging/stacktrace.h"
@@ -101,7 +102,8 @@ static bool InsideSignalStack(void** ptr, const StackInfo* stack_info) {
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_THREAD // May read random elements from stack.
static void **NextStackFrame(void **old_frame_pointer, const void *uc,
const StackInfo *stack_info) {
void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
@@ -124,6 +126,7 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc,
if (pre_signal_frame_pointer >= old_frame_pointer) {
new_frame_pointer = pre_signal_frame_pointer;
}
+ }
}
#endif
@@ -131,17 +134,13 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc,
if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 7) != 0)
return nullptr;
- // Check that alleged frame pointer is actually readable. This is to
- // prevent "double fault" in case we hit the first fault due to e.g.
- // stack corruption.
- if (!absl::debugging_internal::AddressIsReadable(
- new_frame_pointer))
- return nullptr;
- }
-
+ uintptr_t new_fp_comparable = reinterpret_cast<uintptr_t>(new_frame_pointer);
// Only check the size if both frames are in the same stack.
- if (InsideSignalStack(new_frame_pointer, stack_info) ==
- InsideSignalStack(old_frame_pointer, stack_info)) {
+ const bool old_inside_signal_stack =
+ InsideSignalStack(old_frame_pointer, stack_info);
+ const bool new_inside_signal_stack =
+ InsideSignalStack(new_frame_pointer, stack_info);
+ if (new_inside_signal_stack == old_inside_signal_stack) {
// Check frame size. In strict mode, we assume frames to be under
// 100,000 bytes. In non-strict mode, we relax the limit to 1MB.
const size_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
@@ -155,16 +154,15 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc,
if (frame_size > max_size) {
size_t stack_low = stack_info->stack_low;
size_t stack_high = stack_info->stack_high;
- if (InsideSignalStack(new_frame_pointer, stack_info)) {
+ if (new_inside_signal_stack) {
stack_low = stack_info->sig_stack_low;
stack_high = stack_info->sig_stack_high;
}
if (stack_high < kUnknownStackEnd &&
static_cast<size_t>(getpagesize()) < stack_low) {
- const uintptr_t new_fp_u =
- reinterpret_cast<uintptr_t>(new_frame_pointer);
// Stack bounds are known.
- if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
+ if (!(stack_low < new_fp_comparable &&
+ new_fp_comparable <= stack_high)) {
// new_frame_pointer is not within a known stack.
return nullptr;
}
@@ -174,24 +172,19 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc,
}
}
}
+ // New frame pointer is valid if it is inside either known stack or readable.
+ // This assumes that everything within either known stack is readable. Outside
+ // either known stack but readable is unexpected, and possibly corrupt, but
+ // for now assume it is valid. If it isn't actually valid, the next frame will
+ // be corrupt and we will detect that next iteration.
+ if (new_inside_signal_stack ||
+ (new_fp_comparable >= stack_info->stack_low &&
+ new_fp_comparable < stack_info->stack_high) ||
+ absl::debugging_internal::AddressIsReadable(new_frame_pointer)) {
+ return new_frame_pointer;
+ }
- return new_frame_pointer;
-}
-
-// When PAC-RET (-mbranch-protection=pac-ret) is enabled, return addresses
-// stored on the stack will be signed, which means that pointer bits outside of
-// the VA range are potentially set. Since the stacktrace code is expected to
-// return normal code pointers, this function clears those bits.
-inline void* ClearPacBits(void* ptr) {
- register void* x30 __asm__("x30") = ptr;
- // The normal instruction for clearing PAC bits is XPACI, but for
- // compatibility with ARM platforms that do not support pointer
- // authentication, we use the hint space instruction XPACLRI instead. Hint
- // space instructions behave as NOPs on unsupported platforms.
-#define ABSL_XPACLRI_HINT "hint #0x7;"
- asm(ABSL_XPACLRI_HINT : "+r"(x30)); // asm("xpaclri" : "+r"(x30));
-#undef ABSL_XPACLRI_HINT
- return x30;
+ return nullptr;
}
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
@@ -200,8 +193,10 @@ template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NOINLINE
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
-static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
- const void *ucp, int *min_dropped_frames) {
+ABSL_ATTRIBUTE_NO_SANITIZE_THREAD // May read random elements from stack.
+static int UnwindImpl(void **result, uintptr_t *frames, int *sizes,
+ int max_depth, int skip_count, const void *ucp,
+ int *min_dropped_frames) {
#ifdef __GNUC__
void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
#else
@@ -235,10 +230,18 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
if (skip_count > 0) {
skip_count--;
} else {
- result[n] = ClearPacBits(prev_return_address);
+ result[n] = reinterpret_cast<void *>(
+ absl::debugging_internal::StripPointerMetadata(prev_return_address));
if (IS_STACK_FRAMES) {
- sizes[n] = static_cast<int>(
- ComputeStackFrameSize(prev_frame_pointer, frame_pointer));
+ if (frames != nullptr) {
+ frames[n] = absl::debugging_internal::StripPointerMetadata(
+ prev_frame_pointer) +
+ 2 * sizeof(void *) /* go past the return address */;
+ }
+ if (sizes != nullptr) {
+ sizes[n] = static_cast<int>(
+ ComputeStackFrameSize(prev_frame_pointer, frame_pointer));
+ }
}
n++;
}
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc
index 102a2a1251a..3feb5218638 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc
@@ -19,6 +19,7 @@
#include <cstdint>
+#include "absl/debugging/internal/addresses.h"
#include "absl/debugging/stacktrace.h"
// WARNING:
@@ -67,8 +68,9 @@ void StacktraceArmDummyFunction() { __asm__ volatile(""); }
#endif
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
-static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
- const void * /* ucp */, int *min_dropped_frames) {
+static int UnwindImpl(void **result, uintptr_t *frames, int *sizes,
+ int max_depth, int skip_count, const void * /* ucp */,
+ int *min_dropped_frames) {
#ifdef __GNUC__
void **sp = reinterpret_cast<void**>(__builtin_frame_address(0));
#else
@@ -97,11 +99,18 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
result[n] = *sp;
if (IS_STACK_FRAMES) {
- if (next_sp > sp) {
- sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
- } else {
- // A frame-size of 0 is used to indicate unknown frame size.
- sizes[n] = 0;
+ if (frames != nullptr) {
+ frames[n] = absl::debugging_internal::StripPointerMetadata(sp) +
+ 1 * sizeof(void *) /* go past the return address */;
+ }
+ if (sizes != nullptr) {
+ if (next_sp > sp) {
+ sizes[n] = absl::debugging_internal::StripPointerMetadata(next_sp) -
+ absl::debugging_internal::StripPointerMetadata(sp);
+ } else {
+ // A frame-size of 0 is used to indicate unknown frame size.
+ sizes[n] = 0;
+ }
}
}
n++;
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc
index 0f444514386..2f39c70506b 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc
@@ -21,8 +21,10 @@
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_
#include <emscripten.h>
+#include <stdint.h>
#include <atomic>
+#include <cstddef>
#include <cstring>
#include "absl/base/attributes.h"
@@ -62,8 +64,9 @@ ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() {
}();
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
-static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
- const void *ucp, int *min_dropped_frames) {
+static int UnwindImpl(void **result, uintptr_t *frames, int *sizes,
+ int max_depth, int skip_count, const void *ucp,
+ int *min_dropped_frames) {
if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) {
return 0;
}
@@ -75,7 +78,8 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
int size;
uintptr_t pc = emscripten_stack_snapshot();
- size = emscripten_stack_unwind_buffer(pc, stack, kStackLength);
+ size =
+ static_cast<int>(emscripten_stack_unwind_buffer(pc, stack, kStackLength));
int result_count = size - skip_count;
if (result_count < 0) result_count = 0;
@@ -83,8 +87,13 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
for (int i = 0; i < result_count; i++) result[i] = stack[i + skip_count];
if (IS_STACK_FRAMES) {
- // No implementation for finding out the stack frame sizes yet.
- memset(sizes, 0, sizeof(*sizes) * result_count);
+ // No implementation for finding out the stack frames yet.
+ if (frames != nullptr) {
+ memset(frames, 0, sizeof(*frames) * static_cast<size_t>(result_count));
+ }
+ if (sizes != nullptr) {
+ memset(sizes, 0, sizeof(*sizes) * static_cast<size_t>(result_count));
+ }
}
if (min_dropped_frames != nullptr) {
if (size - skip_count - max_depth > 0) {
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc
index 5fa169a7ec8..e7a11fcd381 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc
@@ -56,8 +56,9 @@ ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() {
}();
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
-static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
- const void *ucp, int *min_dropped_frames) {
+static int UnwindImpl(void** result, uintptr_t* frames, int* sizes,
+ int max_depth, int skip_count, const void* ucp,
+ int* min_dropped_frames) {
if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) {
return 0;
}
@@ -79,8 +80,13 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
result[i] = stack[i + skip_count];
if (IS_STACK_FRAMES) {
- // No implementation for finding out the stack frame sizes yet.
- memset(sizes, 0, sizeof(*sizes) * static_cast<size_t>(result_count));
+ // No implementation for finding out the stack frames yet.
+ if (frames != nullptr) {
+ memset(frames, 0, sizeof(*frames) * static_cast<size_t>(result_count));
+ }
+ if (sizes != nullptr) {
+ memset(sizes, 0, sizeof(*sizes) * static_cast<size_t>(result_count));
+ }
}
if (min_dropped_frames != nullptr) {
if (size - skip_count - max_depth > 0) {
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc
index a49ed2f7f25..f82ca8f2c17 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -21,6 +21,7 @@
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
+#include "absl/debugging/internal/addresses.h"
#if defined(__linux__)
#include <asm/ptrace.h> // for PT_NIP.
#include <ucontext.h> // for ucontext_t
@@ -40,22 +41,22 @@
// Given a stack pointer, return the saved link register value.
// Note that this is the link register for a callee.
-static inline void *StacktracePowerPCGetLR(void **sp) {
+static inline void **StacktracePowerPCGetLRPtr(void **sp) {
// PowerPC has 3 main ABIs, which say where in the stack the
// Link Register is. For DARWIN and AIX (used by apple and
// linux ppc64), it's in sp[2]. For SYSV (used by linux ppc),
// it's in sp[1].
#if defined(_CALL_AIX) || defined(_CALL_DARWIN)
- return *(sp+2);
+ return (sp + 2);
#elif defined(_CALL_SYSV)
- return *(sp+1);
+ return (sp + 1);
#elif defined(__APPLE__) || defined(__FreeBSD__) || \
(defined(__linux__) && defined(__PPC64__))
// This check is in case the compiler doesn't define _CALL_AIX/etc.
- return *(sp+2);
+ return (sp + 2);
#elif defined(__linux)
// This check is in case the compiler doesn't define _CALL_SYSV.
- return *(sp+1);
+ return (sp + 1);
#else
#error Need to specify the PPC ABI for your architecture.
#endif
@@ -68,6 +69,7 @@ static inline void *StacktracePowerPCGetLR(void **sp) {
template<bool STRICT_UNWINDING, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_THREAD // May read random elements from stack.
static void **NextStackFrame(void **old_sp, const void *uc) {
void **new_sp = (void **) *old_sp;
enum { kStackAlignment = 16 };
@@ -125,9 +127,8 @@ static void **NextStackFrame(void **old_sp, const void *uc) {
}
}
- if (new_sp != nullptr &&
- kernel_symbol_status == kAddressValid &&
- StacktracePowerPCGetLR(new_sp) == kernel_sigtramp_rt64_address) {
+ if (new_sp != nullptr && kernel_symbol_status == kAddressValid &&
+ *StacktracePowerPCGetLRPtr(new_sp) == kernel_sigtramp_rt64_address) {
const ucontext_t* signal_context =
reinterpret_cast<const ucontext_t*>(uc);
void **const sp_before_signal =
@@ -164,8 +165,10 @@ ABSL_ATTRIBUTE_NOINLINE static void AbslStacktracePowerPCDummyFunction() {
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
-static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
- const void *ucp, int *min_dropped_frames) {
+ABSL_ATTRIBUTE_NO_SANITIZE_THREAD // May read random elements from stack.
+static int UnwindImpl(void **result, uintptr_t *frames, int *sizes,
+ int max_depth, int skip_count, const void *ucp,
+ int *min_dropped_frames) {
void **sp;
// Apple macOS uses an old version of gnu as -- both Darwin 7.9.0 (Panther)
// and Darwin 8.8.1 (Tiger) use as 1.38. This means we have to use a
@@ -211,13 +214,21 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
if (skip_count > 0) {
skip_count--;
} else {
- result[n] = StacktracePowerPCGetLR(sp);
+ void **lr = StacktracePowerPCGetLRPtr(sp);
+ result[n] = *lr;
if (IS_STACK_FRAMES) {
- if (next_sp > sp) {
- sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
- } else {
- // A frame-size of 0 is used to indicate unknown frame size.
- sizes[n] = 0;
+ if (frames != nullptr) {
+ frames[n] = absl::debugging_internal::StripPointerMetadata(lr) +
+ 1 * sizeof(void *) /* go past the return address */;
+ }
+ if (sizes != nullptr) {
+ if (next_sp > sp) {
+ sizes[n] = absl::debugging_internal::StripPointerMetadata(next_sp) -
+ absl::debugging_internal::StripPointerMetadata(sp);
+ } else {
+ // A frame-size of 0 is used to indicate unknown frame size.
+ sizes[n] = 0;
+ }
}
}
n++;
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc
index 3f9e12407ab..f9919c6156b 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc
@@ -20,6 +20,7 @@
#include <sys/ucontext.h>
#include "absl/base/config.h"
+#include "absl/debugging/internal/addresses.h"
#if defined(__linux__)
#include <sys/mman.h>
#include <ucontext.h>
@@ -55,6 +56,7 @@ static inline ptrdiff_t ComputeStackFrameSize(const T *low, const T *high) {
template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_THREAD // May read random elements from stack.
static void ** NextStackFrame(void **old_frame_pointer, const void *uc,
const std::pair<size_t, size_t> range) {
// .
@@ -117,8 +119,10 @@ static void ** NextStackFrame(void **old_frame_pointer, const void *uc,
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
-static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
- const void *ucp, int *min_dropped_frames) {
+ABSL_ATTRIBUTE_NO_SANITIZE_THREAD // May read random elements from stack.
+static int UnwindImpl(void **result, uintptr_t *frames, int *sizes,
+ int max_depth, int skip_count, const void *ucp,
+ int *min_dropped_frames) {
// The `frame_pointer` that is computed here points to the top of the frame.
// The two words preceding the address are the return address and the previous
// frame pointer.
@@ -153,8 +157,13 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
result[n] = return_address;
if (IS_STACK_FRAMES) {
// NextStackFrame() has already checked that frame size fits to int
- sizes[n] = static_cast<int>(ComputeStackFrameSize(frame_pointer,
- next_frame_pointer));
+ if (frames != nullptr) {
+ frames[n] =
+ absl::debugging_internal::StripPointerMetadata(frame_pointer);
+ }
+ if (sizes != nullptr) {
+ sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
+ }
}
n++;
}
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_unimplemented-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_unimplemented-inl.inc
index 5b8fb191b65..ec63940b845 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_unimplemented-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_unimplemented-inl.inc
@@ -2,9 +2,10 @@
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
-static int UnwindImpl(void** /* result */, int* /* sizes */,
- int /* max_depth */, int /* skip_count */,
- const void* /* ucp */, int *min_dropped_frames) {
+static int UnwindImpl(void** /* result */, uintptr_t* /* frames */,
+ int* /* sizes */, int /* max_depth */,
+ int /* skip_count */, const void* /* ucp */,
+ int* min_dropped_frames) {
if (min_dropped_frames != nullptr) {
*min_dropped_frames = 0;
}
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc
index ef2b973ec3e..f57c187253f 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc
@@ -37,42 +37,29 @@
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
-#include <windows.h> // for GetProcAddress and GetModuleHandle
-#include <cassert>
-
-typedef USHORT NTAPI RtlCaptureStackBackTrace_Function(
- IN ULONG frames_to_skip,
- IN ULONG frames_to_capture,
- OUT PVOID *backtrace,
- OUT PULONG backtrace_hash);
+#include <windows.h> // CaptureStackBackTrace
-// It is not possible to load RtlCaptureStackBackTrace at static init time in
-// UWP. CaptureStackBackTrace is the public version of RtlCaptureStackBackTrace
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
- !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
-static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
- &::CaptureStackBackTrace;
-#else
-// Load the function we need at static init time, where we don't have
-// to worry about someone else holding the loader's lock.
-static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
- (RtlCaptureStackBackTrace_Function*)GetProcAddress(
- GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace");
-#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
+#include <cassert>
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
-static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
- const void*, int* min_dropped_frames) {
+static int UnwindImpl(void** result, uintptr_t* frames, int* sizes,
+ int max_depth, int skip_count, const void*,
+ int* min_dropped_frames) {
USHORT n = 0;
- if (!RtlCaptureStackBackTrace_fn || skip_count < 0 || max_depth < 0) {
+ if (skip_count < 0 || max_depth < 0) {
// can't get a stacktrace with no function/invalid args
} else {
- n = RtlCaptureStackBackTrace_fn(static_cast<ULONG>(skip_count) + 2,
- static_cast<ULONG>(max_depth), result, 0);
+ n = CaptureStackBackTrace(static_cast<ULONG>(skip_count) + 2,
+ static_cast<ULONG>(max_depth), result, 0);
}
if (IS_STACK_FRAMES) {
- // No implementation for finding out the stack frame sizes yet.
- memset(sizes, 0, sizeof(*sizes) * n);
+ // No implementation for finding out the stack frames yet.
+ if (frames != nullptr) {
+ memset(frames, 0, sizeof(*frames) * n);
+ }
+ if (sizes != nullptr) {
+ memset(sizes, 0, sizeof(*sizes) * n);
+ }
}
if (min_dropped_frames != nullptr) {
// Not implemented.
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc
index 1975ba74434..96b128e04ea 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc
@@ -33,6 +33,7 @@
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/addresses.h"
#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
#include "absl/debugging/stacktrace.h"
@@ -163,6 +164,7 @@ static uintptr_t GetFP(const void *vuc) {
template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_THREAD // May read random elements from stack.
static void **NextStackFrame(void **old_fp, const void *uc,
size_t stack_low, size_t stack_high) {
void **new_fp = (void **)*old_fp;
@@ -326,9 +328,11 @@ static void **NextStackFrame(void **old_fp, const void *uc,
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_THREAD // May read random elements from stack.
ABSL_ATTRIBUTE_NOINLINE
-static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
- const void *ucp, int *min_dropped_frames) {
+static int UnwindImpl(void **result, uintptr_t *frames, int *sizes,
+ int max_depth, int skip_count, const void *ucp,
+ int *min_dropped_frames) {
int n = 0;
void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
@@ -349,13 +353,19 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
} else {
result[n] = *(fp + 1);
if (IS_STACK_FRAMES) {
- if (next_fp > fp) {
- sizes[n] = static_cast<int>(
- reinterpret_cast<uintptr_t>(next_fp) -
- reinterpret_cast<uintptr_t>(fp));
- } else {
- // A frame-size of 0 is used to indicate unknown frame size.
- sizes[n] = 0;
+ if (frames) {
+ frames[n] = absl::debugging_internal::StripPointerMetadata(fp) +
+ 2 * sizeof(void *) /* go past the return address */;
+ }
+ if (sizes) {
+ if (next_fp > fp) {
+ sizes[n] = static_cast<int>(
+ absl::debugging_internal::StripPointerMetadata(next_fp) -
+ absl::debugging_internal::StripPointerMetadata(fp));
+ } else {
+ // A frame-size of 0 is used to indicate unknown frame size.
+ sizes[n] = 0;
+ }
}
}
n++;
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/stacktrace.cc b/contrib/restricted/abseil-cpp/absl/debugging/stacktrace.cc
index ff8069f8431..f71e80cfef0 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/stacktrace.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/stacktrace.cc
@@ -36,12 +36,42 @@
#include "absl/debugging/stacktrace.h"
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
#include <atomic>
#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/debugging/internal/stacktrace_config.h"
+#ifdef ABSL_INTERNAL_HAVE_ALLOCA
+#error ABSL_INTERNAL_HAVE_ALLOCA cannot be directly set
+#endif
+
+#ifdef _WIN32
+#include <malloc.h>
+#define ABSL_INTERNAL_HAVE_ALLOCA 1
+#else
+#ifdef __has_include
+#if __has_include(<alloca.h>)
+#include <alloca.h>
+#define ABSL_INTERNAL_HAVE_ALLOCA 1
+#elif !defined(alloca)
+static void* alloca(size_t) noexcept { return nullptr; }
+#endif
+#endif
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_ALLOCA
+static constexpr bool kHaveAlloca = true;
+#else
+static constexpr bool kHaveAlloca = false;
+#endif
+
#if defined(ABSL_STACKTRACE_INL_HEADER)
#include ABSL_STACKTRACE_INL_HEADER
#else
@@ -66,59 +96,111 @@ typedef int (*Unwinder)(void**, int*, int, int, const void*, int*);
std::atomic<Unwinder> custom;
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, int* sizes,
- int max_depth, int skip_count,
- const void* uc,
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, uintptr_t* frames,
+ int* sizes, int max_depth,
+ int skip_count, const void* uc,
int* min_dropped_frames) {
- Unwinder f = &UnwindImpl<IS_STACK_FRAMES, IS_WITH_CONTEXT>;
Unwinder g = custom.load(std::memory_order_acquire);
- if (g != nullptr) f = g;
-
+ int size;
// Add 1 to skip count for the unwinder function itself
- int size = (*f)(result, sizes, max_depth, skip_count + 1, uc,
- min_dropped_frames);
- // To disable tail call to (*f)(...)
+ ++skip_count;
+ if (g != nullptr) {
+ size = (*g)(result, sizes, max_depth, skip_count, uc, min_dropped_frames);
+ // Frame pointers aren't returned by existing hooks, so clear them.
+ if (frames != nullptr) {
+ std::fill(frames, frames + size, uintptr_t());
+ }
+ } else {
+ size = UnwindImpl<IS_STACK_FRAMES, IS_WITH_CONTEXT>(
+ result, frames, sizes, max_depth, skip_count, uc, min_dropped_frames);
+ }
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
return size;
}
} // anonymous namespace
-ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackFrames(
- void** result, int* sizes, int max_depth, int skip_count) {
- return Unwind<true, false>(result, sizes, max_depth, skip_count, nullptr,
- nullptr);
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
+internal_stacktrace::GetStackFrames(void** result, uintptr_t* frames,
+ int* sizes, int max_depth, int skip_count) {
+ if (internal_stacktrace::ShouldFixUpStack()) {
+ size_t depth = static_cast<size_t>(Unwind<true, true>(
+ result, frames, sizes, max_depth, skip_count, nullptr, nullptr));
+ internal_stacktrace::FixUpStack(result, frames, sizes,
+ static_cast<size_t>(max_depth), depth);
+ return static_cast<int>(depth);
+ }
+
+ return Unwind<true, false>(result, frames, sizes, max_depth, skip_count,
+ nullptr, nullptr);
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
-GetStackFramesWithContext(void** result, int* sizes, int max_depth,
- int skip_count, const void* uc,
- int* min_dropped_frames) {
- return Unwind<true, true>(result, sizes, max_depth, skip_count, uc,
+internal_stacktrace::GetStackFramesWithContext(void** result, uintptr_t* frames,
+ int* sizes, int max_depth,
+ int skip_count, const void* uc,
+ int* min_dropped_frames) {
+ if (internal_stacktrace::ShouldFixUpStack()) {
+ size_t depth = static_cast<size_t>(Unwind<true, true>(
+ result, frames, sizes, max_depth, skip_count, uc, min_dropped_frames));
+ internal_stacktrace::FixUpStack(result, frames, sizes,
+ static_cast<size_t>(max_depth), depth);
+ return static_cast<int>(depth);
+ }
+
+ return Unwind<true, true>(result, frames, sizes, max_depth, skip_count, uc,
min_dropped_frames);
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace(
void** result, int max_depth, int skip_count) {
- return Unwind<false, false>(result, nullptr, max_depth, skip_count, nullptr,
- nullptr);
+ if (internal_stacktrace::ShouldFixUpStack()) {
+ if constexpr (kHaveAlloca) {
+ const size_t nmax = static_cast<size_t>(max_depth);
+ uintptr_t* frames =
+ static_cast<uintptr_t*>(alloca(nmax * sizeof(*frames)));
+ int* sizes = static_cast<int*>(alloca(nmax * sizeof(*sizes)));
+ size_t depth = static_cast<size_t>(Unwind<true, false>(
+ result, frames, sizes, max_depth, skip_count, nullptr, nullptr));
+ internal_stacktrace::FixUpStack(result, frames, sizes, nmax, depth);
+ return static_cast<int>(depth);
+ }
+ }
+
+ return Unwind<false, false>(result, nullptr, nullptr, max_depth, skip_count,
+ nullptr, nullptr);
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
GetStackTraceWithContext(void** result, int max_depth, int skip_count,
const void* uc, int* min_dropped_frames) {
- return Unwind<false, true>(result, nullptr, max_depth, skip_count, uc,
- min_dropped_frames);
+ if (internal_stacktrace::ShouldFixUpStack()) {
+ if constexpr (kHaveAlloca) {
+ const size_t nmax = static_cast<size_t>(max_depth);
+ uintptr_t* frames =
+ static_cast<uintptr_t*>(alloca(nmax * sizeof(*frames)));
+ int* sizes = static_cast<int*>(alloca(nmax * sizeof(*sizes)));
+ size_t depth = static_cast<size_t>(
+ Unwind<true, true>(result, frames, sizes, max_depth, skip_count, uc,
+ min_dropped_frames));
+ internal_stacktrace::FixUpStack(result, frames, sizes, nmax, depth);
+ return static_cast<int>(depth);
+ }
+ }
+
+ return Unwind<false, true>(result, nullptr, nullptr, max_depth, skip_count,
+ uc, min_dropped_frames);
}
void SetStackUnwinder(Unwinder w) {
custom.store(w, std::memory_order_release);
}
-int DefaultStackUnwinder(void** pcs, int* sizes, int depth, int skip,
- const void* uc, int* min_dropped_frames) {
+ABSL_ATTRIBUTE_ALWAYS_INLINE static inline int DefaultStackUnwinderImpl(
+ void** pcs, uintptr_t* frames, int* sizes, int depth, int skip,
+ const void* uc, int* min_dropped_frames) {
skip++; // For this function
- Unwinder f = nullptr;
+ decltype(&UnwindImpl<false, false>) f;
if (sizes == nullptr) {
if (uc == nullptr) {
f = &UnwindImpl<false, false>;
@@ -132,11 +214,46 @@ int DefaultStackUnwinder(void** pcs, int* sizes, int depth, int skip,
f = &UnwindImpl<true, true>;
}
}
- volatile int x = 0;
- int n = (*f)(pcs, sizes, depth, skip, uc, min_dropped_frames);
- x = 1; (void) x; // To disable tail call to (*f)(...)
+ return (*f)(pcs, frames, sizes, depth, skip, uc, min_dropped_frames);
+}
+
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
+internal_stacktrace::DefaultStackUnwinder(void** pcs, uintptr_t* frames,
+ int* sizes, int depth, int skip,
+ const void* uc,
+ int* min_dropped_frames) {
+ int n = DefaultStackUnwinderImpl(pcs, frames, sizes, depth, skip, uc,
+ min_dropped_frames);
+ ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+ return n;
+}
+
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int DefaultStackUnwinder(
+ void** pcs, int* sizes, int depth, int skip, const void* uc,
+ int* min_dropped_frames) {
+ int n = DefaultStackUnwinderImpl(pcs, nullptr, sizes, depth, skip, uc,
+ min_dropped_frames);
+ ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
return n;
}
+ABSL_ATTRIBUTE_WEAK bool internal_stacktrace::ShouldFixUpStack() {
+ return false;
+}
+
+// Fixes up the stack trace of the current thread, in the first `depth` frames
+// of each buffer. The buffers need to be larger than `depth`, to accommodate
+// any newly inserted elements. `depth` is updated to reflect the new number of
+// elements valid across all the buffers. (It is therefore recommended that all
+// buffer sizes be equal.)
+//
+// The `frames` and `sizes` parameters denote the bounds of the stack frame
+// corresponding to each instruction pointer in the `pcs`.
+// Any elements inside these buffers may be zero or null, in which case that
+// information is assumed to be absent/unavailable.
+ABSL_ATTRIBUTE_WEAK void internal_stacktrace::FixUpStack(void**, uintptr_t*,
+ int*, size_t,
+ size_t&) {}
+
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/stacktrace.h b/contrib/restricted/abseil-cpp/absl/debugging/stacktrace.h
index 0ec0ffdabd4..87771724d8c 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/stacktrace.h
+++ b/contrib/restricted/abseil-cpp/absl/debugging/stacktrace.h
@@ -31,11 +31,53 @@
#ifndef ABSL_DEBUGGING_STACKTRACE_H_
#define ABSL_DEBUGGING_STACKTRACE_H_
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
+namespace internal_stacktrace {
+
+// Same as `absl::GetStackFrames`, but with an optional `frames` parameter to
+// allow callers to receive the raw stack frame addresses.
+// This is internal for now; use `absl::GetStackFrames()` instead.
+extern int GetStackFrames(void** result, uintptr_t* frames, int* sizes,
+ int max_depth, int skip_count);
+
+// Same as `absl::GetStackFramesWithContext`, but with an optional `frames`
+// parameter to allow callers to receive a start address for each stack frame.
+// The address may be zero in cases where it cannot be computed.
+//
+// DO NOT use this function without consulting the owners of absl/debuggging.
+// There is NO GUARANTEE on the precise frame addresses returned on any given
+// platform. It is only intended to provide sufficient non-overlapping bounds on
+// the local variables of a stack frame when used in conjunction with the
+// returned frame sizes. The actual pointers may be ABI-dependent, may vary at
+// run time, and are subject to breakage without notice.
+//
+// Implementation note:
+// Currently, we *attempt* to return the Canonical Frame Address (CFA) in DWARF
+// on most platforms. This is the value of the stack pointer just before the
+// 'call' instruction is executed in the caller.
+// Not all platforms and toolchains support this exact address, so this should
+// not be relied on for correctness.
+extern int GetStackFramesWithContext(void** result, uintptr_t* frames,
+ int* sizes, int max_depth, int skip_count,
+ const void* uc, int* min_dropped_frames);
+
+// Same as `absl::DefaultStackUnwinder`, but with an optional `frames` parameter
+// to allow callers to receive the raw stack frame addresses.
+// This is internal for now; do not depend on this externally.
+extern int DefaultStackUnwinder(void** pcs, uintptr_t* frames, int* sizes,
+ int max_depth, int skip_count, const void* uc,
+ int* min_dropped_frames);
+
+} // namespace internal_stacktrace
+
// GetStackFrames()
//
// Records program counter values for up to `max_depth` frames, skipping the
@@ -78,8 +120,13 @@ ABSL_NAMESPACE_BEGIN
//
// This routine may return fewer stack frame entries than are
// available. Also note that `result` and `sizes` must both be non-null.
-extern int GetStackFrames(void** result, int* sizes, int max_depth,
- int skip_count);
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline int GetStackFrames(void** result,
+ int* sizes,
+ int max_depth,
+ int skip_count) {
+ return internal_stacktrace::GetStackFrames(result, nullptr, sizes, max_depth,
+ skip_count);
+}
// GetStackFramesWithContext()
//
@@ -102,9 +149,12 @@ extern int GetStackFrames(void** result, int* sizes, int max_depth,
// or other reasons. (This value will be set to `0` if no frames were dropped.)
// The number of total stack frames is guaranteed to be >= skip_count +
// max_depth + *min_dropped_frames.
-extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
- int skip_count, const void* uc,
- int* min_dropped_frames);
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline int GetStackFramesWithContext(
+ void** result, int* sizes, int max_depth, int skip_count, const void* uc,
+ int* min_dropped_frames) {
+ return internal_stacktrace::GetStackFramesWithContext(
+ result, nullptr, sizes, max_depth, skip_count, uc, min_dropped_frames);
+}
// GetStackTrace()
//
@@ -225,6 +275,24 @@ namespace debugging_internal {
// working.
extern bool StackTraceWorksForTest();
} // namespace debugging_internal
+
+namespace internal_stacktrace {
+extern bool ShouldFixUpStack();
+
+// Fixes up the stack trace of the current thread, in the first `depth` frames
+// of each buffer. The buffers need to be larger than `depth`, to accommodate
+// any newly inserted elements. `depth` is updated to reflect the new number of
+// elements valid across all the buffers. (It is therefore recommended that all
+// buffer sizes be equal.)
+//
+// The `frames` and `sizes` parameters denote the bounds of the stack frame
+// corresponding to each instruction pointer in the `pcs`.
+// Any elements inside these buffers may be zero or null, in which case that
+// information is assumed to be absent/unavailable.
+extern void FixUpStack(void** pcs, uintptr_t* frames, int* sizes,
+ size_t capacity, size_t& depth);
+} // namespace internal_stacktrace
+
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/symbolize_elf.inc b/contrib/restricted/abseil-cpp/absl/debugging/symbolize_elf.inc
index a98ca81d175..9836c93295c 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/symbolize_elf.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/symbolize_elf.inc
@@ -125,12 +125,20 @@ namespace {
// Some platforms use a special .opd section to store function pointers.
const char kOpdSectionName[] = ".opd";
-#if (defined(__powerpc__) && !(_CALL_ELF > 1)) || defined(__ia64)
+#if defined(__powerpc64__) && defined(_CALL_ELF)
+#if _CALL_ELF <= 1
+#define ABSL_INTERNAL_HAVE_PPC64_ELFV1_ABI 1
+#endif
+#endif
+#if defined(ABSL_INTERNAL_HAVE_PPC64_ELFV1_ABI) || defined(__ia64)
// Use opd section for function descriptors on these platforms, the function
// address is the first word of the descriptor.
-enum { kPlatformUsesOPDSections = 1 };
-#else // not PPC or IA64
-enum { kPlatformUsesOPDSections = 0 };
+//
+// https://maskray.me/blog/2023-02-26-linker-notes-on-power-isa notes that
+// opd sections are used on 64-bit PowerPC with the ELFv1 ABI.
+inline constexpr bool kPlatformUsesOPDSections = true;
+#else
+inline constexpr bool kPlatformUsesOPDSections = false;
#endif
// This works for PowerPC & IA64 only. A function descriptor consist of two
@@ -1451,11 +1459,11 @@ static bool MaybeInitializeObjFile(ObjFile *obj) {
}
phoff += phentsize;
-#if defined(__powerpc__) && !(_CALL_ELF > 1)
- // On the PowerPC ELF v1 ABI, function pointers actually point to function
- // descriptors. These descriptors are stored in an .opd section, which is
- // mapped read-only. We thus need to look at all readable segments, not
- // just the executable ones.
+#ifdef ABSL_INTERNAL_HAVE_PPC64_ELFV1_ABI
+ // On the PowerPC 64-bit ELFv1 ABI, function pointers actually point to
+ // function descriptors. These descriptors are stored in an .opd section,
+ // which is mapped read-only. We thus need to look at all readable
+ // segments, not just the executable ones.
constexpr int interesting = PF_R;
#else
constexpr int interesting = PF_X | PF_R;
@@ -1762,3 +1770,5 @@ extern "C" bool AbslInternalGetFileMappingHint(const void **start,
return absl::debugging_internal::GetFileMappingHint(start, end, offset,
filename);
}
+
+#undef ABSL_INTERNAL_HAVE_PPC64_ELFV1_ABI
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/symbolize_emscripten.inc b/contrib/restricted/abseil-cpp/absl/debugging/symbolize_emscripten.inc
index a0f344dd9b5..f6da0ac202f 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/symbolize_emscripten.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/symbolize_emscripten.inc
@@ -58,12 +58,13 @@ bool Symbolize(const void* pc, char* out, int out_size) {
return false;
}
- strncpy(out, func_name, out_size);
+ strncpy(out, func_name, static_cast<size_t>(out_size));
if (out[out_size - 1] != '\0') {
// strncpy() does not '\0' terminate when it truncates.
static constexpr char kEllipsis[] = "...";
- int ellipsis_size = std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
+ size_t ellipsis_size =
+ std::min(sizeof(kEllipsis) - 1, static_cast<size_t>(out_size) - 1);
memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
out[out_size - 1] = '\0';
}
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/symbolize_win32.inc b/contrib/restricted/abseil-cpp/absl/debugging/symbolize_win32.inc
index 53a099a1814..589890f31e3 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/symbolize_win32.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/symbolize_win32.inc
@@ -15,7 +15,9 @@
// See "Retrieving Symbol Information by Address":
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms680578(v=vs.85).aspx
+#include <ntstatus.h>
#include <windows.h>
+#include <winternl.h>
// MSVC header dbghelp.h has a warning for an ignored typedef.
#pragma warning(push)
@@ -45,13 +47,30 @@ void InitializeSymbolizer(const char*) {
// symbols be loaded. This is the fastest, most efficient way to use
// the symbol handler.
SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME);
- if (!SymInitialize(process, nullptr, true)) {
- // GetLastError() returns a Win32 DWORD, but we assign to
- // unsigned long long to simplify the ABSL_RAW_LOG case below. The uniform
- // initialization guarantees this is not a narrowing conversion.
- const unsigned long long error{GetLastError()}; // NOLINT(runtime/int)
- ABSL_RAW_LOG(FATAL, "SymInitialize() failed: %llu", error);
+ DWORD syminitialize_error;
+ constexpr int kSymInitializeRetries = 5;
+ for (int i = 0; i < kSymInitializeRetries; ++i) {
+ if (SymInitialize(process, nullptr, true)) {
+ return;
+ }
+
+ // SymInitialize can fail sometimes with a STATUS_INFO_LENGTH_MISMATCH
+ // NTSTATUS (0xC0000004), which can happen when a module load occurs during
+ // the SymInitialize call. In this case, we should try calling SymInitialize
+ // again.
+ syminitialize_error = GetLastError();
+ // Both NTSTATUS and DWORD are 32-bit numbers, which makes the cast safe.
+ if (syminitialize_error !=
+ static_cast<DWORD>(STATUS_INFO_LENGTH_MISMATCH)) {
+ break;
+ }
}
+
+ // GetLastError() returns a Win32 DWORD, but we assign to
+ // unsigned long long to simplify the ABSL_RAW_LOG case below. The uniform
+ // initialization guarantees this is not a narrowing conversion.
+ const unsigned long long error{syminitialize_error};
+ ABSL_RAW_LOG(FATAL, "SymInitialize() failed: %llu", error);
}
bool Symbolize(const void* pc, char* out, int out_size) {
diff --git a/contrib/restricted/abseil-cpp/absl/flags/commandlineflag.h b/contrib/restricted/abseil-cpp/absl/flags/commandlineflag.h
index a9ffd020844..9098b4cb27d 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/commandlineflag.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/commandlineflag.h
@@ -30,7 +30,7 @@
#include <string>
#include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
#include "absl/flags/internal/commandlineflag.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
@@ -80,7 +80,7 @@ class CommandLineFlag {
// Return true iff flag has type T.
template <typename T>
inline bool IsOfType() const {
- return TypeId() == base_internal::FastTypeId<T>();
+ return TypeId() == FastTypeId<T>();
}
// absl::CommandLineFlag::TryGet()
diff --git a/contrib/restricted/abseil-cpp/absl/flags/flag.h b/contrib/restricted/abseil-cpp/absl/flags/flag.h
index 19d0ef992ff..e052d5fcee2 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/flag.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/flag.h
@@ -35,6 +35,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/flags/commandlineflag.h"
#include "absl/flags/config.h"
@@ -94,7 +95,7 @@ using Flag = flags_internal::Flag<T>;
// // FLAGS_firstname is a Flag of type `std::string`
// std::string first_name = absl::GetFlag(FLAGS_firstname);
template <typename T>
-ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag<T>& flag) {
+[[nodiscard]] T GetFlag(const absl::Flag<T>& flag) {
return flags_internal::FlagImplPeer::InvokeGet<T>(flag);
}
@@ -106,7 +107,7 @@ ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag<T>& flag) {
// thread-safe, but is potentially expensive. Avoid setting flags in general,
// but especially within performance-critical code.
template <typename T>
-void SetFlag(absl::Flag<T>* flag, const T& v) {
+void SetFlag(absl::Flag<T>* absl_nonnull flag, const T& v) {
flags_internal::FlagImplPeer::InvokeSet(*flag, v);
}
@@ -114,7 +115,7 @@ void SetFlag(absl::Flag<T>* flag, const T& v) {
// convertible to `T`. E.g., use this overload to pass a "const char*" when `T`
// is `std::string`.
template <typename T, typename V>
-void SetFlag(absl::Flag<T>* flag, const V& v) {
+void SetFlag(absl::Flag<T>* absl_nonnull flag, const V& v) {
T value(v);
flags_internal::FlagImplPeer::InvokeSet(*flag, value);
}
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag.h b/contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag.h
index ebfe81ba1e3..daef4e354ce 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag.h
@@ -17,7 +17,7 @@
#define ABSL_FLAGS_INTERNAL_COMMANDLINEFLAG_H_
#include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -28,7 +28,7 @@ namespace flags_internal {
// cases this id is enough to uniquely identify the flag's value type. In a few
// cases we'll have to resort to using actual RTTI implementation if it is
// available.
-using FlagFastTypeId = absl::base_internal::FastTypeIdType;
+using FlagFastTypeId = absl::FastTypeIdType;
// Options that control SetCommandLineOptionWithMode.
enum FlagSettingMode {
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc
index ccd26670cf8..37f6ef1e9d6 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc
@@ -34,6 +34,7 @@
#include "absl/base/config.h"
#include "absl/base/const_init.h"
#include "absl/base/dynamic_annotations.h"
+#include "absl/base/fast_type_id.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
@@ -59,7 +60,7 @@ namespace {
// Currently we only validate flag values for user-defined flag types.
bool ShouldValidateFlagValue(FlagFastTypeId flag_type_id) {
#define DONT_VALIDATE(T, _) \
- if (flag_type_id == base_internal::FastTypeId<T>()) return false;
+ if (flag_type_id == absl::FastTypeId<T>()) return false;
ABSL_FLAGS_INTERNAL_SUPPORTED_TYPES(DONT_VALIDATE)
#undef DONT_VALIDATE
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h
index a6e7986f961..b61a24737fd 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h
@@ -57,7 +57,7 @@ template <typename T>
using Flag = flags_internal::Flag<T>;
template <typename T>
-ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag<T>& flag);
+[[nodiscard]] T GetFlag(const absl::Flag<T>& flag);
template <typename T>
void SetFlag(absl::Flag<T>* flag, const T& v);
@@ -783,7 +783,7 @@ class FlagImpl final : public CommandLineFlag {
// heap allocation during initialization, which is both slows program startup
// and can fail. Using reserved space + placement new allows us to avoid both
// problems.
- alignas(absl::Mutex) mutable char data_guard_[sizeof(absl::Mutex)];
+ alignas(absl::Mutex) mutable unsigned char data_guard_[sizeof(absl::Mutex)];
};
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
@@ -828,7 +828,7 @@ class Flag {
U u;
#if !defined(NDEBUG)
- impl_.AssertValidType(base_internal::FastTypeId<T>(), &GenRuntimeTypeId<T>);
+ impl_.AssertValidType(absl::FastTypeId<T>(), &GenRuntimeTypeId<T>);
#endif
if (ABSL_PREDICT_FALSE(!value_.Get(impl_.seq_lock_, u.value))) {
@@ -837,7 +837,7 @@ class Flag {
return std::move(u.value);
}
void Set(const T& v) {
- impl_.AssertValidType(base_internal::FastTypeId<T>(), &GenRuntimeTypeId<T>);
+ impl_.AssertValidType(absl::FastTypeId<T>(), &GenRuntimeTypeId<T>);
impl_.Write(&v);
}
@@ -876,7 +876,8 @@ class FlagImplPeer {
template <typename T>
void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) {
struct AlignedSpace {
- alignas(MaskedPointer::RequiredAlignment()) alignas(T) char buf[sizeof(T)];
+ alignas(MaskedPointer::RequiredAlignment()) alignas(
+ T) unsigned char buf[sizeof(T)];
};
using Allocator = std::allocator<AlignedSpace>;
switch (op) {
@@ -901,7 +902,7 @@ void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) {
case FlagOp::kSizeof:
return reinterpret_cast<void*>(static_cast<uintptr_t>(sizeof(T)));
case FlagOp::kFastTypeId:
- return const_cast<void*>(base_internal::FastTypeId<T>());
+ return const_cast<void*>(absl::FastTypeId<T>());
case FlagOp::kRuntimeTypeId:
return const_cast<std::type_info*>(GenRuntimeTypeId<T>());
case FlagOp::kParse: {
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/registry.h b/contrib/restricted/abseil-cpp/absl/flags/internal/registry.h
index 4b68c85f5c4..be9aaccc7ef 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/registry.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/registry.h
@@ -19,6 +19,7 @@
#include <functional>
#include "absl/base/config.h"
+#include "absl/base/fast_type_id.h"
#include "absl/flags/commandlineflag.h"
#include "absl/flags/internal/commandlineflag.h"
#include "absl/strings/string_view.h"
@@ -73,7 +74,7 @@ void FinalizeRegistry();
//
// Retire flag with name "name" and type indicated by ops.
-void Retire(const char* name, FlagFastTypeId type_id, char* buf);
+void Retire(const char* name, FlagFastTypeId type_id, unsigned char* buf);
constexpr size_t kRetiredFlagObjSize = 3 * sizeof(void*);
constexpr size_t kRetiredFlagObjAlignment = alignof(void*);
@@ -83,11 +84,11 @@ template <typename T>
class RetiredFlag {
public:
void Retire(const char* flag_name) {
- flags_internal::Retire(flag_name, base_internal::FastTypeId<T>(), buf_);
+ flags_internal::Retire(flag_name, absl::FastTypeId<T>(), buf_);
}
private:
- alignas(kRetiredFlagObjAlignment) char buf_[kRetiredFlagObjSize];
+ alignas(kRetiredFlagObjAlignment) unsigned char buf_[kRetiredFlagObjSize];
};
} // namespace flags_internal
diff --git a/contrib/restricted/abseil-cpp/absl/flags/parse.cc b/contrib/restricted/abseil-cpp/absl/flags/parse.cc
index 8be2016fd69..c87cacdc7c5 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/parse.cc
+++ b/contrib/restricted/abseil-cpp/absl/flags/parse.cc
@@ -76,9 +76,13 @@ ABSL_CONST_INIT bool fromenv_needs_processing
ABSL_CONST_INIT bool tryfromenv_needs_processing
ABSL_GUARDED_BY(ProcessingChecksMutex()) = false;
-ABSL_CONST_INIT absl::Mutex specified_flags_guard(absl::kConstInit);
+absl::Mutex* SpecifiedFlagsMutex() {
+ static absl::NoDestructor<absl::Mutex> mutex;
+ return mutex.get();
+}
+
ABSL_CONST_INIT std::vector<const CommandLineFlag*>* specified_flags
- ABSL_GUARDED_BY(specified_flags_guard) = nullptr;
+ ABSL_GUARDED_BY(SpecifiedFlagsMutex()) = nullptr;
// Suggesting at most kMaxHints flags in case of misspellings.
ABSL_CONST_INIT const size_t kMaxHints = 100;
@@ -640,7 +644,7 @@ void ReportUnrecognizedFlags(
// --------------------------------------------------------------------
bool WasPresentOnCommandLine(absl::string_view flag_name) {
- absl::ReaderMutexLock l(&specified_flags_guard);
+ absl::ReaderMutexLock l(SpecifiedFlagsMutex());
ABSL_INTERNAL_CHECK(specified_flags != nullptr,
"ParseCommandLine is not invoked yet");
@@ -767,7 +771,7 @@ HelpMode ParseAbseilFlagsOnlyImpl(
}
positional_args.push_back(argv[0]);
- absl::MutexLock l(&flags_internal::specified_flags_guard);
+ absl::MutexLock l(flags_internal::SpecifiedFlagsMutex());
if (specified_flags == nullptr) {
specified_flags = new std::vector<const CommandLineFlag*>;
} else {
diff --git a/contrib/restricted/abseil-cpp/absl/flags/reflection.cc b/contrib/restricted/abseil-cpp/absl/flags/reflection.cc
index ea856ff9c16..b8b4a2ea703 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/reflection.cc
+++ b/contrib/restricted/abseil-cpp/absl/flags/reflection.cc
@@ -289,11 +289,10 @@ class RetiredFlagObj final : public CommandLineFlag {
} // namespace
-void Retire(const char* name, FlagFastTypeId type_id, char* buf) {
+void Retire(const char* name, FlagFastTypeId type_id, unsigned char* buf) {
static_assert(sizeof(RetiredFlagObj) == kRetiredFlagObjSize, "");
static_assert(alignof(RetiredFlagObj) == kRetiredFlagObjAlignment, "");
- auto* flag = ::new (static_cast<void*>(buf))
- flags_internal::RetiredFlagObj(name, type_id);
+ auto* flag = ::new (buf) flags_internal::RetiredFlagObj(name, type_id);
FlagRegistry::GlobalRegistry().RegisterFlag(*flag, nullptr);
}
diff --git a/contrib/restricted/abseil-cpp/absl/functional/any_invocable.h b/contrib/restricted/abseil-cpp/absl/functional/any_invocable.h
index 3acb9fd0896..43ea9af8824 100644
--- a/contrib/restricted/abseil-cpp/absl/functional/any_invocable.h
+++ b/contrib/restricted/abseil-cpp/absl/functional/any_invocable.h
@@ -25,7 +25,7 @@
//
// NOTE: `absl::AnyInvocable` is similar to the C++23 `std::move_only_function`
// abstraction, but has a slightly different API and is not designed to be a
-// drop-in replacement or C++11-compatible backfill of that type.
+// drop-in replacement or backfill of that type.
//
// Credits to Matt Calabrese (https://github.com/mattcalabrese) for the original
// implementation.
@@ -97,11 +97,10 @@ ABSL_NAMESPACE_BEGIN
// my_func(std::move(func6));
//
// `AnyInvocable` also properly respects `const` qualifiers, reference
-// qualifiers, and the `noexcept` specification (only in C++ 17 and beyond) as
-// part of the user-specified function type (e.g.
-// `AnyInvocable<void() const && noexcept>`). These qualifiers will be applied
-// to the `AnyInvocable` object's `operator()`, and the underlying invocable
-// must be compatible with those qualifiers.
+// qualifiers, and the `noexcept` specification as part of the user-specified
+// function type (e.g. `AnyInvocable<void() const && noexcept>`). These
+// qualifiers will be applied to the `AnyInvocable` object's `operator()`, and
+// the underlying invocable must be compatible with those qualifiers.
//
// Comparison of const and non-const function types:
//
@@ -280,11 +279,10 @@ class AnyInvocable : private internal_any_invocable::Impl<Sig> {
//
// WARNING: An `AnyInvocable` that wraps an empty `std::function` is not
// itself empty. This behavior is consistent with the standard equivalent
- // `std::move_only_function`.
- //
- // In other words:
+ // `std::move_only_function`. In the following example, `a()` will actually
+ // invoke `f()`, leading to an `std::bad_function_call` exception:
// std::function<void()> f; // empty
- // absl::AnyInvocable<void()> a = std::move(f); // not empty
+ // absl::AnyInvocable<void()> a = f; // not empty
//
// Invoking an empty `AnyInvocable` results in undefined behavior.
explicit operator bool() const noexcept { return this->HasValue(); }
diff --git a/contrib/restricted/abseil-cpp/absl/functional/function_ref.h b/contrib/restricted/abseil-cpp/absl/functional/function_ref.h
index 96cece551b5..f1d087a77a2 100644
--- a/contrib/restricted/abseil-cpp/absl/functional/function_ref.h
+++ b/contrib/restricted/abseil-cpp/absl/functional/function_ref.h
@@ -82,17 +82,12 @@ class FunctionRef;
// // replaced by an `absl::FunctionRef`:
// bool Visitor(absl::FunctionRef<void(my_proto&, absl::string_view)>
// callback);
-//
-// Note: the assignment operator within an `absl::FunctionRef` is intentionally
-// deleted to prevent misuse; because the `absl::FunctionRef` does not own the
-// underlying type, assignment likely indicates misuse.
template <typename R, typename... Args>
class FunctionRef<R(Args...)> {
private:
// Used to disable constructors for objects that are not compatible with the
// signature of this FunctionRef.
- template <typename F,
- typename FR = absl::base_internal::invoke_result_t<F, Args&&...>>
+ template <typename F, typename FR = std::invoke_result_t<F, Args&&...>>
using EnableIfCompatible =
typename std::enable_if<std::is_void<R>::value ||
std::is_convertible<FR, R>::value>::type;
@@ -122,9 +117,7 @@ class FunctionRef<R(Args...)> {
ptr_.fun = reinterpret_cast<decltype(ptr_.fun)>(f);
}
- // To help prevent subtle lifetime bugs, FunctionRef is not assignable.
- // Typically, it should only be used as an argument type.
- FunctionRef& operator=(const FunctionRef& rhs) = delete;
+ FunctionRef& operator=(const FunctionRef& rhs) = default;
FunctionRef(const FunctionRef& rhs) = default;
// Call the underlying object.
diff --git a/contrib/restricted/abseil-cpp/absl/functional/internal/any_invocable.h b/contrib/restricted/abseil-cpp/absl/functional/internal/any_invocable.h
index c2d8cd4727a..167d947d4c5 100644
--- a/contrib/restricted/abseil-cpp/absl/functional/internal/any_invocable.h
+++ b/contrib/restricted/abseil-cpp/absl/functional/internal/any_invocable.h
@@ -65,7 +65,6 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
-#include "absl/base/internal/invoke.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/meta/type_traits.h"
@@ -74,15 +73,6 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-// Helper macro used to prevent spelling `noexcept` in language versions older
-// than C++17, where it is not part of the type system, in order to avoid
-// compilation failures and internal compiler errors.
-#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
-#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex)
-#else
-#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex)
-#endif
-
// Defined in functional/any_invocable.h
template <class Sig>
class AnyInvocable;
@@ -107,44 +97,30 @@ struct IsAnyInvocable<AnyInvocable<Sig>> : std::true_type {};
//
////////////////////////////////////////////////////////////////////////////////
-// A type trait that tells us whether or not a target function type should be
+// A metafunction that tells us whether or not a target function type should be
// stored locally in the small object optimization storage
template <class T>
-using IsStoredLocally = std::integral_constant<
- bool, sizeof(T) <= kStorageSize && alignof(T) <= kAlignment &&
- kAlignment % alignof(T) == 0 &&
- std::is_nothrow_move_constructible<T>::value>;
+constexpr bool IsStoredLocally() {
+ if constexpr (sizeof(T) <= kStorageSize && alignof(T) <= kAlignment &&
+ kAlignment % alignof(T) == 0) {
+ return std::is_nothrow_move_constructible<T>::value;
+ }
+ return false;
+}
// An implementation of std::remove_cvref_t of C++20.
template <class T>
using RemoveCVRef =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
-////////////////////////////////////////////////////////////////////////////////
-//
-// An implementation of the C++ standard INVOKE<R> pseudo-macro, operation is
-// equivalent to std::invoke except that it forces an implicit conversion to the
-// specified return type. If "R" is void, the function is executed and the
-// return value is simply ignored.
-template <class ReturnType, class F, class... P,
- typename = absl::enable_if_t<std::is_void<ReturnType>::value>>
-void InvokeR(F&& f, P&&... args) {
- absl::base_internal::invoke(std::forward<F>(f), std::forward<P>(args)...);
-}
-
-template <class ReturnType, class F, class... P,
- absl::enable_if_t<!std::is_void<ReturnType>::value, int> = 0>
+// An implementation of std::invoke_r of C++23.
+template <class ReturnType, class F, class... P>
ReturnType InvokeR(F&& f, P&&... args) {
- // GCC 12 has a false-positive -Wmaybe-uninitialized warning here.
-#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
- return absl::base_internal::invoke(std::forward<F>(f),
- std::forward<P>(args)...);
-#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
-#pragma GCC diagnostic pop
-#endif
+ if constexpr (std::is_void_v<ReturnType>) {
+ std::invoke(std::forward<F>(f), std::forward<P>(args)...);
+ } else {
+ return std::invoke(std::forward<F>(f), std::forward<P>(args)...);
+ }
}
//
@@ -198,32 +174,14 @@ union TypeErasedState {
} remote;
// Local-storage for the type-erased object when small and trivial enough
- alignas(kAlignment) char storage[kStorageSize];
+ alignas(kAlignment) unsigned char storage[kStorageSize];
};
// A typed accessor for the object in `TypeErasedState` storage
template <class T>
T& ObjectInLocalStorage(TypeErasedState* const state) {
// We launder here because the storage may be reused with the same type.
-#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
return *std::launder(reinterpret_cast<T*>(&state->storage));
-#elif ABSL_HAVE_BUILTIN(__builtin_launder)
- return *__builtin_launder(reinterpret_cast<T*>(&state->storage));
-#else
-
- // When `std::launder` or equivalent are not available, we rely on undefined
- // behavior, which works as intended on Abseil's officially supported
- // platforms as of Q2 2022.
-#if !defined(__clang__) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wstrict-aliasing"
-#endif
- return *reinterpret_cast<T*>(&state->storage);
-#if !defined(__clang__) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
-
-#endif
}
// The type for functions issuing lifetime-related operations: move and dispose
@@ -231,14 +189,14 @@ T& ObjectInLocalStorage(TypeErasedState* const state) {
// NOTE: When specifying `FunctionToCall::`dispose, the same state must be
// passed as both "from" and "to".
using ManagerType = void(FunctionToCall /*operation*/,
- TypeErasedState* /*from*/, TypeErasedState* /*to*/)
- ABSL_INTERNAL_NOEXCEPT_SPEC(true);
+ TypeErasedState* /*from*/,
+ TypeErasedState* /*to*/) noexcept(true);
// The type for functions issuing the actual invocation of the object
// A pointer to such a function is contained in each AnyInvocable instance.
template <bool SigIsNoexcept, class ReturnType, class... P>
-using InvokerType = ReturnType(TypeErasedState*, ForwardedParameterType<P>...)
- ABSL_INTERNAL_NOEXCEPT_SPEC(SigIsNoexcept);
+using InvokerType = ReturnType(
+ TypeErasedState*, ForwardedParameterType<P>...) noexcept(SigIsNoexcept);
// The manager that is used when AnyInvocable is empty
inline void EmptyManager(FunctionToCall /*operation*/,
@@ -275,7 +233,7 @@ template <class T>
void LocalManagerNontrivial(FunctionToCall operation,
TypeErasedState* const from,
TypeErasedState* const to) noexcept {
- static_assert(IsStoredLocally<T>::value,
+ static_assert(IsStoredLocally<T>(),
"Local storage must only be used for supported types.");
static_assert(!std::is_trivially_copyable<T>::value,
"Locally stored types must be trivially copyable.");
@@ -303,7 +261,7 @@ ReturnType LocalInvoker(
ForwardedParameterType<P>... args) noexcept(SigIsNoexcept) {
using RawT = RemoveCVRef<QualTRef>;
static_assert(
- IsStoredLocally<RawT>::value,
+ IsStoredLocally<RawT>(),
"Target object must be in local storage in order to be invoked from it.");
auto& f = (ObjectInLocalStorage<RawT>)(state);
@@ -338,7 +296,7 @@ template <class T>
void RemoteManagerNontrivial(FunctionToCall operation,
TypeErasedState* const from,
TypeErasedState* const to) noexcept {
- static_assert(!IsStoredLocally<T>::value,
+ static_assert(!IsStoredLocally<T>(),
"Remote storage must only be used for types that do not "
"qualify for local storage.");
@@ -360,7 +318,7 @@ ReturnType RemoteInvoker(
TypeErasedState* const state,
ForwardedParameterType<P>... args) noexcept(SigIsNoexcept) {
using RawT = RemoveCVRef<QualTRef>;
- static_assert(!IsStoredLocally<RawT>::value,
+ static_assert(!IsStoredLocally<RawT>(),
"Target object must be in remote storage in order to be "
"invoked from it.");
@@ -440,13 +398,6 @@ class CoreImpl {
CoreImpl() noexcept : manager_(EmptyManager), invoker_(nullptr) {}
- enum class TargetType {
- kPointer,
- kCompatibleAnyInvocable,
- kIncompatibleAnyInvocable,
- kOther,
- };
-
// Note: QualDecayedTRef here includes the cv-ref qualifiers associated with
// the invocation of the Invocable. The unqualified type is the target object
// type to be stored.
@@ -454,19 +405,47 @@ class CoreImpl {
explicit CoreImpl(TypedConversionConstruct<QualDecayedTRef>, F&& f) {
using DecayedT = RemoveCVRef<QualDecayedTRef>;
- constexpr TargetType kTargetType =
- (std::is_pointer<DecayedT>::value ||
- std::is_member_pointer<DecayedT>::value)
- ? TargetType::kPointer
- : IsCompatibleAnyInvocable<DecayedT>::value
- ? TargetType::kCompatibleAnyInvocable
- : IsAnyInvocable<DecayedT>::value
- ? TargetType::kIncompatibleAnyInvocable
- : TargetType::kOther;
- // NOTE: We only use integers instead of enums as template parameters in
- // order to work around a bug on C++14 under MSVC 2017.
- // See b/236131881.
- Initialize<kTargetType, QualDecayedTRef>(std::forward<F>(f));
+ if constexpr (std::is_pointer<DecayedT>::value ||
+ std::is_member_pointer<DecayedT>::value) {
+ // This condition handles types that decay into pointers. This includes
+ // function references, which cannot be null. GCC warns against comparing
+ // their decayed form with nullptr (https://godbolt.org/z/9r9TMTcPK).
+ // We could work around this warning with constexpr programming, using
+ // std::is_function_v<std::remove_reference_t<F>>, but we choose to ignore
+ // it instead of writing more code.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpragmas"
+#pragma GCC diagnostic ignored "-Waddress"
+#pragma GCC diagnostic ignored "-Wnonnull-compare"
+#endif
+ if (static_cast<DecayedT>(f) == nullptr) {
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ manager_ = EmptyManager;
+ invoker_ = nullptr;
+ } else {
+ InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
+ }
+ } else if constexpr (IsCompatibleAnyInvocable<DecayedT>::value) {
+ // In this case we can "steal the guts" of the other AnyInvocable.
+ f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_);
+ manager_ = f.manager_;
+ invoker_ = f.invoker_;
+
+ f.manager_ = EmptyManager;
+ f.invoker_ = nullptr;
+ } else if constexpr (IsAnyInvocable<DecayedT>::value) {
+ if (f.HasValue()) {
+ InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
+ } else {
+ manager_ = EmptyManager;
+ invoker_ = nullptr;
+ }
+ } else {
+ InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
+ }
}
// Note: QualTRef here includes the cv-ref qualifiers associated with the
@@ -517,122 +496,43 @@ class CoreImpl {
invoker_ = nullptr;
}
- template <TargetType target_type, class QualDecayedTRef, class F,
- absl::enable_if_t<target_type == TargetType::kPointer, int> = 0>
- void Initialize(F&& f) {
-// This condition handles types that decay into pointers, which includes
-// function references. Since function references cannot be null, GCC warns
-// against comparing their decayed form with nullptr.
-// Since this is template-heavy code, we prefer to disable these warnings
-// locally instead of adding yet another overload of this function.
-#if !defined(__clang__) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Waddress"
-#pragma GCC diagnostic ignored "-Wnonnull-compare"
-#endif
- if (static_cast<RemoveCVRef<QualDecayedTRef>>(f) == nullptr) {
-#if !defined(__clang__) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
- manager_ = EmptyManager;
- invoker_ = nullptr;
- return;
- }
- InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
- }
-
- template <TargetType target_type, class QualDecayedTRef, class F,
- absl::enable_if_t<
- target_type == TargetType::kCompatibleAnyInvocable, int> = 0>
- void Initialize(F&& f) {
- // In this case we can "steal the guts" of the other AnyInvocable.
- f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_);
- manager_ = f.manager_;
- invoker_ = f.invoker_;
-
- f.manager_ = EmptyManager;
- f.invoker_ = nullptr;
- }
-
- template <TargetType target_type, class QualDecayedTRef, class F,
- absl::enable_if_t<
- target_type == TargetType::kIncompatibleAnyInvocable, int> = 0>
- void Initialize(F&& f) {
- if (f.HasValue()) {
- InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
- } else {
- manager_ = EmptyManager;
- invoker_ = nullptr;
- }
- }
-
- template <TargetType target_type, class QualDecayedTRef, class F,
- typename = absl::enable_if_t<target_type == TargetType::kOther>>
- void Initialize(F&& f) {
- InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
- }
-
// Use local (inline) storage for applicable target object types.
- template <class QualTRef, class... Args,
- typename = absl::enable_if_t<
- IsStoredLocally<RemoveCVRef<QualTRef>>::value>>
+ template <class QualTRef, class... Args>
void InitializeStorage(Args&&... args) {
using RawT = RemoveCVRef<QualTRef>;
- ::new (static_cast<void*>(&state_.storage))
- RawT(std::forward<Args>(args)...);
-
- invoker_ = LocalInvoker<SigIsNoexcept, ReturnType, QualTRef, P...>;
- // We can simplify our manager if we know the type is trivially copyable.
- InitializeLocalManager<RawT>();
- }
-
- // Use remote storage for target objects that cannot be stored locally.
- template <class QualTRef, class... Args,
- absl::enable_if_t<!IsStoredLocally<RemoveCVRef<QualTRef>>::value,
- int> = 0>
- void InitializeStorage(Args&&... args) {
- InitializeRemoteManager<RemoveCVRef<QualTRef>>(std::forward<Args>(args)...);
- // This is set after everything else in case an exception is thrown in an
- // earlier step of the initialization.
- invoker_ = RemoteInvoker<SigIsNoexcept, ReturnType, QualTRef, P...>;
- }
-
- template <class T,
- typename = absl::enable_if_t<std::is_trivially_copyable<T>::value>>
- void InitializeLocalManager() {
- manager_ = LocalManagerTrivial;
- }
-
- template <class T,
- absl::enable_if_t<!std::is_trivially_copyable<T>::value, int> = 0>
- void InitializeLocalManager() {
- manager_ = LocalManagerNontrivial<T>;
- }
-
- template <class T>
- using HasTrivialRemoteStorage =
- std::integral_constant<bool, std::is_trivially_destructible<T>::value &&
- alignof(T) <=
- ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT>;
-
- template <class T, class... Args,
- typename = absl::enable_if_t<HasTrivialRemoteStorage<T>::value>>
- void InitializeRemoteManager(Args&&... args) {
- // unique_ptr is used for exception-safety in case construction throws.
- std::unique_ptr<void, TrivialDeleter> uninitialized_target(
- ::operator new(sizeof(T)), TrivialDeleter(sizeof(T)));
- ::new (uninitialized_target.get()) T(std::forward<Args>(args)...);
- state_.remote.target = uninitialized_target.release();
- state_.remote.size = sizeof(T);
- manager_ = RemoteManagerTrivial;
+ if constexpr (IsStoredLocally<RawT>()) {
+ ::new (static_cast<void*>(&state_.storage))
+ RawT(std::forward<Args>(args)...);
+ invoker_ = LocalInvoker<SigIsNoexcept, ReturnType, QualTRef, P...>;
+ // We can simplify our manager if we know the type is trivially copyable.
+ if constexpr (std::is_trivially_copyable_v<RawT>) {
+ manager_ = LocalManagerTrivial;
+ } else {
+ manager_ = LocalManagerNontrivial<RawT>;
+ }
+ } else {
+ InitializeRemoteManager<RawT>(std::forward<Args>(args)...);
+ // This is set after everything else in case an exception is thrown in an
+ // earlier step of the initialization.
+ invoker_ = RemoteInvoker<SigIsNoexcept, ReturnType, QualTRef, P...>;
+ }
}
- template <class T, class... Args,
- absl::enable_if_t<!HasTrivialRemoteStorage<T>::value, int> = 0>
+ template <class T, class... Args>
void InitializeRemoteManager(Args&&... args) {
- state_.remote.target = ::new T(std::forward<Args>(args)...);
- manager_ = RemoteManagerNontrivial<T>;
+ if constexpr (std::is_trivially_destructible_v<T> &&
+ alignof(T) <= ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT) {
+ // unique_ptr is used for exception-safety in case construction throws.
+ std::unique_ptr<void, TrivialDeleter> uninitialized_target(
+ ::operator new(sizeof(T)), TrivialDeleter(sizeof(T)));
+ ::new (uninitialized_target.get()) T(std::forward<Args>(args)...);
+ state_.remote.target = uninitialized_target.release();
+ state_.remote.size = sizeof(T);
+ manager_ = RemoteManagerTrivial;
+ } else {
+ state_.remote.target = ::new T(std::forward<Args>(args)...);
+ manager_ = RemoteManagerNontrivial<T>;
+ }
}
//////////////////////////////////////////////////////////////////////////////
@@ -734,17 +634,12 @@ using CanAssignReferenceWrapper = TrueAlias<
absl::enable_if_t<Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<
std::reference_wrapper<F>>::value>>;
-////////////////////////////////////////////////////////////////////////////////
-//
// The constraint for checking whether or not a call meets the noexcept
-// callability requirements. This is a preprocessor macro because specifying it
+// callability requirements. We use a preprocessor macro because specifying it
// this way as opposed to a disjunction/branch can improve the user-side error
// messages and avoids an instantiation of std::is_nothrow_invocable_r in the
// cases where the user did not specify a noexcept function type.
//
-#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, noex) \
- ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex(inv_quals)
-
// The disjunction below is because we can't rely on std::is_nothrow_invocable_r
// to give the right result when ReturnType is non-moveable in toolchains that
// don't treat non-moveable result types correctly. For example this was the
@@ -759,7 +654,7 @@ using CanAssignReferenceWrapper = TrueAlias<
UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals, P...>, \
std::is_same< \
ReturnType, \
- absl::base_internal::invoke_result_t< \
+ std::invoke_result_t< \
UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals, \
P...>>>>::value>
@@ -775,13 +670,13 @@ using CanAssignReferenceWrapper = TrueAlias<
// noex is "true" if the function type is noexcept, or false if it is not.
//
// The CallIsValid condition is more complicated than simply using
-// absl::base_internal::is_invocable_r because we can't rely on it to give the
-// right result when ReturnType is non-moveable in toolchains that don't treat
-// non-moveable result types correctly. For example this was the case in libc++
-// before commit c3a24882 (2022-05).
+// std::is_invocable_r because we can't rely on it to give the right result
+// when ReturnType is non-moveable in toolchains that don't treat non-moveable
+// result types correctly. For example this was the case in libc++ before commit
+// c3a24882 (2022-05).
#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, noex) \
template <class ReturnType, class... P> \
- class Impl<ReturnType(P...) cv ref ABSL_INTERNAL_NOEXCEPT_SPEC(noex)> \
+ class Impl<ReturnType(P...) cv ref noexcept(noex)> \
: public CoreImpl<noex, ReturnType, P...> { \
public: \
/*The base class, which contains the datamembers and core operations*/ \
@@ -790,17 +685,16 @@ using CanAssignReferenceWrapper = TrueAlias<
/*SFINAE constraint to check if F is invocable with the proper signature*/ \
template <class F> \
using CallIsValid = TrueAlias<absl::enable_if_t<absl::disjunction< \
- absl::base_internal::is_invocable_r<ReturnType, \
- absl::decay_t<F> inv_quals, P...>, \
- std::is_same<ReturnType, \
- absl::base_internal::invoke_result_t< \
- absl::decay_t<F> inv_quals, P...>>>::value>>; \
+ std::is_invocable_r<ReturnType, absl::decay_t<F> inv_quals, P...>, \
+ std::is_same< \
+ ReturnType, \
+ std::invoke_result_t<absl::decay_t<F> inv_quals, P...>>>::value>>; \
\
/*SFINAE constraint to check if F is nothrow-invocable when necessary*/ \
template <class F> \
using CallIsNoexceptIfSigIsNoexcept = \
- TrueAlias<ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, \
- noex)>; \
+ TrueAlias<ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex( \
+ inv_quals)>; \
\
/*Put the AnyInvocable into an empty state.*/ \
Impl() = default; \
@@ -822,8 +716,7 @@ using CanAssignReferenceWrapper = TrueAlias<
\
/*Raises a fatal error when the AnyInvocable is invoked after a move*/ \
static ReturnType InvokedAfterMove( \
- TypeErasedState*, \
- ForwardedParameterType<P>...) noexcept(noex) { \
+ TypeErasedState*, ForwardedParameterType<P>...) noexcept(noex) { \
ABSL_HARDENING_ASSERT(false && "AnyInvocable use-after-move"); \
std::terminate(); \
} \
@@ -851,18 +744,11 @@ using CanAssignReferenceWrapper = TrueAlias<
} \
}
-// Define the `noexcept(true)` specialization only for C++17 and beyond, when
-// `noexcept` is part of the type system.
-#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
// A convenience macro that defines specializations for the noexcept(true) and
// noexcept(false) forms, given the other properties.
#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \
ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false); \
ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, true)
-#else
-#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \
- ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false)
-#endif
// Non-ref-qualified partial specializations
ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, , &);
@@ -881,8 +767,6 @@ ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&);
#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL_
#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false
#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true
-#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT
-#undef ABSL_INTERNAL_NOEXCEPT_SPEC
} // namespace internal_any_invocable
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/functional/internal/front_binder.h b/contrib/restricted/abseil-cpp/absl/functional/internal/front_binder.h
index 44a5492897a..62f373f0496 100644
--- a/contrib/restricted/abseil-cpp/absl/functional/internal/front_binder.h
+++ b/contrib/restricted/abseil-cpp/absl/functional/internal/front_binder.h
@@ -21,7 +21,6 @@
#include <type_traits>
#include <utility>
-#include "absl/base/internal/invoke.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
@@ -33,9 +32,8 @@ namespace functional_internal {
// Invoke the method, expanding the tuple of bound arguments.
template <class R, class Tuple, size_t... Idx, class... Args>
R Apply(Tuple&& bound, absl::index_sequence<Idx...>, Args&&... free) {
- return base_internal::invoke(
- std::forward<Tuple>(bound).template get<Idx>()...,
- std::forward<Args>(free)...);
+ return std::invoke(std::forward<Tuple>(bound).template get<Idx>()...,
+ std::forward<Args>(free)...);
}
template <class F, class... BoundArgs>
@@ -50,23 +48,23 @@ class FrontBinder {
constexpr explicit FrontBinder(absl::in_place_t, Ts&&... ts)
: bound_args_(std::forward<Ts>(ts)...) {}
- template <class... FreeArgs, class R = base_internal::invoke_result_t<
- F&, BoundArgs&..., FreeArgs&&...>>
+ template <class... FreeArgs,
+ class R = std::invoke_result_t<F&, BoundArgs&..., FreeArgs&&...>>
R operator()(FreeArgs&&... free_args) & {
return functional_internal::Apply<R>(bound_args_, Idx(),
std::forward<FreeArgs>(free_args)...);
}
template <class... FreeArgs,
- class R = base_internal::invoke_result_t<
- const F&, const BoundArgs&..., FreeArgs&&...>>
+ class R = std::invoke_result_t<const F&, const BoundArgs&...,
+ FreeArgs&&...>>
R operator()(FreeArgs&&... free_args) const& {
return functional_internal::Apply<R>(bound_args_, Idx(),
std::forward<FreeArgs>(free_args)...);
}
- template <class... FreeArgs, class R = base_internal::invoke_result_t<
- F&&, BoundArgs&&..., FreeArgs&&...>>
+ template <class... FreeArgs,
+ class R = std::invoke_result_t<F&&, BoundArgs&&..., FreeArgs&&...>>
R operator()(FreeArgs&&... free_args) && {
// This overload is called when *this is an rvalue. If some of the bound
// arguments are stored by value or rvalue reference, we move them.
@@ -75,8 +73,8 @@ class FrontBinder {
}
template <class... FreeArgs,
- class R = base_internal::invoke_result_t<
- const F&&, const BoundArgs&&..., FreeArgs&&...>>
+ class R = std::invoke_result_t<const F&&, const BoundArgs&&...,
+ FreeArgs&&...>>
R operator()(FreeArgs&&... free_args) const&& {
// This overload is called when *this is an rvalue. If some of the bound
// arguments are stored by value or rvalue reference, we move them.
diff --git a/contrib/restricted/abseil-cpp/absl/functional/internal/function_ref.h b/contrib/restricted/abseil-cpp/absl/functional/internal/function_ref.h
index 1cd34a3c766..27d45b886de 100644
--- a/contrib/restricted/abseil-cpp/absl/functional/internal/function_ref.h
+++ b/contrib/restricted/abseil-cpp/absl/functional/internal/function_ref.h
@@ -19,7 +19,6 @@
#include <functional>
#include <type_traits>
-#include "absl/base/internal/invoke.h"
#include "absl/functional/any_invocable.h"
#include "absl/meta/type_traits.h"
@@ -74,15 +73,13 @@ using Invoker = R (*)(VoidPtr, typename ForwardT<Args>::type...);
template <typename Obj, typename R, typename... Args>
R InvokeObject(VoidPtr ptr, typename ForwardT<Args>::type... args) {
auto o = static_cast<const Obj*>(ptr.obj);
- return static_cast<R>(
- absl::base_internal::invoke(*o, std::forward<Args>(args)...));
+ return static_cast<R>(std::invoke(*o, std::forward<Args>(args)...));
}
template <typename Fun, typename R, typename... Args>
R InvokeFunction(VoidPtr ptr, typename ForwardT<Args>::type... args) {
auto f = reinterpret_cast<Fun>(ptr.fun);
- return static_cast<R>(
- absl::base_internal::invoke(f, std::forward<Args>(args)...));
+ return static_cast<R>(std::invoke(f, std::forward<Args>(args)...));
}
template <typename Sig>
diff --git a/contrib/restricted/abseil-cpp/absl/functional/overload.h b/contrib/restricted/abseil-cpp/absl/functional/overload.h
index 7e19e705030..35eec96292d 100644
--- a/contrib/restricted/abseil-cpp/absl/functional/overload.h
+++ b/contrib/restricted/abseil-cpp/absl/functional/overload.h
@@ -23,8 +23,6 @@
// Before using this function, consider whether named function overloads would
// be a better design.
//
-// Note: absl::Overload requires C++17.
-//
// Example:
//
// std::variant<std::string, int32_t, int64_t> v(int32_t{1});
@@ -46,9 +44,6 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
- ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
-
template <typename... T>
struct Overload final : T... {
using T::operator()...;
@@ -71,21 +66,6 @@ struct Overload final : T... {
template <typename... T>
Overload(T...) -> Overload<T...>;
-#else
-
-namespace functional_internal {
-template <typename T>
-constexpr bool kDependentFalse = false;
-}
-
-template <typename Dependent = int, typename... T>
-auto Overload(T&&...) {
- static_assert(functional_internal::kDependentFalse<Dependent>,
- "Overload is only usable with C++17 or above.");
-}
-
-#endif
-
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/hash/hash.h b/contrib/restricted/abseil-cpp/absl/hash/hash.h
index 479b17b7b17..23f4e9d374c 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/hash.h
+++ b/contrib/restricted/abseil-cpp/absl/hash/hash.h
@@ -87,6 +87,7 @@
#include "absl/base/config.h"
#include "absl/functional/function_ref.h"
#include "absl/hash/internal/hash.h"
+#include "absl/hash/internal/weakly_mixed_integer.h"
#include "absl/meta/type_traits.h"
namespace absl {
@@ -356,6 +357,12 @@ class HashState : public hash_internal::HashStateBase<HashState> {
hash_state.combine_contiguous_(hash_state.state_, first, size);
return hash_state;
}
+
+ static HashState combine_weakly_mixed_integer(
+ HashState hash_state, hash_internal::WeaklyMixedInteger value) {
+ hash_state.combine_weakly_mixed_integer_(hash_state.state_, value);
+ return hash_state;
+ }
using HashState::HashStateBase::combine_contiguous;
private:
@@ -371,6 +378,13 @@ class HashState : public hash_internal::HashStateBase<HashState> {
state = T::combine_contiguous(std::move(state), first, size);
}
+ template <typename T>
+ static void CombineWeaklyMixedIntegerImpl(
+ void* p, hash_internal::WeaklyMixedInteger value) {
+ T& state = *static_cast<T*>(p);
+ state = T::combine_weakly_mixed_integer(std::move(state), value);
+ }
+
static HashState combine_raw(HashState hash_state, uint64_t value) {
hash_state.combine_raw_(hash_state.state_, value);
return hash_state;
@@ -385,6 +399,7 @@ class HashState : public hash_internal::HashStateBase<HashState> {
template <typename T>
void Init(T* state) {
state_ = state;
+ combine_weakly_mixed_integer_ = &CombineWeaklyMixedIntegerImpl<T>;
combine_contiguous_ = &CombineContiguousImpl<T>;
combine_raw_ = &CombineRawImpl<T>;
run_combine_unordered_ = &RunCombineUnorderedImpl<T>;
@@ -424,6 +439,7 @@ class HashState : public hash_internal::HashStateBase<HashState> {
// Do not erase an already erased state.
void Init(HashState* state) {
state_ = state->state_;
+ combine_weakly_mixed_integer_ = state->combine_weakly_mixed_integer_;
combine_contiguous_ = state->combine_contiguous_;
combine_raw_ = state->combine_raw_;
run_combine_unordered_ = state->run_combine_unordered_;
@@ -435,6 +451,8 @@ class HashState : public hash_internal::HashStateBase<HashState> {
}
void* state_;
+ void (*combine_weakly_mixed_integer_)(
+ void*, absl::hash_internal::WeaklyMixedInteger);
void (*combine_contiguous_)(void*, const unsigned char*, size_t);
void (*combine_raw_)(void*, uint64_t);
HashState (*run_combine_unordered_)(
diff --git a/contrib/restricted/abseil-cpp/absl/hash/hash_testing.h b/contrib/restricted/abseil-cpp/absl/hash/hash_testing.h
index 673366deefc..817a40da58e 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/hash_testing.h
+++ b/contrib/restricted/abseil-cpp/absl/hash/hash_testing.h
@@ -15,7 +15,9 @@
#ifndef ABSL_HASH_HASH_TESTING_H_
#define ABSL_HASH_HASH_TESTING_H_
+#include <cstddef>
#include <initializer_list>
+#include <string>
#include <tuple>
#include <type_traits>
#include <vector>
@@ -141,21 +143,20 @@ ABSL_NAMESPACE_BEGIN
// }
//
template <int&... ExplicitBarrier, typename Container>
-ABSL_MUST_USE_RESULT testing::AssertionResult
-VerifyTypeImplementsAbslHashCorrectly(const Container& values);
+testing::AssertionResult VerifyTypeImplementsAbslHashCorrectly(
+ const Container& values);
template <int&... ExplicitBarrier, typename Container, typename Eq>
-ABSL_MUST_USE_RESULT testing::AssertionResult
-VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals);
+testing::AssertionResult VerifyTypeImplementsAbslHashCorrectly(
+ const Container& values, Eq equals);
template <int&..., typename T>
-ABSL_MUST_USE_RESULT testing::AssertionResult
-VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values);
+testing::AssertionResult VerifyTypeImplementsAbslHashCorrectly(
+ std::initializer_list<T> values);
template <int&..., typename T, typename Eq>
-ABSL_MUST_USE_RESULT testing::AssertionResult
-VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values,
- Eq equals);
+testing::AssertionResult VerifyTypeImplementsAbslHashCorrectly(
+ std::initializer_list<T> values, Eq equals);
namespace hash_internal {
@@ -184,8 +185,8 @@ struct ExpandVisitor {
};
template <typename Container, typename Eq>
-ABSL_MUST_USE_RESULT testing::AssertionResult
-VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) {
+testing::AssertionResult VerifyTypeImplementsAbslHashCorrectly(
+ const Container& values, Eq equals) {
using V = typename Container::value_type;
struct Info {
@@ -343,32 +344,31 @@ struct DefaultEquals {
} // namespace hash_internal
template <int&..., typename Container>
-ABSL_MUST_USE_RESULT testing::AssertionResult
-VerifyTypeImplementsAbslHashCorrectly(const Container& values) {
+testing::AssertionResult VerifyTypeImplementsAbslHashCorrectly(
+ const Container& values) {
return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
hash_internal::ContainerAsVector<Container>::Do(values),
hash_internal::DefaultEquals{});
}
template <int&..., typename Container, typename Eq>
-ABSL_MUST_USE_RESULT testing::AssertionResult
-VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) {
+testing::AssertionResult VerifyTypeImplementsAbslHashCorrectly(
+ const Container& values, Eq equals) {
return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
hash_internal::ContainerAsVector<Container>::Do(values), equals);
}
template <int&..., typename T>
-ABSL_MUST_USE_RESULT testing::AssertionResult
-VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values) {
+testing::AssertionResult VerifyTypeImplementsAbslHashCorrectly(
+ std::initializer_list<T> values) {
return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
hash_internal::ContainerAsVector<std::initializer_list<T>>::Do(values),
hash_internal::DefaultEquals{});
}
template <int&..., typename T, typename Eq>
-ABSL_MUST_USE_RESULT testing::AssertionResult
-VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values,
- Eq equals) {
+testing::AssertionResult VerifyTypeImplementsAbslHashCorrectly(
+ std::initializer_list<T> values, Eq equals) {
return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
hash_internal::ContainerAsVector<std::initializer_list<T>>::Do(values),
equals);
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc
index e0a8ea99744..9abace5e2b7 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc
@@ -55,13 +55,9 @@ uint64_t MixingHashState::CombineLargeContiguousImpl64(
ABSL_CONST_INIT const void* const MixingHashState::kSeed = &kSeed;
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr uint64_t MixingHashState::kStaticRandomData[];
-#endif
-
uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data,
size_t len) {
- return LowLevelHashLenGt16(data, len, Seed(), &kStaticRandomData[0]);
+ return LowLevelHashLenGt32(data, len, Seed(), &kStaticRandomData[0]);
}
} // namespace hash_internal
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h
index f4a0d7857c3..63b35490b21 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h
@@ -24,15 +24,29 @@
#include <TargetConditionals.h>
#endif
+// We include config.h here to make sure that ABSL_INTERNAL_CPLUSPLUS_LANG is
+// defined.
#include "absl/base/config.h"
+// GCC15 warns that <ciso646> is deprecated in C++17 and suggests using
+// <version> instead, even though <version> is not available in C++17 mode prior
+// to GCC9.
+#if defined(__has_include)
+#if __has_include(<version>)
+#define ABSL_INTERNAL_VERSION_HEADER_AVAILABLE 1
+#endif
+#endif
+
// For feature testing and determining which headers can be included.
-#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L || \
+ ABSL_INTERNAL_VERSION_HEADER_AVAILABLE
#include <version>
#else
#include <ciso646>
#endif
+#undef ABSL_INTERNAL_VERSION_HEADER_AVAILABLE
+
#include <algorithm>
#include <array>
#include <bitset>
@@ -51,6 +65,7 @@
#include <memory>
#include <set>
#include <string>
+#include <string_view>
#include <tuple>
#include <type_traits>
#include <unordered_map>
@@ -65,7 +80,7 @@
#include "absl/base/port.h"
#include "absl/container/fixed_array.h"
#include "absl/hash/internal/city.h"
-#include "absl/hash/internal/low_level_hash.h"
+#include "absl/hash/internal/weakly_mixed_integer.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
@@ -78,14 +93,6 @@
#include <filesystem> // NOLINT
#endif
-#ifdef ABSL_HAVE_STD_STRING_VIEW
-#include <string_view>
-#endif
-
-#ifdef __ARM_ACLE
-#include <arm_acle.h>
-#endif
-
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -375,14 +382,14 @@ template <typename H, typename T,
H hash_bytes(H hash_state, const T& value) {
const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);
uint64_t v;
- if (sizeof(T) == 1) {
+ if constexpr (sizeof(T) == 1) {
v = *start;
- } else if (sizeof(T) == 2) {
+ } else if constexpr (sizeof(T) == 2) {
v = absl::base_internal::UnalignedLoad16(start);
- } else if (sizeof(T) == 4) {
+ } else if constexpr (sizeof(T) == 4) {
v = absl::base_internal::UnalignedLoad32(start);
} else {
- assert(sizeof(T) == 8);
+ static_assert(sizeof(T) == 8);
v = absl::base_internal::UnalignedLoad64(start);
}
return CombineRaw()(std::move(hash_state), v);
@@ -394,6 +401,11 @@ H hash_bytes(H hash_state, const T& value) {
return H::combine_contiguous(std::move(hash_state), start, sizeof(value));
}
+template <typename H>
+H hash_weakly_mixed_integer(H hash_state, WeaklyMixedInteger value) {
+ return H::combine_weakly_mixed_integer(std::move(hash_state), value);
+}
+
// -----------------------------------------------------------------------------
// AbslHashValue for Basic Types
// -----------------------------------------------------------------------------
@@ -512,7 +524,7 @@ H AbslHashValue(H hash_state, T C::*ptr) {
// padding (namely when they have 1 or 3 ints). The value below is a lower
// bound on the number of salient, non-padding bytes that we use for
// hashing.
- if (alignof(T C::*) == alignof(int)) {
+ if constexpr (alignof(T C::*) == alignof(int)) {
// No padding when all subobjects have the same size as the total
// alignment. This happens in 32-bit mode.
return n;
@@ -609,7 +621,7 @@ template <typename H>
H AbslHashValue(H hash_state, absl::string_view str) {
return H::combine(
H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
- str.size());
+ WeaklyMixedInteger{str.size()});
}
// Support std::wstring, std::u16string and std::u32string.
@@ -622,11 +634,9 @@ H AbslHashValue(
const std::basic_string<Char, std::char_traits<Char>, Alloc>& str) {
return H::combine(
H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
- str.size());
+ WeaklyMixedInteger{str.size()});
}
-#ifdef ABSL_HAVE_STD_STRING_VIEW
-
// Support std::wstring_view, std::u16string_view and std::u32string_view.
template <typename Char, typename H,
typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
@@ -635,11 +645,9 @@ template <typename Char, typename H,
H AbslHashValue(H hash_state, std::basic_string_view<Char> str) {
return H::combine(
H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
- str.size());
+ WeaklyMixedInteger{str.size()});
}
-#endif // ABSL_HAVE_STD_STRING_VIEW
-
#if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \
(!defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) || \
__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 130000) && \
@@ -685,7 +693,7 @@ typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
for (const auto& t : deque) {
hash_state = H::combine(std::move(hash_state), t);
}
- return H::combine(std::move(hash_state), deque.size());
+ return H::combine(std::move(hash_state), WeaklyMixedInteger{deque.size()});
}
// AbslHashValue for hashing std::forward_list
@@ -697,7 +705,7 @@ typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
hash_state = H::combine(std::move(hash_state), t);
++size;
}
- return H::combine(std::move(hash_state), size);
+ return H::combine(std::move(hash_state), WeaklyMixedInteger{size});
}
// AbslHashValue for hashing std::list
@@ -707,7 +715,7 @@ typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
for (const auto& t : list) {
hash_state = H::combine(std::move(hash_state), t);
}
- return H::combine(std::move(hash_state), list.size());
+ return H::combine(std::move(hash_state), WeaklyMixedInteger{list.size()});
}
// AbslHashValue for hashing std::vector
@@ -721,7 +729,7 @@ typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
return H::combine(H::combine_contiguous(std::move(hash_state), vector.data(),
vector.size()),
- vector.size());
+ WeaklyMixedInteger{vector.size()});
}
// AbslHashValue special cases for hashing std::vector<bool>
@@ -742,7 +750,8 @@ AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
unsigned char c = static_cast<unsigned char>(i);
hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
}
- return H::combine(combiner.finalize(std::move(hash_state)), vector.size());
+ return H::combine(combiner.finalize(std::move(hash_state)),
+ WeaklyMixedInteger{vector.size()});
}
#else
// When not working around the libstdc++ bug above, we still have to contend
@@ -758,7 +767,7 @@ typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
return H::combine(std::move(hash_state),
std::hash<std::vector<T, Allocator>>{}(vector),
- vector.size());
+ WeaklyMixedInteger{vector.size()});
}
#endif
@@ -775,7 +784,7 @@ AbslHashValue(H hash_state, const std::map<Key, T, Compare, Allocator>& map) {
for (const auto& t : map) {
hash_state = H::combine(std::move(hash_state), t);
}
- return H::combine(std::move(hash_state), map.size());
+ return H::combine(std::move(hash_state), WeaklyMixedInteger{map.size()});
}
// AbslHashValue for hashing std::multimap
@@ -788,7 +797,7 @@ AbslHashValue(H hash_state,
for (const auto& t : map) {
hash_state = H::combine(std::move(hash_state), t);
}
- return H::combine(std::move(hash_state), map.size());
+ return H::combine(std::move(hash_state), WeaklyMixedInteger{map.size()});
}
// AbslHashValue for hashing std::set
@@ -798,7 +807,7 @@ typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
for (const auto& t : set) {
hash_state = H::combine(std::move(hash_state), t);
}
- return H::combine(std::move(hash_state), set.size());
+ return H::combine(std::move(hash_state), WeaklyMixedInteger{set.size()});
}
// AbslHashValue for hashing std::multiset
@@ -808,7 +817,7 @@ typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
for (const auto& t : set) {
hash_state = H::combine(std::move(hash_state), t);
}
- return H::combine(std::move(hash_state), set.size());
+ return H::combine(std::move(hash_state), WeaklyMixedInteger{set.size()});
}
// -----------------------------------------------------------------------------
@@ -822,7 +831,7 @@ typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
H hash_state, const std::unordered_set<Key, Hash, KeyEqual, Alloc>& s) {
return H::combine(
H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
- s.size());
+ WeaklyMixedInteger{s.size()});
}
// AbslHashValue for hashing std::unordered_multiset
@@ -833,7 +842,7 @@ typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
const std::unordered_multiset<Key, Hash, KeyEqual, Alloc>& s) {
return H::combine(
H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
- s.size());
+ WeaklyMixedInteger{s.size()});
}
// AbslHashValue for hashing std::unordered_set
@@ -845,7 +854,7 @@ AbslHashValue(H hash_state,
const std::unordered_map<Key, T, Hash, KeyEqual, Alloc>& s) {
return H::combine(
H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
- s.size());
+ WeaklyMixedInteger{s.size()});
}
// AbslHashValue for hashing std::unordered_multiset
@@ -857,7 +866,7 @@ AbslHashValue(H hash_state,
const std::unordered_multimap<Key, T, Hash, KeyEqual, Alloc>& s) {
return H::combine(
H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
- s.size());
+ WeaklyMixedInteger{s.size()});
}
// -----------------------------------------------------------------------------
@@ -968,11 +977,20 @@ hash_range_or_bytes(H hash_state, const T* data, size_t size) {
// `false`.
struct HashSelect {
private:
+ struct WeaklyMixedIntegerProbe {
+ template <typename H>
+ static H Invoke(H state, WeaklyMixedInteger value) {
+ return hash_internal::hash_weakly_mixed_integer(std::move(state), value);
+ }
+ };
+
struct State : HashStateBase<State> {
static State combine_contiguous(State hash_state, const unsigned char*,
size_t);
using State::HashStateBase::combine_contiguous;
static State combine_raw(State state, uint64_t value);
+ static State combine_weakly_mixed_integer(State hash_state,
+ WeaklyMixedInteger value);
};
struct UniquelyRepresentedProbe {
@@ -1034,6 +1052,7 @@ struct HashSelect {
// disjunction provides short circuiting wrt instantiation.
template <typename T>
using Apply = absl::disjunction< //
+ Probe<WeaklyMixedIntegerProbe, T>, //
Probe<UniquelyRepresentedProbe, T>, //
Probe<HashValueProbe, T>, //
Probe<LegacyHashProbe, T>, //
@@ -1063,8 +1082,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
};
static constexpr uint64_t kMul =
- sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51}
- : uint64_t{0xdcb22ca68cb134ed};
+ uint64_t{0xdcb22ca68cb134ed};
template <typename T>
using IntegralFastPath =
@@ -1099,7 +1117,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
static size_t hash(T value) {
return static_cast<size_t>(
- WeakMix(Seed() ^ static_cast<std::make_unsigned_t<T>>(value)));
+ WeakMix(Seed(), static_cast<std::make_unsigned_t<T>>(value)));
}
// Overload of MixingHashState::hash()
@@ -1114,6 +1132,18 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
MixingHashState() : state_(Seed()) {}
friend class MixingHashState::HashStateBase;
+ template <typename H>
+ friend H absl::hash_internal::hash_weakly_mixed_integer(H,
+ WeaklyMixedInteger);
+
+ static MixingHashState combine_weakly_mixed_integer(
+ MixingHashState hash_state, WeaklyMixedInteger value) {
+ // Some transformation for the value is needed to make an empty
+ // string/container change the mixing hash state.
+ // We use constant smaller than 8 bits to make compiler use
+ // `add` with an immediate operand with 1 byte value.
+ return MixingHashState{hash_state.state_ + (0x57 + value.value)};
+ }
template <typename CombinerT>
static MixingHashState RunCombineUnordered(MixingHashState state,
@@ -1152,7 +1182,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// optimize Read1To3 and Read4To8 differently for the string case.
static MixingHashState combine_raw(MixingHashState hash_state,
uint64_t value) {
- return MixingHashState(WeakMix(hash_state.state_ ^ value));
+ return MixingHashState(WeakMix(hash_state.state_, value));
}
// Implementation of the base case for combine_contiguous where we actually
@@ -1180,7 +1210,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// Empty ranges have no effect.
return state;
}
- return WeakMix(state ^ v);
+ return WeakMix(state, v);
}
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t CombineContiguousImpl9to16(
@@ -1222,8 +1252,8 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
size_t len);
// Reads 9 to 16 bytes from p.
- // The least significant 8 bytes are in .first, the rest (zero padded) bytes
- // are in .second.
+ // The least significant 8 bytes are in .first, and the rest of the bytes are
+ // in .second along with duplicated bytes from .first if len<16.
static std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
size_t len) {
uint64_t low_mem = Read8(p);
@@ -1251,11 +1281,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
#endif
}
- // Reads 4 to 8 bytes from p. Zero pads to fill uint64_t.
- // TODO(b/384509507): consider optimizing this by not requiring the output to
- // be equivalent to an integer load for 4/8 bytes. Currently, we rely on this
- // behavior for the HashConsistentAcrossIntTypes test case. Ditto for
- // Read1To3.
+ // Reads 4 to 8 bytes from p. Some input bytes may be duplicated in output.
static uint64_t Read4To8(const unsigned char* p, size_t len) {
// If `len < 8`, we duplicate bytes in the middle.
// E.g.:
@@ -1274,7 +1300,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
return most_significant | least_significant;
}
- // Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
+ // Reads 1 to 3 bytes from p. Some input bytes may be duplicated in output.
static uint32_t Read1To3(const unsigned char* p, size_t len) {
// The trick used by this implementation is to avoid branches.
// We always read three bytes by duplicating.
@@ -1290,27 +1316,26 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
}
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t lhs, uint64_t rhs) {
+ // For 32 bit platforms we are trying to use all 64 lower bits.
+ if constexpr (sizeof(size_t) < 8) {
+ uint64_t m = lhs * rhs;
+ return m ^ (m >> 32);
+ }
// Though the 128-bit product on AArch64 needs two instructions, it is
// still a good balance between speed and hash quality.
- using MultType =
- absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
- MultType m = lhs;
+ uint128 m = lhs;
m *= rhs;
- return static_cast<uint64_t>(m ^ (m >> (sizeof(m) * 8 / 2)));
+ return Uint128High64(m) ^ Uint128Low64(m);
}
// Slightly lower latency than Mix, but with lower quality. The byte swap
// helps ensure that low bits still have high quality.
- ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t WeakMix(uint64_t n) {
+ ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t WeakMix(uint64_t lhs,
+ uint64_t rhs) {
+ const uint64_t n = lhs ^ rhs;
// WeakMix doesn't work well on 32-bit platforms so just use Mix.
- if (sizeof(size_t) < 8) return Mix(n, kMul);
-#ifdef __ARM_ACLE
- // gbswap_64 compiles to `rev` on ARM, but `rbit` is better because it
- // reverses bits rather than reversing bytes.
- return __rbitll(n * kMul);
-#else
+ if constexpr (sizeof(size_t) < 8) return Mix(n, kMul);
return absl::gbswap_64(n * kMul);
-#endif
}
// An extern to avoid bloat on a direct call to LowLevelHash() with fixed
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc
index ec02d7e7c5a..1a107ec674c 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc
@@ -14,29 +14,44 @@
#include "absl/hash/internal/low_level_hash.h"
+#include <cassert>
#include <cstddef>
#include <cstdint>
+#include "absl/base/config.h"
#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/optimization.h"
#include "absl/base/prefetch.h"
#include "absl/numeric/int128.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace hash_internal {
-
-static uint64_t Mix(uint64_t v0, uint64_t v1) {
+namespace {
+uint64_t Mix(uint64_t v0, uint64_t v1) {
absl::uint128 p = v0;
p *= v1;
return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
}
+uint64_t Mix32Bytes(const uint8_t* ptr, uint64_t current_state,
+ const uint64_t salt[5]) {
+ uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
+ uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+ uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
+ uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
+
+ uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
+ uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
+ return cs0 ^ cs1;
+}
+} // namespace
-uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
+uint64_t LowLevelHashLenGt32(const void* data, size_t len, uint64_t seed,
const uint64_t salt[5]) {
+ assert(len > 32);
const uint8_t* ptr = static_cast<const uint8_t*>(data);
- uint64_t starting_length = static_cast<uint64_t>(len);
- const uint8_t* last_16_ptr = ptr + starting_length - 16;
- uint64_t current_state = seed ^ salt[0];
+ uint64_t current_state = seed ^ salt[0] ^ len;
+ const uint8_t* last_32_ptr = ptr + len - 32;
if (len > 64) {
// If we have more than 64 bytes, we're going to handle chunks of 64
@@ -76,71 +91,13 @@ uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
// We now have a data `ptr` with at most 64 bytes and the current state
// of the hashing state machine stored in current_state.
if (len > 32) {
- uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
- uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
- uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
- uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
-
- uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
- uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
- current_state = cs0 ^ cs1;
-
- ptr += 32;
- len -= 32;
+ current_state = Mix32Bytes(ptr, current_state, salt);
}
// We now have a data `ptr` with at most 32 bytes and the current state
- // of the hashing state machine stored in current_state.
- if (len > 16) {
- uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
- uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
-
- current_state = Mix(a ^ salt[1], b ^ current_state);
- }
-
- // We now have a data `ptr` with at least 1 and at most 16 bytes. But we can
- // safely read from `ptr + len - 16`.
- uint64_t a = absl::base_internal::UnalignedLoad64(last_16_ptr);
- uint64_t b = absl::base_internal::UnalignedLoad64(last_16_ptr + 8);
-
- return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
-}
-
-uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
- const uint64_t salt[5]) {
- if (len > 16) return LowLevelHashLenGt16(data, len, seed, salt);
-
- // Prefetch the cacheline that data resides in.
- PrefetchToLocalCache(data);
- const uint8_t* ptr = static_cast<const uint8_t*>(data);
- uint64_t starting_length = static_cast<uint64_t>(len);
- uint64_t current_state = seed ^ salt[0];
- if (len == 0) return current_state;
-
- uint64_t a = 0;
- uint64_t b = 0;
-
- // We now have a data `ptr` with at least 1 and at most 16 bytes.
- if (len > 8) {
- // When we have at least 9 and at most 16 bytes, set A to the first 64
- // bits of the input and B to the last 64 bits of the input. Yes, they
- // will overlap in the middle if we are working with less than the full 16
- // bytes.
- a = absl::base_internal::UnalignedLoad64(ptr);
- b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
- } else if (len > 3) {
- // If we have at least 4 and at most 8 bytes, set A to the first 32
- // bits and B to the last 32 bits.
- a = absl::base_internal::UnalignedLoad32(ptr);
- b = absl::base_internal::UnalignedLoad32(ptr + len - 4);
- } else {
- // If we have at least 1 and at most 3 bytes, read 2 bytes into A and the
- // other byte into B, with some adjustments.
- a = static_cast<uint64_t>((ptr[0] << 8) | ptr[len - 1]);
- b = static_cast<uint64_t>(ptr[len >> 1]);
- }
-
- return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
+ // of the hashing state machine stored in current_state. But we can
+ // safely read from `ptr + len - 32`.
+ return Mix32Bytes(last_32_ptr, current_state, salt);
}
} // namespace hash_internal
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h
index d460e351980..49e9ec46bad 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h
@@ -35,16 +35,12 @@ ABSL_NAMESPACE_BEGIN
namespace hash_internal {
// Hash function for a byte array. A 64-bit seed and a set of five 64-bit
-// integers are hashed into the result.
+// integers are hashed into the result. The length must be greater than 32.
//
// To allow all hashable types (including string_view and Span) to depend on
// this algorithm, we keep the API low-level, with as few dependencies as
// possible.
-uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
- const uint64_t salt[5]);
-
-// Same as above except the length must be greater than 16.
-uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
+uint64_t LowLevelHashLenGt32(const void* data, size_t len, uint64_t seed,
const uint64_t salt[5]);
} // namespace hash_internal
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/spy_hash_state.h b/contrib/restricted/abseil-cpp/absl/hash/internal/spy_hash_state.h
index 92490b1a21e..e403113b0ea 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/spy_hash_state.h
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/spy_hash_state.h
@@ -16,12 +16,14 @@
#define ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
#include <algorithm>
+#include <cstddef>
#include <cstdint>
#include <ostream>
#include <string>
#include <vector>
#include "absl/hash/hash.h"
+#include "absl/hash/internal/weakly_mixed_integer.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
@@ -167,6 +169,11 @@ class SpyHashStateImpl : public HashStateBase<SpyHashStateImpl<T>> {
return hash_state;
}
+ static SpyHashStateImpl combine_weakly_mixed_integer(
+ SpyHashStateImpl hash_state, WeaklyMixedInteger value) {
+ return combine(std::move(hash_state), value.value);
+ }
+
using SpyHashStateImpl::HashStateBase::combine_contiguous;
template <typename CombinerT>
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/weakly_mixed_integer.h b/contrib/restricted/abseil-cpp/absl/hash/internal/weakly_mixed_integer.h
new file mode 100644
index 00000000000..55754366c03
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/weakly_mixed_integer.h
@@ -0,0 +1,38 @@
+// Copyright 2025 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_HASH_INTERNAL_WEAKLY_MIXED_INTEGER_H_
+#define ABSL_HASH_INTERNAL_WEAKLY_MIXED_INTEGER_H_
+
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+// Contains an integer that will be mixed into a hash state more weakly than
+// regular integers. It is useful for cases in which an integer is a part of a
+// larger object and needs to be mixed as a supplement. E.g., absl::string_view
+// and absl::Span are mixing their size wrapped with WeaklyMixedInteger.
+struct WeaklyMixedInteger {
+ size_t value;
+};
+
+} // namespace hash_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_HASH_INTERNAL_WEAKLY_MIXED_INTEGER_H_
diff --git a/contrib/restricted/abseil-cpp/absl/log/check.h b/contrib/restricted/abseil-cpp/absl/log/check.h
index 50f633ddf66..9e2219b8c1e 100644
--- a/contrib/restricted/abseil-cpp/absl/log/check.h
+++ b/contrib/restricted/abseil-cpp/absl/log/check.h
@@ -42,7 +42,8 @@
// CHECK()
//
-// `CHECK` terminates the program with a fatal error if `condition` is not true.
+// `CHECK` enforces that the `condition` is true. If the condition is false,
+// the program is terminated with a fatal error.
//
// The message may include additional information such as stack traces, when
// available.
diff --git a/contrib/restricted/abseil-cpp/absl/log/die_if_null.h b/contrib/restricted/abseil-cpp/absl/log/die_if_null.h
index f773aa854e9..8597976f4c2 100644
--- a/contrib/restricted/abseil-cpp/absl/log/die_if_null.h
+++ b/contrib/restricted/abseil-cpp/absl/log/die_if_null.h
@@ -60,8 +60,8 @@ namespace log_internal {
// Helper for `ABSL_DIE_IF_NULL`.
template <typename T>
-ABSL_MUST_USE_RESULT T DieIfNull(const char* file, int line,
- const char* exprtext, T&& t) {
+[[nodiscard]] T DieIfNull(const char* file, int line, const char* exprtext,
+ T&& t) {
if (ABSL_PREDICT_FALSE(t == nullptr)) {
// Call a non-inline helper function for a small code size improvement.
DieBecauseNull(file, line, exprtext);
diff --git a/contrib/restricted/abseil-cpp/absl/log/globals.h b/contrib/restricted/abseil-cpp/absl/log/globals.h
index 4feec4078fa..9718967ae73 100644
--- a/contrib/restricted/abseil-cpp/absl/log/globals.h
+++ b/contrib/restricted/abseil-cpp/absl/log/globals.h
@@ -43,7 +43,7 @@ ABSL_NAMESPACE_BEGIN
//
// Returns the value of the Minimum Log Level parameter.
// This function is async-signal-safe.
-ABSL_MUST_USE_RESULT absl::LogSeverityAtLeast MinLogLevel();
+[[nodiscard]] absl::LogSeverityAtLeast MinLogLevel();
// SetMinLogLevel()
//
@@ -82,7 +82,7 @@ class ScopedMinLogLevel final {
//
// Returns the value of the Stderr Threshold parameter.
// This function is async-signal-safe.
-ABSL_MUST_USE_RESULT absl::LogSeverityAtLeast StderrThreshold();
+[[nodiscard]] absl::LogSeverityAtLeast StderrThreshold();
// SetStderrThreshold()
//
@@ -118,8 +118,7 @@ class ScopedStderrThreshold final {
//
// Returns true if we should log a backtrace at the specified location.
namespace log_internal {
-ABSL_MUST_USE_RESULT bool ShouldLogBacktraceAt(absl::string_view file,
- int line);
+[[nodiscard]] bool ShouldLogBacktraceAt(absl::string_view file, int line);
} // namespace log_internal
// SetLogBacktraceLocation()
@@ -145,7 +144,7 @@ void ClearLogBacktraceLocation();
//
// Returns the value of the Prepend Log Prefix option.
// This function is async-signal-safe.
-ABSL_MUST_USE_RESULT bool ShouldPrependLogPrefix();
+[[nodiscard]] bool ShouldPrependLogPrefix();
// EnableLogPrefix()
//
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/append_truncated.h b/contrib/restricted/abseil-cpp/absl/log/internal/append_truncated.h
index f0e7912c2ac..d420a8b5c6a 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/append_truncated.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/append_truncated.h
@@ -17,8 +17,10 @@
#include <cstddef>
#include <cstring>
+#include <string_view>
#include "absl/base/config.h"
+#include "absl/strings/internal/utf8.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
@@ -33,6 +35,32 @@ inline size_t AppendTruncated(absl::string_view src, absl::Span<char> &dst) {
dst.remove_prefix(src.size());
return src.size();
}
+// Likewise, but it also takes a wide character string and transforms it into a
+// UTF-8 encoded byte string regardless of the current locale.
+// - On platforms where `wchar_t` is 2 bytes (e.g., Windows), the input is
+// treated as UTF-16.
+// - On platforms where `wchar_t` is 4 bytes (e.g., Linux, macOS), the input
+// is treated as UTF-32.
+inline size_t AppendTruncated(std::wstring_view src, absl::Span<char> &dst) {
+ absl::strings_internal::ShiftState state;
+ size_t total_bytes_written = 0;
+ for (const wchar_t wc : src) {
+ // If the destination buffer might not be large enough to write the next
+ // character, stop.
+ if (dst.size() < absl::strings_internal::kMaxEncodedUTF8Size) break;
+ size_t bytes_written =
+ absl::strings_internal::WideToUtf8(wc, dst.data(), state);
+ if (bytes_written == static_cast<size_t>(-1)) {
+ // Invalid character. Encode REPLACEMENT CHARACTER (U+FFFD) instead.
+ constexpr wchar_t kReplacementCharacter = L'\uFFFD';
+ bytes_written = absl::strings_internal::WideToUtf8(kReplacementCharacter,
+ dst.data(), state);
+ }
+ dst.remove_prefix(bytes_written);
+ total_bytes_written += bytes_written;
+ }
+ return total_bytes_written;
+}
// Likewise, but `n` copies of `c`.
inline size_t AppendTruncated(char c, size_t n, absl::Span<char> &dst) {
if (n > dst.size()) n = dst.size();
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/check_op.cc b/contrib/restricted/abseil-cpp/absl/log/internal/check_op.cc
index cec94218ec3..23db63bf101 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/check_op.cc
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/check_op.cc
@@ -35,26 +35,26 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
-#define ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(x) \
- template absl::Nonnull<const char*> MakeCheckOpString( \
- x, x, absl::Nonnull<const char*>)
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(bool);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(int64_t);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(uint64_t);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(float);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(double);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(char);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(unsigned char);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const std::string&);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const absl::string_view&);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const char*);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const signed char*);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const unsigned char*);
-ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const void*);
-#undef ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING
+#define ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(x) \
+ template const char* absl_nonnull MakeCheckOpString( \
+ x, x, const char* absl_nonnull)
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(bool);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(int64_t);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(uint64_t);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(float);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(double);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(char);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(unsigned char);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const std::string&);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const absl::string_view&);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const char*);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const signed char*);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const unsigned char*);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const void*);
+#undef ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING
CheckOpMessageBuilder::CheckOpMessageBuilder(
- absl::Nonnull<const char*> exprtext) {
+ const char* absl_nonnull exprtext) {
stream_ << exprtext << " (";
}
@@ -63,7 +63,7 @@ std::ostream& CheckOpMessageBuilder::ForVar2() {
return stream_;
}
-absl::Nonnull<const char*> CheckOpMessageBuilder::NewString() {
+const char* absl_nonnull CheckOpMessageBuilder::NewString() {
stream_ << ")";
// There's no need to free this string since the process is crashing.
return absl::IgnoreLeak(new std::string(std::move(stream_).str()))->c_str();
@@ -103,9 +103,9 @@ void MakeCheckOpValueString(std::ostream& os, const void* p) {
// Helper functions for string comparisons.
#define DEFINE_CHECK_STROP_IMPL(name, func, expected) \
- absl::Nullable<const char*> Check##func##expected##Impl( \
- absl::Nullable<const char*> s1, absl::Nullable<const char*> s2, \
- absl::Nonnull<const char*> exprtext) { \
+ const char* absl_nullable Check##func##expected##Impl( \
+ const char* absl_nullable s1, const char* absl_nullable s2, \
+ const char* absl_nonnull exprtext) { \
bool equal = s1 == s2 || (s1 && s2 && !func(s1, s2)); \
if (equal == expected) { \
return nullptr; \
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/check_op.h b/contrib/restricted/abseil-cpp/absl/log/internal/check_op.h
index d56aa3130e7..725340282bd 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/check_op.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/check_op.h
@@ -64,49 +64,48 @@
#endif
#define ABSL_LOG_INTERNAL_CHECK_OP(name, op, val1, val1_text, val2, val2_text) \
- while (absl::Nullable<const char*> absl_log_internal_check_op_result \
- ABSL_LOG_INTERNAL_ATTRIBUTE_UNUSED_IF_STRIP_LOG = \
- ::absl::log_internal::name##Impl( \
- ::absl::log_internal::GetReferenceableValue(val1), \
- ::absl::log_internal::GetReferenceableValue(val2), \
- ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL( \
- val1_text " " #op " " val2_text))) \
+ while (const char* absl_nullable absl_log_internal_check_op_result \
+ [[maybe_unused]] = ::absl::log_internal::name##Impl( \
+ ::absl::log_internal::GetReferenceableValue(val1), \
+ ::absl::log_internal::GetReferenceableValue(val2), \
+ ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val1_text " " #op \
+ " " val2_text))) \
ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, true) \
- ABSL_LOG_INTERNAL_CHECK(absl::implicit_cast<absl::Nonnull<const char*>>( \
+ ABSL_LOG_INTERNAL_CHECK(::absl::implicit_cast<const char* absl_nonnull>( \
absl_log_internal_check_op_result)) \
.InternalStream()
#define ABSL_LOG_INTERNAL_QCHECK_OP(name, op, val1, val1_text, val2, \
val2_text) \
- while (absl::Nullable<const char*> absl_log_internal_qcheck_op_result = \
+ while (const char* absl_nullable absl_log_internal_qcheck_op_result = \
::absl::log_internal::name##Impl( \
::absl::log_internal::GetReferenceableValue(val1), \
::absl::log_internal::GetReferenceableValue(val2), \
ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL( \
val1_text " " #op " " val2_text))) \
ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS, true) \
- ABSL_LOG_INTERNAL_QCHECK(absl::implicit_cast<absl::Nonnull<const char*>>( \
+ ABSL_LOG_INTERNAL_QCHECK(::absl::implicit_cast<const char* absl_nonnull>( \
absl_log_internal_qcheck_op_result)) \
.InternalStream()
#define ABSL_LOG_INTERNAL_CHECK_STROP(func, op, expected, s1, s1_text, s2, \
s2_text) \
- while (absl::Nullable<const char*> absl_log_internal_check_strop_result = \
+ while (const char* absl_nullable absl_log_internal_check_strop_result = \
::absl::log_internal::Check##func##expected##Impl( \
(s1), (s2), \
ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(s1_text " " #op \
" " s2_text))) \
ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, true) \
- ABSL_LOG_INTERNAL_CHECK(absl::implicit_cast<absl::Nonnull<const char*>>( \
+ ABSL_LOG_INTERNAL_CHECK(::absl::implicit_cast<const char* absl_nonnull>( \
absl_log_internal_check_strop_result)) \
.InternalStream()
#define ABSL_LOG_INTERNAL_QCHECK_STROP(func, op, expected, s1, s1_text, s2, \
s2_text) \
- while (absl::Nullable<const char*> absl_log_internal_qcheck_strop_result = \
+ while (const char* absl_nullable absl_log_internal_qcheck_strop_result = \
::absl::log_internal::Check##func##expected##Impl( \
(s1), (s2), \
ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(s1_text " " #op \
" " s2_text))) \
ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS, true) \
- ABSL_LOG_INTERNAL_QCHECK(absl::implicit_cast<absl::Nonnull<const char*>>( \
+ ABSL_LOG_INTERNAL_QCHECK(::absl::implicit_cast<const char* absl_nonnull>( \
absl_log_internal_qcheck_strop_result)) \
.InternalStream()
@@ -135,8 +134,8 @@
// strip the call to stringify the non-ok `Status` as long as we don't log it;
// dropping the `Status`'s message text is out of scope.
#define ABSL_LOG_INTERNAL_CHECK_OK(val, val_text) \
- for (::std::pair<absl::Nonnull<const ::absl::Status*>, \
- absl::Nullable<const char*>> \
+ for (::std::pair<const ::absl::Status* absl_nonnull, \
+ const char* absl_nullable> \
absl_log_internal_check_ok_goo; \
absl_log_internal_check_ok_goo.first = \
::absl::log_internal::AsStatus(val), \
@@ -149,12 +148,12 @@
" is OK")), \
!ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok());) \
ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, true) \
- ABSL_LOG_INTERNAL_CHECK(absl::implicit_cast<absl::Nonnull<const char*>>( \
+ ABSL_LOG_INTERNAL_CHECK(::absl::implicit_cast<const char* absl_nonnull>( \
absl_log_internal_check_ok_goo.second)) \
.InternalStream()
#define ABSL_LOG_INTERNAL_QCHECK_OK(val, val_text) \
- for (::std::pair<absl::Nonnull<const ::absl::Status*>, \
- absl::Nullable<const char*>> \
+ for (::std::pair<const ::absl::Status* absl_nonnull, \
+ const char* absl_nullable> \
absl_log_internal_qcheck_ok_goo; \
absl_log_internal_qcheck_ok_goo.first = \
::absl::log_internal::AsStatus(val), \
@@ -167,7 +166,7 @@
" is OK")), \
!ABSL_PREDICT_TRUE(absl_log_internal_qcheck_ok_goo.first->ok());) \
ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS, true) \
- ABSL_LOG_INTERNAL_QCHECK(absl::implicit_cast<absl::Nonnull<const char*>>( \
+ ABSL_LOG_INTERNAL_QCHECK(::absl::implicit_cast<const char* absl_nonnull>( \
absl_log_internal_qcheck_ok_goo.second)) \
.InternalStream()
@@ -179,9 +178,8 @@ template <typename T>
class StatusOr;
namespace status_internal {
-ABSL_ATTRIBUTE_PURE_FUNCTION absl::Nonnull<const char*> MakeCheckFailString(
- absl::Nonnull<const absl::Status*> status,
- absl::Nonnull<const char*> prefix);
+ABSL_ATTRIBUTE_PURE_FUNCTION const char* absl_nonnull MakeCheckFailString(
+ const absl::Status* absl_nonnull status, const char* absl_nonnull prefix);
} // namespace status_internal
namespace log_internal {
@@ -189,11 +187,11 @@ namespace log_internal {
// Convert a Status or a StatusOr to its underlying status value.
//
// (This implementation does not require a dep on absl::Status to work.)
-inline absl::Nonnull<const absl::Status*> AsStatus(const absl::Status& s) {
+inline const absl::Status* absl_nonnull AsStatus(const absl::Status& s) {
return &s;
}
template <typename T>
-absl::Nonnull<const absl::Status*> AsStatus(const absl::StatusOr<T>& s) {
+const absl::Status* absl_nonnull AsStatus(const absl::StatusOr<T>& s) {
return &s.status();
}
@@ -202,14 +200,14 @@ absl::Nonnull<const absl::Status*> AsStatus(const absl::StatusOr<T>& s) {
class CheckOpMessageBuilder final {
public:
// Inserts `exprtext` and ` (` to the stream.
- explicit CheckOpMessageBuilder(absl::Nonnull<const char*> exprtext);
+ explicit CheckOpMessageBuilder(const char* absl_nonnull exprtext);
~CheckOpMessageBuilder() = default;
// For inserting the first variable.
std::ostream& ForVar1() { return stream_; }
// For inserting the second variable (adds an intermediate ` vs. `).
std::ostream& ForVar2();
// Get the result (inserts the closing `)`).
- absl::Nonnull<const char*> NewString();
+ const char* absl_nonnull NewString();
private:
std::ostringstream stream_;
@@ -226,7 +224,7 @@ inline void MakeCheckOpValueString(std::ostream& os, const T& v) {
void MakeCheckOpValueString(std::ostream& os, char v);
void MakeCheckOpValueString(std::ostream& os, signed char v);
void MakeCheckOpValueString(std::ostream& os, unsigned char v);
-void MakeCheckOpValueString(std::ostream& os, const void* p);
+void MakeCheckOpValueString(std::ostream& os, const void* absl_nullable p);
namespace detect_specialization {
@@ -268,8 +266,9 @@ float operator<<(std::ostream&, float value);
double operator<<(std::ostream&, double value);
long double operator<<(std::ostream&, long double value);
bool operator<<(std::ostream&, bool value);
-const void* operator<<(std::ostream&, const void* value);
-const void* operator<<(std::ostream&, std::nullptr_t);
+const void* absl_nullable operator<<(std::ostream&,
+ const void* absl_nullable value);
+const void* absl_nullable operator<<(std::ostream&, std::nullptr_t);
// These `char` overloads are specified like this in the standard, so we have to
// write them exactly the same to ensure the call is ambiguous.
@@ -283,13 +282,14 @@ signed char operator<<(std::basic_ostream<char, Traits>&, signed char);
template <typename Traits>
unsigned char operator<<(std::basic_ostream<char, Traits>&, unsigned char);
template <typename Traits>
-const char* operator<<(std::basic_ostream<char, Traits>&, const char*);
+const char* absl_nonnull operator<<(std::basic_ostream<char, Traits>&,
+ const char* absl_nonnull);
template <typename Traits>
-const signed char* operator<<(std::basic_ostream<char, Traits>&,
- const signed char*);
+const signed char* absl_nonnull operator<<(std::basic_ostream<char, Traits>&,
+ const signed char* absl_nonnull);
template <typename Traits>
-const unsigned char* operator<<(std::basic_ostream<char, Traits>&,
- const unsigned char*);
+const unsigned char* absl_nonnull operator<<(std::basic_ostream<char, Traits>&,
+ const unsigned char* absl_nonnull);
// This overload triggers when the call is not ambiguous.
// It means that T is being printed with some overload not on this list.
@@ -314,7 +314,8 @@ class StringifySink {
void Append(absl::string_view text);
void Append(size_t length, char ch);
- friend void AbslFormatFlush(StringifySink* sink, absl::string_view text);
+ friend void AbslFormatFlush(StringifySink* absl_nonnull sink,
+ absl::string_view text);
private:
std::ostream& os_;
@@ -352,12 +353,12 @@ using CheckOpStreamType = decltype(detect_specialization::Detect<T>(0));
// Build the error message string. Specify no inlining for code size.
template <typename T1, typename T2>
-ABSL_ATTRIBUTE_RETURNS_NONNULL absl::Nonnull<const char*> MakeCheckOpString(
- T1 v1, T2 v2, absl::Nonnull<const char*> exprtext) ABSL_ATTRIBUTE_NOINLINE;
+ABSL_ATTRIBUTE_RETURNS_NONNULL const char* absl_nonnull MakeCheckOpString(
+ T1 v1, T2 v2, const char* absl_nonnull exprtext) ABSL_ATTRIBUTE_NOINLINE;
template <typename T1, typename T2>
-absl::Nonnull<const char*> MakeCheckOpString(
- T1 v1, T2 v2, absl::Nonnull<const char*> exprtext) {
+const char* absl_nonnull MakeCheckOpString(T1 v1, T2 v2,
+ const char* absl_nonnull exprtext) {
CheckOpMessageBuilder comb(exprtext);
MakeCheckOpValueString(comb.ForVar1(), v1);
MakeCheckOpValueString(comb.ForVar2(), v2);
@@ -367,8 +368,8 @@ absl::Nonnull<const char*> MakeCheckOpString(
// Add a few commonly used instantiations as extern to reduce size of objects
// files.
#define ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(x) \
- extern template absl::Nonnull<const char*> MakeCheckOpString( \
- x, x, absl::Nonnull<const char*>)
+ extern template const char* absl_nonnull MakeCheckOpString( \
+ x, x, const char* absl_nonnull)
ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(bool);
ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(int64_t);
ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(uint64_t);
@@ -378,10 +379,12 @@ ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(char);
ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(unsigned char);
ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const std::string&);
ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const absl::string_view&);
-ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const char*);
-ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const signed char*);
-ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const unsigned char*);
-ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const void*);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const char* absl_nonnull);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(
+ const signed char* absl_nonnull);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(
+ const unsigned char* absl_nonnull);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const void* absl_nonnull);
#undef ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN
// `ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT` skips formatting the Check_OP result
@@ -404,8 +407,8 @@ ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const void*);
// type.
#define ABSL_LOG_INTERNAL_CHECK_OP_IMPL(name, op) \
template <typename T1, typename T2> \
- inline constexpr absl::Nullable<const char*> name##Impl( \
- const T1& v1, const T2& v2, absl::Nonnull<const char*> exprtext) { \
+ inline constexpr const char* absl_nullable name##Impl( \
+ const T1& v1, const T2& v2, const char* absl_nonnull exprtext) { \
using U1 = CheckOpStreamType<T1>; \
using U2 = CheckOpStreamType<T2>; \
return ABSL_PREDICT_TRUE(v1 op v2) \
@@ -413,8 +416,8 @@ ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const void*);
: ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT(U1, U2, U1(v1), \
U2(v2), exprtext); \
} \
- inline constexpr absl::Nullable<const char*> name##Impl( \
- int v1, int v2, absl::Nonnull<const char*> exprtext) { \
+ inline constexpr const char* absl_nullable name##Impl( \
+ int v1, int v2, const char* absl_nonnull exprtext) { \
return name##Impl<int, int>(v1, v2, exprtext); \
}
@@ -427,18 +430,18 @@ ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_GT, >)
#undef ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT
#undef ABSL_LOG_INTERNAL_CHECK_OP_IMPL
-absl::Nullable<const char*> CheckstrcmptrueImpl(
- absl::Nullable<const char*> s1, absl::Nullable<const char*> s2,
- absl::Nonnull<const char*> exprtext);
-absl::Nullable<const char*> CheckstrcmpfalseImpl(
- absl::Nullable<const char*> s1, absl::Nullable<const char*> s2,
- absl::Nonnull<const char*> exprtext);
-absl::Nullable<const char*> CheckstrcasecmptrueImpl(
- absl::Nullable<const char*> s1, absl::Nullable<const char*> s2,
- absl::Nonnull<const char*> exprtext);
-absl::Nullable<const char*> CheckstrcasecmpfalseImpl(
- absl::Nullable<const char*> s1, absl::Nullable<const char*> s2,
- absl::Nonnull<const char*> exprtext);
+const char* absl_nullable CheckstrcmptrueImpl(
+ const char* absl_nullable s1, const char* absl_nullable s2,
+ const char* absl_nonnull exprtext);
+const char* absl_nullable CheckstrcmpfalseImpl(
+ const char* absl_nullable s1, const char* absl_nullable s2,
+ const char* absl_nonnull exprtext);
+const char* absl_nullable CheckstrcasecmptrueImpl(
+ const char* absl_nullable s1, const char* absl_nullable s2,
+ const char* absl_nonnull exprtext);
+const char* absl_nullable CheckstrcasecmpfalseImpl(
+ const char* absl_nullable s1, const char* absl_nullable s2,
+ const char* absl_nonnull exprtext);
// `CHECK_EQ` and friends want to pass their arguments by reference, however
// this winds up exposing lots of cases where people have defined and
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/conditions.cc b/contrib/restricted/abseil-cpp/absl/log/internal/conditions.cc
index a9f4966f5dd..a418c88428e 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/conditions.cc
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/conditions.cc
@@ -63,8 +63,9 @@ bool LogEveryNSecState::ShouldLog(double seconds) {
// myriad2 does not have 8-byte compare and exchange. Use a racy version that
// is "good enough" but will over-log in the face of concurrent logging.
if (now_cycles > next_cycles) {
- next_log_time_cycles_.store(now_cycles + seconds * CycleClock::Frequency(),
- std::memory_order_relaxed);
+ next_log_time_cycles_.store(
+ static_cast<int64_t>(now_cycles + seconds * CycleClock::Frequency()),
+ std::memory_order_relaxed);
return true;
}
return false;
@@ -72,7 +73,8 @@ bool LogEveryNSecState::ShouldLog(double seconds) {
do {
if (now_cycles <= next_cycles) return false;
} while (!next_log_time_cycles_.compare_exchange_weak(
- next_cycles, now_cycles + seconds * CycleClock::Frequency(),
+ next_cycles,
+ static_cast<int64_t>(now_cycles + seconds * CycleClock::Frequency()),
std::memory_order_relaxed, std::memory_order_relaxed));
return true;
#endif
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/conditions.h b/contrib/restricted/abseil-cpp/absl/log/internal/conditions.h
index 9dc15db4283..6fb74b142bc 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/conditions.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/conditions.h
@@ -65,7 +65,7 @@
switch (0) \
case 0: \
default: \
- !(condition) ? (void)0 : ::absl::log_internal::Voidify()&&
+ !(condition) ? (void)0 : ::absl::log_internal::Voidify() &&
// `ABSL_LOG_INTERNAL_STATEFUL_CONDITION` applies a condition like
// `ABSL_LOG_INTERNAL_STATELESS_CONDITION` but adds to that a series of variable
@@ -96,7 +96,8 @@
for (const uint32_t COUNTER ABSL_ATTRIBUTE_UNUSED = \
absl_log_internal_stateful_condition_state.counter(); \
absl_log_internal_stateful_condition_do_log; \
- absl_log_internal_stateful_condition_do_log = false)
+ absl_log_internal_stateful_condition_do_log = false) \
+ ::absl::log_internal::Voidify() &&
// `ABSL_LOG_INTERNAL_CONDITION_*` serve to combine any conditions from the
// macro (e.g. `LOG_IF` or `VLOG`) with inherent conditions (e.g.
@@ -117,6 +118,8 @@
ABSL_LOG_INTERNAL_##type##_CONDITION( \
(condition) && ::absl::LogSeverity::kError >= \
static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL))
+#define ABSL_LOG_INTERNAL_CONDITION_DO_NOT_SUBMIT(type, condition) \
+ ABSL_LOG_INTERNAL_CONDITION_ERROR(type, condition)
// NOTE: Use ternary operators instead of short-circuiting to mitigate
// https://bugs.llvm.org/show_bug.cgi?id=51928.
#define ABSL_LOG_INTERNAL_CONDITION_FATAL(type, condition) \
@@ -168,6 +171,8 @@
ABSL_LOG_INTERNAL_##type##_CONDITION(condition)
#define ABSL_LOG_INTERNAL_CONDITION_ERROR(type, condition) \
ABSL_LOG_INTERNAL_##type##_CONDITION(condition)
+#define ABSL_LOG_INTERNAL_CONDITION_DO_NOT_SUBMIT(type, condition) \
+ ABSL_LOG_INTERNAL_CONDITION_ERROR(type, condition)
#define ABSL_LOG_INTERNAL_CONDITION_FATAL(type, condition) \
ABSL_LOG_INTERNAL_##type##_CONDITION(condition)
#define ABSL_LOG_INTERNAL_CONDITION_QFATAL(type, condition) \
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/log_message.cc b/contrib/restricted/abseil-cpp/absl/log/internal/log_message.cc
index 9e7722dac0f..3aed3a2fdfd 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/log_message.cc
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/log_message.cc
@@ -31,6 +31,7 @@
#include <memory>
#include <ostream>
#include <string>
+#include <string_view>
#include <tuple>
#include "absl/base/attributes.h"
@@ -47,12 +48,14 @@
#include "absl/log/internal/globals.h"
#include "absl/log/internal/log_format.h"
#include "absl/log/internal/log_sink_set.h"
+#include "absl/log/internal/nullguard.h"
#include "absl/log/internal/proto.h"
#include "absl/log/internal/structured_proto.h"
#include "absl/log/log_entry.h"
#include "absl/log/log_sink.h"
#include "absl/log/log_sink_registry.h"
#include "absl/memory/memory.h"
+#include "absl/strings/internal/utf8.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
@@ -147,7 +150,7 @@ void WriteToStream(const char* data, void* os) {
} // namespace
struct LogMessage::LogMessageData final {
- LogMessageData(absl::Nonnull<const char*> file, int line,
+ LogMessageData(const char* absl_nonnull file, int line,
absl::LogSeverity severity, absl::Time timestamp);
LogMessageData(const LogMessageData&) = delete;
LogMessageData& operator=(const LogMessageData&) = delete;
@@ -163,7 +166,7 @@ struct LogMessage::LogMessageData final {
bool is_perror;
// Extra `LogSink`s to log to, in addition to `global_sinks`.
- absl::InlinedVector<absl::Nonnull<absl::LogSink*>, 16> extra_sinks;
+ absl::InlinedVector<absl::LogSink* absl_nonnull, 16> extra_sinks;
// If true, log to `extra_sinks` but not to `global_sinks` or hardcoded
// non-sink targets (e.g. stderr, log files).
bool extra_sinks_only;
@@ -199,7 +202,7 @@ struct LogMessage::LogMessageData final {
void FinalizeEncodingAndFormat();
};
-LogMessage::LogMessageData::LogMessageData(absl::Nonnull<const char*> file,
+LogMessage::LogMessageData::LogMessageData(const char* absl_nonnull file,
int line, absl::LogSeverity severity,
absl::Time timestamp)
: extra_sinks_only(false), manipulated(nullptr) {
@@ -270,7 +273,7 @@ void LogMessage::LogMessageData::FinalizeEncodingAndFormat() {
absl::MakeSpan(string_buf).subspan(0, chars_written);
}
-LogMessage::LogMessage(absl::Nonnull<const char*> file, int line,
+LogMessage::LogMessage(const char* absl_nonnull file, int line,
absl::LogSeverity severity)
: data_(absl::make_unique<LogMessageData>(file, line, severity,
absl::Now())) {
@@ -284,23 +287,15 @@ LogMessage::LogMessage(absl::Nonnull<const char*> file, int line,
LogBacktraceIfNeeded();
}
-LogMessage::LogMessage(absl::Nonnull<const char*> file, int line, InfoTag)
+LogMessage::LogMessage(const char* absl_nonnull file, int line, InfoTag)
: LogMessage(file, line, absl::LogSeverity::kInfo) {}
-LogMessage::LogMessage(absl::Nonnull<const char*> file, int line, WarningTag)
+LogMessage::LogMessage(const char* absl_nonnull file, int line, WarningTag)
: LogMessage(file, line, absl::LogSeverity::kWarning) {}
-LogMessage::LogMessage(absl::Nonnull<const char*> file, int line, ErrorTag)
+LogMessage::LogMessage(const char* absl_nonnull file, int line, ErrorTag)
: LogMessage(file, line, absl::LogSeverity::kError) {}
-LogMessage::~LogMessage() {
-#ifdef ABSL_MIN_LOG_LEVEL
- if (data_->entry.log_severity() <
- static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
- data_->entry.log_severity() < absl::LogSeverity::kFatal) {
- return;
- }
-#endif
- Flush();
-}
+// This cannot go in the header since LogMessageData is defined in this file.
+LogMessage::~LogMessage() = default;
LogMessage& LogMessage::AtLocation(absl::string_view file, int line) {
data_->entry.full_filename_ = file;
@@ -351,13 +346,13 @@ LogMessage& LogMessage::WithPerror() {
return *this;
}
-LogMessage& LogMessage::ToSinkAlso(absl::Nonnull<absl::LogSink*> sink) {
+LogMessage& LogMessage::ToSinkAlso(absl::LogSink* absl_nonnull sink) {
ABSL_INTERNAL_CHECK(sink, "null LogSink*");
data_->extra_sinks.push_back(sink);
return *this;
}
-LogMessage& LogMessage::ToSinkOnly(absl::Nonnull<absl::LogSink*> sink) {
+LogMessage& LogMessage::ToSinkOnly(absl::LogSink* absl_nonnull sink) {
ABSL_INTERNAL_CHECK(sink, "null LogSink*");
data_->extra_sinks.clear();
data_->extra_sinks.push_back(sink);
@@ -411,6 +406,34 @@ LogMessage& LogMessage::operator<<(absl::string_view v) {
CopyToEncodedBuffer<StringType::kNotLiteral>(v);
return *this;
}
+
+LogMessage& LogMessage::operator<<(const std::wstring& v) {
+ CopyToEncodedBuffer<StringType::kNotLiteral>(v);
+ return *this;
+}
+
+LogMessage& LogMessage::operator<<(std::wstring_view v) {
+ CopyToEncodedBuffer<StringType::kNotLiteral>(v);
+ return *this;
+}
+
+template <>
+LogMessage& LogMessage::operator<< <const wchar_t*>(
+ const wchar_t* absl_nullable const& v) {
+ if (v == nullptr) {
+ CopyToEncodedBuffer<StringType::kNotLiteral>(
+ absl::string_view(kCharNull.data(), kCharNull.size() - 1));
+ } else {
+ CopyToEncodedBuffer<StringType::kNotLiteral>(v);
+ }
+ return *this;
+}
+
+LogMessage& LogMessage::operator<<(wchar_t v) {
+ CopyToEncodedBuffer<StringType::kNotLiteral>(std::wstring_view(&v, 1));
+ return *this;
+}
+
LogMessage& LogMessage::operator<<(std::ostream& (*m)(std::ostream& os)) {
OstreamView view(*data_);
data_->manipulated << m;
@@ -633,6 +656,37 @@ template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
template void LogMessage::CopyToEncodedBuffer<
LogMessage::StringType::kNotLiteral>(char ch, size_t num);
+template <LogMessage::StringType str_type>
+void LogMessage::CopyToEncodedBuffer(std::wstring_view str) {
+ auto encoded_remaining_copy = data_->encoded_remaining();
+ constexpr uint8_t tag_value = str_type == StringType::kLiteral
+ ? ValueTag::kStringLiteral
+ : ValueTag::kString;
+ size_t max_str_byte_length =
+ absl::strings_internal::kMaxEncodedUTF8Size * str.length();
+ auto value_start =
+ EncodeMessageStart(EventTag::kValue,
+ BufferSizeFor(tag_value, WireType::kLengthDelimited) +
+ max_str_byte_length,
+ &encoded_remaining_copy);
+ auto str_start = EncodeMessageStart(tag_value, max_str_byte_length,
+ &encoded_remaining_copy);
+ if (str_start.data()) {
+ log_internal::AppendTruncated(str, encoded_remaining_copy);
+ EncodeMessageLength(str_start, &encoded_remaining_copy);
+ EncodeMessageLength(value_start, &encoded_remaining_copy);
+ data_->encoded_remaining() = encoded_remaining_copy;
+ } else {
+ // The field header(s) did not fit; zero `encoded_remaining()` so we don't
+ // write anything else later.
+ data_->encoded_remaining().remove_suffix(data_->encoded_remaining().size());
+ }
+}
+template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
+ std::wstring_view str);
+template void LogMessage::CopyToEncodedBuffer<
+ LogMessage::StringType::kNotLiteral>(std::wstring_view str);
+
template void LogMessage::CopyToEncodedBufferWithStructuredProtoField<
LogMessage::StringType::kLiteral>(StructuredProtoField field,
absl::string_view str);
@@ -681,57 +735,45 @@ void LogMessage::CopyToEncodedBufferWithStructuredProtoField(
#pragma warning(disable : 4722)
#endif
-LogMessageFatal::LogMessageFatal(absl::Nonnull<const char*> file, int line)
+LogMessageFatal::LogMessageFatal(const char* absl_nonnull file, int line)
: LogMessage(file, line, absl::LogSeverity::kFatal) {}
-LogMessageFatal::LogMessageFatal(absl::Nonnull<const char*> file, int line,
- absl::Nonnull<const char*> failure_msg)
+LogMessageFatal::LogMessageFatal(const char* absl_nonnull file, int line,
+ const char* absl_nonnull failure_msg)
: LogMessage(file, line, absl::LogSeverity::kFatal) {
*this << "Check failed: " << failure_msg << " ";
}
-LogMessageFatal::~LogMessageFatal() {
- Flush();
- FailWithoutStackTrace();
-}
+LogMessageFatal::~LogMessageFatal() { FailWithoutStackTrace(); }
-LogMessageDebugFatal::LogMessageDebugFatal(absl::Nonnull<const char*> file,
+LogMessageDebugFatal::LogMessageDebugFatal(const char* absl_nonnull file,
int line)
: LogMessage(file, line, absl::LogSeverity::kFatal) {}
-LogMessageDebugFatal::~LogMessageDebugFatal() {
- Flush();
- FailWithoutStackTrace();
-}
+LogMessageDebugFatal::~LogMessageDebugFatal() { FailWithoutStackTrace(); }
LogMessageQuietlyDebugFatal::LogMessageQuietlyDebugFatal(
- absl::Nonnull<const char*> file, int line)
+ const char* absl_nonnull file, int line)
: LogMessage(file, line, absl::LogSeverity::kFatal) {
SetFailQuietly();
}
-LogMessageQuietlyDebugFatal::~LogMessageQuietlyDebugFatal() {
- Flush();
- FailQuietly();
-}
+LogMessageQuietlyDebugFatal::~LogMessageQuietlyDebugFatal() { FailQuietly(); }
-LogMessageQuietlyFatal::LogMessageQuietlyFatal(absl::Nonnull<const char*> file,
+LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* absl_nonnull file,
int line)
: LogMessage(file, line, absl::LogSeverity::kFatal) {
SetFailQuietly();
}
LogMessageQuietlyFatal::LogMessageQuietlyFatal(
- absl::Nonnull<const char*> file, int line,
- absl::Nonnull<const char*> failure_msg)
+ const char* absl_nonnull file, int line,
+ const char* absl_nonnull failure_msg)
: LogMessageQuietlyFatal(file, line) {
*this << "Check failed: " << failure_msg << " ";
}
-LogMessageQuietlyFatal::~LogMessageQuietlyFatal() {
- Flush();
- FailQuietly();
-}
+LogMessageQuietlyFatal::~LogMessageQuietlyFatal() { FailQuietly(); }
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(pop)
#endif
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/log_message.h b/contrib/restricted/abseil-cpp/absl/log/internal/log_message.h
index 7d0e403e3a9..1aaf05e31f4 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/log_message.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/log_message.h
@@ -17,22 +17,25 @@
// -----------------------------------------------------------------------------
//
// This file declares `class absl::log_internal::LogMessage`. This class more or
-// less represents a particular log message. LOG/CHECK macros create a
-// temporary instance of `LogMessage` and then stream values to it. At the end
-// of the LOG/CHECK statement, LogMessage instance goes out of scope and
-// `~LogMessage` directs the message to the registered log sinks.
-// Heap-allocation of `LogMessage` is unsupported. Construction outside of a
-// `LOG` macro is unsupported.
+// less represents a particular log message. LOG/CHECK macros create a temporary
+// instance of `LogMessage` and then stream values to it. At the end of the
+// LOG/CHECK statement, the LogMessage is voidified by operator&&, and `Flush()`
+// directs the message to the registered log sinks. Heap-allocation of
+// `LogMessage` is unsupported. Construction outside of a `LOG` macro is
+// unsupported.
#ifndef ABSL_LOG_INTERNAL_LOG_MESSAGE_H_
#define ABSL_LOG_INTERNAL_LOG_MESSAGE_H_
+#include <wchar.h>
+
#include <cstddef>
#include <ios>
#include <memory>
#include <ostream>
#include <streambuf>
#include <string>
+#include <string_view>
#include <type_traits>
#include "absl/base/attributes.h"
@@ -62,15 +65,15 @@ class LogMessage {
struct ErrorTag {};
// Used for `LOG`.
- LogMessage(absl::Nonnull<const char*> file, int line,
+ LogMessage(const char* absl_nonnull file, int line,
absl::LogSeverity severity) ABSL_ATTRIBUTE_COLD;
// These constructors are slightly smaller/faster to call; the severity is
// curried into the function pointer.
- LogMessage(absl::Nonnull<const char*> file, int line,
+ LogMessage(const char* absl_nonnull file, int line,
InfoTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
- LogMessage(absl::Nonnull<const char*> file, int line,
+ LogMessage(const char* absl_nonnull file, int line,
WarningTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
- LogMessage(absl::Nonnull<const char*> file, int line,
+ LogMessage(const char* absl_nonnull file, int line,
ErrorTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
LogMessage(const LogMessage&) = delete;
LogMessage& operator=(const LogMessage&) = delete;
@@ -102,9 +105,9 @@ class LogMessage {
LogMessage& WithPerror();
// Sends this message to `*sink` in addition to whatever other sinks it would
// otherwise have been sent to.
- LogMessage& ToSinkAlso(absl::Nonnull<absl::LogSink*> sink);
+ LogMessage& ToSinkAlso(absl::LogSink* absl_nonnull sink);
// Sends this message to `*sink` and no others.
- LogMessage& ToSinkOnly(absl::Nonnull<absl::LogSink*> sink);
+ LogMessage& ToSinkOnly(absl::LogSink* absl_nonnull sink);
// Don't call this method from outside this library.
LogMessage& InternalStream() { return *this; }
@@ -141,10 +144,10 @@ class LogMessage {
LogMessage& operator<<(unsigned long long v) {
return operator<< <unsigned long long>(v);
}
- LogMessage& operator<<(absl::Nullable<void*> v) {
+ LogMessage& operator<<(void* absl_nullable v) {
return operator<< <void*>(v);
}
- LogMessage& operator<<(absl::Nullable<const void*> v) {
+ LogMessage& operator<<(const void* absl_nullable v) {
return operator<< <const void*>(v);
}
LogMessage& operator<<(float v) { return operator<< <float>(v); }
@@ -158,10 +161,16 @@ class LogMessage {
LogMessage& operator<<(const std::string& v);
LogMessage& operator<<(absl::string_view v);
+ // Wide string overloads (since std::ostream does not provide them).
+ LogMessage& operator<<(const std::wstring& v);
+ LogMessage& operator<<(std::wstring_view v);
+ // `const wchar_t*` is handled by `operator<< <const wchar_t*>`.
+ LogMessage& operator<<(wchar_t* absl_nullable v);
+ LogMessage& operator<<(wchar_t v);
+
// Handle stream manipulators e.g. std::endl.
- LogMessage& operator<<(absl::Nonnull<std::ostream& (*)(std::ostream & os)> m);
- LogMessage& operator<<(
- absl::Nonnull<std::ios_base& (*)(std::ios_base & os)> m);
+ LogMessage& operator<<(std::ostream& (*absl_nonnull m)(std::ostream& os));
+ LogMessage& operator<<(std::ios_base& (*absl_nonnull m)(std::ios_base& os));
// Literal strings. This allows us to record C string literals as literals in
// the logging.proto.Value.
@@ -170,31 +179,30 @@ class LogMessage {
// this template for every value of `SIZE` encountered in each source code
// file. That significantly increases linker input sizes. Inlining is cheap
// because the argument to this overload is almost always a string literal so
- // the call to `strlen` can be replaced at compile time. The overload for
- // `char[]` below should not be inlined. The compiler typically does not have
- // the string at compile time and cannot replace the call to `strlen` so
- // inlining it increases the binary size. See the discussion on
+ // the call to `strlen` can be replaced at compile time. The overloads for
+ // `char[]`/`wchar_t[]` below should not be inlined. The compiler typically
+ // does not have the string at compile time and cannot replace the call to
+ // `strlen` so inlining it increases the binary size. See the discussion on
// cl/107527369.
template <int SIZE>
LogMessage& operator<<(const char (&buf)[SIZE]);
+ template <int SIZE>
+ LogMessage& operator<<(const wchar_t (&buf)[SIZE]);
// This prevents non-const `char[]` arrays from looking like literals.
template <int SIZE>
LogMessage& operator<<(char (&buf)[SIZE]) ABSL_ATTRIBUTE_NOINLINE;
+ // `wchar_t[SIZE]` is handled by `operator<< <const wchar_t*>`.
// Types that support `AbslStringify()` are serialized that way.
- template <typename T,
- typename std::enable_if<absl::HasAbslStringify<T>::value,
- int>::type = 0>
- LogMessage& operator<<(const T& v) ABSL_ATTRIBUTE_NOINLINE;
-
// Types that don't support `AbslStringify()` but do support streaming into a
// `std::ostream&` are serialized that way.
- template <typename T,
- typename std::enable_if<!absl::HasAbslStringify<T>::value,
- int>::type = 0>
+ template <typename T>
LogMessage& operator<<(const T& v) ABSL_ATTRIBUTE_NOINLINE;
+ // Dispatches the completed `absl::LogEntry` to applicable `absl::LogSink`s.
+ void Flush();
+
// Note: We explicitly do not support `operator<<` for non-const references
// because it breaks logging of non-integer bitfield types (i.e., enums).
@@ -207,11 +215,6 @@ class LogMessage {
// the process with an error exit code.
[[noreturn]] static void FailQuietly();
- // Dispatches the completed `absl::LogEntry` to applicable `absl::LogSink`s.
- // This might as well be inlined into `~LogMessage` except that
- // `~LogMessageFatal` needs to call it early.
- void Flush();
-
// After this is called, failures are done as quiet as possible for this log
// message.
void SetFailQuietly();
@@ -253,6 +256,8 @@ class LogMessage {
void CopyToEncodedBuffer(absl::string_view str) ABSL_ATTRIBUTE_NOINLINE;
template <StringType str_type>
void CopyToEncodedBuffer(char ch, size_t num) ABSL_ATTRIBUTE_NOINLINE;
+ template <StringType str_type>
+ void CopyToEncodedBuffer(std::wstring_view str) ABSL_ATTRIBUTE_NOINLINE;
// Copies `field` to the encoded buffer, then appends `str` after it
// (truncating `str` if necessary to fit).
@@ -280,9 +285,25 @@ class LogMessage {
// We keep the data in a separate struct so that each instance of `LogMessage`
// uses less stack space.
- absl::Nonnull<std::unique_ptr<LogMessageData>> data_;
+ absl_nonnull std::unique_ptr<LogMessageData> data_;
};
+// Explicitly specializes the generic operator<< for `const wchar_t*`
+// arguments.
+//
+// This method is used instead of a non-template `const wchar_t*` overload,
+// as the latter was found to take precedence over the array template
+// (`operator<<(const wchar_t(&)[SIZE])`) when handling string literals.
+// This specialization ensures the array template now correctly processes
+// literals.
+template <>
+LogMessage& LogMessage::operator<< <const wchar_t*>(
+ const wchar_t* absl_nullable const& v);
+
+inline LogMessage& LogMessage::operator<<(wchar_t* absl_nullable v) {
+ return operator<<(const_cast<const wchar_t*>(v));
+}
+
// Helper class so that `AbslStringify()` can modify the LogMessage.
class StringifySink final {
public:
@@ -298,7 +319,7 @@ class StringifySink final {
}
// For types that implement `AbslStringify` using `absl::Format()`.
- friend void AbslFormatFlush(absl::Nonnull<StringifySink*> sink,
+ friend void AbslFormatFlush(StringifySink* absl_nonnull sink,
absl::string_view v) {
sink->Append(v);
}
@@ -308,26 +329,27 @@ class StringifySink final {
};
// Note: the following is declared `ABSL_ATTRIBUTE_NOINLINE`
-template <typename T,
- typename std::enable_if<absl::HasAbslStringify<T>::value, int>::type>
+template <typename T>
LogMessage& LogMessage::operator<<(const T& v) {
- StringifySink sink(*this);
- // Replace with public API.
- AbslStringify(sink, v);
+ if constexpr (absl::HasAbslStringify<T>::value) {
+ StringifySink sink(*this);
+ // Replace with public API.
+ AbslStringify(sink, v);
+ } else {
+ OstreamView view(*data_);
+ view.stream() << log_internal::NullGuard<T>().Guard(v);
+ }
return *this;
}
-// Note: the following is declared `ABSL_ATTRIBUTE_NOINLINE`
-template <typename T,
- typename std::enable_if<!absl::HasAbslStringify<T>::value, int>::type>
-LogMessage& LogMessage::operator<<(const T& v) {
- OstreamView view(*data_);
- view.stream() << log_internal::NullGuard<T>().Guard(v);
+template <int SIZE>
+LogMessage& LogMessage::operator<<(const char (&buf)[SIZE]) {
+ CopyToEncodedBuffer<StringType::kLiteral>(buf);
return *this;
}
template <int SIZE>
-LogMessage& LogMessage::operator<<(const char (&buf)[SIZE]) {
+LogMessage& LogMessage::operator<<(const wchar_t (&buf)[SIZE]) {
CopyToEncodedBuffer<StringType::kLiteral>(buf);
return *this;
}
@@ -355,9 +377,9 @@ extern template LogMessage& LogMessage::operator<<(const unsigned long& v);
extern template LogMessage& LogMessage::operator<<(const long long& v);
extern template LogMessage& LogMessage::operator<<(const unsigned long long& v);
extern template LogMessage& LogMessage::operator<<(
- absl::Nullable<void*> const& v);
+ void* absl_nullable const& v);
extern template LogMessage& LogMessage::operator<<(
- absl::Nullable<const void*> const& v);
+ const void* absl_nullable const& v);
extern template LogMessage& LogMessage::operator<<(const float& v);
extern template LogMessage& LogMessage::operator<<(const double& v);
extern template LogMessage& LogMessage::operator<<(const bool& v);
@@ -373,15 +395,18 @@ LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(char ch,
size_t num);
extern template void LogMessage::CopyToEncodedBuffer<
LogMessage::StringType::kNotLiteral>(char ch, size_t num);
+extern template void LogMessage::CopyToEncodedBuffer<
+ LogMessage::StringType::kLiteral>(std::wstring_view str);
+extern template void LogMessage::CopyToEncodedBuffer<
+ LogMessage::StringType::kNotLiteral>(std::wstring_view str);
// `LogMessageFatal` ensures the process will exit in failure after logging this
// message.
class LogMessageFatal final : public LogMessage {
public:
- LogMessageFatal(absl::Nonnull<const char*> file,
- int line) ABSL_ATTRIBUTE_COLD;
- LogMessageFatal(absl::Nonnull<const char*> file, int line,
- absl::Nonnull<const char*> failure_msg) ABSL_ATTRIBUTE_COLD;
+ LogMessageFatal(const char* absl_nonnull file, int line) ABSL_ATTRIBUTE_COLD;
+ LogMessageFatal(const char* absl_nonnull file, int line,
+ const char* absl_nonnull failure_msg) ABSL_ATTRIBUTE_COLD;
[[noreturn]] ~LogMessageFatal();
};
@@ -390,7 +415,7 @@ class LogMessageFatal final : public LogMessage {
// for DLOG(FATAL) variants.
class LogMessageDebugFatal final : public LogMessage {
public:
- LogMessageDebugFatal(absl::Nonnull<const char*> file,
+ LogMessageDebugFatal(const char* absl_nonnull file,
int line) ABSL_ATTRIBUTE_COLD;
~LogMessageDebugFatal();
};
@@ -400,7 +425,7 @@ class LogMessageQuietlyDebugFatal final : public LogMessage {
// DLOG(QFATAL) calls this instead of LogMessageQuietlyFatal to make sure the
// destructor is not [[noreturn]] even if this is always FATAL as this is only
// invoked when DLOG() is enabled.
- LogMessageQuietlyDebugFatal(absl::Nonnull<const char*> file,
+ LogMessageQuietlyDebugFatal(const char* absl_nonnull file,
int line) ABSL_ATTRIBUTE_COLD;
~LogMessageQuietlyDebugFatal();
};
@@ -408,10 +433,10 @@ class LogMessageQuietlyDebugFatal final : public LogMessage {
// Used for LOG(QFATAL) to make sure it's properly understood as [[noreturn]].
class LogMessageQuietlyFatal final : public LogMessage {
public:
- LogMessageQuietlyFatal(absl::Nonnull<const char*> file,
+ LogMessageQuietlyFatal(const char* absl_nonnull file,
int line) ABSL_ATTRIBUTE_COLD;
- LogMessageQuietlyFatal(absl::Nonnull<const char*> file, int line,
- absl::Nonnull<const char*> failure_msg)
+ LogMessageQuietlyFatal(const char* absl_nonnull file, int line,
+ const char* absl_nonnull failure_msg)
ABSL_ATTRIBUTE_COLD;
[[noreturn]] ~LogMessageQuietlyFatal();
};
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/nullstream.h b/contrib/restricted/abseil-cpp/absl/log/internal/nullstream.h
index 973e91ab686..c87f9aaa550 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/nullstream.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/nullstream.h
@@ -79,6 +79,7 @@ class NullStream {
return *this;
}
NullStream& InternalStream() { return *this; }
+ void Flush() {}
};
template <typename T>
inline NullStream& operator<<(NullStream& str, const T&) {
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/proto.cc b/contrib/restricted/abseil-cpp/absl/log/internal/proto.cc
index 3513b15053e..821be2b76a2 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/proto.cc
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/proto.cc
@@ -123,8 +123,9 @@ bool EncodeBytesTruncate(uint64_t tag, absl::Span<const char> value,
return true;
}
-ABSL_MUST_USE_RESULT absl::Span<char> EncodeMessageStart(
- uint64_t tag, uint64_t max_size, absl::Span<char> *buf) {
+[[nodiscard]] absl::Span<char> EncodeMessageStart(uint64_t tag,
+ uint64_t max_size,
+ absl::Span<char> *buf) {
const uint64_t tag_type = MakeTagType(tag, WireType::kLengthDelimited);
const size_t tag_type_size = VarintSize(tag_type);
max_size = std::min<uint64_t>(max_size, buf->size());
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/proto.h b/contrib/restricted/abseil-cpp/absl/log/internal/proto.h
index 20a9f3a80b9..23f79543231 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/proto.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/proto.h
@@ -169,9 +169,9 @@ inline bool EncodeStringTruncate(uint64_t tag, absl::string_view value,
// safe to pass to `EncodeMessageLength` but need not be.
// Used for string, bytes, message, and packed-repeated field type.
// Consumes up to kMaxVarintSize * 2 bytes (20).
-ABSL_MUST_USE_RESULT absl::Span<char> EncodeMessageStart(uint64_t tag,
- uint64_t max_size,
- absl::Span<char> *buf);
+[[nodiscard]] absl::Span<char> EncodeMessageStart(uint64_t tag,
+ uint64_t max_size,
+ absl::Span<char> *buf);
// Finalizes the length field in `msg` so that it encompasses all data encoded
// since the call to `EncodeMessageStart` which returned `msg`. Does nothing if
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/strip.h b/contrib/restricted/abseil-cpp/absl/log/internal/strip.h
index 3e5501040ca..60ef87825c9 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/strip.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/strip.h
@@ -15,7 +15,8 @@
// -----------------------------------------------------------------------------
// File: log/internal/strip.h
// -----------------------------------------------------------------------------
-//
+
+// SKIP_ABSL_INLINE_NAMESPACE_CHECK
#ifndef ABSL_LOG_INTERNAL_STRIP_H_
#define ABSL_LOG_INTERNAL_STRIP_H_
@@ -31,15 +32,6 @@
// logging in subtly different ways for subtly different reasons (see below).
#if defined(STRIP_LOG) && STRIP_LOG
-// Attribute for marking variables used in implementation details of logging
-// macros as unused, but only when `STRIP_LOG` is defined.
-// With `STRIP_LOG` on, not marking them triggers `-Wunused-but-set-variable`,
-// With `STRIP_LOG` off, marking them triggers `-Wused-but-marked-unused`.
-//
-// TODO(b/290784225): Replace this macro with attribute [[maybe_unused]] when
-// Abseil stops supporting C++14.
-#define ABSL_LOG_INTERNAL_ATTRIBUTE_UNUSED_IF_STRIP_LOG ABSL_ATTRIBUTE_UNUSED
-
#define ABSL_LOGGING_INTERNAL_LOG_INFO ::absl::log_internal::NullStream()
#define ABSL_LOGGING_INTERNAL_LOG_WARNING ::absl::log_internal::NullStream()
#define ABSL_LOGGING_INTERNAL_LOG_ERROR ::absl::log_internal::NullStream()
@@ -62,8 +54,6 @@
#else // !defined(STRIP_LOG) || !STRIP_LOG
-#define ABSL_LOG_INTERNAL_ATTRIBUTE_UNUSED_IF_STRIP_LOG
-
#define ABSL_LOGGING_INTERNAL_LOG_INFO \
::absl::log_internal::LogMessage( \
__FILE__, __LINE__, ::absl::log_internal::LogMessage::InfoTag{})
@@ -105,4 +95,6 @@
#define ABSL_LOGGING_INTERNAL_DLOG_DFATAL ABSL_LOGGING_INTERNAL_LOG_DFATAL
#define ABSL_LOGGING_INTERNAL_DLOG_LEVEL ABSL_LOGGING_INTERNAL_LOG_LEVEL
+#define ABSL_LOGGING_INTERNAL_LOG_DO_NOT_SUBMIT ABSL_LOGGING_INTERNAL_LOG_ERROR
+
#endif // ABSL_LOG_INTERNAL_STRIP_H_
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/structured.h b/contrib/restricted/abseil-cpp/absl/log/internal/structured.h
index 50783dffd21..70b50e2e83f 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/structured.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/structured.h
@@ -34,7 +34,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
-class ABSL_MUST_USE_RESULT AsLiteralImpl final {
+class [[nodiscard]] AsLiteralImpl final {
public:
explicit AsLiteralImpl(absl::string_view str ABSL_ATTRIBUTE_LIFETIME_BOUND)
: str_(str) {}
@@ -66,7 +66,7 @@ enum class StructuredStringType {
// Structured log data for a string and associated structured proto field,
// both of which must outlive this object.
template <StructuredStringType str_type>
-class ABSL_MUST_USE_RESULT AsStructuredStringTypeImpl final {
+class [[nodiscard]] AsStructuredStringTypeImpl final {
public:
constexpr AsStructuredStringTypeImpl(
absl::string_view str ABSL_ATTRIBUTE_LIFETIME_BOUND,
@@ -105,7 +105,7 @@ using AsStructuredNotLiteralImpl =
// Structured log data for a stringifyable type T and associated structured
// proto field, both of which must outlive this object.
template <typename T>
-class ABSL_MUST_USE_RESULT AsStructuredValueImpl final {
+class [[nodiscard]] AsStructuredValueImpl final {
public:
using ValueFormatter = absl::AnyInvocable<std::string(T) const>;
@@ -139,8 +139,6 @@ class ABSL_MUST_USE_RESULT AsStructuredValueImpl final {
}
};
-#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
-
// Template deduction guide so `AsStructuredValueImpl(42, data)` works
// without specifying the template type.
template <typename T>
@@ -155,8 +153,6 @@ AsStructuredValueImpl(
typename AsStructuredValueImpl<T>::ValueFormatter value_formatter)
-> AsStructuredValueImpl<T>;
-#endif // ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
-
} // namespace log_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/vlog_config.h b/contrib/restricted/abseil-cpp/absl/log/internal/vlog_config.h
index b6e322c4688..84e817a3e20 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/vlog_config.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/vlog_config.h
@@ -34,6 +34,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/strings/string_view.h"
@@ -45,7 +46,7 @@ namespace log_internal {
class SyntheticBinary;
class VLogSite;
-int RegisterAndInitialize(VLogSite* v);
+int RegisterAndInitialize(VLogSite* absl_nonnull v);
void UpdateVLogSites();
constexpr int kUseFlag = (std::numeric_limits<int16_t>::min)();
@@ -60,7 +61,7 @@ constexpr int kUseFlag = (std::numeric_limits<int16_t>::min)();
class VLogSite final {
public:
// `f` must not be destroyed until the program exits.
- explicit constexpr VLogSite(const char* f)
+ explicit constexpr VLogSite(const char* absl_nonnull f)
: file_(f), v_(kUninitialized), next_(nullptr) {}
VLogSite(const VLogSite&) = delete;
VLogSite& operator=(const VLogSite&) = delete;
@@ -93,7 +94,7 @@ class VLogSite final {
}
private:
- friend int log_internal::RegisterAndInitialize(VLogSite* v);
+ friend int log_internal::RegisterAndInitialize(VLogSite* absl_nonnull v);
friend void log_internal::UpdateVLogSites();
friend class log_internal::SyntheticBinary;
static constexpr int kUninitialized = (std::numeric_limits<int>::max)();
@@ -116,7 +117,7 @@ class VLogSite final {
ABSL_ATTRIBUTE_NOINLINE bool SlowIsEnabled5(int stale_v);
// This object is too size-sensitive to use absl::string_view.
- const char* const file_;
+ const char* absl_nonnull const file_;
std::atomic<int> v_;
std::atomic<VLogSite*> next_;
};
@@ -130,7 +131,7 @@ int VLogLevel(absl::string_view file);
// Registers a site `v` to get updated as `vmodule` and `v` change. Also
// initializes the site based on their current values, and returns that result.
// Does not allocate memory.
-int RegisterAndInitialize(VLogSite* v);
+int RegisterAndInitialize(VLogSite* absl_nonnull v);
// Allocates memory.
void UpdateVLogSites();
@@ -154,7 +155,8 @@ int PrependVModule(absl::string_view module_pattern, int log_level);
void OnVLogVerbosityUpdate(std::function<void()> cb);
// Does not allocate memory.
-VLogSite* SetVModuleListHeadForTestOnly(VLogSite* v);
+VLogSite* absl_nullable SetVModuleListHeadForTestOnly(
+ VLogSite* absl_nullable v);
} // namespace log_internal
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/voidify.h b/contrib/restricted/abseil-cpp/absl/log/internal/voidify.h
index 8f62da20a04..f42859eba14 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/voidify.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/voidify.h
@@ -16,13 +16,15 @@
// File: log/internal/voidify.h
// -----------------------------------------------------------------------------
//
-// This class is used to explicitly ignore values in the conditional logging
-// macros. This avoids compiler warnings like "value computed is not used" and
-// "statement has no effect".
+// This class does the dispatching of the completed `absl::LogEntry` to
+// applicable `absl::LogSink`s, and is used to explicitly ignore values in the
+// conditional logging macros. This avoids compiler warnings like "value
+// computed is not used" and "statement has no effect".
#ifndef ABSL_LOG_INTERNAL_VOIDIFY_H_
#define ABSL_LOG_INTERNAL_VOIDIFY_H_
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
namespace absl {
@@ -34,7 +36,11 @@ class Voidify final {
// This has to be an operator with a precedence lower than << but higher than
// ?:
template <typename T>
- void operator&&(const T&) const&& {}
+ ABSL_ATTRIBUTE_COLD void operator&&(T&& message) const&& {
+ // The dispatching of the completed `absl::LogEntry` to applicable
+ // `absl::LogSink`s happens here.
+ message.Flush();
+ }
};
} // namespace log_internal
diff --git a/contrib/restricted/abseil-cpp/absl/log/log.h b/contrib/restricted/abseil-cpp/absl/log/log.h
index a4e1d1fe818..f1cab9d0c05 100644
--- a/contrib/restricted/abseil-cpp/absl/log/log.h
+++ b/contrib/restricted/abseil-cpp/absl/log/log.h
@@ -34,6 +34,13 @@
// running registered error handlers.
// * The `DFATAL` pseudo-severity level is defined as `FATAL` in debug mode and
// as `ERROR` otherwise.
+// * The `DO_NOT_SUBMIT` pseudo-severity level is an alias for `ERROR`, and is
+// intended for debugging statements that won't be submitted. The name is
+// chosen to be easy to spot in review and with tools in order to ensure that
+// such statements aren't inadvertently checked in.
+// The contract is that **it may not be checked in**, meaning that no
+// in-contract uses will be affected if we decide in the future to remove it
+// or change what it does.
// Some preprocessor shenanigans are used to ensure that e.g. `LOG(INFO)` has
// the same meaning even if a local symbol or preprocessor macro named `INFO` is
// defined. To specify a severity level using an expression instead of a
@@ -194,6 +201,8 @@
// LOG(INFO) << std::hex << 0xdeadbeef; // logs "0xdeadbeef"
// LOG(INFO) << 0xdeadbeef; // logs "3735928559"
+// SKIP_ABSL_INLINE_NAMESPACE_CHECK
+
#ifndef ABSL_LOG_LOG_H_
#define ABSL_LOG_LOG_H_
@@ -260,44 +269,55 @@
ABSL_LOG_INTERNAL_DLOG_IF_IMPL(_##severity, condition)
// LOG_EVERY_N
+// LOG_FIRST_N
+// LOG_EVERY_POW_2
+// LOG_EVERY_N_SEC
//
-// An instance of `LOG_EVERY_N` increments a hidden zero-initialized counter
-// every time execution passes through it and logs the specified message when
-// the counter's value is a multiple of `n`, doing nothing otherwise. Each
-// instance has its own counter. The counter's value can be logged by streaming
-// the symbol `COUNTER`. `LOG_EVERY_N` is thread-safe.
-// Example:
+// These "stateful" macros log conditionally based on a hidden counter or timer.
+// When the condition is false and no logging is done, streamed operands aren't
+// evaluated either. Each instance has its own state (i.e. counter, timer)
+// that's independent of other instances of the macros. The macros in this
+// family are thread-safe in the sense that they are meant to be called
+// concurrently and will not invoke undefined behavior, however their
+// implementation prioritizes efficiency over exactness and may occasionally log
+// more or less often than specified.
+//
+// * `LOG_EVERY_N` logs the first time and once every `n` times thereafter.
+// * `LOG_FIRST_N` logs the first `n` times and then stops.
+// * `LOG_EVERY_POW_2` logs the first, second, fourth, eighth, etc. times.
+// * `LOG_EVERY_N_SEC` logs the first time and no more than once every `n`
+// seconds thereafter. `n` is passed as a floating point value.
+//
+// The `LOG_IF`... variations with an extra condition evaluate the specified
+// condition first and short-circuit if it is false. For example, an evaluation
+// of `LOG_IF_FIRST_N` does not count against the first `n` if the specified
+// condition is false. Stateful `VLOG`... variations likewise short-circuit
+// if `VLOG` is disabled.
+//
+// An approximate count of the number of times a particular instance's stateful
+// condition has been evaluated (i.e. excluding those where a specified `LOG_IF`
+// condition was false) can be included in the logged message by streaming the
+// symbol `COUNTER`.
+//
+// The `n` parameter need not be a constant. Conditional logging following a
+// change to `n` isn't fully specified, but it should converge on the new value
+// within roughly `max(old_n, new_n)` evaluations or seconds.
+//
+// Examples:
//
// LOG_EVERY_N(WARNING, 1000) << "Got a packet with a bad CRC (" << COUNTER
// << " total)";
+//
+// LOG_EVERY_N_SEC(INFO, 2.5) << "Got " << COUNTER << " cookies so far";
+//
+// LOG_IF_EVERY_N(INFO, (size > 1024), 10) << "Got the " << COUNTER
+// << "th big cookie";
#define LOG_EVERY_N(severity, n) \
ABSL_LOG_INTERNAL_LOG_EVERY_N_IMPL(_##severity, n)
-
-// LOG_FIRST_N
-//
-// `LOG_FIRST_N` behaves like `LOG_EVERY_N` except that the specified message is
-// logged when the counter's value is less than `n`. `LOG_FIRST_N` is
-// thread-safe.
#define LOG_FIRST_N(severity, n) \
ABSL_LOG_INTERNAL_LOG_FIRST_N_IMPL(_##severity, n)
-
-// LOG_EVERY_POW_2
-//
-// `LOG_EVERY_POW_2` behaves like `LOG_EVERY_N` except that the specified
-// message is logged when the counter's value is a power of 2.
-// `LOG_EVERY_POW_2` is thread-safe.
#define LOG_EVERY_POW_2(severity) \
ABSL_LOG_INTERNAL_LOG_EVERY_POW_2_IMPL(_##severity)
-
-// LOG_EVERY_N_SEC
-//
-// An instance of `LOG_EVERY_N_SEC` uses a hidden state variable to log the
-// specified message at most once every `n_seconds`. A hidden counter of
-// executions (whether a message is logged or not) is also maintained and can be
-// logged by streaming the symbol `COUNTER`. `LOG_EVERY_N_SEC` is thread-safe.
-// Example:
-//
-// LOG_EVERY_N_SEC(INFO, 2.5) << "Got " << COUNTER << " cookies so far";
#define LOG_EVERY_N_SEC(severity, n_seconds) \
ABSL_LOG_INTERNAL_LOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
@@ -328,13 +348,6 @@
#define VLOG_EVERY_N_SEC(severity, n_seconds) \
ABSL_LOG_INTERNAL_VLOG_EVERY_N_SEC_IMPL(severity, n_seconds)
-// `LOG_IF_EVERY_N` and friends behave as the corresponding `LOG_EVERY_N`
-// but neither increment a counter nor log a message if condition is false (as
-// `LOG_IF`).
-// Example:
-//
-// LOG_IF_EVERY_N(INFO, (size > 1024), 10) << "Got the " << COUNTER
-// << "th big cookie";
#define LOG_IF_EVERY_N(severity, condition, n) \
ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_IMPL(_##severity, condition, n)
#define LOG_IF_FIRST_N(severity, condition, n) \
diff --git a/contrib/restricted/abseil-cpp/absl/log/log_entry.cc b/contrib/restricted/abseil-cpp/absl/log/log_entry.cc
deleted file mode 100644
index fe58a576b51..00000000000
--- a/contrib/restricted/abseil-cpp/absl/log/log_entry.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-// Copyright 2022 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/log/log_entry.h"
-
-#include "absl/base/config.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr int LogEntry::kNoVerbosityLevel;
-constexpr int LogEntry::kNoVerboseLevel;
-#endif
-
-// https://github.com/abseil/abseil-cpp/issues/1465
-// CMake builds on Apple platforms error when libraries are empty.
-// Our CMake configuration can avoid this error on header-only libraries,
-// but since this library is conditionally empty, including a single
-// variable is an easy workaround.
-#ifdef __APPLE__
-namespace log_internal {
-extern const char kAvoidEmptyLogEntryLibraryWarning;
-const char kAvoidEmptyLogEntryLibraryWarning = 0;
-} // namespace log_internal
-#endif // __APPLE__
-
-ABSL_NAMESPACE_END
-} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/log/log_sink_registry.h b/contrib/restricted/abseil-cpp/absl/log/log_sink_registry.h
index 3aa3bf67953..a3fa9a3db35 100644
--- a/contrib/restricted/abseil-cpp/absl/log/log_sink_registry.h
+++ b/contrib/restricted/abseil-cpp/absl/log/log_sink_registry.h
@@ -44,10 +44,10 @@ ABSL_NAMESPACE_BEGIN
// sink instead which writes them to `stderr`.
//
// Do not call these inside `absl::LogSink::Send`.
-inline void AddLogSink(absl::Nonnull<absl::LogSink*> sink) {
+inline void AddLogSink(absl::LogSink* absl_nonnull sink) {
log_internal::AddLogSink(sink);
}
-inline void RemoveLogSink(absl::Nonnull<absl::LogSink*> sink) {
+inline void RemoveLogSink(absl::LogSink* absl_nonnull sink) {
log_internal::RemoveLogSink(sink);
}
diff --git a/contrib/restricted/abseil-cpp/absl/meta/type_traits.h b/contrib/restricted/abseil-cpp/absl/meta/type_traits.h
index 02da0674a88..02c1e6309ca 100644
--- a/contrib/restricted/abseil-cpp/absl/meta/type_traits.h
+++ b/contrib/restricted/abseil-cpp/absl/meta/type_traits.h
@@ -38,6 +38,7 @@
#include <cstddef>
#include <functional>
#include <string>
+#include <string_view>
#include <type_traits>
#include <vector>
@@ -48,10 +49,6 @@
#include <span> // NOLINT(build/c++20)
#endif
-#ifdef ABSL_HAVE_STD_STRING_VIEW
-#include <string_view>
-#endif
-
// Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17
// feature.
#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
@@ -97,22 +94,6 @@ struct is_detected_impl<typename VoidTImpl<Op<Args...>>::type, Op, Args...> {
template <template <class...> class Op, class... Args>
struct is_detected : is_detected_impl<void, Op, Args...>::type {};
-template <class Enabler, class To, template <class...> class Op, class... Args>
-struct is_detected_convertible_impl {
- using type = std::false_type;
-};
-
-template <class To, template <class...> class Op, class... Args>
-struct is_detected_convertible_impl<
- typename std::enable_if<std::is_convertible<Op<Args...>, To>::value>::type,
- To, Op, Args...> {
- using type = std::true_type;
-};
-
-template <class To, template <class...> class Op, class... Args>
-struct is_detected_convertible
- : is_detected_convertible_impl<void, To, Op, Args...>::type {};
-
} // namespace type_traits_internal
// void_t()
@@ -121,96 +102,32 @@ struct is_detected_convertible
// metafunction allows you to create a general case that maps to `void` while
// allowing specializations that map to specific types.
//
-// This metafunction is designed to be a drop-in replacement for the C++17
-// `std::void_t` metafunction.
-//
-// NOTE: `absl::void_t` does not use the standard-specified implementation so
-// that it can remain compatible with gcc < 5.1. This can introduce slightly
-// different behavior, such as when ordering partial specializations.
+// This metafunction is not 100% compatible with the C++17 `std::void_t`
+// metafunction. It has slightly different behavior, such as when ordering
+// partial specializations. It is recommended to use `std::void_t` instead.
template <typename... Ts>
using void_t = typename type_traits_internal::VoidTImpl<Ts...>::type;
-// conjunction
-//
-// Performs a compile-time logical AND operation on the passed types (which
-// must have `::value` members convertible to `bool`. Short-circuits if it
-// encounters any `false` members (and does not compare the `::value` members
-// of any remaining arguments).
-//
-// This metafunction is designed to be a drop-in replacement for the C++17
-// `std::conjunction` metafunction.
-template <typename... Ts>
-struct conjunction : std::true_type {};
-
-template <typename T, typename... Ts>
-struct conjunction<T, Ts...>
- : std::conditional<T::value, conjunction<Ts...>, T>::type {};
-
-template <typename T>
-struct conjunction<T> : T {};
-
-// disjunction
-//
-// Performs a compile-time logical OR operation on the passed types (which
-// must have `::value` members convertible to `bool`. Short-circuits if it
-// encounters any `true` members (and does not compare the `::value` members
-// of any remaining arguments).
-//
-// This metafunction is designed to be a drop-in replacement for the C++17
-// `std::disjunction` metafunction.
-template <typename... Ts>
-struct disjunction : std::false_type {};
-
-template <typename T, typename... Ts>
-struct disjunction<T, Ts...>
- : std::conditional<T::value, T, disjunction<Ts...>>::type {};
-
-template <typename T>
-struct disjunction<T> : T {};
-
-// negation
-//
-// Performs a compile-time logical NOT operation on the passed type (which
-// must have `::value` members convertible to `bool`.
-//
-// This metafunction is designed to be a drop-in replacement for the C++17
-// `std::negation` metafunction.
-template <typename T>
-struct negation : std::integral_constant<bool, !T::value> {};
-
-// is_function()
-//
-// Determines whether the passed type `T` is a function type.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_function()` metafunction for platforms that have incomplete C++11
-// support (such as libstdc++ 4.x).
-//
-// This metafunction works because appending `const` to a type does nothing to
-// function types and reference types (and forms a const-qualified type
-// otherwise).
-template <typename T>
-struct is_function
- : std::integral_constant<
- bool, !(std::is_reference<T>::value ||
- std::is_const<typename std::add_const<T>::type>::value)> {};
-
-// is_copy_assignable()
-// is_move_assignable()
-// is_trivially_destructible()
-// is_trivially_default_constructible()
-// is_trivially_move_constructible()
-// is_trivially_copy_constructible()
-// is_trivially_move_assignable()
-// is_trivially_copy_assignable()
-//
// Historical note: Abseil once provided implementations of these type traits
// for platforms that lacked full support. New code should prefer to use the
// std variants.
//
// See the documentation for the STL <type_traits> header for more information:
// https://en.cppreference.com/w/cpp/header/type_traits
+using std::add_const_t;
+using std::add_cv_t;
+using std::add_lvalue_reference_t;
+using std::add_pointer_t;
+using std::add_rvalue_reference_t;
+using std::add_volatile_t;
+using std::common_type_t;
+using std::conditional_t;
+using std::conjunction;
+using std::decay_t;
+using std::enable_if_t;
+using std::disjunction;
using std::is_copy_assignable;
+using std::is_function;
using std::is_move_assignable;
using std::is_trivially_copy_assignable;
using std::is_trivially_copy_constructible;
@@ -218,6 +135,17 @@ using std::is_trivially_default_constructible;
using std::is_trivially_destructible;
using std::is_trivially_move_assignable;
using std::is_trivially_move_constructible;
+using std::make_signed_t;
+using std::make_unsigned_t;
+using std::negation;
+using std::remove_all_extents_t;
+using std::remove_const_t;
+using std::remove_cv_t;
+using std::remove_extent_t;
+using std::remove_pointer_t;
+using std::remove_reference_t;
+using std::remove_volatile_t;
+using std::underlying_type_t;
#if defined(__cpp_lib_remove_cvref) && __cpp_lib_remove_cvref >= 201711L
template <typename T>
@@ -240,70 +168,6 @@ template <typename T>
using remove_cvref_t = typename remove_cvref<T>::type;
#endif
-// -----------------------------------------------------------------------------
-// C++14 "_t" trait aliases
-// -----------------------------------------------------------------------------
-
-template <typename T>
-using remove_cv_t = typename std::remove_cv<T>::type;
-
-template <typename T>
-using remove_const_t = typename std::remove_const<T>::type;
-
-template <typename T>
-using remove_volatile_t = typename std::remove_volatile<T>::type;
-
-template <typename T>
-using add_cv_t = typename std::add_cv<T>::type;
-
-template <typename T>
-using add_const_t = typename std::add_const<T>::type;
-
-template <typename T>
-using add_volatile_t = typename std::add_volatile<T>::type;
-
-template <typename T>
-using remove_reference_t = typename std::remove_reference<T>::type;
-
-template <typename T>
-using add_lvalue_reference_t = typename std::add_lvalue_reference<T>::type;
-
-template <typename T>
-using add_rvalue_reference_t = typename std::add_rvalue_reference<T>::type;
-
-template <typename T>
-using remove_pointer_t = typename std::remove_pointer<T>::type;
-
-template <typename T>
-using add_pointer_t = typename std::add_pointer<T>::type;
-
-template <typename T>
-using make_signed_t = typename std::make_signed<T>::type;
-
-template <typename T>
-using make_unsigned_t = typename std::make_unsigned<T>::type;
-
-template <typename T>
-using remove_extent_t = typename std::remove_extent<T>::type;
-
-template <typename T>
-using remove_all_extents_t = typename std::remove_all_extents<T>::type;
-
-template <typename T>
-using decay_t = typename std::decay<T>::type;
-
-template <bool B, typename T = void>
-using enable_if_t = typename std::enable_if<B, T>::type;
-
-template <bool B, typename T, typename F>
-using conditional_t = typename std::conditional<B, T, F>::type;
-
-template <typename... T>
-using common_type_t = typename std::common_type<T...>::type;
-
-template <typename T>
-using underlying_type_t = typename std::underlying_type<T>::type;
-
namespace type_traits_internal {
#if (defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L) || \
@@ -460,11 +324,17 @@ using swap_internal::Swap;
// absl::is_trivially_relocatable<T>
//
+// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2024/p2786r11.html
+//
// Detects whether a type is known to be "trivially relocatable" -- meaning it
// can be relocated from one place to another as if by memcpy/memmove.
// This implies that its object representation doesn't depend on its address,
// and also none of its special member functions do anything strange.
//
+// Note that when relocating the caller code should ensure that if the object is
+// polymorphic, the dynamic type is of the most derived type. Padding bytes
+// should not be copied.
+//
// This trait is conservative. If it's true then the type is definitely
// trivially relocatable, but if it's false then the type may or may not be. For
// example, std::vector<int> is trivially relocatable on every known STL
@@ -482,11 +352,7 @@ using swap_internal::Swap;
//
// Upstream documentation:
//
-// https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__is_trivially_relocatable
-
-// If the compiler offers a builtin that tells us the answer, we can use that.
-// This covers all of the cases in the fallback below, plus types that opt in
-// using e.g. [[clang::trivial_abi]].
+// https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__builtin_is_cpp_trivially_relocatable
//
// Clang on Windows has the builtin, but it falsely claims types with a
// user-provided destructor are trivial (http://b/275003464). So we opt out
@@ -511,15 +377,22 @@ using swap_internal::Swap;
//
// According to https://github.com/abseil/abseil-cpp/issues/1479, this does not
// work with NVCC either.
-#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
- (defined(__cpp_impl_trivially_relocatable) || \
- (!defined(__clang__) && !defined(__APPLE__) && !defined(__NVCC__)))
+#if ABSL_HAVE_BUILTIN(__builtin_is_cpp_trivially_relocatable)
+// https://github.com/llvm/llvm-project/pull/127636#pullrequestreview-2637005293
+// In the current implementation, __builtin_is_cpp_trivially_relocatable will
+// only return true for types that are trivially relocatable according to the
+// standard. Notably, this means that marking a type [[clang::trivial_abi]] aka
+// ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI will have no effect on this trait.
template <class T>
struct is_trivially_relocatable
- : std::integral_constant<bool, __is_trivially_relocatable(T)> {};
+ : std::integral_constant<bool, __builtin_is_cpp_trivially_relocatable(T)> {
+};
#elif ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && defined(__clang__) && \
!(defined(_WIN32) || defined(_WIN64)) && !defined(__APPLE__) && \
!defined(__NVCC__)
+// https://github.com/llvm/llvm-project/pull/139061
+// __is_trivially_relocatable is deprecated.
+// TODO(b/325479096): Remove this case.
template <class T>
struct is_trivially_relocatable
: std::integral_constant<
@@ -640,10 +513,8 @@ template <typename T>
struct IsView : std::integral_constant<bool, std::is_pointer<T>::value ||
IsViewImpl<T>::value> {};
-#ifdef ABSL_HAVE_STD_STRING_VIEW
template <typename Char, typename Traits>
struct IsView<std::basic_string_view<Char, Traits>> : std::true_type {};
-#endif
#ifdef __cpp_lib_span
template <typename T>
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/bits.h b/contrib/restricted/abseil-cpp/absl/numeric/bits.h
index c76454c8fb1..9a0c2290bc8 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/bits.h
+++ b/contrib/restricted/abseil-cpp/absl/numeric/bits.h
@@ -27,6 +27,10 @@
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1355r2.html
// P1956R1:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1956r1.pdf
+// P0463R1:
+// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0463r1.html
+// P1272R4:
+// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1272r4.html
//
// When using a standard library that implements these functions, we use the
// standard library's implementation.
@@ -45,6 +49,7 @@
#endif
#include "absl/base/attributes.h"
+#include "absl/base/internal/endian.h"
#include "absl/numeric/internal/bits.h"
namespace absl {
@@ -63,14 +68,14 @@ using std::rotr;
// Rotating functions
template <class T>
-ABSL_MUST_USE_RESULT constexpr
+[[nodiscard]] constexpr
typename std::enable_if<std::is_unsigned<T>::value, T>::type
rotl(T x, int s) noexcept {
return numeric_internal::RotateLeft(x, s);
}
template <class T>
-ABSL_MUST_USE_RESULT constexpr
+[[nodiscard]] constexpr
typename std::enable_if<std::is_unsigned<T>::value, T>::type
rotr(T x, int s) noexcept {
return numeric_internal::RotateRight(x, s);
@@ -190,6 +195,67 @@ ABSL_INTERNAL_CONSTEXPR_CLZ inline
#endif
+#if defined(__cpp_lib_endian) && __cpp_lib_endian >= 201907L
+
+// https://en.cppreference.com/w/cpp/types/endian
+//
+// Indicates the endianness of all scalar types:
+// * If all scalar types are little-endian, `absl::endian::native` equals
+// absl::endian::little.
+// * If all scalar types are big-endian, `absl::endian::native` equals
+// `absl::endian::big`.
+// * Platforms that use anything else are unsupported.
+using std::endian;
+
+#else
+
+enum class endian {
+ little,
+ big,
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+ native = little
+#elif defined(ABSL_IS_BIG_ENDIAN)
+ native = big
+#else
+#error "Endian detection needs to be set up for this platform"
+#endif
+};
+
+#endif // defined(__cpp_lib_endian) && __cpp_lib_endian >= 201907L
+
+#if defined(__cpp_lib_byteswap) && __cpp_lib_byteswap >= 202110L
+
+// https://en.cppreference.com/w/cpp/numeric/byteswap
+//
+// Reverses the bytes in the given integer value `x`.
+//
+// `absl::byteswap` participates in overload resolution only if `T` satisfies
+// integral, i.e., `T` is an integer type. The program is ill-formed if `T` has
+// padding bits.
+using std::byteswap;
+
+#else
+
+template <class T>
+[[nodiscard]] constexpr T byteswap(T x) noexcept {
+ static_assert(std::is_integral_v<T>,
+ "byteswap requires an integral argument");
+ static_assert(
+ sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
+ "byteswap works only with 8, 16, 32, or 64-bit integers");
+ if constexpr (sizeof(T) == 1) {
+ return x;
+ } else if constexpr (sizeof(T) == 2) {
+ return static_cast<T>(gbswap_16(static_cast<uint16_t>(x)));
+ } else if constexpr (sizeof(T) == 4) {
+ return static_cast<T>(gbswap_32(static_cast<uint32_t>(x)));
+ } else if constexpr (sizeof(T) == 8) {
+ return static_cast<T>(gbswap_64(static_cast<uint64_t>(x)));
+ }
+}
+
+#endif // defined(__cpp_lib_byteswap) && __cpp_lib_byteswap >= 202110L
+
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/int128.cc b/contrib/restricted/abseil-cpp/absl/numeric/int128.cc
index 5d6c68d1281..281bf127a55 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/int128.cc
+++ b/contrib/restricted/abseil-cpp/absl/numeric/int128.cc
@@ -342,55 +342,3 @@ std::ostream& operator<<(std::ostream& os, int128 v) {
ABSL_NAMESPACE_END
} // namespace absl
-
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-namespace std {
-constexpr bool numeric_limits<absl::uint128>::is_specialized;
-constexpr bool numeric_limits<absl::uint128>::is_signed;
-constexpr bool numeric_limits<absl::uint128>::is_integer;
-constexpr bool numeric_limits<absl::uint128>::is_exact;
-constexpr bool numeric_limits<absl::uint128>::has_infinity;
-constexpr bool numeric_limits<absl::uint128>::has_quiet_NaN;
-constexpr bool numeric_limits<absl::uint128>::has_signaling_NaN;
-constexpr float_denorm_style numeric_limits<absl::uint128>::has_denorm;
-constexpr bool numeric_limits<absl::uint128>::has_denorm_loss;
-constexpr float_round_style numeric_limits<absl::uint128>::round_style;
-constexpr bool numeric_limits<absl::uint128>::is_iec559;
-constexpr bool numeric_limits<absl::uint128>::is_bounded;
-constexpr bool numeric_limits<absl::uint128>::is_modulo;
-constexpr int numeric_limits<absl::uint128>::digits;
-constexpr int numeric_limits<absl::uint128>::digits10;
-constexpr int numeric_limits<absl::uint128>::max_digits10;
-constexpr int numeric_limits<absl::uint128>::radix;
-constexpr int numeric_limits<absl::uint128>::min_exponent;
-constexpr int numeric_limits<absl::uint128>::min_exponent10;
-constexpr int numeric_limits<absl::uint128>::max_exponent;
-constexpr int numeric_limits<absl::uint128>::max_exponent10;
-constexpr bool numeric_limits<absl::uint128>::traps;
-constexpr bool numeric_limits<absl::uint128>::tinyness_before;
-
-constexpr bool numeric_limits<absl::int128>::is_specialized;
-constexpr bool numeric_limits<absl::int128>::is_signed;
-constexpr bool numeric_limits<absl::int128>::is_integer;
-constexpr bool numeric_limits<absl::int128>::is_exact;
-constexpr bool numeric_limits<absl::int128>::has_infinity;
-constexpr bool numeric_limits<absl::int128>::has_quiet_NaN;
-constexpr bool numeric_limits<absl::int128>::has_signaling_NaN;
-constexpr float_denorm_style numeric_limits<absl::int128>::has_denorm;
-constexpr bool numeric_limits<absl::int128>::has_denorm_loss;
-constexpr float_round_style numeric_limits<absl::int128>::round_style;
-constexpr bool numeric_limits<absl::int128>::is_iec559;
-constexpr bool numeric_limits<absl::int128>::is_bounded;
-constexpr bool numeric_limits<absl::int128>::is_modulo;
-constexpr int numeric_limits<absl::int128>::digits;
-constexpr int numeric_limits<absl::int128>::digits10;
-constexpr int numeric_limits<absl::int128>::max_digits10;
-constexpr int numeric_limits<absl::int128>::radix;
-constexpr int numeric_limits<absl::int128>::min_exponent;
-constexpr int numeric_limits<absl::int128>::min_exponent10;
-constexpr int numeric_limits<absl::int128>::max_exponent;
-constexpr int numeric_limits<absl::int128>::max_exponent10;
-constexpr bool numeric_limits<absl::int128>::traps;
-constexpr bool numeric_limits<absl::int128>::tinyness_before;
-} // namespace std
-#endif
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h b/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h
index 0917464d6ad..e1d18b86334 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h
+++ b/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h
@@ -71,7 +71,7 @@ constexpr bool IsPowerOf2(unsigned int x) noexcept {
}
template <class T>
-ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
+[[nodiscard]] ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
T x, int s) noexcept {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
@@ -82,7 +82,7 @@ ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
}
template <class T>
-ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateLeft(
+[[nodiscard]] ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateLeft(
T x, int s) noexcept {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
@@ -126,7 +126,11 @@ Popcount(T x) noexcept {
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
static_assert(sizeof(x) <= sizeof(uint64_t), "T is too large");
- return sizeof(x) <= sizeof(uint32_t) ? Popcount32(x) : Popcount64(x);
+ if constexpr (sizeof(x) <= sizeof(uint32_t)) {
+ return Popcount32(x);
+ } else {
+ return Popcount64(x);
+ }
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
diff --git a/contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased.cc b/contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased.cc
index 81d9a757652..918d063aaae 100644
--- a/contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased.cc
+++ b/contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased.cc
@@ -66,7 +66,7 @@ int64_t ExponentialBiased::GetSkipCount(int64_t mean) {
}
double value = std::rint(interval);
bias_ = interval - value;
- return value;
+ return static_cast<int64_t>(value);
}
int64_t ExponentialBiased::GetStride(int64_t mean) {
diff --git a/contrib/restricted/abseil-cpp/absl/random/bit_gen_ref.h b/contrib/restricted/abseil-cpp/absl/random/bit_gen_ref.h
index 40e7b607399..dfce2c481b5 100644
--- a/contrib/restricted/abseil-cpp/absl/random/bit_gen_ref.h
+++ b/contrib/restricted/abseil-cpp/absl/random/bit_gen_ref.h
@@ -31,7 +31,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/distribution_caller.h"
#include "absl/random/internal/fast_uniform_bits.h"
@@ -88,7 +88,7 @@ class MockHelpers;
//
class BitGenRef {
// SFINAE to detect whether the URBG type includes a member matching
- // bool InvokeMock(base_internal::FastTypeIdType, void*, void*).
+ // bool InvokeMock(key_id, args_tuple*, result*).
//
// These live inside BitGenRef so that they have friend access
// to MockingBitGen. (see similar methods in DistributionCaller).
@@ -100,7 +100,7 @@ class BitGenRef {
template <class T>
using invoke_mock_t = decltype(std::declval<T*>()->InvokeMock(
- std::declval<base_internal::FastTypeIdType>(), std::declval<void*>(),
+ std::declval<FastTypeIdType>(), std::declval<void*>(),
std::declval<void*>()));
template <typename T>
@@ -145,8 +145,7 @@ class BitGenRef {
private:
using impl_fn = result_type (*)(uintptr_t);
- using mock_call_fn = bool (*)(uintptr_t, base_internal::FastTypeIdType, void*,
- void*);
+ using mock_call_fn = bool (*)(uintptr_t, FastTypeIdType, void*, void*);
template <typename URBG>
static result_type ImplFn(uintptr_t ptr) {
@@ -158,19 +157,19 @@ class BitGenRef {
// Get a type-erased InvokeMock pointer.
template <typename URBG>
- static bool MockCall(uintptr_t gen_ptr, base_internal::FastTypeIdType type,
- void* result, void* arg_tuple) {
- return reinterpret_cast<URBG*>(gen_ptr)->InvokeMock(type, result,
+ static bool MockCall(uintptr_t gen_ptr, FastTypeIdType key_id, void* result,
+ void* arg_tuple) {
+ return reinterpret_cast<URBG*>(gen_ptr)->InvokeMock(key_id, result,
arg_tuple);
}
- static bool NotAMock(uintptr_t, base_internal::FastTypeIdType, void*, void*) {
+ static bool NotAMock(uintptr_t, FastTypeIdType, void*, void*) {
return false;
}
- inline bool InvokeMock(base_internal::FastTypeIdType type, void* args_tuple,
+ inline bool InvokeMock(FastTypeIdType key_id, void* args_tuple,
void* result) {
if (mock_call_ == NotAMock) return false; // avoids an indirect call.
- return mock_call_(t_erased_gen_ptr_, type, args_tuple, result);
+ return mock_call_(t_erased_gen_ptr_, key_id, args_tuple, result);
}
uintptr_t t_erased_gen_ptr_;
diff --git a/contrib/restricted/abseil-cpp/absl/random/distributions.h b/contrib/restricted/abseil-cpp/absl/random/distributions.h
index b6ade685d4b..dced8950c19 100644
--- a/contrib/restricted/abseil-cpp/absl/random/distributions.h
+++ b/contrib/restricted/abseil-cpp/absl/random/distributions.h
@@ -50,7 +50,6 @@
#include <type_traits>
#include "absl/base/config.h"
-#include "absl/base/internal/inline_variable.h"
#include "absl/meta/type_traits.h"
#include "absl/random/bernoulli_distribution.h"
#include "absl/random/beta_distribution.h"
@@ -68,13 +67,12 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedClosedTag, IntervalClosedClosed,
- {});
-ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedClosedTag, IntervalClosed, {});
-ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedOpenTag, IntervalClosedOpen, {});
-ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenOpenTag, IntervalOpenOpen, {});
-ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenOpenTag, IntervalOpen, {});
-ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenClosedTag, IntervalOpenClosed, {});
+inline constexpr IntervalClosedClosedTag IntervalClosedClosed = {};
+inline constexpr IntervalClosedClosedTag IntervalClosed = {};
+inline constexpr IntervalClosedOpenTag IntervalClosedOpen = {};
+inline constexpr IntervalOpenOpenTag IntervalOpenOpen = {};
+inline constexpr IntervalOpenOpenTag IntervalOpen = {};
+inline constexpr IntervalOpenClosedTag IntervalOpenClosed = {};
// -----------------------------------------------------------------------------
// absl::Uniform<T>(tag, bitgen, lo, hi)
diff --git a/contrib/restricted/abseil-cpp/absl/random/gaussian_distribution.h b/contrib/restricted/abseil-cpp/absl/random/gaussian_distribution.h
index ce84d4a56bc..eb75bfee251 100644
--- a/contrib/restricted/abseil-cpp/absl/random/gaussian_distribution.h
+++ b/contrib/restricted/abseil-cpp/absl/random/gaussian_distribution.h
@@ -244,7 +244,7 @@ inline double gaussian_distribution_base::zignor(
bits); // U(-1, 1)
const double x = j * zg_.x[i];
- // Retangular box. Handles >97% of all cases.
+ // Rectangular box. Handles >97% of all cases.
// For any given box, this handles between 75% and 99% of values.
// Equivalent to U(01) < (x[i+1] / x[i]), and when i == 0, ~93.5%
if (std::abs(x) < zg_.x[i + 1]) {
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/distribution_caller.h b/contrib/restricted/abseil-cpp/absl/random/internal/distribution_caller.h
index 2534ca951de..e84ec8caa17 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/distribution_caller.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/distribution_caller.h
@@ -22,7 +22,7 @@
#include <utility>
#include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
@@ -38,7 +38,7 @@ struct DistributionCaller {
static_assert(!std::is_pointer<URBG>::value,
"You must pass a reference, not a pointer.");
// SFINAE to detect whether the URBG type includes a member matching
- // bool InvokeMock(base_internal::FastTypeIdType, void*, void*).
+ // bool InvokeMock(key_id, args_tuple*, result*).
//
// These live inside BitGenRef so that they have friend access
// to MockingBitGen. (see similar methods in DistributionCaller).
@@ -50,8 +50,8 @@ struct DistributionCaller {
template <class T>
using invoke_mock_t = decltype(std::declval<T*>()->InvokeMock(
- std::declval<::absl::base_internal::FastTypeIdType>(),
- std::declval<void*>(), std::declval<void*>()));
+ std::declval<FastTypeIdType>(), std::declval<void*>(),
+ std::declval<void*>()));
using HasInvokeMock = typename detector<invoke_mock_t, void, URBG>::type;
@@ -74,8 +74,7 @@ struct DistributionCaller {
ArgTupleT arg_tuple(std::forward<Args>(args)...);
ResultT result;
- if (!urbg->InvokeMock(::absl::base_internal::FastTypeId<KeyT>(), &arg_tuple,
- &result)) {
+ if (!urbg->InvokeMock(FastTypeId<KeyT>(), &arg_tuple, &result)) {
auto dist = absl::make_from_tuple<DistrT>(arg_tuple);
result = dist(*urbg);
}
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/pool_urbg.cc b/contrib/restricted/abseil-cpp/absl/random/internal/entropy_pool.cc
index 5aefa7d97b5..fa47d0de6f5 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/pool_urbg.cc
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/entropy_pool.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "absl/random/internal/pool_urbg.h"
+#include "absl/random/internal/entropy_pool.h"
#include <algorithm>
#include <atomic>
@@ -23,15 +23,14 @@
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/config.h"
-#include "absl/base/internal/endian.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
-#include "absl/base/internal/sysinfo.h"
-#include "absl/base/internal/unaligned_access.h"
#include "absl/base/optimization.h"
+#include "absl/base/thread_annotations.h"
#include "absl/random/internal/randen.h"
+#include "absl/random/internal/randen_traits.h"
#include "absl/random/internal/seed_material.h"
#include "absl/random/seed_gen_exception.h"
+#include "absl/types/span.h"
using absl::base_internal::SpinLock;
using absl::base_internal::SpinLockHolder;
@@ -45,9 +44,11 @@ namespace {
// single generator within a RandenPool<T>. It is an internal implementation
// detail, and does not aim to conform to [rand.req.urng].
//
-// NOTE: There are alignment issues when used on ARM, for instance.
-// See the allocation code in PoolAlignedAlloc().
-class RandenPoolEntry {
+// At least 32-byte alignment is required for the state_ array on some ARM
+// platforms. We also want this aligned to a cacheline to eliminate false
+// sharing.
+class alignas(std::max(size_t{ABSL_CACHELINE_SIZE}, size_t{32}))
+ RandenPoolEntry {
public:
static constexpr size_t kState = RandenTraits::kStateBytes / sizeof(uint32_t);
static constexpr size_t kCapacity =
@@ -62,10 +63,6 @@ class RandenPoolEntry {
// Copy bytes into out.
void Fill(uint8_t* out, size_t bytes) ABSL_LOCKS_EXCLUDED(mu_);
- // Returns random bits from the buffer in units of T.
- template <typename T>
- inline T Generate() ABSL_LOCKS_EXCLUDED(mu_);
-
inline void MaybeRefill() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (next_ >= kState) {
next_ = kCapacity;
@@ -73,55 +70,24 @@ class RandenPoolEntry {
}
}
+ inline size_t available() const ABSL_SHARED_LOCKS_REQUIRED(mu_) {
+ return kState - next_;
+ }
+
private:
// Randen URBG state.
- uint32_t state_[kState] ABSL_GUARDED_BY(mu_); // First to satisfy alignment.
+ // At least 32-byte alignment is required by ARM platform code.
+ alignas(32) uint32_t state_[kState] ABSL_GUARDED_BY(mu_);
SpinLock mu_;
const Randen impl_;
size_t next_ ABSL_GUARDED_BY(mu_);
};
-template <>
-inline uint8_t RandenPoolEntry::Generate<uint8_t>() {
- SpinLockHolder l(&mu_);
- MaybeRefill();
- return static_cast<uint8_t>(state_[next_++]);
-}
-
-template <>
-inline uint16_t RandenPoolEntry::Generate<uint16_t>() {
- SpinLockHolder l(&mu_);
- MaybeRefill();
- return static_cast<uint16_t>(state_[next_++]);
-}
-
-template <>
-inline uint32_t RandenPoolEntry::Generate<uint32_t>() {
- SpinLockHolder l(&mu_);
- MaybeRefill();
- return state_[next_++];
-}
-
-template <>
-inline uint64_t RandenPoolEntry::Generate<uint64_t>() {
- SpinLockHolder l(&mu_);
- if (next_ >= kState - 1) {
- next_ = kCapacity;
- impl_.Generate(state_);
- }
- auto p = state_ + next_;
- next_ += 2;
-
- uint64_t result;
- std::memcpy(&result, p, sizeof(result));
- return result;
-}
-
void RandenPoolEntry::Fill(uint8_t* out, size_t bytes) {
SpinLockHolder l(&mu_);
while (bytes > 0) {
MaybeRefill();
- size_t remaining = (kState - next_) * sizeof(state_[0]);
+ size_t remaining = available() * sizeof(state_[0]);
size_t to_copy = std::min(bytes, remaining);
std::memcpy(out, &state_[next_], to_copy);
out += to_copy;
@@ -185,38 +151,17 @@ size_t GetPoolID() {
#endif
}
-// Allocate a RandenPoolEntry with at least 32-byte alignment, which is required
-// by ARM platform code.
-RandenPoolEntry* PoolAlignedAlloc() {
- constexpr size_t kAlignment =
- ABSL_CACHELINE_SIZE > 32 ? ABSL_CACHELINE_SIZE : 32;
-
- // Not all the platforms that we build for have std::aligned_alloc, however
- // since we never free these objects, we can over allocate and munge the
- // pointers to the correct alignment.
- uintptr_t x = reinterpret_cast<uintptr_t>(
- new char[sizeof(RandenPoolEntry) + kAlignment]);
- auto y = x % kAlignment;
- void* aligned = reinterpret_cast<void*>(y == 0 ? x : (x + kAlignment - y));
- return new (aligned) RandenPoolEntry();
-}
-
// Allocate and initialize kPoolSize objects of type RandenPoolEntry.
-//
-// The initialization strategy is to initialize one object directly from
-// OS entropy, then to use that object to seed all of the individual
-// pool instances.
void InitPoolURBG() {
static constexpr size_t kSeedSize =
RandenTraits::kStateBytes / sizeof(uint32_t);
- // Read the seed data from OS entropy once.
+ // Read OS entropy once, and use it to initialize each pool entry.
uint32_t seed_material[kPoolSize * kSeedSize];
- if (!random_internal::ReadSeedMaterialFromOSEntropy(
- absl::MakeSpan(seed_material))) {
- random_internal::ThrowSeedGenException();
+ if (!ReadSeedMaterialFromOSEntropy(absl::MakeSpan(seed_material))) {
+ ThrowSeedGenException();
}
for (size_t i = 0; i < kPoolSize; i++) {
- shared_pools[i] = PoolAlignedAlloc();
+ shared_pools[i] = new RandenPoolEntry();
shared_pools[i]->Init(
absl::MakeSpan(&seed_material[i * kSeedSize], kSeedSize));
}
@@ -230,24 +175,11 @@ RandenPoolEntry* GetPoolForCurrentThread() {
} // namespace
-template <typename T>
-typename RandenPool<T>::result_type RandenPool<T>::Generate() {
- auto* pool = GetPoolForCurrentThread();
- return pool->Generate<T>();
-}
-
-template <typename T>
-void RandenPool<T>::Fill(absl::Span<result_type> data) {
+void GetEntropyFromRandenPool(void* dest, size_t bytes) {
auto* pool = GetPoolForCurrentThread();
- pool->Fill(reinterpret_cast<uint8_t*>(data.data()),
- data.size() * sizeof(result_type));
+ pool->Fill(reinterpret_cast<uint8_t*>(dest), bytes);
}
-template class RandenPool<uint8_t>;
-template class RandenPool<uint16_t>;
-template class RandenPool<uint32_t>;
-template class RandenPool<uint64_t>;
-
} // namespace random_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cord_buffer.cc b/contrib/restricted/abseil-cpp/absl/random/internal/entropy_pool.h
index fad6269cb93..970ef8744cc 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cord_buffer.cc
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/entropy_pool.h
@@ -1,10 +1,10 @@
-// Copyright 2022 The Abseil Authors
+// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// https://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "absl/strings/cord_buffer.h"
+#ifndef ABSL_RANDOM_INTERNAL_ENTROPY_POOL_H_
+#define ABSL_RANDOM_INTERNAL_ENTROPY_POOL_H_
#include <cstddef>
@@ -20,11 +21,15 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
+namespace random_internal {
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr size_t CordBuffer::kDefaultLimit;
-constexpr size_t CordBuffer::kCustomLimit;
-#endif
+// GetEntropyFromRandenPool() is a helper function that fills a memory region
+// with random bytes from the RandenPool. This is used by the absl::BitGen
+// implementation to fill the internal buffer.
+void GetEntropyFromRandenPool(void* dest, size_t bytes);
+} // namespace random_internal
ABSL_NAMESPACE_END
} // namespace absl
+
+#endif // ABSL_RANDOM_INTERNAL_ENTROPY_POOL_H_
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/mock_helpers.h b/contrib/restricted/abseil-cpp/absl/random/internal/mock_helpers.h
index 19d05612ee8..85f7387f4a7 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/mock_helpers.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/mock_helpers.h
@@ -19,7 +19,7 @@
#include <utility>
#include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
#include "absl/types/optional.h"
namespace absl {
@@ -48,7 +48,7 @@ struct NoOpValidator {
// result_type(args...)
//
class MockHelpers {
- using IdType = ::absl::base_internal::FastTypeIdType;
+ using IdType = ::absl::FastTypeIdType;
// Given a key signature type used to index the mock, extract the components.
// KeyT is expected to have the form:
@@ -82,8 +82,7 @@ class MockHelpers {
Args&&... args) {
ArgTupleT arg_tuple(std::forward<Args>(args)...);
ReturnT result;
- if (urbg->InvokeMock(::absl::base_internal::FastTypeId<KeyT>(), &arg_tuple,
- &result)) {
+ if (urbg->InvokeMock(FastTypeId<KeyT>(), &arg_tuple, &result)) {
return result;
}
return absl::nullopt;
@@ -92,9 +91,9 @@ class MockHelpers {
public:
// InvokeMock is private; this provides access for some specialized use cases.
template <typename URBG>
- static inline bool PrivateInvokeMock(URBG* urbg, IdType type,
+ static inline bool PrivateInvokeMock(URBG* urbg, IdType key_id,
void* args_tuple, void* result) {
- return urbg->InvokeMock(type, args_tuple, result);
+ return urbg->InvokeMock(key_id, args_tuple, result);
}
// Invoke a mock for the KeyT (may or may not be a signature).
@@ -138,7 +137,7 @@ class MockHelpers {
m, std::declval<IdType>(), ValidatorT())) {
return m.template RegisterMock<typename KeySignature<KeyT>::result_type,
typename KeySignature<KeyT>::arg_tuple_type>(
- m, ::absl::base_internal::FastTypeId<KeyT>(), ValidatorT());
+ m, ::absl::FastTypeId<KeyT>(), ValidatorT());
}
// Acquire a mock for the KeyT (may or may not be a signature).
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/nonsecure_base.h b/contrib/restricted/abseil-cpp/absl/random/internal/nonsecure_base.h
index c3b80335aed..e8c2bb9fe68 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/nonsecure_base.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/nonsecure_base.h
@@ -16,19 +16,19 @@
#define ABSL_RANDOM_INTERNAL_NONSECURE_BASE_H_
#include <algorithm>
+#include <cstddef>
#include <cstdint>
#include <iterator>
#include <type_traits>
#include <utility>
#include <vector>
-#include "absl/base/macros.h"
+#include "absl/base/config.h"
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
-#include "absl/random/internal/pool_urbg.h"
+#include "absl/random/internal/entropy_pool.h"
#include "absl/random/internal/salted_seed_seq.h"
#include "absl/random/internal/seed_material.h"
-#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -46,8 +46,7 @@ class RandenPoolSeedSeq {
void generate_impl(ContiguousTag, Contiguous begin, Contiguous end) {
const size_t n = static_cast<size_t>(std::distance(begin, end));
auto* a = &(*begin);
- RandenPool<uint8_t>::Fill(
- absl::MakeSpan(reinterpret_cast<uint8_t*>(a), sizeof(*a) * n));
+ GetEntropyFromRandenPool(a, sizeof(*a) * n);
}
// Construct a buffer of size n and fill it with values, then copy
@@ -57,7 +56,7 @@ class RandenPoolSeedSeq {
RandomAccessIterator end) {
const size_t n = std::distance(begin, end);
absl::InlinedVector<uint32_t, 8> data(n, 0);
- RandenPool<uint32_t>::Fill(absl::MakeSpan(data.begin(), data.end()));
+ GetEntropyFromRandenPool(data.begin(), sizeof(data[0]) * n);
std::copy(std::begin(data), std::end(data), begin);
}
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/pool_urbg.h b/contrib/restricted/abseil-cpp/absl/random/internal/pool_urbg.h
deleted file mode 100644
index 05721929f54..00000000000
--- a/contrib/restricted/abseil-cpp/absl/random/internal/pool_urbg.h
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_RANDOM_INTERNAL_POOL_URBG_H_
-#define ABSL_RANDOM_INTERNAL_POOL_URBG_H_
-
-#include <cinttypes>
-#include <limits>
-
-#include "absl/random/internal/traits.h"
-#include "absl/types/span.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace random_internal {
-
-// RandenPool is a thread-safe random number generator [random.req.urbg] that
-// uses an underlying pool of Randen generators to generate values. Each thread
-// has affinity to one instance of the underlying pool generators. Concurrent
-// access is guarded by a spin-lock.
-template <typename T>
-class RandenPool {
- public:
- using result_type = T;
- static_assert(std::is_unsigned<result_type>::value,
- "RandenPool template argument must be a built-in unsigned "
- "integer type");
-
- static constexpr result_type(min)() {
- return (std::numeric_limits<result_type>::min)();
- }
-
- static constexpr result_type(max)() {
- return (std::numeric_limits<result_type>::max)();
- }
-
- RandenPool() {}
-
- // Returns a single value.
- inline result_type operator()() { return Generate(); }
-
- // Fill data with random values.
- static void Fill(absl::Span<result_type> data);
-
- protected:
- // Generate returns a single value.
- static result_type Generate();
-};
-
-extern template class RandenPool<uint8_t>;
-extern template class RandenPool<uint16_t>;
-extern template class RandenPool<uint32_t>;
-extern template class RandenPool<uint64_t>;
-
-// PoolURBG uses an underlying pool of random generators to implement a
-// thread-compatible [random.req.urbg] interface with an internal cache of
-// values.
-template <typename T, size_t kBufferSize>
-class PoolURBG {
- // Inheritance to access the protected static members of RandenPool.
- using unsigned_type = typename make_unsigned_bits<T>::type;
- using PoolType = RandenPool<unsigned_type>;
- using SpanType = absl::Span<unsigned_type>;
-
- static constexpr size_t kInitialBuffer = kBufferSize + 1;
- static constexpr size_t kHalfBuffer = kBufferSize / 2;
-
- public:
- using result_type = T;
-
- static_assert(std::is_unsigned<result_type>::value,
- "PoolURBG must be parameterized by an unsigned integer type");
-
- static_assert(kBufferSize > 1,
- "PoolURBG must be parameterized by a buffer-size > 1");
-
- static_assert(kBufferSize <= 256,
- "PoolURBG must be parameterized by a buffer-size <= 256");
-
- static constexpr result_type(min)() {
- return (std::numeric_limits<result_type>::min)();
- }
-
- static constexpr result_type(max)() {
- return (std::numeric_limits<result_type>::max)();
- }
-
- PoolURBG() : next_(kInitialBuffer) {}
-
- // copy-constructor does not copy cache.
- PoolURBG(const PoolURBG&) : next_(kInitialBuffer) {}
- const PoolURBG& operator=(const PoolURBG&) {
- next_ = kInitialBuffer;
- return *this;
- }
-
- // move-constructor does move cache.
- PoolURBG(PoolURBG&&) = default;
- PoolURBG& operator=(PoolURBG&&) = default;
-
- inline result_type operator()() {
- if (next_ >= kBufferSize) {
- next_ = (kBufferSize > 2 && next_ > kBufferSize) ? kHalfBuffer : 0;
- PoolType::Fill(SpanType(reinterpret_cast<unsigned_type*>(state_ + next_),
- kBufferSize - next_));
- }
- return state_[next_++];
- }
-
- private:
- // Buffer size.
- size_t next_; // index within state_
- result_type state_[kBufferSize];
-};
-
-} // namespace random_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_RANDOM_INTERNAL_POOL_URBG_H_
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/randen_detect.cc b/contrib/restricted/abseil-cpp/absl/random/internal/randen_detect.cc
index 58948d59067..63e8379056e 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/randen_detect.cc
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/randen_detect.cc
@@ -74,7 +74,7 @@ static void __cpuid(int cpu_info[4], int info_type) {
// On linux, just use the c-library getauxval call.
#if defined(ABSL_INTERNAL_USE_LINUX_GETAUXVAL)
-extern "C" unsigned long getauxval(unsigned long type); // NOLINT(runtime/int)
+#include <sys/auxv.h>
static uint32_t GetAuxval(uint32_t hwcap_type) {
return static_cast<uint32_t>(getauxval(hwcap_type));
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/seed_material.cc b/contrib/restricted/abseil-cpp/absl/random/internal/seed_material.cc
index 1041302b587..8099ec73602 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/seed_material.cc
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/seed_material.cc
@@ -23,17 +23,23 @@
#endif
#include <algorithm>
+#include <cassert>
#include <cerrno>
#include <cstdint>
#include <cstdlib>
#include <cstring>
+#include <string>
+#include <vector>
+#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/strings/ascii.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
+#include "absl/types/optional.h"
+#include "absl/types/span.h"
#if defined(__native_client__)
@@ -167,24 +173,27 @@ bool ReadSeedMaterialFromDevURandom(absl::Span<uint32_t> values) {
size_t buffer_size = sizeof(uint32_t) * values.size();
int dev_urandom = open(kEntropyFile, O_RDONLY);
- bool success = (-1 != dev_urandom);
- if (!success) {
+ if (dev_urandom < 0) {
+ ABSL_RAW_LOG(ERROR, "Failed to open /dev/urandom.");
return false;
}
- while (success && buffer_size > 0) {
+ while (buffer_size > 0) {
ssize_t bytes_read = read(dev_urandom, buffer, buffer_size);
int read_error = errno;
- success = (bytes_read > 0);
- if (success) {
- buffer += bytes_read;
- buffer_size -= static_cast<size_t>(bytes_read);
- } else if (bytes_read == -1 && read_error == EINTR) {
- success = true; // Need to try again.
+ if (bytes_read == -1 && read_error == EINTR) {
+ // Interrupted, try again.
+ continue;
+ } else if (bytes_read <= 0) {
+ // EOF, or error.
+ break;
}
+ buffer += bytes_read;
+ buffer_size -= static_cast<size_t>(bytes_read);
}
+
close(dev_urandom);
- return success;
+ return buffer_size == 0;
}
bool ReadSeedMaterialFromOSEntropyImpl(absl::Span<uint32_t> values) {
@@ -251,8 +260,7 @@ absl::optional<uint32_t> GetSaltMaterial() {
static const auto salt_material = []() -> absl::optional<uint32_t> {
uint32_t salt_value = 0;
- if (random_internal::ReadSeedMaterialFromOSEntropy(
- MakeSpan(&salt_value, 1))) {
+ if (ReadSeedMaterialFromOSEntropy(absl::MakeSpan(&salt_value, 1))) {
return salt_value;
}
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/seed_material.h b/contrib/restricted/abseil-cpp/absl/random/internal/seed_material.h
index 4be10e92568..b671a8c3a8f 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/seed_material.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/seed_material.h
@@ -21,7 +21,7 @@
#include <string>
#include <vector>
-#include "absl/base/attributes.h"
+#include "absl/base/config.h"
#include "absl/random/internal/fast_uniform_bits.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
@@ -54,7 +54,7 @@ static_assert(kEntropyBlocksNeeded > 0,
// to the C++ Standard "Seed Sequence" concept [rand.req.seedseq].
//
// If values.data() == nullptr, the behavior is undefined.
-ABSL_MUST_USE_RESULT
+[[nodiscard]]
bool ReadSeedMaterialFromOSEntropy(absl::Span<uint32_t> values);
// Attempts to fill a span of uint32_t-values using variates generated by an
@@ -65,8 +65,8 @@ bool ReadSeedMaterialFromOSEntropy(absl::Span<uint32_t> values);
//
// If urbg == nullptr or values.data() == nullptr, the behavior is undefined.
template <typename URBG>
-ABSL_MUST_USE_RESULT bool ReadSeedMaterialFromURBG(
- URBG* urbg, absl::Span<uint32_t> values) {
+[[nodiscard]] bool ReadSeedMaterialFromURBG(URBG* urbg,
+ absl::Span<uint32_t> values) {
random_internal::FastUniformBits<uint32_t> distr;
assert(urbg != nullptr && values.data() != nullptr);
@@ -94,7 +94,7 @@ void MixIntoSeedMaterial(absl::Span<const uint32_t> sequence,
//
// Salt is obtained only once and stored in static variable.
//
-// May return empty value if optaining the salt was not possible.
+// May return empty value if obtaining the salt was not possible.
absl::optional<uint32_t> GetSaltMaterial();
} // namespace random_internal
diff --git a/contrib/restricted/abseil-cpp/absl/random/mocking_bit_gen.h b/contrib/restricted/abseil-cpp/absl/random/mocking_bit_gen.h
index ba7ceae04aa..1680ff4dacf 100644
--- a/contrib/restricted/abseil-cpp/absl/random/mocking_bit_gen.h
+++ b/contrib/restricted/abseil-cpp/absl/random/mocking_bit_gen.h
@@ -35,7 +35,7 @@
#include "gmock/gmock.h"
#include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
#include "absl/container/flat_hash_map.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/mock_helpers.h"
@@ -175,9 +175,9 @@ class MockingBitGen {
// distribution parameters of the expectation.
template <typename ResultT, typename ArgTupleT, typename SelfT,
typename ValidatorT>
- auto RegisterMock(SelfT&, base_internal::FastTypeIdType type, ValidatorT)
+ auto RegisterMock(SelfT&, FastTypeIdType type, ValidatorT)
-> decltype(GetMockFnType(std::declval<ResultT>(),
- std::declval<ArgTupleT>())) & {
+ std::declval<ArgTupleT>()))& {
using MockFnType = decltype(GetMockFnType(std::declval<ResultT>(),
std::declval<ArgTupleT>()));
@@ -203,8 +203,8 @@ class MockingBitGen {
// MockingBitGen::InvokeMock
//
- // InvokeMock(FastTypeIdType, args, result) is the entrypoint for invoking
- // mocks registered on MockingBitGen.
+ // bool InvokeMock(key_id, args_tuple*, result*) is the entrypoint
+ // for invoking mocks registered on MockingBitGen.
//
// When no mocks are registered on the provided FastTypeIdType, returns false.
// Otherwise attempts to invoke the mock function ResultT(Args...) that
@@ -212,18 +212,16 @@ class MockingBitGen {
// Requires tuple_args to point to a ArgTupleT, which is a std::tuple<Args...>
// used to invoke the mock function.
// Requires result to point to a ResultT, which is the result of the call.
- inline bool InvokeMock(base_internal::FastTypeIdType type, void* args_tuple,
+ inline bool InvokeMock(FastTypeIdType key_id, void* args_tuple,
void* result) {
// Trigger a mock, if there exists one that matches `param`.
- auto it = mocks_.find(type);
+ auto it = mocks_.find(key_id);
if (it == mocks_.end()) return false;
it->second->Apply(args_tuple, result);
return true;
}
- absl::flat_hash_map<base_internal::FastTypeIdType,
- std::unique_ptr<FunctionHolder>>
- mocks_;
+ absl::flat_hash_map<FastTypeIdType, std::unique_ptr<FunctionHolder>> mocks_;
absl::BitGen gen_;
template <typename>
diff --git a/contrib/restricted/abseil-cpp/absl/random/random.h b/contrib/restricted/abseil-cpp/absl/random/random.h
index 767208671c1..b55b3612876 100644
--- a/contrib/restricted/abseil-cpp/absl/random/random.h
+++ b/contrib/restricted/abseil-cpp/absl/random/random.h
@@ -31,12 +31,13 @@
#ifndef ABSL_RANDOM_RANDOM_H_
#define ABSL_RANDOM_RANDOM_H_
+#include <cstdint>
#include <random>
+#include "absl/base/config.h"
#include "absl/random/distributions.h" // IWYU pragma: export
-#include "absl/random/internal/nonsecure_base.h" // IWYU pragma: export
-#include "absl/random/internal/pcg_engine.h" // IWYU pragma: export
-#include "absl/random/internal/pool_urbg.h"
+#include "absl/random/internal/nonsecure_base.h"
+#include "absl/random/internal/pcg_engine.h"
#include "absl/random/internal/randen_engine.h"
#include "absl/random/seed_sequences.h" // IWYU pragma: export
@@ -94,31 +95,46 @@ ABSL_NAMESPACE_BEGIN
// types on modern x86, ARM, and PPC architectures.
//
// This type is thread-compatible, but not thread-safe.
-
-// ---------------------------------------------------------------------------
-// absl::BitGen member functions
-// ---------------------------------------------------------------------------
-
-// absl::BitGen::operator()()
-//
-// Calls the BitGen, returning a generated value.
-
-// absl::BitGen::min()
-//
-// Returns the smallest possible value from this bit generator.
-
-// absl::BitGen::max()
-//
-// Returns the largest possible value from this bit generator.
-
-// absl::BitGen::discard(num)
-//
-// Advances the internal state of this bit generator by `num` times, and
-// discards the intermediate results.
-// ---------------------------------------------------------------------------
-
-using BitGen = random_internal::NonsecureURBGBase<
- random_internal::randen_engine<uint64_t>>;
+class BitGen : private random_internal::NonsecureURBGBase<
+ random_internal::randen_engine<uint64_t>> {
+ using Base = random_internal::NonsecureURBGBase<
+ random_internal::randen_engine<uint64_t>>;
+
+ public:
+ using result_type = typename Base::result_type;
+
+ // BitGen()
+ // BitGen(SeedSequence seed_seq)
+ //
+ // Copy disallowed.
+ // Move allowed.
+ using Base::Base;
+ using Base::operator=;
+
+ // BitGen::min()
+ //
+ // Returns the smallest possible value from this bit generator.
+ using Base::min;
+
+ // BitGen::max()
+ //
+ // Returns the largest possible value from this bit generator.
+ using Base::max;
+
+ // BitGen::discard(num)
+ //
+ // Advances the internal state of this bit generator by `num` times, and
+ // discards the intermediate results.
+ using Base::discard;
+
+ // BitGen::operator()()
+ //
+ // Invoke the URBG, returning a generated value.
+ using Base::operator();
+
+ using Base::operator==;
+ using Base::operator!=;
+};
// -----------------------------------------------------------------------------
// absl::InsecureBitGen
@@ -156,32 +172,51 @@ using BitGen = random_internal::NonsecureURBGBase<
// `absl::InsecureBitGen` is not cryptographically secure.
//
// Prefer `absl::BitGen` over `absl::InsecureBitGen` as the general type is
-// often fast enough for the vast majority of applications.
-
-using InsecureBitGen =
- random_internal::NonsecureURBGBase<random_internal::pcg64_2018_engine>;
-
-// ---------------------------------------------------------------------------
-// absl::InsecureBitGen member functions
-// ---------------------------------------------------------------------------
-
-// absl::InsecureBitGen::operator()()
-//
-// Calls the InsecureBitGen, returning a generated value.
-
-// absl::InsecureBitGen::min()
+// often fast enough for the vast majority of applications. However, it is
+// reasonable to use `absl::InsecureBitGen` in tests or when using a URBG
+// in small isolated tasks such as in `std::shuffle`.
//
-// Returns the smallest possible value from this bit generator.
-
-// absl::InsecureBitGen::max()
-//
-// Returns the largest possible value from this bit generator.
-
-// absl::InsecureBitGen::discard(num)
-//
-// Advances the internal state of this bit generator by `num` times, and
-// discards the intermediate results.
-// ---------------------------------------------------------------------------
+// This type is thread-compatible, but not thread-safe.
+class InsecureBitGen : private random_internal::NonsecureURBGBase<
+ random_internal::pcg64_2018_engine> {
+ using Base =
+ random_internal::NonsecureURBGBase<random_internal::pcg64_2018_engine>;
+
+ public:
+ using result_type = typename Base::result_type;
+
+ // InsecureBitGen()
+ // InsecureBitGen(SeedSequence seed_seq)
+ //
+ // Copy disallowed.
+ // Move allowed.
+ using Base::Base;
+ using Base::operator=;
+
+ // InsecureBitGen::min()
+ //
+ // Returns the smallest possible value from this bit generator.
+ using Base::min;
+
+ // InsecureBitGen::max()
+ //
+ // Returns the largest possible value from this bit generator.
+ using Base::max;
+
+ // InsecureBitGen::discard(num)
+ //
+ // Advances the internal state of this bit generator by `num` times, and
+ // discards the intermediate results.
+ using Base::discard;
+
+ // InsecureBitGen::operator()()
+ //
+ // Invoke the URBG, returning a generated value.
+ using Base::operator();
+
+ using Base::operator==;
+ using Base::operator!=;
+};
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/random/seed_sequences.cc b/contrib/restricted/abseil-cpp/absl/random/seed_sequences.cc
index 426eafd3c8d..316381ba971 100644
--- a/contrib/restricted/abseil-cpp/absl/random/seed_sequences.cc
+++ b/contrib/restricted/abseil-cpp/absl/random/seed_sequences.cc
@@ -14,14 +14,18 @@
#include "absl/random/seed_sequences.h"
-#include "absl/random/internal/pool_urbg.h"
+#include <iterator>
+
+#include "absl/base/config.h"
+#include "absl/random/internal/entropy_pool.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
SeedSeq MakeSeedSeq() {
SeedSeq::result_type seed_material[8];
- random_internal::RandenPool<uint32_t>::Fill(absl::MakeSpan(seed_material));
+ random_internal::GetEntropyFromRandenPool(&seed_material[0],
+ sizeof(seed_material[0]) * 8);
return SeedSeq(std::begin(seed_material), std::end(seed_material));
}
diff --git a/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.cc b/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.cc
index 99bf8faca30..988418904a8 100644
--- a/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.cc
+++ b/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.cc
@@ -189,7 +189,7 @@ bool StatusRep::operator==(const StatusRep& other) const {
return true;
}
-absl::Nonnull<StatusRep*> StatusRep::CloneAndUnref() const {
+StatusRep* absl_nonnull StatusRep::CloneAndUnref() const {
// Optimization: no need to create a clone if we already have a refcount of 1.
if (ref_.load(std::memory_order_acquire) == 1) {
// All StatusRep instances are heap allocated and mutable, therefore this
@@ -235,9 +235,8 @@ absl::StatusCode MapToLocalCode(int value) {
}
}
-absl::Nonnull<const char*> MakeCheckFailString(
- absl::Nonnull<const absl::Status*> status,
- absl::Nonnull<const char*> prefix) {
+const char* absl_nonnull MakeCheckFailString(
+ const absl::Status* absl_nonnull status, const char* absl_nonnull prefix) {
// There's no need to free this string since the process is crashing.
return absl::IgnoreLeak(
new std::string(absl::StrCat(
diff --git a/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.h b/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.h
index fe335b0b7ca..45b90f3648f 100644
--- a/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.h
@@ -100,7 +100,7 @@ class StatusRep {
// Returns an equivalent heap allocated StatusRep with refcount 1.
//
// `this` is not safe to be used after calling as it may have been deleted.
- absl::Nonnull<StatusRep*> CloneAndUnref() const;
+ StatusRep* absl_nonnull CloneAndUnref() const;
private:
mutable std::atomic<int32_t> ref_;
@@ -120,9 +120,8 @@ absl::StatusCode MapToLocalCode(int value);
//
// This is an internal implementation detail for Abseil logging.
ABSL_ATTRIBUTE_PURE_FUNCTION
-absl::Nonnull<const char*> MakeCheckFailString(
- absl::Nonnull<const absl::Status*> status,
- absl::Nonnull<const char*> prefix);
+const char* absl_nonnull MakeCheckFailString(
+ const absl::Status* absl_nonnull status, const char* absl_nonnull prefix);
} // namespace status_internal
diff --git a/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h b/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h
index 67603156a49..e986611396e 100644
--- a/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h
@@ -39,7 +39,8 @@ template <typename T, typename U, typename = void>
struct HasConversionOperatorToStatusOr : std::false_type {};
template <typename T, typename U>
-void test(char (*)[sizeof(std::declval<U>().operator absl::StatusOr<T>())]);
+void test(char (*absl_nullable)[sizeof(
+ std::declval<U>().operator absl::StatusOr<T>())]);
template <typename T, typename U>
struct HasConversionOperatorToStatusOr<T, U, decltype(test<T, U>(0))>
@@ -185,7 +186,7 @@ using IsStatusOrAssignmentValid = absl::conjunction<
class Helper {
public:
// Move type-agnostic error handling to the .cc.
- static void HandleInvalidStatusCtorArg(absl::Nonnull<Status*>);
+ static void HandleInvalidStatusCtorArg(Status* absl_nonnull);
[[noreturn]] static void Crash(const absl::Status& status);
};
@@ -194,7 +195,7 @@ class Helper {
// This abstraction is here mostly for the gcc performance fix.
template <typename T, typename... Args>
ABSL_ATTRIBUTE_NONNULL(1)
-void PlacementNew(absl::Nonnull<void*> p, Args&&... args) {
+void PlacementNew(void* absl_nonnull p, Args&&... args) {
new (p) T(std::forward<Args>(args)...);
}
diff --git a/contrib/restricted/abseil-cpp/absl/status/status.cc b/contrib/restricted/abseil-cpp/absl/status/status.cc
index 745ab889227..963dab6723c 100644
--- a/contrib/restricted/abseil-cpp/absl/status/status.cc
+++ b/contrib/restricted/abseil-cpp/absl/status/status.cc
@@ -91,16 +91,12 @@ std::ostream& operator<<(std::ostream& os, StatusCode code) {
return os << StatusCodeToString(code);
}
-absl::Nonnull<const std::string*> Status::EmptyString() {
+const std::string* absl_nonnull Status::EmptyString() {
static const absl::NoDestructor<std::string> kEmpty;
return kEmpty.get();
}
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr const char Status::kMovedFromString[];
-#endif
-
-absl::Nonnull<const std::string*> Status::MovedFromString() {
+const std::string* absl_nonnull Status::MovedFromString() {
static const absl::NoDestructor<std::string> kMovedFrom(kMovedFromString);
return kMovedFrom.get();
}
@@ -112,7 +108,7 @@ Status::Status(absl::StatusCode code, absl::string_view msg)
}
}
-absl::Nonnull<status_internal::StatusRep*> Status::PrepareToModify(
+status_internal::StatusRep* absl_nonnull Status::PrepareToModify(
uintptr_t rep) {
if (IsInlined(rep)) {
return new status_internal::StatusRep(InlinedRepToCode(rep),
@@ -410,7 +406,7 @@ Status ErrnoToStatus(int error_number, absl::string_view message) {
MessageForErrnoToStatus(error_number, message));
}
-absl::Nonnull<const char*> StatusMessageAsCStr(const Status& status) {
+const char* absl_nonnull StatusMessageAsCStr(const Status& status) {
// As an internal implementation detail, we guarantee that if status.message()
// is non-empty, then the resulting string_view is null terminated.
auto sv_message = status.message();
diff --git a/contrib/restricted/abseil-cpp/absl/status/status.h b/contrib/restricted/abseil-cpp/absl/status/status.h
index 02fd2964854..45168225842 100644
--- a/contrib/restricted/abseil-cpp/absl/status/status.h
+++ b/contrib/restricted/abseil-cpp/absl/status/status.h
@@ -623,15 +623,15 @@ class ABSL_ATTRIBUTE_TRIVIAL_ABI Status final {
// REQUIRES: !ok()
// Ensures rep is not inlined or shared with any other Status.
- static absl::Nonnull<status_internal::StatusRep*> PrepareToModify(
+ static status_internal::StatusRep* absl_nonnull PrepareToModify(
uintptr_t rep);
// MSVC 14.0 limitation requires the const.
static constexpr const char kMovedFromString[] =
"Status accessed after move.";
- static absl::Nonnull<const std::string*> EmptyString();
- static absl::Nonnull<const std::string*> MovedFromString();
+ static const std::string* absl_nonnull EmptyString();
+ static const std::string* absl_nonnull MovedFromString();
// Returns whether rep contains an inlined representation.
// See rep_ for details.
@@ -649,8 +649,8 @@ class ABSL_ATTRIBUTE_TRIVIAL_ABI Status final {
// Converts between StatusRep* and the external uintptr_t representation used
// by rep_. See rep_ for details.
- static uintptr_t PointerToRep(absl::Nonnull<status_internal::StatusRep*> r);
- static absl::Nonnull<const status_internal::StatusRep*> RepToPointer(
+ static uintptr_t PointerToRep(status_internal::StatusRep* absl_nonnull r);
+ static const status_internal::StatusRep* absl_nonnull RepToPointer(
uintptr_t r);
static std::string ToStringSlow(uintptr_t rep, StatusToStringMode mode);
@@ -902,14 +902,14 @@ constexpr uintptr_t Status::MovedFromRep() {
return CodeToInlinedRep(absl::StatusCode::kInternal) | 2;
}
-inline absl::Nonnull<const status_internal::StatusRep*> Status::RepToPointer(
+inline const status_internal::StatusRep* absl_nonnull Status::RepToPointer(
uintptr_t rep) {
assert(!IsInlined(rep));
return reinterpret_cast<const status_internal::StatusRep*>(rep);
}
inline uintptr_t Status::PointerToRep(
- absl::Nonnull<status_internal::StatusRep*> rep) {
+ status_internal::StatusRep* absl_nonnull rep) {
return reinterpret_cast<uintptr_t>(rep);
}
@@ -934,7 +934,7 @@ inline Status CancelledError() { return Status(absl::StatusCode::kCancelled); }
// If the status's message is empty, the empty string is returned.
//
// StatusMessageAsCStr exists for C support. Use `status.message()` in C++.
-absl::Nonnull<const char*> StatusMessageAsCStr(
+const char* absl_nonnull StatusMessageAsCStr(
const Status& status ABSL_ATTRIBUTE_LIFETIME_BOUND);
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/status/status_payload_printer.h b/contrib/restricted/abseil-cpp/absl/status/status_payload_printer.h
index f22255e1c6c..fc555152e9e 100644
--- a/contrib/restricted/abseil-cpp/absl/status/status_payload_printer.h
+++ b/contrib/restricted/abseil-cpp/absl/status/status_payload_printer.h
@@ -35,8 +35,8 @@ namespace status_internal {
// NOTE: This is an internal API and the design is subject to change in the
// future in a non-backward-compatible way. Since it's only meant for debugging
// purpose, you should not rely on it in any critical logic.
-using StatusPayloadPrinter = absl::Nullable<absl::optional<std::string> (*)(
- absl::string_view, const absl::Cord&)>;
+using StatusPayloadPrinter = absl::optional<std::string> (*absl_nullable)(
+ absl::string_view, const absl::Cord&);
// Sets the global payload printer. Only one printer should be set per process.
// If multiple printers are set, it's undefined which one will be used.
diff --git a/contrib/restricted/abseil-cpp/absl/status/statusor.cc b/contrib/restricted/abseil-cpp/absl/status/statusor.cc
index 7e6b334c74b..d8f66a64619 100644
--- a/contrib/restricted/abseil-cpp/absl/status/statusor.cc
+++ b/contrib/restricted/abseil-cpp/absl/status/statusor.cc
@@ -55,7 +55,7 @@ BadStatusOrAccess& BadStatusOrAccess::operator=(BadStatusOrAccess&& other) {
BadStatusOrAccess::BadStatusOrAccess(BadStatusOrAccess&& other)
: status_(std::move(other.status_)) {}
-absl::Nonnull<const char*> BadStatusOrAccess::what() const noexcept {
+const char* absl_nonnull BadStatusOrAccess::what() const noexcept {
InitWhat();
return what_.c_str();
}
@@ -70,7 +70,7 @@ void BadStatusOrAccess::InitWhat() const {
namespace internal_statusor {
-void Helper::HandleInvalidStatusCtorArg(absl::Nonnull<absl::Status*> status) {
+void Helper::HandleInvalidStatusCtorArg(absl::Status* absl_nonnull status) {
const char* kMessage =
"An OK status is not a valid constructor argument to StatusOr<T>";
#ifdef NDEBUG
diff --git a/contrib/restricted/abseil-cpp/absl/status/statusor.h b/contrib/restricted/abseil-cpp/absl/status/statusor.h
index b1da45e6f0e..6142a2f8dd7 100644
--- a/contrib/restricted/abseil-cpp/absl/status/statusor.h
+++ b/contrib/restricted/abseil-cpp/absl/status/statusor.h
@@ -93,7 +93,7 @@ class BadStatusOrAccess : public std::exception {
//
// The pointer of this string is guaranteed to be valid until any non-const
// function is invoked on the exception object.
- absl::Nonnull<const char*> what() const noexcept override;
+ const char* absl_nonnull what() const noexcept override;
// BadStatusOrAccess::status()
//
@@ -464,7 +464,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// Returns a reference to the current `absl::Status` contained within the
// `absl::StatusOr<T>`. If `absl::StatusOr<T>` contains a `T`, then this
// function returns `absl::OkStatus()`.
- const Status& status() const&;
+ ABSL_MUST_USE_RESULT const Status& status() const&;
Status status() &&;
// StatusOr<T>::value()
@@ -520,8 +520,8 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// REQUIRES: `this->ok() == true`, otherwise the behavior is undefined.
//
// Use `this->ok()` to verify that there is a current value.
- const T* operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND;
- T* operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND;
+ const T* absl_nonnull operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND;
+ T* absl_nonnull operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND;
// StatusOr<T>::value_or()
//
@@ -756,13 +756,13 @@ T&& StatusOr<T>::operator*() && {
}
template <typename T>
-absl::Nonnull<const T*> StatusOr<T>::operator->() const {
+const T* absl_nonnull StatusOr<T>::operator->() const {
this->EnsureOk();
return &this->data_;
}
template <typename T>
-absl::Nonnull<T*> StatusOr<T>::operator->() {
+T* absl_nonnull StatusOr<T>::operator->() {
this->EnsureOk();
return &this->data_;
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/ascii.cc b/contrib/restricted/abseil-cpp/absl/strings/ascii.cc
index d15e4249ec1..4cd9ff942bc 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/ascii.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/ascii.cc
@@ -183,8 +183,8 @@ constexpr bool AsciiInAZRangeNaive(unsigned char c) {
}
template <bool ToUpper, bool Naive>
-constexpr void AsciiStrCaseFoldImpl(absl::Nonnull<char*> dst,
- absl::Nullable<const char*> src,
+constexpr void AsciiStrCaseFoldImpl(char* absl_nonnull dst,
+ const char* absl_nullable src,
size_t size) {
// The upper- and lowercase versions of ASCII characters differ by only 1 bit.
// When we need to flip the case, we can xor with this bit to achieve the
@@ -211,18 +211,18 @@ constexpr void AsciiStrCaseFoldImpl(absl::Nonnull<char*> dst,
// strings it's not important).
// `src` may be null iff `size` is zero.
template <bool ToUpper>
-constexpr void AsciiStrCaseFold(absl::Nonnull<char*> dst,
- absl::Nullable<const char*> src, size_t size) {
+constexpr void AsciiStrCaseFold(char* absl_nonnull dst,
+ const char* absl_nullable src, size_t size) {
size < 16 ? AsciiStrCaseFoldImpl<ToUpper, /*Naive=*/true>(dst, src, size)
: AsciiStrCaseFoldImpl<ToUpper, /*Naive=*/false>(dst, src, size);
}
-void AsciiStrToLower(absl::Nonnull<char*> dst, absl::Nullable<const char*> src,
+void AsciiStrToLower(char* absl_nonnull dst, const char* absl_nullable src,
size_t n) {
return AsciiStrCaseFold<false>(dst, src, n);
}
-void AsciiStrToUpper(absl::Nonnull<char*> dst, absl::Nullable<const char*> src,
+void AsciiStrToUpper(char* absl_nonnull dst, const char* absl_nullable src,
size_t n) {
return AsciiStrCaseFold<true>(dst, src, n);
}
@@ -253,17 +253,17 @@ static_assert(ValidateAsciiCasefold() == 0, "error in case conversion");
} // namespace ascii_internal
-void AsciiStrToLower(absl::Nonnull<std::string*> s) {
+void AsciiStrToLower(std::string* absl_nonnull s) {
char* p = &(*s)[0];
return ascii_internal::AsciiStrCaseFold<false>(p, p, s->size());
}
-void AsciiStrToUpper(absl::Nonnull<std::string*> s) {
+void AsciiStrToUpper(std::string* absl_nonnull s) {
char* p = &(*s)[0];
return ascii_internal::AsciiStrCaseFold<true>(p, p, s->size());
}
-void RemoveExtraAsciiWhitespace(absl::Nonnull<std::string*> str) {
+void RemoveExtraAsciiWhitespace(std::string* absl_nonnull str) {
auto stripped = StripAsciiWhitespace(*str);
if (stripped.empty()) {
diff --git a/contrib/restricted/abseil-cpp/absl/strings/ascii.h b/contrib/restricted/abseil-cpp/absl/strings/ascii.h
index d9317eb1133..ca0747e56b8 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/ascii.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/ascii.h
@@ -76,10 +76,10 @@ ABSL_DLL extern const char kToUpper[256];
// Declaration for the array of characters to lower-case characters.
ABSL_DLL extern const char kToLower[256];
-void AsciiStrToLower(absl::Nonnull<char*> dst, absl::Nullable<const char*> src,
+void AsciiStrToLower(char* absl_nonnull dst, const char* absl_nullable src,
size_t n);
-void AsciiStrToUpper(absl::Nonnull<char*> dst, absl::Nullable<const char*> src,
+void AsciiStrToUpper(char* absl_nonnull dst, const char* absl_nullable src,
size_t n);
} // namespace ascii_internal
@@ -185,10 +185,10 @@ inline char ascii_tolower(unsigned char c) {
}
// Converts the characters in `s` to lowercase, changing the contents of `s`.
-void AsciiStrToLower(absl::Nonnull<std::string*> s);
+void AsciiStrToLower(std::string* absl_nonnull s);
// Creates a lowercase string from a given absl::string_view.
-ABSL_MUST_USE_RESULT inline std::string AsciiStrToLower(absl::string_view s) {
+[[nodiscard]] inline std::string AsciiStrToLower(absl::string_view s) {
std::string result;
strings_internal::STLStringResizeUninitialized(&result, s.size());
ascii_internal::AsciiStrToLower(&result[0], s.data(), s.size());
@@ -199,7 +199,7 @@ ABSL_MUST_USE_RESULT inline std::string AsciiStrToLower(absl::string_view s) {
//
// (Template is used to lower priority of this overload.)
template <int&... DoNotSpecify>
-ABSL_MUST_USE_RESULT inline std::string AsciiStrToLower(std::string&& s) {
+[[nodiscard]] inline std::string AsciiStrToLower(std::string&& s) {
std::string result = std::move(s);
absl::AsciiStrToLower(&result);
return result;
@@ -214,10 +214,10 @@ inline char ascii_toupper(unsigned char c) {
}
// Converts the characters in `s` to uppercase, changing the contents of `s`.
-void AsciiStrToUpper(absl::Nonnull<std::string*> s);
+void AsciiStrToUpper(std::string* absl_nonnull s);
// Creates an uppercase string from a given absl::string_view.
-ABSL_MUST_USE_RESULT inline std::string AsciiStrToUpper(absl::string_view s) {
+[[nodiscard]] inline std::string AsciiStrToUpper(absl::string_view s) {
std::string result;
strings_internal::STLStringResizeUninitialized(&result, s.size());
ascii_internal::AsciiStrToUpper(&result[0], s.data(), s.size());
@@ -228,7 +228,7 @@ ABSL_MUST_USE_RESULT inline std::string AsciiStrToUpper(absl::string_view s) {
//
// (Template is used to lower priority of this overload.)
template <int&... DoNotSpecify>
-ABSL_MUST_USE_RESULT inline std::string AsciiStrToUpper(std::string&& s) {
+[[nodiscard]] inline std::string AsciiStrToUpper(std::string&& s) {
std::string result = std::move(s);
absl::AsciiStrToUpper(&result);
return result;
@@ -236,47 +236,47 @@ ABSL_MUST_USE_RESULT inline std::string AsciiStrToUpper(std::string&& s) {
// Returns absl::string_view with whitespace stripped from the beginning of the
// given string_view.
-ABSL_MUST_USE_RESULT inline absl::string_view StripLeadingAsciiWhitespace(
- absl::string_view str) {
+[[nodiscard]] inline absl::string_view StripLeadingAsciiWhitespace(
+ absl::string_view str ABSL_ATTRIBUTE_LIFETIME_BOUND) {
auto it = std::find_if_not(str.begin(), str.end(), absl::ascii_isspace);
return str.substr(static_cast<size_t>(it - str.begin()));
}
// Strips in place whitespace from the beginning of the given string.
-inline void StripLeadingAsciiWhitespace(absl::Nonnull<std::string*> str) {
+inline void StripLeadingAsciiWhitespace(std::string* absl_nonnull str) {
auto it = std::find_if_not(str->begin(), str->end(), absl::ascii_isspace);
str->erase(str->begin(), it);
}
// Returns absl::string_view with whitespace stripped from the end of the given
// string_view.
-ABSL_MUST_USE_RESULT inline absl::string_view StripTrailingAsciiWhitespace(
- absl::string_view str) {
+[[nodiscard]] inline absl::string_view StripTrailingAsciiWhitespace(
+ absl::string_view str ABSL_ATTRIBUTE_LIFETIME_BOUND) {
auto it = std::find_if_not(str.rbegin(), str.rend(), absl::ascii_isspace);
return str.substr(0, static_cast<size_t>(str.rend() - it));
}
// Strips in place whitespace from the end of the given string
-inline void StripTrailingAsciiWhitespace(absl::Nonnull<std::string*> str) {
+inline void StripTrailingAsciiWhitespace(std::string* absl_nonnull str) {
auto it = std::find_if_not(str->rbegin(), str->rend(), absl::ascii_isspace);
str->erase(static_cast<size_t>(str->rend() - it));
}
// Returns absl::string_view with whitespace stripped from both ends of the
// given string_view.
-ABSL_MUST_USE_RESULT inline absl::string_view StripAsciiWhitespace(
- absl::string_view str) {
+[[nodiscard]] inline absl::string_view StripAsciiWhitespace(
+ absl::string_view str ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return StripTrailingAsciiWhitespace(StripLeadingAsciiWhitespace(str));
}
// Strips in place whitespace from both ends of the given string
-inline void StripAsciiWhitespace(absl::Nonnull<std::string*> str) {
+inline void StripAsciiWhitespace(std::string* absl_nonnull str) {
StripTrailingAsciiWhitespace(str);
StripLeadingAsciiWhitespace(str);
}
// Removes leading, trailing, and consecutive internal whitespace.
-void RemoveExtraAsciiWhitespace(absl::Nonnull<std::string*> str);
+void RemoveExtraAsciiWhitespace(std::string* absl_nonnull str);
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/strings/charconv.cc b/contrib/restricted/abseil-cpp/absl/strings/charconv.cc
index 66c12cc73d6..6f367739e9d 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/charconv.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/charconv.cc
@@ -120,7 +120,7 @@ struct FloatTraits<double> {
// Parsing a smaller N will produce something finite.
static constexpr int kEiselLemireMaxExclusiveExp10 = 309;
- static double MakeNan(absl::Nonnull<const char*> tagp) {
+ static double MakeNan(const char* absl_nonnull tagp) {
#if ABSL_HAVE_BUILTIN(__builtin_nan)
// Use __builtin_nan() if available since it has a fix for
// https://bugs.llvm.org/show_bug.cgi?id=37778
@@ -193,7 +193,7 @@ struct FloatTraits<float> {
static constexpr int kEiselLemireMinInclusiveExp10 = -46 - 18;
static constexpr int kEiselLemireMaxExclusiveExp10 = 39;
- static float MakeNan(absl::Nonnull<const char*> tagp) {
+ static float MakeNan(const char* absl_nonnull tagp) {
#if ABSL_HAVE_BUILTIN(__builtin_nanf)
// Use __builtin_nanf() if available since it has a fix for
// https://bugs.llvm.org/show_bug.cgi?id=37778
@@ -345,7 +345,7 @@ int NormalizedShiftSize(int mantissa_width, int binary_exponent) {
// `value` must be wider than the requested bit width.
//
// Returns the number of bits shifted.
-int TruncateToBitWidth(int bit_width, absl::Nonnull<uint128*> value) {
+int TruncateToBitWidth(int bit_width, uint128* absl_nonnull value) {
const int current_bit_width = BitWidth(*value);
const int shift = current_bit_width - bit_width;
*value >>= shift;
@@ -357,7 +357,7 @@ int TruncateToBitWidth(int bit_width, absl::Nonnull<uint128*> value) {
// the appropriate double, and returns true.
template <typename FloatType>
bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative,
- absl::Nonnull<FloatType*> value) {
+ FloatType* absl_nonnull value) {
if (input.type == strings_internal::FloatType::kNan) {
// A bug in gcc would cause the compiler to optimize away the buffer we are
// building below. Declaring the buffer volatile avoids the issue, and has
@@ -389,7 +389,7 @@ bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative,
return true;
}
if (input.mantissa == 0) {
- *value = negative ? -0.0 : 0.0;
+ *value = negative ? -0.0f : 0.0f;
return true;
}
return false;
@@ -403,8 +403,8 @@ bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative,
// number is stored in *value.
template <typename FloatType>
void EncodeResult(const CalculatedFloat& calculated, bool negative,
- absl::Nonnull<absl::from_chars_result*> result,
- absl::Nonnull<FloatType*> value) {
+ absl::from_chars_result* absl_nonnull result,
+ FloatType* absl_nonnull value) {
if (calculated.exponent == kOverflow) {
result->ec = std::errc::result_out_of_range;
*value = negative ? -std::numeric_limits<FloatType>::max()
@@ -412,7 +412,7 @@ void EncodeResult(const CalculatedFloat& calculated, bool negative,
return;
} else if (calculated.mantissa == 0 || calculated.exponent == kUnderflow) {
result->ec = std::errc::result_out_of_range;
- *value = negative ? -0.0 : 0.0;
+ *value = negative ? -0.0f : 0.0f;
return;
}
*value = FloatTraits<FloatType>::Make(
@@ -450,7 +450,7 @@ void EncodeResult(const CalculatedFloat& calculated, bool negative,
// Zero and negative values of `shift` are accepted, in which case the word is
// shifted left, as necessary.
uint64_t ShiftRightAndRound(uint128 value, int shift, bool input_exact,
- absl::Nonnull<bool*> output_exact) {
+ bool* absl_nonnull output_exact) {
if (shift <= 0) {
*output_exact = input_exact;
return static_cast<uint64_t>(value << -shift);
@@ -684,12 +684,11 @@ CalculatedFloat CalculateFromParsedDecimal(
// this function returns false) is both fast and correct.
template <typename FloatType>
bool EiselLemire(const strings_internal::ParsedFloat& input, bool negative,
- absl::Nonnull<FloatType*> value,
- absl::Nonnull<std::errc*> ec) {
+ FloatType* absl_nonnull value, std::errc* absl_nonnull ec) {
uint64_t man = input.mantissa;
int exp10 = input.exponent;
if (exp10 < FloatTraits<FloatType>::kEiselLemireMinInclusiveExp10) {
- *value = negative ? -0.0 : 0.0;
+ *value = negative ? -0.0f : 0.0f;
*ec = std::errc::result_out_of_range;
return true;
} else if (exp10 >= FloatTraits<FloatType>::kEiselLemireMaxExclusiveExp10) {
@@ -842,7 +841,7 @@ bool EiselLemire(const strings_internal::ParsedFloat& input, bool negative,
if (negative) {
ret_bits |= 0x8000000000000000u;
}
- *value = absl::bit_cast<double>(ret_bits);
+ *value = static_cast<FloatType>(absl::bit_cast<double>(ret_bits));
return true;
} else if (FloatTraits<FloatType>::kTargetBits == 32) {
uint32_t ret_bits = (static_cast<uint32_t>(ret_exp2) << 23) |
@@ -850,7 +849,7 @@ bool EiselLemire(const strings_internal::ParsedFloat& input, bool negative,
if (negative) {
ret_bits |= 0x80000000u;
}
- *value = absl::bit_cast<float>(ret_bits);
+ *value = static_cast<FloatType>(absl::bit_cast<float>(ret_bits));
return true;
}
#endif // ABSL_BIT_PACK_FLOATS
@@ -858,9 +857,9 @@ bool EiselLemire(const strings_internal::ParsedFloat& input, bool negative,
}
template <typename FloatType>
-from_chars_result FromCharsImpl(absl::Nonnull<const char*> first,
- absl::Nonnull<const char*> last,
- FloatType& value, chars_format fmt_flags) {
+from_chars_result FromCharsImpl(const char* absl_nonnull first,
+ const char* absl_nonnull last, FloatType& value,
+ chars_format fmt_flags) {
from_chars_result result;
result.ptr = first; // overwritten on successful parse
result.ec = std::errc();
@@ -890,7 +889,7 @@ from_chars_result FromCharsImpl(absl::Nonnull<const char*> first,
result.ec = std::errc::invalid_argument;
} else {
result.ptr = first + 1;
- value = negative ? -0.0 : 0.0;
+ value = negative ? -0.0f : 0.0f;
}
return result;
}
@@ -945,14 +944,14 @@ from_chars_result FromCharsImpl(absl::Nonnull<const char*> first,
}
} // namespace
-from_chars_result from_chars(absl::Nonnull<const char*> first,
- absl::Nonnull<const char*> last, double& value,
+from_chars_result from_chars(const char* absl_nonnull first,
+ const char* absl_nonnull last, double& value,
chars_format fmt) {
return FromCharsImpl(first, last, value, fmt);
}
-from_chars_result from_chars(absl::Nonnull<const char*> first,
- absl::Nonnull<const char*> last, float& value,
+from_chars_result from_chars(const char* absl_nonnull first,
+ const char* absl_nonnull last, float& value,
chars_format fmt) {
return FromCharsImpl(first, last, value, fmt);
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/charconv.h b/contrib/restricted/abseil-cpp/absl/strings/charconv.h
index be250902ad6..e5733f8c251 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/charconv.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/charconv.h
@@ -45,7 +45,7 @@ enum class chars_format {
// characters that were successfully parsed. If none was found, `ptr` is set
// to the `first` argument to from_chars.
struct from_chars_result {
- absl::Nonnull<const char*> ptr;
+ const char* absl_nonnull ptr;
std::errc ec;
};
@@ -77,13 +77,13 @@ struct from_chars_result {
// format that strtod() accepts, except that a "0x" prefix is NOT matched.
// (In particular, in `hex` mode, the input "0xff" results in the largest
// matching pattern "0".)
-absl::from_chars_result from_chars(absl::Nonnull<const char*> first,
- absl::Nonnull<const char*> last,
+absl::from_chars_result from_chars(const char* absl_nonnull first,
+ const char* absl_nonnull last,
double& value, // NOLINT
chars_format fmt = chars_format::general);
-absl::from_chars_result from_chars(absl::Nonnull<const char*> first,
- absl::Nonnull<const char*> last,
+absl::from_chars_result from_chars(const char* absl_nonnull first,
+ const char* absl_nonnull last,
float& value, // NOLINT
chars_format fmt = chars_format::general);
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cord.cc b/contrib/restricted/abseil-cpp/absl/strings/cord.cc
index f0f4f31ac52..e53f914dbbd 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cord.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/cord.cc
@@ -75,21 +75,19 @@ using ::absl::cord_internal::kMinFlatLength;
using ::absl::cord_internal::kInlinedVectorSize;
using ::absl::cord_internal::kMaxBytesToCopy;
-static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
- absl::Nonnull<std::ostream*> os, int indent = 0);
-static bool VerifyNode(absl::Nonnull<CordRep*> root,
- absl::Nonnull<CordRep*> start_node);
+static void DumpNode(CordRep* absl_nonnull nonnull_rep, bool include_data,
+ std::ostream* absl_nonnull os, int indent = 0);
+static bool VerifyNode(CordRep* absl_nonnull root,
+ CordRep* absl_nonnull start_node);
-static inline absl::Nullable<CordRep*> VerifyTree(
- absl::Nullable<CordRep*> node) {
+static inline CordRep* absl_nullable VerifyTree(CordRep* absl_nullable node) {
assert(node == nullptr || VerifyNode(node, node));
static_cast<void>(&VerifyNode);
return node;
}
-static absl::Nonnull<CordRepFlat*> CreateFlat(absl::Nonnull<const char*> data,
- size_t length,
- size_t alloc_hint) {
+static CordRepFlat* absl_nonnull CreateFlat(const char* absl_nonnull data,
+ size_t length, size_t alloc_hint) {
CordRepFlat* flat = CordRepFlat::New(length + alloc_hint);
flat->length = length;
memcpy(flat->Data(), data, length);
@@ -98,8 +96,8 @@ static absl::Nonnull<CordRepFlat*> CreateFlat(absl::Nonnull<const char*> data,
// Creates a new flat or Btree out of the specified array.
// The returned node has a refcount of 1.
-static absl::Nonnull<CordRep*> NewBtree(absl::Nonnull<const char*> data,
- size_t length, size_t alloc_hint) {
+static CordRep* absl_nonnull NewBtree(const char* absl_nonnull data,
+ size_t length, size_t alloc_hint) {
if (length <= kMaxFlatLength) {
return CreateFlat(data, length, alloc_hint);
}
@@ -112,8 +110,8 @@ static absl::Nonnull<CordRep*> NewBtree(absl::Nonnull<const char*> data,
// Create a new tree out of the specified array.
// The returned node has a refcount of 1.
-static absl::Nullable<CordRep*> NewTree(absl::Nullable<const char*> data,
- size_t length, size_t alloc_hint) {
+static CordRep* absl_nullable NewTree(const char* absl_nullable data,
+ size_t length, size_t alloc_hint) {
if (length == 0) return nullptr;
return NewBtree(data, length, alloc_hint);
}
@@ -121,7 +119,7 @@ static absl::Nullable<CordRep*> NewTree(absl::Nullable<const char*> data,
namespace cord_internal {
void InitializeCordRepExternal(absl::string_view data,
- absl::Nonnull<CordRepExternal*> rep) {
+ CordRepExternal* absl_nonnull rep) {
assert(!data.empty());
rep->length = data.size();
rep->tag = EXTERNAL;
@@ -135,7 +133,7 @@ void InitializeCordRepExternal(absl::string_view data,
// and not wasteful, we move the string into an external cord rep, preserving
// the already allocated string contents.
// Requires the provided string length to be larger than `kMaxInline`.
-static absl::Nonnull<CordRep*> CordRepFromString(std::string&& src) {
+static CordRep* absl_nonnull CordRepFromString(std::string&& src) {
assert(src.length() > cord_internal::kMaxInline);
if (
// String is short: copy data to avoid external block overhead.
@@ -163,17 +161,12 @@ static absl::Nonnull<CordRep*> CordRepFromString(std::string&& src) {
// --------------------------------------------------------------------
// Cord::InlineRep functions
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr unsigned char Cord::InlineRep::kMaxInline;
-#endif
-
-inline void Cord::InlineRep::set_data(absl::Nonnull<const char*> data,
- size_t n) {
+inline void Cord::InlineRep::set_data(const char* absl_nonnull data, size_t n) {
static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15");
data_.set_inline_data(data, n);
}
-inline absl::Nonnull<char*> Cord::InlineRep::set_data(size_t n) {
+inline char* absl_nonnull Cord::InlineRep::set_data(size_t n) {
assert(n <= kMaxInline);
ResetToEmpty();
set_inline_size(n);
@@ -197,13 +190,13 @@ inline void Cord::InlineRep::remove_prefix(size_t n) {
// Returns `rep` converted into a CordRepBtree.
// Directly returns `rep` if `rep` is already a CordRepBtree.
-static absl::Nonnull<CordRepBtree*> ForceBtree(CordRep* rep) {
+static CordRepBtree* absl_nonnull ForceBtree(CordRep* rep) {
return rep->IsBtree()
? rep->btree()
: CordRepBtree::Create(cord_internal::RemoveCrcNode(rep));
}
-void Cord::InlineRep::AppendTreeToInlined(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::AppendTreeToInlined(CordRep* absl_nonnull tree,
MethodIdentifier method) {
assert(!is_tree());
if (!data_.is_empty()) {
@@ -213,7 +206,7 @@ void Cord::InlineRep::AppendTreeToInlined(absl::Nonnull<CordRep*> tree,
EmplaceTree(tree, method);
}
-void Cord::InlineRep::AppendTreeToTree(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::AppendTreeToTree(CordRep* absl_nonnull tree,
MethodIdentifier method) {
assert(is_tree());
const CordzUpdateScope scope(data_.cordz_info(), method);
@@ -221,7 +214,7 @@ void Cord::InlineRep::AppendTreeToTree(absl::Nonnull<CordRep*> tree,
SetTree(tree, scope);
}
-void Cord::InlineRep::AppendTree(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::AppendTree(CordRep* absl_nonnull tree,
MethodIdentifier method) {
assert(tree != nullptr);
assert(tree->length != 0);
@@ -233,7 +226,7 @@ void Cord::InlineRep::AppendTree(absl::Nonnull<CordRep*> tree,
}
}
-void Cord::InlineRep::PrependTreeToInlined(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::PrependTreeToInlined(CordRep* absl_nonnull tree,
MethodIdentifier method) {
assert(!is_tree());
if (!data_.is_empty()) {
@@ -243,7 +236,7 @@ void Cord::InlineRep::PrependTreeToInlined(absl::Nonnull<CordRep*> tree,
EmplaceTree(tree, method);
}
-void Cord::InlineRep::PrependTreeToTree(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::PrependTreeToTree(CordRep* absl_nonnull tree,
MethodIdentifier method) {
assert(is_tree());
const CordzUpdateScope scope(data_.cordz_info(), method);
@@ -251,7 +244,7 @@ void Cord::InlineRep::PrependTreeToTree(absl::Nonnull<CordRep*> tree,
SetTree(tree, scope);
}
-void Cord::InlineRep::PrependTree(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::PrependTree(CordRep* absl_nonnull tree,
MethodIdentifier method) {
assert(tree != nullptr);
assert(tree->length != 0);
@@ -267,9 +260,10 @@ void Cord::InlineRep::PrependTree(absl::Nonnull<CordRep*> tree,
// suitable leaf is found, the function will update the length field for all
// nodes to account for the size increase. The append region address will be
// written to region and the actual size increase will be written to size.
-static inline bool PrepareAppendRegion(
- absl::Nonnull<CordRep*> root, absl::Nonnull<absl::Nullable<char*>*> region,
- absl::Nonnull<size_t*> size, size_t max_length) {
+static inline bool PrepareAppendRegion(CordRep* absl_nonnull root,
+ char* absl_nullable* absl_nonnull region,
+ size_t* absl_nonnull size,
+ size_t max_length) {
if (root->IsBtree() && root->refcount.IsOne()) {
Span<char> span = root->btree()->GetAppendBuffer(max_length);
if (!span.empty()) {
@@ -472,11 +466,11 @@ void Cord::InlineRep::AppendArray(absl::string_view src,
CommitTree(root, rep, scope, method);
}
-inline absl::Nonnull<CordRep*> Cord::TakeRep() const& {
+inline CordRep* absl_nonnull Cord::TakeRep() const& {
return CordRep::Ref(contents_.tree());
}
-inline absl::Nonnull<CordRep*> Cord::TakeRep() && {
+inline CordRep* absl_nonnull Cord::TakeRep() && {
CordRep* rep = contents_.tree();
contents_.clear();
return rep;
@@ -534,7 +528,7 @@ inline void Cord::AppendImpl(C&& src) {
contents_.AppendTree(rep, CordzUpdateTracker::kAppendCord);
}
-static CordRep::ExtractResult ExtractAppendBuffer(absl::Nonnull<CordRep*> rep,
+static CordRep::ExtractResult ExtractAppendBuffer(CordRep* absl_nonnull rep,
size_t min_capacity) {
switch (rep->tag) {
case cord_internal::BTREE:
@@ -781,9 +775,9 @@ int ClampResult(int memcmp_res) {
return static_cast<int>(memcmp_res > 0) - static_cast<int>(memcmp_res < 0);
}
-int CompareChunks(absl::Nonnull<absl::string_view*> lhs,
- absl::Nonnull<absl::string_view*> rhs,
- absl::Nonnull<size_t*> size_to_compare) {
+int CompareChunks(absl::string_view* absl_nonnull lhs,
+ absl::string_view* absl_nonnull rhs,
+ size_t* absl_nonnull size_to_compare) {
size_t compared_size = std::min(lhs->size(), rhs->size());
assert(*size_to_compare >= compared_size);
*size_to_compare -= compared_size;
@@ -881,7 +875,7 @@ void Cord::SetExpectedChecksum(uint32_t crc) {
SetCrcCordState(std::move(state));
}
-absl::Nullable<const crc_internal::CrcCordState*> Cord::MaybeGetCrcCordState()
+const crc_internal::CrcCordState* absl_nullable Cord::MaybeGetCrcCordState()
const {
if (!contents_.is_tree() || !contents_.tree()->IsCrc()) {
return nullptr;
@@ -899,8 +893,8 @@ absl::optional<uint32_t> Cord::ExpectedChecksum() const {
inline int Cord::CompareSlowPath(absl::string_view rhs, size_t compared_size,
size_t size_to_compare) const {
- auto advance = [](absl::Nonnull<Cord::ChunkIterator*> it,
- absl::Nonnull<absl::string_view*> chunk) {
+ auto advance = [](Cord::ChunkIterator* absl_nonnull it,
+ absl::string_view* absl_nonnull chunk) {
if (!chunk->empty()) return true;
++*it;
if (it->bytes_remaining_ == 0) return false;
@@ -930,8 +924,8 @@ inline int Cord::CompareSlowPath(absl::string_view rhs, size_t compared_size,
inline int Cord::CompareSlowPath(const Cord& rhs, size_t compared_size,
size_t size_to_compare) const {
- auto advance = [](absl::Nonnull<Cord::ChunkIterator*> it,
- absl::Nonnull<absl::string_view*> chunk) {
+ auto advance = [](Cord::ChunkIterator* absl_nonnull it,
+ absl::string_view* absl_nonnull chunk) {
if (!chunk->empty()) return true;
++*it;
if (it->bytes_remaining_ == 0) return false;
@@ -981,7 +975,9 @@ ResultType GenericCompare(const Cord& lhs, const RHS& rhs,
size_t compared_size = std::min(lhs_chunk.size(), rhs_chunk.size());
assert(size_to_compare >= compared_size);
- int memcmp_res = ::memcmp(lhs_chunk.data(), rhs_chunk.data(), compared_size);
+ int memcmp_res = compared_size > 0 ? ::memcmp(lhs_chunk.data(),
+ rhs_chunk.data(), compared_size)
+ : 0;
if (compared_size == size_to_compare || memcmp_res != 0) {
return ComputeCompareResult<ResultType>(memcmp_res);
}
@@ -1053,7 +1049,7 @@ Cord::operator std::string() const {
return s;
}
-void CopyCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
+void CopyCordToString(const Cord& src, std::string* absl_nonnull dst) {
if (!src.contents_.is_tree()) {
src.contents_.CopyTo(dst);
} else {
@@ -1062,7 +1058,7 @@ void CopyCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
}
}
-void AppendCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
+void AppendCordToString(const Cord& src, std::string* absl_nonnull dst) {
const size_t cur_dst_size = dst->size();
const size_t new_dst_size = cur_dst_size + src.size();
absl::strings_internal::STLStringResizeUninitializedAmortized(dst,
@@ -1071,10 +1067,10 @@ void AppendCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
src.CopyToArrayImpl(append_ptr);
}
-void Cord::CopyToArraySlowPath(absl::Nonnull<char*> dst) const {
+void Cord::CopyToArraySlowPath(char* absl_nonnull dst) const {
assert(contents_.is_tree());
absl::string_view fragment;
- if (GetFlatAux(contents_.tree(), &fragment)) {
+ if (GetFlatAux(contents_.tree(), &fragment) && !fragment.empty()) {
memcpy(dst, fragment.data(), fragment.size());
return;
}
@@ -1397,8 +1393,8 @@ absl::string_view Cord::FlattenSlowPath() {
return absl::string_view(new_buffer, total_size);
}
-/* static */ bool Cord::GetFlatAux(absl::Nonnull<CordRep*> rep,
- absl::Nonnull<absl::string_view*> fragment) {
+/* static */ bool Cord::GetFlatAux(CordRep* absl_nonnull rep,
+ absl::string_view* absl_nonnull fragment) {
assert(rep != nullptr);
if (rep->length == 0) {
*fragment = absl::string_view();
@@ -1432,7 +1428,7 @@ absl::string_view Cord::FlattenSlowPath() {
}
/* static */ void Cord::ForEachChunkAux(
- absl::Nonnull<absl::cord_internal::CordRep*> rep,
+ absl::cord_internal::CordRep* absl_nonnull rep,
absl::FunctionRef<void(absl::string_view)> callback) {
assert(rep != nullptr);
if (rep->length == 0) return;
@@ -1457,8 +1453,8 @@ absl::string_view Cord::FlattenSlowPath() {
}
}
-static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
- absl::Nonnull<std::ostream*> os, int indent) {
+static void DumpNode(CordRep* absl_nonnull nonnull_rep, bool include_data,
+ std::ostream* absl_nonnull os, int indent) {
CordRep* rep = nonnull_rep;
const int kIndentStep = 1;
for (;;) {
@@ -1504,17 +1500,17 @@ static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
}
}
-static std::string ReportError(absl::Nonnull<CordRep*> root,
- absl::Nonnull<CordRep*> node) {
+static std::string ReportError(CordRep* absl_nonnull root,
+ CordRep* absl_nonnull node) {
std::ostringstream buf;
buf << "Error at node " << node << " in:";
DumpNode(root, true, &buf);
return buf.str();
}
-static bool VerifyNode(absl::Nonnull<CordRep*> root,
- absl::Nonnull<CordRep*> start_node) {
- absl::InlinedVector<absl::Nonnull<CordRep*>, 2> worklist;
+static bool VerifyNode(CordRep* absl_nonnull root,
+ CordRep* absl_nonnull start_node) {
+ absl::InlinedVector<CordRep* absl_nonnull, 2> worklist;
worklist.push_back(start_node);
do {
CordRep* node = worklist.back();
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cord.h b/contrib/restricted/abseil-cpp/absl/strings/cord.h
index 1f8aafb5875..7afa419a685 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cord.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/cord.h
@@ -79,6 +79,7 @@
#include "absl/base/optimization.h"
#include "absl/crc/internal/crc_cord_state.h"
#include "absl/functional/function_ref.h"
+#include "absl/hash/internal/weakly_mixed_integer.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/cord_analysis.h"
#include "absl/strings/cord_buffer.h"
@@ -102,8 +103,8 @@ class Cord;
class CordTestPeer;
template <typename Releaser>
Cord MakeCordFromExternal(absl::string_view, Releaser&&);
-void CopyCordToString(const Cord& src, absl::Nonnull<std::string*> dst);
-void AppendCordToString(const Cord& src, absl::Nonnull<std::string*> dst);
+void CopyCordToString(const Cord& src, std::string* absl_nonnull dst);
+void AppendCordToString(const Cord& src, std::string* absl_nonnull dst);
// Cord memory accounting modes
enum class CordMemoryAccounting {
@@ -417,8 +418,7 @@ class Cord {
// guarantee that pointers previously returned by `dst->data()` remain valid
// even if `*dst` had enough capacity to hold `src`. If `*dst` is a new
// object, prefer to simply use the conversion operator to `std::string`.
- friend void CopyCordToString(const Cord& src,
- absl::Nonnull<std::string*> dst);
+ friend void CopyCordToString(const Cord& src, std::string* absl_nonnull dst);
// AppendCordToString()
//
@@ -430,7 +430,7 @@ class Cord {
// `dst->data()`. If `*dst` is a new object, prefer to simply use the
// conversion operator to `std::string`.
friend void AppendCordToString(const Cord& src,
- absl::Nonnull<std::string*> dst);
+ std::string* absl_nonnull dst);
class CharIterator;
@@ -467,7 +467,7 @@ class Cord {
using iterator_category = std::input_iterator_tag;
using value_type = absl::string_view;
using difference_type = ptrdiff_t;
- using pointer = absl::Nonnull<const value_type*>;
+ using pointer = const value_type* absl_nonnull;
using reference = value_type;
ChunkIterator() = default;
@@ -488,13 +488,13 @@ class Cord {
using CordRepBtreeReader = absl::cord_internal::CordRepBtreeReader;
// Constructs a `begin()` iterator from `tree`.
- explicit ChunkIterator(absl::Nonnull<cord_internal::CordRep*> tree);
+ explicit ChunkIterator(cord_internal::CordRep* absl_nonnull tree);
// Constructs a `begin()` iterator from `cord`.
- explicit ChunkIterator(absl::Nonnull<const Cord*> cord);
+ explicit ChunkIterator(const Cord* absl_nonnull cord);
// Initializes this instance from a tree. Invoked by constructors.
- void InitTree(absl::Nonnull<cord_internal::CordRep*> tree);
+ void InitTree(cord_internal::CordRep* absl_nonnull tree);
// Removes `n` bytes from `current_chunk_`. Expects `n` to be smaller than
// `current_chunk_.size()`.
@@ -512,7 +512,7 @@ class Cord {
// The current leaf, or `nullptr` if the iterator points to short data.
// If the current chunk is a substring node, current_leaf_ points to the
// underlying flat or external node.
- absl::Nullable<absl::cord_internal::CordRep*> current_leaf_ = nullptr;
+ absl::cord_internal::CordRep* absl_nullable current_leaf_ = nullptr;
// The number of bytes left in the `Cord` over which we are iterating.
size_t bytes_remaining_ = 0;
@@ -569,13 +569,13 @@ class Cord {
using iterator = ChunkIterator;
using const_iterator = ChunkIterator;
- explicit ChunkRange(absl::Nonnull<const Cord*> cord) : cord_(cord) {}
+ explicit ChunkRange(const Cord* absl_nonnull cord) : cord_(cord) {}
ChunkIterator begin() const;
ChunkIterator end() const;
private:
- absl::Nonnull<const Cord*> cord_;
+ const Cord* absl_nonnull cord_;
};
// Cord::Chunks()
@@ -628,7 +628,7 @@ class Cord {
using iterator_category = std::input_iterator_tag;
using value_type = char;
using difference_type = ptrdiff_t;
- using pointer = absl::Nonnull<const char*>;
+ using pointer = const char* absl_nonnull;
using reference = const char&;
CharIterator() = default;
@@ -642,7 +642,7 @@ class Cord {
friend Cord;
private:
- explicit CharIterator(absl::Nonnull<const Cord*> cord)
+ explicit CharIterator(const Cord* absl_nonnull cord)
: chunk_iterator_(cord) {}
ChunkIterator chunk_iterator_;
@@ -654,14 +654,14 @@ class Cord {
// advanced as a separate `Cord`. `n_bytes` must be less than or equal to the
// number of bytes within the Cord; otherwise, behavior is undefined. It is
// valid to pass `char_end()` and `0`.
- static Cord AdvanceAndRead(absl::Nonnull<CharIterator*> it, size_t n_bytes);
+ static Cord AdvanceAndRead(CharIterator* absl_nonnull it, size_t n_bytes);
// Cord::Advance()
//
// Advances the `Cord::CharIterator` by `n_bytes`. `n_bytes` must be less than
// or equal to the number of bytes remaining within the Cord; otherwise,
// behavior is undefined. It is valid to pass `char_end()` and `0`.
- static void Advance(absl::Nonnull<CharIterator*> it, size_t n_bytes);
+ static void Advance(CharIterator* absl_nonnull it, size_t n_bytes);
// Cord::ChunkRemaining()
//
@@ -670,6 +670,13 @@ class Cord {
// `it` must be dereferenceable.
static absl::string_view ChunkRemaining(const CharIterator& it);
+ // Cord::Distance()
+ //
+ // Returns the distance between `first` and `last`, as if
+ // `std::distance(first, last)` was called.
+ static ptrdiff_t Distance(const CharIterator& first,
+ const CharIterator& last);
+
// Cord::char_begin()
//
// Returns an iterator to the first character of the `Cord`.
@@ -710,13 +717,13 @@ class Cord {
using iterator = CharIterator;
using const_iterator = CharIterator;
- explicit CharRange(absl::Nonnull<const Cord*> cord) : cord_(cord) {}
+ explicit CharRange(const Cord* absl_nonnull cord) : cord_(cord) {}
CharIterator begin() const;
CharIterator end() const;
private:
- absl::Nonnull<const Cord*> cord_;
+ const Cord* absl_nonnull cord_;
};
// Cord::Chars()
@@ -775,7 +782,7 @@ class Cord {
CharIterator Find(const absl::Cord& needle) const;
// Supports absl::Cord as a sink object for absl::Format().
- friend void AbslFormatFlush(absl::Nonnull<absl::Cord*> cord,
+ friend void AbslFormatFlush(absl::Cord* absl_nonnull cord,
absl::string_view part) {
cord->Append(part);
}
@@ -878,7 +885,7 @@ class Cord {
}
#endif
- friend absl::Nullable<const CordzInfo*> GetCordzInfoForTesting(
+ friend const CordzInfo* absl_nullable GetCordzInfoForTesting(
const Cord& cord);
// Calls the provided function once for each cord chunk, in order. Unlike
@@ -907,21 +914,21 @@ class Cord {
InlineRep& operator=(InlineRep&& src) noexcept;
explicit constexpr InlineRep(absl::string_view sv,
- absl::Nullable<CordRep*> rep);
+ CordRep* absl_nullable rep);
- void Swap(absl::Nonnull<InlineRep*> rhs);
+ void Swap(InlineRep* absl_nonnull rhs);
size_t size() const;
// Returns nullptr if holding pointer
- absl::Nullable<const char*> data() const;
+ const char* absl_nullable data() const;
// Discards pointer, if any
- void set_data(absl::Nonnull<const char*> data, size_t n);
- absl::Nonnull<char*> set_data(size_t n); // Write data to the result
+ void set_data(const char* absl_nonnull data, size_t n);
+ char* absl_nonnull set_data(size_t n); // Write data to the result
// Returns nullptr if holding bytes
- absl::Nullable<absl::cord_internal::CordRep*> tree() const;
- absl::Nonnull<absl::cord_internal::CordRep*> as_tree() const;
- absl::Nonnull<const char*> as_chars() const;
+ absl::cord_internal::CordRep* absl_nullable tree() const;
+ absl::cord_internal::CordRep* absl_nonnull as_tree() const;
+ const char* absl_nonnull as_chars() const;
// Returns non-null iff was holding a pointer
- absl::Nullable<absl::cord_internal::CordRep*> clear();
+ absl::cord_internal::CordRep* absl_nullable clear();
// Converts to pointer if necessary.
void reduce_size(size_t n); // REQUIRES: holding data
void remove_prefix(size_t n); // REQUIRES: holding data
@@ -930,58 +937,56 @@ class Cord {
// Creates a CordRepFlat instance from the current inlined data with `extra'
// bytes of desired additional capacity.
- absl::Nonnull<CordRepFlat*> MakeFlatWithExtraCapacity(size_t extra);
+ CordRepFlat* absl_nonnull MakeFlatWithExtraCapacity(size_t extra);
// Sets the tree value for this instance. `rep` must not be null.
// Requires the current instance to hold a tree, and a lock to be held on
// any CordzInfo referenced by this instance. The latter is enforced through
// the CordzUpdateScope argument. If the current instance is sampled, then
// the CordzInfo instance is updated to reference the new `rep` value.
- void SetTree(absl::Nonnull<CordRep*> rep, const CordzUpdateScope& scope);
+ void SetTree(CordRep* absl_nonnull rep, const CordzUpdateScope& scope);
// Identical to SetTree(), except that `rep` is allowed to be null, in
// which case the current instance is reset to an empty value.
- void SetTreeOrEmpty(absl::Nullable<CordRep*> rep,
+ void SetTreeOrEmpty(CordRep* absl_nullable rep,
const CordzUpdateScope& scope);
// Sets the tree value for this instance, and randomly samples this cord.
// This function disregards existing contents in `data_`, and should be
// called when a Cord is 'promoted' from an 'uninitialized' or 'inlined'
// value to a non-inlined (tree / ring) value.
- void EmplaceTree(absl::Nonnull<CordRep*> rep, MethodIdentifier method);
+ void EmplaceTree(CordRep* absl_nonnull rep, MethodIdentifier method);
// Identical to EmplaceTree, except that it copies the parent stack from
// the provided `parent` data if the parent is sampled.
- void EmplaceTree(absl::Nonnull<CordRep*> rep, const InlineData& parent,
+ void EmplaceTree(CordRep* absl_nonnull rep, const InlineData& parent,
MethodIdentifier method);
// Commits the change of a newly created, or updated `rep` root value into
// this cord. `old_rep` indicates the old (inlined or tree) value of the
// cord, and determines if the commit invokes SetTree() or EmplaceTree().
- void CommitTree(absl::Nullable<const CordRep*> old_rep,
- absl::Nonnull<CordRep*> rep, const CordzUpdateScope& scope,
+ void CommitTree(const CordRep* absl_nullable old_rep,
+ CordRep* absl_nonnull rep, const CordzUpdateScope& scope,
MethodIdentifier method);
- void AppendTreeToInlined(absl::Nonnull<CordRep*> tree,
+ void AppendTreeToInlined(CordRep* absl_nonnull tree,
MethodIdentifier method);
- void AppendTreeToTree(absl::Nonnull<CordRep*> tree,
- MethodIdentifier method);
- void AppendTree(absl::Nonnull<CordRep*> tree, MethodIdentifier method);
- void PrependTreeToInlined(absl::Nonnull<CordRep*> tree,
+ void AppendTreeToTree(CordRep* absl_nonnull tree, MethodIdentifier method);
+ void AppendTree(CordRep* absl_nonnull tree, MethodIdentifier method);
+ void PrependTreeToInlined(CordRep* absl_nonnull tree,
MethodIdentifier method);
- void PrependTreeToTree(absl::Nonnull<CordRep*> tree,
- MethodIdentifier method);
- void PrependTree(absl::Nonnull<CordRep*> tree, MethodIdentifier method);
+ void PrependTreeToTree(CordRep* absl_nonnull tree, MethodIdentifier method);
+ void PrependTree(CordRep* absl_nonnull tree, MethodIdentifier method);
bool IsSame(const InlineRep& other) const { return data_ == other.data_; }
// Copies the inline contents into `dst`. Assumes the cord is not empty.
- void CopyTo(absl::Nonnull<std::string*> dst) const {
+ void CopyTo(std::string* absl_nonnull dst) const {
data_.CopyInlineToString(dst);
}
// Copies the inline contents into `dst`. Assumes the cord is not empty.
- void CopyToArray(absl::Nonnull<char*> dst) const;
+ void CopyToArray(char* absl_nonnull dst) const;
bool is_tree() const { return data_.is_tree(); }
@@ -994,12 +999,12 @@ class Cord {
}
// Returns the profiled CordzInfo, or nullptr if not sampled.
- absl::Nullable<absl::cord_internal::CordzInfo*> cordz_info() const {
+ absl::cord_internal::CordzInfo* absl_nullable cordz_info() const {
return data_.cordz_info();
}
// Sets the profiled CordzInfo.
- void set_cordz_info(absl::Nonnull<cord_internal::CordzInfo*> cordz_info) {
+ void set_cordz_info(cord_internal::CordzInfo* absl_nonnull cordz_info) {
assert(cordz_info != nullptr);
data_.set_cordz_info(cordz_info);
}
@@ -1031,19 +1036,19 @@ class Cord {
InlineRep contents_;
// Helper for GetFlat() and TryFlat().
- static bool GetFlatAux(absl::Nonnull<absl::cord_internal::CordRep*> rep,
- absl::Nonnull<absl::string_view*> fragment);
+ static bool GetFlatAux(absl::cord_internal::CordRep* absl_nonnull rep,
+ absl::string_view* absl_nonnull fragment);
// Helper for ForEachChunk().
static void ForEachChunkAux(
- absl::Nonnull<absl::cord_internal::CordRep*> rep,
+ absl::cord_internal::CordRep* absl_nonnull rep,
absl::FunctionRef<void(absl::string_view)> callback);
// The destructor for non-empty Cords.
void DestroyCordSlow();
// Out-of-line implementation of slower parts of logic.
- void CopyToArraySlowPath(absl::Nonnull<char*> dst) const;
+ void CopyToArraySlowPath(char* absl_nonnull dst) const;
int CompareSlowPath(absl::string_view rhs, size_t compared_size,
size_t size_to_compare) const;
int CompareSlowPath(const Cord& rhs, size_t compared_size,
@@ -1060,8 +1065,8 @@ class Cord {
// Returns a new reference to contents_.tree(), or steals an existing
// reference if called on an rvalue.
- absl::Nonnull<absl::cord_internal::CordRep*> TakeRep() const&;
- absl::Nonnull<absl::cord_internal::CordRep*> TakeRep() &&;
+ absl::cord_internal::CordRep* absl_nonnull TakeRep() const&;
+ absl::cord_internal::CordRep* absl_nonnull TakeRep() &&;
// Helper for Append().
template <typename C>
@@ -1093,17 +1098,17 @@ class Cord {
hash_state = combiner.add_buffer(std::move(hash_state), chunk.data(),
chunk.size());
});
- return H::combine(combiner.finalize(std::move(hash_state)), size());
+ return H::combine(combiner.finalize(std::move(hash_state)),
+ hash_internal::WeaklyMixedInteger{size()});
}
friend class CrcCord;
void SetCrcCordState(crc_internal::CrcCordState state);
- absl::Nullable<const crc_internal::CrcCordState*> MaybeGetCrcCordState()
- const;
+ const crc_internal::CrcCordState* absl_nullable MaybeGetCrcCordState() const;
CharIterator FindImpl(CharIterator it, absl::string_view needle) const;
- void CopyToArrayImpl(absl::Nonnull<char*> dst) const;
+ void CopyToArrayImpl(char* absl_nonnull dst) const;
};
ABSL_NAMESPACE_END
@@ -1123,14 +1128,14 @@ namespace cord_internal {
// Does non-template-specific `CordRepExternal` initialization.
// Requires `data` to be non-empty.
void InitializeCordRepExternal(absl::string_view data,
- absl::Nonnull<CordRepExternal*> rep);
+ CordRepExternal* absl_nonnull rep);
// Creates a new `CordRep` that owns `data` and `releaser` and returns a pointer
// to it. Requires `data` to be non-empty.
template <typename Releaser>
// NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
-absl::Nonnull<CordRep*> NewExternalRep(absl::string_view data,
- Releaser&& releaser) {
+CordRep* absl_nonnull NewExternalRep(absl::string_view data,
+ Releaser&& releaser) {
assert(!data.empty());
using ReleaserType = absl::decay_t<Releaser>;
CordRepExternal* rep = new CordRepExternalImpl<ReleaserType>(
@@ -1142,7 +1147,7 @@ absl::Nonnull<CordRep*> NewExternalRep(absl::string_view data,
// Overload for function reference types that dispatches using a function
// pointer because there are no `alignof()` or `sizeof()` a function reference.
// NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
-inline absl::Nonnull<CordRep*> NewExternalRep(
+inline CordRep* absl_nonnull NewExternalRep(
absl::string_view data, void (&releaser)(absl::string_view)) {
return NewExternalRep(data, &releaser);
}
@@ -1166,7 +1171,7 @@ Cord MakeCordFromExternal(absl::string_view data, Releaser&& releaser) {
}
constexpr Cord::InlineRep::InlineRep(absl::string_view sv,
- absl::Nullable<CordRep*> rep)
+ CordRep* absl_nullable rep)
: data_(sv, rep) {}
inline Cord::InlineRep::InlineRep(const Cord::InlineRep& src)
@@ -1205,7 +1210,7 @@ inline Cord::InlineRep& Cord::InlineRep::operator=(
return *this;
}
-inline void Cord::InlineRep::Swap(absl::Nonnull<Cord::InlineRep*> rhs) {
+inline void Cord::InlineRep::Swap(Cord::InlineRep* absl_nonnull rhs) {
if (rhs == this) {
return;
}
@@ -1213,22 +1218,22 @@ inline void Cord::InlineRep::Swap(absl::Nonnull<Cord::InlineRep*> rhs) {
swap(data_, rhs->data_);
}
-inline absl::Nullable<const char*> Cord::InlineRep::data() const {
+inline const char* absl_nullable Cord::InlineRep::data() const {
return is_tree() ? nullptr : data_.as_chars();
}
-inline absl::Nonnull<const char*> Cord::InlineRep::as_chars() const {
+inline const char* absl_nonnull Cord::InlineRep::as_chars() const {
assert(!data_.is_tree());
return data_.as_chars();
}
-inline absl::Nonnull<absl::cord_internal::CordRep*> Cord::InlineRep::as_tree()
+inline absl::cord_internal::CordRep* absl_nonnull Cord::InlineRep::as_tree()
const {
assert(data_.is_tree());
return data_.as_tree();
}
-inline absl::Nullable<absl::cord_internal::CordRep*> Cord::InlineRep::tree()
+inline absl::cord_internal::CordRep* absl_nullable Cord::InlineRep::tree()
const {
if (is_tree()) {
return as_tree();
@@ -1241,7 +1246,7 @@ inline size_t Cord::InlineRep::size() const {
return is_tree() ? as_tree()->length : inline_size();
}
-inline absl::Nonnull<cord_internal::CordRepFlat*>
+inline cord_internal::CordRepFlat* absl_nonnull
Cord::InlineRep::MakeFlatWithExtraCapacity(size_t extra) {
static_assert(cord_internal::kMinFlatLength >= sizeof(data_), "");
size_t len = data_.inline_size();
@@ -1251,21 +1256,21 @@ Cord::InlineRep::MakeFlatWithExtraCapacity(size_t extra) {
return result;
}
-inline void Cord::InlineRep::EmplaceTree(absl::Nonnull<CordRep*> rep,
+inline void Cord::InlineRep::EmplaceTree(CordRep* absl_nonnull rep,
MethodIdentifier method) {
assert(rep);
data_.make_tree(rep);
CordzInfo::MaybeTrackCord(data_, method);
}
-inline void Cord::InlineRep::EmplaceTree(absl::Nonnull<CordRep*> rep,
+inline void Cord::InlineRep::EmplaceTree(CordRep* absl_nonnull rep,
const InlineData& parent,
MethodIdentifier method) {
data_.make_tree(rep);
CordzInfo::MaybeTrackCord(data_, parent, method);
}
-inline void Cord::InlineRep::SetTree(absl::Nonnull<CordRep*> rep,
+inline void Cord::InlineRep::SetTree(CordRep* absl_nonnull rep,
const CordzUpdateScope& scope) {
assert(rep);
assert(data_.is_tree());
@@ -1273,7 +1278,7 @@ inline void Cord::InlineRep::SetTree(absl::Nonnull<CordRep*> rep,
scope.SetCordRep(rep);
}
-inline void Cord::InlineRep::SetTreeOrEmpty(absl::Nullable<CordRep*> rep,
+inline void Cord::InlineRep::SetTreeOrEmpty(CordRep* absl_nullable rep,
const CordzUpdateScope& scope) {
assert(data_.is_tree());
if (rep) {
@@ -1284,8 +1289,8 @@ inline void Cord::InlineRep::SetTreeOrEmpty(absl::Nullable<CordRep*> rep,
scope.SetCordRep(rep);
}
-inline void Cord::InlineRep::CommitTree(absl::Nullable<const CordRep*> old_rep,
- absl::Nonnull<CordRep*> rep,
+inline void Cord::InlineRep::CommitTree(const CordRep* absl_nullable old_rep,
+ CordRep* absl_nonnull rep,
const CordzUpdateScope& scope,
MethodIdentifier method) {
if (old_rep) {
@@ -1295,7 +1300,7 @@ inline void Cord::InlineRep::CommitTree(absl::Nullable<const CordRep*> old_rep,
}
}
-inline absl::Nullable<absl::cord_internal::CordRep*> Cord::InlineRep::clear() {
+inline absl::cord_internal::CordRep* absl_nullable Cord::InlineRep::clear() {
if (is_tree()) {
CordzInfo::MaybeUntrackCord(cordz_info());
}
@@ -1304,7 +1309,7 @@ inline absl::Nullable<absl::cord_internal::CordRep*> Cord::InlineRep::clear() {
return result;
}
-inline void Cord::InlineRep::CopyToArray(absl::Nonnull<char*> dst) const {
+inline void Cord::InlineRep::CopyToArray(char* absl_nonnull dst) const {
assert(!is_tree());
size_t n = inline_size();
assert(n != 0);
@@ -1488,7 +1493,7 @@ inline bool Cord::StartsWith(absl::string_view rhs) const {
return EqualsImpl(rhs, rhs_size);
}
-inline void Cord::CopyToArrayImpl(absl::Nonnull<char*> dst) const {
+inline void Cord::CopyToArrayImpl(char* absl_nonnull dst) const {
if (!contents_.is_tree()) {
if (!empty()) contents_.CopyToArray(dst);
} else {
@@ -1497,7 +1502,7 @@ inline void Cord::CopyToArrayImpl(absl::Nonnull<char*> dst) const {
}
inline void Cord::ChunkIterator::InitTree(
- absl::Nonnull<cord_internal::CordRep*> tree) {
+ cord_internal::CordRep* absl_nonnull tree) {
tree = cord_internal::SkipCrcNode(tree);
if (tree->tag == cord_internal::BTREE) {
current_chunk_ = btree_reader_.Init(tree->btree());
@@ -1508,12 +1513,12 @@ inline void Cord::ChunkIterator::InitTree(
}
inline Cord::ChunkIterator::ChunkIterator(
- absl::Nonnull<cord_internal::CordRep*> tree) {
+ cord_internal::CordRep* absl_nonnull tree) {
bytes_remaining_ = tree->length;
InitTree(tree);
}
-inline Cord::ChunkIterator::ChunkIterator(absl::Nonnull<const Cord*> cord) {
+inline Cord::ChunkIterator::ChunkIterator(const Cord* absl_nonnull cord) {
if (CordRep* tree = cord->contents_.tree()) {
bytes_remaining_ = tree->length;
if (ABSL_PREDICT_TRUE(bytes_remaining_ != 0)) {
@@ -1649,13 +1654,13 @@ inline Cord::CharIterator::reference Cord::CharIterator::operator*() const {
return *chunk_iterator_->data();
}
-inline Cord Cord::AdvanceAndRead(absl::Nonnull<CharIterator*> it,
+inline Cord Cord::AdvanceAndRead(CharIterator* absl_nonnull it,
size_t n_bytes) {
assert(it != nullptr);
return it->chunk_iterator_.AdvanceAndReadBytes(n_bytes);
}
-inline void Cord::Advance(absl::Nonnull<CharIterator*> it, size_t n_bytes) {
+inline void Cord::Advance(CharIterator* absl_nonnull it, size_t n_bytes) {
assert(it != nullptr);
it->chunk_iterator_.AdvanceBytes(n_bytes);
}
@@ -1664,6 +1669,12 @@ inline absl::string_view Cord::ChunkRemaining(const CharIterator& it) {
return *it.chunk_iterator_;
}
+inline ptrdiff_t Cord::Distance(const CharIterator& first,
+ const CharIterator& last) {
+ return static_cast<ptrdiff_t>(first.chunk_iterator_.bytes_remaining_ -
+ last.chunk_iterator_.bytes_remaining_);
+}
+
inline Cord::CharIterator Cord::char_begin() const {
return CharIterator(this);
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cord_analysis.cc b/contrib/restricted/abseil-cpp/absl/strings/cord_analysis.cc
index 19b0fa44364..dcbc826c6e6 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cord_analysis.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/cord_analysis.cc
@@ -39,15 +39,15 @@ enum class Mode { kFairShare, kTotal, kTotalMorePrecise };
template <Mode mode>
struct CordRepRef {
// Instantiates a CordRepRef instance.
- explicit CordRepRef(absl::Nonnull<const CordRep*> r) : rep(r) {}
+ explicit CordRepRef(const CordRep* absl_nonnull r) : rep(r) {}
// Creates a child reference holding the provided child.
// Overloaded to add cumulative reference count for kFairShare.
- CordRepRef Child(absl::Nonnull<const CordRep*> child) const {
+ CordRepRef Child(const CordRep* absl_nonnull child) const {
return CordRepRef(child);
}
- absl::Nonnull<const CordRep*> rep;
+ const CordRep* absl_nonnull rep;
};
// RawUsage holds the computed total number of bytes.
@@ -66,7 +66,7 @@ template <>
struct RawUsage<Mode::kTotalMorePrecise> {
size_t total = 0;
// TODO(b/289250880): Replace this with a flat_hash_set.
- std::unordered_set<absl::Nonnull<const CordRep*>> counted;
+ std::unordered_set<const CordRep* absl_nonnull> counted;
void Add(size_t size, CordRepRef<Mode::kTotalMorePrecise> repref) {
if (counted.insert(repref.rep).second) {
@@ -90,15 +90,15 @@ double MaybeDiv(double d, refcount_t refcount) {
template <>
struct CordRepRef<Mode::kFairShare> {
// Creates a CordRepRef with the provided rep and top (parent) fraction.
- explicit CordRepRef(absl::Nonnull<const CordRep*> r, double frac = 1.0)
+ explicit CordRepRef(const CordRep* absl_nonnull r, double frac = 1.0)
: rep(r), fraction(MaybeDiv(frac, r->refcount.Get())) {}
// Returns a CordRepRef with a fraction of `this->fraction / child.refcount`
- CordRepRef Child(absl::Nonnull<const CordRep*> child) const {
+ CordRepRef Child(const CordRep* absl_nonnull child) const {
return CordRepRef(child, fraction);
}
- absl::Nonnull<const CordRep*> rep;
+ const CordRep* absl_nonnull rep;
double fraction;
};
@@ -150,7 +150,7 @@ void AnalyzeBtree(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
}
template <Mode mode>
-size_t GetEstimatedUsage(absl::Nonnull<const CordRep*> rep) {
+size_t GetEstimatedUsage(const CordRep* absl_nonnull rep) {
// Zero initialized memory usage totals.
RawUsage<mode> raw_usage;
@@ -179,15 +179,15 @@ size_t GetEstimatedUsage(absl::Nonnull<const CordRep*> rep) {
} // namespace
-size_t GetEstimatedMemoryUsage(absl::Nonnull<const CordRep*> rep) {
+size_t GetEstimatedMemoryUsage(const CordRep* absl_nonnull rep) {
return GetEstimatedUsage<Mode::kTotal>(rep);
}
-size_t GetEstimatedFairShareMemoryUsage(absl::Nonnull<const CordRep*> rep) {
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* absl_nonnull rep) {
return GetEstimatedUsage<Mode::kFairShare>(rep);
}
-size_t GetMorePreciseMemoryUsage(absl::Nonnull<const CordRep*> rep) {
+size_t GetMorePreciseMemoryUsage(const CordRep* absl_nonnull rep) {
return GetEstimatedUsage<Mode::kTotalMorePrecise>(rep);
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cord_analysis.h b/contrib/restricted/abseil-cpp/absl/strings/cord_analysis.h
index f8ce3489846..db50f3a19ce 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cord_analysis.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/cord_analysis.h
@@ -29,7 +29,7 @@ namespace cord_internal {
// Returns the *approximate* number of bytes held in full or in part by this
// Cord (which may not remain the same between invocations). Cords that share
// memory could each be "charged" independently for the same shared memory.
-size_t GetEstimatedMemoryUsage(absl::Nonnull<const CordRep*> rep);
+size_t GetEstimatedMemoryUsage(const CordRep* absl_nonnull rep);
// Returns the *approximate* number of bytes held in full or in part by this
// Cord for the distinct memory held by this cord. This is similar to
@@ -47,13 +47,13 @@ size_t GetEstimatedMemoryUsage(absl::Nonnull<const CordRep*> rep);
//
// This is more expensive than `GetEstimatedMemoryUsage()` as it requires
// deduplicating all memory references.
-size_t GetMorePreciseMemoryUsage(absl::Nonnull<const CordRep*> rep);
+size_t GetMorePreciseMemoryUsage(const CordRep* absl_nonnull rep);
// Returns the *approximate* number of bytes held in full or in part by this
// CordRep weighted by the sharing ratio of that data. For example, if some data
// edge is shared by 4 different Cords, then each cord is attribute 1/4th of
// the total memory usage as a 'fair share' of the total memory usage.
-size_t GetEstimatedFairShareMemoryUsage(absl::Nonnull<const CordRep*> rep);
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* absl_nonnull rep);
} // namespace cord_internal
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cordz_test_helpers.h b/contrib/restricted/abseil-cpp/absl/strings/cordz_test_helpers.h
index 619f13c27a4..66232db7caa 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cordz_test_helpers.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/cordz_test_helpers.h
@@ -34,16 +34,15 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
// Returns the CordzInfo for the cord, or nullptr if the cord is not sampled.
-inline absl::Nullable<const cord_internal::CordzInfo*> GetCordzInfoForTesting(
+inline const cord_internal::CordzInfo* GetCordzInfoForTesting(
const Cord& cord) {
if (!cord.contents_.is_tree()) return nullptr;
return cord.contents_.cordz_info();
}
// Returns true if the provided cordz_info is in the list of sampled cords.
-inline bool CordzInfoIsListed(
- absl::Nonnull<const cord_internal::CordzInfo*> cordz_info,
- cord_internal::CordzSampleToken token = {}) {
+inline bool CordzInfoIsListed(const cord_internal::CordzInfo* cordz_info,
+ cord_internal::CordzSampleToken token = {}) {
for (const cord_internal::CordzInfo& info : token) {
if (cordz_info == &info) return true;
}
@@ -121,7 +120,7 @@ class CordzSamplingIntervalHelper {
// Wrapper struct managing a small CordRep `rep`
struct TestCordRep {
- absl::Nonnull<cord_internal::CordRepFlat*> rep;
+ cord_internal::CordRepFlat* rep;
TestCordRep() {
rep = cord_internal::CordRepFlat::New(100);
diff --git a/contrib/restricted/abseil-cpp/absl/strings/escaping.cc b/contrib/restricted/abseil-cpp/absl/strings/escaping.cc
index b70c5041ae9..e551c66312e 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/escaping.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/escaping.cc
@@ -59,7 +59,7 @@ inline unsigned int hex_digit_to_int(char c) {
}
inline bool IsSurrogate(char32_t c, absl::string_view src,
- absl::Nullable<std::string*> error) {
+ std::string* absl_nullable error) {
if (c >= 0xD800 && c <= 0xDFFF) {
if (error) {
*error = absl::StrCat("invalid surrogate character (0xD800-DFFF): \\",
@@ -76,49 +76,49 @@ inline bool IsSurrogate(char32_t c, absl::string_view src,
//
// Unescapes C escape sequences and is the reverse of CEscape().
//
-// If 'source' is valid, stores the unescaped string and its size in
-// 'dest' and 'dest_len' respectively, and returns true. Otherwise
-// returns false and optionally stores the error description in
-// 'error'. Set 'error' to nullptr to disable error reporting.
+// If `src` is valid, stores the unescaped string `dst`, and returns
+// true. Otherwise returns false and optionally stores the error
+// description in `error`. Set `error` to nullptr to disable error
+// reporting.
//
-// 'dest' should point to a buffer that is at least as big as 'source'.
-// 'source' and 'dest' may be the same.
-//
-// NOTE: any changes to this function must also be reflected in the older
-// UnescapeCEscapeSequences().
+// `src` and `dst` may use the same underlying buffer.
// ----------------------------------------------------------------------
-bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
- absl::Nonnull<char*> dest,
- absl::Nonnull<ptrdiff_t*> dest_len,
- absl::Nullable<std::string*> error) {
- char* d = dest;
- const char* p = source.data();
- const char* end = p + source.size();
- const char* last_byte = end - 1;
-
- // Small optimization for case where source = dest and there's no escaping
- while (p == d && p < end && *p != '\\') p++, d++;
-
- while (p < end) {
- if (*p != '\\') {
- *d++ = *p++;
+
+bool CUnescapeInternal(absl::string_view src, bool leave_nulls_escaped,
+ std::string* absl_nonnull dst,
+ std::string* absl_nullable error) {
+ strings_internal::STLStringResizeUninitialized(dst, src.size());
+
+ absl::string_view::size_type p = 0; // Current src position.
+ std::string::size_type d = 0; // Current dst position.
+
+ // When unescaping in-place, skip any prefix that does not have escaping.
+ if (src.data() == dst->data()) {
+ while (p < src.size() && src[p] != '\\') p++, d++;
+ }
+
+ while (p < src.size()) {
+ if (src[p] != '\\') {
+ (*dst)[d++] = src[p++];
} else {
- if (++p > last_byte) { // skip past the '\\'
- if (error) *error = "String cannot end with \\";
+ if (++p >= src.size()) { // skip past the '\\'
+ if (error != nullptr) {
+ *error = "String cannot end with \\";
+ }
return false;
}
- switch (*p) {
- case 'a': *d++ = '\a'; break;
- case 'b': *d++ = '\b'; break;
- case 'f': *d++ = '\f'; break;
- case 'n': *d++ = '\n'; break;
- case 'r': *d++ = '\r'; break;
- case 't': *d++ = '\t'; break;
- case 'v': *d++ = '\v'; break;
- case '\\': *d++ = '\\'; break;
- case '?': *d++ = '\?'; break; // \? Who knew?
- case '\'': *d++ = '\''; break;
- case '"': *d++ = '\"'; break;
+ switch (src[p]) {
+ case 'a': (*dst)[d++] = '\a'; break;
+ case 'b': (*dst)[d++] = '\b'; break;
+ case 'f': (*dst)[d++] = '\f'; break;
+ case 'n': (*dst)[d++] = '\n'; break;
+ case 'r': (*dst)[d++] = '\r'; break;
+ case 't': (*dst)[d++] = '\t'; break;
+ case 'v': (*dst)[d++] = '\v'; break;
+ case '\\': (*dst)[d++] = '\\'; break;
+ case '?': (*dst)[d++] = '\?'; break;
+ case '\'': (*dst)[d++] = '\''; break;
+ case '"': (*dst)[d++] = '\"'; break;
case '0':
case '1':
case '2':
@@ -128,188 +128,170 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
case '6':
case '7': {
// octal digit: 1 to 3 digits
- const char* octal_start = p;
- unsigned int ch = static_cast<unsigned int>(*p - '0'); // digit 1
- if (p < last_byte && is_octal_digit(p[1]))
- ch = ch * 8 + static_cast<unsigned int>(*++p - '0'); // digit 2
- if (p < last_byte && is_octal_digit(p[1]))
- ch = ch * 8 + static_cast<unsigned int>(*++p - '0'); // digit 3
+ auto octal_start = p;
+ unsigned int ch = static_cast<unsigned int>(src[p] - '0'); // digit 1
+ if (p + 1 < src.size() && is_octal_digit(src[p + 1]))
+ ch = ch * 8 + static_cast<unsigned int>(src[++p] - '0'); // digit 2
+ if (p + 1 < src.size() && is_octal_digit(src[p + 1]))
+ ch = ch * 8 + static_cast<unsigned int>(src[++p] - '0'); // digit 3
if (ch > 0xff) {
- if (error) {
- *error = "Value of \\" +
- std::string(octal_start,
- static_cast<size_t>(p + 1 - octal_start)) +
- " exceeds 0xff";
+ if (error != nullptr) {
+ *error =
+ "Value of \\" +
+ std::string(src.substr(octal_start, p + 1 - octal_start)) +
+ " exceeds 0xff";
}
return false;
}
if ((ch == 0) && leave_nulls_escaped) {
// Copy the escape sequence for the null character
- const size_t octal_size = static_cast<size_t>(p + 1 - octal_start);
- *d++ = '\\';
- memmove(d, octal_start, octal_size);
- d += octal_size;
+ (*dst)[d++] = '\\';
+ while (octal_start <= p) {
+ (*dst)[d++] = src[octal_start++];
+ }
break;
}
- *d++ = static_cast<char>(ch);
+ (*dst)[d++] = static_cast<char>(ch);
break;
}
case 'x':
case 'X': {
- if (p >= last_byte) {
- if (error) *error = "String cannot end with \\x";
+ if (p + 1 >= src.size()) {
+ if (error != nullptr) {
+ *error = "String cannot end with \\x";
+ }
return false;
- } else if (!absl::ascii_isxdigit(static_cast<unsigned char>(p[1]))) {
- if (error) *error = "\\x cannot be followed by a non-hex digit";
+ } else if (!absl::ascii_isxdigit(
+ static_cast<unsigned char>(src[p + 1]))) {
+ if (error != nullptr) {
+ *error = "\\x cannot be followed by a non-hex digit";
+ }
return false;
}
unsigned int ch = 0;
- const char* hex_start = p;
- while (p < last_byte &&
- absl::ascii_isxdigit(static_cast<unsigned char>(p[1])))
+ auto hex_start = p;
+ while (p + 1 < src.size() &&
+ absl::ascii_isxdigit(static_cast<unsigned char>(src[p + 1]))) {
// Arbitrarily many hex digits
- ch = (ch << 4) + hex_digit_to_int(*++p);
+ ch = (ch << 4) + hex_digit_to_int(src[++p]);
+ }
if (ch > 0xFF) {
- if (error) {
+ if (error != nullptr) {
*error = "Value of \\" +
- std::string(hex_start,
- static_cast<size_t>(p + 1 - hex_start)) +
+ std::string(src.substr(hex_start, p + 1 - hex_start)) +
" exceeds 0xff";
}
return false;
}
if ((ch == 0) && leave_nulls_escaped) {
// Copy the escape sequence for the null character
- const size_t hex_size = static_cast<size_t>(p + 1 - hex_start);
- *d++ = '\\';
- memmove(d, hex_start, hex_size);
- d += hex_size;
+ (*dst)[d++] = '\\';
+ while (hex_start <= p) {
+ (*dst)[d++] = src[hex_start++];
+ }
break;
}
- *d++ = static_cast<char>(ch);
+ (*dst)[d++] = static_cast<char>(ch);
break;
}
case 'u': {
// \uhhhh => convert 4 hex digits to UTF-8
char32_t rune = 0;
- const char* hex_start = p;
- if (p + 4 >= end) {
- if (error) {
- *error = "\\u must be followed by 4 hex digits: \\" +
- std::string(hex_start,
- static_cast<size_t>(p + 1 - hex_start));
+ auto hex_start = p;
+ if (p + 4 >= src.size()) {
+ if (error != nullptr) {
+ *error = "\\u must be followed by 4 hex digits";
}
return false;
}
for (int i = 0; i < 4; ++i) {
// Look one char ahead.
- if (absl::ascii_isxdigit(static_cast<unsigned char>(p[1]))) {
- rune = (rune << 4) + hex_digit_to_int(*++p); // Advance p.
+ if (absl::ascii_isxdigit(static_cast<unsigned char>(src[p + 1]))) {
+ rune = (rune << 4) + hex_digit_to_int(src[++p]);
} else {
- if (error) {
+ if (error != nullptr) {
*error = "\\u must be followed by 4 hex digits: \\" +
- std::string(hex_start,
- static_cast<size_t>(p + 1 - hex_start));
+ std::string(src.substr(hex_start, p + 1 - hex_start));
}
return false;
}
}
if ((rune == 0) && leave_nulls_escaped) {
// Copy the escape sequence for the null character
- *d++ = '\\';
- memmove(d, hex_start, 5); // u0000
- d += 5;
+ (*dst)[d++] = '\\';
+ while (hex_start <= p) {
+ (*dst)[d++] = src[hex_start++];
+ }
break;
}
- if (IsSurrogate(rune, absl::string_view(hex_start, 5), error)) {
+ if (IsSurrogate(rune, src.substr(hex_start, 5), error)) {
return false;
}
- d += strings_internal::EncodeUTF8Char(d, rune);
+ d += strings_internal::EncodeUTF8Char(dst->data() + d, rune);
break;
}
case 'U': {
// \Uhhhhhhhh => convert 8 hex digits to UTF-8
char32_t rune = 0;
- const char* hex_start = p;
- if (p + 8 >= end) {
- if (error) {
- *error = "\\U must be followed by 8 hex digits: \\" +
- std::string(hex_start,
- static_cast<size_t>(p + 1 - hex_start));
+ auto hex_start = p;
+ if (p + 8 >= src.size()) {
+ if (error != nullptr) {
+ *error = "\\U must be followed by 8 hex digits";
}
return false;
}
for (int i = 0; i < 8; ++i) {
// Look one char ahead.
- if (absl::ascii_isxdigit(static_cast<unsigned char>(p[1]))) {
+ if (absl::ascii_isxdigit(static_cast<unsigned char>(src[p + 1]))) {
// Don't change rune until we're sure this
// is within the Unicode limit, but do advance p.
- uint32_t newrune = (rune << 4) + hex_digit_to_int(*++p);
+ uint32_t newrune = (rune << 4) + hex_digit_to_int(src[++p]);
if (newrune > 0x10FFFF) {
- if (error) {
- *error = "Value of \\" +
- std::string(hex_start,
- static_cast<size_t>(p + 1 - hex_start)) +
- " exceeds Unicode limit (0x10FFFF)";
+ if (error != nullptr) {
+ *error =
+ "Value of \\" +
+ std::string(src.substr(hex_start, p + 1 - hex_start)) +
+ " exceeds Unicode limit (0x10FFFF)";
}
return false;
} else {
rune = newrune;
}
} else {
- if (error) {
+ if (error != nullptr) {
*error = "\\U must be followed by 8 hex digits: \\" +
- std::string(hex_start,
- static_cast<size_t>(p + 1 - hex_start));
+ std::string(src.substr(hex_start, p + 1 - hex_start));
}
return false;
}
}
if ((rune == 0) && leave_nulls_escaped) {
// Copy the escape sequence for the null character
- *d++ = '\\';
- memmove(d, hex_start, 9); // U00000000
- d += 9;
+ (*dst)[d++] = '\\';
+ // U00000000
+ while (hex_start <= p) {
+ (*dst)[d++] = src[hex_start++];
+ }
break;
}
- if (IsSurrogate(rune, absl::string_view(hex_start, 9), error)) {
+ if (IsSurrogate(rune, src.substr(hex_start, 9), error)) {
return false;
}
- d += strings_internal::EncodeUTF8Char(d, rune);
+ d += strings_internal::EncodeUTF8Char(dst->data() + d, rune);
break;
}
default: {
- if (error) *error = std::string("Unknown escape sequence: \\") + *p;
+ if (error != nullptr) {
+ *error = std::string("Unknown escape sequence: \\") + src[p];
+ }
return false;
}
}
- p++; // read past letter we escaped
+ p++; // Read past letter we escaped.
}
}
- *dest_len = d - dest;
- return true;
-}
-// ----------------------------------------------------------------------
-// CUnescapeInternal()
-//
-// Same as above but uses a std::string for output. 'source' and 'dest'
-// may be the same.
-// ----------------------------------------------------------------------
-bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
- absl::Nonnull<std::string*> dest,
- absl::Nullable<std::string*> error) {
- strings_internal::STLStringResizeUninitialized(dest, source.size());
-
- ptrdiff_t dest_size;
- if (!CUnescapeInternal(source,
- leave_nulls_escaped,
- &(*dest)[0],
- &dest_size,
- error)) {
- return false;
- }
- dest->erase(static_cast<size_t>(dest_size));
+ dst->erase(d);
return true;
}
@@ -450,7 +432,7 @@ inline size_t CEscapedLength(absl::string_view src) {
}
void CEscapeAndAppendInternal(absl::string_view src,
- absl::Nonnull<std::string*> dest) {
+ std::string* absl_nonnull dest) {
size_t escaped_len = CEscapedLength(src);
if (escaped_len == src.size()) {
dest->append(src.data(), src.size());
@@ -479,10 +461,10 @@ void CEscapeAndAppendInternal(absl::string_view src,
// Reverses the mapping in Base64EscapeInternal; see that method's
// documentation for details of the mapping.
-bool Base64UnescapeInternal(absl::Nullable<const char*> src_param, size_t szsrc,
- absl::Nullable<char*> dest, size_t szdest,
+bool Base64UnescapeInternal(const char* absl_nullable src_param, size_t szsrc,
+ char* absl_nullable dest, size_t szdest,
const std::array<signed char, 256>& unbase64,
- absl::Nonnull<size_t*> len) {
+ size_t* absl_nonnull len) {
static const char kPad64Equals = '=';
static const char kPad64Dot = '.';
@@ -818,8 +800,8 @@ constexpr std::array<signed char, 256> kUnWebSafeBase64 = {
/* clang-format on */
template <typename String>
-bool Base64UnescapeInternal(absl::Nullable<const char*> src, size_t slen,
- absl::Nonnull<String*> dest,
+bool Base64UnescapeInternal(const char* absl_nullable src, size_t slen,
+ String* absl_nonnull dest,
const std::array<signed char, 256>& unbase64) {
// Determine the size of the output string. Base64 encodes every 3 bytes into
// 4 characters. Any leftover chars are added directly for good measure.
@@ -888,7 +870,7 @@ constexpr std::array<signed char, 256> kHexValueStrict = {
// or a string. This works because we use the [] operator to access
// individual characters at a time.
template <typename T>
-void HexStringToBytesInternal(absl::Nullable<const char*> from, T to,
+void HexStringToBytesInternal(const char* absl_nullable from, T to,
size_t num) {
for (size_t i = 0; i < num; i++) {
to[i] = static_cast<char>(kHexValueLenient[from[i * 2] & 0xFF] << 4) +
@@ -899,7 +881,7 @@ void HexStringToBytesInternal(absl::Nullable<const char*> from, T to,
// This is a templated function so that T can be either a char* or a
// std::string.
template <typename T>
-void BytesToHexStringInternal(absl::Nullable<const unsigned char*> src, T dest,
+void BytesToHexStringInternal(const unsigned char* absl_nullable src, T dest,
size_t num) {
auto dest_ptr = &dest[0];
for (auto src_ptr = src; src_ptr != (src + num); ++src_ptr, dest_ptr += 2) {
@@ -915,8 +897,8 @@ void BytesToHexStringInternal(absl::Nullable<const unsigned char*> src, T dest,
//
// See CUnescapeInternal() for implementation details.
// ----------------------------------------------------------------------
-bool CUnescape(absl::string_view source, absl::Nonnull<std::string*> dest,
- absl::Nullable<std::string*> error) {
+bool CUnescape(absl::string_view source, std::string* absl_nonnull dest,
+ std::string* absl_nullable error) {
return CUnescapeInternal(source, kUnescapeNulls, dest, error);
}
@@ -938,23 +920,23 @@ std::string Utf8SafeCHexEscape(absl::string_view src) {
return CEscapeInternal(src, true, true);
}
-bool Base64Unescape(absl::string_view src, absl::Nonnull<std::string*> dest) {
+bool Base64Unescape(absl::string_view src, std::string* absl_nonnull dest) {
return Base64UnescapeInternal(src.data(), src.size(), dest, kUnBase64);
}
bool WebSafeBase64Unescape(absl::string_view src,
- absl::Nonnull<std::string*> dest) {
+ std::string* absl_nonnull dest) {
return Base64UnescapeInternal(src.data(), src.size(), dest, kUnWebSafeBase64);
}
-void Base64Escape(absl::string_view src, absl::Nonnull<std::string*> dest) {
+void Base64Escape(absl::string_view src, std::string* absl_nonnull dest) {
strings_internal::Base64EscapeInternal(
reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
true, strings_internal::kBase64Chars);
}
void WebSafeBase64Escape(absl::string_view src,
- absl::Nonnull<std::string*> dest) {
+ std::string* absl_nonnull dest) {
strings_internal::Base64EscapeInternal(
reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
false, strings_internal::kWebSafeBase64Chars);
@@ -976,8 +958,7 @@ std::string WebSafeBase64Escape(absl::string_view src) {
return dest;
}
-bool HexStringToBytes(absl::string_view hex,
- absl::Nonnull<std::string*> bytes) {
+bool HexStringToBytes(absl::string_view hex, std::string* absl_nonnull bytes) {
std::string output;
size_t num_bytes = hex.size() / 2;
diff --git a/contrib/restricted/abseil-cpp/absl/strings/escaping.h b/contrib/restricted/abseil-cpp/absl/strings/escaping.h
index 3f34fbfc1c5..3aaf39ca83f 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/escaping.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/escaping.h
@@ -71,12 +71,12 @@ ABSL_NAMESPACE_BEGIN
// ...
// }
// EXPECT_EQ(unescaped_s, "foo\rbar\nbaz\t");
-bool CUnescape(absl::string_view source, absl::Nonnull<std::string*> dest,
- absl::Nullable<std::string*> error);
+bool CUnescape(absl::string_view source, std::string* absl_nonnull dest,
+ std::string* absl_nullable error);
// Overload of `CUnescape()` with no error reporting.
inline bool CUnescape(absl::string_view source,
- absl::Nonnull<std::string*> dest) {
+ std::string* absl_nonnull dest) {
return CUnescape(source, dest, nullptr);
}
@@ -126,7 +126,7 @@ std::string Utf8SafeCHexEscape(absl::string_view src);
// Encodes a `src` string into a base64-encoded 'dest' string with padding
// characters. This function conforms with RFC 4648 section 4 (base64) and RFC
// 2045.
-void Base64Escape(absl::string_view src, absl::Nonnull<std::string*> dest);
+void Base64Escape(absl::string_view src, std::string* absl_nonnull dest);
std::string Base64Escape(absl::string_view src);
// WebSafeBase64Escape()
@@ -134,8 +134,7 @@ std::string Base64Escape(absl::string_view src);
// Encodes a `src` string into a base64 string, like Base64Escape() does, but
// outputs '-' instead of '+' and '_' instead of '/', and does not pad 'dest'.
// This function conforms with RFC 4648 section 5 (base64url).
-void WebSafeBase64Escape(absl::string_view src,
- absl::Nonnull<std::string*> dest);
+void WebSafeBase64Escape(absl::string_view src, std::string* absl_nonnull dest);
std::string WebSafeBase64Escape(absl::string_view src);
// Base64Unescape()
@@ -145,7 +144,7 @@ std::string WebSafeBase64Escape(absl::string_view src);
// `src` contains invalid characters, `dest` is cleared and returns `false`.
// If padding is included (note that `Base64Escape()` does produce it), it must
// be correct. In the padding, '=' and '.' are treated identically.
-bool Base64Unescape(absl::string_view src, absl::Nonnull<std::string*> dest);
+bool Base64Unescape(absl::string_view src, std::string* absl_nonnull dest);
// WebSafeBase64Unescape()
//
@@ -155,7 +154,7 @@ bool Base64Unescape(absl::string_view src, absl::Nonnull<std::string*> dest);
// included (note that `WebSafeBase64Escape()` does not produce it), it must be
// correct. In the padding, '=' and '.' are treated identically.
bool WebSafeBase64Unescape(absl::string_view src,
- absl::Nonnull<std::string*> dest);
+ std::string* absl_nonnull dest);
// HexStringToBytes()
//
@@ -163,8 +162,8 @@ bool WebSafeBase64Unescape(absl::string_view src,
// output string. If `hex` does not consist of valid hexadecimal data, this
// function returns false and leaves `bytes` in an unspecified state. Returns
// true on success.
-ABSL_MUST_USE_RESULT bool HexStringToBytes(absl::string_view hex,
- absl::Nonnull<std::string*> bytes);
+[[nodiscard]] bool HexStringToBytes(absl::string_view hex,
+ std::string* absl_nonnull bytes);
// HexStringToBytes()
//
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_bigint.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_bigint.cc
index 46b5289a041..9185f1f9f17 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_bigint.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_bigint.cc
@@ -279,7 +279,7 @@ int BigUnsigned<max_words>::ReadDigits(const char* begin, const char* end,
// Either way, [begin, decimal_point) will contain the set of dropped digits
// that require an exponent adjustment.
const char* decimal_point = std::find(begin, end, '.');
- exponent_adjust += (decimal_point - begin);
+ exponent_adjust += static_cast<int>(decimal_point - begin);
}
return exponent_adjust;
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h
index b68ec2bbc5f..cf1f703bfc6 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h
@@ -25,7 +25,6 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
-#include "absl/base/internal/invoke.h"
#include "absl/base/macros.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
@@ -358,16 +357,15 @@ struct CordRepExternal : public CordRep {
struct Rank0 {};
struct Rank1 : Rank0 {};
-template <typename Releaser, typename = ::absl::base_internal::invoke_result_t<
- Releaser, absl::string_view>>
+template <typename Releaser,
+ typename = ::std::invoke_result_t<Releaser, absl::string_view>>
void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view data) {
- ::absl::base_internal::invoke(std::forward<Releaser>(releaser), data);
+ ::std::invoke(std::forward<Releaser>(releaser), data);
}
-template <typename Releaser,
- typename = ::absl::base_internal::invoke_result_t<Releaser>>
+template <typename Releaser, typename = ::std::invoke_result_t<Releaser>>
void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view) {
- ::absl::base_internal::invoke(std::forward<Releaser>(releaser));
+ ::std::invoke(std::forward<Releaser>(releaser));
}
// We use CompressedTuple so that we can benefit from EBCO.
@@ -637,7 +635,7 @@ class InlineData {
poison();
}
- void CopyInlineToString(absl::Nonnull<std::string*> dst) const {
+ void CopyInlineToString(std::string* dst) const {
assert(!is_tree());
// As Cord can store only 15 bytes it is smaller than std::string's
// small string optimization buffer size. Therefore we will always trigger
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_btree.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_btree.cc
index 05bd0e20675..33ea820d0a2 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_btree.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_btree.cc
@@ -36,10 +36,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr size_t CordRepBtree::kMaxCapacity;
-#endif
-
namespace {
using NodeStack = CordRepBtree * [CordRepBtree::kMaxDepth];
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.cc
index b7c7fed9fa2..4baaecdcf69 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.cc
@@ -34,10 +34,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr size_t CordzInfo::kMaxStackDepth;
-#endif
-
ABSL_CONST_INIT CordzInfo::List CordzInfo::global_list_{absl::kConstInit};
namespace {
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/arg.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/arg.cc
index eeb2108154f..01e4e42d9d0 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/arg.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/arg.cc
@@ -26,6 +26,7 @@
#include <cstring>
#include <cwchar>
#include <string>
+#include <string_view>
#include <type_traits>
#include "absl/base/config.h"
@@ -34,13 +35,10 @@
#include "absl/numeric/int128.h"
#include "absl/strings/internal/str_format/extension.h"
#include "absl/strings/internal/str_format/float_conversion.h"
+#include "absl/strings/internal/utf8.h"
#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"
-#if defined(ABSL_HAVE_STD_STRING_VIEW)
-#include <string_view>
-#endif
-
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace str_format_internal {
@@ -311,68 +309,16 @@ inline bool ConvertStringArg(string_view v, const FormatConversionSpecImpl conv,
conv.has_left_flag());
}
-struct ShiftState {
- bool saw_high_surrogate = false;
- uint8_t bits = 0;
-};
-
-// Converts `v` from UTF-16 or UTF-32 to UTF-8 and writes to `buf`. `buf` is
-// assumed to have enough space for the output. `s` is used to carry state
-// between successive calls with a UTF-16 surrogate pair. Returns the number of
-// chars written, or `static_cast<size_t>(-1)` on failure.
-//
-// This is basically std::wcrtomb(), but always outputting UTF-8 instead of
-// respecting the current locale.
-inline size_t WideToUtf8(wchar_t wc, char *buf, ShiftState &s) {
- const auto v = static_cast<uint32_t>(wc);
- if (v < 0x80) {
- *buf = static_cast<char>(v);
- return 1;
- } else if (v < 0x800) {
- *buf++ = static_cast<char>(0xc0 | (v >> 6));
- *buf = static_cast<char>(0x80 | (v & 0x3f));
- return 2;
- } else if (v < 0xd800 || (v - 0xe000) < 0x2000) {
- *buf++ = static_cast<char>(0xe0 | (v >> 12));
- *buf++ = static_cast<char>(0x80 | ((v >> 6) & 0x3f));
- *buf = static_cast<char>(0x80 | (v & 0x3f));
- return 3;
- } else if ((v - 0x10000) < 0x100000) {
- *buf++ = static_cast<char>(0xf0 | (v >> 18));
- *buf++ = static_cast<char>(0x80 | ((v >> 12) & 0x3f));
- *buf++ = static_cast<char>(0x80 | ((v >> 6) & 0x3f));
- *buf = static_cast<char>(0x80 | (v & 0x3f));
- return 4;
- } else if (v < 0xdc00) {
- s.saw_high_surrogate = true;
- s.bits = static_cast<uint8_t>(v & 0x3);
- const uint8_t high_bits = ((v >> 6) & 0xf) + 1;
- *buf++ = static_cast<char>(0xf0 | (high_bits >> 2));
- *buf =
- static_cast<char>(0x80 | static_cast<uint8_t>((high_bits & 0x3) << 4) |
- static_cast<uint8_t>((v >> 2) & 0xf));
- return 2;
- } else if (v < 0xe000 && s.saw_high_surrogate) {
- *buf++ = static_cast<char>(0x80 | static_cast<uint8_t>(s.bits << 4) |
- static_cast<uint8_t>((v >> 6) & 0xf));
- *buf = static_cast<char>(0x80 | (v & 0x3f));
- s.saw_high_surrogate = false;
- s.bits = 0;
- return 2;
- } else {
- return static_cast<size_t>(-1);
- }
-}
-
inline bool ConvertStringArg(const wchar_t *v,
size_t len,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
FixedArray<char> mb(len * 4);
- ShiftState s;
+ strings_internal::ShiftState s;
size_t chars_written = 0;
for (size_t i = 0; i < len; ++i) {
- const size_t chars = WideToUtf8(v[i], &mb[chars_written], s);
+ const size_t chars =
+ strings_internal::WideToUtf8(v[i], &mb[chars_written], s);
if (chars == static_cast<size_t>(-1)) { return false; }
chars_written += chars;
}
@@ -382,8 +328,8 @@ inline bool ConvertStringArg(const wchar_t *v,
bool ConvertWCharTImpl(wchar_t v, const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
char mb[4];
- ShiftState s;
- const size_t chars_written = WideToUtf8(v, mb, s);
+ strings_internal::ShiftState s;
+ const size_t chars_written = strings_internal::WideToUtf8(v, mb, s);
return chars_written != static_cast<size_t>(-1) && !s.saw_high_surrogate &&
ConvertStringArg(string_view(mb, chars_written), conv, sink);
}
@@ -510,13 +456,11 @@ StringConvertResult FormatConvertImpl(string_view v,
return {ConvertStringArg(v, conv, sink)};
}
-#if defined(ABSL_HAVE_STD_STRING_VIEW)
StringConvertResult FormatConvertImpl(std::wstring_view v,
const FormatConversionSpecImpl conv,
FormatSinkImpl* sink) {
return {ConvertStringArg(v.data(), v.size(), conv, sink)};
}
-#endif
StringPtrConvertResult FormatConvertImpl(const char* v,
const FormatConversionSpecImpl conv,
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/arg.h b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/arg.h
index 309161d5915..021013fb1ab 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/arg.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/arg.h
@@ -26,6 +26,7 @@
#include <memory>
#include <sstream>
#include <string>
+#include <string_view>
#include <type_traits>
#include <utility>
@@ -37,10 +38,6 @@
#include "absl/strings/internal/str_format/extension.h"
#include "absl/strings/string_view.h"
-#if defined(ABSL_HAVE_STD_STRING_VIEW)
-#include <string_view>
-#endif
-
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -228,7 +225,6 @@ StringConvertResult FormatConvertImpl(const std::wstring& v,
StringConvertResult FormatConvertImpl(string_view v,
FormatConversionSpecImpl conv,
FormatSinkImpl* sink);
-#if defined(ABSL_HAVE_STD_STRING_VIEW)
StringConvertResult FormatConvertImpl(std::wstring_view v,
FormatConversionSpecImpl conv,
FormatSinkImpl* sink);
@@ -239,7 +235,6 @@ inline StringConvertResult FormatConvertImpl(std::string_view v,
return FormatConvertImpl(absl::string_view(v.data(), v.size()), conv, sink);
}
#endif // !ABSL_USES_STD_STRING_VIEW
-#endif // ABSL_HAVE_STD_STRING_VIEW
using StringPtrConvertResult = ArgConvertResult<FormatConversionCharSetUnion(
FormatConversionCharSetInternal::s,
@@ -651,15 +646,10 @@ class FormatArgImpl {
ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(const wchar_t*, __VA_ARGS__); \
ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(std::wstring, __VA_ARGS__)
-#if defined(ABSL_HAVE_STD_STRING_VIEW)
#define ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_(...) \
ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_NO_WSTRING_VIEW_( \
__VA_ARGS__); \
ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(std::wstring_view, __VA_ARGS__)
-#else
-#define ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_(...) \
- ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_NO_WSTRING_VIEW_(__VA_ARGS__)
-#endif
ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_(extern);
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/extension.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/extension.cc
index 2a0ceb13d7b..2d441c21c56 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/extension.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/extension.cc
@@ -33,28 +33,6 @@ std::string FlagsToString(Flags v) {
return s;
}
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-
-#define ABSL_INTERNAL_X_VAL(id) \
- constexpr absl::FormatConversionChar FormatConversionCharInternal::id;
-ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, )
-#undef ABSL_INTERNAL_X_VAL
-// NOLINTNEXTLINE(readability-redundant-declaration)
-constexpr absl::FormatConversionChar FormatConversionCharInternal::kNone;
-
-#define ABSL_INTERNAL_CHAR_SET_CASE(c) \
- constexpr FormatConversionCharSet FormatConversionCharSetInternal::c;
-ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_CHAR_SET_CASE, )
-#undef ABSL_INTERNAL_CHAR_SET_CASE
-
-constexpr FormatConversionCharSet FormatConversionCharSetInternal::kStar;
-constexpr FormatConversionCharSet FormatConversionCharSetInternal::kIntegral;
-constexpr FormatConversionCharSet FormatConversionCharSetInternal::kFloating;
-constexpr FormatConversionCharSet FormatConversionCharSetInternal::kNumeric;
-constexpr FormatConversionCharSet FormatConversionCharSetInternal::kPointer;
-
-#endif // ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-
bool FormatSinkImpl::PutPaddedString(string_view value, int width,
int precision, bool left) {
size_t space_remaining = 0;
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/output.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/output.cc
index c4b24706132..068091c1a8b 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/output.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/output.cc
@@ -33,9 +33,11 @@ struct ClearErrnoGuard {
void BufferRawSink::Write(string_view v) {
size_t to_write = std::min(v.size(), size_);
- std::memcpy(buffer_, v.data(), to_write);
- buffer_ += to_write;
- size_ -= to_write;
+ if (to_write > 0) {
+ std::memcpy(buffer_, v.data(), to_write);
+ buffer_ += to_write;
+ size_ -= to_write;
+ }
total_written_ += v.size();
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/parser.h b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/parser.h
index b1d6d5fd0e8..329b060a891 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/parser.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/parser.h
@@ -132,8 +132,10 @@ class ParsedFormatBase {
has_error_ = other.has_error_;
items_ = other.items_;
size_t text_size = items_.empty() ? 0 : items_.back().text_end;
- data_.reset(new char[text_size]);
- memcpy(data_.get(), other.data_.get(), text_size);
+ data_ = std::make_unique<char[]>(text_size);
+ if (text_size > 0) {
+ memcpy(data_.get(), other.data_.get(), text_size);
+ }
return *this;
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_join_internal.h b/contrib/restricted/abseil-cpp/absl/strings/internal/str_join_internal.h
index 3e730c7aabe..31fcf6dc226 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_join_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_join_internal.h
@@ -43,6 +43,7 @@
#include <utility>
#include "absl/base/config.h"
+#include "absl/base/internal/iterator_traits.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/strings/internal/ostringstream.h"
#include "absl/strings/internal/resize_uninitialized.h"
@@ -228,9 +229,8 @@ std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s,
// range will be traversed twice: once to calculate the total needed size, and
// then again to copy the elements and delimiters to the output string.
template <typename Iterator,
- typename = typename std::enable_if<std::is_convertible<
- typename std::iterator_traits<Iterator>::iterator_category,
- std::forward_iterator_tag>::value>::type>
+ typename = std::enable_if_t<
+ base_internal::IsAtLeastForwardIterator<Iterator>::value>>
std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s,
NoFormatter) {
std::string result;
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/string_constant.h b/contrib/restricted/abseil-cpp/absl/strings/internal/string_constant.h
index f68b17d75e8..d52c3308462 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/string_constant.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/string_constant.h
@@ -50,11 +50,6 @@ struct StringConstant {
"The input string_view must point to constant data.");
};
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-template <typename T>
-constexpr absl::string_view StringConstant<T>::value;
-#endif
-
// Factory function for `StringConstant` instances.
// It supports callables that have a constexpr default constructor and a
// constexpr operator().
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/utf8.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/utf8.cc
index 7ecb93dfbe7..61945f5869b 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/utf8.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/utf8.cc
@@ -16,11 +16,17 @@
#include "absl/strings/internal/utf8.h"
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+#include "absl/base/config.h"
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
-size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) {
+size_t EncodeUTF8Char(char* buffer, char32_t utf8_char) {
if (utf8_char <= 0x7F) {
*buffer = static_cast<char>(utf8_char);
return 1;
@@ -48,6 +54,95 @@ size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) {
}
}
+size_t WideToUtf8(wchar_t wc, char* buf, ShiftState& s) {
+ // Reinterpret the output buffer `buf` as `unsigned char*` for subsequent
+ // bitwise operations. This ensures well-defined behavior for bit
+ // manipulations (avoiding issues with signed `char`) and is safe under C++
+ // aliasing rules, as `unsigned char` can alias any type.
+ auto* ubuf = reinterpret_cast<unsigned char*>(buf);
+ const uint32_t v = static_cast<uint32_t>(wc);
+ constexpr size_t kError = static_cast<size_t>(-1);
+
+ if (v <= 0x007F) {
+ // 1-byte sequence (U+0000 to U+007F).
+ // 0xxxxxxx.
+ ubuf[0] = (0b0111'1111 & v);
+ s = {}; // Reset surrogate state.
+ return 1;
+ } else if (0x0080 <= v && v <= 0x07FF) {
+ // 2-byte sequence (U+0080 to U+07FF).
+ // 110xxxxx 10xxxxxx.
+ ubuf[0] = 0b1100'0000 | (0b0001'1111 & (v >> 6));
+ ubuf[1] = 0b1000'0000 | (0b0011'1111 & v);
+ s = {}; // Reset surrogate state.
+ return 2;
+ } else if ((0x0800 <= v && v <= 0xD7FF) || (0xE000 <= v && v <= 0xFFFF)) {
+ // 3-byte sequence (U+0800 to U+D7FF or U+E000 to U+FFFF).
+ // Excludes surrogate code points U+D800-U+DFFF.
+ // 1110xxxx 10xxxxxx 10xxxxxx.
+ ubuf[0] = 0b1110'0000 | (0b0000'1111 & (v >> 12));
+ ubuf[1] = 0b1000'0000 | (0b0011'1111 & (v >> 6));
+ ubuf[2] = 0b1000'0000 | (0b0011'1111 & v);
+ s = {}; // Reset surrogate state.
+ return 3;
+ } else if (0xD800 <= v && v <= 0xDBFF) {
+ // High Surrogate (U+D800 to U+DBFF).
+ // This part forms the first two bytes of an eventual 4-byte UTF-8 sequence.
+ const unsigned char high_bits_val = (0b0000'1111 & (v >> 6)) + 1;
+
+ // First byte of the 4-byte UTF-8 sequence (11110xxx).
+ ubuf[0] = 0b1111'0000 | (0b0000'0111 & (high_bits_val >> 2));
+ // Second byte of the 4-byte UTF-8 sequence (10xxxxxx).
+ ubuf[1] = 0b1000'0000 | //
+ (0b0011'0000 & (high_bits_val << 4)) | //
+ (0b0000'1111 & (v >> 2));
+ // Set state for high surrogate after writing to buffer.
+ s = {true, static_cast<unsigned char>(0b0000'0011 & v)};
+ return 2; // Wrote 2 bytes, expecting 2 more from a low surrogate.
+ } else if (0xDC00 <= v && v <= 0xDFFF) {
+ // Low Surrogate (U+DC00 to U+DFFF).
+ // This part forms the last two bytes of a 4-byte UTF-8 sequence,
+ // using state from a preceding high surrogate.
+ if (!s.saw_high_surrogate) {
+ // Error: Isolated low surrogate without a preceding high surrogate.
+ // s remains in its current (problematic) state.
+ // Caller should handle error.
+ return kError;
+ }
+
+ // Third byte of the 4-byte UTF-8 sequence (10xxxxxx).
+ ubuf[0] = 0b1000'0000 | //
+ (0b0011'0000 & (s.bits << 4)) | //
+ (0b0000'1111 & (v >> 6));
+ // Fourth byte of the 4-byte UTF-8 sequence (10xxxxxx).
+ ubuf[1] = 0b1000'0000 | (0b0011'1111 & v);
+
+ s = {}; // Reset surrogate state, pair complete.
+ return 2; // Wrote 2 more bytes, completing the 4-byte sequence.
+ } else if constexpr (0xFFFF < std::numeric_limits<wchar_t>::max()) {
+ // Conditionally compile the 4-byte direct conversion branch.
+ // This block is compiled only if wchar_t can represent values > 0xFFFF.
+ // It's placed after surrogate checks to ensure surrogates are handled by
+ // their specific logic. This inner 'if' is the runtime check for the 4-byte
+ // range. At this point, v is known not to be in the 1, 2, or 3-byte BMP
+ // ranges, nor is it a surrogate code point.
+ if (0x10000 <= v && v <= 0x10FFFF) {
+ // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx.
+ ubuf[0] = 0b1111'0000 | (0b0000'0111 & (v >> 18));
+ ubuf[1] = 0b1000'0000 | (0b0011'1111 & (v >> 12));
+ ubuf[2] = 0b1000'0000 | (0b0011'1111 & (v >> 6));
+ ubuf[3] = 0b1000'0000 | (0b0011'1111 & v);
+ s = {}; // Reset surrogate state.
+ return 4;
+ }
+ }
+
+ // Invalid wchar_t value (e.g., out of Unicode range, or unhandled after all
+ // checks).
+ s = {}; // Reset surrogate state.
+ return kError;
+}
+
} // namespace strings_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/utf8.h b/contrib/restricted/abseil-cpp/absl/strings/internal/utf8.h
index 32fb1093bea..ed1db110a7b 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/utf8.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/utf8.h
@@ -41,7 +41,21 @@ namespace strings_internal {
// characters into buffer, however never will more than kMaxEncodedUTF8Size
// bytes be written, regardless of the value of utf8_char.
enum { kMaxEncodedUTF8Size = 4 };
-size_t EncodeUTF8Char(char *buffer, char32_t utf8_char);
+size_t EncodeUTF8Char(char* buffer, char32_t utf8_char);
+
+struct ShiftState {
+ bool saw_high_surrogate = false;
+ unsigned char bits = 0;
+};
+
+// Converts `wc` from UTF-16 or UTF-32 to UTF-8 and writes to `buf`. `buf` is
+// assumed to have enough space for the output. `s` is used to carry state
+// between successive calls with a UTF-16 surrogate pair. Returns the number of
+// chars written, or `static_cast<size_t>(-1)` on failure.
+//
+// This is basically std::wcrtomb(), but always outputting UTF-8 instead of
+// respecting the current locale.
+size_t WideToUtf8(wchar_t wc, char* buf, ShiftState& s);
} // namespace strings_internal
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/strings/numbers.cc b/contrib/restricted/abseil-cpp/absl/strings/numbers.cc
index 83ea80b474b..a83fd2ca842 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/numbers.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/numbers.cc
@@ -47,7 +47,7 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-bool SimpleAtof(absl::string_view str, absl::Nonnull<float*> out) {
+bool SimpleAtof(absl::string_view str, float* absl_nonnull out) {
*out = 0.0;
str = StripAsciiWhitespace(str);
// std::from_chars doesn't accept an initial +, but SimpleAtof does, so if one
@@ -78,7 +78,7 @@ bool SimpleAtof(absl::string_view str, absl::Nonnull<float*> out) {
return true;
}
-bool SimpleAtod(absl::string_view str, absl::Nonnull<double*> out) {
+bool SimpleAtod(absl::string_view str, double* absl_nonnull out) {
*out = 0.0;
str = StripAsciiWhitespace(str);
// std::from_chars doesn't accept an initial +, but SimpleAtod does, so if one
@@ -109,7 +109,7 @@ bool SimpleAtod(absl::string_view str, absl::Nonnull<double*> out) {
return true;
}
-bool SimpleAtob(absl::string_view str, absl::Nonnull<bool*> out) {
+bool SimpleAtob(absl::string_view str, bool* absl_nonnull out) {
ABSL_RAW_CHECK(out != nullptr, "Output pointer must not be nullptr.");
if (EqualsIgnoreCase(str, "true") || EqualsIgnoreCase(str, "t") ||
EqualsIgnoreCase(str, "yes") || EqualsIgnoreCase(str, "y") ||
@@ -168,7 +168,7 @@ constexpr uint64_t kDivisionBy100Mul = 10486u;
constexpr uint64_t kDivisionBy100Div = 1 << 20;
// Encode functions write the ASCII output of input `n` to `out_str`.
-inline char* EncodeHundred(uint32_t n, absl::Nonnull<char*> out_str) {
+inline char* EncodeHundred(uint32_t n, char* absl_nonnull out_str) {
int num_digits = static_cast<int>(n - 10) >> 8;
uint32_t div10 = (n * kDivisionBy10Mul) / kDivisionBy10Div;
uint32_t mod10 = n - 10u * div10;
@@ -178,7 +178,7 @@ inline char* EncodeHundred(uint32_t n, absl::Nonnull<char*> out_str) {
return out_str + 2 + num_digits;
}
-inline char* EncodeTenThousand(uint32_t n, absl::Nonnull<char*> out_str) {
+inline char* EncodeTenThousand(uint32_t n, char* absl_nonnull out_str) {
// We split lower 2 digits and upper 2 digits of n into 2 byte consecutive
// blocks. 123 -> [\0\1][\0\23]. We divide by 10 both blocks
// (it's 1 division + zeroing upper bits), and compute modulo 10 as well "in
@@ -234,8 +234,8 @@ inline uint64_t PrepareEightDigits(uint32_t i) {
return tens;
}
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE absl::Nonnull<char*> EncodeFullU32(
- uint32_t n, absl::Nonnull<char*> out_str) {
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE char* absl_nonnull EncodeFullU32(
+ uint32_t n, char* absl_nonnull out_str) {
if (n < 10) {
*out_str = static_cast<char>('0' + n);
return out_str + 1;
@@ -284,7 +284,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE char* EncodeFullU64(uint64_t i,
} // namespace
-void numbers_internal::PutTwoDigits(uint32_t i, absl::Nonnull<char*> buf) {
+void numbers_internal::PutTwoDigits(uint32_t i, char* absl_nonnull buf) {
assert(i < 100);
uint32_t base = kTwoZeroBytes;
uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div;
@@ -293,15 +293,15 @@ void numbers_internal::PutTwoDigits(uint32_t i, absl::Nonnull<char*> buf) {
little_endian::Store16(buf, static_cast<uint16_t>(base));
}
-absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
- uint32_t n, absl::Nonnull<char*> out_str) {
+char* absl_nonnull numbers_internal::FastIntToBuffer(
+ uint32_t n, char* absl_nonnull out_str) {
out_str = EncodeFullU32(n, out_str);
*out_str = '\0';
return out_str;
}
-absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
- int32_t i, absl::Nonnull<char*> buffer) {
+char* absl_nonnull numbers_internal::FastIntToBuffer(
+ int32_t i, char* absl_nonnull buffer) {
uint32_t u = static_cast<uint32_t>(i);
if (i < 0) {
*buffer++ = '-';
@@ -315,15 +315,15 @@ absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
return buffer;
}
-absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
- uint64_t i, absl::Nonnull<char*> buffer) {
+char* absl_nonnull numbers_internal::FastIntToBuffer(
+ uint64_t i, char* absl_nonnull buffer) {
buffer = EncodeFullU64(i, buffer);
*buffer = '\0';
return buffer;
}
-absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
- int64_t i, absl::Nonnull<char*> buffer) {
+char* absl_nonnull numbers_internal::FastIntToBuffer(
+ int64_t i, char* absl_nonnull buffer) {
uint64_t u = static_cast<uint64_t>(i);
if (i < 0) {
*buffer++ = '-';
@@ -464,7 +464,7 @@ static ExpDigits SplitToSix(const double value) {
// Since we'd like to know if the fractional part of d is close to a half,
// we multiply it by 65536 and see if the fractional part is close to 32768.
// (The number doesn't have to be a power of two,but powers of two are faster)
- uint64_t d64k = d * 65536;
+ uint64_t d64k = static_cast<uint64_t>(d * 65536);
uint32_t dddddd; // A 6-digit decimal integer.
if ((d64k % 65536) == 32767 || (d64k % 65536) == 32768) {
// OK, it's fairly likely that precision was lost above, which is
@@ -478,7 +478,8 @@ static ExpDigits SplitToSix(const double value) {
// value we're representing, of course, is M.mmm... * 2^exp2.
int exp2;
double m = std::frexp(value, &exp2);
- uint64_t mantissa = m * (32768.0 * 65536.0 * 65536.0 * 65536.0);
+ uint64_t mantissa =
+ static_cast<uint64_t>(m * (32768.0 * 65536.0 * 65536.0 * 65536.0));
// std::frexp returns an m value in the range [0.5, 1.0), however we
// can't multiply it by 2^64 and convert to an integer because some FPUs
// throw an exception when converting an number higher than 2^63 into an
@@ -545,7 +546,7 @@ static ExpDigits SplitToSix(const double value) {
// Helper function for fast formatting of floating-point.
// The result is the same as "%g", a.k.a. "%.6g".
size_t numbers_internal::SixDigitsToBuffer(double d,
- absl::Nonnull<char*> const buffer) {
+ char* absl_nonnull const buffer) {
static_assert(std::numeric_limits<float>::is_iec559,
"IEEE-754/IEC-559 support only");
@@ -693,9 +694,9 @@ static constexpr std::array<int8_t, 256> kAsciiToInt = {
// Parse the sign and optional hex or oct prefix in text.
inline bool safe_parse_sign_and_base(
- absl::Nonnull<absl::string_view*> text /*inout*/,
- absl::Nonnull<int*> base_ptr /*inout*/,
- absl::Nonnull<bool*> negative_ptr /*output*/) {
+ absl::string_view* absl_nonnull text /*inout*/,
+ int* absl_nonnull base_ptr /*inout*/,
+ bool* absl_nonnull negative_ptr /*output*/) {
if (text->data() == nullptr) {
return false;
}
@@ -980,7 +981,7 @@ ABSL_CONST_INIT const IntType LookupTables<IntType>::kVminOverBase[] =
template <typename IntType>
inline bool safe_parse_positive_int(absl::string_view text, int base,
- absl::Nonnull<IntType*> value_p) {
+ IntType* absl_nonnull value_p) {
IntType value = 0;
const IntType vmax = std::numeric_limits<IntType>::max();
assert(vmax > 0);
@@ -1017,7 +1018,7 @@ inline bool safe_parse_positive_int(absl::string_view text, int base,
template <typename IntType>
inline bool safe_parse_negative_int(absl::string_view text, int base,
- absl::Nonnull<IntType*> value_p) {
+ IntType* absl_nonnull value_p) {
IntType value = 0;
const IntType vmin = std::numeric_limits<IntType>::min();
assert(vmin < 0);
@@ -1062,7 +1063,7 @@ inline bool safe_parse_negative_int(absl::string_view text, int base,
// http://pubs.opengroup.org/onlinepubs/9699919799/functions/strtol.html
template <typename IntType>
inline bool safe_int_internal(absl::string_view text,
- absl::Nonnull<IntType*> value_p, int base) {
+ IntType* absl_nonnull value_p, int base) {
*value_p = 0;
bool negative;
if (!safe_parse_sign_and_base(&text, &base, &negative)) {
@@ -1077,7 +1078,7 @@ inline bool safe_int_internal(absl::string_view text,
template <typename IntType>
inline bool safe_uint_internal(absl::string_view text,
- absl::Nonnull<IntType*> value_p, int base) {
+ IntType* absl_nonnull value_p, int base) {
*value_p = 0;
bool negative;
if (!safe_parse_sign_and_base(&text, &base, &negative) || negative) {
@@ -1111,32 +1112,52 @@ ABSL_CONST_INIT ABSL_DLL const char kHexTable[513] =
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
-bool safe_strto32_base(absl::string_view text, absl::Nonnull<int32_t*> value,
+bool safe_strto8_base(absl::string_view text, int8_t* absl_nonnull value,
+ int base) {
+ return safe_int_internal<int8_t>(text, value, base);
+}
+
+bool safe_strto16_base(absl::string_view text, int16_t* absl_nonnull value,
+ int base) {
+ return safe_int_internal<int16_t>(text, value, base);
+}
+
+bool safe_strto32_base(absl::string_view text, int32_t* absl_nonnull value,
int base) {
return safe_int_internal<int32_t>(text, value, base);
}
-bool safe_strto64_base(absl::string_view text, absl::Nonnull<int64_t*> value,
+bool safe_strto64_base(absl::string_view text, int64_t* absl_nonnull value,
int base) {
return safe_int_internal<int64_t>(text, value, base);
}
-bool safe_strto128_base(absl::string_view text, absl::Nonnull<int128*> value,
+bool safe_strto128_base(absl::string_view text, int128* absl_nonnull value,
int base) {
return safe_int_internal<absl::int128>(text, value, base);
}
-bool safe_strtou32_base(absl::string_view text, absl::Nonnull<uint32_t*> value,
+bool safe_strtou8_base(absl::string_view text, uint8_t* absl_nonnull value,
+ int base) {
+ return safe_uint_internal<uint8_t>(text, value, base);
+}
+
+bool safe_strtou16_base(absl::string_view text, uint16_t* absl_nonnull value,
+ int base) {
+ return safe_uint_internal<uint16_t>(text, value, base);
+}
+
+bool safe_strtou32_base(absl::string_view text, uint32_t* absl_nonnull value,
int base) {
return safe_uint_internal<uint32_t>(text, value, base);
}
-bool safe_strtou64_base(absl::string_view text, absl::Nonnull<uint64_t*> value,
+bool safe_strtou64_base(absl::string_view text, uint64_t* absl_nonnull value,
int base) {
return safe_uint_internal<uint64_t>(text, value, base);
}
-bool safe_strtou128_base(absl::string_view text, absl::Nonnull<uint128*> value,
+bool safe_strtou128_base(absl::string_view text, uint128* absl_nonnull value,
int base) {
return safe_uint_internal<absl::uint128>(text, value, base);
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/numbers.h b/contrib/restricted/abseil-cpp/absl/strings/numbers.h
index 739dbb28f93..9c679746d37 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/numbers.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/numbers.h
@@ -32,6 +32,7 @@
#endif
#include <cstddef>
+#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <ctime>
@@ -39,6 +40,7 @@
#include <string>
#include <type_traits>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/macros.h"
@@ -60,8 +62,8 @@ ABSL_NAMESPACE_BEGIN
// encountered, this function returns `false`, leaving `out` in an unspecified
// state.
template <typename int_type>
-ABSL_MUST_USE_RESULT bool SimpleAtoi(absl::string_view str,
- absl::Nonnull<int_type*> out);
+[[nodiscard]] bool SimpleAtoi(absl::string_view str,
+ int_type* absl_nonnull out);
// SimpleAtof()
//
@@ -72,8 +74,7 @@ ABSL_MUST_USE_RESULT bool SimpleAtoi(absl::string_view str,
// allowed formats for `str`, except SimpleAtof() is locale-independent and will
// always use the "C" locale. If any errors are encountered, this function
// returns `false`, leaving `out` in an unspecified state.
-ABSL_MUST_USE_RESULT bool SimpleAtof(absl::string_view str,
- absl::Nonnull<float*> out);
+[[nodiscard]] bool SimpleAtof(absl::string_view str, float* absl_nonnull out);
// SimpleAtod()
//
@@ -84,8 +85,7 @@ ABSL_MUST_USE_RESULT bool SimpleAtof(absl::string_view str,
// allowed formats for `str`, except SimpleAtod is locale-independent and will
// always use the "C" locale. If any errors are encountered, this function
// returns `false`, leaving `out` in an unspecified state.
-ABSL_MUST_USE_RESULT bool SimpleAtod(absl::string_view str,
- absl::Nonnull<double*> out);
+[[nodiscard]] bool SimpleAtod(absl::string_view str, double* absl_nonnull out);
// SimpleAtob()
//
@@ -95,8 +95,7 @@ ABSL_MUST_USE_RESULT bool SimpleAtod(absl::string_view str,
// are interpreted as boolean `false`: "false", "f", "no", "n", "0". If any
// errors are encountered, this function returns `false`, leaving `out` in an
// unspecified state.
-ABSL_MUST_USE_RESULT bool SimpleAtob(absl::string_view str,
- absl::Nonnull<bool*> out);
+[[nodiscard]] bool SimpleAtob(absl::string_view str, bool* absl_nonnull out);
// SimpleHexAtoi()
//
@@ -109,14 +108,14 @@ ABSL_MUST_USE_RESULT bool SimpleAtob(absl::string_view str,
// by this function. If any errors are encountered, this function returns
// `false`, leaving `out` in an unspecified state.
template <typename int_type>
-ABSL_MUST_USE_RESULT bool SimpleHexAtoi(absl::string_view str,
- absl::Nonnull<int_type*> out);
+[[nodiscard]] bool SimpleHexAtoi(absl::string_view str,
+ int_type* absl_nonnull out);
// Overloads of SimpleHexAtoi() for 128 bit integers.
-ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(
- absl::string_view str, absl::Nonnull<absl::int128*> out);
-ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(
- absl::string_view str, absl::Nonnull<absl::uint128*> out);
+[[nodiscard]] inline bool SimpleHexAtoi(absl::string_view str,
+ absl::int128* absl_nonnull out);
+[[nodiscard]] inline bool SimpleHexAtoi(absl::string_view str,
+ absl::uint128* absl_nonnull out);
ABSL_NAMESPACE_END
} // namespace absl
@@ -127,6 +126,18 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace numbers_internal {
+template <typename int_type>
+constexpr bool is_signed() {
+ if constexpr (std::is_arithmetic<int_type>::value) {
+ // Use std::numeric_limits<T>::is_signed where it's defined to work.
+ return std::numeric_limits<int_type>::is_signed;
+ }
+ // TODO(jorg): This signed-ness check is used because it works correctly
+ // with enums, and it also serves to check that int_type is not a pointer.
+ // If one day something like std::is_signed<enum E> works, switch to it.
+ return static_cast<int_type>(1) - 2 < 0;
+}
+
// Digit conversion.
ABSL_DLL extern const char kHexChar[17]; // 0123456789abcdef
ABSL_DLL extern const char
@@ -138,22 +149,30 @@ ABSL_DLL extern const char
// PutTwoDigits(42, buf);
// // buf[0] == '4'
// // buf[1] == '2'
-void PutTwoDigits(uint32_t i, absl::Nonnull<char*> buf);
+void PutTwoDigits(uint32_t i, char* absl_nonnull buf);
// safe_strto?() functions for implementing SimpleAtoi()
-bool safe_strto32_base(absl::string_view text, absl::Nonnull<int32_t*> value,
+bool safe_strto8_base(absl::string_view text, int8_t* absl_nonnull value,
+ int base);
+bool safe_strto16_base(absl::string_view text, int16_t* absl_nonnull value,
+ int base);
+bool safe_strto32_base(absl::string_view text, int32_t* absl_nonnull value,
int base);
-bool safe_strto64_base(absl::string_view text, absl::Nonnull<int64_t*> value,
+bool safe_strto64_base(absl::string_view text, int64_t* absl_nonnull value,
int base);
bool safe_strto128_base(absl::string_view text,
- absl::Nonnull<absl::int128*> value, int base);
-bool safe_strtou32_base(absl::string_view text, absl::Nonnull<uint32_t*> value,
+ absl::int128* absl_nonnull value, int base);
+bool safe_strtou8_base(absl::string_view text, uint8_t* absl_nonnull value,
+ int base);
+bool safe_strtou16_base(absl::string_view text, uint16_t* absl_nonnull value,
int base);
-bool safe_strtou64_base(absl::string_view text, absl::Nonnull<uint64_t*> value,
+bool safe_strtou32_base(absl::string_view text, uint32_t* absl_nonnull value,
+ int base);
+bool safe_strtou64_base(absl::string_view text, uint64_t* absl_nonnull value,
int base);
bool safe_strtou128_base(absl::string_view text,
- absl::Nonnull<absl::uint128*> value, int base);
+ absl::uint128* absl_nonnull value, int base);
static const int kFastToBufferSize = 32;
static const int kSixDigitsToBufferSize = 16;
@@ -164,33 +183,30 @@ static const int kSixDigitsToBufferSize = 16;
// outside the range 0.0001-999999 are output using scientific notation
// (1.23456e+06). This routine is heavily optimized.
// Required buffer size is `kSixDigitsToBufferSize`.
-size_t SixDigitsToBuffer(double d, absl::Nonnull<char*> buffer);
+size_t SixDigitsToBuffer(double d, char* absl_nonnull buffer);
// WARNING: These functions may write more characters than necessary, because
// they are intended for speed. All functions take an output buffer
// as an argument and return a pointer to the last byte they wrote, which is the
// terminating '\0'. At most `kFastToBufferSize` bytes are written.
-absl::Nonnull<char*> FastIntToBuffer(int32_t i, absl::Nonnull<char*> buffer)
+char* absl_nonnull FastIntToBuffer(int32_t i, char* absl_nonnull buffer)
ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
-absl::Nonnull<char*> FastIntToBuffer(uint32_t n, absl::Nonnull<char*> out_str)
+char* absl_nonnull FastIntToBuffer(uint32_t n, char* absl_nonnull out_str)
ABSL_INTERNAL_NEED_MIN_SIZE(out_str, kFastToBufferSize);
-absl::Nonnull<char*> FastIntToBuffer(int64_t i, absl::Nonnull<char*> buffer)
+char* absl_nonnull FastIntToBuffer(int64_t i, char* absl_nonnull buffer)
ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
-absl::Nonnull<char*> FastIntToBuffer(uint64_t i, absl::Nonnull<char*> buffer)
+char* absl_nonnull FastIntToBuffer(uint64_t i, char* absl_nonnull buffer)
ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
// For enums and integer types that are not an exact match for the types above,
// use templates to call the appropriate one of the four overloads above.
template <typename int_type>
-absl::Nonnull<char*> FastIntToBuffer(int_type i, absl::Nonnull<char*> buffer)
+char* absl_nonnull FastIntToBuffer(int_type i, char* absl_nonnull buffer)
ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize) {
static_assert(sizeof(i) <= 64 / 8,
"FastIntToBuffer works only with 64-bit-or-less integers.");
- // TODO(jorg): This signed-ness check is used because it works correctly
- // with enums, and it also serves to check that int_type is not a pointer.
- // If one day something like std::is_signed<enum E> works, switch to it.
// These conditions are constexpr bools to suppress MSVC warning C4127.
- constexpr bool kIsSigned = static_cast<int_type>(1) - 2 < 0;
+ constexpr bool kIsSigned = is_signed<int_type>();
constexpr bool kUse64Bit = sizeof(i) > 32 / 8;
if (kIsSigned) {
if (kUse64Bit) {
@@ -210,39 +226,52 @@ absl::Nonnull<char*> FastIntToBuffer(int_type i, absl::Nonnull<char*> buffer)
// Implementation of SimpleAtoi, generalized to support arbitrary base (used
// with base different from 10 elsewhere in Abseil implementation).
template <typename int_type>
-ABSL_MUST_USE_RESULT bool safe_strtoi_base(absl::string_view s,
- absl::Nonnull<int_type*> out,
- int base) {
- static_assert(sizeof(*out) == 4 || sizeof(*out) == 8,
- "SimpleAtoi works only with 32-bit or 64-bit integers.");
+[[nodiscard]] bool safe_strtoi_base(absl::string_view s,
+ int_type* absl_nonnull out, int base) {
+ static_assert(sizeof(*out) == 1 || sizeof(*out) == 2 || sizeof(*out) == 4 ||
+ sizeof(*out) == 8,
+ "SimpleAtoi works only with 8, 16, 32, or 64-bit integers.");
static_assert(!std::is_floating_point<int_type>::value,
"Use SimpleAtof or SimpleAtod instead.");
bool parsed;
- // TODO(jorg): This signed-ness check is used because it works correctly
- // with enums, and it also serves to check that int_type is not a pointer.
- // If one day something like std::is_signed<enum E> works, switch to it.
// These conditions are constexpr bools to suppress MSVC warning C4127.
- constexpr bool kIsSigned = static_cast<int_type>(1) - 2 < 0;
- constexpr bool kUse64Bit = sizeof(*out) == 64 / 8;
+ constexpr bool kIsSigned = is_signed<int_type>();
+ constexpr int kIntTypeSize = sizeof(*out) * 8;
if (kIsSigned) {
- if (kUse64Bit) {
+ if (kIntTypeSize == 64) {
int64_t val;
parsed = numbers_internal::safe_strto64_base(s, &val, base);
*out = static_cast<int_type>(val);
- } else {
+ } else if (kIntTypeSize == 32) {
int32_t val;
parsed = numbers_internal::safe_strto32_base(s, &val, base);
*out = static_cast<int_type>(val);
+ } else if (kIntTypeSize == 16) {
+ int16_t val;
+ parsed = numbers_internal::safe_strto16_base(s, &val, base);
+ *out = static_cast<int_type>(val);
+ } else if (kIntTypeSize == 8) {
+ int8_t val;
+ parsed = numbers_internal::safe_strto8_base(s, &val, base);
+ *out = static_cast<int_type>(val);
}
} else {
- if (kUse64Bit) {
+ if (kIntTypeSize == 64) {
uint64_t val;
parsed = numbers_internal::safe_strtou64_base(s, &val, base);
*out = static_cast<int_type>(val);
- } else {
+ } else if (kIntTypeSize == 32) {
uint32_t val;
parsed = numbers_internal::safe_strtou32_base(s, &val, base);
*out = static_cast<int_type>(val);
+ } else if (kIntTypeSize == 16) {
+ uint16_t val;
+ parsed = numbers_internal::safe_strtou16_base(s, &val, base);
+ *out = static_cast<int_type>(val);
+ } else if (kIntTypeSize == 8) {
+ uint8_t val;
+ parsed = numbers_internal::safe_strtou8_base(s, &val, base);
+ *out = static_cast<int_type>(val);
}
}
return parsed;
@@ -254,7 +283,7 @@ ABSL_MUST_USE_RESULT bool safe_strtoi_base(absl::string_view s,
// without the terminating null character. Thus `out` must be of length >= 16.
// Returns the number of non-pad digits of the output (it can never be zero
// since 0 has one digit).
-inline size_t FastHexToBufferZeroPad16(uint64_t val, absl::Nonnull<char*> out) {
+inline size_t FastHexToBufferZeroPad16(uint64_t val, char* absl_nonnull out) {
#ifdef ABSL_INTERNAL_HAVE_SSSE3
uint64_t be = absl::big_endian::FromHost64(val);
const auto kNibbleMask = _mm_set1_epi8(0xf);
@@ -280,34 +309,34 @@ inline size_t FastHexToBufferZeroPad16(uint64_t val, absl::Nonnull<char*> out) {
} // namespace numbers_internal
template <typename int_type>
-ABSL_MUST_USE_RESULT bool SimpleAtoi(absl::string_view str,
- absl::Nonnull<int_type*> out) {
+[[nodiscard]] bool SimpleAtoi(absl::string_view str,
+ int_type* absl_nonnull out) {
return numbers_internal::safe_strtoi_base(str, out, 10);
}
-ABSL_MUST_USE_RESULT inline bool SimpleAtoi(absl::string_view str,
- absl::Nonnull<absl::int128*> out) {
+[[nodiscard]] inline bool SimpleAtoi(absl::string_view str,
+ absl::int128* absl_nonnull out) {
return numbers_internal::safe_strto128_base(str, out, 10);
}
-ABSL_MUST_USE_RESULT inline bool SimpleAtoi(absl::string_view str,
- absl::Nonnull<absl::uint128*> out) {
+[[nodiscard]] inline bool SimpleAtoi(absl::string_view str,
+ absl::uint128* absl_nonnull out) {
return numbers_internal::safe_strtou128_base(str, out, 10);
}
template <typename int_type>
-ABSL_MUST_USE_RESULT bool SimpleHexAtoi(absl::string_view str,
- absl::Nonnull<int_type*> out) {
+[[nodiscard]] bool SimpleHexAtoi(absl::string_view str,
+ int_type* absl_nonnull out) {
return numbers_internal::safe_strtoi_base(str, out, 16);
}
-ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(
- absl::string_view str, absl::Nonnull<absl::int128*> out) {
+[[nodiscard]] inline bool SimpleHexAtoi(absl::string_view str,
+ absl::int128* absl_nonnull out) {
return numbers_internal::safe_strto128_base(str, out, 16);
}
-ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(
- absl::string_view str, absl::Nonnull<absl::uint128*> out) {
+[[nodiscard]] inline bool SimpleHexAtoi(absl::string_view str,
+ absl::uint128* absl_nonnull out) {
return numbers_internal::safe_strtou128_base(str, out, 16);
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_cat.cc b/contrib/restricted/abseil-cpp/absl/strings/str_cat.cc
index c51c13732d2..1f3cfbffc11 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_cat.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_cat.cc
@@ -42,8 +42,7 @@ ABSL_NAMESPACE_BEGIN
namespace {
// Append is merely a version of memcpy that returns the address of the byte
// after the area just overwritten.
-inline absl::Nonnull<char*> Append(absl::Nonnull<char*> out,
- const AlphaNum& x) {
+inline char* absl_nonnull Append(char* absl_nonnull out, const AlphaNum& x) {
// memcpy is allowed to overwrite arbitrary memory, so doing this after the
// call would force an extra fetch of x.size().
char* after = out + x.size();
@@ -159,7 +158,7 @@ std::string CatPieces(std::initializer_list<absl::string_view> pieces) {
assert(((src).size() == 0) || \
(uintptr_t((src).data() - (dest).data()) > uintptr_t((dest).size())))
-void AppendPieces(absl::Nonnull<std::string*> dest,
+void AppendPieces(std::string* absl_nonnull dest,
std::initializer_list<absl::string_view> pieces) {
size_t old_size = dest->size();
size_t to_append = 0;
@@ -183,7 +182,7 @@ void AppendPieces(absl::Nonnull<std::string*> dest,
} // namespace strings_internal
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a) {
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a) {
ASSERT_NO_OVERLAP(*dest, a);
std::string::size_type old_size = dest->size();
STLStringAppendUninitializedAmortized(dest, a.size());
@@ -193,7 +192,7 @@ void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a) {
assert(out == begin + dest->size());
}
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
const AlphaNum& b) {
ASSERT_NO_OVERLAP(*dest, a);
ASSERT_NO_OVERLAP(*dest, b);
@@ -206,7 +205,7 @@ void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
assert(out == begin + dest->size());
}
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
const AlphaNum& b, const AlphaNum& c) {
ASSERT_NO_OVERLAP(*dest, a);
ASSERT_NO_OVERLAP(*dest, b);
@@ -221,7 +220,7 @@ void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
assert(out == begin + dest->size());
}
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
const AlphaNum& b, const AlphaNum& c, const AlphaNum& d) {
ASSERT_NO_OVERLAP(*dest, a);
ASSERT_NO_OVERLAP(*dest, b);
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_cat.h b/contrib/restricted/abseil-cpp/absl/strings/str_cat.h
index 1a806627758..84db0f6cd50 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_cat.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_cat.h
@@ -111,7 +111,7 @@
#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"
-#if defined(ABSL_HAVE_STD_STRING_VIEW) && !defined(ABSL_USES_STD_STRING_VIEW)
+#if !defined(ABSL_USES_STD_STRING_VIEW)
#include <string_view>
#endif
@@ -191,29 +191,29 @@ struct Hex {
template <typename Int>
explicit Hex(
Int v, PadSpec spec = absl::kNoPad,
- typename std::enable_if<sizeof(Int) == 1 &&
- !std::is_pointer<Int>::value>::type* = nullptr)
+ std::enable_if_t<sizeof(Int) == 1 && !std::is_pointer<Int>::value, bool> =
+ true)
: Hex(spec, static_cast<uint8_t>(v)) {}
template <typename Int>
explicit Hex(
Int v, PadSpec spec = absl::kNoPad,
- typename std::enable_if<sizeof(Int) == 2 &&
- !std::is_pointer<Int>::value>::type* = nullptr)
+ std::enable_if_t<sizeof(Int) == 2 && !std::is_pointer<Int>::value, bool> =
+ true)
: Hex(spec, static_cast<uint16_t>(v)) {}
template <typename Int>
explicit Hex(
Int v, PadSpec spec = absl::kNoPad,
- typename std::enable_if<sizeof(Int) == 4 &&
- !std::is_pointer<Int>::value>::type* = nullptr)
+ std::enable_if_t<sizeof(Int) == 4 && !std::is_pointer<Int>::value, bool> =
+ true)
: Hex(spec, static_cast<uint32_t>(v)) {}
template <typename Int>
explicit Hex(
Int v, PadSpec spec = absl::kNoPad,
- typename std::enable_if<sizeof(Int) == 8 &&
- !std::is_pointer<Int>::value>::type* = nullptr)
+ std::enable_if_t<sizeof(Int) == 8 && !std::is_pointer<Int>::value, bool> =
+ true)
: Hex(spec, static_cast<uint64_t>(v)) {}
template <typename Pointee>
- explicit Hex(absl::Nullable<Pointee*> v, PadSpec spec = absl::kNoPad)
+ explicit Hex(Pointee* absl_nullable v, PadSpec spec = absl::kNoPad)
: Hex(spec, reinterpret_cast<uintptr_t>(v)) {}
template <typename S>
@@ -262,7 +262,7 @@ struct Dec {
template <typename Int>
explicit Dec(Int v, PadSpec spec = absl::kNoPad,
- typename std::enable_if<(sizeof(Int) <= 8)>::type* = nullptr)
+ std::enable_if_t<sizeof(Int) <= 8, bool> = true)
: value(v >= 0 ? static_cast<uint64_t>(v)
: uint64_t{0} - static_cast<uint64_t>(v)),
width(spec == absl::kNoPad ? 1
@@ -359,14 +359,14 @@ class AlphaNum {
ABSL_ATTRIBUTE_LIFETIME_BOUND)
: piece_(&buf.data[0], buf.size) {}
- AlphaNum(absl::Nullable<const char*> c_str // NOLINT(runtime/explicit)
+ AlphaNum(const char* absl_nullable c_str // NOLINT(runtime/explicit)
ABSL_ATTRIBUTE_LIFETIME_BOUND)
: piece_(NullSafeStringView(c_str)) {}
AlphaNum(absl::string_view pc // NOLINT(runtime/explicit)
ABSL_ATTRIBUTE_LIFETIME_BOUND)
: piece_(pc) {}
-#if defined(ABSL_HAVE_STD_STRING_VIEW) && !defined(ABSL_USES_STD_STRING_VIEW)
+#if !defined(ABSL_USES_STD_STRING_VIEW)
AlphaNum(std::string_view pc // NOLINT(runtime/explicit)
ABSL_ATTRIBUTE_LIFETIME_BOUND)
: piece_(pc.data(), pc.size()) {}
@@ -392,7 +392,7 @@ class AlphaNum {
AlphaNum& operator=(const AlphaNum&) = delete;
absl::string_view::size_type size() const { return piece_.size(); }
- absl::Nullable<const char*> data() const { return piece_.data(); }
+ const char* absl_nullable data() const { return piece_.data(); }
absl::string_view Piece() const { return piece_; }
// Match unscoped enums. Use integral promotion so that a `char`-backed
@@ -462,7 +462,7 @@ namespace strings_internal {
// Do not call directly - this is not part of the public API.
std::string CatPieces(std::initializer_list<absl::string_view> pieces);
-void AppendPieces(absl::Nonnull<std::string*> dest,
+void AppendPieces(std::string* absl_nonnull dest,
std::initializer_list<absl::string_view> pieces);
template <typename Integer>
@@ -538,28 +538,28 @@ using EnableIfFastCase = T;
} // namespace strings_internal
-ABSL_MUST_USE_RESULT inline std::string StrCat() { return std::string(); }
+[[nodiscard]] inline std::string StrCat() { return std::string(); }
template <typename T>
-ABSL_MUST_USE_RESULT inline std::string StrCat(
+[[nodiscard]] inline std::string StrCat(
strings_internal::EnableIfFastCase<T> a) {
return strings_internal::SingleArgStrCat(a);
}
-ABSL_MUST_USE_RESULT inline std::string StrCat(const AlphaNum& a) {
+[[nodiscard]] inline std::string StrCat(const AlphaNum& a) {
return std::string(a.data(), a.size());
}
-ABSL_MUST_USE_RESULT std::string StrCat(const AlphaNum& a, const AlphaNum& b);
-ABSL_MUST_USE_RESULT std::string StrCat(const AlphaNum& a, const AlphaNum& b,
- const AlphaNum& c);
-ABSL_MUST_USE_RESULT std::string StrCat(const AlphaNum& a, const AlphaNum& b,
- const AlphaNum& c, const AlphaNum& d);
+[[nodiscard]] std::string StrCat(const AlphaNum& a, const AlphaNum& b);
+[[nodiscard]] std::string StrCat(const AlphaNum& a, const AlphaNum& b,
+ const AlphaNum& c);
+[[nodiscard]] std::string StrCat(const AlphaNum& a, const AlphaNum& b,
+ const AlphaNum& c, const AlphaNum& d);
// Support 5 or more arguments
template <typename... AV>
-ABSL_MUST_USE_RESULT inline std::string StrCat(
- const AlphaNum& a, const AlphaNum& b, const AlphaNum& c, const AlphaNum& d,
- const AlphaNum& e, const AV&... args) {
+[[nodiscard]] inline std::string StrCat(const AlphaNum& a, const AlphaNum& b,
+ const AlphaNum& c, const AlphaNum& d,
+ const AlphaNum& e, const AV&... args) {
return strings_internal::CatPieces(
{a.Piece(), b.Piece(), c.Piece(), d.Piece(), e.Piece(),
static_cast<const AlphaNum&>(args).Piece()...});
@@ -592,18 +592,18 @@ ABSL_MUST_USE_RESULT inline std::string StrCat(
// absl::string_view p = s;
// StrAppend(&s, p);
-inline void StrAppend(absl::Nonnull<std::string*>) {}
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a);
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+inline void StrAppend(std::string* absl_nonnull) {}
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a);
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
const AlphaNum& b);
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
const AlphaNum& b, const AlphaNum& c);
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
const AlphaNum& b, const AlphaNum& c, const AlphaNum& d);
// Support 5 or more arguments
template <typename... AV>
-inline void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+inline void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
const AlphaNum& b, const AlphaNum& c, const AlphaNum& d,
const AlphaNum& e, const AV&... args) {
strings_internal::AppendPieces(
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_format.h b/contrib/restricted/abseil-cpp/absl/strings/str_format.h
index 76904d32285..ffa7f113be0 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_format.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_format.h
@@ -112,7 +112,7 @@ class UntypedFormatSpec {
protected:
explicit UntypedFormatSpec(
- absl::Nonnull<const str_format_internal::ParsedFormatBase*> pc)
+ const str_format_internal::ParsedFormatBase* absl_nonnull pc)
: spec_(pc) {}
private:
@@ -152,7 +152,7 @@ str_format_internal::StreamedWrapper<T> FormatStreamed(const T& v) {
// EXPECT_EQ(8, n);
class FormatCountCapture {
public:
- explicit FormatCountCapture(absl::Nonnull<int*> p) : p_(p) {}
+ explicit FormatCountCapture(int* absl_nonnull p) : p_(p) {}
private:
// FormatCountCaptureHelper is used to define FormatConvertImpl() for this
@@ -161,8 +161,8 @@ class FormatCountCapture {
// Unused() is here because of the false positive from -Wunused-private-field
// p_ is used in the templated function of the friend FormatCountCaptureHelper
// class.
- absl::Nonnull<int*> Unused() { return p_; }
- absl::Nonnull<int*> p_;
+ int* absl_nonnull Unused() { return p_; }
+ int* absl_nonnull p_;
};
// FormatSpec
@@ -359,8 +359,8 @@ using ParsedFormat = str_format_internal::ExtendedParsedFormat<
//
// Returns an empty string in case of error.
template <typename... Args>
-ABSL_MUST_USE_RESULT std::string StrFormat(const FormatSpec<Args...>& format,
- const Args&... args) {
+[[nodiscard]] std::string StrFormat(const FormatSpec<Args...>& format,
+ const Args&... args) {
return str_format_internal::FormatPack(
str_format_internal::UntypedFormatSpecImpl::Extract(format),
{str_format_internal::FormatArgImpl(args)...});
@@ -377,7 +377,7 @@ ABSL_MUST_USE_RESULT std::string StrFormat(const FormatSpec<Args...>& format,
// std::string orig("For example PI is approximately ");
// std::cout << StrAppendFormat(&orig, "%12.6f", 3.14);
template <typename... Args>
-std::string& StrAppendFormat(absl::Nonnull<std::string*> dst,
+std::string& StrAppendFormat(std::string* absl_nonnull dst,
const FormatSpec<Args...>& format,
const Args&... args) {
return str_format_internal::AppendPack(
@@ -396,7 +396,7 @@ std::string& StrAppendFormat(absl::Nonnull<std::string*> dst,
//
// std::cout << StreamFormat("%12.6f", 3.14);
template <typename... Args>
-ABSL_MUST_USE_RESULT str_format_internal::Streamable StreamFormat(
+[[nodiscard]] str_format_internal::Streamable StreamFormat(
const FormatSpec<Args...>& format, const Args&... args) {
return str_format_internal::Streamable(
str_format_internal::UntypedFormatSpecImpl::Extract(format),
@@ -437,7 +437,7 @@ int PrintF(const FormatSpec<Args...>& format, const Args&... args) {
// Outputs: "The capital of Mongolia is Ulaanbaatar"
//
template <typename... Args>
-int FPrintF(absl::Nonnull<std::FILE*> output, const FormatSpec<Args...>& format,
+int FPrintF(std::FILE* absl_nonnull output, const FormatSpec<Args...>& format,
const Args&... args) {
return str_format_internal::FprintF(
output, str_format_internal::UntypedFormatSpecImpl::Extract(format),
@@ -466,7 +466,7 @@ int FPrintF(absl::Nonnull<std::FILE*> output, const FormatSpec<Args...>& format,
// Post-condition: output == "The capital of Mongolia is Ulaanbaatar"
//
template <typename... Args>
-int SNPrintF(absl::Nonnull<char*> output, std::size_t size,
+int SNPrintF(char* absl_nonnull output, std::size_t size,
const FormatSpec<Args...>& format, const Args&... args) {
return str_format_internal::SnprintF(
output, size, str_format_internal::UntypedFormatSpecImpl::Extract(format),
@@ -500,7 +500,7 @@ class FormatRawSink {
template <typename T,
typename = typename std::enable_if<std::is_constructible<
str_format_internal::FormatRawSinkImpl, T*>::value>::type>
- FormatRawSink(absl::Nonnull<T*> raw) // NOLINT
+ FormatRawSink(T* absl_nonnull raw) // NOLINT
: sink_(raw) {}
private:
@@ -582,9 +582,9 @@ using FormatArg = str_format_internal::FormatArgImpl;
// return std::move(out);
// }
//
-ABSL_MUST_USE_RESULT inline bool FormatUntyped(
- FormatRawSink raw_sink, const UntypedFormatSpec& format,
- absl::Span<const FormatArg> args) {
+[[nodiscard]] inline bool FormatUntyped(FormatRawSink raw_sink,
+ const UntypedFormatSpec& format,
+ absl::Span<const FormatArg> args) {
return str_format_internal::FormatUntyped(
str_format_internal::FormatRawSinkImpl::Extract(raw_sink),
str_format_internal::UntypedFormatSpecImpl::Extract(format), args);
@@ -609,7 +609,7 @@ ABSL_MUST_USE_RESULT inline bool FormatUntyped(
//
// Note that unlike with AbslFormatConvert(), AbslStringify() does not allow
// customization of allowed conversion characters. AbslStringify() uses `%v` as
-// the underlying conversion specififer. Additionally, AbslStringify() supports
+// the underlying conversion specifier. Additionally, AbslStringify() supports
// use with absl::StrCat while AbslFormatConvert() does not.
//
// Example:
@@ -857,16 +857,16 @@ class FormatSink {
}
// Support `absl::Format(&sink, format, args...)`.
- friend void AbslFormatFlush(absl::Nonnull<FormatSink*> sink,
+ friend void AbslFormatFlush(FormatSink* absl_nonnull sink,
absl::string_view v) {
sink->Append(v);
}
private:
friend str_format_internal::FormatSinkImpl;
- explicit FormatSink(absl::Nonnull<str_format_internal::FormatSinkImpl*> s)
+ explicit FormatSink(str_format_internal::FormatSinkImpl* absl_nonnull s)
: sink_(s) {}
- absl::Nonnull<str_format_internal::FormatSinkImpl*> sink_;
+ str_format_internal::FormatSinkImpl* absl_nonnull sink_;
};
// FormatConvertResult
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_replace.cc b/contrib/restricted/abseil-cpp/absl/strings/str_replace.cc
index a7ab52fed5a..377e30cfd73 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_replace.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_replace.cc
@@ -37,8 +37,8 @@ using FixedMapping =
// occurred.
int ApplySubstitutions(
absl::string_view s,
- absl::Nonnull<std::vector<strings_internal::ViableSubstitution>*> subs_ptr,
- absl::Nonnull<std::string*> result_ptr) {
+ std::vector<strings_internal::ViableSubstitution>* absl_nonnull subs_ptr,
+ std::string* absl_nonnull result_ptr) {
auto& subs = *subs_ptr;
int substitutions = 0;
size_t pos = 0;
@@ -83,7 +83,7 @@ std::string StrReplaceAll(absl::string_view s,
}
int StrReplaceAll(strings_internal::FixedMapping replacements,
- absl::Nonnull<std::string*> target) {
+ std::string* absl_nonnull target) {
return StrReplaceAll<strings_internal::FixedMapping>(replacements, target);
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_replace.h b/contrib/restricted/abseil-cpp/absl/strings/str_replace.h
index e77ced3e576..91b920bcaae 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_replace.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_replace.h
@@ -66,7 +66,7 @@ ABSL_NAMESPACE_BEGIN
// {"$who", "Bob"},
// {"#Noun", "Apples"}});
// EXPECT_EQ("Bob bought 5 Apples. Thanks Bob!", s);
-ABSL_MUST_USE_RESULT std::string StrReplaceAll(
+[[nodiscard]] std::string StrReplaceAll(
absl::string_view s,
std::initializer_list<std::pair<absl::string_view, absl::string_view>>
replacements);
@@ -114,7 +114,7 @@ std::string StrReplaceAll(absl::string_view s,
int StrReplaceAll(
std::initializer_list<std::pair<absl::string_view, absl::string_view>>
replacements,
- absl::Nonnull<std::string*> target);
+ std::string* absl_nonnull target);
// Overload of `StrReplaceAll()` to replace patterns within a given output
// string *in place* with replacements provided within a container of key/value
@@ -130,7 +130,7 @@ int StrReplaceAll(
// EXPECT_EQ("if (ptr &lt; &amp;foo)", s);
template <typename StrToStrMapping>
int StrReplaceAll(const StrToStrMapping& replacements,
- absl::Nonnull<std::string*> target);
+ std::string* absl_nonnull target);
// Implementation details only, past this point.
namespace strings_internal {
@@ -187,8 +187,8 @@ std::vector<ViableSubstitution> FindSubstitutions(
}
int ApplySubstitutions(absl::string_view s,
- absl::Nonnull<std::vector<ViableSubstitution>*> subs_ptr,
- absl::Nonnull<std::string*> result_ptr);
+ std::vector<ViableSubstitution>* absl_nonnull subs_ptr,
+ std::string* absl_nonnull result_ptr);
} // namespace strings_internal
@@ -204,7 +204,7 @@ std::string StrReplaceAll(absl::string_view s,
template <typename StrToStrMapping>
int StrReplaceAll(const StrToStrMapping& replacements,
- absl::Nonnull<std::string*> target) {
+ std::string* absl_nonnull target) {
auto subs = strings_internal::FindSubstitutions(*target, replacements);
if (subs.empty()) return 0;
diff --git a/contrib/restricted/abseil-cpp/absl/strings/string_view.cc b/contrib/restricted/abseil-cpp/absl/strings/string_view.cc
index 97025c32a5a..33bd1bbce38 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/string_view.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/string_view.cc
@@ -30,10 +30,10 @@ namespace {
// This is significantly faster for case-sensitive matches with very
// few possible matches.
-absl::Nullable<const char*> memmatch(absl::Nullable<const char*> phaystack,
- size_t haylen,
- absl::Nullable<const char*> pneedle,
- size_t neelen) {
+const char* absl_nullable memmatch(const char* absl_nullable phaystack,
+ size_t haylen,
+ const char* absl_nullable pneedle,
+ size_t neelen) {
if (0 == neelen) {
return phaystack; // even if haylen is 0
}
@@ -233,11 +233,6 @@ string_view::size_type string_view::find_last_not_of(
return npos;
}
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr string_view::size_type string_view::npos;
-constexpr string_view::size_type string_view::kMaxSize;
-#endif
-
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/strings/string_view.h b/contrib/restricted/abseil-cpp/absl/strings/string_view.h
index b461478f14b..9a1933b611b 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/string_view.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/string_view.h
@@ -163,11 +163,11 @@ class ABSL_ATTRIBUTE_VIEW string_view {
public:
using traits_type = std::char_traits<char>;
using value_type = char;
- using pointer = absl::Nullable<char*>;
- using const_pointer = absl::Nullable<const char*>;
+ using pointer = char* absl_nullable;
+ using const_pointer = const char* absl_nullable;
using reference = char&;
using const_reference = const char&;
- using const_iterator = absl::Nullable<const char*>;
+ using const_iterator = const char* absl_nullable;
using iterator = const_iterator;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
using reverse_iterator = const_reverse_iterator;
@@ -197,11 +197,11 @@ class ABSL_ATTRIBUTE_VIEW string_view {
// instead (see below).
// The length check is skipped since it is unnecessary and causes code bloat.
constexpr string_view( // NOLINT(runtime/explicit)
- absl::Nonnull<const char*> str)
+ const char* absl_nonnull str)
: ptr_(str), length_(str ? StrlenInternal(str) : 0) {}
// Constructor of a `string_view` from a `const char*` and length.
- constexpr string_view(absl::Nullable<const char*> data, size_type len)
+ constexpr string_view(const char* absl_nullable data, size_type len)
: ptr_(data), length_(CheckLengthInternal(len)) {}
constexpr string_view(const string_view&) noexcept = default;
@@ -398,7 +398,7 @@ class ABSL_ATTRIBUTE_VIEW string_view {
if (ABSL_PREDICT_FALSE(pos > length_)) {
base_internal::ThrowStdOutOfRange("absl::string_view::substr");
}
- return string_view(ptr_ + pos, Min(n, length_ - pos));
+ return string_view(ptr_ + pos, (std::min)(n, length_ - pos));
}
// string_view::compare()
@@ -409,10 +409,10 @@ class ABSL_ATTRIBUTE_VIEW string_view {
// is greater than `x`.
constexpr int compare(string_view x) const noexcept {
return CompareImpl(length_, x.length_,
- Min(length_, x.length_) == 0
+ (std::min)(length_, x.length_) == 0
? 0
: ABSL_INTERNAL_STRING_VIEW_MEMCMP(
- ptr_, x.ptr_, Min(length_, x.length_)));
+ ptr_, x.ptr_, (std::min)(length_, x.length_)));
}
// Overload of `string_view::compare()` for comparing a substring of the
@@ -430,21 +430,21 @@ class ABSL_ATTRIBUTE_VIEW string_view {
// Overload of `string_view::compare()` for comparing a `string_view` and a
// a different C-style string `s`.
- constexpr int compare(absl::Nonnull<const char*> s) const {
+ constexpr int compare(const char* absl_nonnull s) const {
return compare(string_view(s));
}
// Overload of `string_view::compare()` for comparing a substring of the
// `string_view` and a different string C-style string `s`.
constexpr int compare(size_type pos1, size_type count1,
- absl::Nonnull<const char*> s) const {
+ const char* absl_nonnull s) const {
return substr(pos1, count1).compare(string_view(s));
}
// Overload of `string_view::compare()` for comparing a substring of the
// `string_view` and a substring of a different C-style string `s`.
constexpr int compare(size_type pos1, size_type count1,
- absl::Nonnull<const char*> s, size_type count2) const {
+ const char* absl_nonnull s, size_type count2) const {
return substr(pos1, count1).compare(string_view(s, count2));
}
@@ -463,14 +463,14 @@ class ABSL_ATTRIBUTE_VIEW string_view {
// Overload of `string_view::find()` for finding a substring of a different
// C-style string `s` within the `string_view`.
- size_type find(absl::Nonnull<const char*> s, size_type pos,
+ size_type find(const char* absl_nonnull s, size_type pos,
size_type count) const {
return find(string_view(s, count), pos);
}
// Overload of `string_view::find()` for finding a different C-style string
// `s` within the `string_view`.
- size_type find(absl::Nonnull<const char *> s, size_type pos = 0) const {
+ size_type find(const char* absl_nonnull s, size_type pos = 0) const {
return find(string_view(s), pos);
}
@@ -487,14 +487,14 @@ class ABSL_ATTRIBUTE_VIEW string_view {
// Overload of `string_view::rfind()` for finding a substring of a different
// C-style string `s` within the `string_view`.
- size_type rfind(absl::Nonnull<const char*> s, size_type pos,
+ size_type rfind(const char* absl_nonnull s, size_type pos,
size_type count) const {
return rfind(string_view(s, count), pos);
}
// Overload of `string_view::rfind()` for finding a different C-style string
// `s` within the `string_view`.
- size_type rfind(absl::Nonnull<const char*> s, size_type pos = npos) const {
+ size_type rfind(const char* absl_nonnull s, size_type pos = npos) const {
return rfind(string_view(s), pos);
}
@@ -513,15 +513,14 @@ class ABSL_ATTRIBUTE_VIEW string_view {
// Overload of `string_view::find_first_of()` for finding a substring of a
// different C-style string `s` within the `string_view`.
- size_type find_first_of(absl::Nonnull<const char*> s, size_type pos,
+ size_type find_first_of(const char* absl_nonnull s, size_type pos,
size_type count) const {
return find_first_of(string_view(s, count), pos);
}
// Overload of `string_view::find_first_of()` for finding a different C-style
// string `s` within the `string_view`.
- size_type find_first_of(absl::Nonnull<const char*> s,
- size_type pos = 0) const {
+ size_type find_first_of(const char* absl_nonnull s, size_type pos = 0) const {
return find_first_of(string_view(s), pos);
}
@@ -540,14 +539,14 @@ class ABSL_ATTRIBUTE_VIEW string_view {
// Overload of `string_view::find_last_of()` for finding a substring of a
// different C-style string `s` within the `string_view`.
- size_type find_last_of(absl::Nonnull<const char*> s, size_type pos,
+ size_type find_last_of(const char* absl_nonnull s, size_type pos,
size_type count) const {
return find_last_of(string_view(s, count), pos);
}
// Overload of `string_view::find_last_of()` for finding a different C-style
// string `s` within the `string_view`.
- size_type find_last_of(absl::Nonnull<const char*> s,
+ size_type find_last_of(const char* absl_nonnull s,
size_type pos = npos) const {
return find_last_of(string_view(s), pos);
}
@@ -565,14 +564,14 @@ class ABSL_ATTRIBUTE_VIEW string_view {
// Overload of `string_view::find_first_not_of()` for finding a substring of a
// different C-style string `s` within the `string_view`.
- size_type find_first_not_of(absl::Nonnull<const char*> s, size_type pos,
+ size_type find_first_not_of(const char* absl_nonnull s, size_type pos,
size_type count) const {
return find_first_not_of(string_view(s, count), pos);
}
// Overload of `string_view::find_first_not_of()` for finding a different
// C-style string `s` within the `string_view`.
- size_type find_first_not_of(absl::Nonnull<const char*> s,
+ size_type find_first_not_of(const char* absl_nonnull s,
size_type pos = 0) const {
return find_first_not_of(string_view(s), pos);
}
@@ -591,14 +590,14 @@ class ABSL_ATTRIBUTE_VIEW string_view {
// Overload of `string_view::find_last_not_of()` for finding a substring of a
// different C-style string `s` within the `string_view`.
- size_type find_last_not_of(absl::Nonnull<const char*> s, size_type pos,
+ size_type find_last_not_of(const char* absl_nonnull s, size_type pos,
size_type count) const {
return find_last_not_of(string_view(s, count), pos);
}
// Overload of `string_view::find_last_not_of()` for finding a different
// C-style string `s` within the `string_view`.
- size_type find_last_not_of(absl::Nonnull<const char*> s,
+ size_type find_last_not_of(const char* absl_nonnull s,
size_type pos = npos) const {
return find_last_not_of(string_view(s), pos);
}
@@ -659,7 +658,7 @@ class ABSL_ATTRIBUTE_VIEW string_view {
// The constructor from std::string delegates to this constructor.
// See the comment on that constructor for the rationale.
struct SkipCheckLengthTag {};
- string_view(absl::Nullable<const char*> data, size_type len,
+ string_view(const char* absl_nullable data, size_type len,
SkipCheckLengthTag) noexcept
: ptr_(data), length_(len) {}
@@ -671,7 +670,7 @@ class ABSL_ATTRIBUTE_VIEW string_view {
return len;
}
- static constexpr size_type StrlenInternal(absl::Nonnull<const char*> str) {
+ static constexpr size_type StrlenInternal(const char* absl_nonnull str) {
#if defined(_MSC_VER) && !defined(__clang__)
// MSVC 2017+ can evaluate this at compile-time.
const char* begin = str;
@@ -689,10 +688,6 @@ class ABSL_ATTRIBUTE_VIEW string_view {
#endif
}
- static constexpr size_t Min(size_type length_a, size_type length_b) {
- return length_a < length_b ? length_a : length_b;
- }
-
static constexpr int CompareImpl(size_type length_a, size_type length_b,
int compare_result) {
return compare_result == 0 ? static_cast<int>(length_a > length_b) -
@@ -700,7 +695,7 @@ class ABSL_ATTRIBUTE_VIEW string_view {
: (compare_result < 0 ? -1 : 1);
}
- absl::Nullable<const char*> ptr_;
+ const char* absl_nullable ptr_;
size_type length_;
};
@@ -761,7 +756,7 @@ inline string_view ClippedSubstr(string_view s, size_t pos,
// Creates an `absl::string_view` from a pointer `p` even if it's null-valued.
// This function should be used where an `absl::string_view` can be created from
// a possibly-null pointer.
-constexpr string_view NullSafeStringView(absl::Nullable<const char*> p) {
+constexpr string_view NullSafeStringView(const char* absl_nullable p) {
return p ? string_view(p) : string_view();
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/strip.h b/contrib/restricted/abseil-cpp/absl/strings/strip.h
index 8a55375169e..55398ffe889 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/strip.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/strip.h
@@ -45,7 +45,7 @@ ABSL_NAMESPACE_BEGIN
// absl::string_view input("abc");
// EXPECT_TRUE(absl::ConsumePrefix(&input, "a"));
// EXPECT_EQ(input, "bc");
-inline constexpr bool ConsumePrefix(absl::Nonnull<absl::string_view*> str,
+inline constexpr bool ConsumePrefix(absl::string_view* absl_nonnull str,
absl::string_view expected) {
if (!absl::StartsWith(*str, expected)) return false;
str->remove_prefix(expected.size());
@@ -62,7 +62,7 @@ inline constexpr bool ConsumePrefix(absl::Nonnull<absl::string_view*> str,
// absl::string_view input("abcdef");
// EXPECT_TRUE(absl::ConsumeSuffix(&input, "def"));
// EXPECT_EQ(input, "abc");
-inline constexpr bool ConsumeSuffix(absl::Nonnull<absl::string_view*> str,
+inline constexpr bool ConsumeSuffix(absl::string_view* absl_nonnull str,
absl::string_view expected) {
if (!absl::EndsWith(*str, expected)) return false;
str->remove_suffix(expected.size());
@@ -74,7 +74,7 @@ inline constexpr bool ConsumeSuffix(absl::Nonnull<absl::string_view*> str,
// Returns a view into the input string `str` with the given `prefix` removed,
// but leaving the original string intact. If the prefix does not match at the
// start of the string, returns the original string instead.
-ABSL_MUST_USE_RESULT inline constexpr absl::string_view StripPrefix(
+[[nodiscard]] inline constexpr absl::string_view StripPrefix(
absl::string_view str ABSL_ATTRIBUTE_LIFETIME_BOUND,
absl::string_view prefix) {
if (absl::StartsWith(str, prefix)) str.remove_prefix(prefix.size());
@@ -86,7 +86,7 @@ ABSL_MUST_USE_RESULT inline constexpr absl::string_view StripPrefix(
// Returns a view into the input string `str` with the given `suffix` removed,
// but leaving the original string intact. If the suffix does not match at the
// end of the string, returns the original string instead.
-ABSL_MUST_USE_RESULT inline constexpr absl::string_view StripSuffix(
+[[nodiscard]] inline constexpr absl::string_view StripSuffix(
absl::string_view str ABSL_ATTRIBUTE_LIFETIME_BOUND,
absl::string_view suffix) {
if (absl::EndsWith(str, suffix)) str.remove_suffix(suffix.size());
diff --git a/contrib/restricted/abseil-cpp/absl/strings/substitute.cc b/contrib/restricted/abseil-cpp/absl/strings/substitute.cc
index a71f565a120..3c2ca5d2a88 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/substitute.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/substitute.cc
@@ -35,9 +35,10 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace substitute_internal {
-void SubstituteAndAppendArray(
- absl::Nonnull<std::string*> output, absl::string_view format,
- absl::Nullable<const absl::string_view*> args_array, size_t num_args) {
+void SubstituteAndAppendArray(std::string* absl_nonnull output,
+ absl::string_view format,
+ const absl::string_view* absl_nullable args_array,
+ size_t num_args) {
// Determine total size needed.
size_t size = 0;
for (size_t i = 0; i < format.size(); i++) {
@@ -109,7 +110,7 @@ void SubstituteAndAppendArray(
assert(target == output->data() + output->size());
}
-Arg::Arg(absl::Nullable<const void*> value) {
+Arg::Arg(const void* absl_nullable value) {
static_assert(sizeof(scratch_) >= sizeof(value) * 2 + 2,
"fix sizeof(scratch_)");
if (value == nullptr) {
diff --git a/contrib/restricted/abseil-cpp/absl/strings/substitute.h b/contrib/restricted/abseil-cpp/absl/strings/substitute.h
index 6c7cba4be54..c93b1cc645f 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/substitute.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/substitute.h
@@ -106,7 +106,7 @@ class Arg {
// Overloads for string-y things
//
// Explicitly overload `const char*` so the compiler doesn't cast to `bool`.
- Arg(absl::Nullable<const char*> value) // NOLINT(google-explicit-constructor)
+ Arg(const char* absl_nullable value) // NOLINT(google-explicit-constructor)
: piece_(absl::NullSafeStringView(value)) {}
template <typename Allocator>
Arg( // NOLINT
@@ -187,19 +187,20 @@ class Arg {
// vector<bool>::reference and const_reference require special help to convert
// to `Arg` because it requires two user defined conversions.
- template <typename T,
- absl::enable_if_t<
- std::is_class<T>::value &&
- (std::is_same<T, std::vector<bool>::reference>::value ||
- std::is_same<T, std::vector<bool>::const_reference>::value)>* =
- nullptr>
+ template <
+ typename T,
+ std::enable_if_t<
+ std::is_class<T>::value &&
+ (std::is_same<T, std::vector<bool>::reference>::value ||
+ std::is_same<T, std::vector<bool>::const_reference>::value),
+ bool> = true>
Arg(T value) // NOLINT(google-explicit-constructor)
: Arg(static_cast<bool>(value)) {}
// `void*` values, with the exception of `char*`, are printed as
// "0x<hex value>". However, in the case of `nullptr`, "NULL" is printed.
Arg( // NOLINT(google-explicit-constructor)
- absl::Nullable<const void*> value);
+ const void* absl_nullable value);
// Normal enums are already handled by the integer formatters.
// This overload matches only scoped enums.
@@ -222,12 +223,13 @@ class Arg {
// Internal helper function. Don't call this from outside this implementation.
// This interface may change without notice.
-void SubstituteAndAppendArray(
- absl::Nonnull<std::string*> output, absl::string_view format,
- absl::Nullable<const absl::string_view*> args_array, size_t num_args);
+void SubstituteAndAppendArray(std::string* absl_nonnull output,
+ absl::string_view format,
+ const absl::string_view* absl_nullable args_array,
+ size_t num_args);
#if defined(ABSL_BAD_CALL_IF)
-constexpr int CalculateOneBit(absl::Nonnull<const char*> format) {
+constexpr int CalculateOneBit(const char* absl_nonnull format) {
// Returns:
// * 2^N for '$N' when N is in [0-9]
// * 0 for correct '$' escaping: '$$'.
@@ -236,11 +238,11 @@ constexpr int CalculateOneBit(absl::Nonnull<const char*> format) {
: (1 << (*format - '0'));
}
-constexpr const char* SkipNumber(absl::Nonnull<const char*> format) {
+constexpr const char* absl_nonnull SkipNumber(const char* absl_nonnull format) {
return !*format ? format : (format + 1);
}
-constexpr int PlaceholderBitmask(absl::Nonnull<const char*> format) {
+constexpr int PlaceholderBitmask(const char* absl_nonnull format) {
return !*format
? 0
: *format != '$' ? PlaceholderBitmask(format + 1)
@@ -273,12 +275,12 @@ constexpr int PlaceholderBitmask(absl::Nonnull<const char*> format) {
// absl::SubstituteAndAppend(boilerplate, format, args...);
// }
//
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
absl::string_view format) {
substitute_internal::SubstituteAndAppendArray(output, format, nullptr, 0);
}
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
absl::string_view format,
const substitute_internal::Arg& a0) {
const absl::string_view args[] = {a0.piece()};
@@ -286,7 +288,7 @@ inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
ABSL_ARRAYSIZE(args));
}
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
absl::string_view format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1) {
@@ -295,7 +297,7 @@ inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
ABSL_ARRAYSIZE(args));
}
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
absl::string_view format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1,
@@ -305,7 +307,7 @@ inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
ABSL_ARRAYSIZE(args));
}
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
absl::string_view format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1,
@@ -317,7 +319,7 @@ inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
ABSL_ARRAYSIZE(args));
}
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
absl::string_view format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1,
@@ -331,7 +333,7 @@ inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
}
inline void SubstituteAndAppend(
- absl::Nonnull<std::string*> output, absl::string_view format,
+ std::string* absl_nonnull output, absl::string_view format,
const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
const substitute_internal::Arg& a4, const substitute_internal::Arg& a5) {
@@ -342,7 +344,7 @@ inline void SubstituteAndAppend(
}
inline void SubstituteAndAppend(
- absl::Nonnull<std::string*> output, absl::string_view format,
+ std::string* absl_nonnull output, absl::string_view format,
const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -355,7 +357,7 @@ inline void SubstituteAndAppend(
}
inline void SubstituteAndAppend(
- absl::Nonnull<std::string*> output, absl::string_view format,
+ std::string* absl_nonnull output, absl::string_view format,
const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -368,7 +370,7 @@ inline void SubstituteAndAppend(
}
inline void SubstituteAndAppend(
- absl::Nonnull<std::string*> output, absl::string_view format,
+ std::string* absl_nonnull output, absl::string_view format,
const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -382,7 +384,7 @@ inline void SubstituteAndAppend(
}
inline void SubstituteAndAppend(
- absl::Nonnull<std::string*> output, absl::string_view format,
+ std::string* absl_nonnull output, absl::string_view format,
const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -398,16 +400,16 @@ inline void SubstituteAndAppend(
#if defined(ABSL_BAD_CALL_IF)
// This body of functions catches cases where the number of placeholders
// doesn't match the number of data arguments.
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
- absl::Nonnull<const char*> format)
+void SubstituteAndAppend(std::string* absl_nonnull output,
+ const char* absl_nonnull format)
ABSL_BAD_CALL_IF(
substitute_internal::PlaceholderBitmask(format) != 0,
"There were no substitution arguments "
"but this format string either has a $[0-9] in it or contains "
"an unescaped $ character (use $$ instead)");
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
- absl::Nonnull<const char*> format,
+void SubstituteAndAppend(std::string* absl_nonnull output,
+ const char* absl_nonnull format,
const substitute_internal::Arg& a0)
ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 1,
"There was 1 substitution argument given, but "
@@ -415,8 +417,8 @@ void SubstituteAndAppend(absl::Nonnull<std::string*> output,
"one of $1-$9, or contains an unescaped $ character (use "
"$$ instead)");
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
- absl::Nonnull<const char*> format,
+void SubstituteAndAppend(std::string* absl_nonnull output,
+ const char* absl_nonnull format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1)
ABSL_BAD_CALL_IF(
@@ -425,8 +427,8 @@ void SubstituteAndAppend(absl::Nonnull<std::string*> output,
"missing its $0/$1, contains one of $2-$9, or contains an "
"unescaped $ character (use $$ instead)");
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
- absl::Nonnull<const char*> format,
+void SubstituteAndAppend(std::string* absl_nonnull output,
+ const char* absl_nonnull format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2)
@@ -436,8 +438,8 @@ void SubstituteAndAppend(absl::Nonnull<std::string*> output,
"this format string is missing its $0/$1/$2, contains one of "
"$3-$9, or contains an unescaped $ character (use $$ instead)");
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
- absl::Nonnull<const char*> format,
+void SubstituteAndAppend(std::string* absl_nonnull output,
+ const char* absl_nonnull format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2,
@@ -448,8 +450,8 @@ void SubstituteAndAppend(absl::Nonnull<std::string*> output,
"this format string is missing its $0-$3, contains one of "
"$4-$9, or contains an unescaped $ character (use $$ instead)");
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
- absl::Nonnull<const char*> format,
+void SubstituteAndAppend(std::string* absl_nonnull output,
+ const char* absl_nonnull format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2,
@@ -462,7 +464,7 @@ void SubstituteAndAppend(absl::Nonnull<std::string*> output,
"$5-$9, or contains an unescaped $ character (use $$ instead)");
void SubstituteAndAppend(
- absl::Nonnull<std::string*> output, absl::Nonnull<const char*> format,
+ std::string* absl_nonnull output, const char* absl_nonnull format,
const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
const substitute_internal::Arg& a4, const substitute_internal::Arg& a5)
@@ -473,7 +475,7 @@ void SubstituteAndAppend(
"$6-$9, or contains an unescaped $ character (use $$ instead)");
void SubstituteAndAppend(
- absl::Nonnull<std::string*> output, absl::Nonnull<const char*> format,
+ std::string* absl_nonnull output, const char* absl_nonnull format,
const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -485,7 +487,7 @@ void SubstituteAndAppend(
"$7-$9, or contains an unescaped $ character (use $$ instead)");
void SubstituteAndAppend(
- absl::Nonnull<std::string*> output, absl::Nonnull<const char*> format,
+ std::string* absl_nonnull output, const char* absl_nonnull format,
const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -497,7 +499,7 @@ void SubstituteAndAppend(
"$8-$9, or contains an unescaped $ character (use $$ instead)");
void SubstituteAndAppend(
- absl::Nonnull<std::string*> output, absl::Nonnull<const char*> format,
+ std::string* absl_nonnull output, const char* absl_nonnull format,
const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -510,7 +512,7 @@ void SubstituteAndAppend(
"contains an unescaped $ character (use $$ instead)");
void SubstituteAndAppend(
- absl::Nonnull<std::string*> output, absl::Nonnull<const char*> format,
+ std::string* absl_nonnull output, const char* absl_nonnull format,
const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -539,20 +541,20 @@ void SubstituteAndAppend(
// void VarMsg(absl::string_view format, const Args&... args) {
// std::string s = absl::Substitute(format, args...);
-ABSL_MUST_USE_RESULT inline std::string Substitute(absl::string_view format) {
+[[nodiscard]] inline std::string Substitute(absl::string_view format) {
std::string result;
SubstituteAndAppend(&result, format);
return result;
}
-ABSL_MUST_USE_RESULT inline std::string Substitute(
+[[nodiscard]] inline std::string Substitute(
absl::string_view format, const substitute_internal::Arg& a0) {
std::string result;
SubstituteAndAppend(&result, format, a0);
return result;
}
-ABSL_MUST_USE_RESULT inline std::string Substitute(
+[[nodiscard]] inline std::string Substitute(
absl::string_view format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1) {
std::string result;
@@ -560,7 +562,7 @@ ABSL_MUST_USE_RESULT inline std::string Substitute(
return result;
}
-ABSL_MUST_USE_RESULT inline std::string Substitute(
+[[nodiscard]] inline std::string Substitute(
absl::string_view format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2) {
std::string result;
@@ -568,7 +570,7 @@ ABSL_MUST_USE_RESULT inline std::string Substitute(
return result;
}
-ABSL_MUST_USE_RESULT inline std::string Substitute(
+[[nodiscard]] inline std::string Substitute(
absl::string_view format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3) {
@@ -577,7 +579,7 @@ ABSL_MUST_USE_RESULT inline std::string Substitute(
return result;
}
-ABSL_MUST_USE_RESULT inline std::string Substitute(
+[[nodiscard]] inline std::string Substitute(
absl::string_view format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3, const substitute_internal::Arg& a4) {
@@ -586,7 +588,7 @@ ABSL_MUST_USE_RESULT inline std::string Substitute(
return result;
}
-ABSL_MUST_USE_RESULT inline std::string Substitute(
+[[nodiscard]] inline std::string Substitute(
absl::string_view format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
@@ -596,7 +598,7 @@ ABSL_MUST_USE_RESULT inline std::string Substitute(
return result;
}
-ABSL_MUST_USE_RESULT inline std::string Substitute(
+[[nodiscard]] inline std::string Substitute(
absl::string_view format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
@@ -606,7 +608,7 @@ ABSL_MUST_USE_RESULT inline std::string Substitute(
return result;
}
-ABSL_MUST_USE_RESULT inline std::string Substitute(
+[[nodiscard]] inline std::string Substitute(
absl::string_view format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
@@ -617,7 +619,7 @@ ABSL_MUST_USE_RESULT inline std::string Substitute(
return result;
}
-ABSL_MUST_USE_RESULT inline std::string Substitute(
+[[nodiscard]] inline std::string Substitute(
absl::string_view format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
@@ -628,7 +630,7 @@ ABSL_MUST_USE_RESULT inline std::string Substitute(
return result;
}
-ABSL_MUST_USE_RESULT inline std::string Substitute(
+[[nodiscard]] inline std::string Substitute(
absl::string_view format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
@@ -643,13 +645,13 @@ ABSL_MUST_USE_RESULT inline std::string Substitute(
#if defined(ABSL_BAD_CALL_IF)
// This body of functions catches cases where the number of placeholders
// doesn't match the number of data arguments.
-std::string Substitute(absl::Nonnull<const char*> format)
+std::string Substitute(const char* absl_nonnull format)
ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 0,
"There were no substitution arguments "
"but this format string either has a $[0-9] in it or "
"contains an unescaped $ character (use $$ instead)");
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
const substitute_internal::Arg& a0)
ABSL_BAD_CALL_IF(
substitute_internal::PlaceholderBitmask(format) != 1,
@@ -657,7 +659,7 @@ std::string Substitute(absl::Nonnull<const char*> format,
"this format string is missing its $0, contains one of $1-$9, "
"or contains an unescaped $ character (use $$ instead)");
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1)
ABSL_BAD_CALL_IF(
@@ -666,7 +668,7 @@ std::string Substitute(absl::Nonnull<const char*> format,
"this format string is missing its $0/$1, contains one of "
"$2-$9, or contains an unescaped $ character (use $$ instead)");
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2)
@@ -676,7 +678,7 @@ std::string Substitute(absl::Nonnull<const char*> format,
"this format string is missing its $0/$1/$2, contains one of "
"$3-$9, or contains an unescaped $ character (use $$ instead)");
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2,
@@ -687,7 +689,7 @@ std::string Substitute(absl::Nonnull<const char*> format,
"this format string is missing its $0-$3, contains one of "
"$4-$9, or contains an unescaped $ character (use $$ instead)");
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2,
@@ -699,7 +701,7 @@ std::string Substitute(absl::Nonnull<const char*> format,
"this format string is missing its $0-$4, contains one of "
"$5-$9, or contains an unescaped $ character (use $$ instead)");
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1,
const substitute_internal::Arg& a2,
@@ -713,7 +715,7 @@ std::string Substitute(absl::Nonnull<const char*> format,
"$6-$9, or contains an unescaped $ character (use $$ instead)");
std::string Substitute(
- absl::Nonnull<const char*> format, const substitute_internal::Arg& a0,
+ const char* absl_nonnull format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
const substitute_internal::Arg& a5, const substitute_internal::Arg& a6)
@@ -724,7 +726,7 @@ std::string Substitute(
"$7-$9, or contains an unescaped $ character (use $$ instead)");
std::string Substitute(
- absl::Nonnull<const char*> format, const substitute_internal::Arg& a0,
+ const char* absl_nonnull format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
@@ -736,7 +738,7 @@ std::string Substitute(
"$8-$9, or contains an unescaped $ character (use $$ instead)");
std::string Substitute(
- absl::Nonnull<const char*> format, const substitute_internal::Arg& a0,
+ const char* absl_nonnull format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
@@ -748,7 +750,7 @@ std::string Substitute(
"contains an unescaped $ character (use $$ instead)");
std::string Substitute(
- absl::Nonnull<const char*> format, const substitute_internal::Arg& a0,
+ const char* absl_nonnull format, const substitute_internal::Arg& a0,
const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex_waiter.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex_waiter.cc
index 87eb3b23cd2..8945c176e6e 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex_waiter.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex_waiter.cc
@@ -31,10 +31,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr char FutexWaiter::kName[];
-#endif
-
int FutexWaiter::WaitUntil(std::atomic<int32_t>* v, int32_t val,
KernelTimeout t) {
#ifdef CLOCK_MONOTONIC
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.cc
index 48ea6287b7b..252397ab54d 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.cc
@@ -35,11 +35,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr uint64_t KernelTimeout::kNoTimeout;
-constexpr int64_t KernelTimeout::kMaxNanos;
-#endif
-
int64_t KernelTimeout::SteadyClockNow() {
if (!SupportsSteadyClock()) {
return absl::GetCurrentTimeNanos();
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/pthread_waiter.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/pthread_waiter.cc
index bf700e95528..eead9de05b4 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/pthread_waiter.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/pthread_waiter.cc
@@ -58,10 +58,6 @@ class PthreadMutexHolder {
};
} // namespace
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr char PthreadWaiter::kName[];
-#endif
-
PthreadWaiter::PthreadWaiter() : waiter_count_(0), wakeup_count_(0) {
const int err = pthread_mutex_init(&mu_, 0);
if (err != 0) {
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/sem_waiter.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/sem_waiter.cc
index d62dbdc70d1..2119290d530 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/sem_waiter.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/sem_waiter.cc
@@ -33,10 +33,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr char SemWaiter::kName[];
-#endif
-
SemWaiter::SemWaiter() : wakeups_(0) {
if (sem_init(&sem_, 0, 0) != 0) {
ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.cc
index 355718a792b..607d683ffe6 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.cc
@@ -30,10 +30,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr char StdcppWaiter::kName[];
-#endif
-
StdcppWaiter::StdcppWaiter() : waiter_count_(0), wakeup_count_(0) {}
bool StdcppWaiter::Wait(KernelTimeout t) {
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter_base.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter_base.cc
index 46928b404e0..e9797f8b141 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter_base.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter_base.cc
@@ -21,10 +21,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr int WaiterBase::kIdlePeriods;
-#endif
-
void WaiterBase::MaybeBecomeIdle() {
base_internal::ThreadIdentity *identity =
base_internal::CurrentThreadIdentityIfPresent();
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/win32_waiter.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/win32_waiter.cc
index bd95ff08009..b2fe402cb29 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/win32_waiter.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/win32_waiter.cc
@@ -28,10 +28,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
-constexpr char Win32Waiter::kName[];
-#endif
-
class Win32Waiter::WinHelper {
public:
static SRWLOCK *GetLock(Win32Waiter *w) {
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
index 52ed27f696f..5091b8fd340 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
@@ -1339,7 +1339,7 @@ static char* StackString(void** pcs, int n, char* buf, int maxlen,
} else {
snprintf(buf + len, count, " %p", pcs[i]);
}
- len += strlen(&buf[len]);
+ len += static_cast<int>(strlen(&buf[len]));
}
return buf;
}
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
index be3f1f56bb8..78b1c7a048a 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
@@ -70,6 +70,7 @@
#include "absl/base/internal/low_level_alloc.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/nullability.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/internal/kernel_timeout.h"
@@ -190,7 +191,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
// If the mutex can be acquired without blocking, does so exclusively and
// returns `true`. Otherwise, returns `false`. Returns `true` with high
// probability if the `Mutex` was free.
- ABSL_MUST_USE_RESULT bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
+ [[nodiscard]] bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
// Mutex::AssertHeld()
//
@@ -255,7 +256,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
// If the mutex can be acquired without blocking, acquires this mutex for
// shared access and returns `true`. Otherwise, returns `false`. Returns
// `true` with high probability if the `Mutex` was free or shared.
- ABSL_MUST_USE_RESULT bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
+ [[nodiscard]] bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
// Mutex::AssertReaderHeld()
//
@@ -281,8 +282,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
- ABSL_MUST_USE_RESULT bool WriterTryLock()
- ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ [[nodiscard]] bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
return this->TryLock();
}
@@ -450,7 +450,9 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
// substantially reduce `Mutex` performance; it should be set only for
// non-production runs. Optimization options may also disable invariant
// checks.
- void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
+ void EnableInvariantDebugging(
+ void (*absl_nullable invariant)(void* absl_nullability_unknown),
+ void* absl_nullability_unknown arg);
// Mutex::EnableDebugLog()
//
@@ -459,7 +461,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
// call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
//
// Note: This method substantially reduces `Mutex` performance.
- void EnableDebugLog(const char* name);
+ void EnableDebugLog(const char* absl_nullable name);
// Deadlock detection
@@ -509,20 +511,23 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
// Post()/Wait() versus associated PerThreadSem; in class for required
// friendship with PerThreadSem.
- static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
- static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
+ static void IncrementSynchSem(Mutex* absl_nonnull mu,
+ base_internal::PerThreadSynch* absl_nonnull w);
+ static bool DecrementSynchSem(Mutex* absl_nonnull mu,
+ base_internal::PerThreadSynch* absl_nonnull w,
synchronization_internal::KernelTimeout t);
// slow path acquire
- void LockSlowLoop(SynchWaitParams* waitp, int flags);
+ void LockSlowLoop(SynchWaitParams* absl_nonnull waitp, int flags);
// wrappers around LockSlowLoop()
- bool LockSlowWithDeadline(MuHow how, const Condition* cond,
+ bool LockSlowWithDeadline(MuHow absl_nonnull how,
+ const Condition* absl_nullable cond,
synchronization_internal::KernelTimeout t,
int flags);
- void LockSlow(MuHow how, const Condition* cond,
+ void LockSlow(MuHow absl_nonnull how, const Condition* absl_nullable cond,
int flags) ABSL_ATTRIBUTE_COLD;
// slow path release
- void UnlockSlow(SynchWaitParams* waitp) ABSL_ATTRIBUTE_COLD;
+ void UnlockSlow(SynchWaitParams* absl_nullable waitp) ABSL_ATTRIBUTE_COLD;
// TryLock slow path.
bool TryLockSlow();
// ReaderTryLock slow path.
@@ -533,20 +538,21 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
bool LockWhenCommon(const Condition& cond,
synchronization_internal::KernelTimeout t, bool write);
// Attempt to remove thread s from queue.
- void TryRemove(base_internal::PerThreadSynch* s);
+ void TryRemove(base_internal::PerThreadSynch* absl_nonnull s);
// Block a thread on mutex.
- void Block(base_internal::PerThreadSynch* s);
+ void Block(base_internal::PerThreadSynch* absl_nonnull s);
// Wake a thread; return successor.
- base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
+ base_internal::PerThreadSynch* absl_nullable Wakeup(
+ base_internal::PerThreadSynch* absl_nonnull w);
void Dtor();
friend class CondVar; // for access to Trans()/Fer().
- void Trans(MuHow how); // used for CondVar->Mutex transfer
- void Fer(
- base_internal::PerThreadSynch* w); // used for CondVar->Mutex transfer
+ void Trans(MuHow absl_nonnull how); // used for CondVar->Mutex transfer
+ void Fer(base_internal::PerThreadSynch* absl_nonnull
+ w); // used for CondVar->Mutex transfer
// Catch the error of writing Mutex when intending MutexLock.
- explicit Mutex(const volatile Mutex* /*ignored*/) {}
+ explicit Mutex(const volatile Mutex* absl_nullable /*ignored*/) {}
Mutex(const Mutex&) = delete;
Mutex& operator=(const Mutex&) = delete;
@@ -581,14 +587,15 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
// Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
// guaranteed to be locked when this object is constructed. Requires that
// `mu` be dereferenceable.
- explicit MutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+ explicit MutexLock(Mutex* absl_nonnull mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
this->mu_->Lock();
}
// Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
// the above, the condition given by `cond` is also guaranteed to hold when
// this object is constructed.
- explicit MutexLock(Mutex* mu, const Condition& cond)
+ explicit MutexLock(Mutex* absl_nonnull mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->LockWhen(cond);
@@ -602,7 +609,7 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
private:
- Mutex* const mu_;
+ Mutex* absl_nonnull const mu_;
};
// ReaderMutexLock
@@ -611,11 +618,12 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
// releases a shared lock on a `Mutex` via RAII.
class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
public:
- explicit ReaderMutexLock(Mutex* mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
+ explicit ReaderMutexLock(Mutex* absl_nonnull mu) ABSL_SHARED_LOCK_FUNCTION(mu)
+ : mu_(mu) {
mu->ReaderLock();
}
- explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
+ explicit ReaderMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
ABSL_SHARED_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->ReaderLockWhen(cond);
@@ -629,7 +637,7 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
private:
- Mutex* const mu_;
+ Mutex* absl_nonnull const mu_;
};
// WriterMutexLock
@@ -638,12 +646,13 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
// releases a write (exclusive) lock on a `Mutex` via RAII.
class ABSL_SCOPED_LOCKABLE WriterMutexLock {
public:
- explicit WriterMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit WriterMutexLock(Mutex* absl_nonnull mu)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->WriterLock();
}
- explicit WriterMutexLock(Mutex* mu, const Condition& cond)
+ explicit WriterMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->WriterLockWhen(cond);
@@ -657,7 +666,7 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
private:
- Mutex* const mu_;
+ Mutex* absl_nonnull const mu_;
};
// -----------------------------------------------------------------------------
@@ -715,7 +724,8 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
class Condition {
public:
// A Condition that returns the result of "(*func)(arg)"
- Condition(bool (*func)(void*), void* arg);
+ Condition(bool (*absl_nonnull func)(void* absl_nullability_unknown),
+ void* absl_nullability_unknown arg);
// Templated version for people who are averse to casts.
//
@@ -727,7 +737,8 @@ class Condition {
//
// See class comment for performance advice.
template <typename T>
- Condition(bool (*func)(T*), T* arg);
+ Condition(bool (*absl_nonnull func)(T* absl_nullability_unknown),
+ T* absl_nullability_unknown arg);
// Same as above, but allows for cases where `arg` comes from a pointer that
// is convertible to the function parameter type `T*` but not an exact match.
@@ -741,8 +752,10 @@ class Condition {
// a function template is passed as `func`. Also, the dummy `typename = void`
// template parameter exists just to work around a MSVC mangling bug.
template <typename T, typename = void>
- Condition(bool (*func)(T*),
- typename absl::internal::type_identity<T>::type* arg);
+ Condition(
+ bool (*absl_nonnull func)(T* absl_nullability_unknown),
+ typename absl::internal::type_identity<T>::type* absl_nullability_unknown
+ arg);
// Templated version for invoking a method that returns a `bool`.
//
@@ -753,16 +766,19 @@ class Condition {
// methods to come from base classes. A simpler signature like
// `Condition(T*, bool (T::*)())` does not suffice.
template <typename T>
- Condition(T* object,
- bool (absl::internal::type_identity<T>::type::*method)());
+ Condition(
+ T* absl_nonnull object,
+ bool (absl::internal::type_identity<T>::type::* absl_nonnull method)());
// Same as above, for const members
template <typename T>
- Condition(const T* object,
- bool (absl::internal::type_identity<T>::type::*method)() const);
+ Condition(
+ const T* absl_nonnull object,
+ bool (absl::internal::type_identity<T>::type::* absl_nonnull method)()
+ const);
// A Condition that returns the value of `*cond`
- explicit Condition(const bool* cond);
+ explicit Condition(const bool* absl_nonnull cond);
// Templated version for invoking a functor that returns a `bool`.
// This approach accepts pointers to non-mutable lambdas, `std::function`,
@@ -791,7 +807,7 @@ class Condition {
// `bool operator() const`.
template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
&T::operator()))>
- explicit Condition(const T* obj)
+ explicit Condition(const T* absl_nonnull obj)
: Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
// A Condition that always returns `true`.
@@ -817,7 +833,8 @@ class Condition {
// Two `Condition` values are guaranteed equal if both their `func` and `arg`
// components are the same. A null pointer is equivalent to a `true`
// condition.
- static bool GuaranteedEqual(const Condition* a, const Condition* b);
+ static bool GuaranteedEqual(const Condition* absl_nullable a,
+ const Condition* absl_nullable b);
private:
// Sizing an allocation for a method pointer can be subtle. In the Itanium
@@ -842,17 +859,17 @@ class Condition {
#endif
// Function with which to evaluate callbacks and/or arguments.
- bool (*eval_)(const Condition*) = nullptr;
+ bool (*absl_nullable eval_)(const Condition* absl_nonnull) = nullptr;
// Either an argument for a function call or an object for a method call.
- void* arg_ = nullptr;
+ void* absl_nullable arg_ = nullptr;
// Various functions eval_ can point to:
- static bool CallVoidPtrFunction(const Condition*);
+ static bool CallVoidPtrFunction(const Condition* absl_nonnull c);
template <typename T>
- static bool CastAndCallFunction(const Condition* c);
+ static bool CastAndCallFunction(const Condition* absl_nonnull c);
template <typename T, typename ConditionMethodPtr>
- static bool CastAndCallMethod(const Condition* c);
+ static bool CastAndCallMethod(const Condition* absl_nonnull c);
// Helper methods for storing, validating, and reading callback arguments.
template <typename T>
@@ -864,11 +881,11 @@ class Condition {
}
template <typename T>
- inline void ReadCallback(T* callback) const {
+ inline void ReadCallback(T* absl_nonnull callback) const {
std::memcpy(callback, callback_, sizeof(*callback));
}
- static bool AlwaysTrue(const Condition*) { return true; }
+ static bool AlwaysTrue(const Condition* absl_nullable) { return true; }
// Used only to create kTrue.
constexpr Condition() : eval_(AlwaysTrue), arg_(nullptr) {}
@@ -922,7 +939,7 @@ class CondVar {
// spurious wakeup), then reacquires the `Mutex` and returns.
//
// Requires and ensures that the current thread holds the `Mutex`.
- void Wait(Mutex* mu) {
+ void Wait(Mutex* absl_nonnull mu) {
WaitCommon(mu, synchronization_internal::KernelTimeout::Never());
}
@@ -939,7 +956,7 @@ class CondVar {
// to return `true` or `false`.
//
// Requires and ensures that the current thread holds the `Mutex`.
- bool WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
+ bool WaitWithTimeout(Mutex* absl_nonnull mu, absl::Duration timeout) {
return WaitCommon(mu, synchronization_internal::KernelTimeout(timeout));
}
@@ -958,7 +975,7 @@ class CondVar {
// to return `true` or `false`.
//
// Requires and ensures that the current thread holds the `Mutex`.
- bool WaitWithDeadline(Mutex* mu, absl::Time deadline) {
+ bool WaitWithDeadline(Mutex* absl_nonnull mu, absl::Time deadline) {
return WaitCommon(mu, synchronization_internal::KernelTimeout(deadline));
}
@@ -977,11 +994,12 @@ class CondVar {
// Causes all subsequent uses of this `CondVar` to be logged via
// `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
// Note: this method substantially reduces `CondVar` performance.
- void EnableDebugLog(const char* name);
+ void EnableDebugLog(const char* absl_nullable name);
private:
- bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
- void Remove(base_internal::PerThreadSynch* s);
+ bool WaitCommon(Mutex* absl_nonnull mutex,
+ synchronization_internal::KernelTimeout t);
+ void Remove(base_internal::PerThreadSynch* absl_nonnull s);
std::atomic<intptr_t> cv_; // Condition variable state.
CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete;
@@ -997,14 +1015,15 @@ class CondVar {
// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
public:
- explicit MutexLockMaybe(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit MutexLockMaybe(Mutex* absl_nullable mu)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
if (this->mu_ != nullptr) {
this->mu_->Lock();
}
}
- explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
+ explicit MutexLockMaybe(Mutex* absl_nullable mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
if (this->mu_ != nullptr) {
@@ -1019,7 +1038,7 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
}
private:
- Mutex* const mu_;
+ Mutex* absl_nullable const mu_;
MutexLockMaybe(const MutexLockMaybe&) = delete;
MutexLockMaybe(MutexLockMaybe&&) = delete;
MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
@@ -1032,12 +1051,13 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
// mutex before destruction. `Release()` may be called at most once.
class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
public:
- explicit ReleasableMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit ReleasableMutexLock(Mutex* absl_nonnull mu)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->Lock();
}
- explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
+ explicit ReleasableMutexLock(Mutex* absl_nonnull mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->LockWhen(cond);
@@ -1052,7 +1072,7 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
void Release() ABSL_UNLOCK_FUNCTION();
private:
- Mutex* mu_;
+ Mutex* absl_nonnull mu_;
ReleasableMutexLock(const ReleasableMutexLock&) = delete;
ReleasableMutexLock(ReleasableMutexLock&&) = delete;
ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
@@ -1084,7 +1104,7 @@ inline CondVar::CondVar() : cv_(0) {}
// static
template <typename T, typename ConditionMethodPtr>
-bool Condition::CastAndCallMethod(const Condition* c) {
+bool Condition::CastAndCallMethod(const Condition* absl_nonnull c) {
T* object = static_cast<T*>(c->arg_);
ConditionMethodPtr condition_method_pointer;
c->ReadCallback(&condition_method_pointer);
@@ -1093,7 +1113,7 @@ bool Condition::CastAndCallMethod(const Condition* c) {
// static
template <typename T>
-bool Condition::CastAndCallFunction(const Condition* c) {
+bool Condition::CastAndCallFunction(const Condition* absl_nonnull c) {
bool (*function)(T*);
c->ReadCallback(&function);
T* argument = static_cast<T*>(c->arg_);
@@ -1101,7 +1121,9 @@ bool Condition::CastAndCallFunction(const Condition* c) {
}
template <typename T>
-inline Condition::Condition(bool (*func)(T*), T* arg)
+inline Condition::Condition(
+ bool (*absl_nonnull func)(T* absl_nullability_unknown),
+ T* absl_nullability_unknown arg)
: eval_(&CastAndCallFunction<T>),
arg_(const_cast<void*>(static_cast<const void*>(arg))) {
static_assert(sizeof(&func) <= sizeof(callback_),
@@ -1111,13 +1133,16 @@ inline Condition::Condition(bool (*func)(T*), T* arg)
template <typename T, typename>
inline Condition::Condition(
- bool (*func)(T*), typename absl::internal::type_identity<T>::type* arg)
+ bool (*absl_nonnull func)(T* absl_nullability_unknown),
+ typename absl::internal::type_identity<T>::type* absl_nullability_unknown
+ arg)
// Just delegate to the overload above.
: Condition(func, arg) {}
template <typename T>
inline Condition::Condition(
- T* object, bool (absl::internal::type_identity<T>::type::*method)())
+ T* absl_nonnull object,
+ bool (absl::internal::type_identity<T>::type::* absl_nonnull method)())
: eval_(&CastAndCallMethod<T, decltype(method)>), arg_(object) {
static_assert(sizeof(&method) <= sizeof(callback_),
"An overlarge method pointer was passed to Condition.");
@@ -1126,8 +1151,9 @@ inline Condition::Condition(
template <typename T>
inline Condition::Condition(
- const T* object,
- bool (absl::internal::type_identity<T>::type::*method)() const)
+ const T* absl_nonnull object,
+ bool (absl::internal::type_identity<T>::type::* absl_nonnull method)()
+ const)
: eval_(&CastAndCallMethod<const T, decltype(method)>),
arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
StoreCallback(method);
@@ -1145,7 +1171,7 @@ inline Condition::Condition(
// binary; if this function is called a second time with a different function
// pointer, the value is ignored (and will cause an assertion failure in debug
// mode.)
-void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
+void RegisterMutexProfiler(void (*absl_nonnull fn)(int64_t wait_cycles));
// Register a hook for Mutex tracing.
//
@@ -1159,8 +1185,9 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
//
// This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above.
-void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
- int64_t wait_cycles));
+void RegisterMutexTracer(void (*absl_nonnull fn)(const char* absl_nonnull msg,
+ const void* absl_nonnull obj,
+ int64_t wait_cycles));
// Register a hook for CondVar tracing.
//
@@ -1174,7 +1201,8 @@ void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
//
// This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above.
-void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));
+void RegisterCondVarTracer(void (*absl_nonnull fn)(
+ const char* absl_nonnull msg, const void* absl_nonnull cv));
// EnableMutexInvariantDebugging()
//
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/notification.h b/contrib/restricted/abseil-cpp/absl/synchronization/notification.h
index 78cdf296084..1ceffdb67b0 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/notification.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/notification.h
@@ -75,7 +75,7 @@ class Notification {
// Notification::HasBeenNotified()
//
// Returns the value of the notification's internal "notified" state.
- ABSL_MUST_USE_RESULT bool HasBeenNotified() const {
+ [[nodiscard]] bool HasBeenNotified() const {
if (HasBeenNotifiedInternal(&this->notified_yet_)) {
base_internal::TraceObserved(this, TraceObjectKind());
return true;
diff --git a/contrib/restricted/abseil-cpp/absl/time/civil_time.cc b/contrib/restricted/abseil-cpp/absl/time/civil_time.cc
index 65df39d7318..1773366be0c 100644
--- a/contrib/restricted/abseil-cpp/absl/time/civil_time.cc
+++ b/contrib/restricted/abseil-cpp/absl/time/civil_time.cc
@@ -14,6 +14,7 @@
#include "absl/time/civil_time.h"
+#include <cerrno>
#include <cstdlib>
#include <ostream>
#include <string>
diff --git a/contrib/restricted/abseil-cpp/absl/time/duration.cc b/contrib/restricted/abseil-cpp/absl/time/duration.cc
index 19407080a7f..38c4b63990c 100644
--- a/contrib/restricted/abseil-cpp/absl/time/duration.cc
+++ b/contrib/restricted/abseil-cpp/absl/time/duration.cc
@@ -202,7 +202,8 @@ inline bool SafeAddRepHi(double a_hi, double b_hi, Duration* d) {
*d = -InfiniteDuration();
return false;
}
- *d = time_internal::MakeDuration(c, time_internal::GetRepLo(*d));
+ *d = time_internal::MakeDuration(static_cast<int64_t>(c),
+ time_internal::GetRepLo(*d));
return true;
}
@@ -239,8 +240,8 @@ inline Duration ScaleFixed(Duration d, int64_t r) {
template <template <typename> class Operation>
inline Duration ScaleDouble(Duration d, double r) {
Operation<double> op;
- double hi_doub = op(time_internal::GetRepHi(d), r);
- double lo_doub = op(time_internal::GetRepLo(d), r);
+ double hi_doub = op(static_cast<double>(time_internal::GetRepHi(d)), r);
+ double lo_doub = op(static_cast<double>(time_internal::GetRepLo(d)), r);
double hi_int = 0;
double hi_frac = std::modf(hi_doub, &hi_int);
@@ -253,12 +254,15 @@ inline Duration ScaleDouble(Duration d, double r) {
double lo_frac = std::modf(lo_doub, &lo_int);
// Rolls lo into hi if necessary.
- int64_t lo64 = std::round(lo_frac * kTicksPerSecond);
+ int64_t lo64 = static_cast<int64_t>(std::round(lo_frac * kTicksPerSecond));
Duration ans;
if (!SafeAddRepHi(hi_int, lo_int, &ans)) return ans;
int64_t hi64 = time_internal::GetRepHi(ans);
- if (!SafeAddRepHi(hi64, lo64 / kTicksPerSecond, &ans)) return ans;
+ if (!SafeAddRepHi(static_cast<double>(hi64),
+ static_cast<double>(lo64 / kTicksPerSecond), &ans)) {
+ return ans;
+ }
hi64 = time_internal::GetRepHi(ans);
lo64 %= kTicksPerSecond;
NormalizeTicks(&hi64, &lo64);
@@ -699,8 +703,9 @@ void AppendNumberUnit(std::string* out, double n, DisplayUnit unit) {
char buf[kBufferSize]; // also large enough to hold integer part
char* ep = buf + sizeof(buf);
double d = 0;
- int64_t frac_part = std::round(std::modf(n, &d) * unit.pow10);
- int64_t int_part = d;
+ int64_t frac_part =
+ static_cast<int64_t>(std::round(std::modf(n, &d) * unit.pow10));
+ int64_t int_part = static_cast<int64_t>(d);
if (int_part != 0 || frac_part != 0) {
char* bp = Format64(ep, 0, int_part); // always < 1000
out->append(bp, static_cast<size_t>(ep - bp));
diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h
index b2b0cf6f511..5b232f51b83 100644
--- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h
+++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h
@@ -200,7 +200,7 @@ class time_zone {
// version() and description() provide additional information about the
// time zone. The content of each of the returned strings is unspecified,
// however, when the IANA Time Zone Database is the underlying data source
- // the version() string will be in the familar form (e.g, "2018e") or
+ // the version() string will be in the familiar form (e.g, "2018e") or
// empty when unavailable.
//
// Note: These functions are for informational or testing purposes only.
diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc
index a6f3430c4a2..0b0d0ccce08 100644
--- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc
+++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc
@@ -32,25 +32,8 @@
#error #include <zircon/types.h>
#endif
-#if defined(_WIN32)
-#include <sdkddkver.h>
-// Include only when the SDK is for Windows 10 (and later), and the binary is
-// targeted for Windows XP and later.
-// Note: The Windows SDK added windows.globalization.h file for Windows 10, but
-// MinGW did not add it until NTDDI_WIN10_NI (SDK version 10.0.22621.0).
-#if ((defined(_WIN32_WINNT_WIN10) && !defined(__MINGW32__)) || \
- (defined(NTDDI_WIN10_NI) && NTDDI_VERSION >= NTDDI_WIN10_NI)) && \
- (_WIN32_WINNT >= _WIN32_WINNT_WINXP)
-#define USE_WIN32_LOCAL_TIME_ZONE
-#include <roapi.h>
-#include <tchar.h>
-#include <wchar.h>
-#include <windows.globalization.h>
-#include <windows.h>
-#include <winstring.h>
-#endif
-#endif
-
+#include <array>
+#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <string>
@@ -65,80 +48,78 @@ namespace cctz {
namespace {
#if defined(USE_WIN32_LOCAL_TIME_ZONE)
-// Calls the WinRT Calendar.GetTimeZone method to obtain the IANA ID of the
-// local time zone. Returns an empty vector in case of an error.
-std::string win32_local_time_zone(const HMODULE combase) {
- std::string result;
- const auto ro_activate_instance =
- reinterpret_cast<decltype(&RoActivateInstance)>(
- GetProcAddress(combase, "RoActivateInstance"));
- if (!ro_activate_instance) {
- return result;
- }
- const auto windows_create_string_reference =
- reinterpret_cast<decltype(&WindowsCreateStringReference)>(
- GetProcAddress(combase, "WindowsCreateStringReference"));
- if (!windows_create_string_reference) {
- return result;
- }
- const auto windows_delete_string =
- reinterpret_cast<decltype(&WindowsDeleteString)>(
- GetProcAddress(combase, "WindowsDeleteString"));
- if (!windows_delete_string) {
- return result;
- }
- const auto windows_get_string_raw_buffer =
- reinterpret_cast<decltype(&WindowsGetStringRawBuffer)>(
- GetProcAddress(combase, "WindowsGetStringRawBuffer"));
- if (!windows_get_string_raw_buffer) {
- return result;
+// True if we have already failed to load the API.
+static std::atomic_bool g_ucal_getTimeZoneIDForWindowsIDUnavailable;
+static std::atomic<decltype(ucal_getTimeZoneIDForWindowsID)*>
+ g_ucal_getTimeZoneIDForWindowsIDRef;
+
+std::string win32_local_time_zone() {
+ // If we have already failed to load the API, then just give up.
+ if (g_ucal_getTimeZoneIDForWindowsIDUnavailable.load()) {
+ return "";
}
- // The string returned by WindowsCreateStringReference doesn't need to be
- // deleted.
- HSTRING calendar_class_id;
- HSTRING_HEADER calendar_class_id_header;
- HRESULT hr = windows_create_string_reference(
- RuntimeClass_Windows_Globalization_Calendar,
- sizeof(RuntimeClass_Windows_Globalization_Calendar) / sizeof(wchar_t) - 1,
- &calendar_class_id_header, &calendar_class_id);
- if (FAILED(hr)) {
- return result;
- }
+ auto ucal_getTimeZoneIDForWindowsIDFunc =
+ g_ucal_getTimeZoneIDForWindowsIDRef.load();
+ if (ucal_getTimeZoneIDForWindowsIDFunc == nullptr) {
+ // If we have already failed to load the API, then just give up.
+ if (g_ucal_getTimeZoneIDForWindowsIDUnavailable.load()) {
+ return "";
+ }
+
+ const HMODULE icudll =
+ ::LoadLibraryExW(L"icu.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
- IInspectable* calendar;
- hr = ro_activate_instance(calendar_class_id, &calendar);
- if (FAILED(hr)) {
- return result;
+ if (icudll == nullptr) {
+ g_ucal_getTimeZoneIDForWindowsIDUnavailable.store(true);
+ return "";
+ }
+
+ ucal_getTimeZoneIDForWindowsIDFunc =
+ reinterpret_cast<decltype(ucal_getTimeZoneIDForWindowsID)*>(
+ ::GetProcAddress(icudll, "ucal_getTimeZoneIDForWindowsID"));
+
+ if (ucal_getTimeZoneIDForWindowsIDFunc == nullptr) {
+ g_ucal_getTimeZoneIDForWindowsIDUnavailable.store(true);
+ return "";
+ }
+ // store-race is not a problem here, because ::GetProcAddress() returns the
+ // same address for the same function in the same DLL.
+ g_ucal_getTimeZoneIDForWindowsIDRef.store(
+ ucal_getTimeZoneIDForWindowsIDFunc);
+
+ // We intentionally do not call ::FreeLibrary() here to avoid frequent DLL
+ // loadings and unloading. As "icu.dll" is a system library, keeping it on
+ // memory is supposed to have no major drawback.
}
- ABI::Windows::Globalization::ITimeZoneOnCalendar* time_zone;
- hr = calendar->QueryInterface(IID_PPV_ARGS(&time_zone));
- if (FAILED(hr)) {
- calendar->Release();
- return result;
+ DYNAMIC_TIME_ZONE_INFORMATION info = {};
+ if (::GetDynamicTimeZoneInformation(&info) == TIME_ZONE_ID_INVALID) {
+ return "";
}
- HSTRING tz_hstr;
- hr = time_zone->GetTimeZone(&tz_hstr);
- if (SUCCEEDED(hr)) {
- UINT32 wlen;
- const PCWSTR tz_wstr = windows_get_string_raw_buffer(tz_hstr, &wlen);
- if (tz_wstr) {
- const int size =
- WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast<int>(wlen),
- nullptr, 0, nullptr, nullptr);
- result.resize(static_cast<size_t>(size));
- WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast<int>(wlen),
- &result[0], size, nullptr, nullptr);
- }
- windows_delete_string(tz_hstr);
+ std::array<UChar, 128> buffer;
+ UErrorCode status = U_ZERO_ERROR;
+ const auto num_chars_in_buffer = ucal_getTimeZoneIDForWindowsIDFunc(
+ reinterpret_cast<const UChar*>(info.TimeZoneKeyName), -1, nullptr,
+ buffer.data(), static_cast<int32_t>(buffer.size()), &status);
+ if (status != U_ZERO_ERROR || num_chars_in_buffer <= 0 ||
+ num_chars_in_buffer > static_cast<int32_t>(buffer.size())) {
+ return "";
}
- time_zone->Release();
- calendar->Release();
- return result;
+
+ const int num_bytes_in_utf8 = ::WideCharToMultiByte(
+ CP_UTF8, 0, reinterpret_cast<const wchar_t*>(buffer.data()),
+ static_cast<int>(num_chars_in_buffer), nullptr, 0, nullptr, nullptr);
+ std::string local_time_str;
+ local_time_str.resize(static_cast<size_t>(num_bytes_in_utf8));
+ ::WideCharToMultiByte(
+ CP_UTF8, 0, reinterpret_cast<const wchar_t*>(buffer.data()),
+ static_cast<int>(num_chars_in_buffer), &local_time_str[0],
+ num_bytes_in_utf8, nullptr, nullptr);
+ return local_time_str;
}
-#endif
+#endif // USE_WIN32_LOCAL_TIME_ZONE
} // namespace
std::string time_zone::name() const { return effective_impl().Name(); }
@@ -256,36 +237,9 @@ time_zone local_time_zone() {
}
#endif
#if defined(USE_WIN32_LOCAL_TIME_ZONE)
- // Use the WinRT Calendar class to get the local time zone. This feature is
- // available on Windows 10 and later. The library is dynamically linked to
- // maintain binary compatibility with Windows XP - Windows 7. On Windows 8,
- // The combase.dll API functions are available but the RoActivateInstance
- // call will fail for the Calendar class.
- std::string winrt_tz;
- const HMODULE combase =
- LoadLibraryEx(_T("combase.dll"), nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
- if (combase) {
- const auto ro_initialize = reinterpret_cast<decltype(&::RoInitialize)>(
- GetProcAddress(combase, "RoInitialize"));
- const auto ro_uninitialize = reinterpret_cast<decltype(&::RoUninitialize)>(
- GetProcAddress(combase, "RoUninitialize"));
- if (ro_initialize && ro_uninitialize) {
- const HRESULT hr = ro_initialize(RO_INIT_MULTITHREADED);
- // RPC_E_CHANGED_MODE means that a previous RoInitialize call specified
- // a different concurrency model. The WinRT runtime is initialized and
- // should work for our purpose here, but we should *not* call
- // RoUninitialize because it's a failure.
- if (SUCCEEDED(hr) || hr == RPC_E_CHANGED_MODE) {
- winrt_tz = win32_local_time_zone(combase);
- if (SUCCEEDED(hr)) {
- ro_uninitialize();
- }
- }
- }
- FreeLibrary(combase);
- }
- if (!winrt_tz.empty()) {
- zone = winrt_tz.c_str();
+ std::string win32_tz = win32_local_time_zone();
+ if (!win32_tz.empty()) {
+ zone = win32_tz.c_str();
}
#endif
diff --git a/contrib/restricted/abseil-cpp/absl/time/time.h b/contrib/restricted/abseil-cpp/absl/time/time.h
index d73a204c12c..db17a4cd40b 100644
--- a/contrib/restricted/abseil-cpp/absl/time/time.h
+++ b/contrib/restricted/abseil-cpp/absl/time/time.h
@@ -620,12 +620,12 @@ ABSL_ATTRIBUTE_CONST_FUNCTION Duration Hours(T n) {
//
// absl::Duration d = absl::Milliseconds(1500);
// int64_t isec = absl::ToInt64Seconds(d); // isec == 1
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Nanoseconds(Duration d);
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Microseconds(Duration d);
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Milliseconds(Duration d);
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Seconds(Duration d);
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Minutes(Duration d);
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Hours(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Nanoseconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Microseconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Milliseconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Seconds(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Minutes(Duration d);
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Hours(Duration d);
// ToDoubleNanoseconds()
// ToDoubleMicroseconds()
@@ -1864,7 +1864,7 @@ ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromTimeT(time_t t) {
return time_internal::FromUnixDuration(Seconds(t));
}
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Nanoseconds(Duration d) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Nanoseconds(Duration d) {
if (time_internal::GetRepHi(d) >= 0 &&
time_internal::GetRepHi(d) >> 33 == 0) {
return (time_internal::GetRepHi(d) * 1000 * 1000 * 1000) +
@@ -1873,7 +1873,8 @@ ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Nanoseconds(Duration d) {
return d / Nanoseconds(1);
}
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Microseconds(Duration d) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Microseconds(
+ Duration d) {
if (time_internal::GetRepHi(d) >= 0 &&
time_internal::GetRepHi(d) >> 43 == 0) {
return (time_internal::GetRepHi(d) * 1000 * 1000) +
@@ -1883,7 +1884,8 @@ ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Microseconds(Duration d) {
return d / Microseconds(1);
}
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Milliseconds(Duration d) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Milliseconds(
+ Duration d) {
if (time_internal::GetRepHi(d) >= 0 &&
time_internal::GetRepHi(d) >> 53 == 0) {
return (time_internal::GetRepHi(d) * 1000) +
@@ -1893,21 +1895,21 @@ ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Milliseconds(Duration d) {
return d / Milliseconds(1);
}
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Seconds(Duration d) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Seconds(Duration d) {
int64_t hi = time_internal::GetRepHi(d);
if (time_internal::IsInfiniteDuration(d)) return hi;
if (hi < 0 && time_internal::GetRepLo(d) != 0) ++hi;
return hi;
}
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Minutes(Duration d) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Minutes(Duration d) {
int64_t hi = time_internal::GetRepHi(d);
if (time_internal::IsInfiniteDuration(d)) return hi;
if (hi < 0 && time_internal::GetRepLo(d) != 0) ++hi;
return hi / 60;
}
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64Hours(Duration d) {
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Hours(Duration d) {
int64_t hi = time_internal::GetRepHi(d);
if (time_internal::IsInfiniteDuration(d)) return hi;
if (hi < 0 && time_internal::GetRepLo(d) != 0) ++hi;
diff --git a/contrib/restricted/abseil-cpp/absl/types/any.h b/contrib/restricted/abseil-cpp/absl/types/any.h
index 61f071f19bc..c4886316b0c 100644
--- a/contrib/restricted/abseil-cpp/absl/types/any.h
+++ b/contrib/restricted/abseil-cpp/absl/types/any.h
@@ -17,49 +17,20 @@
// any.h
// -----------------------------------------------------------------------------
//
-// This header file define the `absl::any` type for holding a type-safe value
-// of any type. The 'absl::any` type is useful for providing a way to hold
-// something that is, as yet, unspecified. Such unspecified types
-// traditionally are passed between API boundaries until they are later cast to
-// their "destination" types. To cast to such a destination type, use
-// `absl::any_cast()`. Note that when casting an `absl::any`, you must cast it
-// to an explicit type; implicit conversions will throw.
-//
-// Example:
-//
-// auto a = absl::any(65);
-// absl::any_cast<int>(a); // 65
-// absl::any_cast<char>(a); // throws absl::bad_any_cast
-// absl::any_cast<std::string>(a); // throws absl::bad_any_cast
-//
-// `absl::any` is a C++11 compatible version of the C++17 `std::any` abstraction
-// and is designed to be a drop-in replacement for code compliant with C++17.
-//
-// Traditionally, the behavior of casting to a temporary unspecified type has
-// been accomplished with the `void *` paradigm, where the pointer was to some
-// other unspecified type. `absl::any` provides an "owning" version of `void *`
-// that avoids issues of pointer management.
-//
-// Note: just as in the case of `void *`, use of `absl::any` (and its C++17
-// version `std::any`) is a code smell indicating that your API might not be
-// constructed correctly. We have seen that most uses of `any` are unwarranted,
-// and `absl::any`, like `std::any`, is difficult to use properly. Before using
-// this abstraction, make sure that you should not instead be rewriting your
-// code to be more specific.
-//
-// Abseil has also released an `absl::variant` type (a C++11 compatible version
-// of the C++17 `std::variant`), which is generally preferred for use over
-// `absl::any`.
+// Historical note: Abseil once provided an implementation of `absl::any` as a
+// polyfill for `std::any` prior to C++17. Now that C++17 is required,
+// `absl::any` is an alias for `std::any`.
+
#ifndef ABSL_TYPES_ANY_H_
#define ABSL_TYPES_ANY_H_
-#include "absl/base/attributes.h"
-#include "absl/base/config.h"
-#include "absl/utility/utility.h"
+#include <any> // IWYU pragma: export
-#ifdef ABSL_USES_STD_ANY
+#include "absl/base/config.h"
-#include <any> // IWYU pragma: export
+// Include-what-you-use cleanup required for these headers.
+#include "absl/base/attributes.h"
+#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -70,450 +41,4 @@ using std::make_any;
ABSL_NAMESPACE_END
} // namespace absl
-#else // ABSL_USES_STD_ANY
-
-#include <algorithm>
-#include <cstddef>
-#include <initializer_list>
-#include <memory>
-#include <stdexcept>
-#include <type_traits>
-#include <typeinfo>
-#include <utility>
-
-#include "absl/base/internal/fast_type_id.h"
-#include "absl/meta/type_traits.h"
-#include "absl/types/bad_any_cast.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-class any;
-
-// swap()
-//
-// Swaps two `absl::any` values. Equivalent to `x.swap(y) where `x` and `y` are
-// `absl::any` types.
-void swap(any& x, any& y) noexcept;
-
-// make_any()
-//
-// Constructs an `absl::any` of type `T` with the given arguments.
-template <typename T, typename... Args>
-any make_any(Args&&... args);
-
-// Overload of `absl::make_any()` for constructing an `absl::any` type from an
-// initializer list.
-template <typename T, typename U, typename... Args>
-any make_any(std::initializer_list<U> il, Args&&... args);
-
-// any_cast()
-//
-// Statically casts the value of a `const absl::any` type to the given type.
-// This function will throw `absl::bad_any_cast` if the stored value type of the
-// `absl::any` does not match the cast.
-//
-// `any_cast()` can also be used to get a reference to the internal storage iff
-// a reference type is passed as its `ValueType`:
-//
-// Example:
-//
-// absl::any my_any = std::vector<int>();
-// absl::any_cast<std::vector<int>&>(my_any).push_back(42);
-template <typename ValueType>
-ValueType any_cast(const any& operand);
-
-// Overload of `any_cast()` to statically cast the value of a non-const
-// `absl::any` type to the given type. This function will throw
-// `absl::bad_any_cast` if the stored value type of the `absl::any` does not
-// match the cast.
-template <typename ValueType>
-ValueType any_cast(any& operand); // NOLINT(runtime/references)
-
-// Overload of `any_cast()` to statically cast the rvalue of an `absl::any`
-// type. This function will throw `absl::bad_any_cast` if the stored value type
-// of the `absl::any` does not match the cast.
-template <typename ValueType>
-ValueType any_cast(any&& operand);
-
-// Overload of `any_cast()` to statically cast the value of a const pointer
-// `absl::any` type to the given pointer type, or `nullptr` if the stored value
-// type of the `absl::any` does not match the cast.
-template <typename ValueType>
-const ValueType* any_cast(const any* operand) noexcept;
-
-// Overload of `any_cast()` to statically cast the value of a pointer
-// `absl::any` type to the given pointer type, or `nullptr` if the stored value
-// type of the `absl::any` does not match the cast.
-template <typename ValueType>
-ValueType* any_cast(any* operand) noexcept;
-
-// -----------------------------------------------------------------------------
-// absl::any
-// -----------------------------------------------------------------------------
-//
-// An `absl::any` object provides the facility to either store an instance of a
-// type, known as the "contained object", or no value. An `absl::any` is used to
-// store values of types that are unknown at compile time. The `absl::any`
-// object, when containing a value, must contain a value type; storing a
-// reference type is neither desired nor supported.
-//
-// An `absl::any` can only store a type that is copy-constructible; move-only
-// types are not allowed within an `any` object.
-//
-// Example:
-//
-// auto a = absl::any(65); // Literal, copyable
-// auto b = absl::any(std::vector<int>()); // Default-initialized, copyable
-// std::unique_ptr<Foo> my_foo;
-// auto c = absl::any(std::move(my_foo)); // Error, not copy-constructible
-//
-// Note that `absl::any` makes use of decayed types (`absl::decay_t` in this
-// context) to remove const-volatile qualifiers (known as "cv qualifiers"),
-// decay functions to function pointers, etc. We essentially "decay" a given
-// type into its essential type.
-//
-// `absl::any` makes use of decayed types when determining the basic type `T` of
-// the value to store in the any's contained object. In the documentation below,
-// we explicitly denote this by using the phrase "a decayed type of `T`".
-//
-// Example:
-//
-// const int a = 4;
-// absl::any foo(a); // Decay ensures we store an "int", not a "const int&".
-//
-// void my_function() {}
-// absl::any bar(my_function); // Decay ensures we store a function pointer.
-//
-// `absl::any` is a C++11 compatible version of the C++17 `std::any` abstraction
-// and is designed to be a drop-in replacement for code compliant with C++17.
-class any {
- private:
- template <typename T>
- struct IsInPlaceType;
-
- public:
- // Constructors
-
- // Constructs an empty `absl::any` object (`any::has_value()` will return
- // `false`).
- constexpr any() noexcept;
-
- // Copy constructs an `absl::any` object with a "contained object" of the
- // passed type of `other` (or an empty `absl::any` if `other.has_value()` is
- // `false`.
- any(const any& other)
- : obj_(other.has_value() ? other.obj_->Clone()
- : std::unique_ptr<ObjInterface>()) {}
-
- // Move constructs an `absl::any` object with a "contained object" of the
- // passed type of `other` (or an empty `absl::any` if `other.has_value()` is
- // `false`).
- any(any&& other) noexcept = default;
-
- // Constructs an `absl::any` object with a "contained object" of the decayed
- // type of `T`, which is initialized via `std::forward<T>(value)`.
- //
- // This constructor will not participate in overload resolution if the
- // decayed type of `T` is not copy-constructible.
- template <
- typename T, typename VT = absl::decay_t<T>,
- absl::enable_if_t<!absl::disjunction<
- std::is_same<any, VT>, IsInPlaceType<VT>,
- absl::negation<std::is_copy_constructible<VT> > >::value>* = nullptr>
- any(T&& value) : obj_(new Obj<VT>(in_place, std::forward<T>(value))) {}
-
- // Constructs an `absl::any` object with a "contained object" of the decayed
- // type of `T`, which is initialized via `std::forward<T>(value)`.
- template <typename T, typename... Args, typename VT = absl::decay_t<T>,
- absl::enable_if_t<absl::conjunction<
- std::is_copy_constructible<VT>,
- std::is_constructible<VT, Args...>>::value>* = nullptr>
- explicit any(in_place_type_t<T> /*tag*/, Args&&... args)
- : obj_(new Obj<VT>(in_place, std::forward<Args>(args)...)) {}
-
- // Constructs an `absl::any` object with a "contained object" of the passed
- // type `VT` as a decayed type of `T`. `VT` is initialized as if
- // direct-non-list-initializing an object of type `VT` with the arguments
- // `initializer_list, std::forward<Args>(args)...`.
- template <
- typename T, typename U, typename... Args, typename VT = absl::decay_t<T>,
- absl::enable_if_t<
- absl::conjunction<std::is_copy_constructible<VT>,
- std::is_constructible<VT, std::initializer_list<U>&,
- Args...>>::value>* = nullptr>
- explicit any(in_place_type_t<T> /*tag*/, std::initializer_list<U> ilist,
- Args&&... args)
- : obj_(new Obj<VT>(in_place, ilist, std::forward<Args>(args)...)) {}
-
- // Assignment operators
-
- // Copy assigns an `absl::any` object with a "contained object" of the
- // passed type.
- any& operator=(const any& rhs) {
- any(rhs).swap(*this);
- return *this;
- }
-
- // Move assigns an `absl::any` object with a "contained object" of the
- // passed type. `rhs` is left in a valid but otherwise unspecified state.
- any& operator=(any&& rhs) noexcept {
- any(std::move(rhs)).swap(*this);
- return *this;
- }
-
- // Assigns an `absl::any` object with a "contained object" of the passed type.
- template <typename T, typename VT = absl::decay_t<T>,
- absl::enable_if_t<absl::conjunction<
- absl::negation<std::is_same<VT, any>>,
- std::is_copy_constructible<VT>>::value>* = nullptr>
- any& operator=(T&& rhs) {
- any tmp(in_place_type_t<VT>(), std::forward<T>(rhs));
- tmp.swap(*this);
- return *this;
- }
-
- // Modifiers
-
- // any::emplace()
- //
- // Emplaces a value within an `absl::any` object by calling `any::reset()`,
- // initializing the contained value as if direct-non-list-initializing an
- // object of type `VT` with the arguments `std::forward<Args>(args)...`, and
- // returning a reference to the new contained value.
- //
- // Note: If an exception is thrown during the call to `VT`'s constructor,
- // `*this` does not contain a value, and any previously contained value has
- // been destroyed.
- template <
- typename T, typename... Args, typename VT = absl::decay_t<T>,
- absl::enable_if_t<std::is_copy_constructible<VT>::value &&
- std::is_constructible<VT, Args...>::value>* = nullptr>
- VT& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- reset(); // NOTE: reset() is required here even in the world of exceptions.
- Obj<VT>* const object_ptr =
- new Obj<VT>(in_place, std::forward<Args>(args)...);
- obj_ = std::unique_ptr<ObjInterface>(object_ptr);
- return object_ptr->value;
- }
-
- // Overload of `any::emplace()` to emplace a value within an `absl::any`
- // object by calling `any::reset()`, initializing the contained value as if
- // direct-non-list-initializing an object of type `VT` with the arguments
- // `initializer_list, std::forward<Args>(args)...`, and returning a reference
- // to the new contained value.
- //
- // Note: If an exception is thrown during the call to `VT`'s constructor,
- // `*this` does not contain a value, and any previously contained value has
- // been destroyed. The function shall not participate in overload resolution
- // unless `is_copy_constructible_v<VT>` is `true` and
- // `is_constructible_v<VT, initializer_list<U>&, Args...>` is `true`.
- template <
- typename T, typename U, typename... Args, typename VT = absl::decay_t<T>,
- absl::enable_if_t<std::is_copy_constructible<VT>::value &&
- std::is_constructible<VT, std::initializer_list<U>&,
- Args...>::value>* = nullptr>
- VT& emplace(std::initializer_list<U> ilist,
- Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- reset(); // NOTE: reset() is required here even in the world of exceptions.
- Obj<VT>* const object_ptr =
- new Obj<VT>(in_place, ilist, std::forward<Args>(args)...);
- obj_ = std::unique_ptr<ObjInterface>(object_ptr);
- return object_ptr->value;
- }
-
- // any::reset()
- //
- // Resets the state of the `absl::any` object, destroying the contained object
- // if present.
- void reset() noexcept { obj_ = nullptr; }
-
- // any::swap()
- //
- // Swaps the passed value and the value of this `absl::any` object.
- void swap(any& other) noexcept { obj_.swap(other.obj_); }
-
- // Observers
-
- // any::has_value()
- //
- // Returns `true` if the `any` object has a contained value, otherwise
- // returns `false`.
- bool has_value() const noexcept { return obj_ != nullptr; }
-
-#ifdef ABSL_INTERNAL_HAS_RTTI
- // Returns: typeid(T) if *this has a contained object of type T, otherwise
- // typeid(void).
- const std::type_info& type() const noexcept {
- if (has_value()) {
- return obj_->Type();
- }
-
- return typeid(void);
- }
-#endif // ABSL_INTERNAL_HAS_RTTI
-
- private:
- // Tagged type-erased abstraction for holding a cloneable object.
- class ObjInterface {
- public:
- virtual ~ObjInterface() = default;
- virtual std::unique_ptr<ObjInterface> Clone() const = 0;
- virtual const void* ObjTypeId() const noexcept = 0;
-#ifdef ABSL_INTERNAL_HAS_RTTI
- virtual const std::type_info& Type() const noexcept = 0;
-#endif // ABSL_INTERNAL_HAS_RTTI
- };
-
- // Hold a value of some queryable type, with an ability to Clone it.
- template <typename T>
- class Obj : public ObjInterface {
- public:
- template <typename... Args>
- explicit Obj(in_place_t /*tag*/, Args&&... args)
- : value(std::forward<Args>(args)...) {}
-
- std::unique_ptr<ObjInterface> Clone() const final {
- return std::unique_ptr<ObjInterface>(new Obj(in_place, value));
- }
-
- const void* ObjTypeId() const noexcept final { return IdForType<T>(); }
-
-#ifdef ABSL_INTERNAL_HAS_RTTI
- const std::type_info& Type() const noexcept final { return typeid(T); }
-#endif // ABSL_INTERNAL_HAS_RTTI
-
- T value;
- };
-
- std::unique_ptr<ObjInterface> CloneObj() const {
- if (!obj_) return nullptr;
- return obj_->Clone();
- }
-
- template <typename T>
- constexpr static const void* IdForType() {
- // Note: This type dance is to make the behavior consistent with typeid.
- using NormalizedType =
- typename std::remove_cv<typename std::remove_reference<T>::type>::type;
-
- return base_internal::FastTypeId<NormalizedType>();
- }
-
- const void* GetObjTypeId() const {
- return obj_ ? obj_->ObjTypeId() : base_internal::FastTypeId<void>();
- }
-
- // `absl::any` nonmember functions //
-
- // Description at the declaration site (top of file).
- template <typename ValueType>
- friend ValueType any_cast(const any& operand);
-
- // Description at the declaration site (top of file).
- template <typename ValueType>
- friend ValueType any_cast(any& operand); // NOLINT(runtime/references)
-
- // Description at the declaration site (top of file).
- template <typename T>
- friend const T* any_cast(const any* operand) noexcept;
-
- // Description at the declaration site (top of file).
- template <typename T>
- friend T* any_cast(any* operand) noexcept;
-
- std::unique_ptr<ObjInterface> obj_;
-};
-
-// -----------------------------------------------------------------------------
-// Implementation Details
-// -----------------------------------------------------------------------------
-
-constexpr any::any() noexcept = default;
-
-template <typename T>
-struct any::IsInPlaceType : std::false_type {};
-
-template <typename T>
-struct any::IsInPlaceType<in_place_type_t<T>> : std::true_type {};
-
-inline void swap(any& x, any& y) noexcept { x.swap(y); }
-
-// Description at the declaration site (top of file).
-template <typename T, typename... Args>
-any make_any(Args&&... args) {
- return any(in_place_type_t<T>(), std::forward<Args>(args)...);
-}
-
-// Description at the declaration site (top of file).
-template <typename T, typename U, typename... Args>
-any make_any(std::initializer_list<U> il, Args&&... args) {
- return any(in_place_type_t<T>(), il, std::forward<Args>(args)...);
-}
-
-// Description at the declaration site (top of file).
-template <typename ValueType>
-ValueType any_cast(const any& operand) {
- using U = typename std::remove_cv<
- typename std::remove_reference<ValueType>::type>::type;
- static_assert(std::is_constructible<ValueType, const U&>::value,
- "Invalid ValueType");
- auto* const result = (any_cast<U>)(&operand);
- if (result == nullptr) {
- any_internal::ThrowBadAnyCast();
- }
- return static_cast<ValueType>(*result);
-}
-
-// Description at the declaration site (top of file).
-template <typename ValueType>
-ValueType any_cast(any& operand) { // NOLINT(runtime/references)
- using U = typename std::remove_cv<
- typename std::remove_reference<ValueType>::type>::type;
- static_assert(std::is_constructible<ValueType, U&>::value,
- "Invalid ValueType");
- auto* result = (any_cast<U>)(&operand);
- if (result == nullptr) {
- any_internal::ThrowBadAnyCast();
- }
- return static_cast<ValueType>(*result);
-}
-
-// Description at the declaration site (top of file).
-template <typename ValueType>
-ValueType any_cast(any&& operand) {
- using U = typename std::remove_cv<
- typename std::remove_reference<ValueType>::type>::type;
- static_assert(std::is_constructible<ValueType, U>::value,
- "Invalid ValueType");
- return static_cast<ValueType>(std::move((any_cast<U&>)(operand)));
-}
-
-// Description at the declaration site (top of file).
-template <typename T>
-const T* any_cast(const any* operand) noexcept {
- using U =
- typename std::remove_cv<typename std::remove_reference<T>::type>::type;
- return operand && operand->GetObjTypeId() == any::IdForType<U>()
- ? std::addressof(
- static_cast<const any::Obj<U>*>(operand->obj_.get())->value)
- : nullptr;
-}
-
-// Description at the declaration site (top of file).
-template <typename T>
-T* any_cast(any* operand) noexcept {
- using U =
- typename std::remove_cv<typename std::remove_reference<T>::type>::type;
- return operand && operand->GetObjTypeId() == any::IdForType<U>()
- ? std::addressof(
- static_cast<any::Obj<U>*>(operand->obj_.get())->value)
- : nullptr;
-}
-
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_USES_STD_ANY
-
#endif // ABSL_TYPES_ANY_H_
diff --git a/contrib/restricted/abseil-cpp/absl/types/bad_any_cast.cc b/contrib/restricted/abseil-cpp/absl/types/bad_any_cast.cc
deleted file mode 100644
index 22558b48c28..00000000000
--- a/contrib/restricted/abseil-cpp/absl/types/bad_any_cast.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/types/bad_any_cast.h"
-
-#ifndef ABSL_USES_STD_ANY
-
-#include <cstdlib>
-
-#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-bad_any_cast::~bad_any_cast() = default;
-
-const char* bad_any_cast::what() const noexcept { return "Bad any cast"; }
-
-namespace any_internal {
-
-void ThrowBadAnyCast() {
-#ifdef ABSL_HAVE_EXCEPTIONS
- throw bad_any_cast();
-#else
- ABSL_RAW_LOG(FATAL, "Bad any cast");
- std::abort();
-#endif
-}
-
-} // namespace any_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#else
-
-// https://github.com/abseil/abseil-cpp/issues/1465
-// CMake builds on Apple platforms error when libraries are empty.
-// Our CMake configuration can avoid this error on header-only libraries,
-// but since this library is conditionally empty, including a single
-// variable is an easy workaround.
-#ifdef __APPLE__
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace types_internal {
-extern const char kAvoidEmptyBadAnyCastLibraryWarning;
-const char kAvoidEmptyBadAnyCastLibraryWarning = 0;
-} // namespace types_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-#endif // __APPLE__
-
-#endif // ABSL_USES_STD_ANY
diff --git a/contrib/restricted/abseil-cpp/absl/types/bad_any_cast.h b/contrib/restricted/abseil-cpp/absl/types/bad_any_cast.h
deleted file mode 100644
index 114cef80cdd..00000000000
--- a/contrib/restricted/abseil-cpp/absl/types/bad_any_cast.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// bad_any_cast.h
-// -----------------------------------------------------------------------------
-//
-// This header file defines the `absl::bad_any_cast` type.
-
-#ifndef ABSL_TYPES_BAD_ANY_CAST_H_
-#define ABSL_TYPES_BAD_ANY_CAST_H_
-
-#include <typeinfo>
-
-#include "absl/base/config.h"
-
-#ifdef ABSL_USES_STD_ANY
-
-#include <any>
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-using std::bad_any_cast;
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#else // ABSL_USES_STD_ANY
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// -----------------------------------------------------------------------------
-// bad_any_cast
-// -----------------------------------------------------------------------------
-//
-// An `absl::bad_any_cast` type is an exception type that is thrown when
-// failing to successfully cast the return value of an `absl::any` object.
-//
-// Example:
-//
-// auto a = absl::any(65);
-// absl::any_cast<int>(a); // 65
-// try {
-// absl::any_cast<char>(a);
-// } catch(const absl::bad_any_cast& e) {
-// std::cout << "Bad any cast: " << e.what() << '\n';
-// }
-class bad_any_cast : public std::bad_cast {
- public:
- ~bad_any_cast() override;
- const char* what() const noexcept override;
-};
-
-namespace any_internal {
-
-[[noreturn]] void ThrowBadAnyCast();
-
-} // namespace any_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_USES_STD_ANY
-
-#endif // ABSL_TYPES_BAD_ANY_CAST_H_
diff --git a/contrib/restricted/abseil-cpp/absl/types/bad_optional_access.cc b/contrib/restricted/abseil-cpp/absl/types/bad_optional_access.cc
deleted file mode 100644
index 2552cc853a1..00000000000
--- a/contrib/restricted/abseil-cpp/absl/types/bad_optional_access.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/types/bad_optional_access.h"
-
-#ifndef ABSL_USES_STD_OPTIONAL
-
-#include <cstdlib>
-
-#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-bad_optional_access::~bad_optional_access() = default;
-
-const char* bad_optional_access::what() const noexcept {
- return "optional has no value";
-}
-
-namespace optional_internal {
-
-void throw_bad_optional_access() {
-#ifdef ABSL_HAVE_EXCEPTIONS
- throw bad_optional_access();
-#else
- ABSL_RAW_LOG(FATAL, "Bad optional access");
- abort();
-#endif
-}
-
-} // namespace optional_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#else
-
-// https://github.com/abseil/abseil-cpp/issues/1465
-// CMake builds on Apple platforms error when libraries are empty.
-// Our CMake configuration can avoid this error on header-only libraries,
-// but since this library is conditionally empty, including a single
-// variable is an easy workaround.
-#ifdef __APPLE__
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace types_internal {
-extern const char kAvoidEmptyBadOptionalAccessLibraryWarning;
-const char kAvoidEmptyBadOptionalAccessLibraryWarning = 0;
-} // namespace types_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-#endif // __APPLE__
-
-#endif // ABSL_USES_STD_OPTIONAL
diff --git a/contrib/restricted/abseil-cpp/absl/types/bad_optional_access.h b/contrib/restricted/abseil-cpp/absl/types/bad_optional_access.h
deleted file mode 100644
index 049e72ad9a8..00000000000
--- a/contrib/restricted/abseil-cpp/absl/types/bad_optional_access.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// bad_optional_access.h
-// -----------------------------------------------------------------------------
-//
-// This header file defines the `absl::bad_optional_access` type.
-
-#ifndef ABSL_TYPES_BAD_OPTIONAL_ACCESS_H_
-#define ABSL_TYPES_BAD_OPTIONAL_ACCESS_H_
-
-#include <stdexcept>
-
-#include "absl/base/config.h"
-
-#ifdef ABSL_USES_STD_OPTIONAL
-
-#include <optional>
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-using std::bad_optional_access;
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#else // ABSL_USES_STD_OPTIONAL
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// -----------------------------------------------------------------------------
-// bad_optional_access
-// -----------------------------------------------------------------------------
-//
-// An `absl::bad_optional_access` type is an exception type that is thrown when
-// attempting to access an `absl::optional` object that does not contain a
-// value.
-//
-// Example:
-//
-// absl::optional<int> o;
-//
-// try {
-// int n = o.value();
-// } catch(const absl::bad_optional_access& e) {
-// std::cout << "Bad optional access: " << e.what() << '\n';
-// }
-class bad_optional_access : public std::exception {
- public:
- bad_optional_access() = default;
- ~bad_optional_access() override;
- const char* what() const noexcept override;
-};
-
-namespace optional_internal {
-
-// throw delegator
-[[noreturn]] ABSL_DLL void throw_bad_optional_access();
-
-} // namespace optional_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_USES_STD_OPTIONAL
-
-#endif // ABSL_TYPES_BAD_OPTIONAL_ACCESS_H_
diff --git a/contrib/restricted/abseil-cpp/absl/types/bad_variant_access.cc b/contrib/restricted/abseil-cpp/absl/types/bad_variant_access.cc
deleted file mode 100644
index a76aa80dfa1..00000000000
--- a/contrib/restricted/abseil-cpp/absl/types/bad_variant_access.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/types/bad_variant_access.h"
-
-#ifndef ABSL_USES_STD_VARIANT
-
-#include <cstdlib>
-#include <stdexcept>
-
-#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-//////////////////////////
-// [variant.bad.access] //
-//////////////////////////
-
-bad_variant_access::~bad_variant_access() = default;
-
-const char* bad_variant_access::what() const noexcept {
- return "Bad variant access";
-}
-
-namespace variant_internal {
-
-void ThrowBadVariantAccess() {
-#ifdef ABSL_HAVE_EXCEPTIONS
- throw bad_variant_access();
-#else
- ABSL_RAW_LOG(FATAL, "Bad variant access");
- abort(); // TODO(calabrese) Remove once RAW_LOG FATAL is noreturn.
-#endif
-}
-
-void Rethrow() {
-#ifdef ABSL_HAVE_EXCEPTIONS
- throw;
-#else
- ABSL_RAW_LOG(FATAL,
- "Internal error in absl::variant implementation. Attempted to "
- "rethrow an exception when building with exceptions disabled.");
- abort(); // TODO(calabrese) Remove once RAW_LOG FATAL is noreturn.
-#endif
-}
-
-} // namespace variant_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#else
-
-// https://github.com/abseil/abseil-cpp/issues/1465
-// CMake builds on Apple platforms error when libraries are empty.
-// Our CMake configuration can avoid this error on header-only libraries,
-// but since this library is conditionally empty, including a single
-// variable is an easy workaround.
-#ifdef __APPLE__
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace types_internal {
-extern const char kAvoidEmptyBadVariantAccessLibraryWarning;
-const char kAvoidEmptyBadVariantAccessLibraryWarning = 0;
-} // namespace types_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-#endif // __APPLE__
-
-#endif // ABSL_USES_STD_VARIANT
diff --git a/contrib/restricted/abseil-cpp/absl/types/bad_variant_access.h b/contrib/restricted/abseil-cpp/absl/types/bad_variant_access.h
deleted file mode 100644
index 8ab215e97da..00000000000
--- a/contrib/restricted/abseil-cpp/absl/types/bad_variant_access.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// bad_variant_access.h
-// -----------------------------------------------------------------------------
-//
-// This header file defines the `absl::bad_variant_access` type.
-
-#ifndef ABSL_TYPES_BAD_VARIANT_ACCESS_H_
-#define ABSL_TYPES_BAD_VARIANT_ACCESS_H_
-
-#include <stdexcept>
-
-#include "absl/base/config.h"
-
-#ifdef ABSL_USES_STD_VARIANT
-
-#include <variant>
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-using std::bad_variant_access;
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#else // ABSL_USES_STD_VARIANT
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// -----------------------------------------------------------------------------
-// bad_variant_access
-// -----------------------------------------------------------------------------
-//
-// An `absl::bad_variant_access` type is an exception type that is thrown in
-// the following cases:
-//
-// * Calling `absl::get(absl::variant) with an index or type that does not
-// match the currently selected alternative type
-// * Calling `absl::visit on an `absl::variant` that is in the
-// `variant::valueless_by_exception` state.
-//
-// Example:
-//
-// absl::variant<int, std::string> v;
-// v = 1;
-// try {
-// absl::get<std::string>(v);
-// } catch(const absl::bad_variant_access& e) {
-// std::cout << "Bad variant access: " << e.what() << '\n';
-// }
-class bad_variant_access : public std::exception {
- public:
- bad_variant_access() noexcept = default;
- ~bad_variant_access() override;
- const char* what() const noexcept override;
-};
-
-namespace variant_internal {
-
-[[noreturn]] ABSL_DLL void ThrowBadVariantAccess();
-[[noreturn]] ABSL_DLL void Rethrow();
-
-} // namespace variant_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_USES_STD_VARIANT
-
-#endif // ABSL_TYPES_BAD_VARIANT_ACCESS_H_
diff --git a/contrib/restricted/abseil-cpp/absl/types/internal/optional.h b/contrib/restricted/abseil-cpp/absl/types/internal/optional.h
deleted file mode 100644
index 5731a5bcc90..00000000000
--- a/contrib/restricted/abseil-cpp/absl/types/internal/optional.h
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-#ifndef ABSL_TYPES_INTERNAL_OPTIONAL_H_
-#define ABSL_TYPES_INTERNAL_OPTIONAL_H_
-
-#include <functional>
-#include <new>
-#include <type_traits>
-#include <utility>
-
-#include "absl/base/internal/inline_variable.h"
-#include "absl/memory/memory.h"
-#include "absl/meta/type_traits.h"
-#include "absl/utility/utility.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// Forward declaration
-template <typename T>
-class optional;
-
-namespace optional_internal {
-
-// This tag type is used as a constructor parameter type for `nullopt_t`.
-struct init_t {
- explicit init_t() = default;
-};
-
-struct empty_struct {};
-
-// This class stores the data in optional<T>.
-// It is specialized based on whether T is trivially destructible.
-// This is the specialization for non trivially destructible type.
-template <typename T, bool unused = std::is_trivially_destructible<T>::value>
-class optional_data_dtor_base {
- struct dummy_type {
- static_assert(sizeof(T) % sizeof(empty_struct) == 0, "");
- // Use an array to avoid GCC 6 placement-new warning.
- empty_struct data[sizeof(T) / sizeof(empty_struct)];
- };
-
- protected:
- // Whether there is data or not.
- bool engaged_;
- // Data storage
- union {
- T data_;
- dummy_type dummy_;
- };
-
- void destruct() noexcept {
- if (engaged_) {
- // `data_` must be initialized if `engaged_` is true.
-#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
- data_.~T();
-#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
-#pragma GCC diagnostic pop
-#endif
- engaged_ = false;
- }
- }
-
- // dummy_ must be initialized for constexpr constructor.
- constexpr optional_data_dtor_base() noexcept : engaged_(false), dummy_{{}} {}
-
- template <typename... Args>
- constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
- : engaged_(true), data_(std::forward<Args>(args)...) {}
-
- ~optional_data_dtor_base() { destruct(); }
-};
-
-// Specialization for trivially destructible type.
-template <typename T>
-class optional_data_dtor_base<T, true> {
- struct dummy_type {
- static_assert(sizeof(T) % sizeof(empty_struct) == 0, "");
- // Use array to avoid GCC 6 placement-new warning.
- empty_struct data[sizeof(T) / sizeof(empty_struct)];
- };
-
- protected:
- // Whether there is data or not.
- bool engaged_;
- // Data storage
- union {
- T data_;
- dummy_type dummy_;
- };
- void destruct() noexcept { engaged_ = false; }
-
- // dummy_ must be initialized for constexpr constructor.
- constexpr optional_data_dtor_base() noexcept : engaged_(false), dummy_{{}} {}
-
- template <typename... Args>
- constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
- : engaged_(true), data_(std::forward<Args>(args)...) {}
-};
-
-template <typename T>
-class optional_data_base : public optional_data_dtor_base<T> {
- protected:
- using base = optional_data_dtor_base<T>;
- using base::base;
-
- template <typename... Args>
- void construct(Args&&... args) {
- // Use dummy_'s address to work around casting cv-qualified T* to void*.
- ::new (static_cast<void*>(&this->dummy_)) T(std::forward<Args>(args)...);
- this->engaged_ = true;
- }
-
- template <typename U>
- void assign(U&& u) {
- if (this->engaged_) {
- this->data_ = std::forward<U>(u);
- } else {
- construct(std::forward<U>(u));
- }
- }
-};
-
-// TODO(absl-team): Add another class using
-// std::is_trivially_move_constructible trait when available to match
-// http://cplusplus.github.io/LWG/lwg-defects.html#2900, for types that
-// have trivial move but nontrivial copy.
-// Also, we should be checking is_trivially_copyable here, which is not
-// supported now, so we use is_trivially_* traits instead.
-template <typename T,
- bool unused = absl::is_trivially_copy_constructible<T>::value&&
- absl::is_trivially_copy_assignable<typename std::remove_cv<
- T>::type>::value&& std::is_trivially_destructible<T>::value>
-class optional_data;
-
-// Trivially copyable types
-template <typename T>
-class optional_data<T, true> : public optional_data_base<T> {
- protected:
- using optional_data_base<T>::optional_data_base;
-};
-
-template <typename T>
-class optional_data<T, false> : public optional_data_base<T> {
- protected:
- using optional_data_base<T>::optional_data_base;
-
- optional_data() = default;
-
- optional_data(const optional_data& rhs) : optional_data_base<T>() {
- if (rhs.engaged_) {
- this->construct(rhs.data_);
- }
- }
-
- optional_data(optional_data&& rhs) noexcept(
- absl::default_allocator_is_nothrow::value ||
- std::is_nothrow_move_constructible<T>::value)
- : optional_data_base<T>() {
- if (rhs.engaged_) {
- this->construct(std::move(rhs.data_));
- }
- }
-
- optional_data& operator=(const optional_data& rhs) {
- if (rhs.engaged_) {
- this->assign(rhs.data_);
- } else {
- this->destruct();
- }
- return *this;
- }
-
- optional_data& operator=(optional_data&& rhs) noexcept(
- std::is_nothrow_move_assignable<T>::value&&
- std::is_nothrow_move_constructible<T>::value) {
- if (rhs.engaged_) {
- this->assign(std::move(rhs.data_));
- } else {
- this->destruct();
- }
- return *this;
- }
-};
-
-// Ordered by level of restriction, from low to high.
-// Copyable implies movable.
-enum class copy_traits { copyable = 0, movable = 1, non_movable = 2 };
-
-// Base class for enabling/disabling copy/move constructor.
-template <copy_traits>
-class optional_ctor_base;
-
-template <>
-class optional_ctor_base<copy_traits::copyable> {
- public:
- constexpr optional_ctor_base() = default;
- optional_ctor_base(const optional_ctor_base&) = default;
- optional_ctor_base(optional_ctor_base&&) = default;
- optional_ctor_base& operator=(const optional_ctor_base&) = default;
- optional_ctor_base& operator=(optional_ctor_base&&) = default;
-};
-
-template <>
-class optional_ctor_base<copy_traits::movable> {
- public:
- constexpr optional_ctor_base() = default;
- optional_ctor_base(const optional_ctor_base&) = delete;
- optional_ctor_base(optional_ctor_base&&) = default;
- optional_ctor_base& operator=(const optional_ctor_base&) = default;
- optional_ctor_base& operator=(optional_ctor_base&&) = default;
-};
-
-template <>
-class optional_ctor_base<copy_traits::non_movable> {
- public:
- constexpr optional_ctor_base() = default;
- optional_ctor_base(const optional_ctor_base&) = delete;
- optional_ctor_base(optional_ctor_base&&) = delete;
- optional_ctor_base& operator=(const optional_ctor_base&) = default;
- optional_ctor_base& operator=(optional_ctor_base&&) = default;
-};
-
-// Base class for enabling/disabling copy/move assignment.
-template <copy_traits>
-class optional_assign_base;
-
-template <>
-class optional_assign_base<copy_traits::copyable> {
- public:
- constexpr optional_assign_base() = default;
- optional_assign_base(const optional_assign_base&) = default;
- optional_assign_base(optional_assign_base&&) = default;
- optional_assign_base& operator=(const optional_assign_base&) = default;
- optional_assign_base& operator=(optional_assign_base&&) = default;
-};
-
-template <>
-class optional_assign_base<copy_traits::movable> {
- public:
- constexpr optional_assign_base() = default;
- optional_assign_base(const optional_assign_base&) = default;
- optional_assign_base(optional_assign_base&&) = default;
- optional_assign_base& operator=(const optional_assign_base&) = delete;
- optional_assign_base& operator=(optional_assign_base&&) = default;
-};
-
-template <>
-class optional_assign_base<copy_traits::non_movable> {
- public:
- constexpr optional_assign_base() = default;
- optional_assign_base(const optional_assign_base&) = default;
- optional_assign_base(optional_assign_base&&) = default;
- optional_assign_base& operator=(const optional_assign_base&) = delete;
- optional_assign_base& operator=(optional_assign_base&&) = delete;
-};
-
-template <typename T>
-struct ctor_copy_traits {
- static constexpr copy_traits traits =
- std::is_copy_constructible<T>::value
- ? copy_traits::copyable
- : std::is_move_constructible<T>::value ? copy_traits::movable
- : copy_traits::non_movable;
-};
-
-template <typename T>
-struct assign_copy_traits {
- static constexpr copy_traits traits =
- absl::is_copy_assignable<T>::value && std::is_copy_constructible<T>::value
- ? copy_traits::copyable
- : absl::is_move_assignable<T>::value &&
- std::is_move_constructible<T>::value
- ? copy_traits::movable
- : copy_traits::non_movable;
-};
-
-// Whether T is constructible or convertible from optional<U>.
-template <typename T, typename U>
-struct is_constructible_convertible_from_optional
- : std::integral_constant<
- bool, std::is_constructible<T, optional<U>&>::value ||
- std::is_constructible<T, optional<U>&&>::value ||
- std::is_constructible<T, const optional<U>&>::value ||
- std::is_constructible<T, const optional<U>&&>::value ||
- std::is_convertible<optional<U>&, T>::value ||
- std::is_convertible<optional<U>&&, T>::value ||
- std::is_convertible<const optional<U>&, T>::value ||
- std::is_convertible<const optional<U>&&, T>::value> {};
-
-// Whether T is constructible or convertible or assignable from optional<U>.
-template <typename T, typename U>
-struct is_constructible_convertible_assignable_from_optional
- : std::integral_constant<
- bool, is_constructible_convertible_from_optional<T, U>::value ||
- std::is_assignable<T&, optional<U>&>::value ||
- std::is_assignable<T&, optional<U>&&>::value ||
- std::is_assignable<T&, const optional<U>&>::value ||
- std::is_assignable<T&, const optional<U>&&>::value> {};
-
-// Helper function used by [optional.relops], [optional.comp_with_t],
-// for checking whether an expression is convertible to bool.
-bool convertible_to_bool(bool);
-
-// Base class for std::hash<absl::optional<T>>:
-// If std::hash<std::remove_const_t<T>> is enabled, it provides operator() to
-// compute the hash; Otherwise, it is disabled.
-// Reference N4659 23.14.15 [unord.hash].
-template <typename T, typename = size_t>
-struct optional_hash_base {
- optional_hash_base() = delete;
- optional_hash_base(const optional_hash_base&) = delete;
- optional_hash_base(optional_hash_base&&) = delete;
- optional_hash_base& operator=(const optional_hash_base&) = delete;
- optional_hash_base& operator=(optional_hash_base&&) = delete;
-};
-
-template <typename T>
-struct optional_hash_base<T, decltype(std::hash<absl::remove_const_t<T> >()(
- std::declval<absl::remove_const_t<T> >()))> {
- using argument_type = absl::optional<T>;
- using result_type = size_t;
- size_t operator()(const absl::optional<T>& opt) const {
- absl::type_traits_internal::AssertHashEnabled<absl::remove_const_t<T>>();
- if (opt) {
- return std::hash<absl::remove_const_t<T> >()(*opt);
- } else {
- return static_cast<size_t>(0x297814aaad196e6dULL);
- }
- }
-};
-
-} // namespace optional_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_TYPES_INTERNAL_OPTIONAL_H_
diff --git a/contrib/restricted/abseil-cpp/absl/types/internal/variant.h b/contrib/restricted/abseil-cpp/absl/types/internal/variant.h
deleted file mode 100644
index 4cb15f2921d..00000000000
--- a/contrib/restricted/abseil-cpp/absl/types/internal/variant.h
+++ /dev/null
@@ -1,1622 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Implementation details of absl/types/variant.h, pulled into a
-// separate file to avoid cluttering the top of the API header with
-// implementation details.
-
-#ifndef ABSL_TYPES_INTERNAL_VARIANT_H_
-#define ABSL_TYPES_INTERNAL_VARIANT_H_
-
-#include <cassert>
-#include <cstddef>
-#include <cstdlib>
-#include <memory>
-#include <stdexcept>
-#include <tuple>
-#include <type_traits>
-#include <utility>
-
-#include "absl/base/config.h"
-#include "absl/base/internal/identity.h"
-#include "absl/base/internal/inline_variable.h"
-#include "absl/base/internal/invoke.h"
-#include "absl/base/macros.h"
-#include "absl/base/optimization.h"
-#include "absl/meta/type_traits.h"
-#include "absl/types/bad_variant_access.h"
-#include "absl/utility/utility.h"
-
-#if !defined(ABSL_USES_STD_VARIANT)
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-template <class... Types>
-class variant;
-
-ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, static_cast<size_t>(-1));
-
-template <class T>
-struct variant_size;
-
-template <std::size_t I, class T>
-struct variant_alternative;
-
-namespace variant_internal {
-
-// NOTE: See specializations below for details.
-template <std::size_t I, class T>
-struct VariantAlternativeSfinae {};
-
-// Requires: I < variant_size_v<T>.
-//
-// Value: The Ith type of Types...
-template <std::size_t I, class T0, class... Tn>
-struct VariantAlternativeSfinae<I, variant<T0, Tn...>>
- : VariantAlternativeSfinae<I - 1, variant<Tn...>> {};
-
-// Value: T0
-template <class T0, class... Ts>
-struct VariantAlternativeSfinae<0, variant<T0, Ts...>> {
- using type = T0;
-};
-
-template <std::size_t I, class T>
-using VariantAlternativeSfinaeT = typename VariantAlternativeSfinae<I, T>::type;
-
-// NOTE: Requires T to be a reference type.
-template <class T, class U>
-struct GiveQualsTo;
-
-template <class T, class U>
-struct GiveQualsTo<T&, U> {
- using type = U&;
-};
-
-template <class T, class U>
-struct GiveQualsTo<T&&, U> {
- using type = U&&;
-};
-
-template <class T, class U>
-struct GiveQualsTo<const T&, U> {
- using type = const U&;
-};
-
-template <class T, class U>
-struct GiveQualsTo<const T&&, U> {
- using type = const U&&;
-};
-
-template <class T, class U>
-struct GiveQualsTo<volatile T&, U> {
- using type = volatile U&;
-};
-
-template <class T, class U>
-struct GiveQualsTo<volatile T&&, U> {
- using type = volatile U&&;
-};
-
-template <class T, class U>
-struct GiveQualsTo<volatile const T&, U> {
- using type = volatile const U&;
-};
-
-template <class T, class U>
-struct GiveQualsTo<volatile const T&&, U> {
- using type = volatile const U&&;
-};
-
-template <class T, class U>
-using GiveQualsToT = typename GiveQualsTo<T, U>::type;
-
-// Convenience alias, since size_t integral_constant is used a lot in this file.
-template <std::size_t I>
-using SizeT = std::integral_constant<std::size_t, I>;
-
-using NPos = SizeT<variant_npos>;
-
-template <class Variant, class T, class = void>
-struct IndexOfConstructedType {};
-
-template <std::size_t I, class Variant>
-struct VariantAccessResultImpl;
-
-template <std::size_t I, template <class...> class Variantemplate, class... T>
-struct VariantAccessResultImpl<I, Variantemplate<T...>&> {
- using type = typename absl::variant_alternative<I, variant<T...>>::type&;
-};
-
-template <std::size_t I, template <class...> class Variantemplate, class... T>
-struct VariantAccessResultImpl<I, const Variantemplate<T...>&> {
- using type =
- const typename absl::variant_alternative<I, variant<T...>>::type&;
-};
-
-template <std::size_t I, template <class...> class Variantemplate, class... T>
-struct VariantAccessResultImpl<I, Variantemplate<T...>&&> {
- using type = typename absl::variant_alternative<I, variant<T...>>::type&&;
-};
-
-template <std::size_t I, template <class...> class Variantemplate, class... T>
-struct VariantAccessResultImpl<I, const Variantemplate<T...>&&> {
- using type =
- const typename absl::variant_alternative<I, variant<T...>>::type&&;
-};
-
-template <std::size_t I, class Variant>
-using VariantAccessResult =
- typename VariantAccessResultImpl<I, Variant&&>::type;
-
-// NOTE: This is used instead of std::array to reduce instantiation overhead.
-template <class T, std::size_t Size>
-struct SimpleArray {
- static_assert(Size != 0, "");
- T value[Size];
-};
-
-template <class T>
-struct AccessedType {
- using type = T;
-};
-
-template <class T>
-using AccessedTypeT = typename AccessedType<T>::type;
-
-template <class T, std::size_t Size>
-struct AccessedType<SimpleArray<T, Size>> {
- using type = AccessedTypeT<T>;
-};
-
-template <class T>
-constexpr T AccessSimpleArray(const T& value) {
- return value;
-}
-
-template <class T, std::size_t Size, class... SizeT>
-constexpr AccessedTypeT<T> AccessSimpleArray(const SimpleArray<T, Size>& table,
- std::size_t head_index,
- SizeT... tail_indices) {
- return AccessSimpleArray(table.value[head_index], tail_indices...);
-}
-
-// Note: Intentionally is an alias.
-template <class T>
-using AlwaysZero = SizeT<0>;
-
-template <class Op, class... Vs>
-struct VisitIndicesResultImpl {
- using type = absl::result_of_t<Op(AlwaysZero<Vs>...)>;
-};
-
-template <class Op, class... Vs>
-using VisitIndicesResultT = typename VisitIndicesResultImpl<Op, Vs...>::type;
-
-template <class ReturnType, class FunctionObject, class EndIndices,
- class BoundIndices>
-struct MakeVisitationMatrix;
-
-template <class ReturnType, class FunctionObject, std::size_t... Indices>
-constexpr ReturnType call_with_indices(FunctionObject&& function) {
- static_assert(
- std::is_same<ReturnType, decltype(std::declval<FunctionObject>()(
- SizeT<Indices>()...))>::value,
- "Not all visitation overloads have the same return type.");
- return std::forward<FunctionObject>(function)(SizeT<Indices>()...);
-}
-
-template <class ReturnType, class FunctionObject, std::size_t... BoundIndices>
-struct MakeVisitationMatrix<ReturnType, FunctionObject, index_sequence<>,
- index_sequence<BoundIndices...>> {
- using ResultType = ReturnType (*)(FunctionObject&&);
- static constexpr ResultType Run() {
- return &call_with_indices<ReturnType, FunctionObject,
- (BoundIndices - 1)...>;
- }
-};
-
-template <typename Is, std::size_t J>
-struct AppendToIndexSequence;
-
-template <typename Is, std::size_t J>
-using AppendToIndexSequenceT = typename AppendToIndexSequence<Is, J>::type;
-
-template <std::size_t... Is, std::size_t J>
-struct AppendToIndexSequence<index_sequence<Is...>, J> {
- using type = index_sequence<Is..., J>;
-};
-
-template <class ReturnType, class FunctionObject, class EndIndices,
- class CurrIndices, class BoundIndices>
-struct MakeVisitationMatrixImpl;
-
-template <class ReturnType, class FunctionObject, class EndIndices,
- std::size_t... CurrIndices, class BoundIndices>
-struct MakeVisitationMatrixImpl<ReturnType, FunctionObject, EndIndices,
- index_sequence<CurrIndices...>, BoundIndices> {
- using ResultType = SimpleArray<
- typename MakeVisitationMatrix<ReturnType, FunctionObject, EndIndices,
- index_sequence<>>::ResultType,
- sizeof...(CurrIndices)>;
-
- static constexpr ResultType Run() {
- return {{MakeVisitationMatrix<
- ReturnType, FunctionObject, EndIndices,
- AppendToIndexSequenceT<BoundIndices, CurrIndices>>::Run()...}};
- }
-};
-
-template <class ReturnType, class FunctionObject, std::size_t HeadEndIndex,
- std::size_t... TailEndIndices, std::size_t... BoundIndices>
-struct MakeVisitationMatrix<ReturnType, FunctionObject,
- index_sequence<HeadEndIndex, TailEndIndices...>,
- index_sequence<BoundIndices...>>
- : MakeVisitationMatrixImpl<ReturnType, FunctionObject,
- index_sequence<TailEndIndices...>,
- absl::make_index_sequence<HeadEndIndex>,
- index_sequence<BoundIndices...>> {};
-
-struct UnreachableSwitchCase {
- template <class Op>
- [[noreturn]] static VisitIndicesResultT<Op, std::size_t> Run(
- Op&& /*ignored*/) {
- ABSL_UNREACHABLE();
- }
-};
-
-template <class Op, std::size_t I>
-struct ReachableSwitchCase {
- static VisitIndicesResultT<Op, std::size_t> Run(Op&& op) {
- return absl::base_internal::invoke(std::forward<Op>(op), SizeT<I>());
- }
-};
-
-// The number 33 is just a guess at a reasonable maximum to our switch. It is
-// not based on any analysis. The reason it is a power of 2 plus 1 instead of a
-// power of 2 is because the number was picked to correspond to a power of 2
-// amount of "normal" alternatives, plus one for the possibility of the user
-// providing "monostate" in addition to the more natural alternatives.
-ABSL_INTERNAL_INLINE_CONSTEXPR(std::size_t, MaxUnrolledVisitCases, 33);
-
-// Note: The default-definition is for unreachable cases.
-template <bool IsReachable>
-struct PickCaseImpl {
- template <class Op, std::size_t I>
- using Apply = UnreachableSwitchCase;
-};
-
-template <>
-struct PickCaseImpl</*IsReachable =*/true> {
- template <class Op, std::size_t I>
- using Apply = ReachableSwitchCase<Op, I>;
-};
-
-// Note: This form of dance with template aliases is to make sure that we
-// instantiate a number of templates proportional to the number of variant
-// alternatives rather than a number of templates proportional to our
-// maximum unrolled amount of visitation cases (aliases are effectively
-// "free" whereas other template instantiations are costly).
-template <class Op, std::size_t I, std::size_t EndIndex>
-using PickCase = typename PickCaseImpl<(I < EndIndex)>::template Apply<Op, I>;
-
-template <class ReturnType>
-[[noreturn]] ReturnType TypedThrowBadVariantAccess() {
- absl::variant_internal::ThrowBadVariantAccess();
-}
-
-// Given N variant sizes, determine the number of cases there would need to be
-// in a single switch-statement that would cover every possibility in the
-// corresponding N-ary visit operation.
-template <std::size_t... NumAlternatives>
-struct NumCasesOfSwitch;
-
-template <std::size_t HeadNumAlternatives, std::size_t... TailNumAlternatives>
-struct NumCasesOfSwitch<HeadNumAlternatives, TailNumAlternatives...> {
- static constexpr std::size_t value =
- (HeadNumAlternatives + 1) *
- NumCasesOfSwitch<TailNumAlternatives...>::value;
-};
-
-template <>
-struct NumCasesOfSwitch<> {
- static constexpr std::size_t value = 1;
-};
-
-// A switch statement optimizes better than the table of function pointers.
-template <std::size_t EndIndex>
-struct VisitIndicesSwitch {
- static_assert(EndIndex <= MaxUnrolledVisitCases,
- "Maximum unrolled switch size exceeded.");
-
- template <class Op>
- static VisitIndicesResultT<Op, std::size_t> Run(Op&& op, std::size_t i) {
- switch (i) {
- case 0:
- return PickCase<Op, 0, EndIndex>::Run(std::forward<Op>(op));
- case 1:
- return PickCase<Op, 1, EndIndex>::Run(std::forward<Op>(op));
- case 2:
- return PickCase<Op, 2, EndIndex>::Run(std::forward<Op>(op));
- case 3:
- return PickCase<Op, 3, EndIndex>::Run(std::forward<Op>(op));
- case 4:
- return PickCase<Op, 4, EndIndex>::Run(std::forward<Op>(op));
- case 5:
- return PickCase<Op, 5, EndIndex>::Run(std::forward<Op>(op));
- case 6:
- return PickCase<Op, 6, EndIndex>::Run(std::forward<Op>(op));
- case 7:
- return PickCase<Op, 7, EndIndex>::Run(std::forward<Op>(op));
- case 8:
- return PickCase<Op, 8, EndIndex>::Run(std::forward<Op>(op));
- case 9:
- return PickCase<Op, 9, EndIndex>::Run(std::forward<Op>(op));
- case 10:
- return PickCase<Op, 10, EndIndex>::Run(std::forward<Op>(op));
- case 11:
- return PickCase<Op, 11, EndIndex>::Run(std::forward<Op>(op));
- case 12:
- return PickCase<Op, 12, EndIndex>::Run(std::forward<Op>(op));
- case 13:
- return PickCase<Op, 13, EndIndex>::Run(std::forward<Op>(op));
- case 14:
- return PickCase<Op, 14, EndIndex>::Run(std::forward<Op>(op));
- case 15:
- return PickCase<Op, 15, EndIndex>::Run(std::forward<Op>(op));
- case 16:
- return PickCase<Op, 16, EndIndex>::Run(std::forward<Op>(op));
- case 17:
- return PickCase<Op, 17, EndIndex>::Run(std::forward<Op>(op));
- case 18:
- return PickCase<Op, 18, EndIndex>::Run(std::forward<Op>(op));
- case 19:
- return PickCase<Op, 19, EndIndex>::Run(std::forward<Op>(op));
- case 20:
- return PickCase<Op, 20, EndIndex>::Run(std::forward<Op>(op));
- case 21:
- return PickCase<Op, 21, EndIndex>::Run(std::forward<Op>(op));
- case 22:
- return PickCase<Op, 22, EndIndex>::Run(std::forward<Op>(op));
- case 23:
- return PickCase<Op, 23, EndIndex>::Run(std::forward<Op>(op));
- case 24:
- return PickCase<Op, 24, EndIndex>::Run(std::forward<Op>(op));
- case 25:
- return PickCase<Op, 25, EndIndex>::Run(std::forward<Op>(op));
- case 26:
- return PickCase<Op, 26, EndIndex>::Run(std::forward<Op>(op));
- case 27:
- return PickCase<Op, 27, EndIndex>::Run(std::forward<Op>(op));
- case 28:
- return PickCase<Op, 28, EndIndex>::Run(std::forward<Op>(op));
- case 29:
- return PickCase<Op, 29, EndIndex>::Run(std::forward<Op>(op));
- case 30:
- return PickCase<Op, 30, EndIndex>::Run(std::forward<Op>(op));
- case 31:
- return PickCase<Op, 31, EndIndex>::Run(std::forward<Op>(op));
- case 32:
- return PickCase<Op, 32, EndIndex>::Run(std::forward<Op>(op));
- default:
- ABSL_ASSERT(i == variant_npos);
- return absl::base_internal::invoke(std::forward<Op>(op), NPos());
- }
- }
-};
-
-template <std::size_t... EndIndices>
-struct VisitIndicesFallback {
- template <class Op, class... SizeT>
- static VisitIndicesResultT<Op, SizeT...> Run(Op&& op, SizeT... indices) {
- return AccessSimpleArray(
- MakeVisitationMatrix<VisitIndicesResultT<Op, SizeT...>, Op,
- index_sequence<(EndIndices + 1)...>,
- index_sequence<>>::Run(),
- (indices + 1)...)(std::forward<Op>(op));
- }
-};
-
-// Take an N-dimensional series of indices and convert them into a single index
-// without loss of information. The purpose of this is to be able to convert an
-// N-ary visit operation into a single switch statement.
-template <std::size_t...>
-struct FlattenIndices;
-
-template <std::size_t HeadSize, std::size_t... TailSize>
-struct FlattenIndices<HeadSize, TailSize...> {
- template <class... SizeType>
- static constexpr std::size_t Run(std::size_t head, SizeType... tail) {
- return head + HeadSize * FlattenIndices<TailSize...>::Run(tail...);
- }
-};
-
-template <>
-struct FlattenIndices<> {
- static constexpr std::size_t Run() { return 0; }
-};
-
-// Take a single "flattened" index (flattened by FlattenIndices) and determine
-// the value of the index of one of the logically represented dimensions.
-template <std::size_t I, std::size_t IndexToGet, std::size_t HeadSize,
- std::size_t... TailSize>
-struct UnflattenIndex {
- static constexpr std::size_t value =
- UnflattenIndex<I / HeadSize, IndexToGet - 1, TailSize...>::value;
-};
-
-template <std::size_t I, std::size_t HeadSize, std::size_t... TailSize>
-struct UnflattenIndex<I, 0, HeadSize, TailSize...> {
- static constexpr std::size_t value = (I % HeadSize);
-};
-
-// The backend for converting an N-ary visit operation into a unary visit.
-template <class IndexSequence, std::size_t... EndIndices>
-struct VisitIndicesVariadicImpl;
-
-template <std::size_t... N, std::size_t... EndIndices>
-struct VisitIndicesVariadicImpl<absl::index_sequence<N...>, EndIndices...> {
- // A type that can take an N-ary function object and converts it to a unary
- // function object that takes a single, flattened index, and "unflattens" it
- // into its individual dimensions when forwarding to the wrapped object.
- template <class Op>
- struct FlattenedOp {
- template <std::size_t I>
- VisitIndicesResultT<Op, decltype(EndIndices)...> operator()(
- SizeT<I> /*index*/) && {
- return base_internal::invoke(
- std::forward<Op>(op),
- SizeT<UnflattenIndex<I, N, (EndIndices + 1)...>::value -
- std::size_t{1}>()...);
- }
-
- Op&& op;
- };
-
- template <class Op, class... SizeType>
- static VisitIndicesResultT<Op, decltype(EndIndices)...> Run(Op&& op,
- SizeType... i) {
- return VisitIndicesSwitch<NumCasesOfSwitch<EndIndices...>::value>::Run(
- FlattenedOp<Op>{std::forward<Op>(op)},
- FlattenIndices<(EndIndices + std::size_t{1})...>::Run(
- (i + std::size_t{1})...));
- }
-};
-
-template <std::size_t... EndIndices>
-struct VisitIndicesVariadic
- : VisitIndicesVariadicImpl<absl::make_index_sequence<sizeof...(EndIndices)>,
- EndIndices...> {};
-
-// This implementation will flatten N-ary visit operations into a single switch
-// statement when the number of cases would be less than our maximum specified
-// switch-statement size.
-// TODO(calabrese)
-// Based on benchmarks, determine whether the function table approach actually
-// does optimize better than a chain of switch statements and possibly update
-// the implementation accordingly. Also consider increasing the maximum switch
-// size.
-template <std::size_t... EndIndices>
-struct VisitIndices
- : absl::conditional_t<(NumCasesOfSwitch<EndIndices...>::value <=
- MaxUnrolledVisitCases),
- VisitIndicesVariadic<EndIndices...>,
- VisitIndicesFallback<EndIndices...>> {};
-
-template <std::size_t EndIndex>
-struct VisitIndices<EndIndex>
- : absl::conditional_t<(EndIndex <= MaxUnrolledVisitCases),
- VisitIndicesSwitch<EndIndex>,
- VisitIndicesFallback<EndIndex>> {};
-
-// Suppress bogus warning on MSVC: MSVC complains that the `reinterpret_cast`
-// below is returning the address of a temporary or local object.
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable : 4172)
-#endif // _MSC_VER
-
-// TODO(calabrese) std::launder
-// TODO(calabrese) constexpr
-// NOTE: DO NOT REMOVE the `inline` keyword as it is necessary to work around a
-// MSVC bug. See https://github.com/abseil/abseil-cpp/issues/129 for details.
-template <class Self, std::size_t I>
-inline VariantAccessResult<I, Self> AccessUnion(Self&& self, SizeT<I> /*i*/) {
- return reinterpret_cast<VariantAccessResult<I, Self>>(self);
-}
-
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif // _MSC_VER
-
-template <class T>
-void DeducedDestroy(T& self) { // NOLINT
- self.~T();
-}
-
-// NOTE: This type exists as a single entity for variant and its bases to
-// befriend. It contains helper functionality that manipulates the state of the
-// variant, such as the implementation of things like assignment and emplace
-// operations.
-struct VariantCoreAccess {
- template <class VariantType>
- static typename VariantType::Variant& Derived(VariantType& self) { // NOLINT
- return static_cast<typename VariantType::Variant&>(self);
- }
-
- template <class VariantType>
- static const typename VariantType::Variant& Derived(
- const VariantType& self) { // NOLINT
- return static_cast<const typename VariantType::Variant&>(self);
- }
-
- template <class VariantType>
- static void Destroy(VariantType& self) { // NOLINT
- Derived(self).destroy();
- self.index_ = absl::variant_npos;
- }
-
- template <class Variant>
- static void SetIndex(Variant& self, std::size_t i) { // NOLINT
- self.index_ = i;
- }
-
- template <class Variant>
- static void InitFrom(Variant& self, Variant&& other) { // NOLINT
- VisitIndices<absl::variant_size<Variant>::value>::Run(
- InitFromVisitor<Variant, Variant&&>{&self,
- std::forward<Variant>(other)},
- other.index());
- self.index_ = other.index();
- }
-
- // Access a variant alternative, assuming the index is correct.
- template <std::size_t I, class Variant>
- static VariantAccessResult<I, Variant> Access(Variant&& self) {
- // This cast instead of invocation of AccessUnion with an rvalue is a
- // workaround for msvc. Without this there is a runtime failure when dealing
- // with rvalues.
- // TODO(calabrese) Reduce test case and find a simpler workaround.
- return static_cast<VariantAccessResult<I, Variant>>(
- variant_internal::AccessUnion(self.state_, SizeT<I>()));
- }
-
- // Access a variant alternative, throwing if the index is incorrect.
- template <std::size_t I, class Variant>
- static VariantAccessResult<I, Variant> CheckedAccess(Variant&& self) {
- if (ABSL_PREDICT_FALSE(self.index_ != I)) {
- TypedThrowBadVariantAccess<VariantAccessResult<I, Variant>>();
- }
-
- return Access<I>(std::forward<Variant>(self));
- }
-
- // The implementation of the move-assignment operation for a variant.
- template <class VType>
- struct MoveAssignVisitor {
- using DerivedType = typename VType::Variant;
- template <std::size_t NewIndex>
- void operator()(SizeT<NewIndex> /*new_i*/) const {
- if (left->index_ == NewIndex) {
- Access<NewIndex>(*left) = std::move(Access<NewIndex>(*right));
- } else {
- Derived(*left).template emplace<NewIndex>(
- std::move(Access<NewIndex>(*right)));
- }
- }
-
- void operator()(SizeT<absl::variant_npos> /*new_i*/) const {
- Destroy(*left);
- }
-
- VType* left;
- VType* right;
- };
-
- template <class VType>
- static MoveAssignVisitor<VType> MakeMoveAssignVisitor(VType* left,
- VType* other) {
- return {left, other};
- }
-
- // The implementation of the assignment operation for a variant.
- template <class VType>
- struct CopyAssignVisitor {
- using DerivedType = typename VType::Variant;
- template <std::size_t NewIndex>
- void operator()(SizeT<NewIndex> /*new_i*/) const {
- using New =
- typename absl::variant_alternative<NewIndex, DerivedType>::type;
-
- if (left->index_ == NewIndex) {
- Access<NewIndex>(*left) = Access<NewIndex>(*right);
- } else if (std::is_nothrow_copy_constructible<New>::value ||
- !std::is_nothrow_move_constructible<New>::value) {
- Derived(*left).template emplace<NewIndex>(Access<NewIndex>(*right));
- } else {
- Derived(*left) = DerivedType(Derived(*right));
- }
- }
-
- void operator()(SizeT<absl::variant_npos> /*new_i*/) const {
- Destroy(*left);
- }
-
- VType* left;
- const VType* right;
- };
-
- template <class VType>
- static CopyAssignVisitor<VType> MakeCopyAssignVisitor(VType* left,
- const VType& other) {
- return {left, &other};
- }
-
- // The implementation of conversion-assignment operations for variant.
- template <class Left, class QualifiedNew>
- struct ConversionAssignVisitor {
- using NewIndex =
- variant_internal::IndexOfConstructedType<Left, QualifiedNew>;
-
- void operator()(SizeT<NewIndex::value> /*old_i*/
- ) const {
- Access<NewIndex::value>(*left) = std::forward<QualifiedNew>(other);
- }
-
- template <std::size_t OldIndex>
- void operator()(SizeT<OldIndex> /*old_i*/
- ) const {
- using New =
- typename absl::variant_alternative<NewIndex::value, Left>::type;
- if (std::is_nothrow_constructible<New, QualifiedNew>::value ||
- !std::is_nothrow_move_constructible<New>::value) {
- left->template emplace<NewIndex::value>(
- std::forward<QualifiedNew>(other));
- } else {
- // the standard says "equivalent to
- // operator=(variant(std::forward<T>(t)))", but we use `emplace` here
- // because the variant's move assignment operator could be deleted.
- left->template emplace<NewIndex::value>(
- New(std::forward<QualifiedNew>(other)));
- }
- }
-
- Left* left;
- QualifiedNew&& other;
- };
-
- template <class Left, class QualifiedNew>
- static ConversionAssignVisitor<Left, QualifiedNew>
- MakeConversionAssignVisitor(Left* left, QualifiedNew&& qual) {
- return {left, std::forward<QualifiedNew>(qual)};
- }
-
- // Backend for operations for `emplace()` which destructs `*self` then
- // construct a new alternative with `Args...`.
- template <std::size_t NewIndex, class Self, class... Args>
- static typename absl::variant_alternative<NewIndex, Self>::type& Replace(
- Self* self, Args&&... args) {
- Destroy(*self);
- using New = typename absl::variant_alternative<NewIndex, Self>::type;
- New* const result = ::new (static_cast<void*>(&self->state_))
- New(std::forward<Args>(args)...);
- self->index_ = NewIndex;
- return *result;
- }
-
- template <class LeftVariant, class QualifiedRightVariant>
- struct InitFromVisitor {
- template <std::size_t NewIndex>
- void operator()(SizeT<NewIndex> /*new_i*/) const {
- using Alternative =
- typename variant_alternative<NewIndex, LeftVariant>::type;
- ::new (static_cast<void*>(&left->state_)) Alternative(
- Access<NewIndex>(std::forward<QualifiedRightVariant>(right)));
- }
-
- void operator()(SizeT<absl::variant_npos> /*new_i*/) const {
- // This space intentionally left blank.
- }
- LeftVariant* left;
- QualifiedRightVariant&& right;
- };
-};
-
-template <class Expected, class... T>
-struct IndexOfImpl;
-
-template <class Expected>
-struct IndexOfImpl<Expected> {
- using IndexFromEnd = SizeT<0>;
- using MatchedIndexFromEnd = IndexFromEnd;
- using MultipleMatches = std::false_type;
-};
-
-template <class Expected, class Head, class... Tail>
-struct IndexOfImpl<Expected, Head, Tail...> : IndexOfImpl<Expected, Tail...> {
- using IndexFromEnd =
- SizeT<IndexOfImpl<Expected, Tail...>::IndexFromEnd::value + 1>;
-};
-
-template <class Expected, class... Tail>
-struct IndexOfImpl<Expected, Expected, Tail...>
- : IndexOfImpl<Expected, Tail...> {
- using IndexFromEnd =
- SizeT<IndexOfImpl<Expected, Tail...>::IndexFromEnd::value + 1>;
- using MatchedIndexFromEnd = IndexFromEnd;
- using MultipleMatches = std::integral_constant<
- bool, IndexOfImpl<Expected, Tail...>::MatchedIndexFromEnd::value != 0>;
-};
-
-template <class Expected, class... Types>
-struct IndexOfMeta {
- using Results = IndexOfImpl<Expected, Types...>;
- static_assert(!Results::MultipleMatches::value,
- "Attempted to access a variant by specifying a type that "
- "matches more than one alternative.");
- static_assert(Results::MatchedIndexFromEnd::value != 0,
- "Attempted to access a variant by specifying a type that does "
- "not match any alternative.");
- using type = SizeT<sizeof...(Types) - Results::MatchedIndexFromEnd::value>;
-};
-
-template <class Expected, class... Types>
-using IndexOf = typename IndexOfMeta<Expected, Types...>::type;
-
-template <class Variant, class T, std::size_t CurrIndex>
-struct UnambiguousIndexOfImpl;
-
-// Terminating case encountered once we've checked all of the alternatives
-template <class T, std::size_t CurrIndex>
-struct UnambiguousIndexOfImpl<variant<>, T, CurrIndex> : SizeT<CurrIndex> {};
-
-// Case where T is not Head
-template <class Head, class... Tail, class T, std::size_t CurrIndex>
-struct UnambiguousIndexOfImpl<variant<Head, Tail...>, T, CurrIndex>
- : UnambiguousIndexOfImpl<variant<Tail...>, T, CurrIndex + 1>::type {};
-
-// Case where T is Head
-template <class Head, class... Tail, std::size_t CurrIndex>
-struct UnambiguousIndexOfImpl<variant<Head, Tail...>, Head, CurrIndex>
- : SizeT<UnambiguousIndexOfImpl<variant<Tail...>, Head, 0>::value ==
- sizeof...(Tail)
- ? CurrIndex
- : CurrIndex + sizeof...(Tail) + 1> {};
-
-template <class Variant, class T>
-struct UnambiguousIndexOf;
-
-struct NoMatch {
- struct type {};
-};
-
-template <class... Alts, class T>
-struct UnambiguousIndexOf<variant<Alts...>, T>
- : std::conditional<UnambiguousIndexOfImpl<variant<Alts...>, T, 0>::value !=
- sizeof...(Alts),
- UnambiguousIndexOfImpl<variant<Alts...>, T, 0>,
- NoMatch>::type::type {};
-
-template <class T, std::size_t /*Dummy*/>
-using UnambiguousTypeOfImpl = T;
-
-template <class Variant, class T>
-using UnambiguousTypeOfT =
- UnambiguousTypeOfImpl<T, UnambiguousIndexOf<Variant, T>::value>;
-
-template <class H, class... T>
-class VariantStateBase;
-
-// This is an implementation of the "imaginary function" that is described in
-// [variant.ctor]
-// It is used in order to determine which alternative to construct during
-// initialization from some type T.
-template <class Variant, std::size_t I = 0>
-struct ImaginaryFun;
-
-template <std::size_t I>
-struct ImaginaryFun<variant<>, I> {
- static void Run() = delete;
-};
-
-template <class H, class... T, std::size_t I>
-struct ImaginaryFun<variant<H, T...>, I> : ImaginaryFun<variant<T...>, I + 1> {
- using ImaginaryFun<variant<T...>, I + 1>::Run;
-
- // NOTE: const& and && are used instead of by-value due to lack of guaranteed
- // move elision of C++17. This may have other minor differences, but tests
- // pass.
- static SizeT<I> Run(const H&, SizeT<I>);
- static SizeT<I> Run(H&&, SizeT<I>);
-};
-
-// The following metafunctions are used in constructor and assignment
-// constraints.
-template <class Self, class T>
-struct IsNeitherSelfNorInPlace : std::true_type {};
-
-template <class Self>
-struct IsNeitherSelfNorInPlace<Self, Self> : std::false_type {};
-
-template <class Self, class T>
-struct IsNeitherSelfNorInPlace<Self, in_place_type_t<T>> : std::false_type {};
-
-template <class Self, std::size_t I>
-struct IsNeitherSelfNorInPlace<Self, in_place_index_t<I>> : std::false_type {};
-
-template <class Variant, class T>
-struct IndexOfConstructedType<
- Variant, T,
- void_t<decltype(ImaginaryFun<Variant>::Run(std::declval<T>(), {}))>>
- : decltype(ImaginaryFun<Variant>::Run(std::declval<T>(), {})) {};
-
-template <std::size_t... Is>
-struct ContainsVariantNPos
- : absl::negation<std::is_same< // NOLINT
- std::integer_sequence<bool, 0 <= Is...>,
- std::integer_sequence<bool, Is != absl::variant_npos...>>> {};
-
-template <class Op, class... QualifiedVariants>
-using RawVisitResult =
- absl::result_of_t<Op(VariantAccessResult<0, QualifiedVariants>...)>;
-
-// NOTE: The spec requires that all return-paths yield the same type and is not
-// SFINAE-friendly, so we can deduce the return type by examining the first
-// result. If it's not callable, then we get an error, but are compliant and
-// fast to compile.
-// TODO(calabrese) Possibly rewrite in a way that yields better compile errors
-// at the cost of longer compile-times.
-template <class Op, class... QualifiedVariants>
-struct VisitResultImpl {
- using type =
- absl::result_of_t<Op(VariantAccessResult<0, QualifiedVariants>...)>;
-};
-
-// Done in two steps intentionally so that we don't cause substitution to fail.
-template <class Op, class... QualifiedVariants>
-using VisitResult = typename VisitResultImpl<Op, QualifiedVariants...>::type;
-
-template <class Op, class... QualifiedVariants>
-struct PerformVisitation {
- using ReturnType = VisitResult<Op, QualifiedVariants...>;
-
- template <std::size_t... Is>
- constexpr ReturnType operator()(SizeT<Is>... indices) const {
- return Run(typename ContainsVariantNPos<Is...>::type{},
- absl::index_sequence_for<QualifiedVariants...>(), indices...);
- }
-
- template <std::size_t... TupIs, std::size_t... Is>
- constexpr ReturnType Run(std::false_type /*has_valueless*/,
- index_sequence<TupIs...>, SizeT<Is>...) const {
- static_assert(
- std::is_same<ReturnType,
- absl::result_of_t<Op(VariantAccessResult<
- Is, QualifiedVariants>...)>>::value,
- "All visitation overloads must have the same return type.");
- return absl::base_internal::invoke(
- std::forward<Op>(op),
- VariantCoreAccess::Access<Is>(
- std::forward<QualifiedVariants>(std::get<TupIs>(variant_tup)))...);
- }
-
- template <std::size_t... TupIs, std::size_t... Is>
- [[noreturn]] ReturnType Run(std::true_type /*has_valueless*/,
- index_sequence<TupIs...>, SizeT<Is>...) const {
- absl::variant_internal::ThrowBadVariantAccess();
- }
-
- // TODO(calabrese) Avoid using a tuple, which causes lots of instantiations
- // Attempts using lambda variadic captures fail on current GCC.
- std::tuple<QualifiedVariants&&...> variant_tup;
- Op&& op;
-};
-
-template <class... T>
-union Union;
-
-// We want to allow for variant<> to be trivial. For that, we need the default
-// constructor to be trivial, which means we can't define it ourselves.
-// Instead, we use a non-default constructor that takes NoopConstructorTag
-// that doesn't affect the triviality of the types.
-struct NoopConstructorTag {};
-
-template <std::size_t I>
-struct EmplaceTag {};
-
-template <>
-union Union<> {
- constexpr explicit Union(NoopConstructorTag) noexcept {}
-};
-
-// Suppress bogus warning on MSVC: MSVC complains that Union<T...> has a defined
-// deleted destructor from the `std::is_destructible` check below.
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable : 4624)
-#endif // _MSC_VER
-
-template <class Head, class... Tail>
-union Union<Head, Tail...> {
- using TailUnion = Union<Tail...>;
-
- explicit constexpr Union(NoopConstructorTag /*tag*/) noexcept
- : tail(NoopConstructorTag()) {}
-
- template <class... P>
- explicit constexpr Union(EmplaceTag<0>, P&&... args)
- : head(std::forward<P>(args)...) {}
-
- template <std::size_t I, class... P>
- explicit constexpr Union(EmplaceTag<I>, P&&... args)
- : tail(EmplaceTag<I - 1>{}, std::forward<P>(args)...) {}
-
- Head head;
- TailUnion tail;
-};
-
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif // _MSC_VER
-
-// TODO(calabrese) Just contain a Union in this union (certain configs fail).
-template <class... T>
-union DestructibleUnionImpl;
-
-template <>
-union DestructibleUnionImpl<> {
- constexpr explicit DestructibleUnionImpl(NoopConstructorTag) noexcept {}
-};
-
-template <class Head, class... Tail>
-union DestructibleUnionImpl<Head, Tail...> {
- using TailUnion = DestructibleUnionImpl<Tail...>;
-
- explicit constexpr DestructibleUnionImpl(NoopConstructorTag /*tag*/) noexcept
- : tail(NoopConstructorTag()) {}
-
- template <class... P>
- explicit constexpr DestructibleUnionImpl(EmplaceTag<0>, P&&... args)
- : head(std::forward<P>(args)...) {}
-
- template <std::size_t I, class... P>
- explicit constexpr DestructibleUnionImpl(EmplaceTag<I>, P&&... args)
- : tail(EmplaceTag<I - 1>{}, std::forward<P>(args)...) {}
-
- ~DestructibleUnionImpl() {}
-
- Head head;
- TailUnion tail;
-};
-
-// This union type is destructible even if one or more T are not trivially
-// destructible. In the case that all T are trivially destructible, then so is
-// this resultant type.
-template <class... T>
-using DestructibleUnion =
- absl::conditional_t<std::is_destructible<Union<T...>>::value, Union<T...>,
- DestructibleUnionImpl<T...>>;
-
-// Deepest base, containing the actual union and the discriminator
-template <class H, class... T>
-class VariantStateBase {
- protected:
- using Variant = variant<H, T...>;
-
- template <class LazyH = H,
- class ConstructibleH = absl::enable_if_t<
- std::is_default_constructible<LazyH>::value, LazyH>>
- constexpr VariantStateBase() noexcept(
- std::is_nothrow_default_constructible<ConstructibleH>::value)
- : state_(EmplaceTag<0>()), index_(0) {}
-
- template <std::size_t I, class... P>
- explicit constexpr VariantStateBase(EmplaceTag<I> tag, P&&... args)
- : state_(tag, std::forward<P>(args)...), index_(I) {}
-
- explicit constexpr VariantStateBase(NoopConstructorTag)
- : state_(NoopConstructorTag()), index_(variant_npos) {}
-
- void destroy() {} // Does nothing (shadowed in child if non-trivial)
-
- DestructibleUnion<H, T...> state_;
- std::size_t index_;
-};
-
-using absl::internal::type_identity;
-
-// OverloadSet::Overload() is a unary function which is overloaded to
-// take any of the element types of the variant, by reference-to-const.
-// The return type of the overload on T is type_identity<T>, so that you
-// can statically determine which overload was called.
-//
-// Overload() is not defined, so it can only be called in unevaluated
-// contexts.
-template <typename... Ts>
-struct OverloadSet;
-
-template <typename T, typename... Ts>
-struct OverloadSet<T, Ts...> : OverloadSet<Ts...> {
- using Base = OverloadSet<Ts...>;
- static type_identity<T> Overload(const T&);
- using Base::Overload;
-};
-
-template <>
-struct OverloadSet<> {
- // For any case not handled above.
- static void Overload(...);
-};
-
-template <class T>
-using LessThanResult = decltype(std::declval<T>() < std::declval<T>());
-
-template <class T>
-using GreaterThanResult = decltype(std::declval<T>() > std::declval<T>());
-
-template <class T>
-using LessThanOrEqualResult = decltype(std::declval<T>() <= std::declval<T>());
-
-template <class T>
-using GreaterThanOrEqualResult =
- decltype(std::declval<T>() >= std::declval<T>());
-
-template <class T>
-using EqualResult = decltype(std::declval<T>() == std::declval<T>());
-
-template <class T>
-using NotEqualResult = decltype(std::declval<T>() != std::declval<T>());
-
-using type_traits_internal::is_detected_convertible;
-
-template <class... T>
-using RequireAllHaveEqualT = absl::enable_if_t<
- absl::conjunction<is_detected_convertible<bool, EqualResult, T>...>::value,
- bool>;
-
-template <class... T>
-using RequireAllHaveNotEqualT =
- absl::enable_if_t<absl::conjunction<is_detected_convertible<
- bool, NotEqualResult, T>...>::value,
- bool>;
-
-template <class... T>
-using RequireAllHaveLessThanT =
- absl::enable_if_t<absl::conjunction<is_detected_convertible<
- bool, LessThanResult, T>...>::value,
- bool>;
-
-template <class... T>
-using RequireAllHaveLessThanOrEqualT =
- absl::enable_if_t<absl::conjunction<is_detected_convertible<
- bool, LessThanOrEqualResult, T>...>::value,
- bool>;
-
-template <class... T>
-using RequireAllHaveGreaterThanOrEqualT =
- absl::enable_if_t<absl::conjunction<is_detected_convertible<
- bool, GreaterThanOrEqualResult, T>...>::value,
- bool>;
-
-template <class... T>
-using RequireAllHaveGreaterThanT =
- absl::enable_if_t<absl::conjunction<is_detected_convertible<
- bool, GreaterThanResult, T>...>::value,
- bool>;
-
-// Helper template containing implementations details of variant that can't go
-// in the private section. For convenience, this takes the variant type as a
-// single template parameter.
-template <typename T>
-struct VariantHelper;
-
-template <typename... Ts>
-struct VariantHelper<variant<Ts...>> {
- // Type metafunction which returns the element type selected if
- // OverloadSet::Overload() is well-formed when called with argument type U.
- template <typename U>
- using BestMatch = decltype(variant_internal::OverloadSet<Ts...>::Overload(
- std::declval<U>()));
-
- // Type metafunction which returns true if OverloadSet::Overload() is
- // well-formed when called with argument type U.
- // CanAccept can't be just an alias because there is a MSVC bug on parameter
- // pack expansion involving decltype.
- template <typename U>
- struct CanAccept
- : std::integral_constant<bool, !std::is_void<BestMatch<U>>::value> {};
-
- // Type metafunction which returns true if Other is an instantiation of
- // variant, and variants's converting constructor from Other will be
- // well-formed. We will use this to remove constructors that would be
- // ill-formed from the overload set.
- template <typename Other>
- struct CanConvertFrom;
-
- template <typename... Us>
- struct CanConvertFrom<variant<Us...>>
- : public absl::conjunction<CanAccept<Us>...> {};
-};
-
-// A type with nontrivial copy ctor and trivial move ctor.
-struct TrivialMoveOnly {
- TrivialMoveOnly(TrivialMoveOnly&&) = default;
-};
-
-// Trait class to detect whether a type is trivially move constructible.
-// A union's defaulted copy/move constructor is deleted if any variant member's
-// copy/move constructor is nontrivial.
-template <typename T>
-struct IsTriviallyMoveConstructible
- : std::is_move_constructible<Union<T, TrivialMoveOnly>> {};
-
-// To guarantee triviality of all special-member functions that can be trivial,
-// we use a chain of conditional bases for each one.
-// The order of inheritance of bases from child to base are logically:
-//
-// variant
-// VariantCopyAssignBase
-// VariantMoveAssignBase
-// VariantCopyBase
-// VariantMoveBase
-// VariantStateBaseDestructor
-// VariantStateBase
-//
-// Note that there is a separate branch at each base that is dependent on
-// whether or not that corresponding special-member-function can be trivial in
-// the resultant variant type.
-
-template <class... T>
-class VariantStateBaseDestructorNontrivial;
-
-template <class... T>
-class VariantMoveBaseNontrivial;
-
-template <class... T>
-class VariantCopyBaseNontrivial;
-
-template <class... T>
-class VariantMoveAssignBaseNontrivial;
-
-template <class... T>
-class VariantCopyAssignBaseNontrivial;
-
-// Base that is dependent on whether or not the destructor can be trivial.
-template <class... T>
-using VariantStateBaseDestructor =
- absl::conditional_t<std::is_destructible<Union<T...>>::value,
- VariantStateBase<T...>,
- VariantStateBaseDestructorNontrivial<T...>>;
-
-// Base that is dependent on whether or not the move-constructor can be
-// implicitly generated by the compiler (trivial or deleted).
-// Previously we were using `std::is_move_constructible<Union<T...>>` to check
-// whether all Ts have trivial move constructor, but it ran into a GCC bug:
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84866
-// So we have to use a different approach (i.e. `HasTrivialMoveConstructor`) to
-// work around the bug.
-template <class... T>
-using VariantMoveBase = absl::conditional_t<
- absl::disjunction<
- absl::negation<absl::conjunction<std::is_move_constructible<T>...>>,
- absl::conjunction<IsTriviallyMoveConstructible<T>...>>::value,
- VariantStateBaseDestructor<T...>, VariantMoveBaseNontrivial<T...>>;
-
-// Base that is dependent on whether or not the copy-constructor can be trivial.
-template <class... T>
-using VariantCopyBase = absl::conditional_t<
- absl::disjunction<
- absl::negation<absl::conjunction<std::is_copy_constructible<T>...>>,
- std::is_copy_constructible<Union<T...>>>::value,
- VariantMoveBase<T...>, VariantCopyBaseNontrivial<T...>>;
-
-// Base that is dependent on whether or not the move-assign can be trivial.
-template <class... T>
-using VariantMoveAssignBase = absl::conditional_t<
- absl::disjunction<
- absl::conjunction<absl::is_move_assignable<Union<T...>>,
- std::is_move_constructible<Union<T...>>,
- std::is_destructible<Union<T...>>>,
- absl::negation<absl::conjunction<std::is_move_constructible<T>...,
- // Note: We're not qualifying this with
- // absl:: because it doesn't compile
- // under MSVC.
- is_move_assignable<T>...>>>::value,
- VariantCopyBase<T...>, VariantMoveAssignBaseNontrivial<T...>>;
-
-// Base that is dependent on whether or not the copy-assign can be trivial.
-template <class... T>
-using VariantCopyAssignBase = absl::conditional_t<
- absl::disjunction<
- absl::conjunction<absl::is_copy_assignable<Union<T...>>,
- std::is_copy_constructible<Union<T...>>,
- std::is_destructible<Union<T...>>>,
- absl::negation<absl::conjunction<std::is_copy_constructible<T>...,
- // Note: We're not qualifying this with
- // absl:: because it doesn't compile
- // under MSVC.
- is_copy_assignable<T>...>>>::value,
- VariantMoveAssignBase<T...>, VariantCopyAssignBaseNontrivial<T...>>;
-
-template <class... T>
-using VariantBase = VariantCopyAssignBase<T...>;
-
-template <class... T>
-class VariantStateBaseDestructorNontrivial : protected VariantStateBase<T...> {
- private:
- using Base = VariantStateBase<T...>;
-
- protected:
- using Base::Base;
-
- VariantStateBaseDestructorNontrivial() = default;
- VariantStateBaseDestructorNontrivial(VariantStateBaseDestructorNontrivial&&) =
- default;
- VariantStateBaseDestructorNontrivial(
- const VariantStateBaseDestructorNontrivial&) = default;
- VariantStateBaseDestructorNontrivial& operator=(
- VariantStateBaseDestructorNontrivial&&) = default;
- VariantStateBaseDestructorNontrivial& operator=(
- const VariantStateBaseDestructorNontrivial&) = default;
-
- struct Destroyer {
- template <std::size_t I>
- void operator()(SizeT<I> i) const {
- using Alternative =
- typename absl::variant_alternative<I, variant<T...>>::type;
- variant_internal::AccessUnion(self->state_, i).~Alternative();
- }
-
- void operator()(SizeT<absl::variant_npos> /*i*/) const {
- // This space intentionally left blank
- }
-
- VariantStateBaseDestructorNontrivial* self;
- };
-
- void destroy() { VisitIndices<sizeof...(T)>::Run(Destroyer{this}, index_); }
-
- ~VariantStateBaseDestructorNontrivial() { destroy(); }
-
- protected:
- using Base::index_;
- using Base::state_;
-};
-
-template <class... T>
-class VariantMoveBaseNontrivial : protected VariantStateBaseDestructor<T...> {
- private:
- using Base = VariantStateBaseDestructor<T...>;
-
- protected:
- using Base::Base;
-
- struct Construct {
- template <std::size_t I>
- void operator()(SizeT<I> i) const {
- using Alternative =
- typename absl::variant_alternative<I, variant<T...>>::type;
- ::new (static_cast<void*>(&self->state_)) Alternative(
- variant_internal::AccessUnion(std::move(other->state_), i));
- }
-
- void operator()(SizeT<absl::variant_npos> /*i*/) const {}
-
- VariantMoveBaseNontrivial* self;
- VariantMoveBaseNontrivial* other;
- };
-
- VariantMoveBaseNontrivial() = default;
- VariantMoveBaseNontrivial(VariantMoveBaseNontrivial&& other) noexcept(
- absl::conjunction<std::is_nothrow_move_constructible<T>...>::value)
- : Base(NoopConstructorTag()) {
- VisitIndices<sizeof...(T)>::Run(Construct{this, &other}, other.index_);
- index_ = other.index_;
- }
-
- VariantMoveBaseNontrivial(VariantMoveBaseNontrivial const&) = default;
-
- VariantMoveBaseNontrivial& operator=(VariantMoveBaseNontrivial&&) = default;
- VariantMoveBaseNontrivial& operator=(VariantMoveBaseNontrivial const&) =
- default;
-
- protected:
- using Base::index_;
- using Base::state_;
-};
-
-template <class... T>
-class VariantCopyBaseNontrivial : protected VariantMoveBase<T...> {
- private:
- using Base = VariantMoveBase<T...>;
-
- protected:
- using Base::Base;
-
- VariantCopyBaseNontrivial() = default;
- VariantCopyBaseNontrivial(VariantCopyBaseNontrivial&&) = default;
-
- struct Construct {
- template <std::size_t I>
- void operator()(SizeT<I> i) const {
- using Alternative =
- typename absl::variant_alternative<I, variant<T...>>::type;
- ::new (static_cast<void*>(&self->state_))
- Alternative(variant_internal::AccessUnion(other->state_, i));
- }
-
- void operator()(SizeT<absl::variant_npos> /*i*/) const {}
-
- VariantCopyBaseNontrivial* self;
- const VariantCopyBaseNontrivial* other;
- };
-
- VariantCopyBaseNontrivial(VariantCopyBaseNontrivial const& other)
- : Base(NoopConstructorTag()) {
- VisitIndices<sizeof...(T)>::Run(Construct{this, &other}, other.index_);
- index_ = other.index_;
- }
-
- VariantCopyBaseNontrivial& operator=(VariantCopyBaseNontrivial&&) = default;
- VariantCopyBaseNontrivial& operator=(VariantCopyBaseNontrivial const&) =
- default;
-
- protected:
- using Base::index_;
- using Base::state_;
-};
-
-template <class... T>
-class VariantMoveAssignBaseNontrivial : protected VariantCopyBase<T...> {
- friend struct VariantCoreAccess;
-
- private:
- using Base = VariantCopyBase<T...>;
-
- protected:
- using Base::Base;
-
- VariantMoveAssignBaseNontrivial() = default;
- VariantMoveAssignBaseNontrivial(VariantMoveAssignBaseNontrivial&&) = default;
- VariantMoveAssignBaseNontrivial(const VariantMoveAssignBaseNontrivial&) =
- default;
- VariantMoveAssignBaseNontrivial& operator=(
- VariantMoveAssignBaseNontrivial const&) = default;
-
- VariantMoveAssignBaseNontrivial&
- operator=(VariantMoveAssignBaseNontrivial&& other) noexcept(
- absl::conjunction<std::is_nothrow_move_constructible<T>...,
- std::is_nothrow_move_assignable<T>...>::value) {
- VisitIndices<sizeof...(T)>::Run(
- VariantCoreAccess::MakeMoveAssignVisitor(this, &other), other.index_);
- return *this;
- }
-
- protected:
- using Base::index_;
- using Base::state_;
-};
-
-template <class... T>
-class VariantCopyAssignBaseNontrivial : protected VariantMoveAssignBase<T...> {
- friend struct VariantCoreAccess;
-
- private:
- using Base = VariantMoveAssignBase<T...>;
-
- protected:
- using Base::Base;
-
- VariantCopyAssignBaseNontrivial() = default;
- VariantCopyAssignBaseNontrivial(VariantCopyAssignBaseNontrivial&&) = default;
- VariantCopyAssignBaseNontrivial(const VariantCopyAssignBaseNontrivial&) =
- default;
- VariantCopyAssignBaseNontrivial& operator=(
- VariantCopyAssignBaseNontrivial&&) = default;
-
- VariantCopyAssignBaseNontrivial& operator=(
- const VariantCopyAssignBaseNontrivial& other) {
- VisitIndices<sizeof...(T)>::Run(
- VariantCoreAccess::MakeCopyAssignVisitor(this, other), other.index_);
- return *this;
- }
-
- protected:
- using Base::index_;
- using Base::state_;
-};
-
-////////////////////////////////////////
-// Visitors for Comparison Operations //
-////////////////////////////////////////
-
-template <class... Types>
-struct EqualsOp {
- const variant<Types...>* v;
- const variant<Types...>* w;
-
- constexpr bool operator()(SizeT<absl::variant_npos> /*v_i*/) const {
- return true;
- }
-
- template <std::size_t I>
- constexpr bool operator()(SizeT<I> /*v_i*/) const {
- return VariantCoreAccess::Access<I>(*v) == VariantCoreAccess::Access<I>(*w);
- }
-};
-
-template <class... Types>
-struct NotEqualsOp {
- const variant<Types...>* v;
- const variant<Types...>* w;
-
- constexpr bool operator()(SizeT<absl::variant_npos> /*v_i*/) const {
- return false;
- }
-
- template <std::size_t I>
- constexpr bool operator()(SizeT<I> /*v_i*/) const {
- return VariantCoreAccess::Access<I>(*v) != VariantCoreAccess::Access<I>(*w);
- }
-};
-
-template <class... Types>
-struct LessThanOp {
- const variant<Types...>* v;
- const variant<Types...>* w;
-
- constexpr bool operator()(SizeT<absl::variant_npos> /*v_i*/) const {
- return false;
- }
-
- template <std::size_t I>
- constexpr bool operator()(SizeT<I> /*v_i*/) const {
- return VariantCoreAccess::Access<I>(*v) < VariantCoreAccess::Access<I>(*w);
- }
-};
-
-template <class... Types>
-struct GreaterThanOp {
- const variant<Types...>* v;
- const variant<Types...>* w;
-
- constexpr bool operator()(SizeT<absl::variant_npos> /*v_i*/) const {
- return false;
- }
-
- template <std::size_t I>
- constexpr bool operator()(SizeT<I> /*v_i*/) const {
- return VariantCoreAccess::Access<I>(*v) > VariantCoreAccess::Access<I>(*w);
- }
-};
-
-template <class... Types>
-struct LessThanOrEqualsOp {
- const variant<Types...>* v;
- const variant<Types...>* w;
-
- constexpr bool operator()(SizeT<absl::variant_npos> /*v_i*/) const {
- return true;
- }
-
- template <std::size_t I>
- constexpr bool operator()(SizeT<I> /*v_i*/) const {
- return VariantCoreAccess::Access<I>(*v) <= VariantCoreAccess::Access<I>(*w);
- }
-};
-
-template <class... Types>
-struct GreaterThanOrEqualsOp {
- const variant<Types...>* v;
- const variant<Types...>* w;
-
- constexpr bool operator()(SizeT<absl::variant_npos> /*v_i*/) const {
- return true;
- }
-
- template <std::size_t I>
- constexpr bool operator()(SizeT<I> /*v_i*/) const {
- return VariantCoreAccess::Access<I>(*v) >= VariantCoreAccess::Access<I>(*w);
- }
-};
-
-// Precondition: v.index() == w.index();
-template <class... Types>
-struct SwapSameIndex {
- variant<Types...>* v;
- variant<Types...>* w;
- template <std::size_t I>
- void operator()(SizeT<I>) const {
- type_traits_internal::Swap(VariantCoreAccess::Access<I>(*v),
- VariantCoreAccess::Access<I>(*w));
- }
-
- void operator()(SizeT<variant_npos>) const {}
-};
-
-// TODO(calabrese) do this from a different namespace for proper adl usage
-template <class... Types>
-struct Swap {
- variant<Types...>* v;
- variant<Types...>* w;
-
- void generic_swap() const {
- variant<Types...> tmp(std::move(*w));
- VariantCoreAccess::Destroy(*w);
- VariantCoreAccess::InitFrom(*w, std::move(*v));
- VariantCoreAccess::Destroy(*v);
- VariantCoreAccess::InitFrom(*v, std::move(tmp));
- }
-
- void operator()(SizeT<absl::variant_npos> /*w_i*/) const {
- if (!v->valueless_by_exception()) {
- generic_swap();
- }
- }
-
- template <std::size_t Wi>
- void operator()(SizeT<Wi> /*w_i*/) {
- if (v->index() == Wi) {
- VisitIndices<sizeof...(Types)>::Run(SwapSameIndex<Types...>{v, w}, Wi);
- } else {
- generic_swap();
- }
- }
-};
-
-template <typename Variant, typename = void, typename... Ts>
-struct VariantHashBase {
- VariantHashBase() = delete;
- VariantHashBase(const VariantHashBase&) = delete;
- VariantHashBase(VariantHashBase&&) = delete;
- VariantHashBase& operator=(const VariantHashBase&) = delete;
- VariantHashBase& operator=(VariantHashBase&&) = delete;
-};
-
-struct VariantHashVisitor {
- template <typename T>
- size_t operator()(const T& t) {
- return std::hash<T>{}(t);
- }
-};
-
-template <typename Variant, typename... Ts>
-struct VariantHashBase<Variant,
- absl::enable_if_t<absl::conjunction<
- type_traits_internal::IsHashable<Ts>...>::value>,
- Ts...> {
- using argument_type = Variant;
- using result_type = size_t;
- size_t operator()(const Variant& var) const {
- type_traits_internal::AssertHashEnabled<Ts...>();
- if (var.valueless_by_exception()) {
- return 239799884;
- }
- size_t result = VisitIndices<variant_size<Variant>::value>::Run(
- PerformVisitation<VariantHashVisitor, const Variant&>{
- std::forward_as_tuple(var), VariantHashVisitor{}},
- var.index());
- // Combine the index and the hash result in order to distinguish
- // std::variant<int, int> holding the same value as different alternative.
- return result ^ var.index();
- }
-};
-
-} // namespace variant_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // !defined(ABSL_USES_STD_VARIANT)
-#endif // ABSL_TYPES_INTERNAL_VARIANT_H_
diff --git a/contrib/restricted/abseil-cpp/absl/types/optional.h b/contrib/restricted/abseil-cpp/absl/types/optional.h
index 0d8f8704c50..65bba64ff25 100644
--- a/contrib/restricted/abseil-cpp/absl/types/optional.h
+++ b/contrib/restricted/abseil-cpp/absl/types/optional.h
@@ -16,31 +16,17 @@
// optional.h
// -----------------------------------------------------------------------------
//
-// This header file defines the `absl::optional` type for holding a value which
-// may or may not be present. This type is useful for providing value semantics
-// for operations that may either wish to return or hold "something-or-nothing".
-//
-// Example:
-//
-// // A common way to signal operation failure is to provide an output
-// // parameter and a bool return type:
-// bool AcquireResource(const Input&, Resource * out);
-//
-// // Providing an absl::optional return type provides a cleaner API:
-// absl::optional<Resource> AcquireResource(const Input&);
-//
-// `absl::optional` is a C++11 compatible version of the C++17 `std::optional`
-// abstraction and is designed to be a drop-in replacement for code compliant
-// with C++17.
+// Historical note: Abseil once provided an implementation of `absl::optional`
+// as a polyfill for `std::optional` prior to C++17. Now that C++17 is required,
+// `absl::optional` is an alias for `std::optional`.
+
#ifndef ABSL_TYPES_OPTIONAL_H_
#define ABSL_TYPES_OPTIONAL_H_
-#include "absl/base/config.h" // TODO(calabrese) IWYU removal?
-#include "absl/utility/utility.h"
-
-#ifdef ABSL_USES_STD_OPTIONAL
+#include <optional>
-#include <optional> // IWYU pragma: export
+#include "absl/base/config.h"
+#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -52,730 +38,4 @@ using std::nullopt;
ABSL_NAMESPACE_END
} // namespace absl
-#else // ABSL_USES_STD_OPTIONAL
-
-#include <cassert>
-#include <functional>
-#include <initializer_list>
-#include <type_traits>
-#include <utility>
-
-#include "absl/base/attributes.h"
-#include "absl/base/nullability.h"
-#include "absl/base/internal/inline_variable.h"
-#include "absl/meta/type_traits.h"
-#include "absl/types/bad_optional_access.h"
-#include "absl/types/internal/optional.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// nullopt_t
-//
-// Class type for `absl::nullopt` used to indicate an `absl::optional<T>` type
-// that does not contain a value.
-struct nullopt_t {
- // It must not be default-constructible to avoid ambiguity for opt = {}.
- explicit constexpr nullopt_t(optional_internal::init_t) noexcept {}
-};
-
-// nullopt
-//
-// A tag constant of type `absl::nullopt_t` used to indicate an empty
-// `absl::optional` in certain functions, such as construction or assignment.
-ABSL_INTERNAL_INLINE_CONSTEXPR(nullopt_t, nullopt,
- nullopt_t(optional_internal::init_t()));
-
-// -----------------------------------------------------------------------------
-// absl::optional
-// -----------------------------------------------------------------------------
-//
-// A value of type `absl::optional<T>` holds either a value of `T` or an
-// "empty" value. When it holds a value of `T`, it stores it as a direct
-// sub-object, so `sizeof(optional<T>)` is approximately
-// `sizeof(T) + sizeof(bool)`.
-//
-// This implementation is based on the specification in the latest draft of the
-// C++17 `std::optional` specification as of May 2017, section 20.6.
-//
-// Differences between `absl::optional<T>` and `std::optional<T>` include:
-//
-// * `constexpr` is not used for non-const member functions.
-// (dependency on some differences between C++11 and C++14.)
-// * `absl::nullopt` and `absl::in_place` are not declared `constexpr`. We
-// need the inline variable support in C++17 for external linkage.
-// * Throws `absl::bad_optional_access` instead of
-// `std::bad_optional_access`.
-// * `make_optional()` cannot be declared `constexpr` due to the absence of
-// guaranteed copy elision.
-// * The move constructor's `noexcept` specification is stronger, i.e. if the
-// default allocator is non-throwing (via setting
-// `ABSL_ALLOCATOR_NOTHROW`), it evaluates to `noexcept(true)`, because
-// we assume
-// a) move constructors should only throw due to allocation failure and
-// b) if T's move constructor allocates, it uses the same allocation
-// function as the default allocator.
-//
-template <typename T>
-class optional : private optional_internal::optional_data<T>,
- private optional_internal::optional_ctor_base<
- optional_internal::ctor_copy_traits<T>::traits>,
- private optional_internal::optional_assign_base<
- optional_internal::assign_copy_traits<T>::traits> {
- using data_base = optional_internal::optional_data<T>;
-
- public:
- typedef T value_type;
-
- // Constructors
-
- // Constructs an `optional` holding an empty value, NOT a default constructed
- // `T`.
- constexpr optional() noexcept = default;
-
- // Constructs an `optional` initialized with `nullopt` to hold an empty value.
- constexpr optional(nullopt_t) noexcept {} // NOLINT(runtime/explicit)
-
- // Copy constructor, standard semantics
- optional(const optional&) = default;
-
- // Move constructor, standard semantics
- optional(optional&&) = default;
-
- // Constructs a non-empty `optional` direct-initialized value of type `T` from
- // the arguments `std::forward<Args>(args)...` within the `optional`.
- // (The `in_place_t` is a tag used to indicate that the contained object
- // should be constructed in-place.)
- template <typename InPlaceT, typename... Args,
- absl::enable_if_t<absl::conjunction<
- std::is_same<InPlaceT, in_place_t>,
- std::is_constructible<T, Args&&...> >::value>* = nullptr>
- constexpr explicit optional(InPlaceT, Args&&... args)
- : data_base(in_place_t(), std::forward<Args>(args)...) {}
-
- // Constructs a non-empty `optional` direct-initialized value of type `T` from
- // the arguments of an initializer_list and `std::forward<Args>(args)...`.
- // (The `in_place_t` is a tag used to indicate that the contained object
- // should be constructed in-place.)
- template <typename U, typename... Args,
- typename = typename std::enable_if<std::is_constructible<
- T, std::initializer_list<U>&, Args&&...>::value>::type>
- constexpr explicit optional(in_place_t, std::initializer_list<U> il,
- Args&&... args)
- : data_base(in_place_t(), il, std::forward<Args>(args)...) {}
-
- // Value constructor (implicit)
- template <
- typename U = T,
- typename std::enable_if<
- absl::conjunction<absl::negation<std::is_same<
- in_place_t, typename std::decay<U>::type> >,
- absl::negation<std::is_same<
- optional<T>, typename std::decay<U>::type> >,
- std::is_convertible<U&&, T>,
- std::is_constructible<T, U&&> >::value,
- bool>::type = false>
- constexpr optional(U&& v) : data_base(in_place_t(), std::forward<U>(v)) {}
-
- // Value constructor (explicit)
- template <
- typename U = T,
- typename std::enable_if<
- absl::conjunction<absl::negation<std::is_same<
- in_place_t, typename std::decay<U>::type> >,
- absl::negation<std::is_same<
- optional<T>, typename std::decay<U>::type> >,
- absl::negation<std::is_convertible<U&&, T> >,
- std::is_constructible<T, U&&> >::value,
- bool>::type = false>
- explicit constexpr optional(U&& v)
- : data_base(in_place_t(), std::forward<U>(v)) {}
-
- // Converting copy constructor (implicit)
- template <typename U,
- typename std::enable_if<
- absl::conjunction<
- absl::negation<std::is_same<T, U> >,
- std::is_constructible<T, const U&>,
- absl::negation<
- optional_internal::
- is_constructible_convertible_from_optional<T, U> >,
- std::is_convertible<const U&, T> >::value,
- bool>::type = false>
- optional(const optional<U>& rhs) {
- if (rhs) {
- this->construct(*rhs);
- }
- }
-
- // Converting copy constructor (explicit)
- template <typename U,
- typename std::enable_if<
- absl::conjunction<
- absl::negation<std::is_same<T, U>>,
- std::is_constructible<T, const U&>,
- absl::negation<
- optional_internal::
- is_constructible_convertible_from_optional<T, U>>,
- absl::negation<std::is_convertible<const U&, T>>>::value,
- bool>::type = false>
- explicit optional(const optional<U>& rhs) {
- if (rhs) {
- this->construct(*rhs);
- }
- }
-
- // Converting move constructor (implicit)
- template <typename U,
- typename std::enable_if<
- absl::conjunction<
- absl::negation<std::is_same<T, U> >,
- std::is_constructible<T, U&&>,
- absl::negation<
- optional_internal::
- is_constructible_convertible_from_optional<T, U> >,
- std::is_convertible<U&&, T> >::value,
- bool>::type = false>
- optional(optional<U>&& rhs) {
- if (rhs) {
- this->construct(std::move(*rhs));
- }
- }
-
- // Converting move constructor (explicit)
- template <
- typename U,
- typename std::enable_if<
- absl::conjunction<
- absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
- absl::negation<
- optional_internal::is_constructible_convertible_from_optional<
- T, U>>,
- absl::negation<std::is_convertible<U&&, T>>>::value,
- bool>::type = false>
- explicit optional(optional<U>&& rhs) {
- if (rhs) {
- this->construct(std::move(*rhs));
- }
- }
-
- // Destructor. Trivial if `T` is trivially destructible.
- ~optional() = default;
-
- // Assignment Operators
-
- // Assignment from `nullopt`
- //
- // Example:
- //
- // struct S { int value; };
- // optional<S> opt = absl::nullopt; // Could also use opt = { };
- optional& operator=(nullopt_t) noexcept {
- this->destruct();
- return *this;
- }
-
- // Copy assignment operator, standard semantics
- optional& operator=(const optional& src) = default;
-
- // Move assignment operator, standard semantics
- optional& operator=(optional&& src) = default;
-
- // Value assignment operators
- template <typename U = T,
- int&..., // Workaround an internal compiler error in GCC 5 to 10.
- typename = typename std::enable_if<absl::conjunction<
- absl::negation<
- std::is_same<optional<T>, typename std::decay<U>::type> >,
- absl::negation<absl::conjunction<
- std::is_scalar<T>,
- std::is_same<T, typename std::decay<U>::type> > >,
- std::is_constructible<T, U>,
- std::is_assignable<T&, U> >::value>::type>
- optional& operator=(U&& v) {
- this->assign(std::forward<U>(v));
- return *this;
- }
-
- template <
- typename U,
- int&..., // Workaround an internal compiler error in GCC 5 to 10.
- typename = typename std::enable_if<absl::conjunction<
- absl::negation<std::is_same<T, U> >,
- std::is_constructible<T, const U&>, std::is_assignable<T&, const U&>,
- absl::negation<
- optional_internal::
- is_constructible_convertible_assignable_from_optional<
- T, U> > >::value>::type>
- optional& operator=(const optional<U>& rhs) {
- if (rhs) {
- this->assign(*rhs);
- } else {
- this->destruct();
- }
- return *this;
- }
-
- template <typename U,
- int&..., // Workaround an internal compiler error in GCC 5 to 10.
- typename = typename std::enable_if<absl::conjunction<
- absl::negation<std::is_same<T, U> >,
- std::is_constructible<T, U>, std::is_assignable<T&, U>,
- absl::negation<
- optional_internal::
- is_constructible_convertible_assignable_from_optional<
- T, U> > >::value>::type>
- optional& operator=(optional<U>&& rhs) {
- if (rhs) {
- this->assign(std::move(*rhs));
- } else {
- this->destruct();
- }
- return *this;
- }
-
- // Modifiers
-
- // optional::reset()
- //
- // Destroys the inner `T` value of an `absl::optional` if one is present.
- ABSL_ATTRIBUTE_REINITIALIZES void reset() noexcept { this->destruct(); }
-
- // optional::emplace()
- //
- // (Re)constructs the underlying `T` in-place with the given forwarded
- // arguments.
- //
- // Example:
- //
- // optional<Foo> opt;
- // opt.emplace(arg1,arg2,arg3); // Constructs Foo(arg1,arg2,arg3)
- //
- // If the optional is non-empty, and the `args` refer to subobjects of the
- // current object, then behaviour is undefined, because the current object
- // will be destructed before the new object is constructed with `args`.
- template <typename... Args,
- typename = typename std::enable_if<
- std::is_constructible<T, Args&&...>::value>::type>
- T& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- this->destruct();
- this->construct(std::forward<Args>(args)...);
- return reference();
- }
-
- // Emplace reconstruction overload for an initializer list and the given
- // forwarded arguments.
- //
- // Example:
- //
- // struct Foo {
- // Foo(std::initializer_list<int>);
- // };
- //
- // optional<Foo> opt;
- // opt.emplace({1,2,3}); // Constructs Foo({1,2,3})
- template <typename U, typename... Args,
- typename = typename std::enable_if<std::is_constructible<
- T, std::initializer_list<U>&, Args&&...>::value>::type>
- T& emplace(std::initializer_list<U> il,
- Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- this->destruct();
- this->construct(il, std::forward<Args>(args)...);
- return reference();
- }
-
- // Swaps
-
- // Swap, standard semantics
- void swap(optional& rhs) noexcept(
- std::is_nothrow_move_constructible<T>::value&&
- type_traits_internal::IsNothrowSwappable<T>::value) {
- if (*this) {
- if (rhs) {
- type_traits_internal::Swap(**this, *rhs);
- } else {
- rhs.construct(std::move(**this));
- this->destruct();
- }
- } else {
- if (rhs) {
- this->construct(std::move(*rhs));
- rhs.destruct();
- } else {
- // No effect (swap(disengaged, disengaged)).
- }
- }
- }
-
- // Observers
-
- // optional::operator->()
- //
- // Accesses the underlying `T` value's member `m` of an `optional`. If the
- // `optional` is empty, behavior is undefined.
- //
- // If you need myOpt->foo in constexpr, use (*myOpt).foo instead.
- absl::Nonnull<const T*> operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
- ABSL_HARDENING_ASSERT(this->engaged_);
- return std::addressof(this->data_);
- }
- absl::Nonnull<T*> operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND {
- ABSL_HARDENING_ASSERT(this->engaged_);
- return std::addressof(this->data_);
- }
-
- // optional::operator*()
- //
- // Accesses the underlying `T` value of an `optional`. If the `optional` is
- // empty, behavior is undefined.
- constexpr const T& operator*() const& ABSL_ATTRIBUTE_LIFETIME_BOUND {
- ABSL_HARDENING_ASSERT(this->engaged_);
- return reference();
- }
- T& operator*() & ABSL_ATTRIBUTE_LIFETIME_BOUND {
- ABSL_HARDENING_ASSERT(this->engaged_);
- return reference();
- }
- constexpr const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND {
- ABSL_HARDENING_ASSERT(this->engaged_);
- return std::move(reference());
- }
- T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND {
- ABSL_HARDENING_ASSERT(this->engaged_);
- return std::move(reference());
- }
-
- // optional::operator bool()
- //
- // Returns false if and only if the `optional` is empty.
- //
- // if (opt) {
- // // do something with *opt or opt->;
- // } else {
- // // opt is empty.
- // }
- //
- constexpr explicit operator bool() const noexcept { return this->engaged_; }
-
- // optional::has_value()
- //
- // Determines whether the `optional` contains a value. Returns `false` if and
- // only if `*this` is empty.
- constexpr bool has_value() const noexcept { return this->engaged_; }
-
-// Suppress bogus warning on MSVC: MSVC complains call to reference() after
-// throw_bad_optional_access() is unreachable.
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable : 4702)
-#endif // _MSC_VER
- // optional::value()
- //
- // Returns a reference to an `optional`s underlying value. The constness
- // and lvalue/rvalue-ness of the `optional` is preserved to the view of
- // the `T` sub-object. Throws `absl::bad_optional_access` when the `optional`
- // is empty.
- constexpr const T& value() const& ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return static_cast<bool>(*this)
- ? reference()
- : (optional_internal::throw_bad_optional_access(), reference());
- }
- T& value() & ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return static_cast<bool>(*this)
- ? reference()
- : (optional_internal::throw_bad_optional_access(), reference());
- }
- T&& value() && ABSL_ATTRIBUTE_LIFETIME_BOUND { // NOLINT(build/c++11)
- return std::move(
- static_cast<bool>(*this)
- ? reference()
- : (optional_internal::throw_bad_optional_access(), reference()));
- }
- constexpr const T&& value()
- const&& ABSL_ATTRIBUTE_LIFETIME_BOUND { // NOLINT(build/c++11)
- return std::move(
- static_cast<bool>(*this)
- ? reference()
- : (optional_internal::throw_bad_optional_access(), reference()));
- }
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif // _MSC_VER
-
- // optional::value_or()
- //
- // Returns either the value of `T` or a passed default `v` if the `optional`
- // is empty.
- template <typename U>
- constexpr T value_or(U&& v) const& {
- static_assert(std::is_copy_constructible<value_type>::value,
- "optional<T>::value_or: T must be copy constructible");
- static_assert(std::is_convertible<U&&, value_type>::value,
- "optional<T>::value_or: U must be convertible to T");
- return static_cast<bool>(*this) ? **this
- : static_cast<T>(std::forward<U>(v));
- }
- template <typename U>
- T value_or(U&& v) && { // NOLINT(build/c++11)
- static_assert(std::is_move_constructible<value_type>::value,
- "optional<T>::value_or: T must be move constructible");
- static_assert(std::is_convertible<U&&, value_type>::value,
- "optional<T>::value_or: U must be convertible to T");
- return static_cast<bool>(*this) ? std::move(**this)
- : static_cast<T>(std::forward<U>(v));
- }
-
- private:
- // Private accessors for internal storage viewed as reference to T.
- constexpr const T& reference() const { return this->data_; }
- T& reference() { return this->data_; }
-
- // T constraint checks. You can't have an optional of nullopt_t, in_place_t
- // or a reference.
- static_assert(
- !std::is_same<nullopt_t, typename std::remove_cv<T>::type>::value,
- "optional<nullopt_t> is not allowed.");
- static_assert(
- !std::is_same<in_place_t, typename std::remove_cv<T>::type>::value,
- "optional<in_place_t> is not allowed.");
- static_assert(!std::is_reference<T>::value,
- "optional<reference> is not allowed.");
-};
-
-// Non-member functions
-
-// swap()
-//
-// Performs a swap between two `absl::optional` objects, using standard
-// semantics.
-template <typename T, typename std::enable_if<
- std::is_move_constructible<T>::value &&
- type_traits_internal::IsSwappable<T>::value,
- bool>::type = false>
-void swap(optional<T>& a, optional<T>& b) noexcept(noexcept(a.swap(b))) {
- a.swap(b);
-}
-
-// make_optional()
-//
-// Creates a non-empty `optional<T>` where the type of `T` is deduced. An
-// `absl::optional` can also be explicitly instantiated with
-// `make_optional<T>(v)`.
-//
-// Note: `make_optional()` constructions may be declared `constexpr` for
-// trivially copyable types `T`. Non-trivial types require copy elision
-// support in C++17 for `make_optional` to support `constexpr` on such
-// non-trivial types.
-//
-// Example:
-//
-// constexpr absl::optional<int> opt = absl::make_optional(1);
-// static_assert(opt.value() == 1, "");
-template <typename T>
-constexpr optional<typename std::decay<T>::type> make_optional(T&& v) {
- return optional<typename std::decay<T>::type>(std::forward<T>(v));
-}
-
-template <typename T, typename... Args>
-constexpr optional<T> make_optional(Args&&... args) {
- return optional<T>(in_place_t(), std::forward<Args>(args)...);
-}
-
-template <typename T, typename U, typename... Args>
-constexpr optional<T> make_optional(std::initializer_list<U> il,
- Args&&... args) {
- return optional<T>(in_place_t(), il, std::forward<Args>(args)...);
-}
-
-// Relational operators [optional.relops]
-
-// Empty optionals are considered equal to each other and less than non-empty
-// optionals. Supports relations between optional<T> and optional<U>, between
-// optional<T> and U, and between optional<T> and nullopt.
-//
-// Note: We're careful to support T having non-bool relationals.
-
-// Requires: The expression, e.g. "*x == *y" shall be well-formed and its result
-// shall be convertible to bool.
-// The C++17 (N4606) "Returns:" statements are translated into
-// code in an obvious way here, and the original text retained as function docs.
-// Returns: If bool(x) != bool(y), false; otherwise if bool(x) == false, true;
-// otherwise *x == *y.
-template <typename T, typename U>
-constexpr auto operator==(const optional<T>& x, const optional<U>& y)
- -> decltype(optional_internal::convertible_to_bool(*x == *y)) {
- return static_cast<bool>(x) != static_cast<bool>(y)
- ? false
- : static_cast<bool>(x) == false ? true
- : static_cast<bool>(*x == *y);
-}
-
-// Returns: If bool(x) != bool(y), true; otherwise, if bool(x) == false, false;
-// otherwise *x != *y.
-template <typename T, typename U>
-constexpr auto operator!=(const optional<T>& x, const optional<U>& y)
- -> decltype(optional_internal::convertible_to_bool(*x != *y)) {
- return static_cast<bool>(x) != static_cast<bool>(y)
- ? true
- : static_cast<bool>(x) == false ? false
- : static_cast<bool>(*x != *y);
-}
-// Returns: If !y, false; otherwise, if !x, true; otherwise *x < *y.
-template <typename T, typename U>
-constexpr auto operator<(const optional<T>& x, const optional<U>& y)
- -> decltype(optional_internal::convertible_to_bool(*x < *y)) {
- return !y ? false : !x ? true : static_cast<bool>(*x < *y);
-}
-// Returns: If !x, false; otherwise, if !y, true; otherwise *x > *y.
-template <typename T, typename U>
-constexpr auto operator>(const optional<T>& x, const optional<U>& y)
- -> decltype(optional_internal::convertible_to_bool(*x > *y)) {
- return !x ? false : !y ? true : static_cast<bool>(*x > *y);
-}
-// Returns: If !x, true; otherwise, if !y, false; otherwise *x <= *y.
-template <typename T, typename U>
-constexpr auto operator<=(const optional<T>& x, const optional<U>& y)
- -> decltype(optional_internal::convertible_to_bool(*x <= *y)) {
- return !x ? true : !y ? false : static_cast<bool>(*x <= *y);
-}
-// Returns: If !y, true; otherwise, if !x, false; otherwise *x >= *y.
-template <typename T, typename U>
-constexpr auto operator>=(const optional<T>& x, const optional<U>& y)
- -> decltype(optional_internal::convertible_to_bool(*x >= *y)) {
- return !y ? true : !x ? false : static_cast<bool>(*x >= *y);
-}
-
-// Comparison with nullopt [optional.nullops]
-// The C++17 (N4606) "Returns:" statements are used directly here.
-template <typename T>
-constexpr bool operator==(const optional<T>& x, nullopt_t) noexcept {
- return !x;
-}
-template <typename T>
-constexpr bool operator==(nullopt_t, const optional<T>& x) noexcept {
- return !x;
-}
-template <typename T>
-constexpr bool operator!=(const optional<T>& x, nullopt_t) noexcept {
- return static_cast<bool>(x);
-}
-template <typename T>
-constexpr bool operator!=(nullopt_t, const optional<T>& x) noexcept {
- return static_cast<bool>(x);
-}
-template <typename T>
-constexpr bool operator<(const optional<T>&, nullopt_t) noexcept {
- return false;
-}
-template <typename T>
-constexpr bool operator<(nullopt_t, const optional<T>& x) noexcept {
- return static_cast<bool>(x);
-}
-template <typename T>
-constexpr bool operator<=(const optional<T>& x, nullopt_t) noexcept {
- return !x;
-}
-template <typename T>
-constexpr bool operator<=(nullopt_t, const optional<T>&) noexcept {
- return true;
-}
-template <typename T>
-constexpr bool operator>(const optional<T>& x, nullopt_t) noexcept {
- return static_cast<bool>(x);
-}
-template <typename T>
-constexpr bool operator>(nullopt_t, const optional<T>&) noexcept {
- return false;
-}
-template <typename T>
-constexpr bool operator>=(const optional<T>&, nullopt_t) noexcept {
- return true;
-}
-template <typename T>
-constexpr bool operator>=(nullopt_t, const optional<T>& x) noexcept {
- return !x;
-}
-
-// Comparison with T [optional.comp_with_t]
-
-// Requires: The expression, e.g. "*x == v" shall be well-formed and its result
-// shall be convertible to bool.
-// The C++17 (N4606) "Equivalent to:" statements are used directly here.
-template <typename T, typename U>
-constexpr auto operator==(const optional<T>& x, const U& v)
- -> decltype(optional_internal::convertible_to_bool(*x == v)) {
- return static_cast<bool>(x) ? static_cast<bool>(*x == v) : false;
-}
-template <typename T, typename U>
-constexpr auto operator==(const U& v, const optional<T>& x)
- -> decltype(optional_internal::convertible_to_bool(v == *x)) {
- return static_cast<bool>(x) ? static_cast<bool>(v == *x) : false;
-}
-template <typename T, typename U>
-constexpr auto operator!=(const optional<T>& x, const U& v)
- -> decltype(optional_internal::convertible_to_bool(*x != v)) {
- return static_cast<bool>(x) ? static_cast<bool>(*x != v) : true;
-}
-template <typename T, typename U>
-constexpr auto operator!=(const U& v, const optional<T>& x)
- -> decltype(optional_internal::convertible_to_bool(v != *x)) {
- return static_cast<bool>(x) ? static_cast<bool>(v != *x) : true;
-}
-template <typename T, typename U>
-constexpr auto operator<(const optional<T>& x, const U& v)
- -> decltype(optional_internal::convertible_to_bool(*x < v)) {
- return static_cast<bool>(x) ? static_cast<bool>(*x < v) : true;
-}
-template <typename T, typename U>
-constexpr auto operator<(const U& v, const optional<T>& x)
- -> decltype(optional_internal::convertible_to_bool(v < *x)) {
- return static_cast<bool>(x) ? static_cast<bool>(v < *x) : false;
-}
-template <typename T, typename U>
-constexpr auto operator<=(const optional<T>& x, const U& v)
- -> decltype(optional_internal::convertible_to_bool(*x <= v)) {
- return static_cast<bool>(x) ? static_cast<bool>(*x <= v) : true;
-}
-template <typename T, typename U>
-constexpr auto operator<=(const U& v, const optional<T>& x)
- -> decltype(optional_internal::convertible_to_bool(v <= *x)) {
- return static_cast<bool>(x) ? static_cast<bool>(v <= *x) : false;
-}
-template <typename T, typename U>
-constexpr auto operator>(const optional<T>& x, const U& v)
- -> decltype(optional_internal::convertible_to_bool(*x > v)) {
- return static_cast<bool>(x) ? static_cast<bool>(*x > v) : false;
-}
-template <typename T, typename U>
-constexpr auto operator>(const U& v, const optional<T>& x)
- -> decltype(optional_internal::convertible_to_bool(v > *x)) {
- return static_cast<bool>(x) ? static_cast<bool>(v > *x) : true;
-}
-template <typename T, typename U>
-constexpr auto operator>=(const optional<T>& x, const U& v)
- -> decltype(optional_internal::convertible_to_bool(*x >= v)) {
- return static_cast<bool>(x) ? static_cast<bool>(*x >= v) : false;
-}
-template <typename T, typename U>
-constexpr auto operator>=(const U& v, const optional<T>& x)
- -> decltype(optional_internal::convertible_to_bool(v >= *x)) {
- return static_cast<bool>(x) ? static_cast<bool>(v >= *x) : true;
-}
-
-ABSL_NAMESPACE_END
-} // namespace absl
-
-namespace std {
-
-// std::hash specialization for absl::optional.
-template <typename T>
-struct hash<absl::optional<T> >
- : absl::optional_internal::optional_hash_base<T> {};
-
-} // namespace std
-
-#undef ABSL_MSVC_CONSTEXPR_BUG_IN_UNION_LIKE_CLASS
-
-#endif // ABSL_USES_STD_OPTIONAL
-
#endif // ABSL_TYPES_OPTIONAL_H_
diff --git a/contrib/restricted/abseil-cpp/absl/types/span.h b/contrib/restricted/abseil-cpp/absl/types/span.h
index 33904a904bc..39e6a8a5d75 100644
--- a/contrib/restricted/abseil-cpp/absl/types/span.h
+++ b/contrib/restricted/abseil-cpp/absl/types/span.h
@@ -66,7 +66,8 @@
#include "absl/base/macros.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
-#include "absl/base/port.h" // TODO(strel): remove this include
+#include "absl/base/port.h" // TODO(strel): remove this include
+#include "absl/hash/internal/weakly_mixed_integer.h"
#include "absl/meta/type_traits.h"
#include "absl/types/internal/span.h"
@@ -201,10 +202,11 @@ class ABSL_ATTRIBUTE_VIEW Span {
public:
using element_type = T;
using value_type = absl::remove_cv_t<T>;
- // TODO(b/316099902) - pointer should be Nullable<T*>, but this makes it hard
- // to recognize foreach loops as safe.
- using pointer = T*;
- using const_pointer = const T*;
+ // TODO(b/316099902) - pointer should be absl_nullable, but this makes it hard
+ // to recognize foreach loops as safe. absl_nullability_unknown is currently
+ // used to suppress -Wnullability-completeness warnings.
+ using pointer = T* absl_nullability_unknown;
+ using const_pointer = const T* absl_nullability_unknown;
using reference = T&;
using const_reference = const T&;
using iterator = pointer;
@@ -498,7 +500,7 @@ class ABSL_ATTRIBUTE_VIEW Span {
template <typename H>
friend H AbslHashValue(H h, Span v) {
return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()),
- v.size());
+ hash_internal::WeaklyMixedInteger{v.size()});
}
private:
@@ -724,12 +726,12 @@ ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool operator>=(Span<T> a, const U& b) {
// }
//
template <int&... ExplicitArgumentBarrier, typename T>
-constexpr Span<T> MakeSpan(absl::Nullable<T*> ptr, size_t size) noexcept {
+constexpr Span<T> MakeSpan(T* absl_nullable ptr, size_t size) noexcept {
return Span<T>(ptr, size);
}
template <int&... ExplicitArgumentBarrier, typename T>
-Span<T> MakeSpan(absl::Nullable<T*> begin, absl::Nullable<T*> end) noexcept {
+Span<T> MakeSpan(T* absl_nullable begin, T* absl_nullable end) noexcept {
ABSL_HARDENING_ASSERT(begin <= end);
return Span<T>(begin, static_cast<size_t>(end - begin));
}
@@ -770,14 +772,14 @@ constexpr Span<T> MakeSpan(T (&array)[N]) noexcept {
// ProcessInts(absl::MakeConstSpan(std::vector<int>{ 0, 0, 0 }));
//
template <int&... ExplicitArgumentBarrier, typename T>
-constexpr Span<const T> MakeConstSpan(absl::Nullable<T*> ptr,
+constexpr Span<const T> MakeConstSpan(T* absl_nullable ptr,
size_t size) noexcept {
return Span<const T>(ptr, size);
}
template <int&... ExplicitArgumentBarrier, typename T>
-Span<const T> MakeConstSpan(absl::Nullable<T*> begin,
- absl::Nullable<T*> end) noexcept {
+Span<const T> MakeConstSpan(T* absl_nullable begin,
+ T* absl_nullable end) noexcept {
ABSL_HARDENING_ASSERT(begin <= end);
return Span<const T>(begin, end - begin);
}
diff --git a/contrib/restricted/abseil-cpp/absl/types/variant.h b/contrib/restricted/abseil-cpp/absl/types/variant.h
index 56a7e05ee61..6b366454675 100644
--- a/contrib/restricted/abseil-cpp/absl/types/variant.h
+++ b/contrib/restricted/abseil-cpp/absl/types/variant.h
@@ -16,39 +16,18 @@
// variant.h
// -----------------------------------------------------------------------------
//
-// This header file defines an `absl::variant` type for holding a type-safe
-// value of some prescribed set of types (noted as alternative types), and
-// associated functions for managing variants.
-//
-// The `absl::variant` type is a form of type-safe union. An `absl::variant`
-// should always hold a value of one of its alternative types (except in the
-// "valueless by exception state" -- see below). A default-constructed
-// `absl::variant` will hold the value of its first alternative type, provided
-// it is default-constructible.
-//
-// In exceptional cases due to error, an `absl::variant` can hold no
-// value (known as a "valueless by exception" state), though this is not the
-// norm.
-//
-// As with `absl::optional`, an `absl::variant` -- when it holds a value --
-// allocates a value of that type directly within the `variant` itself; it
-// cannot hold a reference, array, or the type `void`; it can, however, hold a
-// pointer to externally managed memory.
-//
-// `absl::variant` is a C++11 compatible version of the C++17 `std::variant`
-// abstraction and is designed to be a drop-in replacement for code compliant
-// with C++17.
+// Historical note: Abseil once provided an implementation of `absl::variant`
+// as a polyfill for `std::variant` prior to C++17. Now that C++17 is required,
+// `absl::variant` is an alias for `std::variant`.
#ifndef ABSL_TYPES_VARIANT_H_
#define ABSL_TYPES_VARIANT_H_
+#include <variant>
+
#include "absl/base/config.h"
#include "absl/utility/utility.h"
-#ifdef ABSL_USES_STD_VARIANT
-
-#include <variant> // IWYU pragma: export
-
namespace absl {
ABSL_NAMESPACE_BEGIN
using std::bad_variant_access;
@@ -63,765 +42,8 @@ using std::variant_npos;
using std::variant_size;
using std::variant_size_v;
using std::visit;
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#else // ABSL_USES_STD_VARIANT
-
-#include <functional>
-#include <new>
-#include <type_traits>
-#include <utility>
-
-#include "absl/base/macros.h"
-#include "absl/base/port.h"
-#include "absl/meta/type_traits.h"
-#include "absl/types/internal/variant.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// -----------------------------------------------------------------------------
-// absl::variant
-// -----------------------------------------------------------------------------
-//
-// An `absl::variant` type is a form of type-safe union. An `absl::variant` --
-// except in exceptional cases -- always holds a value of one of its alternative
-// types.
-//
-// Example:
-//
-// // Construct a variant that holds either an integer or a std::string and
-// // assign it to a std::string.
-// absl::variant<int, std::string> v = std::string("abc");
-//
-// // A default-constructed variant will hold a value-initialized value of
-// // the first alternative type.
-// auto a = absl::variant<int, std::string>(); // Holds an int of value '0'.
-//
-// // variants are assignable.
-//
-// // copy assignment
-// auto v1 = absl::variant<int, std::string>("abc");
-// auto v2 = absl::variant<int, std::string>(10);
-// v2 = v1; // copy assign
-//
-// // move assignment
-// auto v1 = absl::variant<int, std::string>("abc");
-// v1 = absl::variant<int, std::string>(10);
-//
-// // assignment through type conversion
-// a = 128; // variant contains int
-// a = "128"; // variant contains std::string
-//
-// An `absl::variant` holding a value of one of its alternative types `T` holds
-// an allocation of `T` directly within the variant itself. An `absl::variant`
-// is not allowed to allocate additional storage, such as dynamic memory, to
-// allocate the contained value. The contained value shall be allocated in a
-// region of the variant storage suitably aligned for all alternative types.
-template <typename... Ts>
-class variant;
-
-// swap()
-//
-// Swaps two `absl::variant` values. This function is equivalent to `v.swap(w)`
-// where `v` and `w` are `absl::variant` types.
-//
-// Note that this function requires all alternative types to be both swappable
-// and move-constructible, because any two variants may refer to either the same
-// type (in which case, they will be swapped) or to two different types (in
-// which case the values will need to be moved).
-//
-template <
- typename... Ts,
- absl::enable_if_t<
- absl::conjunction<std::is_move_constructible<Ts>...,
- type_traits_internal::IsSwappable<Ts>...>::value,
- int> = 0>
-void swap(variant<Ts...>& v, variant<Ts...>& w) noexcept(noexcept(v.swap(w))) {
- v.swap(w);
-}
-
-// variant_size
-//
-// Returns the number of alternative types available for a given `absl::variant`
-// type as a compile-time constant expression. As this is a class template, it
-// is not generally useful for accessing the number of alternative types of
-// any given `absl::variant` instance.
-//
-// Example:
-//
-// auto a = absl::variant<int, std::string>;
-// constexpr int num_types =
-// absl::variant_size<absl::variant<int, std::string>>();
-//
-// // You can also use the member constant `value`.
-// constexpr int num_types =
-// absl::variant_size<absl::variant<int, std::string>>::value;
-//
-// // `absl::variant_size` is more valuable for use in generic code:
-// template <typename Variant>
-// constexpr bool IsVariantMultivalue() {
-// return absl::variant_size<Variant>() > 1;
-// }
-//
-// Note that the set of cv-qualified specializations of `variant_size` are
-// provided to ensure that those specializations compile (especially when passed
-// within template logic).
-template <class T>
-struct variant_size;
-
-template <class... Ts>
-struct variant_size<variant<Ts...>>
- : std::integral_constant<std::size_t, sizeof...(Ts)> {};
-
-// Specialization of `variant_size` for const qualified variants.
-template <class T>
-struct variant_size<const T> : variant_size<T>::type {};
-
-// Specialization of `variant_size` for volatile qualified variants.
-template <class T>
-struct variant_size<volatile T> : variant_size<T>::type {};
-
-// Specialization of `variant_size` for const volatile qualified variants.
-template <class T>
-struct variant_size<const volatile T> : variant_size<T>::type {};
-
-// variant_alternative
-//
-// Returns the alternative type for a given `absl::variant` at the passed
-// index value as a compile-time constant expression. As this is a class
-// template resulting in a type, it is not useful for access of the run-time
-// value of any given `absl::variant` variable.
-//
-// Example:
-//
-// // The type of the 0th alternative is "int".
-// using alternative_type_0
-// = absl::variant_alternative<0, absl::variant<int, std::string>>::type;
-//
-// static_assert(std::is_same<alternative_type_0, int>::value, "");
-//
-// // `absl::variant_alternative` is more valuable for use in generic code:
-// template <typename Variant>
-// constexpr bool IsFirstElementTrivial() {
-// return std::is_trivial_v<variant_alternative<0, Variant>::type>;
-// }
-//
-// Note that the set of cv-qualified specializations of `variant_alternative`
-// are provided to ensure that those specializations compile (especially when
-// passed within template logic).
-template <std::size_t I, class T>
-struct variant_alternative;
-
-template <std::size_t I, class... Types>
-struct variant_alternative<I, variant<Types...>> {
- using type =
- variant_internal::VariantAlternativeSfinaeT<I, variant<Types...>>;
-};
-
-// Specialization of `variant_alternative` for const qualified variants.
-template <std::size_t I, class T>
-struct variant_alternative<I, const T> {
- using type = const typename variant_alternative<I, T>::type;
-};
-
-// Specialization of `variant_alternative` for volatile qualified variants.
-template <std::size_t I, class T>
-struct variant_alternative<I, volatile T> {
- using type = volatile typename variant_alternative<I, T>::type;
-};
-
-// Specialization of `variant_alternative` for const volatile qualified
-// variants.
-template <std::size_t I, class T>
-struct variant_alternative<I, const volatile T> {
- using type = const volatile typename variant_alternative<I, T>::type;
-};
-
-// Template type alias for variant_alternative<I, T>::type.
-//
-// Example:
-//
-// using alternative_type_0
-// = absl::variant_alternative_t<0, absl::variant<int, std::string>>;
-// static_assert(std::is_same<alternative_type_0, int>::value, "");
-template <std::size_t I, class T>
-using variant_alternative_t = typename variant_alternative<I, T>::type;
-
-// holds_alternative()
-//
-// Checks whether the given variant currently holds a given alternative type,
-// returning `true` if so.
-//
-// Example:
-//
-// absl::variant<int, std::string> foo = 42;
-// if (absl::holds_alternative<int>(foo)) {
-// std::cout << "The variant holds an integer";
-// }
-template <class T, class... Types>
-constexpr bool holds_alternative(const variant<Types...>& v) noexcept {
- static_assert(
- variant_internal::UnambiguousIndexOfImpl<variant<Types...>, T,
- 0>::value != sizeof...(Types),
- "The type T must occur exactly once in Types...");
- return v.index() ==
- variant_internal::UnambiguousIndexOf<variant<Types...>, T>::value;
-}
-
-// get()
-//
-// Returns a reference to the value currently within a given variant, using
-// either a unique alternative type amongst the variant's set of alternative
-// types, or the variant's index value. Attempting to get a variant's value
-// using a type that is not unique within the variant's set of alternative types
-// is a compile-time error. If the index of the alternative being specified is
-// different from the index of the alternative that is currently stored, throws
-// `absl::bad_variant_access`.
-//
-// Example:
-//
-// auto a = absl::variant<int, std::string>;
-//
-// // Get the value by type (if unique).
-// int i = absl::get<int>(a);
-//
-// auto b = absl::variant<int, int>;
-//
-// // Getting the value by a type that is not unique is ill-formed.
-// int j = absl::get<int>(b); // Compile Error!
-//
-// // Getting value by index not ambiguous and allowed.
-// int k = absl::get<1>(b);
-
-// Overload for getting a variant's lvalue by type.
-template <class T, class... Types>
-constexpr T& get(variant<Types...>& v) { // NOLINT
- return variant_internal::VariantCoreAccess::CheckedAccess<
- variant_internal::IndexOf<T, Types...>::value>(v);
-}
-
-// Overload for getting a variant's rvalue by type.
-template <class T, class... Types>
-constexpr T&& get(variant<Types...>&& v) {
- return variant_internal::VariantCoreAccess::CheckedAccess<
- variant_internal::IndexOf<T, Types...>::value>(std::move(v));
-}
-
-// Overload for getting a variant's const lvalue by type.
-template <class T, class... Types>
-constexpr const T& get(const variant<Types...>& v) {
- return variant_internal::VariantCoreAccess::CheckedAccess<
- variant_internal::IndexOf<T, Types...>::value>(v);
-}
-
-// Overload for getting a variant's const rvalue by type.
-template <class T, class... Types>
-constexpr const T&& get(const variant<Types...>&& v) {
- return variant_internal::VariantCoreAccess::CheckedAccess<
- variant_internal::IndexOf<T, Types...>::value>(std::move(v));
-}
-
-// Overload for getting a variant's lvalue by index.
-template <std::size_t I, class... Types>
-constexpr variant_alternative_t<I, variant<Types...>>& get(
- variant<Types...>& v) { // NOLINT
- return variant_internal::VariantCoreAccess::CheckedAccess<I>(v);
-}
-
-// Overload for getting a variant's rvalue by index.
-template <std::size_t I, class... Types>
-constexpr variant_alternative_t<I, variant<Types...>>&& get(
- variant<Types...>&& v) {
- return variant_internal::VariantCoreAccess::CheckedAccess<I>(std::move(v));
-}
-// Overload for getting a variant's const lvalue by index.
-template <std::size_t I, class... Types>
-constexpr const variant_alternative_t<I, variant<Types...>>& get(
- const variant<Types...>& v) {
- return variant_internal::VariantCoreAccess::CheckedAccess<I>(v);
-}
-
-// Overload for getting a variant's const rvalue by index.
-template <std::size_t I, class... Types>
-constexpr const variant_alternative_t<I, variant<Types...>>&& get(
- const variant<Types...>&& v) {
- return variant_internal::VariantCoreAccess::CheckedAccess<I>(std::move(v));
-}
-
-// get_if()
-//
-// Returns a pointer to the value currently stored within a given variant, if
-// present, using either a unique alternative type amongst the variant's set of
-// alternative types, or the variant's index value. If such a value does not
-// exist, returns `nullptr`.
-//
-// As with `get`, attempting to get a variant's value using a type that is not
-// unique within the variant's set of alternative types is a compile-time error.
-
-// Overload for getting a pointer to the value stored in the given variant by
-// index.
-template <std::size_t I, class... Types>
-constexpr absl::add_pointer_t<variant_alternative_t<I, variant<Types...>>>
-get_if(variant<Types...>* v) noexcept {
- return (v != nullptr && v->index() == I)
- ? std::addressof(
- variant_internal::VariantCoreAccess::Access<I>(*v))
- : nullptr;
-}
-
-// Overload for getting a pointer to the const value stored in the given
-// variant by index.
-template <std::size_t I, class... Types>
-constexpr absl::add_pointer_t<const variant_alternative_t<I, variant<Types...>>>
-get_if(const variant<Types...>* v) noexcept {
- return (v != nullptr && v->index() == I)
- ? std::addressof(
- variant_internal::VariantCoreAccess::Access<I>(*v))
- : nullptr;
-}
-
-// Overload for getting a pointer to the value stored in the given variant by
-// type.
-template <class T, class... Types>
-constexpr absl::add_pointer_t<T> get_if(variant<Types...>* v) noexcept {
- return absl::get_if<variant_internal::IndexOf<T, Types...>::value>(v);
-}
-
-// Overload for getting a pointer to the const value stored in the given variant
-// by type.
-template <class T, class... Types>
-constexpr absl::add_pointer_t<const T> get_if(
- const variant<Types...>* v) noexcept {
- return absl::get_if<variant_internal::IndexOf<T, Types...>::value>(v);
-}
-
-// visit()
-//
-// Calls a provided functor on a given set of variants. `absl::visit()` is
-// commonly used to conditionally inspect the state of a given variant (or set
-// of variants).
-//
-// The functor must return the same type when called with any of the variants'
-// alternatives.
-//
-// Example:
-//
-// // Define a visitor functor
-// struct GetVariant {
-// template<typename T>
-// void operator()(const T& i) const {
-// std::cout << "The variant's value is: " << i;
-// }
-// };
-//
-// // Declare our variant, and call `absl::visit()` on it.
-// // Note that `GetVariant()` returns void in either case.
-// absl::variant<int, std::string> foo = std::string("foo");
-// GetVariant visitor;
-// absl::visit(visitor, foo); // Prints `The variant's value is: foo'
-template <typename Visitor, typename... Variants>
-variant_internal::VisitResult<Visitor, Variants...> visit(Visitor&& vis,
- Variants&&... vars) {
- return variant_internal::
- VisitIndices<variant_size<absl::decay_t<Variants> >::value...>::Run(
- variant_internal::PerformVisitation<Visitor, Variants...>{
- std::forward_as_tuple(std::forward<Variants>(vars)...),
- std::forward<Visitor>(vis)},
- vars.index()...);
-}
-
-// monostate
-//
-// The monostate class serves as a first alternative type for a variant for
-// which the first variant type is otherwise not default-constructible.
-struct monostate {};
-
-// `absl::monostate` Relational Operators
-
-constexpr bool operator<(monostate, monostate) noexcept { return false; }
-constexpr bool operator>(monostate, monostate) noexcept { return false; }
-constexpr bool operator<=(monostate, monostate) noexcept { return true; }
-constexpr bool operator>=(monostate, monostate) noexcept { return true; }
-constexpr bool operator==(monostate, monostate) noexcept { return true; }
-constexpr bool operator!=(monostate, monostate) noexcept { return false; }
-
-
-//------------------------------------------------------------------------------
-// `absl::variant` Template Definition
-//------------------------------------------------------------------------------
-template <typename T0, typename... Tn>
-class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
- static_assert(absl::conjunction<std::is_object<T0>,
- std::is_object<Tn>...>::value,
- "Attempted to instantiate a variant containing a non-object "
- "type.");
- // Intentionally not qualifying `negation` with `absl::` to work around a bug
- // in MSVC 2015 with inline namespace and variadic template.
- static_assert(absl::conjunction<negation<std::is_array<T0> >,
- negation<std::is_array<Tn> >...>::value,
- "Attempted to instantiate a variant containing an array type.");
- static_assert(absl::conjunction<std::is_nothrow_destructible<T0>,
- std::is_nothrow_destructible<Tn>...>::value,
- "Attempted to instantiate a variant containing a non-nothrow "
- "destructible type.");
-
- friend struct variant_internal::VariantCoreAccess;
-
- private:
- using Base = variant_internal::VariantBase<T0, Tn...>;
-
- public:
- // Constructors
-
- // Constructs a variant holding a default-initialized value of the first
- // alternative type.
- constexpr variant() /*noexcept(see 111above)*/ = default;
-
- // Copy constructor, standard semantics
- variant(const variant& other) = default;
-
- // Move constructor, standard semantics
- variant(variant&& other) /*noexcept(see above)*/ = default;
-
- // Constructs a variant of an alternative type specified by overload
- // resolution of the provided forwarding arguments through
- // direct-initialization.
- //
- // Note: If the selected constructor is a constexpr constructor, this
- // constructor shall be a constexpr constructor.
- //
- // NOTE: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0608r1.html
- // has been voted passed the design phase in the C++ standard meeting in Mar
- // 2018. It will be implemented and integrated into `absl::variant`.
- template <
- class T,
- std::size_t I = std::enable_if<
- variant_internal::IsNeitherSelfNorInPlace<variant,
- absl::decay_t<T> >::value,
- variant_internal::IndexOfConstructedType<variant, T> >::type::value,
- class Tj = absl::variant_alternative_t<I, variant>,
- absl::enable_if_t<std::is_constructible<Tj, T>::value>* = nullptr>
- constexpr variant(T&& t) noexcept(std::is_nothrow_constructible<Tj, T>::value)
- : Base(variant_internal::EmplaceTag<I>(), std::forward<T>(t)) {}
-
- // Constructs a variant of an alternative type from the arguments through
- // direct-initialization.
- //
- // Note: If the selected constructor is a constexpr constructor, this
- // constructor shall be a constexpr constructor.
- template <class T, class... Args,
- typename std::enable_if<std::is_constructible<
- variant_internal::UnambiguousTypeOfT<variant, T>,
- Args...>::value>::type* = nullptr>
- constexpr explicit variant(in_place_type_t<T>, Args&&... args)
- : Base(variant_internal::EmplaceTag<
- variant_internal::UnambiguousIndexOf<variant, T>::value>(),
- std::forward<Args>(args)...) {}
-
- // Constructs a variant of an alternative type from an initializer list
- // and other arguments through direct-initialization.
- //
- // Note: If the selected constructor is a constexpr constructor, this
- // constructor shall be a constexpr constructor.
- template <class T, class U, class... Args,
- typename std::enable_if<std::is_constructible<
- variant_internal::UnambiguousTypeOfT<variant, T>,
- std::initializer_list<U>&, Args...>::value>::type* = nullptr>
- constexpr explicit variant(in_place_type_t<T>, std::initializer_list<U> il,
- Args&&... args)
- : Base(variant_internal::EmplaceTag<
- variant_internal::UnambiguousIndexOf<variant, T>::value>(),
- il, std::forward<Args>(args)...) {}
-
- // Constructs a variant of an alternative type from a provided index,
- // through value-initialization using the provided forwarded arguments.
- template <std::size_t I, class... Args,
- typename std::enable_if<std::is_constructible<
- variant_internal::VariantAlternativeSfinaeT<I, variant>,
- Args...>::value>::type* = nullptr>
- constexpr explicit variant(in_place_index_t<I>, Args&&... args)
- : Base(variant_internal::EmplaceTag<I>(), std::forward<Args>(args)...) {}
-
- // Constructs a variant of an alternative type from a provided index,
- // through value-initialization of an initializer list and the provided
- // forwarded arguments.
- template <std::size_t I, class U, class... Args,
- typename std::enable_if<std::is_constructible<
- variant_internal::VariantAlternativeSfinaeT<I, variant>,
- std::initializer_list<U>&, Args...>::value>::type* = nullptr>
- constexpr explicit variant(in_place_index_t<I>, std::initializer_list<U> il,
- Args&&... args)
- : Base(variant_internal::EmplaceTag<I>(), il,
- std::forward<Args>(args)...) {}
-
- // Destructors
-
- // Destroys the variant's currently contained value, provided that
- // `absl::valueless_by_exception()` is false.
- ~variant() = default;
-
- // Assignment Operators
-
- // Copy assignment operator
- variant& operator=(const variant& other) = default;
-
- // Move assignment operator
- variant& operator=(variant&& other) /*noexcept(see above)*/ = default;
-
- // Converting assignment operator
- //
- // NOTE: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0608r1.html
- // has been voted passed the design phase in the C++ standard meeting in Mar
- // 2018. It will be implemented and integrated into `absl::variant`.
- template <
- class T,
- std::size_t I = std::enable_if<
- !std::is_same<absl::decay_t<T>, variant>::value,
- variant_internal::IndexOfConstructedType<variant, T>>::type::value,
- class Tj = absl::variant_alternative_t<I, variant>,
- typename std::enable_if<std::is_assignable<Tj&, T>::value &&
- std::is_constructible<Tj, T>::value>::type* =
- nullptr>
- variant& operator=(T&& t) noexcept(
- std::is_nothrow_assignable<Tj&, T>::value&&
- std::is_nothrow_constructible<Tj, T>::value) {
- variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
- variant_internal::VariantCoreAccess::MakeConversionAssignVisitor(
- this, std::forward<T>(t)),
- index());
-
- return *this;
- }
-
-
- // emplace() Functions
-
- // Constructs a value of the given alternative type T within the variant. The
- // existing value of the variant is destroyed first (provided that
- // `absl::valueless_by_exception()` is false). Requires that T is unambiguous
- // in the variant.
- //
- // Example:
- //
- // absl::variant<std::vector<int>, int, std::string> v;
- // v.emplace<int>(99);
- // v.emplace<std::string>("abc");
- template <
- class T, class... Args,
- typename std::enable_if<std::is_constructible<
- absl::variant_alternative_t<
- variant_internal::UnambiguousIndexOf<variant, T>::value, variant>,
- Args...>::value>::type* = nullptr>
- T& emplace(Args&&... args) {
- return variant_internal::VariantCoreAccess::Replace<
- variant_internal::UnambiguousIndexOf<variant, T>::value>(
- this, std::forward<Args>(args)...);
- }
-
- // Constructs a value of the given alternative type T within the variant using
- // an initializer list. The existing value of the variant is destroyed first
- // (provided that `absl::valueless_by_exception()` is false). Requires that T
- // is unambiguous in the variant.
- //
- // Example:
- //
- // absl::variant<std::vector<int>, int, std::string> v;
- // v.emplace<std::vector<int>>({0, 1, 2});
- template <
- class T, class U, class... Args,
- typename std::enable_if<std::is_constructible<
- absl::variant_alternative_t<
- variant_internal::UnambiguousIndexOf<variant, T>::value, variant>,
- std::initializer_list<U>&, Args...>::value>::type* = nullptr>
- T& emplace(std::initializer_list<U> il, Args&&... args) {
- return variant_internal::VariantCoreAccess::Replace<
- variant_internal::UnambiguousIndexOf<variant, T>::value>(
- this, il, std::forward<Args>(args)...);
- }
-
- // Destroys the current value of the variant (provided that
- // `absl::valueless_by_exception()` is false) and constructs a new value at
- // the given index.
- //
- // Example:
- //
- // absl::variant<std::vector<int>, int, int> v;
- // v.emplace<1>(99);
- // v.emplace<2>(98);
- // v.emplace<int>(99); // Won't compile. 'int' isn't a unique type.
- template <std::size_t I, class... Args,
- typename std::enable_if<
- std::is_constructible<absl::variant_alternative_t<I, variant>,
- Args...>::value>::type* = nullptr>
- absl::variant_alternative_t<I, variant>& emplace(Args&&... args) {
- return variant_internal::VariantCoreAccess::Replace<I>(
- this, std::forward<Args>(args)...);
- }
-
- // Destroys the current value of the variant (provided that
- // `absl::valueless_by_exception()` is false) and constructs a new value at
- // the given index using an initializer list and the provided arguments.
- //
- // Example:
- //
- // absl::variant<std::vector<int>, int, int> v;
- // v.emplace<0>({0, 1, 2});
- template <std::size_t I, class U, class... Args,
- typename std::enable_if<std::is_constructible<
- absl::variant_alternative_t<I, variant>,
- std::initializer_list<U>&, Args...>::value>::type* = nullptr>
- absl::variant_alternative_t<I, variant>& emplace(std::initializer_list<U> il,
- Args&&... args) {
- return variant_internal::VariantCoreAccess::Replace<I>(
- this, il, std::forward<Args>(args)...);
- }
-
- // variant::valueless_by_exception()
- //
- // Returns false if and only if the variant currently holds a valid value.
- constexpr bool valueless_by_exception() const noexcept {
- return this->index_ == absl::variant_npos;
- }
-
- // variant::index()
- //
- // Returns the index value of the variant's currently selected alternative
- // type.
- constexpr std::size_t index() const noexcept { return this->index_; }
-
- // variant::swap()
- //
- // Swaps the values of two variant objects.
- //
- void swap(variant& rhs) noexcept(
- absl::conjunction<
- std::is_nothrow_move_constructible<T0>,
- std::is_nothrow_move_constructible<Tn>...,
- type_traits_internal::IsNothrowSwappable<T0>,
- type_traits_internal::IsNothrowSwappable<Tn>...>::value) {
- return variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
- variant_internal::Swap<T0, Tn...>{this, &rhs}, rhs.index());
- }
-};
-
-// We need a valid declaration of variant<> for SFINAE and overload resolution
-// to work properly above, but we don't need a full declaration since this type
-// will never be constructed. This declaration, though incomplete, suffices.
-template <>
-class variant<>;
-
-//------------------------------------------------------------------------------
-// Relational Operators
-//------------------------------------------------------------------------------
-//
-// If neither operand is in the `variant::valueless_by_exception` state:
-//
-// * If the index of both variants is the same, the relational operator
-// returns the result of the corresponding relational operator for the
-// corresponding alternative type.
-// * If the index of both variants is not the same, the relational operator
-// returns the result of that operation applied to the value of the left
-// operand's index and the value of the right operand's index.
-// * If at least one operand is in the valueless_by_exception state:
-// - A variant in the valueless_by_exception state is only considered equal
-// to another variant in the valueless_by_exception state.
-// - If exactly one operand is in the valueless_by_exception state, the
-// variant in the valueless_by_exception state is less than the variant
-// that is not in the valueless_by_exception state.
-//
-// Note: The value 1 is added to each index in the relational comparisons such
-// that the index corresponding to the valueless_by_exception state wraps around
-// to 0 (the lowest value for the index type), and the remaining indices stay in
-// the same relative order.
-
-// Equal-to operator
-template <typename... Types>
-constexpr variant_internal::RequireAllHaveEqualT<Types...> operator==(
- const variant<Types...>& a, const variant<Types...>& b) {
- return (a.index() == b.index()) &&
- variant_internal::VisitIndices<sizeof...(Types)>::Run(
- variant_internal::EqualsOp<Types...>{&a, &b}, a.index());
-}
-
-// Not equal operator
-template <typename... Types>
-constexpr variant_internal::RequireAllHaveNotEqualT<Types...> operator!=(
- const variant<Types...>& a, const variant<Types...>& b) {
- return (a.index() != b.index()) ||
- variant_internal::VisitIndices<sizeof...(Types)>::Run(
- variant_internal::NotEqualsOp<Types...>{&a, &b}, a.index());
-}
-
-// Less-than operator
-template <typename... Types>
-constexpr variant_internal::RequireAllHaveLessThanT<Types...> operator<(
- const variant<Types...>& a, const variant<Types...>& b) {
- return (a.index() != b.index())
- ? (a.index() + 1) < (b.index() + 1)
- : variant_internal::VisitIndices<sizeof...(Types)>::Run(
- variant_internal::LessThanOp<Types...>{&a, &b}, a.index());
-}
-
-// Greater-than operator
-template <typename... Types>
-constexpr variant_internal::RequireAllHaveGreaterThanT<Types...> operator>(
- const variant<Types...>& a, const variant<Types...>& b) {
- return (a.index() != b.index())
- ? (a.index() + 1) > (b.index() + 1)
- : variant_internal::VisitIndices<sizeof...(Types)>::Run(
- variant_internal::GreaterThanOp<Types...>{&a, &b},
- a.index());
-}
-
-// Less-than or equal-to operator
-template <typename... Types>
-constexpr variant_internal::RequireAllHaveLessThanOrEqualT<Types...> operator<=(
- const variant<Types...>& a, const variant<Types...>& b) {
- return (a.index() != b.index())
- ? (a.index() + 1) < (b.index() + 1)
- : variant_internal::VisitIndices<sizeof...(Types)>::Run(
- variant_internal::LessThanOrEqualsOp<Types...>{&a, &b},
- a.index());
-}
-
-// Greater-than or equal-to operator
-template <typename... Types>
-constexpr variant_internal::RequireAllHaveGreaterThanOrEqualT<Types...>
-operator>=(const variant<Types...>& a, const variant<Types...>& b) {
- return (a.index() != b.index())
- ? (a.index() + 1) > (b.index() + 1)
- : variant_internal::VisitIndices<sizeof...(Types)>::Run(
- variant_internal::GreaterThanOrEqualsOp<Types...>{&a, &b},
- a.index());
-}
-
-ABSL_NAMESPACE_END
-} // namespace absl
-
-namespace std {
-
-// hash()
-template <> // NOLINT
-struct hash<absl::monostate> {
- std::size_t operator()(absl::monostate) const { return 0; }
-};
-
-template <class... T> // NOLINT
-struct hash<absl::variant<T...>>
- : absl::variant_internal::VariantHashBase<absl::variant<T...>, void,
- absl::remove_const_t<T>...> {};
-
-} // namespace std
-
-#endif // ABSL_USES_STD_VARIANT
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
namespace variant_internal {
-
// Helper visitor for converting a variant<Ts...>` into another type (mostly
// variant) that can be constructed from any type.
template <typename To>
@@ -831,7 +53,6 @@ struct ConversionVisitor {
return To(std::forward<T>(v));
}
};
-
} // namespace variant_internal
// ConvertVariantTo()
diff --git a/contrib/restricted/abseil-cpp/absl/utility/internal/if_constexpr.h b/contrib/restricted/abseil-cpp/absl/utility/internal/if_constexpr.h
deleted file mode 100644
index 7a26311daa1..00000000000
--- a/contrib/restricted/abseil-cpp/absl/utility/internal/if_constexpr.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2023 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// The IfConstexpr and IfConstexprElse utilities in this file are meant to be
-// used to emulate `if constexpr` in pre-C++17 mode in library implementation.
-// The motivation is to allow for avoiding complex SFINAE.
-//
-// The functions passed in must depend on the type(s) of the object(s) that
-// require SFINAE. For example:
-// template<typename T>
-// int MaybeFoo(T& t) {
-// if constexpr (HasFoo<T>::value) return t.foo();
-// return 0;
-// }
-//
-// can be written in pre-C++17 as:
-//
-// template<typename T>
-// int MaybeFoo(T& t) {
-// int i = 0;
-// absl::utility_internal::IfConstexpr<HasFoo<T>::value>(
-// [&](const auto& fooer) { i = fooer.foo(); }, t);
-// return i;
-// }
-
-#ifndef ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_
-#define ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_
-
-#include <tuple>
-#include <utility>
-
-#include "absl/base/config.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-namespace utility_internal {
-
-template <bool condition, typename TrueFunc, typename FalseFunc,
- typename... Args>
-auto IfConstexprElse(TrueFunc&& true_func, FalseFunc&& false_func,
- Args&&... args) {
- return std::get<condition>(std::forward_as_tuple(
- std::forward<FalseFunc>(false_func), std::forward<TrueFunc>(true_func)))(
- std::forward<Args>(args)...);
-}
-
-template <bool condition, typename Func, typename... Args>
-void IfConstexpr(Func&& func, Args&&... args) {
- IfConstexprElse<condition>(std::forward<Func>(func), [](auto&&...){},
- std::forward<Args>(args)...);
-}
-
-} // namespace utility_internal
-
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_
diff --git a/contrib/restricted/abseil-cpp/absl/utility/utility.h b/contrib/restricted/abseil-cpp/absl/utility/utility.h
index ebbb49b7159..4637b03df15 100644
--- a/contrib/restricted/abseil-cpp/absl/utility/utility.h
+++ b/contrib/restricted/abseil-cpp/absl/utility/utility.h
@@ -11,25 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
-// This header file contains C++14 versions of standard <utility> header
-// abstractions available within C++17, and are designed to be drop-in
-// replacement for code compliant with C++14 and C++17.
-//
-// The following abstractions are defined:
-//
-// * apply<Functor, Tuple> == std::apply<Functor, Tuple>
-// * exchange<T> == std::exchange<T>
-// * make_from_tuple<T> == std::make_from_tuple<T>
-//
-// This header file also provides the tag types `in_place_t`, `in_place_type_t`,
-// and `in_place_index_t`, as well as the constant `in_place`, and
-// `constexpr` `std::move()` and `std::forward()` implementations in C++11.
-//
-// References:
-//
-// https://en.cppreference.com/w/cpp/utility/apply
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3658.html
#ifndef ABSL_UTILITY_UTILITY_H_
#define ABSL_UTILITY_UTILITY_H_
@@ -40,8 +21,8 @@
#include <utility>
#include "absl/base/config.h"
-#include "absl/base/internal/inline_variable.h"
-#include "absl/base/internal/invoke.h"
+
+// TODO(b/290784225): Include what you use cleanup required.
#include "absl/meta/type_traits.h"
namespace absl {
@@ -51,179 +32,23 @@ ABSL_NAMESPACE_BEGIN
// abstractions for platforms that had not yet provided them. Those
// platforms are no longer supported. New code should simply use the
// the ones from std directly.
+using std::apply;
using std::exchange;
using std::forward;
+using std::in_place;
+using std::in_place_index;
+using std::in_place_index_t;
+using std::in_place_t;
+using std::in_place_type;
+using std::in_place_type_t;
using std::index_sequence;
using std::index_sequence_for;
using std::integer_sequence;
+using std::make_from_tuple;
using std::make_index_sequence;
using std::make_integer_sequence;
using std::move;
-namespace utility_internal {
-
-template <typename T>
-struct InPlaceTypeTag {
- explicit InPlaceTypeTag() = delete;
- InPlaceTypeTag(const InPlaceTypeTag&) = delete;
- InPlaceTypeTag& operator=(const InPlaceTypeTag&) = delete;
-};
-
-template <size_t I>
-struct InPlaceIndexTag {
- explicit InPlaceIndexTag() = delete;
- InPlaceIndexTag(const InPlaceIndexTag&) = delete;
- InPlaceIndexTag& operator=(const InPlaceIndexTag&) = delete;
-};
-
-} // namespace utility_internal
-
-// Tag types
-
-#ifdef ABSL_USES_STD_OPTIONAL
-
-using std::in_place_t;
-using std::in_place;
-
-#else // ABSL_USES_STD_OPTIONAL
-
-// in_place_t
-//
-// Tag type used to specify in-place construction, such as with
-// `absl::optional`, designed to be a drop-in replacement for C++17's
-// `std::in_place_t`.
-struct in_place_t {};
-
-ABSL_INTERNAL_INLINE_CONSTEXPR(in_place_t, in_place, {});
-
-#endif // ABSL_USES_STD_OPTIONAL
-
-#if defined(ABSL_USES_STD_ANY) || defined(ABSL_USES_STD_VARIANT)
-using std::in_place_type;
-using std::in_place_type_t;
-#else
-
-// in_place_type_t
-//
-// Tag type used for in-place construction when the type to construct needs to
-// be specified, such as with `absl::any`, designed to be a drop-in replacement
-// for C++17's `std::in_place_type_t`.
-template <typename T>
-using in_place_type_t = void (*)(utility_internal::InPlaceTypeTag<T>);
-
-template <typename T>
-void in_place_type(utility_internal::InPlaceTypeTag<T>) {}
-#endif // ABSL_USES_STD_ANY || ABSL_USES_STD_VARIANT
-
-#ifdef ABSL_USES_STD_VARIANT
-using std::in_place_index;
-using std::in_place_index_t;
-#else
-
-// in_place_index_t
-//
-// Tag type used for in-place construction when the type to construct needs to
-// be specified, such as with `absl::any`, designed to be a drop-in replacement
-// for C++17's `std::in_place_index_t`.
-template <size_t I>
-using in_place_index_t = void (*)(utility_internal::InPlaceIndexTag<I>);
-
-template <size_t I>
-void in_place_index(utility_internal::InPlaceIndexTag<I>) {}
-#endif // ABSL_USES_STD_VARIANT
-
-namespace utility_internal {
-// Helper method for expanding tuple into a called method.
-template <typename Functor, typename Tuple, std::size_t... Indexes>
-auto apply_helper(Functor&& functor, Tuple&& t, index_sequence<Indexes...>)
- -> decltype(absl::base_internal::invoke(
- absl::forward<Functor>(functor),
- std::get<Indexes>(absl::forward<Tuple>(t))...)) {
- return absl::base_internal::invoke(
- absl::forward<Functor>(functor),
- std::get<Indexes>(absl::forward<Tuple>(t))...);
-}
-
-} // namespace utility_internal
-
-// apply
-//
-// Invokes a Callable using elements of a tuple as its arguments.
-// Each element of the tuple corresponds to an argument of the call (in order).
-// Both the Callable argument and the tuple argument are perfect-forwarded.
-// For member-function Callables, the first tuple element acts as the `this`
-// pointer. `absl::apply` is designed to be a drop-in replacement for C++17's
-// `std::apply`. Unlike C++17's `std::apply`, this is not currently `constexpr`.
-//
-// Example:
-//
-// class Foo {
-// public:
-// void Bar(int);
-// };
-// void user_function1(int, std::string);
-// void user_function2(std::unique_ptr<Foo>);
-// auto user_lambda = [](int, int) {};
-//
-// int main()
-// {
-// std::tuple<int, std::string> tuple1(42, "bar");
-// // Invokes the first user function on int, std::string.
-// absl::apply(&user_function1, tuple1);
-//
-// std::tuple<std::unique_ptr<Foo>> tuple2(absl::make_unique<Foo>());
-// // Invokes the user function that takes ownership of the unique
-// // pointer.
-// absl::apply(&user_function2, std::move(tuple2));
-//
-// auto foo = absl::make_unique<Foo>();
-// std::tuple<Foo*, int> tuple3(foo.get(), 42);
-// // Invokes the method Bar on foo with one argument, 42.
-// absl::apply(&Foo::Bar, tuple3);
-//
-// std::tuple<int, int> tuple4(8, 9);
-// // Invokes a lambda.
-// absl::apply(user_lambda, tuple4);
-// }
-template <typename Functor, typename Tuple>
-auto apply(Functor&& functor, Tuple&& t)
- -> decltype(utility_internal::apply_helper(
- absl::forward<Functor>(functor), absl::forward<Tuple>(t),
- absl::make_index_sequence<std::tuple_size<
- typename std::remove_reference<Tuple>::type>::value>{})) {
- return utility_internal::apply_helper(
- absl::forward<Functor>(functor), absl::forward<Tuple>(t),
- absl::make_index_sequence<std::tuple_size<
- typename std::remove_reference<Tuple>::type>::value>{});
-}
-
-namespace utility_internal {
-template <typename T, typename Tuple, size_t... I>
-T make_from_tuple_impl(Tuple&& tup, absl::index_sequence<I...>) {
- return T(std::get<I>(std::forward<Tuple>(tup))...);
-}
-} // namespace utility_internal
-
-// make_from_tuple
-//
-// Given the template parameter type `T` and a tuple of arguments
-// `std::tuple(arg0, arg1, ..., argN)` constructs an object of type `T` as if by
-// calling `T(arg0, arg1, ..., argN)`.
-//
-// Example:
-//
-// std::tuple<const char*, size_t> args("hello world", 5);
-// auto s = absl::make_from_tuple<std::string>(args);
-// assert(s == "hello");
-//
-template <typename T, typename Tuple>
-constexpr T make_from_tuple(Tuple&& tup) {
- return utility_internal::make_from_tuple_impl<T>(
- std::forward<Tuple>(tup),
- absl::make_index_sequence<
- std::tuple_size<absl::decay_t<Tuple>>::value>{});
-}
-
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/patches/no-icu-windows.patch b/contrib/restricted/abseil-cpp/patches/no-icu-windows.patch
new file mode 100644
index 00000000000..cdc4962f982
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/patches/no-icu-windows.patch
@@ -0,0 +1,34 @@
+--- contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc (index)
++++ contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc (working tree)
+@@ -32,31 +32,6 @@
+ #error #include <zircon/types.h>
+ #endif
+
+-#if defined(_WIN32)
+-// Include only when <icu.h> is available.
+-// https://learn.microsoft.com/en-us/windows/win32/intl/international-components-for-unicode--icu-
+-// https://devblogs.microsoft.com/oldnewthing/20210527-00/?p=105255
+-#if defined(__has_include)
+-#if __has_include(<icu.h>)
+-#define USE_WIN32_LOCAL_TIME_ZONE
+-#include <windows.h>
+-#pragma push_macro("_WIN32_WINNT")
+-#pragma push_macro("NTDDI_VERSION")
+-// Minimum _WIN32_WINNT and NTDDI_VERSION to use ucal_getTimeZoneIDForWindowsID
+-#undef _WIN32_WINNT
+-#define _WIN32_WINNT 0x0A00 // == _WIN32_WINNT_WIN10
+-#undef NTDDI_VERSION
+-#define NTDDI_VERSION 0x0A000004 // == NTDDI_WIN10_RS3
+-#include <icu.h>
+-#pragma pop_macro("NTDDI_VERSION")
+-#pragma pop_macro("_WIN32_WINNT")
+-#include <timezoneapi.h>
+-
+-#include <atomic>
+-#endif // __has_include(<icu.h>)
+-#endif // __has_include
+-#endif // _WIN32
+-
+ #include <array>
+ #include <cstdint>
+ #include <cstdlib>
diff --git a/contrib/restricted/abseil-cpp/ya.make b/contrib/restricted/abseil-cpp/ya.make
index c229bae7b62..724c19c8e91 100644
--- a/contrib/restricted/abseil-cpp/ya.make
+++ b/contrib/restricted/abseil-cpp/ya.make
@@ -9,9 +9,9 @@ LICENSE(
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20250127.1)
+VERSION(20250512.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20250127.1.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20250512.0.tar.gz)
PEERDIR(
library/cpp/sanitizer/include
@@ -98,7 +98,6 @@ SRCS(
absl/log/internal/proto.cc
absl/log/internal/structured_proto.cc
absl/log/internal/vlog_config.cc
- absl/log/log_entry.cc
absl/log/log_sink.cc
absl/numeric/int128.cc
absl/profiling/internal/exponential_biased.cc
@@ -107,7 +106,7 @@ SRCS(
absl/random/gaussian_distribution.cc
absl/random/internal/chi_square.cc
absl/random/internal/distribution_test_util.cc
- absl/random/internal/pool_urbg.cc
+ absl/random/internal/entropy_pool.cc
absl/random/internal/randen.cc
absl/random/internal/randen_detect.cc
absl/random/internal/randen_hwaes.cc
@@ -124,7 +123,6 @@ SRCS(
absl/strings/charconv.cc
absl/strings/cord.cc
absl/strings/cord_analysis.cc
- absl/strings/cord_buffer.cc
absl/strings/escaping.cc
absl/strings/internal/charconv_bigint.cc
absl/strings/internal/charconv_parse.cc
@@ -186,9 +184,6 @@ SRCS(
absl/time/internal/cctz/src/time_zone_posix.cc
absl/time/internal/cctz/src/zone_info_source.cc
absl/time/time.cc
- absl/types/bad_any_cast.cc
- absl/types/bad_optional_access.cc
- absl/types/bad_variant_access.cc
)
END()