author      prime <prime@yandex-team.ru>    2022-02-10 16:46:00 +0300
committer   Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:46:00 +0300
commit      3695a7cd42b74a4987d8d5a8f2e2443556998943 (patch)
tree        ee79ee9294a61ee00e647684b3700d0a87e102a3 /contrib
parent      4d8b546b89b5afc08cf3667e176271c7ba935f33 (diff)
download    ydb-3695a7cd42b74a4987d8d5a8f2e2443556998943.tar.gz
Restoring authorship annotation for <prime@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib')
-rw-r--r--contrib/libs/grpc/include/grpc/grpc.h6
-rw-r--r--contrib/libs/grpc/src/compiler/generator_helpers.h10
-rw-r--r--contrib/libs/grpc/src/core/tsi/ssl_transport_security.cc20
-rw-r--r--contrib/libs/grpc/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi2
-rw-r--r--contrib/libs/grpc/src/python/grpcio/grpc/_cython/cygrpc.pyx4
-rw-r--r--contrib/libs/protobuf/src/google/protobuf/descriptor.pb.cc2
-rw-r--r--contrib/libs/sqlite3/ya.make2
-rw-r--r--contrib/libs/tcmalloc/common.inc116
-rw-r--r--contrib/libs/tcmalloc/default/ya.make38
-rw-r--r--contrib/libs/tcmalloc/malloc_extension/ya.make50
-rw-r--r--contrib/libs/tcmalloc/numa_256k/ya.make50
-rw-r--r--contrib/libs/tcmalloc/numa_large_pages/ya.make56
-rw-r--r--contrib/libs/tcmalloc/patches/fork.patch620
-rw-r--r--contrib/libs/tcmalloc/patches/yandex.patch178
-rw-r--r--contrib/libs/tcmalloc/slow_but_small/ya.make30
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/BUILD2632
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/arena.cc66
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/arena.h16
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/arena_test.cc76
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/background.cc154
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/central_freelist.cc170
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/central_freelist.h48
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/central_freelist_benchmark.cc396
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/central_freelist_test.cc18
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/common.cc60
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/common.h262
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc1016
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/cpu_cache.h276
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/cpu_cache_test.cc864
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/experiment.cc68
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/experiment.h18
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/experiment_config.h16
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/experiment_fuzz.cc4
-rwxr-xr-xcontrib/libs/tcmalloc/tcmalloc/experimental_pow2_below64_size_class.cc1358
-rwxr-xr-xcontrib/libs/tcmalloc/tcmalloc/experimental_pow2_size_class.cc478
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator.cc42
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator.h10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator_benchmark.cc16
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator_test.cc78
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/heap_profiling_test.cc18
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_address_map.cc10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_address_map.h10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_address_map_test.cc4
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_allocator.cc10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_allocator.h12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_allocator_test.cc4
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_cache.cc16
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_cache.h14
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_cache_test.cc14
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator.cc114
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator.h20
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator_test.cc12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_page_filler.h258
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_page_filler_test.cc50
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_pages.h92
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_region.h66
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/huge_region_test.cc22
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/atomic_danger.h8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/atomic_stats_counter.h6
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/cache_topology.cc176
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/cache_topology.h72
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/cache_topology_test.cc102
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/clock.h82
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/config.h130
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/environment.cc4
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/environment.h8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/lifetime_predictions.h504
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/lifetime_predictions_test.cc312
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/lifetime_tracker.h344
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/lifetime_tracker_test.cc258
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/linked_list.h16
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/linked_list_benchmark.cc8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/linked_list_test.cc12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/logging.cc20
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/logging.h38
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/logging_test.cc16
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/logging_test_helper.cc36
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/memory_stats.cc6
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/memory_stats.h8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/mincore.cc20
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/mincore.h14
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/mincore_benchmark.cc4
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/mincore_test.cc16
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/numa.cc440
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/numa.h454
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/numa_test.cc568
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/optimization.h16
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/parameter_accessors.h12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/percpu.cc126
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/percpu.h90
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_aarch64.S238
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_ppc.S176
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_unsupported.cc36
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_x86_64.S152
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/percpu_tcmalloc.h1084
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/percpu_tcmalloc_test.cc114
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/proc_maps.cc12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/proc_maps.h8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/range_tracker.h96
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/range_tracker_benchmark.cc8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/range_tracker_test.cc50
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/timeseries_tracker.h76
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/timeseries_tracker_test.cc14
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/util.cc4
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/util.h24
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h22
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/legacy_size_classes.cc126
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/libc_override.h4
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/libc_override_gcc_and_weak.h4
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/libc_override_redefine.h88
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc108
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/malloc_extension.h72
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/malloc_extension_test.cc24
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/mock_central_freelist.cc22
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/mock_central_freelist.h24
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/mock_transfer_cache.cc4
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/mock_transfer_cache.h286
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/noruntime_size_classes.cc8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/page_allocator.cc106
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/page_allocator.h74
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/page_allocator_interface.cc12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/page_allocator_interface.h10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/page_allocator_test.cc6
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/page_allocator_test_util.h8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/page_heap.cc14
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/page_heap.h12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/page_heap_allocator.h18
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/page_heap_test.cc30
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/pagemap.cc8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/pagemap.h60
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/pagemap_test.cc20
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/pages.h64
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/parameters.cc150
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/parameters.h34
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.h12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/profile_test.cc74
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/realloc_test.cc2
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/runtime_size_classes.cc18
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/runtime_size_classes.h12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/runtime_size_classes_fuzz.cc10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/runtime_size_classes_test.cc8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/sampler.cc8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/sampler.h8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/size_class_info.h8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/size_classes.cc126
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/size_classes_test.cc196
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/size_classes_with_runtime_size_classes_test.cc30
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/span.cc228
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/span.h448
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/span_benchmark.cc10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/span_stats.h10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/span_test.cc18
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc20
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/stack_trace_table.h10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/stack_trace_table_test.cc4
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/static_vars.cc50
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/static_vars.h64
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/stats.cc38
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/stats.h22
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/stats_test.cc20
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/system-alloc.cc160
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/system-alloc.h14
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/system-alloc_test.cc24
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc682
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/tcmalloc.h12
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/tcmalloc_large_test.cc8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/tcmalloc_policy.h178
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/thread_cache.cc14
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/thread_cache.h8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/thread_cache_test.cc2
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/tracking.h16
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/transfer_cache.cc210
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/transfer_cache.h438
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/transfer_cache_benchmark.cc46
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/transfer_cache_fuzz.cc50
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h1270
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/transfer_cache_stats.h8
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/transfer_cache_test.cc740
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/want_hpaa.cc10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/want_hpaa_subrelease.cc10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/want_legacy_spans.cc10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/want_no_hpaa.cc10
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/want_numa_aware.cc56
-rw-r--r--contrib/libs/tcmalloc/ya.make30
-rw-r--r--contrib/libs/ya.make2
-rw-r--r--contrib/restricted/abseil-cpp/.yandex_meta/devtools.copyrights.report966
-rw-r--r--contrib/restricted/abseil-cpp/.yandex_meta/devtools.licenses.report1092
-rw-r--r--contrib/restricted/abseil-cpp/README.md68
-rw-r--r--contrib/restricted/abseil-cpp/absl/algorithm/container.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/attributes.h48
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/call_once.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/config.h66
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/dynamic_annotations.h48
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/direct_mmap.h8
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/endian.h122
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/low_level_scheduling.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.cc68
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.h32
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc22
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h28
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/spinlock_akaros.inc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/spinlock_linux.inc6
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/spinlock_posix.inc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.h22
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/spinlock_win32.inc10
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/strerror.cc2
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.cc2
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.cc18
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.h108
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.cc228
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/log_severity.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/macros.h22
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/optimization.h20
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/options.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/base/thread_annotations.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/city/ya.make14
-rw-r--r--contrib/restricted/abseil-cpp/absl/cleanup/cleanup.h272
-rw-r--r--contrib/restricted/abseil-cpp/absl/cleanup/internal/cleanup.h142
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/btree_map.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/fixed_array.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/inlined_vector.h8
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make70
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/btree.h428
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h106
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc10
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h160
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/layout.h8
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc14
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h246
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make72
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/container/node_hash_set.h30
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler.cc56
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler/ya.make70
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/address_is_readable.cc10
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc32
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/examine_stack.cc34
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc10
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_config.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc12
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/leak_check.cc32
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/leak_check.h42
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/symbolize/ya.make70
-rw-r--r--contrib/restricted/abseil-cpp/absl/debugging/symbolize_elf.inc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/commandlineflag/ya.make72
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/flag.h12
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc98
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/flag.h78
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/flag/ya.make72
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/registry.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/sequence_lock.h372
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/usage.cc412
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/usage.h62
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/internal/usage/ya.make14
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/marshalling.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/marshalling/ya.make70
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/parse.cc10
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/reflection.cc38
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/reflection.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/reflection/ya.make12
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/usage/ya.make2
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/usage_config.cc6
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/usage_config.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/flags/ya.make70
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/city.cc10
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc24
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/hash.h114
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc200
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h72
-rw-r--r--contrib/restricted/abseil-cpp/absl/hash/ya.make70
-rw-r--r--contrib/restricted/abseil-cpp/absl/meta/type_traits.h30
-rw-r--r--contrib/restricted/abseil-cpp/absl/numeric/bits.h354
-rw-r--r--contrib/restricted/abseil-cpp/absl/numeric/int128.cc6
-rw-r--r--contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h716
-rw-r--r--contrib/restricted/abseil-cpp/absl/numeric/internal/representation.h110
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/explicit_seed_seq.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/fastmath.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/generate_real.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/mock_helpers.h26
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/mock_overload_set.h22
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/pcg_engine.h8
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/randen.cc2
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/randen.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/randen_detect.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/randen_engine.h10
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/randen_hwaes.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/randen_slow.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/randen_traits.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/internal/wide_multiply.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/log_uniform_int_distribution.h10
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/mocking_bit_gen.h116
-rw-r--r--contrib/restricted/abseil-cpp/absl/random/uniform_int_distribution.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/internal/status_internal.h26
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h6
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/status.cc36
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/status.h152
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/statusor.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/status/statusor/ya.make72
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/charconv.cc10
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/cord.cc332
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/cord.h262
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/cord/ya.make76
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/escaping.cc8
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal/ya.make4
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/charconv_parse.cc12
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.cc166
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h646
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_flat.h278
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring.cc1432
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring.h1130
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h228
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle/ya.make70
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info/ya.make70
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/cordz_sample_token/ya.make70
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_format/bind.cc2
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_format/bind.h10
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc44
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_format/ya.make68
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/str_split_internal.h26
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/internal/string_constant.h128
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/match.cc12
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/match.h32
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/numbers.cc264
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/numbers.h20
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/str_join.h2
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/str_split.h76
-rw-r--r--contrib/restricted/abseil-cpp/absl/strings/ya.make4
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/futex.h30
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc2
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc4
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h8
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc138
-rw-r--r--contrib/restricted/abseil-cpp/absl/synchronization/mutex.h152
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/clock.cc292
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/clock.h4
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h14
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc14
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h8
-rw-r--r--contrib/restricted/abseil-cpp/absl/time/time.h42
-rw-r--r--contrib/restricted/abseil-cpp/absl/types/variant.h18
-rw-r--r--contrib/restricted/abseil-cpp/ya.make4
-rw-r--r--contrib/restricted/ya.make2
348 files changed, 18924 insertions, 18924 deletions
diff --git a/contrib/libs/grpc/include/grpc/grpc.h b/contrib/libs/grpc/include/grpc/grpc.h
index 6001117521..f7f9ad68b2 100644
--- a/contrib/libs/grpc/include/grpc/grpc.h
+++ b/contrib/libs/grpc/include/grpc/grpc.h
@@ -58,9 +58,9 @@ GRPCAPI void grpc_call_details_destroy(grpc_call_details* details);
the reverse order they were initialized. */
GRPCAPI void grpc_register_plugin(void (*init)(void), void (*destroy)(void));
-GRPCAPI void grpc_init_openssl(void);
-GRPCAPI void grpc_dont_init_openssl(void);
-
+GRPCAPI void grpc_init_openssl(void);
+GRPCAPI void grpc_dont_init_openssl(void);
+
/** Initialize the grpc library.
After it's called, a matching invocation to grpc_shutdown() is expected.
diff --git a/contrib/libs/grpc/src/compiler/generator_helpers.h b/contrib/libs/grpc/src/compiler/generator_helpers.h
index 265713a16b..64d98e722d 100644
--- a/contrib/libs/grpc/src/compiler/generator_helpers.h
+++ b/contrib/libs/grpc/src/compiler/generator_helpers.h
@@ -26,7 +26,7 @@
#include <vector>
#include <util/generic/string.h>
-#include <util/string/split.h>
+#include <util/string/split.h>
#include <util/stream/str.h>
#include "src/compiler/config.h"
@@ -183,10 +183,10 @@ inline void Split(const TStringType& s, char /*delim*/,
template <>
inline void Split(const TString &s, char delim,
std::vector<TString> *append_to) {
- TVector<TString> parts;
- Split(s, TString(1, delim), parts);
- for (auto& p : parts) {
- append_to->push_back(std::move(p));
+ TVector<TString> parts;
+ Split(s, TString(1, delim), parts);
+ for (auto& p : parts) {
+ append_to->push_back(std::move(p));
}
}
diff --git a/contrib/libs/grpc/src/core/tsi/ssl_transport_security.cc b/contrib/libs/grpc/src/core/tsi/ssl_transport_security.cc
index 374f8e583f..963cdfe7d4 100644
--- a/contrib/libs/grpc/src/core/tsi/ssl_transport_security.cc
+++ b/contrib/libs/grpc/src/core/tsi/ssl_transport_security.cc
@@ -189,16 +189,16 @@ static void init_openssl(void) {
GPR_ASSERT(g_ssl_ctx_ex_factory_index != -1);
}
-static void do_nothing(void) {}
-
-extern "C" void grpc_dont_init_openssl(void) {
- gpr_once_init(&g_init_openssl_once, do_nothing);
-}
-
-extern "C" void grpc_init_openssl(void) {
- gpr_once_init(&g_init_openssl_once, init_openssl);
-}
-
+static void do_nothing(void) {}
+
+extern "C" void grpc_dont_init_openssl(void) {
+ gpr_once_init(&g_init_openssl_once, do_nothing);
+}
+
+extern "C" void grpc_init_openssl(void) {
+ gpr_once_init(&g_init_openssl_once, init_openssl);
+}
+
/* --- Ssl utils. ---*/
static const char* ssl_error_string(int error) {
diff --git a/contrib/libs/grpc/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/contrib/libs/grpc/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
index 54eb7fdffc..e004943303 100644
--- a/contrib/libs/grpc/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
+++ b/contrib/libs/grpc/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
@@ -333,7 +333,7 @@ cdef extern from "grpc/grpc.h":
void * reserved
grpc_op_data data
- void grpc_dont_init_openssl() nogil
+ void grpc_dont_init_openssl() nogil
void grpc_init() nogil
void grpc_shutdown_blocking() nogil
int grpc_is_initialized() nogil
diff --git a/contrib/libs/grpc/src/python/grpcio/grpc/_cython/cygrpc.pyx b/contrib/libs/grpc/src/python/grpcio/grpc/_cython/cygrpc.pyx
index 8d355c6fbb..a983081018 100644
--- a/contrib/libs/grpc/src/python/grpcio/grpc/_cython/cygrpc.pyx
+++ b/contrib/libs/grpc/src/python/grpcio/grpc/_cython/cygrpc.pyx
@@ -92,8 +92,8 @@ cdef _initialize():
# We have Python callbacks called by c-core threads, this ensures the GIL
# is initialized.
PyEval_InitThreads()
- import ssl
- grpc_dont_init_openssl()
+ import ssl
+ grpc_dont_init_openssl()
# Load Arcadia certs in ComputePemRootCerts and do not override here.
_initialize()
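
For context, a minimal C++ sketch of how the two hooks restored above are normally used by a host application that manages OpenSSL itself, mirroring what the Cython binding does after "import ssl". Only grpc_dont_init_openssl(), grpc_init() and grpc_shutdown() are taken from grpc/grpc.h as shown in this commit; the surrounding program is a hypothetical example, not part of the change.

#include <grpc/grpc.h>

int main() {
  // The host application initializes OpenSSL on its own, so tell gRPC to
  // skip its OpenSSL initialization. Call this before grpc_init() and
  // before creating any TLS credentials or secure channels.
  grpc_dont_init_openssl();

  grpc_init();
  // ... create channels, servers, credentials ...
  grpc_shutdown();
  return 0;
}
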
diff --git a/contrib/libs/protobuf/src/google/protobuf/descriptor.pb.cc b/contrib/libs/protobuf/src/google/protobuf/descriptor.pb.cc
index 2e91b7c1ab..6877463905 100644
--- a/contrib/libs/protobuf/src/google/protobuf/descriptor.pb.cc
+++ b/contrib/libs/protobuf/src/google/protobuf/descriptor.pb.cc
@@ -6863,7 +6863,7 @@ void FileOptions::MergeFrom(const FileOptions& from) {
}
if (cached_has_bits & 0x00020000u) {
deprecated_ = from.deprecated_;
- }
+ }
if (cached_has_bits & 0x00040000u) {
optimize_for_ = from.optimize_for_;
}
diff --git a/contrib/libs/sqlite3/ya.make b/contrib/libs/sqlite3/ya.make
index e6b26dccc3..561f73b87b 100644
--- a/contrib/libs/sqlite3/ya.make
+++ b/contrib/libs/sqlite3/ya.make
@@ -24,7 +24,7 @@ ADDINCL(
NO_COMPILER_WARNINGS()
-NO_RUNTIME()
+NO_RUNTIME()
CFLAGS(
-DBUILD_sqlite
diff --git a/contrib/libs/tcmalloc/common.inc b/contrib/libs/tcmalloc/common.inc
index 077942c387..3c318bef14 100644
--- a/contrib/libs/tcmalloc/common.inc
+++ b/contrib/libs/tcmalloc/common.inc
@@ -1,58 +1,58 @@
-GLOBAL_SRCS(
- # TCMalloc
- tcmalloc/tcmalloc.cc
-
- # Common Sources
- tcmalloc/arena.cc
- tcmalloc/background.cc
- tcmalloc/central_freelist.cc
- tcmalloc/common.cc
- tcmalloc/cpu_cache.cc
- tcmalloc/experimental_pow2_below64_size_class.cc
- tcmalloc/experimental_pow2_size_class.cc
- tcmalloc/legacy_size_classes.cc
- tcmalloc/guarded_page_allocator.cc
- tcmalloc/huge_address_map.cc
- tcmalloc/huge_allocator.cc
- tcmalloc/huge_cache.cc
- tcmalloc/huge_page_aware_allocator.cc
- tcmalloc/page_allocator.cc
- tcmalloc/page_allocator_interface.cc
- tcmalloc/page_heap.cc
- tcmalloc/pagemap.cc
- tcmalloc/parameters.cc
- tcmalloc/peak_heap_tracker.cc
- tcmalloc/sampler.cc
- tcmalloc/size_classes.cc
- tcmalloc/span.cc
- tcmalloc/stack_trace_table.cc
- tcmalloc/static_vars.cc
- tcmalloc/stats.cc
- tcmalloc/system-alloc.cc
- tcmalloc/thread_cache.cc
- tcmalloc/transfer_cache.cc
-
- # Common deps
- tcmalloc/experiment.cc
- tcmalloc/noruntime_size_classes.cc
-
- # Internal libraries
- tcmalloc/internal/cache_topology.cc
- tcmalloc/internal/environment.cc
- tcmalloc/internal/logging.cc
- tcmalloc/internal/memory_stats.cc
- tcmalloc/internal/mincore.cc
- tcmalloc/internal/numa.cc
- tcmalloc/internal/percpu.cc
- tcmalloc/internal/percpu_rseq_asm.S
- tcmalloc/internal/percpu_rseq_unsupported.cc
- tcmalloc/internal/util.cc
-)
-
-PEERDIR(
- contrib/restricted/abseil-cpp
- contrib/libs/tcmalloc/malloc_extension
-)
-
-NO_UTIL()
-NO_COMPILER_WARNINGS()
+GLOBAL_SRCS(
+ # TCMalloc
+ tcmalloc/tcmalloc.cc
+
+ # Common Sources
+ tcmalloc/arena.cc
+ tcmalloc/background.cc
+ tcmalloc/central_freelist.cc
+ tcmalloc/common.cc
+ tcmalloc/cpu_cache.cc
+ tcmalloc/experimental_pow2_below64_size_class.cc
+ tcmalloc/experimental_pow2_size_class.cc
+ tcmalloc/legacy_size_classes.cc
+ tcmalloc/guarded_page_allocator.cc
+ tcmalloc/huge_address_map.cc
+ tcmalloc/huge_allocator.cc
+ tcmalloc/huge_cache.cc
+ tcmalloc/huge_page_aware_allocator.cc
+ tcmalloc/page_allocator.cc
+ tcmalloc/page_allocator_interface.cc
+ tcmalloc/page_heap.cc
+ tcmalloc/pagemap.cc
+ tcmalloc/parameters.cc
+ tcmalloc/peak_heap_tracker.cc
+ tcmalloc/sampler.cc
+ tcmalloc/size_classes.cc
+ tcmalloc/span.cc
+ tcmalloc/stack_trace_table.cc
+ tcmalloc/static_vars.cc
+ tcmalloc/stats.cc
+ tcmalloc/system-alloc.cc
+ tcmalloc/thread_cache.cc
+ tcmalloc/transfer_cache.cc
+
+ # Common deps
+ tcmalloc/experiment.cc
+ tcmalloc/noruntime_size_classes.cc
+
+ # Internal libraries
+ tcmalloc/internal/cache_topology.cc
+ tcmalloc/internal/environment.cc
+ tcmalloc/internal/logging.cc
+ tcmalloc/internal/memory_stats.cc
+ tcmalloc/internal/mincore.cc
+ tcmalloc/internal/numa.cc
+ tcmalloc/internal/percpu.cc
+ tcmalloc/internal/percpu_rseq_asm.S
+ tcmalloc/internal/percpu_rseq_unsupported.cc
+ tcmalloc/internal/util.cc
+)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp
+ contrib/libs/tcmalloc/malloc_extension
+)
+
+NO_UTIL()
+NO_COMPILER_WARNINGS()
diff --git a/contrib/libs/tcmalloc/default/ya.make b/contrib/libs/tcmalloc/default/ya.make
index b69b077e19..dac75122ac 100644
--- a/contrib/libs/tcmalloc/default/ya.make
+++ b/contrib/libs/tcmalloc/default/ya.make
@@ -1,22 +1,22 @@
-LIBRARY()
-
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-LICENSE(Apache-2.0)
-
-OWNER(
- ayles
- prime
+LICENSE(Apache-2.0)
+
+OWNER(
+ ayles
+ prime
g:cpp-contrib
-)
-
-SRCDIR(contrib/libs/tcmalloc)
-
-INCLUDE(../common.inc)
-
-GLOBAL_SRCS(
- # Options
- tcmalloc/want_hpaa.cc
-)
-
-END()
+)
+
+SRCDIR(contrib/libs/tcmalloc)
+
+INCLUDE(../common.inc)
+
+GLOBAL_SRCS(
+ # Options
+ tcmalloc/want_hpaa.cc
+)
+
+END()
diff --git a/contrib/libs/tcmalloc/malloc_extension/ya.make b/contrib/libs/tcmalloc/malloc_extension/ya.make
index c9a07c2454..610323a904 100644
--- a/contrib/libs/tcmalloc/malloc_extension/ya.make
+++ b/contrib/libs/tcmalloc/malloc_extension/ya.make
@@ -1,31 +1,31 @@
-LIBRARY()
-
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-LICENSE(Apache-2.0)
-
-OWNER(
+LICENSE(Apache-2.0)
+
+OWNER(
prime
- g:cpp-contrib
-)
-
-NO_UTIL()
-
-NO_COMPILER_WARNINGS()
-
-# https://github.com/google/tcmalloc
-VERSION(2020-11-23-a643d89610317be1eff9f7298104eef4c987d8d5)
-
-SRCDIR(contrib/libs/tcmalloc)
-
-SRCS(
- tcmalloc/malloc_extension.cc
-)
-
+ g:cpp-contrib
+)
+
+NO_UTIL()
+
+NO_COMPILER_WARNINGS()
+
+# https://github.com/google/tcmalloc
+VERSION(2020-11-23-a643d89610317be1eff9f7298104eef4c987d8d5)
+
+SRCDIR(contrib/libs/tcmalloc)
+
+SRCS(
+ tcmalloc/malloc_extension.cc
+)
+
PEERDIR(
contrib/restricted/abseil-cpp
-)
-
+)
+
ADDINCL(
GLOBAL contrib/libs/tcmalloc
)
@@ -33,5 +33,5 @@ ADDINCL(
CFLAGS(
-DTCMALLOC_256K_PAGES
)
-
-END()
+
+END()
diff --git a/contrib/libs/tcmalloc/numa_256k/ya.make b/contrib/libs/tcmalloc/numa_256k/ya.make
index ffede5df8b..728c02816d 100644
--- a/contrib/libs/tcmalloc/numa_256k/ya.make
+++ b/contrib/libs/tcmalloc/numa_256k/ya.make
@@ -1,28 +1,28 @@
-LIBRARY()
-
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-LICENSE(Apache-2.0)
-
-OWNER(
- ayles
- prime
+LICENSE(Apache-2.0)
+
+OWNER(
+ ayles
+ prime
g:cpp-contrib
-)
-
-SRCDIR(contrib/libs/tcmalloc)
-
-INCLUDE(../common.inc)
-
-GLOBAL_SRCS(
- # Options
- tcmalloc/want_hpaa.cc
- tcmalloc/want_numa_aware.cc
-)
-
-CFLAGS(
- -DTCMALLOC_256K_PAGES
- -DTCMALLOC_NUMA_AWARE
-)
-
-END()
+)
+
+SRCDIR(contrib/libs/tcmalloc)
+
+INCLUDE(../common.inc)
+
+GLOBAL_SRCS(
+ # Options
+ tcmalloc/want_hpaa.cc
+ tcmalloc/want_numa_aware.cc
+)
+
+CFLAGS(
+ -DTCMALLOC_256K_PAGES
+ -DTCMALLOC_NUMA_AWARE
+)
+
+END()
diff --git a/contrib/libs/tcmalloc/numa_large_pages/ya.make b/contrib/libs/tcmalloc/numa_large_pages/ya.make
index f39c1e15ba..6f1de511ed 100644
--- a/contrib/libs/tcmalloc/numa_large_pages/ya.make
+++ b/contrib/libs/tcmalloc/numa_large_pages/ya.make
@@ -1,28 +1,28 @@
-LIBRARY()
-
-WITHOUT_LICENSE_TEXTS()
-
-LICENSE(Apache-2.0)
-
-OWNER(
- ayles
- prime
- g:cpp-contrib
-)
-
-SRCDIR(contrib/libs/tcmalloc)
-
-INCLUDE(../common.inc)
-
-GLOBAL_SRCS(
- # Options
- tcmalloc/want_hpaa.cc
- tcmalloc/want_numa_aware.cc
-)
-
-CFLAGS(
- -DTCMALLOC_LARGE_PAGES
- -DTCMALLOC_NUMA_AWARE
-)
-
-END()
+LIBRARY()
+
+WITHOUT_LICENSE_TEXTS()
+
+LICENSE(Apache-2.0)
+
+OWNER(
+ ayles
+ prime
+ g:cpp-contrib
+)
+
+SRCDIR(contrib/libs/tcmalloc)
+
+INCLUDE(../common.inc)
+
+GLOBAL_SRCS(
+ # Options
+ tcmalloc/want_hpaa.cc
+ tcmalloc/want_numa_aware.cc
+)
+
+CFLAGS(
+ -DTCMALLOC_LARGE_PAGES
+ -DTCMALLOC_NUMA_AWARE
+)
+
+END()
diff --git a/contrib/libs/tcmalloc/patches/fork.patch b/contrib/libs/tcmalloc/patches/fork.patch
index 2503394431..b29bb78261 100644
--- a/contrib/libs/tcmalloc/patches/fork.patch
+++ b/contrib/libs/tcmalloc/patches/fork.patch
@@ -1,310 +1,310 @@
---- contrib/libs/tcmalloc/tcmalloc/central_freelist.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/central_freelist.h (working tree)
-@@ -70,6 +70,14 @@ class CentralFreeList {
-
- SpanStats GetSpanStats() const;
-
-+ void AcquireInternalLocks() {
-+ lock_.Lock();
-+ }
-+
-+ void ReleaseInternalLocks() {
-+ lock_.Unlock();
-+ }
-+
- private:
- // Release an object to spans.
- // Returns object's span if it become completely free.
---- contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (working tree)
-@@ -1031,6 +1031,20 @@ void CPUCache::PrintInPbtxt(PbtxtRegion *region) const {
- }
- }
-
-+void CPUCache::AcquireInternalLocks() {
-+ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
-+ ++cpu) {
-+ resize_[cpu].lock.Lock();
-+ }
-+}
-+
-+void CPUCache::ReleaseInternalLocks() {
-+ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
-+ ++cpu) {
-+ resize_[cpu].lock.Unlock();
-+ }
-+}
-+
- void CPUCache::PerClassResizeInfo::Init() {
- state_.store(0, std::memory_order_relaxed);
- }
---- contrib/libs/tcmalloc/tcmalloc/cpu_cache.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.h (working tree)
-@@ -164,6 +164,9 @@ class CPUCache {
- void Print(Printer* out) const;
- void PrintInPbtxt(PbtxtRegion* region) const;
-
-+ void AcquireInternalLocks();
-+ void ReleaseInternalLocks();
-+
- private:
- // Per-size-class freelist resizing info.
- class PerClassResizeInfo {
---- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (working tree)
-@@ -116,6 +116,10 @@ ABSL_ATTRIBUTE_WEAK int64_t
- MallocExtension_Internal_GetMaxTotalThreadCacheBytes();
- ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetMaxTotalThreadCacheBytes(
- int64_t value);
-+
-+ABSL_ATTRIBUTE_WEAK void
-+MallocExtension_EnableForkSupport();
-+
- }
-
- #endif
---- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (working tree)
-@@ -460,6 +460,14 @@ void MallocExtension::SetBackgroundReleaseRate(BytesPerSecond rate) {
- #endif
- }
-
-+void MallocExtension::EnableForkSupport() {
-+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
-+ if (&MallocExtension_EnableForkSupport != nullptr) {
-+ MallocExtension_EnableForkSupport();
-+ }
-+#endif
-+}
-+
- } // namespace tcmalloc
-
- // Default implementation just returns size. The expectation is that
---- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (working tree)
-@@ -468,6 +468,10 @@ class MallocExtension final {
- // Specifies the release rate from the page heap. ProcessBackgroundActions
- // must be called for this to be operative.
- static void SetBackgroundReleaseRate(BytesPerSecond rate);
-+
-+ // Enables fork support.
-+ // Allocator will continue to function correctly in the child, after calling fork().
-+ static void EnableForkSupport();
- };
-
- } // namespace tcmalloc
---- contrib/libs/tcmalloc/tcmalloc/static_vars.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/static_vars.cc (working tree)
-@@ -59,6 +59,7 @@ ABSL_CONST_INIT PageHeapAllocator<StackTraceTable::Bucket>
- Static::bucket_allocator_;
- ABSL_CONST_INIT std::atomic<bool> Static::inited_{false};
- ABSL_CONST_INIT bool Static::cpu_cache_active_ = false;
-+ABSL_CONST_INIT bool Static::fork_support_enabled_ = false;
- ABSL_CONST_INIT Static::PageAllocatorStorage Static::page_allocator_;
- ABSL_CONST_INIT PageMap Static::pagemap_;
- ABSL_CONST_INIT absl::base_internal::SpinLock guarded_page_lock(
-@@ -116,6 +117,13 @@ ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE void Static::SlowInitIfNecessary() {
- pagemap_.MapRootWithSmallPages();
- guardedpage_allocator_.Init(/*max_alloced_pages=*/64, /*total_pages=*/128);
- inited_.store(true, std::memory_order_release);
-+
-+ pageheap_lock.Unlock();
-+ pthread_atfork(
-+ TCMallocPreFork,
-+ TCMallocPostFork,
-+ TCMallocPostFork);
-+ pageheap_lock.Lock();
- }
- }
-
---- contrib/libs/tcmalloc/tcmalloc/static_vars.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/static_vars.h (working tree)
-@@ -50,6 +50,9 @@ class CPUCache;
- class PageMap;
- class ThreadCache;
-
-+void TCMallocPreFork();
-+void TCMallocPostFork();
-+
- class Static {
- public:
- // True if InitIfNecessary() has run to completion.
-@@ -124,6 +127,9 @@ class Static {
- static void ActivateCPUCache() { cpu_cache_active_ = true; }
- static void DeactivateCPUCache() { cpu_cache_active_ = false; }
-
-+ static bool ForkSupportEnabled() { return fork_support_enabled_; }
-+ static void EnableForkSupport() { fork_support_enabled_ = true; }
-+
- static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() {
- return
- #ifndef TCMALLOC_DEPRECATED_PERTHREAD
-@@ -169,6 +175,7 @@ class Static {
- static PageHeapAllocator<StackTraceTable::Bucket> bucket_allocator_;
- ABSL_CONST_INIT static std::atomic<bool> inited_;
- static bool cpu_cache_active_;
-+ static bool fork_support_enabled_;
- ABSL_CONST_INIT static PeakHeapTracker peak_heap_tracker_;
- ABSL_CONST_INIT static NumaTopology<kNumaPartitions, kNumBaseClasses>
- numa_topology_;
---- contrib/libs/tcmalloc/tcmalloc/system-alloc.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/system-alloc.cc (working tree)
-@@ -354,6 +354,14 @@ ABSL_CONST_INIT std::atomic<int> system_release_errors = ATOMIC_VAR_INIT(0);
-
- } // namespace
-
-+void AcquireSystemAllocLock() {
-+ spinlock.Lock();
-+}
-+
-+void ReleaseSystemAllocLock() {
-+ spinlock.Unlock();
-+}
-+
- void* SystemAlloc(size_t bytes, size_t* actual_bytes, size_t alignment,
- const MemoryTag tag) {
- // If default alignment is set request the minimum alignment provided by
---- contrib/libs/tcmalloc/tcmalloc/system-alloc.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/system-alloc.h (working tree)
-@@ -50,6 +50,9 @@ void *SystemAlloc(size_t bytes, size_t *actual_bytes, size_t alignment,
- // call to SystemRelease.
- int SystemReleaseErrors();
-
-+void AcquireSystemAllocLock();
-+void ReleaseSystemAllocLock();
-+
- // This call is a hint to the operating system that the pages
- // contained in the specified range of memory will not be used for a
- // while, and can be released for use by other processes or the OS.
---- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (working tree)
-@@ -1117,6 +1117,40 @@ extern "C" void MallocExtension_Internal_ReleaseMemoryToSystem(
- }
- }
-
-+extern "C" void MallocExtension_EnableForkSupport() {
-+ Static::EnableForkSupport();
-+}
-+
-+void TCMallocPreFork() {
-+ if (!Static::ForkSupportEnabled()) {
-+ return;
-+ }
-+
-+ if (Static::CPUCacheActive()) {
-+ Static::cpu_cache().AcquireInternalLocks();
-+ }
-+ Static::transfer_cache().AcquireInternalLocks();
-+ guarded_page_lock.Lock();
-+ release_lock.Lock();
-+ pageheap_lock.Lock();
-+ AcquireSystemAllocLock();
-+}
-+
-+void TCMallocPostFork() {
-+ if (!Static::ForkSupportEnabled()) {
-+ return;
-+ }
-+
-+ ReleaseSystemAllocLock();
-+ pageheap_lock.Unlock();
-+ guarded_page_lock.Unlock();
-+ release_lock.Unlock();
-+ Static::transfer_cache().ReleaseInternalLocks();
-+ if (Static::CPUCacheActive()) {
-+ Static::cpu_cache().ReleaseInternalLocks();
-+ }
-+}
-+
- // nallocx slow path.
- // Moved to a separate function because size_class_with_alignment is not inlined
- // which would cause nallocx to become non-leaf function with stack frame and
---- contrib/libs/tcmalloc/tcmalloc/tcmalloc.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.h (working tree)
-@@ -120,4 +120,7 @@ void TCMallocInternalDeleteArrayNothrow(void* p, const std::nothrow_t&) __THROW
- }
- #endif
-
-+void TCMallocInternalAcquireLocks();
-+void TCMallocInternalReleaseLocks();
-+
- #endif // TCMALLOC_TCMALLOC_H_
---- contrib/libs/tcmalloc/tcmalloc/transfer_cache.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/transfer_cache.h (working tree)
-@@ -176,6 +176,26 @@ class TransferCacheManager : public StaticForwarder {
- }
- }
-
-+ void AcquireInternalLocks() {
-+ for (int i = 0; i < kNumClasses; ++i) {
-+ if (implementation_ == TransferCacheImplementation::Ring) {
-+ cache_[i].rbtc.AcquireInternalLocks();
-+ } else {
-+ cache_[i].tc.AcquireInternalLocks();
-+ }
-+ }
-+ }
-+
-+ void ReleaseInternalLocks() {
-+ for (int i = 0; i < kNumClasses; ++i) {
-+ if (implementation_ == TransferCacheImplementation::Ring) {
-+ cache_[i].rbtc.ReleaseInternalLocks();
-+ } else {
-+ cache_[i].tc.ReleaseInternalLocks();
-+ }
-+ }
-+ }
-+
- void InsertRange(int size_class, absl::Span<void *> batch) {
- if (implementation_ == TransferCacheImplementation::Ring) {
- cache_[size_class].rbtc.InsertRange(size_class, batch);
-@@ -295,6 +315,9 @@ class TransferCacheManager {
- return TransferCacheImplementation::None;
- }
-
-+ void AcquireInternalLocks() {}
-+ void ReleaseInternalLocks() {}
-+
- private:
- CentralFreeList freelist_[kNumClasses];
- } ABSL_CACHELINE_ALIGNED;
---- contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h (working tree)
-@@ -366,6 +366,18 @@ class TransferCache {
- return freelist_do_not_access_directly_;
- }
-
-+ void AcquireInternalLocks()
-+ {
-+ freelist().AcquireInternalLocks();
-+ lock_.Lock();
-+ }
-+
-+ void ReleaseInternalLocks()
-+ {
-+ lock_.Unlock();
-+ freelist().ReleaseInternalLocks();
-+ }
-+
- private:
- // Returns first object of the i-th slot.
- void **GetSlot(size_t i) ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
-@@ -468,6 +480,18 @@ class RingBufferTransferCache {
-
- // These methods all do internal locking.
-
-+ void AcquireInternalLocks()
-+ {
-+ freelist().AcquireInternalLocks();
-+ lock_.Lock();
-+ }
-+
-+ void ReleaseInternalLocks()
-+ {
-+ lock_.Unlock();
-+ freelist().ReleaseInternalLocks();
-+ }
-+
- // Insert the specified batch into the transfer cache. N is the number of
- // elements in the range. RemoveRange() is the opposite operation.
- void InsertRange(int size_class, absl::Span<void *> batch)
+--- contrib/libs/tcmalloc/tcmalloc/central_freelist.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/central_freelist.h (working tree)
+@@ -70,6 +70,14 @@ class CentralFreeList {
+
+ SpanStats GetSpanStats() const;
+
++ void AcquireInternalLocks() {
++ lock_.Lock();
++ }
++
++ void ReleaseInternalLocks() {
++ lock_.Unlock();
++ }
++
+ private:
+ // Release an object to spans.
+ // Returns object's span if it become completely free.
+--- contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (working tree)
+@@ -1031,6 +1031,20 @@ void CPUCache::PrintInPbtxt(PbtxtRegion *region) const {
+ }
+ }
+
++void CPUCache::AcquireInternalLocks() {
++ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
++ ++cpu) {
++ resize_[cpu].lock.Lock();
++ }
++}
++
++void CPUCache::ReleaseInternalLocks() {
++ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
++ ++cpu) {
++ resize_[cpu].lock.Unlock();
++ }
++}
++
+ void CPUCache::PerClassResizeInfo::Init() {
+ state_.store(0, std::memory_order_relaxed);
+ }
+--- contrib/libs/tcmalloc/tcmalloc/cpu_cache.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.h (working tree)
+@@ -164,6 +164,9 @@ class CPUCache {
+ void Print(Printer* out) const;
+ void PrintInPbtxt(PbtxtRegion* region) const;
+
++ void AcquireInternalLocks();
++ void ReleaseInternalLocks();
++
+ private:
+ // Per-size-class freelist resizing info.
+ class PerClassResizeInfo {
+--- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (working tree)
+@@ -116,6 +116,10 @@ ABSL_ATTRIBUTE_WEAK int64_t
+ MallocExtension_Internal_GetMaxTotalThreadCacheBytes();
+ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetMaxTotalThreadCacheBytes(
+ int64_t value);
++
++ABSL_ATTRIBUTE_WEAK void
++MallocExtension_EnableForkSupport();
++
+ }
+
+ #endif
+--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (working tree)
+@@ -460,6 +460,14 @@ void MallocExtension::SetBackgroundReleaseRate(BytesPerSecond rate) {
+ #endif
+ }
+
++void MallocExtension::EnableForkSupport() {
++#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
++ if (&MallocExtension_EnableForkSupport != nullptr) {
++ MallocExtension_EnableForkSupport();
++ }
++#endif
++}
++
+ } // namespace tcmalloc
+
+ // Default implementation just returns size. The expectation is that
+--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (working tree)
+@@ -468,6 +468,10 @@ class MallocExtension final {
+ // Specifies the release rate from the page heap. ProcessBackgroundActions
+ // must be called for this to be operative.
+ static void SetBackgroundReleaseRate(BytesPerSecond rate);
++
++ // Enables fork support.
++ // Allocator will continue to function correctly in the child, after calling fork().
++ static void EnableForkSupport();
+ };
+
+ } // namespace tcmalloc
+--- contrib/libs/tcmalloc/tcmalloc/static_vars.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/static_vars.cc (working tree)
+@@ -59,6 +59,7 @@ ABSL_CONST_INIT PageHeapAllocator<StackTraceTable::Bucket>
+ Static::bucket_allocator_;
+ ABSL_CONST_INIT std::atomic<bool> Static::inited_{false};
+ ABSL_CONST_INIT bool Static::cpu_cache_active_ = false;
++ABSL_CONST_INIT bool Static::fork_support_enabled_ = false;
+ ABSL_CONST_INIT Static::PageAllocatorStorage Static::page_allocator_;
+ ABSL_CONST_INIT PageMap Static::pagemap_;
+ ABSL_CONST_INIT absl::base_internal::SpinLock guarded_page_lock(
+@@ -116,6 +117,13 @@ ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE void Static::SlowInitIfNecessary() {
+ pagemap_.MapRootWithSmallPages();
+ guardedpage_allocator_.Init(/*max_alloced_pages=*/64, /*total_pages=*/128);
+ inited_.store(true, std::memory_order_release);
++
++ pageheap_lock.Unlock();
++ pthread_atfork(
++ TCMallocPreFork,
++ TCMallocPostFork,
++ TCMallocPostFork);
++ pageheap_lock.Lock();
+ }
+ }
+
+--- contrib/libs/tcmalloc/tcmalloc/static_vars.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/static_vars.h (working tree)
+@@ -50,6 +50,9 @@ class CPUCache;
+ class PageMap;
+ class ThreadCache;
+
++void TCMallocPreFork();
++void TCMallocPostFork();
++
+ class Static {
+ public:
+ // True if InitIfNecessary() has run to completion.
+@@ -124,6 +127,9 @@ class Static {
+ static void ActivateCPUCache() { cpu_cache_active_ = true; }
+ static void DeactivateCPUCache() { cpu_cache_active_ = false; }
+
++ static bool ForkSupportEnabled() { return fork_support_enabled_; }
++ static void EnableForkSupport() { fork_support_enabled_ = true; }
++
+ static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() {
+ return
+ #ifndef TCMALLOC_DEPRECATED_PERTHREAD
+@@ -169,6 +175,7 @@ class Static {
+ static PageHeapAllocator<StackTraceTable::Bucket> bucket_allocator_;
+ ABSL_CONST_INIT static std::atomic<bool> inited_;
+ static bool cpu_cache_active_;
++ static bool fork_support_enabled_;
+ ABSL_CONST_INIT static PeakHeapTracker peak_heap_tracker_;
+ ABSL_CONST_INIT static NumaTopology<kNumaPartitions, kNumBaseClasses>
+ numa_topology_;
+--- contrib/libs/tcmalloc/tcmalloc/system-alloc.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/system-alloc.cc (working tree)
+@@ -354,6 +354,14 @@ ABSL_CONST_INIT std::atomic<int> system_release_errors = ATOMIC_VAR_INIT(0);
+
+ } // namespace
+
++void AcquireSystemAllocLock() {
++ spinlock.Lock();
++}
++
++void ReleaseSystemAllocLock() {
++ spinlock.Unlock();
++}
++
+ void* SystemAlloc(size_t bytes, size_t* actual_bytes, size_t alignment,
+ const MemoryTag tag) {
+ // If default alignment is set request the minimum alignment provided by
+--- contrib/libs/tcmalloc/tcmalloc/system-alloc.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/system-alloc.h (working tree)
+@@ -50,6 +50,9 @@ void *SystemAlloc(size_t bytes, size_t *actual_bytes, size_t alignment,
+ // call to SystemRelease.
+ int SystemReleaseErrors();
+
++void AcquireSystemAllocLock();
++void ReleaseSystemAllocLock();
++
+ // This call is a hint to the operating system that the pages
+ // contained in the specified range of memory will not be used for a
+ // while, and can be released for use by other processes or the OS.
+--- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (working tree)
+@@ -1117,6 +1117,40 @@ extern "C" void MallocExtension_Internal_ReleaseMemoryToSystem(
+ }
+ }
+
++extern "C" void MallocExtension_EnableForkSupport() {
++ Static::EnableForkSupport();
++}
++
++void TCMallocPreFork() {
++ if (!Static::ForkSupportEnabled()) {
++ return;
++ }
++
++ if (Static::CPUCacheActive()) {
++ Static::cpu_cache().AcquireInternalLocks();
++ }
++ Static::transfer_cache().AcquireInternalLocks();
++ guarded_page_lock.Lock();
++ release_lock.Lock();
++ pageheap_lock.Lock();
++ AcquireSystemAllocLock();
++}
++
++void TCMallocPostFork() {
++ if (!Static::ForkSupportEnabled()) {
++ return;
++ }
++
++ ReleaseSystemAllocLock();
++ pageheap_lock.Unlock();
++ guarded_page_lock.Unlock();
++ release_lock.Unlock();
++ Static::transfer_cache().ReleaseInternalLocks();
++ if (Static::CPUCacheActive()) {
++ Static::cpu_cache().ReleaseInternalLocks();
++ }
++}
++
+ // nallocx slow path.
+ // Moved to a separate function because size_class_with_alignment is not inlined
+ // which would cause nallocx to become non-leaf function with stack frame and
+--- contrib/libs/tcmalloc/tcmalloc/tcmalloc.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.h (working tree)
+@@ -120,4 +120,7 @@ void TCMallocInternalDeleteArrayNothrow(void* p, const std::nothrow_t&) __THROW
+ }
+ #endif
+
++void TCMallocInternalAcquireLocks();
++void TCMallocInternalReleaseLocks();
++
+ #endif // TCMALLOC_TCMALLOC_H_
+--- contrib/libs/tcmalloc/tcmalloc/transfer_cache.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/transfer_cache.h (working tree)
+@@ -176,6 +176,26 @@ class TransferCacheManager : public StaticForwarder {
+ }
+ }
+
++ void AcquireInternalLocks() {
++ for (int i = 0; i < kNumClasses; ++i) {
++ if (implementation_ == TransferCacheImplementation::Ring) {
++ cache_[i].rbtc.AcquireInternalLocks();
++ } else {
++ cache_[i].tc.AcquireInternalLocks();
++ }
++ }
++ }
++
++ void ReleaseInternalLocks() {
++ for (int i = 0; i < kNumClasses; ++i) {
++ if (implementation_ == TransferCacheImplementation::Ring) {
++ cache_[i].rbtc.ReleaseInternalLocks();
++ } else {
++ cache_[i].tc.ReleaseInternalLocks();
++ }
++ }
++ }
++
+ void InsertRange(int size_class, absl::Span<void *> batch) {
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ cache_[size_class].rbtc.InsertRange(size_class, batch);
+@@ -295,6 +315,9 @@ class TransferCacheManager {
+ return TransferCacheImplementation::None;
+ }
+
++ void AcquireInternalLocks() {}
++ void ReleaseInternalLocks() {}
++
+ private:
+ CentralFreeList freelist_[kNumClasses];
+ } ABSL_CACHELINE_ALIGNED;
+--- contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h (working tree)
+@@ -366,6 +366,18 @@ class TransferCache {
+ return freelist_do_not_access_directly_;
+ }
+
++ void AcquireInternalLocks()
++ {
++ freelist().AcquireInternalLocks();
++ lock_.Lock();
++ }
++
++ void ReleaseInternalLocks()
++ {
++ lock_.Unlock();
++ freelist().ReleaseInternalLocks();
++ }
++
+ private:
+ // Returns first object of the i-th slot.
+ void **GetSlot(size_t i) ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+@@ -468,6 +480,18 @@ class RingBufferTransferCache {
+
+ // These methods all do internal locking.
+
++ void AcquireInternalLocks()
++ {
++ freelist().AcquireInternalLocks();
++ lock_.Lock();
++ }
++
++ void ReleaseInternalLocks()
++ {
++ lock_.Unlock();
++ freelist().ReleaseInternalLocks();
++ }
++
+ // Insert the specified batch into the transfer cache. N is the number of
+ // elements in the range. RemoveRange() is the opposite operation.
+ void InsertRange(int size_class, absl::Span<void *> batch)
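
For illustration only, and not part of fork.patch itself: a program built against this patched tcmalloc would opt in to the fork handlers roughly as below. MallocExtension::EnableForkSupport() and the TCMallocPreFork/TCMallocPostFork handlers registered via pthread_atfork come from the patch above; the include path and the rest of the program are an assumed, minimal sketch.

#include <sys/wait.h>
#include <unistd.h>

#include <cstdlib>

#include "tcmalloc/malloc_extension.h"

int main() {
  // Opt in once at startup. SlowInitIfNecessary() registers
  // TCMallocPreFork/TCMallocPostFork via pthread_atfork, and those handlers
  // are no-ops unless fork support has been enabled.
  tcmalloc::MallocExtension::EnableForkSupport();

  pid_t pid = fork();  // pre-fork handler acquires the allocator locks;
                       // the post-fork handler releases them in both
                       // parent and child
  if (pid == 0) {
    void* p = malloc(64);  // the child can keep allocating after fork()
    free(p);
    _exit(0);
  }
  waitpid(pid, nullptr, 0);
  return 0;
}
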
diff --git a/contrib/libs/tcmalloc/patches/yandex.patch b/contrib/libs/tcmalloc/patches/yandex.patch
index 12d11f2dad..98eaf2f4d8 100644
--- a/contrib/libs/tcmalloc/patches/yandex.patch
+++ b/contrib/libs/tcmalloc/patches/yandex.patch
@@ -1,91 +1,91 @@
-commit ab4069ebdd376db4d32c29e1a2414565ec849249
-author: prime
-date: 2021-10-07T14:52:42+03:00
-
- Apply yandex patches
-
---- contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -1112,6 +1112,11 @@ extern "C" bool MallocExtension_Internal_GetPerCpuCachesActive() {
- return tcmalloc::tcmalloc_internal::Static::CPUCacheActive();
- }
+commit ab4069ebdd376db4d32c29e1a2414565ec849249
+author: prime
+date: 2021-10-07T14:52:42+03:00
-+extern "C" void MallocExtension_Internal_DeactivatePerCpuCaches() {
-+ tcmalloc::tcmalloc_internal::Parameters::set_per_cpu_caches(false);
-+ tcmalloc::tcmalloc_internal::Static::DeactivateCPUCache();
-+}
-+
- extern "C" int32_t MallocExtension_Internal_GetMaxPerCpuCacheSize() {
- return tcmalloc::tcmalloc_internal::Parameters::max_per_cpu_cache_size();
- }
---- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -75,6 +75,7 @@ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetMemoryLimit(
- ABSL_ATTRIBUTE_WEAK bool MallocExtension_Internal_GetNumericProperty(
- const char* name_data, size_t name_size, size_t* value);
- ABSL_ATTRIBUTE_WEAK bool MallocExtension_Internal_GetPerCpuCachesActive();
-+ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_DeactivatePerCpuCaches();
- ABSL_ATTRIBUTE_WEAK int32_t MallocExtension_Internal_GetMaxPerCpuCacheSize();
- ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetSkipSubreleaseInterval(
- absl::Duration* ret);
---- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -287,6 +287,16 @@ bool MallocExtension::PerCpuCachesActive() {
- #endif
- }
-
-+void MallocExtension::DeactivatePerCpuCaches() {
-+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
-+ if (MallocExtension_Internal_DeactivatePerCpuCaches == nullptr) {
-+ return;
-+ }
-+
-+ MallocExtension_Internal_DeactivatePerCpuCaches();
-+#endif
-+}
-+
- int32_t MallocExtension::GetMaxPerCpuCacheSize() {
- #if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
- if (MallocExtension_Internal_GetMaxPerCpuCacheSize == nullptr) {
---- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -329,6 +329,11 @@ class MallocExtension final {
- // Gets whether TCMalloc is using per-CPU caches.
- static bool PerCpuCachesActive();
-
-+ // Extension for unified agent.
-+ //
-+ // Should be removed in the future https://st.yandex-team.ru/UNIFIEDAGENT-321
-+ static void DeactivatePerCpuCaches();
-+
- // Gets the current maximum cache size per CPU cache.
- static int32_t GetMaxPerCpuCacheSize();
- // Sets the maximum cache size per CPU cache. This is a per-core limit.
---- contrib/libs/tcmalloc/tcmalloc/static_vars.h (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/static_vars.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -122,6 +122,7 @@ class Static {
- return cpu_cache_active_;
- }
- static void ActivateCPUCache() { cpu_cache_active_ = true; }
-+ static void DeactivateCPUCache() { cpu_cache_active_ = false; }
-
- static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() {
- return
---- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -2210,14 +2210,7 @@ extern "C" void* TCMallocInternalNewArray(size_t size)
- TCMALLOC_ALIAS(TCMallocInternalNew);
- #else
- {
-- void* p = fast_alloc(CppPolicy().WithoutHooks(), size);
-- // We keep this next instruction out of fast_alloc for a reason: when
-- // it's in, and new just calls fast_alloc, the optimizer may fold the
-- // new call into fast_alloc, which messes up our whole section-based
-- // stacktracing (see ABSL_ATTRIBUTE_SECTION, above). This ensures fast_alloc
-- // isn't the last thing this fn calls, and prevents the folding.
-- MallocHook::InvokeNewHook(p, size);
-- return p;
-+ return fast_alloc(CppPolicy().WithoutHooks(), size);
- }
- #endif // TCMALLOC_ALIAS
+ Apply yandex patches
+--- contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -1112,6 +1112,11 @@ extern "C" bool MallocExtension_Internal_GetPerCpuCachesActive() {
+ return tcmalloc::tcmalloc_internal::Static::CPUCacheActive();
+ }
+
++extern "C" void MallocExtension_Internal_DeactivatePerCpuCaches() {
++ tcmalloc::tcmalloc_internal::Parameters::set_per_cpu_caches(false);
++ tcmalloc::tcmalloc_internal::Static::DeactivateCPUCache();
++}
++
+ extern "C" int32_t MallocExtension_Internal_GetMaxPerCpuCacheSize() {
+ return tcmalloc::tcmalloc_internal::Parameters::max_per_cpu_cache_size();
+ }
+--- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -75,6 +75,7 @@ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetMemoryLimit(
+ ABSL_ATTRIBUTE_WEAK bool MallocExtension_Internal_GetNumericProperty(
+ const char* name_data, size_t name_size, size_t* value);
+ ABSL_ATTRIBUTE_WEAK bool MallocExtension_Internal_GetPerCpuCachesActive();
++ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_DeactivatePerCpuCaches();
+ ABSL_ATTRIBUTE_WEAK int32_t MallocExtension_Internal_GetMaxPerCpuCacheSize();
+ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetSkipSubreleaseInterval(
+ absl::Duration* ret);
+--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -287,6 +287,16 @@ bool MallocExtension::PerCpuCachesActive() {
+ #endif
+ }
+
++void MallocExtension::DeactivatePerCpuCaches() {
++#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
++ if (MallocExtension_Internal_DeactivatePerCpuCaches == nullptr) {
++ return;
++ }
++
++ MallocExtension_Internal_DeactivatePerCpuCaches();
++#endif
++}
++
+ int32_t MallocExtension::GetMaxPerCpuCacheSize() {
+ #if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
+ if (MallocExtension_Internal_GetMaxPerCpuCacheSize == nullptr) {
+--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -329,6 +329,11 @@ class MallocExtension final {
+ // Gets whether TCMalloc is using per-CPU caches.
+ static bool PerCpuCachesActive();
+
++ // Extension for unified agent.
++ //
++ // Should be removed in the future https://st.yandex-team.ru/UNIFIEDAGENT-321
++ static void DeactivatePerCpuCaches();
++
+ // Gets the current maximum cache size per CPU cache.
+ static int32_t GetMaxPerCpuCacheSize();
+ // Sets the maximum cache size per CPU cache. This is a per-core limit.
+--- contrib/libs/tcmalloc/tcmalloc/static_vars.h (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/static_vars.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -122,6 +122,7 @@ class Static {
+ return cpu_cache_active_;
+ }
+ static void ActivateCPUCache() { cpu_cache_active_ = true; }
++ static void DeactivateCPUCache() { cpu_cache_active_ = false; }
+
+ static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() {
+ return
+--- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -2210,14 +2210,7 @@ extern "C" void* TCMallocInternalNewArray(size_t size)
+ TCMALLOC_ALIAS(TCMallocInternalNew);
+ #else
+ {
+- void* p = fast_alloc(CppPolicy().WithoutHooks(), size);
+- // We keep this next instruction out of fast_alloc for a reason: when
+- // it's in, and new just calls fast_alloc, the optimizer may fold the
+- // new call into fast_alloc, which messes up our whole section-based
+- // stacktracing (see ABSL_ATTRIBUTE_SECTION, above). This ensures fast_alloc
+- // isn't the last thing this fn calls, and prevents the folding.
+- MallocHook::InvokeNewHook(p, size);
+- return p;
++ return fast_alloc(CppPolicy().WithoutHooks(), size);
+ }
+ #endif // TCMALLOC_ALIAS
+
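The restored yandex.patch above routes MallocExtension::DeactivatePerCpuCaches() through the weak MallocExtension_Internal_DeactivatePerCpuCaches stub, so the call degrades to a no-op when TCMalloc is not the linked allocator. A minimal C++ usage sketch (the real call site in the unified agent is not part of this diff, so the program below is illustrative only):

#include <cstdio>

#include "tcmalloc/malloc_extension.h"

int main() {
  using tcmalloc::MallocExtension;

  // Whether per-CPU caches are active depends on build flags and runtime state.
  std::printf("per-CPU caches active: %d\n", MallocExtension::PerCpuCachesActive());

  // Added by yandex.patch: switch per-CPU caching off at runtime. The call is a
  // no-op when the weak internal symbol is not linked in (e.g. system malloc).
  MallocExtension::DeactivatePerCpuCaches();

  std::printf("per-CPU caches active: %d\n", MallocExtension::PerCpuCachesActive());
  return 0;
}

Returning silently when the weak symbol is null follows the same pattern the neighbouring accessors in the patch (for example GetMaxPerCpuCacheSize) already use.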
diff --git a/contrib/libs/tcmalloc/slow_but_small/ya.make b/contrib/libs/tcmalloc/slow_but_small/ya.make
index ddcb157d30..0509efd720 100644
--- a/contrib/libs/tcmalloc/slow_but_small/ya.make
+++ b/contrib/libs/tcmalloc/slow_but_small/ya.make
@@ -1,21 +1,21 @@
-LIBRARY()
-
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-LICENSE(Apache-2.0)
-
-OWNER(
- ayles
- prime
+LICENSE(Apache-2.0)
+
+OWNER(
+ ayles
+ prime
g:cpp-contrib
-)
-
-SRCDIR(contrib/libs/tcmalloc)
-
-INCLUDE(../common.inc)
-
+)
+
+SRCDIR(contrib/libs/tcmalloc)
+
+INCLUDE(../common.inc)
+
CFLAGS(
-DTCMALLOC_SMALL_BUT_SLOW
)
-
-END()
+
+END()
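This ya.make only forwards -DTCMALLOC_SMALL_BUT_SLOW to the sources pulled in via SRCDIR/INCLUDE(../common.inc), mirroring the Bazel tcmalloc_small_but_slow and common_small_but_slow targets that appear later in this diff. As a rough C++ sketch of how such a variant macro is typically consumed at compile time (the constant names and numeric values below are placeholders, not the values TCMalloc actually uses):

#include <cstddef>

// Illustrative only: one page-size constant selected per build variant.
#if defined(TCMALLOC_SMALL_BUT_SLOW)
// Smaller pages and caches: lower fragmentation and RSS, slower hot path.
inline constexpr std::size_t kIllustrativePageShift = 12;  // 4 KiB placeholder
#elif defined(TCMALLOC_256K_PAGES)
inline constexpr std::size_t kIllustrativePageShift = 18;  // 256 KiB placeholder
#elif defined(TCMALLOC_LARGE_PAGES)
inline constexpr std::size_t kIllustrativePageShift = 15;  // 32 KiB placeholder
#else
inline constexpr std::size_t kIllustrativePageShift = 13;  // 8 KiB placeholder
#endif

inline constexpr std::size_t kIllustrativePageSize =
    std::size_t{1} << kIllustrativePageShift;

Selecting smaller pages and caches this way is the compile-time trade described in the small-but-slow comment in the BUILD diff below: less fragmentation at a severe cost to allocation speed.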
diff --git a/contrib/libs/tcmalloc/tcmalloc/BUILD b/contrib/libs/tcmalloc/tcmalloc/BUILD
index e618b85eec..65b85ad2cf 100644
--- a/contrib/libs/tcmalloc/tcmalloc/BUILD
+++ b/contrib/libs/tcmalloc/tcmalloc/BUILD
@@ -1,1316 +1,1316 @@
-# Copyright 2019 The TCMalloc Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Description:
-#
-# tcmalloc is a fast malloc implementation. See
-# https://github.com/google/tcmalloc/tree/master/docs/design.md for a high-level description of
-# how this malloc works.
-
-load("@rules_fuzzing//fuzzing:cc_defs.bzl", "cc_fuzz_test")
-load("//tcmalloc:copts.bzl", "TCMALLOC_DEFAULT_COPTS")
-load("//tcmalloc:variants.bzl", "create_tcmalloc_benchmark", "create_tcmalloc_testsuite")
-
-package(default_visibility = ["//visibility:private"])
-
-licenses(["notice"])
-
-exports_files(["LICENSE"])
-
-config_setting(
- name = "llvm",
- flag_values = {
- "@bazel_tools//tools/cpp:compiler": "clang",
- },
- visibility = [
- "//tcmalloc/internal:__subpackages__",
- "//tcmalloc/testing:__subpackages__",
- ],
-)
-
-cc_library(
- name = "experiment",
- srcs = ["experiment.cc"],
- hdrs = [
- "experiment.h",
- "experiment_config.h",
- ],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":malloc_extension",
- "//tcmalloc/internal:environment",
- "//tcmalloc/internal:logging",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/types:optional",
- ],
-)
-
-# Dependencies required by :tcmalloc and its variants. Since :common is built
-# several different ways, it should not be included on this list.
-tcmalloc_deps = [
- ":experiment",
- ":malloc_extension",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/base:config",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
- "@com_google_absl//absl/debugging:leak_check",
- "@com_google_absl//absl/debugging:stacktrace",
- "@com_google_absl//absl/debugging:symbolize",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/numeric:bits",
- "//tcmalloc/internal:config",
- "//tcmalloc/internal:declarations",
- "//tcmalloc/internal:linked_list",
- "//tcmalloc/internal:logging",
- "//tcmalloc/internal:memory_stats",
- "//tcmalloc/internal:optimization",
- "//tcmalloc/internal:percpu",
-]
-
-# This library provides tcmalloc always
-cc_library(
- name = "tcmalloc",
- srcs = [
- "libc_override.h",
- "libc_override_gcc_and_weak.h",
- "libc_override_glibc.h",
- "libc_override_redefine.h",
- "tcmalloc.cc",
- "tcmalloc.h",
- ],
- copts = TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//visibility:public"],
- deps = tcmalloc_deps + [
- ":common",
- ],
- alwayslink = 1,
-)
-
-# Provides tcmalloc always; use per-thread mode.
-cc_library(
- name = "tcmalloc_deprecated_perthread",
- srcs = [
- "libc_override.h",
- "libc_override_gcc_and_weak.h",
- "libc_override_glibc.h",
- "libc_override_redefine.h",
- "tcmalloc.cc",
- "tcmalloc.h",
- ],
- copts = ["-DTCMALLOC_DEPRECATED_PERTHREAD"] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = [
- "//tcmalloc/internal:__pkg__",
- "//tcmalloc/testing:__pkg__",
- ],
- deps = tcmalloc_deps + [
- ":common_deprecated_perthread",
- ],
- alwayslink = 1,
-)
-
-# An opt tcmalloc build with ASSERTs forced on (by turning off
-# NDEBUG). Useful for tracking down crashes in production binaries.
-# To use add malloc = "//tcmalloc:opt_with_assertions" in your
-# target's build rule.
-cc_library(
- name = "opt_with_assertions",
- srcs = [
- "libc_override.h",
- "libc_override_gcc_and_weak.h",
- "libc_override_glibc.h",
- "libc_override_redefine.h",
- "tcmalloc.cc",
- "tcmalloc.h",
- ],
- copts = [
- "-O2",
- "-UNDEBUG",
- ] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//visibility:public"],
- deps = tcmalloc_deps + [
- ":common",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "size_class_info",
- hdrs = ["size_class_info.h"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- "//tcmalloc/internal:logging",
- ],
-)
-
-# List of common source files used by the various tcmalloc libraries.
-common_srcs = [
- "arena.cc",
- "arena.h",
- "background.cc",
- "central_freelist.cc",
- "central_freelist.h",
- "common.cc",
- "common.h",
- "cpu_cache.cc",
- "cpu_cache.h",
- "experimental_pow2_below64_size_class.cc",
- "experimental_pow2_size_class.cc",
- "legacy_size_classes.cc",
- "guarded_page_allocator.h",
- "guarded_page_allocator.cc",
- "huge_address_map.cc",
- "huge_allocator.cc",
- "huge_allocator.h",
- "huge_cache.cc",
- "huge_cache.h",
- "huge_region.h",
- "huge_page_aware_allocator.cc",
- "huge_page_aware_allocator.h",
- "huge_page_filler.h",
- "huge_pages.h",
- "page_allocator.cc",
- "page_allocator.h",
- "page_allocator_interface.cc",
- "page_allocator_interface.h",
- "page_heap.cc",
- "page_heap.h",
- "page_heap_allocator.h",
- "pagemap.cc",
- "pagemap.h",
- "parameters.cc",
- "peak_heap_tracker.cc",
- "sampler.cc",
- "sampler.h",
- "size_classes.cc",
- "span.cc",
- "span.h",
- "span_stats.h",
- "stack_trace_table.cc",
- "stack_trace_table.h",
- "static_vars.cc",
- "static_vars.h",
- "stats.cc",
- "system-alloc.cc",
- "system-alloc.h",
- "thread_cache.cc",
- "thread_cache.h",
- "tracking.h",
- "transfer_cache_stats.h",
- "transfer_cache.cc",
- "transfer_cache.h",
- "transfer_cache_internals.h",
-]
-
-common_hdrs = [
- "arena.h",
- "central_freelist.h",
- "common.h",
- "cpu_cache.h",
- "guarded_page_allocator.h",
- "huge_address_map.h",
- "huge_allocator.h",
- "tcmalloc_policy.h",
- "huge_cache.h",
- "huge_page_filler.h",
- "huge_pages.h",
- "huge_region.h",
- "huge_page_aware_allocator.h",
- "page_allocator.h",
- "page_allocator_interface.h",
- "page_heap.h",
- "page_heap_allocator.h",
- "pages.h",
- "pagemap.h",
- "parameters.h",
- "peak_heap_tracker.h",
- "sampler.h",
- "span.h",
- "span_stats.h",
- "stack_trace_table.h",
- "stats.h",
- "static_vars.h",
- "system-alloc.h",
- "thread_cache.h",
- "tracking.h",
- "transfer_cache_stats.h",
- "transfer_cache.h",
- "transfer_cache_internals.h",
-]
-
-common_deps = [
- ":experiment",
- ":malloc_extension",
- ":noruntime_size_classes",
- ":size_class_info",
- "@com_google_absl//absl/algorithm:container",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/base:config",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
- "@com_google_absl//absl/container:fixed_array",
- "@com_google_absl//absl/debugging:debugging_internal",
- "@com_google_absl//absl/debugging:stacktrace",
- "@com_google_absl//absl/debugging:symbolize",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/hash:hash",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- "@com_google_absl//absl/time",
- "@com_google_absl//absl/types:optional",
- "@com_google_absl//absl/types:span",
- "//tcmalloc/internal:atomic_stats_counter",
- "@com_google_absl//absl/numeric:bits",
- "//tcmalloc/internal:config",
- "//tcmalloc/internal:declarations",
- "//tcmalloc/internal:environment",
- "//tcmalloc/internal:linked_list",
- "//tcmalloc/internal:logging",
- "//tcmalloc/internal:mincore",
- "//tcmalloc/internal:numa",
- "//tcmalloc/internal:cache_topology",
- "//tcmalloc/internal:optimization",
- "//tcmalloc/internal:parameter_accessors",
- "//tcmalloc/internal:percpu",
- "//tcmalloc/internal:percpu_tcmalloc",
- "//tcmalloc/internal:range_tracker",
- "//tcmalloc/internal:timeseries_tracker",
- "//tcmalloc/internal:util",
-]
-
-cc_library(
- name = "common",
- srcs = common_srcs,
- hdrs = common_hdrs,
- copts = TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//tcmalloc:tcmalloc_tests"],
- deps = common_deps,
- alwayslink = 1,
-)
-
-cc_library(
- name = "common_deprecated_perthread",
- srcs = common_srcs,
- hdrs = common_hdrs,
- copts = ["-DTCMALLOC_DEPRECATED_PERTHREAD"] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- deps = common_deps,
- alwayslink = 1,
-)
-
-# TEMPORARY. WILL BE REMOVED.
-# Add a dep to this if you want your binary to use hugepage-aware
-# allocator.
-cc_library(
- name = "want_hpaa",
- srcs = ["want_hpaa.cc"],
- copts = ["-g0"] + TCMALLOC_DEFAULT_COPTS,
- visibility = ["//visibility:public"],
- deps = [
- "//tcmalloc/internal:config",
- "@com_google_absl//absl/base:core_headers",
- ],
- alwayslink = 1,
-)
-
-# TEMPORARY. WILL BE REMOVED.
-# Add a dep to this if you want your binary to use hugepage-aware
-# allocator with hpaa_subrelease=true.
-cc_library(
- name = "want_hpaa_subrelease",
- srcs = ["want_hpaa_subrelease.cc"],
- copts = ["-g0"] + TCMALLOC_DEFAULT_COPTS,
- visibility = ["//visibility:public"],
- deps = [
- "//tcmalloc/internal:config",
- "@com_google_absl//absl/base:core_headers",
- ],
- alwayslink = 1,
-)
-
-# TEMPORARY. WILL BE REMOVED.
-# Add a dep to this if you want your binary to not use hugepage-aware
-# allocator.
-cc_library(
- name = "want_no_hpaa",
- srcs = ["want_no_hpaa.cc"],
- copts = ["-g0"] + TCMALLOC_DEFAULT_COPTS,
- visibility = ["//tcmalloc/testing:__pkg__"],
- deps = [
- "//tcmalloc/internal:config",
- "@com_google_absl//absl/base:core_headers",
- ],
- alwayslink = 1,
-)
-
-# TEMPORARY. WILL BE REMOVED.
-# Add a dep to this if you want your binary to use old span sizes.
-cc_library(
- name = "want_legacy_spans",
- srcs = ["want_legacy_spans.cc"],
- copts = ["-g0"] + TCMALLOC_DEFAULT_COPTS,
- visibility = ["//tcmalloc/testing:__pkg__"],
- deps = [
- "//tcmalloc/internal:config",
- "@com_google_absl//absl/base:core_headers",
- ],
- alwayslink = 1,
-)
-
-# Add a dep to this if you want your binary to enable NUMA awareness by
-# default.
-cc_library(
- name = "want_numa_aware",
- srcs = ["want_numa_aware.cc"],
- copts = ["-g0"] + TCMALLOC_DEFAULT_COPTS,
- visibility = [
- "//tcmalloc:__pkg__",
- "//tcmalloc/internal:__pkg__",
- "//tcmalloc/testing:__pkg__",
- ],
- deps = [
- "//tcmalloc/internal:config",
- "@com_google_absl//absl/base:core_headers",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "runtime_size_classes",
- srcs = ["runtime_size_classes.cc"],
- hdrs = ["runtime_size_classes.h"],
- copts = TCMALLOC_DEFAULT_COPTS,
- visibility = ["//visibility:private"],
- deps = [
- ":size_class_info",
- "//tcmalloc/internal:environment",
- "//tcmalloc/internal:logging",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/strings",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "noruntime_size_classes",
- srcs = ["noruntime_size_classes.cc"],
- hdrs = ["runtime_size_classes.h"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":size_class_info",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/strings",
- ],
- alwayslink = 1,
-)
-
-# TCMalloc with large pages is usually faster but fragmentation is higher. See
-# https://github.com/google/tcmalloc/tree/master/docs/tuning.md for more details.
-cc_library(
- name = "tcmalloc_large_pages",
- srcs = [
- "libc_override.h",
- "libc_override_gcc_and_weak.h",
- "libc_override_glibc.h",
- "libc_override_redefine.h",
- "tcmalloc.cc",
- "tcmalloc.h",
- ],
- copts = ["-DTCMALLOC_LARGE_PAGES"] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//visibility:public"],
- deps = tcmalloc_deps + [
- ":common_large_pages",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "common_large_pages",
- srcs = common_srcs,
- hdrs = common_hdrs,
- copts = ["-DTCMALLOC_LARGE_PAGES"] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//tcmalloc:tcmalloc_tests"],
- deps = common_deps,
- alwayslink = 1,
-)
-
-# TCMalloc with 256k pages is usually faster but fragmentation is higher. See
-# https://github.com/google/tcmalloc/tree/master/docs/tuning.md for more details.
-cc_library(
- name = "tcmalloc_256k_pages",
- srcs = [
- "libc_override.h",
- "libc_override_gcc_and_weak.h",
- "libc_override_glibc.h",
- "libc_override_redefine.h",
- "tcmalloc.cc",
- "tcmalloc.h",
- ],
- copts = ["-DTCMALLOC_256K_PAGES"] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//visibility:public"],
- deps = tcmalloc_deps + [
- ":common_256k_pages",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "common_256k_pages",
- srcs = common_srcs,
- hdrs = common_hdrs,
- copts = ["-DTCMALLOC_256K_PAGES"] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//tcmalloc:tcmalloc_tests"],
- deps = common_deps,
- alwayslink = 1,
-)
-
-cc_library(
- name = "tcmalloc_256k_pages_and_numa",
- srcs = [
- "libc_override.h",
- "libc_override_gcc_and_weak.h",
- "libc_override_glibc.h",
- "libc_override_redefine.h",
- "tcmalloc.cc",
- "tcmalloc.h",
- ],
- copts = [
- "-DTCMALLOC_256K_PAGES",
- "-DTCMALLOC_NUMA_AWARE",
- ] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//tcmalloc/testing:__pkg__"],
- deps = tcmalloc_deps + [
- ":common_256k_pages_and_numa",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "common_256k_pages_and_numa",
- srcs = common_srcs,
- hdrs = common_hdrs,
- copts = [
- "-DTCMALLOC_256K_PAGES",
- "-DTCMALLOC_NUMA_AWARE",
- ] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//tcmalloc:tcmalloc_tests"],
- deps = common_deps,
- alwayslink = 1,
-)
-
-# TCMalloc small-but-slow is a version of TCMalloc that chooses to minimize
-# fragmentation at a *severe* cost to performance. It should be used by
-# applications that have significant memory constraints, but don't need to
-# frequently allocate/free objects.
-#
-# See https://github.com/google/tcmalloc/tree/master/docs/tuning.md for more details.
-cc_library(
- name = "tcmalloc_small_but_slow",
- srcs = [
- "libc_override.h",
- "libc_override_gcc_and_weak.h",
- "libc_override_glibc.h",
- "libc_override_redefine.h",
- "tcmalloc.cc",
- "tcmalloc.h",
- ],
- copts = ["-DTCMALLOC_SMALL_BUT_SLOW"] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//visibility:public"],
- deps = tcmalloc_deps + [
- ":common_small_but_slow",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "common_small_but_slow",
- srcs = common_srcs,
- hdrs = common_hdrs,
- copts = ["-DTCMALLOC_SMALL_BUT_SLOW"] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//tcmalloc:tcmalloc_tests"],
- deps = common_deps,
- alwayslink = 1,
-)
-
-# TCMalloc with NUMA awareness compiled in. Note that by default NUMA awareness
-# will still be disabled at runtime - this default can be changed by adding a
-# dependency upon want_numa_aware, or overridden by setting the
-# TCMALLOC_NUMA_AWARE environment variable.
-cc_library(
- name = "tcmalloc_numa_aware",
- srcs = [
- "libc_override.h",
- "libc_override_gcc_and_weak.h",
- "libc_override_glibc.h",
- "libc_override_redefine.h",
- "tcmalloc.cc",
- "tcmalloc.h",
- ],
- copts = ["-DTCMALLOC_NUMA_AWARE"] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//tcmalloc/testing:__pkg__"],
- deps = tcmalloc_deps + [
- ":common_numa_aware",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "common_numa_aware",
- srcs = common_srcs,
- hdrs = common_hdrs,
- copts = ["-DTCMALLOC_NUMA_AWARE"] + TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- visibility = ["//tcmalloc:tcmalloc_tests"],
- deps = common_deps,
- alwayslink = 1,
-)
-
-# Export some header files to //tcmalloc/testing/...
-package_group(
- name = "tcmalloc_tests",
- packages = [
- "//tcmalloc/...",
- ],
-)
-
-cc_library(
- name = "headers_for_tests",
- srcs = [
- "arena.h",
- "central_freelist.h",
- "guarded_page_allocator.h",
- "huge_address_map.h",
- "huge_allocator.h",
- "huge_cache.h",
- "huge_page_aware_allocator.h",
- "huge_page_filler.h",
- "huge_pages.h",
- "huge_region.h",
- "page_allocator.h",
- "page_allocator_interface.h",
- "page_heap.h",
- "page_heap_allocator.h",
- "pagemap.h",
- "parameters.h",
- "peak_heap_tracker.h",
- "span_stats.h",
- "stack_trace_table.h",
- "tracking.h",
- "transfer_cache.h",
- "transfer_cache_internals.h",
- "transfer_cache_stats.h",
- ],
- hdrs = [
- "common.h",
- "pages.h",
- "sampler.h",
- "size_class_info.h",
- "span.h",
- "static_vars.h",
- "stats.h",
- "system-alloc.h",
- ],
- copts = TCMALLOC_DEFAULT_COPTS,
- visibility = ["//tcmalloc:tcmalloc_tests"],
- deps = common_deps,
-)
-
-cc_library(
- name = "mock_central_freelist",
- testonly = 1,
- srcs = ["mock_central_freelist.cc"],
- hdrs = ["mock_central_freelist.h"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- "//tcmalloc/internal:logging",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/types:span",
- "@com_google_googletest//:gtest",
- ],
-)
-
-cc_library(
- name = "page_allocator_test_util",
- testonly = 1,
- srcs = [
- "page_allocator_test_util.h",
- ],
- hdrs = ["page_allocator_test_util.h"],
- copts = TCMALLOC_DEFAULT_COPTS,
- visibility = ["//tcmalloc:tcmalloc_tests"],
- deps = [
- ":common",
- ":malloc_extension",
- ],
-)
-
-cc_test(
- name = "page_heap_test",
- srcs = ["page_heap_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/memory",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_library(
- name = "mock_transfer_cache",
- testonly = 1,
- srcs = ["mock_transfer_cache.cc"],
- hdrs = ["mock_transfer_cache.h"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- ":mock_central_freelist",
- "@com_google_absl//absl/random",
- "@com_google_absl//absl/random:distributions",
- "@com_google_googletest//:gtest",
- ],
-)
-
-cc_fuzz_test(
- name = "transfer_cache_fuzz",
- testonly = 1,
- srcs = ["transfer_cache_fuzz.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- tags = [
- "noasan",
- "nomsan",
- "notsan",
- ],
- deps = [
- ":common",
- ":mock_central_freelist",
- ":mock_transfer_cache",
- ],
-)
-
-cc_test(
- name = "arena_test",
- timeout = "moderate",
- srcs = ["arena_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "transfer_cache_test",
- timeout = "moderate",
- srcs = ["transfer_cache_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- shard_count = 3,
- deps = [
- ":common",
- ":mock_central_freelist",
- ":mock_transfer_cache",
- "//tcmalloc/testing:thread_manager",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/random",
- "@com_google_absl//absl/random:distributions",
- "@com_google_absl//absl/time",
- "@com_google_absl//absl/types:span",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-create_tcmalloc_benchmark(
- name = "transfer_cache_benchmark",
- srcs = ["transfer_cache_benchmark.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc",
- deps = [
- ":common",
- ":mock_central_freelist",
- ":mock_transfer_cache",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/types:optional",
- ],
-)
-
-cc_test(
- name = "huge_cache_test",
- srcs = ["huge_cache_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/random",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "huge_allocator_test",
- srcs = ["huge_allocator_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/random",
- "@com_google_absl//absl/time",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "huge_page_filler_test",
- timeout = "long",
- srcs = ["huge_page_filler_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- deps = [
- ":common",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/algorithm:container",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/container:flat_hash_map",
- "@com_google_absl//absl/container:flat_hash_set",
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/random",
- "@com_google_absl//absl/random:distributions",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "huge_page_aware_allocator_test",
- srcs = ["huge_page_aware_allocator_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- malloc = "//tcmalloc",
- tags = [
- ],
- deps = [
- ":common",
- ":malloc_extension",
- ":page_allocator_test_util",
- "//tcmalloc/internal:logging",
- "//tcmalloc/testing:thread_manager",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/container:flat_hash_map",
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/random",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "huge_region_test",
- srcs = ["huge_region_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/random",
- "@com_google_absl//absl/time",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-create_tcmalloc_benchmark(
- name = "guarded_page_allocator_benchmark",
- srcs = ["guarded_page_allocator_benchmark.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc",
- deps = [
- ":common",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- ],
-)
-
-cc_test(
- name = "guarded_page_allocator_test",
- srcs = ["guarded_page_allocator_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc",
- deps = [
- ":common",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/container:flat_hash_set",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/numeric:bits",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "pagemap_test",
- srcs = ["pagemap_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/random",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "realloc_test",
- srcs = ["realloc_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc",
- deps = [
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/random",
- "@com_google_absl//absl/random:distributions",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "stack_trace_table_test",
- srcs = ["stack_trace_table_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/debugging:stacktrace",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "system-alloc_test",
- srcs = ["system-alloc_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc",
- tags = ["nosan"],
- deps = [
- ":common",
- ":malloc_extension",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/strings:str_format",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-# This test has been named "large" since before tests were s/m/l.
-# The "large" refers to large allocation sizes.
-cc_test(
- name = "tcmalloc_large_test",
- size = "small",
- timeout = "moderate",
- srcs = ["tcmalloc_large_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc",
- tags = [
- "noasan",
- "noubsan",
- ],
- deps = [
- ":common",
- ":malloc_extension",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/container:flat_hash_set",
- "@com_google_absl//absl/container:node_hash_set",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "malloc_extension_system_malloc_test",
- srcs = ["malloc_extension_system_malloc_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc/internal:system_malloc",
- deps = [
- ":malloc_extension",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/random",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "malloc_extension_test",
- srcs = ["malloc_extension_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc",
- tags = [
- "nosan",
- ],
- deps = [
- ":malloc_extension",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/time",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_fuzz_test(
- name = "malloc_extension_fuzz",
- testonly = 1,
- srcs = ["malloc_extension_fuzz.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- tags = [
- "noasan",
- "nomsan",
- "notsan",
- ],
- deps = [
- ":malloc_extension",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/types:optional",
- ],
-)
-
-cc_test(
- name = "page_allocator_test",
- srcs = ["page_allocator_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- deps = [
- ":common",
- ":malloc_extension",
- ":page_allocator_test_util",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "profile_test",
- size = "medium",
- timeout = "long",
- srcs = ["profile_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- flaky = 1, # TODO(b/134690164)
- linkstatic = 1,
- malloc = "//tcmalloc",
- shard_count = 2,
- tags = [
- "noasan",
- "nomsan",
- "notsan",
- ],
- deps = [
- ":malloc_extension",
- "//tcmalloc/internal:declarations",
- "//tcmalloc/internal:linked_list",
- "//tcmalloc/testing:testutil",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/container:flat_hash_map",
- "@com_google_absl//absl/synchronization",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "thread_cache_test",
- size = "medium",
- srcs = ["thread_cache_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- malloc = "//tcmalloc:tcmalloc_deprecated_perthread",
- tags = [
- "nosan",
- ],
- deps = [
- ":malloc_extension",
- "//tcmalloc/internal:logging",
- "//tcmalloc/internal:memory_stats",
- "//tcmalloc/internal:parameter_accessors",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/strings",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-create_tcmalloc_testsuite(
- name = "size_classes_test",
- srcs = ["size_classes_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":size_class_info",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/random",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "size_classes_test_with_runtime_size_classes",
- srcs = ["size_classes_with_runtime_size_classes_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- malloc = "//tcmalloc",
- deps = [
- ":common",
- ":runtime_size_classes",
- ":size_class_info",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "heap_profiling_test",
- srcs = ["heap_profiling_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc",
- tags = [
- "nosan",
- ],
- deps = [
- ":common",
- ":malloc_extension",
- "//tcmalloc/internal:logging",
- "//tcmalloc/internal:parameter_accessors",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "runtime_size_classes_test",
- srcs = ["runtime_size_classes_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- linkstatic = 1,
- malloc = "//tcmalloc",
- deps = [
- ":runtime_size_classes",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-create_tcmalloc_testsuite(
- name = "span_test",
- srcs = ["span_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/container:flat_hash_set",
- "@com_google_absl//absl/random",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-create_tcmalloc_benchmark(
- name = "span_benchmark",
- srcs = ["span_benchmark.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = ":tcmalloc",
- deps = [
- ":common",
- "//tcmalloc/internal:logging",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/random",
- ],
-)
-
-cc_test(
- name = "stats_test",
- srcs = ["stats_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc",
- deps = [
- ":common",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/base",
- "@com_google_absl//absl/time",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "huge_address_map_test",
- srcs = ["huge_address_map_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_library(
- name = "malloc_extension",
- srcs = ["malloc_extension.cc"],
- hdrs = [
- "internal_malloc_extension.h",
- "malloc_extension.h",
- ],
- copts = TCMALLOC_DEFAULT_COPTS,
- visibility = [
- "//visibility:public",
- ],
- deps = [
- "//tcmalloc/internal:parameter_accessors",
- "@com_google_absl//absl/base:config",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:malloc_internal",
- "@com_google_absl//absl/functional:function_ref",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- "@com_google_absl//absl/types:optional",
- "@com_google_absl//absl/types:span",
- ],
-)
-
-cc_test(
- name = "experiment_config_test",
- srcs = ["experiment_config_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":experiment",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_fuzz_test(
- name = "experiment_fuzz",
- testonly = 1,
- srcs = ["experiment_fuzz.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":experiment",
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_fuzz_test(
- name = "runtime_size_classes_fuzz",
- testonly = 1,
- srcs = ["runtime_size_classes_fuzz.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- ":common",
- ":runtime_size_classes",
- ":size_class_info",
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_test(
- name = "cpu_cache_test",
- srcs = ["cpu_cache_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = ":tcmalloc_deprecated_perthread",
- tags = [
- # TODO(b/193887621): Add TSan annotations to CPUCache and/or add
- # atomics to PageMap
- "notsan",
- ],
- deps = [
- ":common_deprecated_perthread",
- "//tcmalloc/internal:optimization",
- "//tcmalloc/internal:util",
- "//tcmalloc/testing:testutil",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/random",
- "@com_google_absl//absl/random:seed_sequences",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-create_tcmalloc_testsuite(
- name = "central_freelist_test",
- srcs = ["central_freelist_test.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- deps = [
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/random",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-create_tcmalloc_benchmark(
- name = "central_freelist_benchmark",
- srcs = ["central_freelist_benchmark.cc"],
- copts = TCMALLOC_DEFAULT_COPTS,
- malloc = "//tcmalloc",
- deps = [
- ":common",
- "@com_github_google_benchmark//:benchmark",
- "@com_google_absl//absl/algorithm:container",
- "@com_google_absl//absl/random",
- "@com_google_absl//absl/types:optional",
- ],
-)
+# Copyright 2019 The TCMalloc Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Description:
+#
+# tcmalloc is a fast malloc implementation. See
+# https://github.com/google/tcmalloc/tree/master/docs/design.md for a high-level description of
+# how this malloc works.
+
+load("@rules_fuzzing//fuzzing:cc_defs.bzl", "cc_fuzz_test")
+load("//tcmalloc:copts.bzl", "TCMALLOC_DEFAULT_COPTS")
+load("//tcmalloc:variants.bzl", "create_tcmalloc_benchmark", "create_tcmalloc_testsuite")
+
+package(default_visibility = ["//visibility:private"])
+
+licenses(["notice"])
+
+exports_files(["LICENSE"])
+
+config_setting(
+ name = "llvm",
+ flag_values = {
+ "@bazel_tools//tools/cpp:compiler": "clang",
+ },
+ visibility = [
+ "//tcmalloc/internal:__subpackages__",
+ "//tcmalloc/testing:__subpackages__",
+ ],
+)
+
+cc_library(
+ name = "experiment",
+ srcs = ["experiment.cc"],
+ hdrs = [
+ "experiment.h",
+ "experiment_config.h",
+ ],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":malloc_extension",
+ "//tcmalloc/internal:environment",
+ "//tcmalloc/internal:logging",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/types:optional",
+ ],
+)
+
+# Dependencies required by :tcmalloc and its variants. Since :common is built
+# several different ways, it should not be included on this list.
+tcmalloc_deps = [
+ ":experiment",
+ ":malloc_extension",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/base:config",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/base:dynamic_annotations",
+ "@com_google_absl//absl/debugging:leak_check",
+ "@com_google_absl//absl/debugging:stacktrace",
+ "@com_google_absl//absl/debugging:symbolize",
+ "@com_google_absl//absl/memory",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/numeric:bits",
+ "//tcmalloc/internal:config",
+ "//tcmalloc/internal:declarations",
+ "//tcmalloc/internal:linked_list",
+ "//tcmalloc/internal:logging",
+ "//tcmalloc/internal:memory_stats",
+ "//tcmalloc/internal:optimization",
+ "//tcmalloc/internal:percpu",
+]
+
+# This library provides tcmalloc always
+cc_library(
+ name = "tcmalloc",
+ srcs = [
+ "libc_override.h",
+ "libc_override_gcc_and_weak.h",
+ "libc_override_glibc.h",
+ "libc_override_redefine.h",
+ "tcmalloc.cc",
+ "tcmalloc.h",
+ ],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//visibility:public"],
+ deps = tcmalloc_deps + [
+ ":common",
+ ],
+ alwayslink = 1,
+)
+
+# Provides tcmalloc always; use per-thread mode.
+cc_library(
+ name = "tcmalloc_deprecated_perthread",
+ srcs = [
+ "libc_override.h",
+ "libc_override_gcc_and_weak.h",
+ "libc_override_glibc.h",
+ "libc_override_redefine.h",
+ "tcmalloc.cc",
+ "tcmalloc.h",
+ ],
+ copts = ["-DTCMALLOC_DEPRECATED_PERTHREAD"] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = [
+ "//tcmalloc/internal:__pkg__",
+ "//tcmalloc/testing:__pkg__",
+ ],
+ deps = tcmalloc_deps + [
+ ":common_deprecated_perthread",
+ ],
+ alwayslink = 1,
+)
+
+# An opt tcmalloc build with ASSERTs forced on (by turning off
+# NDEBUG). Useful for tracking down crashes in production binaries.
+# To use add malloc = "//tcmalloc:opt_with_assertions" in your
+# target's build rule.
+cc_library(
+ name = "opt_with_assertions",
+ srcs = [
+ "libc_override.h",
+ "libc_override_gcc_and_weak.h",
+ "libc_override_glibc.h",
+ "libc_override_redefine.h",
+ "tcmalloc.cc",
+ "tcmalloc.h",
+ ],
+ copts = [
+ "-O2",
+ "-UNDEBUG",
+ ] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//visibility:public"],
+ deps = tcmalloc_deps + [
+ ":common",
+ ],
+ alwayslink = 1,
+)
+
+cc_library(
+ name = "size_class_info",
+ hdrs = ["size_class_info.h"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ "//tcmalloc/internal:logging",
+ ],
+)
+
+# List of common source files used by the various tcmalloc libraries.
+common_srcs = [
+ "arena.cc",
+ "arena.h",
+ "background.cc",
+ "central_freelist.cc",
+ "central_freelist.h",
+ "common.cc",
+ "common.h",
+ "cpu_cache.cc",
+ "cpu_cache.h",
+ "experimental_pow2_below64_size_class.cc",
+ "experimental_pow2_size_class.cc",
+ "legacy_size_classes.cc",
+ "guarded_page_allocator.h",
+ "guarded_page_allocator.cc",
+ "huge_address_map.cc",
+ "huge_allocator.cc",
+ "huge_allocator.h",
+ "huge_cache.cc",
+ "huge_cache.h",
+ "huge_region.h",
+ "huge_page_aware_allocator.cc",
+ "huge_page_aware_allocator.h",
+ "huge_page_filler.h",
+ "huge_pages.h",
+ "page_allocator.cc",
+ "page_allocator.h",
+ "page_allocator_interface.cc",
+ "page_allocator_interface.h",
+ "page_heap.cc",
+ "page_heap.h",
+ "page_heap_allocator.h",
+ "pagemap.cc",
+ "pagemap.h",
+ "parameters.cc",
+ "peak_heap_tracker.cc",
+ "sampler.cc",
+ "sampler.h",
+ "size_classes.cc",
+ "span.cc",
+ "span.h",
+ "span_stats.h",
+ "stack_trace_table.cc",
+ "stack_trace_table.h",
+ "static_vars.cc",
+ "static_vars.h",
+ "stats.cc",
+ "system-alloc.cc",
+ "system-alloc.h",
+ "thread_cache.cc",
+ "thread_cache.h",
+ "tracking.h",
+ "transfer_cache_stats.h",
+ "transfer_cache.cc",
+ "transfer_cache.h",
+ "transfer_cache_internals.h",
+]
+
+common_hdrs = [
+ "arena.h",
+ "central_freelist.h",
+ "common.h",
+ "cpu_cache.h",
+ "guarded_page_allocator.h",
+ "huge_address_map.h",
+ "huge_allocator.h",
+ "tcmalloc_policy.h",
+ "huge_cache.h",
+ "huge_page_filler.h",
+ "huge_pages.h",
+ "huge_region.h",
+ "huge_page_aware_allocator.h",
+ "page_allocator.h",
+ "page_allocator_interface.h",
+ "page_heap.h",
+ "page_heap_allocator.h",
+ "pages.h",
+ "pagemap.h",
+ "parameters.h",
+ "peak_heap_tracker.h",
+ "sampler.h",
+ "span.h",
+ "span_stats.h",
+ "stack_trace_table.h",
+ "stats.h",
+ "static_vars.h",
+ "system-alloc.h",
+ "thread_cache.h",
+ "tracking.h",
+ "transfer_cache_stats.h",
+ "transfer_cache.h",
+ "transfer_cache_internals.h",
+]
+
+common_deps = [
+ ":experiment",
+ ":malloc_extension",
+ ":noruntime_size_classes",
+ ":size_class_info",
+ "@com_google_absl//absl/algorithm:container",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/base:config",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/base:dynamic_annotations",
+ "@com_google_absl//absl/container:fixed_array",
+ "@com_google_absl//absl/debugging:debugging_internal",
+ "@com_google_absl//absl/debugging:stacktrace",
+ "@com_google_absl//absl/debugging:symbolize",
+ "@com_google_absl//absl/synchronization",
+ "@com_google_absl//absl/hash:hash",
+ "@com_google_absl//absl/memory",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
+ "@com_google_absl//absl/time",
+ "@com_google_absl//absl/types:optional",
+ "@com_google_absl//absl/types:span",
+ "//tcmalloc/internal:atomic_stats_counter",
+ "@com_google_absl//absl/numeric:bits",
+ "//tcmalloc/internal:config",
+ "//tcmalloc/internal:declarations",
+ "//tcmalloc/internal:environment",
+ "//tcmalloc/internal:linked_list",
+ "//tcmalloc/internal:logging",
+ "//tcmalloc/internal:mincore",
+ "//tcmalloc/internal:numa",
+ "//tcmalloc/internal:cache_topology",
+ "//tcmalloc/internal:optimization",
+ "//tcmalloc/internal:parameter_accessors",
+ "//tcmalloc/internal:percpu",
+ "//tcmalloc/internal:percpu_tcmalloc",
+ "//tcmalloc/internal:range_tracker",
+ "//tcmalloc/internal:timeseries_tracker",
+ "//tcmalloc/internal:util",
+]
+
+cc_library(
+ name = "common",
+ srcs = common_srcs,
+ hdrs = common_hdrs,
+ copts = TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//tcmalloc:tcmalloc_tests"],
+ deps = common_deps,
+ alwayslink = 1,
+)
+
+cc_library(
+ name = "common_deprecated_perthread",
+ srcs = common_srcs,
+ hdrs = common_hdrs,
+ copts = ["-DTCMALLOC_DEPRECATED_PERTHREAD"] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ deps = common_deps,
+ alwayslink = 1,
+)
+
+# TEMPORARY. WILL BE REMOVED.
+# Add a dep to this if you want your binary to use hugepage-aware
+# allocator.
+cc_library(
+ name = "want_hpaa",
+ srcs = ["want_hpaa.cc"],
+ copts = ["-g0"] + TCMALLOC_DEFAULT_COPTS,
+ visibility = ["//visibility:public"],
+ deps = [
+ "//tcmalloc/internal:config",
+ "@com_google_absl//absl/base:core_headers",
+ ],
+ alwayslink = 1,
+)
+
+# TEMPORARY. WILL BE REMOVED.
+# Add a dep to this if you want your binary to use hugepage-aware
+# allocator with hpaa_subrelease=true.
+cc_library(
+ name = "want_hpaa_subrelease",
+ srcs = ["want_hpaa_subrelease.cc"],
+ copts = ["-g0"] + TCMALLOC_DEFAULT_COPTS,
+ visibility = ["//visibility:public"],
+ deps = [
+ "//tcmalloc/internal:config",
+ "@com_google_absl//absl/base:core_headers",
+ ],
+ alwayslink = 1,
+)
+
+# TEMPORARY. WILL BE REMOVED.
+# Add a dep to this if you want your binary to not use hugepage-aware
+# allocator.
+cc_library(
+ name = "want_no_hpaa",
+ srcs = ["want_no_hpaa.cc"],
+ copts = ["-g0"] + TCMALLOC_DEFAULT_COPTS,
+ visibility = ["//tcmalloc/testing:__pkg__"],
+ deps = [
+ "//tcmalloc/internal:config",
+ "@com_google_absl//absl/base:core_headers",
+ ],
+ alwayslink = 1,
+)
+
+# TEMPORARY. WILL BE REMOVED.
+# Add a dep to this if you want your binary to use old span sizes.
+cc_library(
+ name = "want_legacy_spans",
+ srcs = ["want_legacy_spans.cc"],
+ copts = ["-g0"] + TCMALLOC_DEFAULT_COPTS,
+ visibility = ["//tcmalloc/testing:__pkg__"],
+ deps = [
+ "//tcmalloc/internal:config",
+ "@com_google_absl//absl/base:core_headers",
+ ],
+ alwayslink = 1,
+)
+
+# Add a dep to this if you want your binary to enable NUMA awareness by
+# default.
+cc_library(
+ name = "want_numa_aware",
+ srcs = ["want_numa_aware.cc"],
+ copts = ["-g0"] + TCMALLOC_DEFAULT_COPTS,
+ visibility = [
+ "//tcmalloc:__pkg__",
+ "//tcmalloc/internal:__pkg__",
+ "//tcmalloc/testing:__pkg__",
+ ],
+ deps = [
+ "//tcmalloc/internal:config",
+ "@com_google_absl//absl/base:core_headers",
+ ],
+ alwayslink = 1,
+)
+
+cc_library(
+ name = "runtime_size_classes",
+ srcs = ["runtime_size_classes.cc"],
+ hdrs = ["runtime_size_classes.h"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ visibility = ["//visibility:private"],
+ deps = [
+ ":size_class_info",
+ "//tcmalloc/internal:environment",
+ "//tcmalloc/internal:logging",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/strings",
+ ],
+ alwayslink = 1,
+)
+
+cc_library(
+ name = "noruntime_size_classes",
+ srcs = ["noruntime_size_classes.cc"],
+ hdrs = ["runtime_size_classes.h"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":size_class_info",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/strings",
+ ],
+ alwayslink = 1,
+)
+
+# TCMalloc with large pages is usually faster but fragmentation is higher. See
+# https://github.com/google/tcmalloc/tree/master/docs/tuning.md for more details.
+cc_library(
+ name = "tcmalloc_large_pages",
+ srcs = [
+ "libc_override.h",
+ "libc_override_gcc_and_weak.h",
+ "libc_override_glibc.h",
+ "libc_override_redefine.h",
+ "tcmalloc.cc",
+ "tcmalloc.h",
+ ],
+ copts = ["-DTCMALLOC_LARGE_PAGES"] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//visibility:public"],
+ deps = tcmalloc_deps + [
+ ":common_large_pages",
+ ],
+ alwayslink = 1,
+)
+
+cc_library(
+ name = "common_large_pages",
+ srcs = common_srcs,
+ hdrs = common_hdrs,
+ copts = ["-DTCMALLOC_LARGE_PAGES"] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//tcmalloc:tcmalloc_tests"],
+ deps = common_deps,
+ alwayslink = 1,
+)
+
+# TCMalloc with 256k pages is usually faster but fragmentation is higher. See
+# https://github.com/google/tcmalloc/tree/master/docs/tuning.md for more details.
+cc_library(
+ name = "tcmalloc_256k_pages",
+ srcs = [
+ "libc_override.h",
+ "libc_override_gcc_and_weak.h",
+ "libc_override_glibc.h",
+ "libc_override_redefine.h",
+ "tcmalloc.cc",
+ "tcmalloc.h",
+ ],
+ copts = ["-DTCMALLOC_256K_PAGES"] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//visibility:public"],
+ deps = tcmalloc_deps + [
+ ":common_256k_pages",
+ ],
+ alwayslink = 1,
+)
+
+cc_library(
+ name = "common_256k_pages",
+ srcs = common_srcs,
+ hdrs = common_hdrs,
+ copts = ["-DTCMALLOC_256K_PAGES"] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//tcmalloc:tcmalloc_tests"],
+ deps = common_deps,
+ alwayslink = 1,
+)
+
+cc_library(
+ name = "tcmalloc_256k_pages_and_numa",
+ srcs = [
+ "libc_override.h",
+ "libc_override_gcc_and_weak.h",
+ "libc_override_glibc.h",
+ "libc_override_redefine.h",
+ "tcmalloc.cc",
+ "tcmalloc.h",
+ ],
+ copts = [
+ "-DTCMALLOC_256K_PAGES",
+ "-DTCMALLOC_NUMA_AWARE",
+ ] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//tcmalloc/testing:__pkg__"],
+ deps = tcmalloc_deps + [
+ ":common_256k_pages_and_numa",
+ ],
+ alwayslink = 1,
+)
+
+cc_library(
+ name = "common_256k_pages_and_numa",
+ srcs = common_srcs,
+ hdrs = common_hdrs,
+ copts = [
+ "-DTCMALLOC_256K_PAGES",
+ "-DTCMALLOC_NUMA_AWARE",
+ ] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//tcmalloc:tcmalloc_tests"],
+ deps = common_deps,
+ alwayslink = 1,
+)
+
+# TCMalloc small-but-slow is a version of TCMalloc that chooses to minimize
+# fragmentation at a *severe* cost to performance. It should be used by
+# applications that have significant memory constraints, but don't need to
+# frequently allocate/free objects.
+#
+# See https://github.com/google/tcmalloc/tree/master/docs/tuning.md for more details.
+cc_library(
+ name = "tcmalloc_small_but_slow",
+ srcs = [
+ "libc_override.h",
+ "libc_override_gcc_and_weak.h",
+ "libc_override_glibc.h",
+ "libc_override_redefine.h",
+ "tcmalloc.cc",
+ "tcmalloc.h",
+ ],
+ copts = ["-DTCMALLOC_SMALL_BUT_SLOW"] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//visibility:public"],
+ deps = tcmalloc_deps + [
+ ":common_small_but_slow",
+ ],
+ alwayslink = 1,
+)
+
+cc_library(
+ name = "common_small_but_slow",
+ srcs = common_srcs,
+ hdrs = common_hdrs,
+ copts = ["-DTCMALLOC_SMALL_BUT_SLOW"] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//tcmalloc:tcmalloc_tests"],
+ deps = common_deps,
+ alwayslink = 1,
+)
+
+# TCMalloc with NUMA awareness compiled in. Note that by default NUMA awareness
+# will still be disabled at runtime - this default can be changed by adding a
+# dependency upon want_numa_aware, or overridden by setting the
+# TCMALLOC_NUMA_AWARE environment variable.
+cc_library(
+ name = "tcmalloc_numa_aware",
+ srcs = [
+ "libc_override.h",
+ "libc_override_gcc_and_weak.h",
+ "libc_override_glibc.h",
+ "libc_override_redefine.h",
+ "tcmalloc.cc",
+ "tcmalloc.h",
+ ],
+ copts = ["-DTCMALLOC_NUMA_AWARE"] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//tcmalloc/testing:__pkg__"],
+ deps = tcmalloc_deps + [
+ ":common_numa_aware",
+ ],
+ alwayslink = 1,
+)
+
+cc_library(
+ name = "common_numa_aware",
+ srcs = common_srcs,
+ hdrs = common_hdrs,
+ copts = ["-DTCMALLOC_NUMA_AWARE"] + TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ visibility = ["//tcmalloc:tcmalloc_tests"],
+ deps = common_deps,
+ alwayslink = 1,
+)
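
The comment above notes that NUMA awareness stays disabled at runtime unless the TCMALLOC_NUMA_AWARE environment variable is set (or a want_numa_aware dependency is added). Since environment overrides of this kind are consulted when the allocator starts up, the variable needs to be present in the environment the process is launched with; the launcher below is an editorial sketch of that, not part of this patch.

    #include <cstdlib>
    #include <unistd.h>

    // Hypothetical launcher: export TCMALLOC_NUMA_AWARE before exec'ing the
    // real binary, so the setting is visible when its allocator initializes.
    int main(int argc, char** argv) {
      if (argc < 2) return 1;
      setenv("TCMALLOC_NUMA_AWARE", "1", /*overwrite=*/1);
      execv(argv[1], argv + 1);
      return 1;  // Only reached if execv() fails.
    }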
+
+# Export some header files to //tcmalloc/testing/...
+package_group(
+ name = "tcmalloc_tests",
+ packages = [
+ "//tcmalloc/...",
+ ],
+)
+
+cc_library(
+ name = "headers_for_tests",
+ srcs = [
+ "arena.h",
+ "central_freelist.h",
+ "guarded_page_allocator.h",
+ "huge_address_map.h",
+ "huge_allocator.h",
+ "huge_cache.h",
+ "huge_page_aware_allocator.h",
+ "huge_page_filler.h",
+ "huge_pages.h",
+ "huge_region.h",
+ "page_allocator.h",
+ "page_allocator_interface.h",
+ "page_heap.h",
+ "page_heap_allocator.h",
+ "pagemap.h",
+ "parameters.h",
+ "peak_heap_tracker.h",
+ "span_stats.h",
+ "stack_trace_table.h",
+ "tracking.h",
+ "transfer_cache.h",
+ "transfer_cache_internals.h",
+ "transfer_cache_stats.h",
+ ],
+ hdrs = [
+ "common.h",
+ "pages.h",
+ "sampler.h",
+ "size_class_info.h",
+ "span.h",
+ "static_vars.h",
+ "stats.h",
+ "system-alloc.h",
+ ],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ visibility = ["//tcmalloc:tcmalloc_tests"],
+ deps = common_deps,
+)
+
+cc_library(
+ name = "mock_central_freelist",
+ testonly = 1,
+ srcs = ["mock_central_freelist.cc"],
+ hdrs = ["mock_central_freelist.h"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ "//tcmalloc/internal:logging",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/types:span",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "page_allocator_test_util",
+ testonly = 1,
+ srcs = [
+ "page_allocator_test_util.h",
+ ],
+ hdrs = ["page_allocator_test_util.h"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ visibility = ["//tcmalloc:tcmalloc_tests"],
+ deps = [
+ ":common",
+ ":malloc_extension",
+ ],
+)
+
+cc_test(
+ name = "page_heap_test",
+ srcs = ["page_heap_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/memory",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "mock_transfer_cache",
+ testonly = 1,
+ srcs = ["mock_transfer_cache.cc"],
+ hdrs = ["mock_transfer_cache.h"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ ":mock_central_freelist",
+ "@com_google_absl//absl/random",
+ "@com_google_absl//absl/random:distributions",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_fuzz_test(
+ name = "transfer_cache_fuzz",
+ testonly = 1,
+ srcs = ["transfer_cache_fuzz.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ tags = [
+ "noasan",
+ "nomsan",
+ "notsan",
+ ],
+ deps = [
+ ":common",
+ ":mock_central_freelist",
+ ":mock_transfer_cache",
+ ],
+)
+
+cc_test(
+ name = "arena_test",
+ timeout = "moderate",
+ srcs = ["arena_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "transfer_cache_test",
+ timeout = "moderate",
+ srcs = ["transfer_cache_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ shard_count = 3,
+ deps = [
+ ":common",
+ ":mock_central_freelist",
+ ":mock_transfer_cache",
+ "//tcmalloc/testing:thread_manager",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/random",
+ "@com_google_absl//absl/random:distributions",
+ "@com_google_absl//absl/time",
+ "@com_google_absl//absl/types:span",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+create_tcmalloc_benchmark(
+ name = "transfer_cache_benchmark",
+ srcs = ["transfer_cache_benchmark.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc",
+ deps = [
+ ":common",
+ ":mock_central_freelist",
+ ":mock_transfer_cache",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/types:optional",
+ ],
+)
+
+cc_test(
+ name = "huge_cache_test",
+ srcs = ["huge_cache_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/memory",
+ "@com_google_absl//absl/random",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "huge_allocator_test",
+ srcs = ["huge_allocator_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/random",
+ "@com_google_absl//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "huge_page_filler_test",
+ timeout = "long",
+ srcs = ["huge_page_filler_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ deps = [
+ ":common",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/algorithm:container",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/container:flat_hash_map",
+ "@com_google_absl//absl/container:flat_hash_set",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/memory",
+ "@com_google_absl//absl/random",
+ "@com_google_absl//absl/random:distributions",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/synchronization",
+ "@com_google_absl//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "huge_page_aware_allocator_test",
+ srcs = ["huge_page_aware_allocator_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ malloc = "//tcmalloc",
+ tags = [
+ ],
+ deps = [
+ ":common",
+ ":malloc_extension",
+ ":page_allocator_test_util",
+ "//tcmalloc/internal:logging",
+ "//tcmalloc/testing:thread_manager",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/container:flat_hash_map",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/random",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
+ "@com_google_absl//absl/synchronization",
+ "@com_google_absl//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "huge_region_test",
+ srcs = ["huge_region_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/random",
+ "@com_google_absl//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+create_tcmalloc_benchmark(
+ name = "guarded_page_allocator_benchmark",
+ srcs = ["guarded_page_allocator_benchmark.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc",
+ deps = [
+ ":common",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ ],
+)
+
+cc_test(
+ name = "guarded_page_allocator_test",
+ srcs = ["guarded_page_allocator_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc",
+ deps = [
+ ":common",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/container:flat_hash_set",
+ "@com_google_absl//absl/memory",
+ "@com_google_absl//absl/numeric:bits",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "pagemap_test",
+ srcs = ["pagemap_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/random",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "realloc_test",
+ srcs = ["realloc_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc",
+ deps = [
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/random",
+ "@com_google_absl//absl/random:distributions",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "stack_trace_table_test",
+ srcs = ["stack_trace_table_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/debugging:stacktrace",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "system-alloc_test",
+ srcs = ["system-alloc_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc",
+ tags = ["nosan"],
+ deps = [
+ ":common",
+ ":malloc_extension",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/strings:str_format",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+# This test has been named "large" since before tests had small/medium/large
+# size designations; the "large" refers to large allocation sizes.
+cc_test(
+ name = "tcmalloc_large_test",
+ size = "small",
+ timeout = "moderate",
+ srcs = ["tcmalloc_large_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc",
+ tags = [
+ "noasan",
+ "noubsan",
+ ],
+ deps = [
+ ":common",
+ ":malloc_extension",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/container:flat_hash_set",
+ "@com_google_absl//absl/container:node_hash_set",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "malloc_extension_system_malloc_test",
+ srcs = ["malloc_extension_system_malloc_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc/internal:system_malloc",
+ deps = [
+ ":malloc_extension",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/random",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "malloc_extension_test",
+ srcs = ["malloc_extension_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc",
+ tags = [
+ "nosan",
+ ],
+ deps = [
+ ":malloc_extension",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_fuzz_test(
+ name = "malloc_extension_fuzz",
+ testonly = 1,
+ srcs = ["malloc_extension_fuzz.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ tags = [
+ "noasan",
+ "nomsan",
+ "notsan",
+ ],
+ deps = [
+ ":malloc_extension",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/types:optional",
+ ],
+)
+
+cc_test(
+ name = "page_allocator_test",
+ srcs = ["page_allocator_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ deps = [
+ ":common",
+ ":malloc_extension",
+ ":page_allocator_test_util",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/memory",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "profile_test",
+ size = "medium",
+ timeout = "long",
+ srcs = ["profile_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ flaky = 1, # TODO(b/134690164)
+ linkstatic = 1,
+ malloc = "//tcmalloc",
+ shard_count = 2,
+ tags = [
+ "noasan",
+ "nomsan",
+ "notsan",
+ ],
+ deps = [
+ ":malloc_extension",
+ "//tcmalloc/internal:declarations",
+ "//tcmalloc/internal:linked_list",
+ "//tcmalloc/testing:testutil",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/container:flat_hash_map",
+ "@com_google_absl//absl/synchronization",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "thread_cache_test",
+ size = "medium",
+ srcs = ["thread_cache_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ malloc = "//tcmalloc:tcmalloc_deprecated_perthread",
+ tags = [
+ "nosan",
+ ],
+ deps = [
+ ":malloc_extension",
+ "//tcmalloc/internal:logging",
+ "//tcmalloc/internal:memory_stats",
+ "//tcmalloc/internal:parameter_accessors",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/strings",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+create_tcmalloc_testsuite(
+ name = "size_classes_test",
+ srcs = ["size_classes_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":size_class_info",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/random",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "size_classes_test_with_runtime_size_classes",
+ srcs = ["size_classes_with_runtime_size_classes_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ malloc = "//tcmalloc",
+ deps = [
+ ":common",
+ ":runtime_size_classes",
+ ":size_class_info",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "heap_profiling_test",
+ srcs = ["heap_profiling_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc",
+ tags = [
+ "nosan",
+ ],
+ deps = [
+ ":common",
+ ":malloc_extension",
+ "//tcmalloc/internal:logging",
+ "//tcmalloc/internal:parameter_accessors",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "runtime_size_classes_test",
+ srcs = ["runtime_size_classes_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ linkstatic = 1,
+ malloc = "//tcmalloc",
+ deps = [
+ ":runtime_size_classes",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+create_tcmalloc_testsuite(
+ name = "span_test",
+ srcs = ["span_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/container:flat_hash_set",
+ "@com_google_absl//absl/random",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+create_tcmalloc_benchmark(
+ name = "span_benchmark",
+ srcs = ["span_benchmark.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = ":tcmalloc",
+ deps = [
+ ":common",
+ "//tcmalloc/internal:logging",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/random",
+ ],
+)
+
+cc_test(
+ name = "stats_test",
+ srcs = ["stats_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc",
+ deps = [
+ ":common",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "huge_address_map_test",
+ srcs = ["huge_address_map_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "malloc_extension",
+ srcs = ["malloc_extension.cc"],
+ hdrs = [
+ "internal_malloc_extension.h",
+ "malloc_extension.h",
+ ],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ visibility = [
+ "//visibility:public",
+ ],
+ deps = [
+ "//tcmalloc/internal:parameter_accessors",
+ "@com_google_absl//absl/base:config",
+ "@com_google_absl//absl/base:core_headers",
+ "@com_google_absl//absl/base:malloc_internal",
+ "@com_google_absl//absl/functional:function_ref",
+ "@com_google_absl//absl/memory",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/time",
+ "@com_google_absl//absl/types:optional",
+ "@com_google_absl//absl/types:span",
+ ],
+)
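
A minimal usage sketch for the :malloc_extension target declared above (editorial, not part of this patch). The property name follows the upstream tcmalloc documentation and should be treated as an assumption.

    #include <cstdio>

    #include "tcmalloc/malloc_extension.h"

    int main() {
      // GetNumericProperty() returns an empty optional when the property is
      // not supported, e.g. when the system allocator is linked in instead of
      // tcmalloc, so callers must check before dereferencing.
      if (auto allocated = tcmalloc::MallocExtension::GetNumericProperty(
              "generic.current_allocated_bytes")) {
        std::printf("currently allocated: %zu bytes\n", *allocated);
      }

      // Ask the allocator to return up to 16 MiB of unused memory to the OS.
      tcmalloc::MallocExtension::ReleaseMemoryToSystem(16 << 20);
    }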
+
+cc_test(
+ name = "experiment_config_test",
+ srcs = ["experiment_config_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":experiment",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_fuzz_test(
+ name = "experiment_fuzz",
+ testonly = 1,
+ srcs = ["experiment_fuzz.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":experiment",
+ "@com_google_absl//absl/strings",
+ ],
+)
+
+cc_fuzz_test(
+ name = "runtime_size_classes_fuzz",
+ testonly = 1,
+ srcs = ["runtime_size_classes_fuzz.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ ":common",
+ ":runtime_size_classes",
+ ":size_class_info",
+ "@com_google_absl//absl/strings",
+ ],
+)
+
+cc_test(
+ name = "cpu_cache_test",
+ srcs = ["cpu_cache_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = ":tcmalloc_deprecated_perthread",
+ tags = [
+ # TODO(b/193887621): Add TSan annotations to CPUCache and/or add
+ # atomics to PageMap
+ "notsan",
+ ],
+ deps = [
+ ":common_deprecated_perthread",
+ "//tcmalloc/internal:optimization",
+ "//tcmalloc/internal:util",
+ "//tcmalloc/testing:testutil",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/random",
+ "@com_google_absl//absl/random:seed_sequences",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+create_tcmalloc_testsuite(
+ name = "central_freelist_test",
+ srcs = ["central_freelist_test.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ deps = [
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/random",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+create_tcmalloc_benchmark(
+ name = "central_freelist_benchmark",
+ srcs = ["central_freelist_benchmark.cc"],
+ copts = TCMALLOC_DEFAULT_COPTS,
+ malloc = "//tcmalloc",
+ deps = [
+ ":common",
+ "@com_github_google_benchmark//:benchmark",
+ "@com_google_absl//absl/algorithm:container",
+ "@com_google_absl//absl/random",
+ "@com_google_absl//absl/types:optional",
+ ],
+)
diff --git a/contrib/libs/tcmalloc/tcmalloc/arena.cc b/contrib/libs/tcmalloc/tcmalloc/arena.cc
index 5ba1a65bf3..d71241e617 100644
--- a/contrib/libs/tcmalloc/tcmalloc/arena.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/arena.cc
@@ -15,24 +15,24 @@
#include "tcmalloc/arena.h"
#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/static_vars.h"
+#include "tcmalloc/static_vars.h"
#include "tcmalloc/system-alloc.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
-void* Arena::Alloc(size_t bytes, int alignment) {
- ASSERT(alignment > 0);
- { // First we need to move up to the correct alignment.
- const int misalignment =
- reinterpret_cast<uintptr_t>(free_area_) % alignment;
- const int alignment_bytes =
- misalignment != 0 ? alignment - misalignment : 0;
- free_area_ += alignment_bytes;
- free_avail_ -= alignment_bytes;
- bytes_allocated_ += alignment_bytes;
- }
+void* Arena::Alloc(size_t bytes, int alignment) {
+ ASSERT(alignment > 0);
+ { // First we need to move up to the correct alignment.
+ const int misalignment =
+ reinterpret_cast<uintptr_t>(free_area_) % alignment;
+ const int alignment_bytes =
+ misalignment != 0 ? alignment - misalignment : 0;
+ free_area_ += alignment_bytes;
+ free_avail_ -= alignment_bytes;
+ bytes_allocated_ += alignment_bytes;
+ }
char* result;
if (free_avail_ < bytes) {
size_t ask = bytes > kAllocIncrement ? bytes : kAllocIncrement;
@@ -40,32 +40,32 @@ void* Arena::Alloc(size_t bytes, int alignment) {
// TODO(b/171081864): Arena allocations should be made relatively
// infrequently. Consider tagging this memory with sampled objects which
// are also infrequently allocated.
- //
- // In the meantime it is important that we use the current NUMA partition
- // rather than always using a particular one because it's possible that any
- // single partition we choose might only contain nodes that the process is
- // unable to allocate from due to cgroup restrictions.
- MemoryTag tag;
- const auto& numa_topology = Static::numa_topology();
- if (numa_topology.numa_aware()) {
- tag = NumaNormalTag(numa_topology.GetCurrentPartition());
- } else {
- tag = MemoryTag::kNormal;
- }
- free_area_ =
- reinterpret_cast<char*>(SystemAlloc(ask, &actual_size, kPageSize, tag));
+ //
+ // In the meantime it is important that we use the current NUMA partition
+ // rather than always using a particular one because it's possible that any
+ // single partition we choose might only contain nodes that the process is
+ // unable to allocate from due to cgroup restrictions.
+ MemoryTag tag;
+ const auto& numa_topology = Static::numa_topology();
+ if (numa_topology.numa_aware()) {
+ tag = NumaNormalTag(numa_topology.GetCurrentPartition());
+ } else {
+ tag = MemoryTag::kNormal;
+ }
+ free_area_ =
+ reinterpret_cast<char*>(SystemAlloc(ask, &actual_size, kPageSize, tag));
if (ABSL_PREDICT_FALSE(free_area_ == nullptr)) {
Crash(kCrash, __FILE__, __LINE__,
"FATAL ERROR: Out of memory trying to allocate internal tcmalloc "
- "data (bytes, object-size); is something preventing mmap from "
- "succeeding (sandbox, VSS limitations)?",
+ "data (bytes, object-size); is something preventing mmap from "
+ "succeeding (sandbox, VSS limitations)?",
kAllocIncrement, bytes);
}
SystemBack(free_area_, actual_size);
free_avail_ = actual_size;
}
- ASSERT(reinterpret_cast<uintptr_t>(free_area_) % alignment == 0);
+ ASSERT(reinterpret_cast<uintptr_t>(free_area_) % alignment == 0);
result = free_area_;
free_area_ += bytes;
free_avail_ -= bytes;
@@ -73,6 +73,6 @@ void* Arena::Alloc(size_t bytes, int alignment) {
return reinterpret_cast<void*>(result);
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
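
The hunk above first bumps free_area_ up to the requested alignment before carving out the allocation. The self-contained sketch below (editorial, not part of this patch) shows the same align-up arithmetic in isolation.

    #include <cassert>
    #include <cstdint>

    // Advance `cursor` to the next multiple of `alignment`, exactly as
    // Arena::Alloc() does before handing out `bytes`.
    char* AlignUp(char* cursor, int alignment) {
      const int misalignment =
          reinterpret_cast<std::uintptr_t>(cursor) % alignment;
      return cursor + (misalignment != 0 ? alignment - misalignment : 0);
    }

    int main() {
      alignas(64) char buffer[256];
      char* p = AlignUp(buffer + 3, 64);  // start 3 bytes past a 64-byte boundary
      assert(reinterpret_cast<std::uintptr_t>(p) % 64 == 0);
      assert(p == buffer + 64);  // 61 padding bytes were skipped
      (void)p;
    }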
diff --git a/contrib/libs/tcmalloc/tcmalloc/arena.h b/contrib/libs/tcmalloc/tcmalloc/arena.h
index 0655253540..5ff9a31e96 100644
--- a/contrib/libs/tcmalloc/tcmalloc/arena.h
+++ b/contrib/libs/tcmalloc/tcmalloc/arena.h
@@ -18,13 +18,13 @@
#include <stddef.h>
#include <stdint.h>
-#include "absl/base/attributes.h"
+#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "tcmalloc/common.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Arena allocation; designed for use by tcmalloc internal data structures like
// spans, profiles, etc. Always expands.
@@ -35,9 +35,9 @@ class Arena {
// Return a properly aligned byte array of length "bytes". Crashes if
// allocation fails. Requires pageheap_lock is held.
- ABSL_ATTRIBUTE_RETURNS_NONNULL void* Alloc(size_t bytes,
- int alignment = kAlignment)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
+ ABSL_ATTRIBUTE_RETURNS_NONNULL void* Alloc(size_t bytes,
+ int alignment = kAlignment)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
// Returns the total number of bytes allocated from this arena. Requires
// pageheap_lock is held.
@@ -61,8 +61,8 @@ class Arena {
Arena& operator=(const Arena&) = delete;
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_ARENA_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/arena_test.cc b/contrib/libs/tcmalloc/tcmalloc/arena_test.cc
index 2fb728cac9..65996d2d9d 100644
--- a/contrib/libs/tcmalloc/tcmalloc/arena_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/arena_test.cc
@@ -1,38 +1,38 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "tcmalloc/arena.h"
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-
-namespace tcmalloc {
-namespace tcmalloc_internal {
-namespace {
-
-TEST(Arena, AlignedAlloc) {
- Arena arena;
- absl::base_internal::SpinLockHolder h(&pageheap_lock);
- EXPECT_EQ(reinterpret_cast<uintptr_t>(arena.Alloc(64, 64)) % 64, 0);
- EXPECT_EQ(reinterpret_cast<uintptr_t>(arena.Alloc(7)) % 8, 0);
- EXPECT_EQ(reinterpret_cast<uintptr_t>(arena.Alloc(128, 64)) % 64, 0);
- for (int alignment = 1; alignment < 100; ++alignment) {
- EXPECT_EQ(
- reinterpret_cast<uintptr_t>(arena.Alloc(7, alignment)) % alignment, 0);
- }
-}
-
-} // namespace
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tcmalloc/arena.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace tcmalloc {
+namespace tcmalloc_internal {
+namespace {
+
+TEST(Arena, AlignedAlloc) {
+ Arena arena;
+ absl::base_internal::SpinLockHolder h(&pageheap_lock);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(arena.Alloc(64, 64)) % 64, 0);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(arena.Alloc(7)) % 8, 0);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(arena.Alloc(128, 64)) % 64, 0);
+ for (int alignment = 1; alignment < 100; ++alignment) {
+ EXPECT_EQ(
+ reinterpret_cast<uintptr_t>(arena.Alloc(7, alignment)) % alignment, 0);
+ }
+}
+
+} // namespace
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/background.cc b/contrib/libs/tcmalloc/tcmalloc/background.cc
index ec57c03901..bc62a5bf53 100644
--- a/contrib/libs/tcmalloc/tcmalloc/background.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/background.cc
@@ -17,17 +17,17 @@
#include "absl/base/internal/sysinfo.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
-#include "tcmalloc/cpu_cache.h"
+#include "tcmalloc/cpu_cache.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/percpu.h"
#include "tcmalloc/internal_malloc_extension.h"
#include "tcmalloc/malloc_extension.h"
#include "tcmalloc/parameters.h"
-#include "tcmalloc/static_vars.h"
+#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
// Called by MallocExtension_Internal_ProcessBackgroundActions.
@@ -84,98 +84,98 @@ void ReleasePerCpuMemoryToOS() {
memcpy(&prev_allowed_cpus, &allowed_cpus, sizeof(cpu_set_t));
}
-void ShuffleCpuCaches() {
- if (!MallocExtension::PerCpuCachesActive()) {
- return;
- }
-
- // Shuffle per-cpu caches
- Static::cpu_cache().ShuffleCpuCaches();
-}
-
-// Reclaims per-cpu caches. The CPU mask used in ReleasePerCpuMemoryToOS does
-// not provide useful information about virtual CPU state and hence, does not
-// reclaim memory when virtual CPUs are enabled.
-//
-// Here, we use heuristics that are based on cache usage and misses, to
-// determine if the caches have been recently inactive and if they may be
-// reclaimed.
-void ReclaimIdleCpuCaches() {
- // Attempts reclaim only when per-CPU caches are in use.
- if (!MallocExtension::PerCpuCachesActive()) {
- return;
- }
-
- Static::cpu_cache().TryReclaimingCaches();
-}
-
+void ShuffleCpuCaches() {
+ if (!MallocExtension::PerCpuCachesActive()) {
+ return;
+ }
+
+ // Shuffle per-cpu caches
+ Static::cpu_cache().ShuffleCpuCaches();
+}
+
+// Reclaims per-cpu caches. The CPU mask used in ReleasePerCpuMemoryToOS does
+// not provide useful information about virtual CPU state and hence does not
+// reclaim memory when virtual CPUs are enabled.
+//
+// Here, we use heuristics based on cache usage and misses to determine
+// whether the caches have recently been inactive and may be reclaimed.
+void ReclaimIdleCpuCaches() {
+ // Attempts reclaim only when per-CPU caches are in use.
+ if (!MallocExtension::PerCpuCachesActive()) {
+ return;
+ }
+
+ Static::cpu_cache().TryReclaimingCaches();
+}
+
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
// Release memory to the system at a constant rate.
void MallocExtension_Internal_ProcessBackgroundActions() {
tcmalloc::MallocExtension::MarkThreadIdle();
// Initialize storage for ReleasePerCpuMemoryToOS().
- CPU_ZERO(&tcmalloc::tcmalloc_internal::prev_allowed_cpus);
+ CPU_ZERO(&tcmalloc::tcmalloc_internal::prev_allowed_cpus);
absl::Time prev_time = absl::Now();
constexpr absl::Duration kSleepTime = absl::Seconds(1);
-
- // Reclaim inactive per-cpu caches once per kCpuCacheReclaimPeriod.
- //
- // We use a longer 30 sec reclaim period to make sure that caches are indeed
- // idle. Reclaim drains entire cache, as opposed to cache shuffle for instance
- // that only shrinks a cache by a few objects at a time. So, we might have
- // larger performance degradation if we use a shorter reclaim interval and
- // drain caches that weren't supposed to.
- constexpr absl::Duration kCpuCacheReclaimPeriod = absl::Seconds(30);
- absl::Time last_reclaim = absl::Now();
-
- // Shuffle per-cpu caches once per kCpuCacheShufflePeriod secs.
- constexpr absl::Duration kCpuCacheShufflePeriod = absl::Seconds(5);
- absl::Time last_shuffle = absl::Now();
-
+
+ // Reclaim inactive per-cpu caches once per kCpuCacheReclaimPeriod.
+ //
+ // We use a longer 30 sec reclaim period to make sure that caches are indeed
+ // idle. Reclaim drains the entire cache, whereas a cache shuffle, for
+ // instance, only shrinks a cache by a few objects at a time. So we might see
+ // a larger performance degradation if we used a shorter reclaim interval and
+ // drained caches that were not meant to be reclaimed.
+ constexpr absl::Duration kCpuCacheReclaimPeriod = absl::Seconds(30);
+ absl::Time last_reclaim = absl::Now();
+
+ // Shuffle per-cpu caches once per kCpuCacheShufflePeriod secs.
+ constexpr absl::Duration kCpuCacheShufflePeriod = absl::Seconds(5);
+ absl::Time last_shuffle = absl::Now();
+
while (true) {
absl::Time now = absl::Now();
const ssize_t bytes_to_release =
- static_cast<size_t>(tcmalloc::tcmalloc_internal::Parameters::
- background_release_rate()) *
+ static_cast<size_t>(tcmalloc::tcmalloc_internal::Parameters::
+ background_release_rate()) *
absl::ToDoubleSeconds(now - prev_time);
if (bytes_to_release > 0) { // may be negative if time goes backwards
tcmalloc::MallocExtension::ReleaseMemoryToSystem(bytes_to_release);
}
- const bool reclaim_idle_per_cpu_caches =
- tcmalloc::tcmalloc_internal::Parameters::reclaim_idle_per_cpu_caches();
-
- // If enabled, we use heuristics to determine if the per-cpu caches are
- // inactive. If disabled, we use a more conservative approach, that uses
- // allowed cpu masks, to reclaim cpu caches.
- if (reclaim_idle_per_cpu_caches) {
- // Try to reclaim per-cpu caches once every kCpuCacheReclaimPeriod
- // when enabled.
- if (now - last_reclaim >= kCpuCacheReclaimPeriod) {
- tcmalloc::tcmalloc_internal::ReclaimIdleCpuCaches();
- last_reclaim = now;
- }
- } else {
- tcmalloc::tcmalloc_internal::ReleasePerCpuMemoryToOS();
- }
-
- const bool shuffle_per_cpu_caches =
- tcmalloc::tcmalloc_internal::Parameters::shuffle_per_cpu_caches();
-
- if (shuffle_per_cpu_caches) {
- if (now - last_shuffle >= kCpuCacheShufflePeriod) {
- tcmalloc::tcmalloc_internal::ShuffleCpuCaches();
- last_shuffle = now;
- }
- }
-
- tcmalloc::tcmalloc_internal::Static().sharded_transfer_cache().Plunder();
+ const bool reclaim_idle_per_cpu_caches =
+ tcmalloc::tcmalloc_internal::Parameters::reclaim_idle_per_cpu_caches();
+
+ // If enabled, we use heuristics to determine if the per-cpu caches are
+ // inactive. If disabled, we use a more conservative approach that uses the
+ // allowed CPU masks to reclaim cpu caches.
+ if (reclaim_idle_per_cpu_caches) {
+ // Try to reclaim per-cpu caches once every kCpuCacheReclaimPeriod
+ // when enabled.
+ if (now - last_reclaim >= kCpuCacheReclaimPeriod) {
+ tcmalloc::tcmalloc_internal::ReclaimIdleCpuCaches();
+ last_reclaim = now;
+ }
+ } else {
+ tcmalloc::tcmalloc_internal::ReleasePerCpuMemoryToOS();
+ }
+
+ const bool shuffle_per_cpu_caches =
+ tcmalloc::tcmalloc_internal::Parameters::shuffle_per_cpu_caches();
+
+ if (shuffle_per_cpu_caches) {
+ if (now - last_shuffle >= kCpuCacheShufflePeriod) {
+ tcmalloc::tcmalloc_internal::ShuffleCpuCaches();
+ last_shuffle = now;
+ }
+ }
+
+ tcmalloc::tcmalloc_internal::Static().sharded_transfer_cache().Plunder();
prev_time = now;
absl::SleepFor(kSleepTime);
}
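
MallocExtension_Internal_ProcessBackgroundActions above is the loop behind the public MallocExtension::ProcessBackgroundActions() entry point; it never returns, so applications typically give it a dedicated thread. A minimal sketch follows (editorial, not part of this patch; it assumes the public wrapper in malloc_extension.h).

    #include <thread>

    #include "tcmalloc/malloc_extension.h"

    int main() {
      // Run the release/reclaim/shuffle loop shown in the hunk above on its
      // own thread for the lifetime of the process.
      std::thread background(&tcmalloc::MallocExtension::ProcessBackgroundActions);
      background.detach();

      // ... application work ...
    }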
diff --git a/contrib/libs/tcmalloc/tcmalloc/central_freelist.cc b/contrib/libs/tcmalloc/tcmalloc/central_freelist.cc
index 8620e228a1..09d6798839 100644
--- a/contrib/libs/tcmalloc/tcmalloc/central_freelist.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/central_freelist.cc
@@ -24,15 +24,15 @@
#include "tcmalloc/pages.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
static MemoryTag MemoryTagFromSizeClass(size_t cl) {
- if (!Static::numa_topology().numa_aware()) {
- return MemoryTag::kNormal;
- }
- return NumaNormalTag(cl / kNumBaseClasses);
+ if (!Static::numa_topology().numa_aware()) {
+ return MemoryTag::kNormal;
+ }
+ return NumaNormalTag(cl / kNumBaseClasses);
}
// Like a constructor and hence we disable thread safety analysis.
@@ -50,28 +50,28 @@ static Span* MapObjectToSpan(void* object) {
return span;
}
-Span* CentralFreeList::ReleaseToSpans(void* object, Span* span,
- size_t object_size) {
- if (ABSL_PREDICT_FALSE(span->FreelistEmpty(object_size))) {
+Span* CentralFreeList::ReleaseToSpans(void* object, Span* span,
+ size_t object_size) {
+ if (ABSL_PREDICT_FALSE(span->FreelistEmpty(object_size))) {
nonempty_.prepend(span);
}
- if (ABSL_PREDICT_TRUE(span->FreelistPush(object, object_size))) {
+ if (ABSL_PREDICT_TRUE(span->FreelistPush(object, object_size))) {
return nullptr;
}
span->RemoveFromList(); // from nonempty_
return span;
}
-void CentralFreeList::InsertRange(absl::Span<void*> batch) {
- CHECK_CONDITION(!batch.empty() && batch.size() <= kMaxObjectsToMove);
+void CentralFreeList::InsertRange(absl::Span<void*> batch) {
+ CHECK_CONDITION(!batch.empty() && batch.size() <= kMaxObjectsToMove);
Span* spans[kMaxObjectsToMove];
// Safe to store free spans into freed up space in span array.
Span** free_spans = spans;
int free_count = 0;
// Prefetch Span objects to reduce cache misses.
- for (int i = 0; i < batch.size(); ++i) {
+ for (int i = 0; i < batch.size(); ++i) {
Span* span = MapObjectToSpan(batch[i]);
ASSERT(span != nullptr);
span->Prefetch();
@@ -81,55 +81,55 @@ void CentralFreeList::InsertRange(absl::Span<void*> batch) {
// First, release all individual objects into spans under our mutex
// and collect spans that become completely free.
{
- // Use local copy of variable to ensure that it is not reloaded.
- size_t object_size = object_size_;
+ // Use local copy of variable to ensure that it is not reloaded.
+ size_t object_size = object_size_;
absl::base_internal::SpinLockHolder h(&lock_);
- for (int i = 0; i < batch.size(); ++i) {
- Span* span = ReleaseToSpans(batch[i], spans[i], object_size);
- if (ABSL_PREDICT_FALSE(span)) {
+ for (int i = 0; i < batch.size(); ++i) {
+ Span* span = ReleaseToSpans(batch[i], spans[i], object_size);
+ if (ABSL_PREDICT_FALSE(span)) {
free_spans[free_count] = span;
free_count++;
}
}
-
+
RecordMultiSpansDeallocated(free_count);
- UpdateObjectCounts(batch.size());
+ UpdateObjectCounts(batch.size());
}
// Then, release all free spans into page heap under its mutex.
- if (ABSL_PREDICT_FALSE(free_count)) {
- // Unregister size class doesn't require holding any locks.
+ if (ABSL_PREDICT_FALSE(free_count)) {
+ // Unregister size class doesn't require holding any locks.
for (int i = 0; i < free_count; ++i) {
Span* const free_span = free_spans[i];
ASSERT(IsNormalMemory(free_span->start_address())
);
Static::pagemap().UnregisterSizeClass(free_span);
-
- // Before taking pageheap_lock, prefetch the PageTrackers these spans are
- // on.
- //
- // Small-but-slow does not use the HugePageAwareAllocator (by default), so
- // do not prefetch on this config.
-#ifndef TCMALLOC_SMALL_BUT_SLOW
- const PageId p = free_span->first_page();
-
- // In huge_page_filler.h, we static_assert that PageTracker's key elements
- // for deallocation are within the first two cachelines.
- void* pt = Static::pagemap().GetHugepage(p);
- // Prefetch for writing, as we will issue stores to the PageTracker
- // instance.
- __builtin_prefetch(pt, 1, 3);
- __builtin_prefetch(
- reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(pt) +
- ABSL_CACHELINE_SIZE),
- 1, 3);
-#endif // TCMALLOC_SMALL_BUT_SLOW
- }
-
- const MemoryTag tag = MemoryTagFromSizeClass(size_class_);
- absl::base_internal::SpinLockHolder h(&pageheap_lock);
- for (int i = 0; i < free_count; ++i) {
- Span* const free_span = free_spans[i];
+
+ // Before taking pageheap_lock, prefetch the PageTrackers these spans are
+ // on.
+ //
+ // Small-but-slow does not use the HugePageAwareAllocator (by default), so
+ // do not prefetch on this config.
+#ifndef TCMALLOC_SMALL_BUT_SLOW
+ const PageId p = free_span->first_page();
+
+ // In huge_page_filler.h, we static_assert that PageTracker's key elements
+ // for deallocation are within the first two cachelines.
+ void* pt = Static::pagemap().GetHugepage(p);
+ // Prefetch for writing, as we will issue stores to the PageTracker
+ // instance.
+ __builtin_prefetch(pt, 1, 3);
+ __builtin_prefetch(
+ reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(pt) +
+ ABSL_CACHELINE_SIZE),
+ 1, 3);
+#endif // TCMALLOC_SMALL_BUT_SLOW
+ }
+
+ const MemoryTag tag = MemoryTagFromSizeClass(size_class_);
+ absl::base_internal::SpinLockHolder h(&pageheap_lock);
+ for (int i = 0; i < free_count; ++i) {
+ Span* const free_span = free_spans[i];
ASSERT(tag == GetMemoryTag(free_span->start_address()));
Static::page_allocator().Delete(free_span, tag);
}
@@ -138,64 +138,64 @@ void CentralFreeList::InsertRange(absl::Span<void*> batch) {
int CentralFreeList::RemoveRange(void** batch, int N) {
ASSUME(N > 0);
- // Use local copy of variable to ensure that it is not reloaded.
- size_t object_size = object_size_;
- int result = 0;
+ // Use local copy of variable to ensure that it is not reloaded.
+ size_t object_size = object_size_;
+ int result = 0;
absl::base_internal::SpinLockHolder h(&lock_);
- if (ABSL_PREDICT_FALSE(nonempty_.empty())) {
- result = Populate(batch, N);
- } else {
- do {
- Span* span = nonempty_.first();
- int here =
- span->FreelistPopBatch(batch + result, N - result, object_size);
- ASSERT(here > 0);
- if (span->FreelistEmpty(object_size)) {
- span->RemoveFromList(); // from nonempty_
- }
- result += here;
- } while (result < N && !nonempty_.empty());
+ if (ABSL_PREDICT_FALSE(nonempty_.empty())) {
+ result = Populate(batch, N);
+ } else {
+ do {
+ Span* span = nonempty_.first();
+ int here =
+ span->FreelistPopBatch(batch + result, N - result, object_size);
+ ASSERT(here > 0);
+ if (span->FreelistEmpty(object_size)) {
+ span->RemoveFromList(); // from nonempty_
+ }
+ result += here;
+ } while (result < N && !nonempty_.empty());
}
UpdateObjectCounts(-result);
return result;
}
// Fetch memory from the system and add to the central cache freelist.
-int CentralFreeList::Populate(void** batch,
- int N) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+int CentralFreeList::Populate(void** batch,
+ int N) ABSL_NO_THREAD_SAFETY_ANALYSIS {
// Release central list lock while operating on pageheap
- // Note, this could result in multiple calls to populate each allocating
- // a new span and the pushing those partially full spans onto nonempty.
+ // Note that this could result in multiple calls to Populate, each allocating
+ // a new span and then pushing those partially full spans onto nonempty_.
lock_.Unlock();
const MemoryTag tag = MemoryTagFromSizeClass(size_class_);
Span* span = Static::page_allocator().New(pages_per_span_, tag);
- if (ABSL_PREDICT_FALSE(span == nullptr)) {
+ if (ABSL_PREDICT_FALSE(span == nullptr)) {
Log(kLog, __FILE__, __LINE__, "tcmalloc: allocation failed",
pages_per_span_.in_bytes());
lock_.Lock();
- return 0;
+ return 0;
}
- ASSERT(tag == GetMemoryTag(span->start_address()));
+ ASSERT(tag == GetMemoryTag(span->start_address()));
ASSERT(span->num_pages() == pages_per_span_);
Static::pagemap().RegisterSizeClass(span, size_class_);
- size_t objects_per_span = objects_per_span_;
- int result = span->BuildFreelist(object_size_, objects_per_span, batch, N);
- ASSERT(result > 0);
- // This is a cheaper check than using FreelistEmpty().
- bool span_empty = result == objects_per_span;
+ size_t objects_per_span = objects_per_span_;
+ int result = span->BuildFreelist(object_size_, objects_per_span, batch, N);
+ ASSERT(result > 0);
+ // This is a cheaper check than using FreelistEmpty().
+ bool span_empty = result == objects_per_span;
lock_.Lock();
- if (!span_empty) {
- nonempty_.prepend(span);
- }
+ if (!span_empty) {
+ nonempty_.prepend(span);
+ }
RecordSpanAllocated();
- return result;
+ return result;
}
-size_t CentralFreeList::OverheadBytes() const {
- if (ABSL_PREDICT_FALSE(object_size_ == 0)) {
+size_t CentralFreeList::OverheadBytes() const {
+ if (ABSL_PREDICT_FALSE(object_size_ == 0)) {
return 0;
}
const size_t overhead_per_span = pages_per_span_.in_bytes() % object_size_;
@@ -204,7 +204,7 @@ size_t CentralFreeList::OverheadBytes() const {
SpanStats CentralFreeList::GetSpanStats() const {
SpanStats stats;
- if (ABSL_PREDICT_FALSE(objects_per_span_ == 0)) {
+ if (ABSL_PREDICT_FALSE(objects_per_span_ == 0)) {
return stats;
}
stats.num_spans_requested = static_cast<size_t>(num_spans_requested_.value());
@@ -213,6 +213,6 @@ SpanStats CentralFreeList::GetSpanStats() const {
return stats;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
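
MemoryTagFromSizeClass() above recovers the NUMA partition by dividing the size-class index by kNumBaseClasses, since the NUMA-aware build replicates the base size classes once per partition. A tiny standalone illustration of that mapping (editorial, not part of this patch; the kNumBaseClasses value here is a placeholder, not the configured one):

    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr std::size_t kNumBaseClasses = 86;  // placeholder value
      const std::size_t classes[] = {5, kNumBaseClasses + 5};
      for (std::size_t cl : classes) {
        // Same base size class within its partition, different NUMA partition.
        std::printf("size class %zu -> partition %zu, base class %zu\n", cl,
                    cl / kNumBaseClasses, cl % kNumBaseClasses);
      }
    }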
diff --git a/contrib/libs/tcmalloc/tcmalloc/central_freelist.h b/contrib/libs/tcmalloc/tcmalloc/central_freelist.h
index 266f184d6b..3d766af8c3 100644
--- a/contrib/libs/tcmalloc/tcmalloc/central_freelist.h
+++ b/contrib/libs/tcmalloc/tcmalloc/central_freelist.h
@@ -28,9 +28,9 @@
#include "tcmalloc/span.h"
#include "tcmalloc/span_stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Data kept per size-class in central cache.
class CentralFreeList {
@@ -50,9 +50,9 @@ class CentralFreeList {
// These methods all do internal locking.
- // Insert batch into the central freelist.
- // REQUIRES: batch.size() > 0 && batch.size() <= kMaxObjectsToMove.
- void InsertRange(absl::Span<void*> batch) ABSL_LOCKS_EXCLUDED(lock_);
+ // Insert batch into the central freelist.
+ // REQUIRES: batch.size() > 0 && batch.size() <= kMaxObjectsToMove.
+ void InsertRange(absl::Span<void*> batch) ABSL_LOCKS_EXCLUDED(lock_);
// Fill a prefix of batch[0..N-1] with up to N elements removed from central
// freelist. Return the number of elements removed.
@@ -60,35 +60,35 @@ class CentralFreeList {
ABSL_LOCKS_EXCLUDED(lock_);
// Returns the number of free objects in cache.
- size_t length() const { return static_cast<size_t>(counter_.value()); }
+ size_t length() const { return static_cast<size_t>(counter_.value()); }
// Returns the memory overhead (internal fragmentation) attributable
// to the freelist. This is memory lost when the size of elements
// in a freelist doesn't exactly divide the page-size (an 8192-byte
// page full of 5-byte objects would have 2 bytes memory overhead).
- size_t OverheadBytes() const;
+ size_t OverheadBytes() const;
SpanStats GetSpanStats() const;
- void AcquireInternalLocks() {
- lock_.Lock();
- }
-
- void ReleaseInternalLocks() {
- lock_.Unlock();
- }
-
+ void AcquireInternalLocks() {
+ lock_.Lock();
+ }
+
+ void ReleaseInternalLocks() {
+ lock_.Unlock();
+ }
+
private:
// Release an object to spans.
// Returns object's span if it become completely free.
- Span* ReleaseToSpans(void* object, Span* span, size_t object_size)
+ Span* ReleaseToSpans(void* object, Span* span, size_t object_size)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Populate cache by fetching from the page heap.
// May temporarily release lock_.
- // Fill a prefix of batch[0..N-1] with up to N elements removed from central
- // freelist. Returns the number of elements removed.
- int Populate(void** batch, int N) ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Fill a prefix of batch[0..N-1] with up to N elements removed from central
+ // freelist. Returns the number of elements removed.
+ int Populate(void** batch, int N) ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_);
// This lock protects all the mutable data members.
absl::base_internal::SpinLock lock_;
@@ -126,17 +126,17 @@ class CentralFreeList {
// guarantees accuracy.
// Num free objects in cache entry
- StatsCounter counter_;
+ StatsCounter counter_;
- StatsCounter num_spans_requested_;
- StatsCounter num_spans_returned_;
+ StatsCounter num_spans_requested_;
+ StatsCounter num_spans_returned_;
// Dummy header for non-empty spans
SpanList nonempty_ ABSL_GUARDED_BY(lock_);
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_CENTRAL_FREELIST_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/central_freelist_benchmark.cc b/contrib/libs/tcmalloc/tcmalloc/central_freelist_benchmark.cc
index a80d580753..ac0768a3d7 100644
--- a/contrib/libs/tcmalloc/tcmalloc/central_freelist_benchmark.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/central_freelist_benchmark.cc
@@ -1,198 +1,198 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <algorithm>
-#include <cstddef>
-#include <cstdint>
-#include <vector>
-
-#include "absl/algorithm/container.h"
-#include "absl/random/random.h"
-#include "benchmark/benchmark.h"
-#include "tcmalloc/central_freelist.h"
-#include "tcmalloc/common.h"
-#include "tcmalloc/static_vars.h"
-#include "tcmalloc/tcmalloc_policy.h"
-
-namespace tcmalloc {
-namespace tcmalloc_internal {
-namespace {
-
-// This benchmark measures how long it takes to populate multiple
-// spans. The spans are freed in the same order as they were populated
-// to minimize the time it takes to free them.
-void BM_Populate(benchmark::State& state) {
- size_t object_size = state.range(0);
- size_t cl = Static::sizemap().SizeClass(CppPolicy(), object_size);
- int batch_size = Static::sizemap().num_objects_to_move(cl);
- int num_objects = 64 * 1024 * 1024 / object_size;
- CentralFreeList cfl;
- // Initialize the span to contain the appropriate size of object.
- cfl.Init(cl);
-
- // Allocate an array large enough to hold 64 MiB of objects.
- std::vector<void*> buffer(num_objects);
- int64_t items_processed = 0;
- absl::BitGen rnd;
-
- for (auto s : state) {
- int index = 0;
- // The cost of fetching objects will include the cost of fetching and
- // populating the span.
- while (index < num_objects) {
- int count = std::min(batch_size, num_objects - index);
- int got = cfl.RemoveRange(&buffer[index], count);
- index += got;
- }
-
- // Don't include the cost of returning the objects to the span, and the
- // span to the pageheap.
- state.PauseTiming();
- index = 0;
- while (index < num_objects) {
- uint64_t count = std::min(batch_size, num_objects - index);
- cfl.InsertRange({&buffer[index], count});
- index += count;
- }
- items_processed += index;
- state.ResumeTiming();
- }
- state.SetItemsProcessed(items_processed);
-}
-BENCHMARK(BM_Populate)
- ->DenseRange(8, 64, 16)
- ->DenseRange(64, 1024, 64)
- ->DenseRange(4096, 28 * 1024, 4096)
- ->DenseRange(32 * 1024, 256 * 1024, 32 * 1024);
-
-// This benchmark fills a large array with objects, shuffles the objects
-// and then returns them.
-// This should be relatively representative of what happens at runtime.
-// Fetching objects from the CFL is usually done in batches, but returning
-// them is usually done spread over many active spans.
-void BM_MixAndReturn(benchmark::State& state) {
- size_t object_size = state.range(0);
- size_t cl = Static::sizemap().SizeClass(CppPolicy(), object_size);
- int batch_size = Static::sizemap().num_objects_to_move(cl);
- int num_objects = 64 * 1024 * 1024 / object_size;
- CentralFreeList cfl;
- // Initialize the span to contain the appropriate size of object.
- cfl.Init(cl);
-
- // Allocate an array large enough to hold 64 MiB of objects.
- std::vector<void*> buffer(num_objects);
- int64_t items_processed = 0;
- absl::BitGen rnd;
-
- for (auto s : state) {
- int index = 0;
- while (index < num_objects) {
- int count = std::min(batch_size, num_objects - index);
- int got = cfl.RemoveRange(&buffer[index], count);
- index += got;
- }
-
- state.PauseTiming();
- // Shuffle the vector so that we don't return the objects in the same
- // order as they were allocated.
- absl::c_shuffle(buffer, rnd);
- state.ResumeTiming();
-
- index = 0;
- while (index < num_objects) {
- unsigned int count = std::min(batch_size, num_objects - index);
- cfl.InsertRange({&buffer[index], count});
- index += count;
- }
- items_processed += index;
- }
- state.SetItemsProcessed(items_processed);
-}
-BENCHMARK(BM_MixAndReturn)
- ->DenseRange(8, 64, 16)
- ->DenseRange(64, 1024, 64)
- ->DenseRange(4096, 28 * 1024, 4096)
- ->DenseRange(32 * 1024, 256 * 1024, 32 * 1024);
-
-// This benchmark holds onto half the allocated objects so that (except for
-// single object spans) spans are never allocated or freed during the
-// benchmark run. This evaluates the performance of just the span handling
-// code, and avoids timing the pageheap code.
-void BM_SpanReuse(benchmark::State& state) {
- size_t object_size = state.range(0);
- size_t cl = Static::sizemap().SizeClass(CppPolicy(), object_size);
- int batch_size = Static::sizemap().num_objects_to_move(cl);
- int num_objects = 64 * 1024 * 1024 / object_size;
- CentralFreeList cfl;
- // Initialize the span to contain the appropriate size of object.
- cfl.Init(cl);
-
- // Array used to hold onto half of the objects
- std::vector<void*> held_objects(2 * num_objects);
- // Request twice the objects we need
- for (int index = 0; index < 2 * num_objects;) {
- int count = std::min(batch_size, 2 * num_objects - index);
- int got = cfl.RemoveRange(&held_objects[index], count);
- index += got;
- }
-
- // Return half of the objects. This will stop the spans from being
- // returned to the pageheap. So future operations will not touch the
- // pageheap.
- for (int index = 0; index < 2 * num_objects; index += 2) {
- cfl.InsertRange({&held_objects[index], 1});
- }
- // Allocate an array large enough to hold 64 MiB of objects.
- std::vector<void*> buffer(num_objects);
- int64_t items_processed = 0;
- absl::BitGen rnd;
-
- for (auto s : state) {
- int index = 0;
- while (index < num_objects) {
- int count = std::min(batch_size, num_objects - index);
- int got = cfl.RemoveRange(&buffer[index], count);
- index += got;
- }
-
- state.PauseTiming();
- // Shuffle the vector so that we don't return the objects in the same
- // order as they were allocated.
- absl::c_shuffle(buffer, rnd);
- state.ResumeTiming();
-
- index = 0;
- while (index < num_objects) {
- uint64_t count = std::min(batch_size, num_objects - index);
- cfl.InsertRange({&buffer[index], count});
- index += count;
- }
- items_processed += index;
- }
- state.SetItemsProcessed(items_processed);
-
- // Return the other half of the objects.
- for (int index = 1; index < 2 * num_objects; index += 2) {
- cfl.InsertRange({&held_objects[index], 1});
- }
-}
-// Want to avoid benchmarking spans where there is a single object per span.
-BENCHMARK(BM_SpanReuse)
- ->DenseRange(8, 64, 16)
- ->DenseRange(64, 1024, 64)
- ->DenseRange(1024, 4096, 512);
-
-} // namespace
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/random/random.h"
+#include "benchmark/benchmark.h"
+#include "tcmalloc/central_freelist.h"
+#include "tcmalloc/common.h"
+#include "tcmalloc/static_vars.h"
+#include "tcmalloc/tcmalloc_policy.h"
+
+namespace tcmalloc {
+namespace tcmalloc_internal {
+namespace {
+
+// This benchmark measures how long it takes to populate multiple
+// spans. The spans are freed in the same order as they were populated
+// to minimize the time it takes to free them.
+void BM_Populate(benchmark::State& state) {
+ size_t object_size = state.range(0);
+ size_t cl = Static::sizemap().SizeClass(CppPolicy(), object_size);
+ int batch_size = Static::sizemap().num_objects_to_move(cl);
+ int num_objects = 64 * 1024 * 1024 / object_size;
+ CentralFreeList cfl;
+ // Initialize the span to contain the appropriate size of object.
+ cfl.Init(cl);
+
+ // Allocate an array large enough to hold 64 MiB of objects.
+ std::vector<void*> buffer(num_objects);
+ int64_t items_processed = 0;
+ absl::BitGen rnd;
+
+ for (auto s : state) {
+ int index = 0;
+ // The cost of fetching objects will include the cost of fetching and
+ // populating the span.
+ while (index < num_objects) {
+ int count = std::min(batch_size, num_objects - index);
+ int got = cfl.RemoveRange(&buffer[index], count);
+ index += got;
+ }
+
+ // Don't include the cost of returning the objects to the span, and the
+ // span to the pageheap.
+ state.PauseTiming();
+ index = 0;
+ while (index < num_objects) {
+ uint64_t count = std::min(batch_size, num_objects - index);
+ cfl.InsertRange({&buffer[index], count});
+ index += count;
+ }
+ items_processed += index;
+ state.ResumeTiming();
+ }
+ state.SetItemsProcessed(items_processed);
+}
+BENCHMARK(BM_Populate)
+ ->DenseRange(8, 64, 16)
+ ->DenseRange(64, 1024, 64)
+ ->DenseRange(4096, 28 * 1024, 4096)
+ ->DenseRange(32 * 1024, 256 * 1024, 32 * 1024);
+
+// This benchmark fills a large array with objects, shuffles the objects
+// and then returns them.
+// This should be relatively representative of what happens at runtime.
+// Fetching objects from the CFL is usually done in batches, but returning
+// them is usually done spread over many active spans.
+void BM_MixAndReturn(benchmark::State& state) {
+ size_t object_size = state.range(0);
+ size_t cl = Static::sizemap().SizeClass(CppPolicy(), object_size);
+ int batch_size = Static::sizemap().num_objects_to_move(cl);
+ int num_objects = 64 * 1024 * 1024 / object_size;
+ CentralFreeList cfl;
+ // Initialize the span to contain the appropriate size of object.
+ cfl.Init(cl);
+
+ // Allocate an array large enough to hold 64 MiB of objects.
+ std::vector<void*> buffer(num_objects);
+ int64_t items_processed = 0;
+ absl::BitGen rnd;
+
+ for (auto s : state) {
+ int index = 0;
+ while (index < num_objects) {
+ int count = std::min(batch_size, num_objects - index);
+ int got = cfl.RemoveRange(&buffer[index], count);
+ index += got;
+ }
+
+ state.PauseTiming();
+ // Shuffle the vector so that we don't return the objects in the same
+ // order as they were allocated.
+ absl::c_shuffle(buffer, rnd);
+ state.ResumeTiming();
+
+ index = 0;
+ while (index < num_objects) {
+ unsigned int count = std::min(batch_size, num_objects - index);
+ cfl.InsertRange({&buffer[index], count});
+ index += count;
+ }
+ items_processed += index;
+ }
+ state.SetItemsProcessed(items_processed);
+}
+BENCHMARK(BM_MixAndReturn)
+ ->DenseRange(8, 64, 16)
+ ->DenseRange(64, 1024, 64)
+ ->DenseRange(4096, 28 * 1024, 4096)
+ ->DenseRange(32 * 1024, 256 * 1024, 32 * 1024);
+
+// This benchmark holds onto half the allocated objects so that (except for
+// single object spans) spans are never allocated or freed during the
+// benchmark run. This evaluates the performance of just the span handling
+// code, and avoids timing the pageheap code.
+void BM_SpanReuse(benchmark::State& state) {
+ size_t object_size = state.range(0);
+ size_t cl = Static::sizemap().SizeClass(CppPolicy(), object_size);
+ int batch_size = Static::sizemap().num_objects_to_move(cl);
+ int num_objects = 64 * 1024 * 1024 / object_size;
+ CentralFreeList cfl;
+ // Initialize the span to contain the appropriate size of object.
+ cfl.Init(cl);
+
+ // Array used to hold onto half of the objects
+ std::vector<void*> held_objects(2 * num_objects);
+ // Request twice the objects we need
+ for (int index = 0; index < 2 * num_objects;) {
+ int count = std::min(batch_size, 2 * num_objects - index);
+ int got = cfl.RemoveRange(&held_objects[index], count);
+ index += got;
+ }
+
+ // Return half of the objects. This will stop the spans from being
+ // returned to the pageheap. So future operations will not touch the
+ // pageheap.
+ for (int index = 0; index < 2 * num_objects; index += 2) {
+ cfl.InsertRange({&held_objects[index], 1});
+ }
+ // Allocate an array large enough to hold 64 MiB of objects.
+ std::vector<void*> buffer(num_objects);
+ int64_t items_processed = 0;
+ absl::BitGen rnd;
+
+ for (auto s : state) {
+ int index = 0;
+ while (index < num_objects) {
+ int count = std::min(batch_size, num_objects - index);
+ int got = cfl.RemoveRange(&buffer[index], count);
+ index += got;
+ }
+
+ state.PauseTiming();
+ // Shuffle the vector so that we don't return the objects in the same
+ // order as they were allocated.
+ absl::c_shuffle(buffer, rnd);
+ state.ResumeTiming();
+
+ index = 0;
+ while (index < num_objects) {
+ uint64_t count = std::min(batch_size, num_objects - index);
+ cfl.InsertRange({&buffer[index], count});
+ index += count;
+ }
+ items_processed += index;
+ }
+ state.SetItemsProcessed(items_processed);
+
+ // Return the other half of the objects.
+ for (int index = 1; index < 2 * num_objects; index += 2) {
+ cfl.InsertRange({&held_objects[index], 1});
+ }
+}
+// We want to avoid benchmarking size classes that hold only a single object per span.
+BENCHMARK(BM_SpanReuse)
+ ->DenseRange(8, 64, 16)
+ ->DenseRange(64, 1024, 64)
+ ->DenseRange(1024, 4096, 512);
+
+} // namespace
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
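Note on the file above: all three benchmarks drive the central freelist through the same batched pattern, pulling objects with RemoveRange() in chunks bounded by num_objects_to_move(cl) and handing them back with InsertRange(). The sketch below isolates just that loop against a toy FakeFreeList; it is a hypothetical stand-in for illustration only, not the real tcmalloc CentralFreeList or its span handling.

// Minimal sketch of the batched fetch/return loop used by the benchmarks.
// FakeFreeList is NOT the real CentralFreeList; it only mirrors the shape of
// RemoveRange()/InsertRange() so the loop structure can be shown standalone.
#include <algorithm>
#include <cstdio>
#include <vector>

class FakeFreeList {
 public:
  // Hands out up to `n` dummy objects and reports how many it provided,
  // mirroring RemoveRange()'s contract.
  int RemoveRange(void** batch, int n) {
    for (int i = 0; i < n; ++i) batch[i] = &storage_;
    return n;
  }
  // Accepts a batch back, mirroring InsertRange().
  void InsertRange(void** batch, int n) {
    (void)batch;
    (void)n;
  }

 private:
  int storage_ = 0;
};

int main() {
  constexpr int kNumObjects = 1024;
  constexpr int kBatchSize = 32;  // stands in for num_objects_to_move(cl)
  FakeFreeList cfl;
  std::vector<void*> buffer(kNumObjects);

  // Fetch in batches, exactly like the benchmarks' inner loop.
  int index = 0;
  while (index < kNumObjects) {
    int count = std::min(kBatchSize, kNumObjects - index);
    index += cfl.RemoveRange(&buffer[index], count);
  }

  // Return in batches as well.
  index = 0;
  while (index < kNumObjects) {
    int count = std::min(kBatchSize, kNumObjects - index);
    cfl.InsertRange(&buffer[index], count);
    index += count;
  }
  std::printf("moved %d objects in batches of %d\n", index, kBatchSize);
  return 0;
}

Keeping both directions batched is what lets the benchmarks attribute their cost to span population and span reuse rather than to per-object bookkeeping.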
diff --git a/contrib/libs/tcmalloc/tcmalloc/central_freelist_test.cc b/contrib/libs/tcmalloc/tcmalloc/central_freelist_test.cc
index de5960120d..f951303af0 100644
--- a/contrib/libs/tcmalloc/tcmalloc/central_freelist_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/central_freelist_test.cc
@@ -23,7 +23,7 @@
#include "tcmalloc/static_vars.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
// TODO(b/162552708) Mock out the page heap to interact with CFL instead
@@ -53,9 +53,9 @@ class CFLTest : public testing::TestWithParam<size_t> {
TEST_P(CFLTest, SingleBatch) {
void* batch[kMaxObjectsToMove];
- uint64_t got = cfl_.RemoveRange(batch, batch_size_);
- ASSERT_GT(got, 0);
- cfl_.InsertRange({batch, got});
+ uint64_t got = cfl_.RemoveRange(batch, batch_size_);
+ ASSERT_GT(got, 0);
+ cfl_.InsertRange({batch, got});
SpanStats stats = cfl_.GetSpanStats();
EXPECT_EQ(stats.num_spans_requested, 1);
EXPECT_EQ(stats.num_spans_returned, 1);
@@ -72,8 +72,8 @@ TEST_P(CFLTest, MultipleSpans) {
const int num_objects_to_fetch = num_spans * objects_per_span_;
int total_fetched = 0;
while (total_fetched < num_objects_to_fetch) {
- size_t n = num_objects_to_fetch - total_fetched;
- int got = cfl_.RemoveRange(batch, std::min(n, batch_size_));
+ size_t n = num_objects_to_fetch - total_fetched;
+ int got = cfl_.RemoveRange(batch, std::min(n, batch_size_));
for (int i = 0; i < got; ++i) {
all_objects.push_back(batch[i]);
}
@@ -94,13 +94,13 @@ TEST_P(CFLTest, MultipleSpans) {
int total_returned = 0;
bool checked_half = false;
while (total_returned < num_objects_to_fetch) {
- uint64_t size_to_pop =
+ uint64_t size_to_pop =
std::min(all_objects.size() - total_returned, batch_size_);
for (int i = 0; i < size_to_pop; ++i) {
batch[i] = all_objects[i + total_returned];
}
total_returned += size_to_pop;
- cfl_.InsertRange({batch, size_to_pop});
+ cfl_.InsertRange({batch, size_to_pop});
// sanity check
if (!checked_half && total_returned >= (num_objects_to_fetch / 2)) {
stats = cfl_.GetSpanStats();
@@ -117,5 +117,5 @@ TEST_P(CFLTest, MultipleSpans) {
INSTANTIATE_TEST_SUITE_P(All, CFLTest, testing::Range(size_t(1), kNumClasses));
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
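For reference, the CFLTest fixture in the file above is instantiated once per size class via testing::Range. The skeleton below is only a reminder of that GoogleTest pattern; kFakeNumClasses is a made-up stand-in and no tcmalloc internals are involved.

// Minimal parameterized-test skeleton mirroring CFLTest's use of
// testing::Range over size classes; link against gtest_main to run it.
#include <cstddef>
#include "gtest/gtest.h"

constexpr size_t kFakeNumClasses = 4;  // hypothetical stand-in for kNumClasses

class SizeClassTest : public testing::TestWithParam<size_t> {};

TEST_P(SizeClassTest, ParamIsInRange) {
  EXPECT_GE(GetParam(), size_t(1));
  EXPECT_LT(GetParam(), kFakeNumClasses);
}

// Instantiates ParamIsInRange once per index in [1, kFakeNumClasses).
INSTANTIATE_TEST_SUITE_P(All, SizeClassTest,
                         testing::Range(size_t(1), kFakeNumClasses));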
diff --git a/contrib/libs/tcmalloc/tcmalloc/common.cc b/contrib/libs/tcmalloc/tcmalloc/common.cc
index 38443040ca..85b30fee12 100644
--- a/contrib/libs/tcmalloc/tcmalloc/common.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/common.cc
@@ -15,22 +15,22 @@
#include "tcmalloc/common.h"
#include "tcmalloc/experiment.h"
-#include "tcmalloc/internal/environment.h"
+#include "tcmalloc/internal/environment.h"
#include "tcmalloc/internal/optimization.h"
-#include "tcmalloc/pages.h"
+#include "tcmalloc/pages.h"
#include "tcmalloc/runtime_size_classes.h"
#include "tcmalloc/sampler.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
absl::string_view MemoryTagToLabel(MemoryTag tag) {
switch (tag) {
case MemoryTag::kNormal:
return "NORMAL";
- case MemoryTag::kNormalP1:
- return "NORMAL_P1";
+ case MemoryTag::kNormalP1:
+ return "NORMAL_P1";
case MemoryTag::kSampled:
return "SAMPLED";
default:
@@ -73,21 +73,21 @@ void SizeMap::SetSizeClasses(int num_classes, const SizeClassInfo* parsed) {
}
// Fill any unspecified size classes with 0.
- for (int x = num_classes; x < kNumBaseClasses; x++) {
+ for (int x = num_classes; x < kNumBaseClasses; x++) {
class_to_size_[x] = 0;
class_to_pages_[x] = 0;
num_objects_to_move_[x] = 0;
}
-
- // Copy selected size classes into the upper registers.
- for (int i = 1; i < (kNumClasses / kNumBaseClasses); i++) {
- std::copy(&class_to_size_[0], &class_to_size_[kNumBaseClasses],
- &class_to_size_[kNumBaseClasses * i]);
- std::copy(&class_to_pages_[0], &class_to_pages_[kNumBaseClasses],
- &class_to_pages_[kNumBaseClasses * i]);
- std::copy(&num_objects_to_move_[0], &num_objects_to_move_[kNumBaseClasses],
- &num_objects_to_move_[kNumBaseClasses * i]);
- }
+
+ // Copy selected size classes into the upper registers.
+ for (int i = 1; i < (kNumClasses / kNumBaseClasses); i++) {
+ std::copy(&class_to_size_[0], &class_to_size_[kNumBaseClasses],
+ &class_to_size_[kNumBaseClasses * i]);
+ std::copy(&class_to_pages_[0], &class_to_pages_[kNumBaseClasses],
+ &class_to_pages_[kNumBaseClasses * i]);
+ std::copy(&num_objects_to_move_[0], &num_objects_to_move_[kNumBaseClasses],
+ &num_objects_to_move_[kNumBaseClasses * i]);
+ }
}
// Return true if all size classes meet the requirements for alignment
@@ -96,8 +96,8 @@ bool SizeMap::ValidSizeClasses(int num_classes, const SizeClassInfo* parsed) {
if (num_classes <= 0) {
return false;
}
- if (kHasExpandedClasses && num_classes > kNumBaseClasses) {
- num_classes = kNumBaseClasses;
+ if (kHasExpandedClasses && num_classes > kNumBaseClasses) {
+ num_classes = kNumBaseClasses;
}
for (int c = 1; c < num_classes; c++) {
@@ -167,17 +167,17 @@ void SizeMap::Init() {
static_assert(kAlignment <= 16, "kAlignment is too large");
- if (IsExperimentActive(Experiment::TEST_ONLY_TCMALLOC_POW2_SIZECLASS)) {
- SetSizeClasses(kExperimentalPow2SizeClassesCount,
- kExperimentalPow2SizeClasses);
- } else if (IsExperimentActive(
- Experiment::TEST_ONLY_TCMALLOC_POW2_BELOW64_SIZECLASS)) {
- SetSizeClasses(kExperimentalPow2Below64SizeClassesCount,
- kExperimentalPow2Below64SizeClasses);
+ if (IsExperimentActive(Experiment::TEST_ONLY_TCMALLOC_POW2_SIZECLASS)) {
+ SetSizeClasses(kExperimentalPow2SizeClassesCount,
+ kExperimentalPow2SizeClasses);
+ } else if (IsExperimentActive(
+ Experiment::TEST_ONLY_TCMALLOC_POW2_BELOW64_SIZECLASS)) {
+ SetSizeClasses(kExperimentalPow2Below64SizeClassesCount,
+ kExperimentalPow2Below64SizeClasses);
} else {
if (default_want_legacy_spans != nullptr &&
- default_want_legacy_spans() > 0
- ) {
+ default_want_legacy_spans() > 0
+ ) {
SetSizeClasses(kLegacySizeClassesCount, kLegacySizeClasses);
} else {
SetSizeClasses(kSizeClassesCount, kSizeClasses);
@@ -199,6 +199,6 @@ void SizeMap::Init() {
}
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
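The replication loop restored in SetSizeClasses() above copies the kNumBaseClasses base entries into each upper register, so that cl / kNumBaseClasses identifies the register and cl % kNumBaseClasses the base class. A miniature sketch of that index arithmetic, using made-up constants (kNumBaseClasses = 4, three registers) rather than the real tables:

// Toy illustration of copying the base size-class register into the upper
// registers; the constants and sizes here are invented for demonstration.
#include <algorithm>
#include <array>
#include <cstdio>

constexpr int kNumBaseClasses = 4;
constexpr int kNumClasses = 12;  // three "registers" of four classes each

int main() {
  // Only the base register is filled in; the rest starts at zero.
  std::array<int, kNumClasses> class_to_size = {8, 16, 32, 64};

  // Copy the base register into each upper register, as SetSizeClasses does.
  for (int i = 1; i < kNumClasses / kNumBaseClasses; ++i) {
    std::copy(&class_to_size[0], &class_to_size[kNumBaseClasses],
              &class_to_size[kNumBaseClasses * i]);
  }

  for (int cl = 0; cl < kNumClasses; ++cl) {
    std::printf("cl=%2d register=%d base=%d size=%d\n", cl,
                cl / kNumBaseClasses, cl % kNumBaseClasses, class_to_size[cl]);
  }
  return 0;
}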
diff --git a/contrib/libs/tcmalloc/tcmalloc/common.h b/contrib/libs/tcmalloc/tcmalloc/common.h
index d44811c726..43e2aa5f7b 100644
--- a/contrib/libs/tcmalloc/tcmalloc/common.h
+++ b/contrib/libs/tcmalloc/tcmalloc/common.h
@@ -20,27 +20,27 @@
#include <stddef.h>
#include <stdint.h>
-#include <limits>
-#include <type_traits>
-
+#include <limits>
+#include <type_traits>
+
#include "absl/base/attributes.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/spinlock.h"
-#include "absl/base/macros.h"
+#include "absl/base/macros.h"
#include "absl/base/optimization.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/strings/string_view.h"
-#include "absl/types/span.h"
-#include "tcmalloc/experiment.h"
+#include "absl/types/span.h"
+#include "tcmalloc/experiment.h"
#include "tcmalloc/internal/config.h"
#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/internal/optimization.h"
+#include "tcmalloc/internal/optimization.h"
#include "tcmalloc/size_class_info.h"
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
//-------------------------------------------------------------------
// Configuration
//-------------------------------------------------------------------
@@ -79,7 +79,7 @@ namespace tcmalloc_internal {
// The constants that vary between models are:
//
// kPageShift - Shift amount used to compute the page size.
-// kNumBaseClasses - Number of size classes serviced by bucket allocators
+// kNumBaseClasses - Number of size classes serviced by bucket allocators
// kMaxSize - Maximum size serviced by bucket allocators (thread/cpu/central)
// kMinThreadCacheSize - The minimum size in bytes of each ThreadCache.
// kMaxThreadCacheSize - The maximum size in bytes of each ThreadCache.
@@ -109,7 +109,7 @@ namespace tcmalloc_internal {
#if TCMALLOC_PAGE_SHIFT == 12
inline constexpr size_t kPageShift = 12;
-inline constexpr size_t kNumBaseClasses = 46;
+inline constexpr size_t kNumBaseClasses = 46;
inline constexpr bool kHasExpandedClasses = false;
inline constexpr size_t kMaxSize = 8 << 10;
inline constexpr size_t kMinThreadCacheSize = 4 * 1024;
@@ -121,7 +121,7 @@ inline constexpr size_t kDefaultProfileSamplingRate = 1 << 19;
inline constexpr size_t kMinPages = 2;
#elif TCMALLOC_PAGE_SHIFT == 15
inline constexpr size_t kPageShift = 15;
-inline constexpr size_t kNumBaseClasses = 78;
+inline constexpr size_t kNumBaseClasses = 78;
inline constexpr bool kHasExpandedClasses = true;
inline constexpr size_t kMaxSize = 256 * 1024;
inline constexpr size_t kMinThreadCacheSize = kMaxSize * 2;
@@ -134,7 +134,7 @@ inline constexpr size_t kDefaultProfileSamplingRate = 1 << 21;
inline constexpr size_t kMinPages = 8;
#elif TCMALLOC_PAGE_SHIFT == 18
inline constexpr size_t kPageShift = 18;
-inline constexpr size_t kNumBaseClasses = 89;
+inline constexpr size_t kNumBaseClasses = 89;
inline constexpr bool kHasExpandedClasses = true;
inline constexpr size_t kMaxSize = 256 * 1024;
inline constexpr size_t kMinThreadCacheSize = kMaxSize * 2;
@@ -147,7 +147,7 @@ inline constexpr size_t kDefaultProfileSamplingRate = 1 << 21;
inline constexpr size_t kMinPages = 8;
#elif TCMALLOC_PAGE_SHIFT == 13
inline constexpr size_t kPageShift = 13;
-inline constexpr size_t kNumBaseClasses = 86;
+inline constexpr size_t kNumBaseClasses = 86;
inline constexpr bool kHasExpandedClasses = true;
inline constexpr size_t kMaxSize = 256 * 1024;
inline constexpr size_t kMinThreadCacheSize = kMaxSize * 2;
@@ -162,36 +162,36 @@ inline constexpr size_t kMinPages = 8;
#error "Unsupported TCMALLOC_PAGE_SHIFT value!"
#endif
-// Sanitizers constrain the memory layout which causes problems with the
-// enlarged tags required to represent NUMA partitions. Disable NUMA awareness
-// to avoid failing to mmap memory.
-#if defined(TCMALLOC_NUMA_AWARE) && !defined(MEMORY_SANITIZER) && \
- !defined(THREAD_SANITIZER)
-inline constexpr size_t kNumaPartitions = 2;
-#else
-inline constexpr size_t kNumaPartitions = 1;
-#endif
-
-// We have copies of kNumBaseClasses size classes for each NUMA node, followed
-// by any expanded classes.
-inline constexpr size_t kExpandedClassesStart =
- kNumBaseClasses * kNumaPartitions;
-inline constexpr size_t kNumClasses =
- kExpandedClassesStart + (kHasExpandedClasses ? kNumBaseClasses : 0);
-
-// Size classes are often stored as uint32_t values, but there are some
-// situations where we need to store a size class with as compact a
-// representation as possible (e.g. in PageMap). Here we determine the integer
-// type to use in these situations - i.e. the smallest integer type large
-// enough to store values in the range [0,kNumClasses).
-constexpr size_t kMaxClass = kNumClasses - 1;
-using CompactSizeClass =
- std::conditional_t<kMaxClass <= std::numeric_limits<uint8_t>::max(),
- uint8_t, uint16_t>;
-
-// ~64K classes ought to be enough for anybody, but let's be sure.
-static_assert(kMaxClass <= std::numeric_limits<CompactSizeClass>::max());
-
+// Sanitizers constrain the memory layout which causes problems with the
+// enlarged tags required to represent NUMA partitions. Disable NUMA awareness
+// to avoid failing to mmap memory.
+#if defined(TCMALLOC_NUMA_AWARE) && !defined(MEMORY_SANITIZER) && \
+ !defined(THREAD_SANITIZER)
+inline constexpr size_t kNumaPartitions = 2;
+#else
+inline constexpr size_t kNumaPartitions = 1;
+#endif
+
+// We have copies of kNumBaseClasses size classes for each NUMA node, followed
+// by any expanded classes.
+inline constexpr size_t kExpandedClassesStart =
+ kNumBaseClasses * kNumaPartitions;
+inline constexpr size_t kNumClasses =
+ kExpandedClassesStart + (kHasExpandedClasses ? kNumBaseClasses : 0);
+
+// Size classes are often stored as uint32_t values, but there are some
+// situations where we need to store a size class with as compact a
+// representation as possible (e.g. in PageMap). Here we determine the integer
+// type to use in these situations - i.e. the smallest integer type large
+// enough to store values in the range [0,kNumClasses).
+constexpr size_t kMaxClass = kNumClasses - 1;
+using CompactSizeClass =
+ std::conditional_t<kMaxClass <= std::numeric_limits<uint8_t>::max(),
+ uint8_t, uint16_t>;
+
+// ~64K classes ought to be enough for anybody, but let's be sure.
+static_assert(kMaxClass <= std::numeric_limits<CompactSizeClass>::max());
+
// Minimum/maximum number of batches in TransferCache per size class.
// Actual numbers depends on a number of factors, see TransferCache::Init
// for details.
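Taking the TCMALLOC_PAGE_SHIFT == 13 configuration above (kNumBaseClasses = 86) together with NUMA awareness enabled (kNumaPartitions = 2) and expanded classes, the class-count and CompactSizeClass arithmetic works out as in this standalone sketch; the constants are copied from the header excerpts above, everything else is illustrative.

// Standalone recomputation of kNumClasses and the CompactSizeClass choice.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <type_traits>

constexpr size_t kNumBaseClasses = 86;   // TCMALLOC_PAGE_SHIFT == 13 value
constexpr size_t kNumaPartitions = 2;    // NUMA awareness enabled
constexpr bool kHasExpandedClasses = true;

constexpr size_t kExpandedClassesStart = kNumBaseClasses * kNumaPartitions;
constexpr size_t kNumClasses =
    kExpandedClassesStart + (kHasExpandedClasses ? kNumBaseClasses : 0);
constexpr size_t kMaxClass = kNumClasses - 1;

using CompactSizeClass =
    std::conditional_t<kMaxClass <= std::numeric_limits<uint8_t>::max(),
                       uint8_t, uint16_t>;

int main() {
  // 86 * 2 + 86 = 258 classes, so kMaxClass = 257 exceeds uint8_t's 255 and
  // CompactSizeClass resolves to uint16_t.
  std::printf("kNumClasses=%zu sizeof(CompactSizeClass)=%zu\n", kNumClasses,
              sizeof(CompactSizeClass));
  return 0;
}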
@@ -205,7 +205,7 @@ inline constexpr size_t kPageSize = 1 << kPageShift;
// of increasing kMaxSize to be multiple of kPageSize is unclear. Object size
// profile data indicates that the number of simultaneously live objects (of
// size >= 256k) tends to be very small. Keeping those objects as 'large'
-// objects won't cause too much memory waste, while heap memory reuse can be
+// objects won't cause too much memory waste, while heap memory reuse can be
// improved. Increasing kMaxSize to be too large has another bad side effect --
// the thread cache pressure is increased, which will in turn increase traffic
// between central cache and thread cache, leading to performance degradation.
@@ -214,7 +214,7 @@ static_assert((kMaxSize / kPageSize) >= kMinPages || kPageShift >= 18,
inline constexpr size_t kAlignment = 8;
// log2 (kAlignment)
-inline constexpr size_t kAlignmentShift = absl::bit_width(kAlignment - 1u);
+inline constexpr size_t kAlignmentShift = absl::bit_width(kAlignment - 1u);
// The number of times that a deallocation can cause a freelist to
// go over its max_length() before shrinking max_length().
@@ -228,18 +228,18 @@ inline constexpr int kMaxOverages = 3;
inline constexpr int kMaxDynamicFreeListLength = 8192;
enum class MemoryTag : uint8_t {
- // Sampled, infrequently allocated
- kSampled = 0x0,
- // Not sampled, NUMA partition 0
- kNormalP0 = 0x1,
- // Not sampled, NUMA partition 1
- kNormalP1 = (kNumaPartitions > 1) ? 0x2 : 0xff,
- // Not sampled
- kNormal = kNormalP0,
+ // Sampled, infrequently allocated
+ kSampled = 0x0,
+ // Not sampled, NUMA partition 0
+ kNormalP0 = 0x1,
+ // Not sampled, NUMA partition 1
+ kNormalP1 = (kNumaPartitions > 1) ? 0x2 : 0xff,
+ // Not sampled
+ kNormal = kNormalP0,
};
inline constexpr uintptr_t kTagShift = std::min(kAddressBits - 4, 42);
-inline constexpr uintptr_t kTagMask = uintptr_t{0x3} << kTagShift;
+inline constexpr uintptr_t kTagMask = uintptr_t{0x3} << kTagShift;
// Returns true if ptr is tagged.
ABSL_DEPRECATED("Replace with specific tests")
@@ -248,21 +248,21 @@ inline bool IsTaggedMemory(const void* ptr) {
}
inline bool IsSampledMemory(const void* ptr) {
- constexpr uintptr_t kSampledNormalMask = kNumaPartitions > 1 ? 0x3 : 0x1;
-
- static_assert(static_cast<uintptr_t>(MemoryTag::kNormalP0) &
- kSampledNormalMask);
- static_assert(static_cast<uintptr_t>(MemoryTag::kNormalP1) &
- kSampledNormalMask);
-
- const uintptr_t tag =
- (reinterpret_cast<uintptr_t>(ptr) & kTagMask) >> kTagShift;
- return (tag & kSampledNormalMask) ==
- static_cast<uintptr_t>(MemoryTag::kSampled);
+ constexpr uintptr_t kSampledNormalMask = kNumaPartitions > 1 ? 0x3 : 0x1;
+
+ static_assert(static_cast<uintptr_t>(MemoryTag::kNormalP0) &
+ kSampledNormalMask);
+ static_assert(static_cast<uintptr_t>(MemoryTag::kNormalP1) &
+ kSampledNormalMask);
+
+ const uintptr_t tag =
+ (reinterpret_cast<uintptr_t>(ptr) & kTagMask) >> kTagShift;
+ return (tag & kSampledNormalMask) ==
+ static_cast<uintptr_t>(MemoryTag::kSampled);
}
-inline bool IsNormalMemory(const void* ptr) { return !IsSampledMemory(ptr); }
-
+inline bool IsNormalMemory(const void* ptr) { return !IsSampledMemory(ptr); }
+
inline MemoryTag GetMemoryTag(const void* ptr) {
return static_cast<MemoryTag>((reinterpret_cast<uintptr_t>(ptr) & kTagMask) >>
kTagShift);
@@ -271,10 +271,10 @@ inline MemoryTag GetMemoryTag(const void* ptr) {
absl::string_view MemoryTagToLabel(MemoryTag tag);
inline constexpr bool IsExpandedSizeClass(unsigned cl) {
- return kHasExpandedClasses && (cl >= kExpandedClassesStart);
+ return kHasExpandedClasses && (cl >= kExpandedClassesStart);
}
-#if !defined(TCMALLOC_SMALL_BUT_SLOW) && __SIZEOF_POINTER__ != 4
+#if !defined(TCMALLOC_SMALL_BUT_SLOW) && __SIZEOF_POINTER__ != 4
// Always allocate at least a huge page
inline constexpr size_t kMinSystemAlloc = kHugePageSize;
inline constexpr size_t kMinMmapAlloc = 1 << 30; // mmap() in 1GiB ranges.
@@ -291,31 +291,31 @@ static_assert(kMinMmapAlloc % kMinSystemAlloc == 0,
"Minimum mmap allocation size is not a multiple of"
" minimum system allocation size");
-inline MemoryTag NumaNormalTag(size_t numa_partition) {
- switch (numa_partition) {
- case 0:
- return MemoryTag::kNormalP0;
- case 1:
- return MemoryTag::kNormalP1;
- default:
- ASSUME(false);
- __builtin_unreachable();
- }
-}
-
-inline size_t NumaPartitionFromPointer(void* ptr) {
- if constexpr (kNumaPartitions == 1) {
- return 0;
- }
-
- switch (GetMemoryTag(ptr)) {
- case MemoryTag::kNormalP1:
- return 1;
- default:
- return 0;
- }
-}
-
+inline MemoryTag NumaNormalTag(size_t numa_partition) {
+ switch (numa_partition) {
+ case 0:
+ return MemoryTag::kNormalP0;
+ case 1:
+ return MemoryTag::kNormalP1;
+ default:
+ ASSUME(false);
+ __builtin_unreachable();
+ }
+}
+
+inline size_t NumaPartitionFromPointer(void* ptr) {
+ if constexpr (kNumaPartitions == 1) {
+ return 0;
+ }
+
+ switch (GetMemoryTag(ptr)) {
+ case MemoryTag::kNormalP1:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
// Size-class information + mapping
class SizeMap {
public:
@@ -325,7 +325,7 @@ class SizeMap {
static constexpr size_t kMultiPageAlignment = 64;
// log2 (kMultiPageAlignment)
static constexpr size_t kMultiPageAlignmentShift =
- absl::bit_width(kMultiPageAlignment - 1u);
+ absl::bit_width(kMultiPageAlignment - 1u);
private:
//-------------------------------------------------------------------
@@ -361,8 +361,8 @@ class SizeMap {
// first member so that it inherits the overall alignment of a SizeMap
// instance. In particular, if we create a SizeMap instance that's cache-line
// aligned, this member is also aligned to the width of a cache line.
- CompactSizeClass
- class_array_[kClassArraySize * (kHasExpandedClasses ? 2 : 1)] = {0};
+ CompactSizeClass
+ class_array_[kClassArraySize * (kHasExpandedClasses ? 2 : 1)] = {0};
// Number of objects to move between a per-thread list and a central
// list in one shot. We want this to be not too small so we can
@@ -413,11 +413,11 @@ class SizeMap {
static const SizeClassInfo kSizeClasses[];
static const int kSizeClassesCount;
- static const SizeClassInfo kExperimentalPow2Below64SizeClasses[];
- static const int kExperimentalPow2Below64SizeClassesCount;
- // kExperimentalPowBelow64SizeClassesCount
- static const SizeClassInfo kExperimentalPow2SizeClasses[];
- static const int kExperimentalPow2SizeClassesCount;
+ static const SizeClassInfo kExperimentalPow2Below64SizeClasses[];
+ static const int kExperimentalPow2Below64SizeClassesCount;
+  // kExperimentalPow2Below64SizeClassesCount
+ static const SizeClassInfo kExperimentalPow2SizeClasses[];
+ static const int kExperimentalPow2SizeClassesCount;
// Definition of size class that is set in size_classes.cc
static const SizeClassInfo kLegacySizeClasses[];
@@ -431,41 +431,41 @@ class SizeMap {
// Initialize the mapping arrays
void Init();
- // Returns the size class for size `size` respecting the alignment
- // requirements of `policy`.
+ // Returns the size class for size `size` respecting the alignment
+ // requirements of `policy`.
//
// Returns true on success. Returns false if either:
// - the size exceeds the maximum size class size.
// - the align size is greater or equal to the default page size
// - no matching properly aligned size class is available
//
- // Requires that policy.align() returns a non-zero power of 2.
+ // Requires that policy.align() returns a non-zero power of 2.
//
- // When policy.align() = 1 the default alignment of the size table will be
- // used. If policy.align() is constexpr 1 (e.g. when using
- // DefaultAlignPolicy) then alignment-related code will optimize away.
- //
- // TODO(b/171978365): Replace the output parameter with returning
- // absl::optional<uint32_t>.
- template <typename Policy>
- inline bool ABSL_ATTRIBUTE_ALWAYS_INLINE GetSizeClass(Policy policy,
- size_t size,
+ // When policy.align() = 1 the default alignment of the size table will be
+ // used. If policy.align() is constexpr 1 (e.g. when using
+ // DefaultAlignPolicy) then alignment-related code will optimize away.
+ //
+ // TODO(b/171978365): Replace the output parameter with returning
+ // absl::optional<uint32_t>.
+ template <typename Policy>
+ inline bool ABSL_ATTRIBUTE_ALWAYS_INLINE GetSizeClass(Policy policy,
+ size_t size,
uint32_t* cl) {
- const size_t align = policy.align();
- ASSERT(absl::has_single_bit(align));
+ const size_t align = policy.align();
+ ASSERT(absl::has_single_bit(align));
if (ABSL_PREDICT_FALSE(align >= kPageSize)) {
// TODO(b/172060547): Consider changing this to align > kPageSize.
ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(cl, sizeof(*cl));
return false;
}
-
- uint32_t idx;
- if (ABSL_PREDICT_FALSE(!ClassIndexMaybe(size, &idx))) {
+
+ uint32_t idx;
+ if (ABSL_PREDICT_FALSE(!ClassIndexMaybe(size, &idx))) {
ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(cl, sizeof(*cl));
return false;
}
- *cl = class_array_[idx] + policy.scaled_numa_partition();
+ *cl = class_array_[idx] + policy.scaled_numa_partition();
// Predict that size aligned allocs most often directly map to a proper
// size class, i.e., multiples of 32, 64, etc, matching our class sizes.
@@ -474,7 +474,7 @@ class SizeMap {
if (ABSL_PREDICT_TRUE((class_to_size(*cl) & mask) == 0)) {
return true;
}
- } while ((++*cl % kNumBaseClasses) != 0);
+ } while ((++*cl % kNumBaseClasses) != 0);
ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(cl, sizeof(*cl));
return false;
@@ -482,12 +482,12 @@ class SizeMap {
// Returns size class for given size, or 0 if this instance has not been
// initialized yet. REQUIRES: size <= kMaxSize.
- template <typename Policy>
- inline size_t ABSL_ATTRIBUTE_ALWAYS_INLINE SizeClass(Policy policy,
- size_t size) {
+ template <typename Policy>
+ inline size_t ABSL_ATTRIBUTE_ALWAYS_INLINE SizeClass(Policy policy,
+ size_t size) {
ASSERT(size <= kMaxSize);
uint32_t ret = 0;
- GetSizeClass(policy, size, &ret);
+ GetSizeClass(policy, size, &ret);
return ret;
}
@@ -517,8 +517,8 @@ class SizeMap {
// Linker initialized, so this lock can be accessed at any time.
extern absl::base_internal::SpinLock pageheap_lock;
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_COMMON_H_
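The tag bits defined in the header above live just below the top of the usable address range (kTagShift = min(kAddressBits - 4, 42)); GetMemoryTag() masks them out and IsSampledMemory() tests the low tag bit(s). The standalone illustration below hard-codes the 42-bit shift and fabricates addresses purely to show the bit arithmetic; it is not the real header logic operating on allocator pointers.

// Sketch of the tag extraction arithmetic with fabricated addresses.
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kTagShift = 42;
constexpr uintptr_t kTagMask = uintptr_t{0x3} << kTagShift;

enum class MemoryTag : uint8_t { kSampled = 0x0, kNormalP0 = 0x1, kNormalP1 = 0x2 };

MemoryTag GetMemoryTag(uintptr_t ptr) {
  return static_cast<MemoryTag>((ptr & kTagMask) >> kTagShift);
}

bool IsSampledMemory(uintptr_t ptr, uintptr_t sampled_normal_mask) {
  const uintptr_t tag = (ptr & kTagMask) >> kTagShift;
  return (tag & sampled_normal_mask) ==
         static_cast<uintptr_t>(MemoryTag::kSampled);
}

int main() {
  // Plant tag 0x1 (NORMAL, partition 0) at bit 42 of an otherwise fake address.
  const uintptr_t normal_p0 = (uintptr_t{0x1} << kTagShift) | 0x1000;
  const uintptr_t sampled = uintptr_t{0x2000};  // tag bits are zero => SAMPLED
  // With two NUMA partitions the mask is 0x3, otherwise 0x1, as in the header.
  std::printf("normal_p0 sampled? %d\n", IsSampledMemory(normal_p0, 0x3));
  std::printf("sampled   sampled? %d\n", IsSampledMemory(sampled, 0x3));
  return 0;
}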
diff --git a/contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc b/contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc
index 8ae02b38e9..6bd70745d4 100644
--- a/contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc
@@ -25,7 +25,7 @@
#include "absl/base/internal/sysinfo.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"
-#include "absl/container/fixed_array.h"
+#include "absl/container/fixed_array.h"
#include "tcmalloc/arena.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/logging.h"
@@ -34,9 +34,9 @@
#include "tcmalloc/static_vars.h"
#include "tcmalloc/transfer_cache.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
static cpu_set_t FillActiveCpuMask() {
cpu_set_t allowed_cpus;
@@ -45,7 +45,7 @@ static cpu_set_t FillActiveCpuMask() {
}
#ifdef PERCPU_USE_RSEQ
- const bool real_cpus = !subtle::percpu::UsingFlatVirtualCpus();
+ const bool real_cpus = !subtle::percpu::UsingFlatVirtualCpus();
#else
const bool real_cpus = true;
#endif
@@ -70,7 +70,7 @@ static size_t MaxCapacity(size_t cl) {
static constexpr size_t kNumSmall = 10;
// The memory used for each per-CPU slab is the sum of:
- // sizeof(std::atomic<int64_t>) * kNumClasses
+ // sizeof(std::atomic<int64_t>) * kNumClasses
// sizeof(void*) * (kSmallObjectDepth + 1) * kNumSmall
// sizeof(void*) * (kLargeObjectDepth + 1) * kNumLarge
//
@@ -84,36 +84,36 @@ static size_t MaxCapacity(size_t cl) {
// With SMALL_BUT_SLOW we have 4KiB of per-cpu slab and 46 class sizes we
// allocate:
// == 8 * 46 + 8 * ((16 + 1) * 10 + (6 + 1) * 35) = 4038 bytes of 4096
- static const uint16_t kSmallObjectDepth = 16;
- static const uint16_t kLargeObjectDepth = 6;
+ static const uint16_t kSmallObjectDepth = 16;
+ static const uint16_t kLargeObjectDepth = 6;
#else
// We allocate 256KiB per-cpu for pointers to cached per-cpu memory.
// Each 256KiB is a subtle::percpu::TcmallocSlab::Slabs
// Max(kNumClasses) is 89, so the maximum footprint per CPU is:
// 89 * 8 + 8 * ((2048 + 1) * 10 + (152 + 1) * 78 + 88) = 254 KiB
- static const uint16_t kSmallObjectDepth = 2048;
- static const uint16_t kLargeObjectDepth = 152;
+ static const uint16_t kSmallObjectDepth = 2048;
+ static const uint16_t kLargeObjectDepth = 152;
#endif
if (cl == 0 || cl >= kNumClasses) return 0;
- if (Static::sharded_transfer_cache().should_use(cl)) {
- return 0;
- }
-
+ if (Static::sharded_transfer_cache().should_use(cl)) {
+ return 0;
+ }
+
if (Static::sizemap().class_to_size(cl) == 0) {
return 0;
}
- if (!IsExpandedSizeClass(cl) && (cl % kNumBaseClasses) <= kNumSmall) {
+ if (!IsExpandedSizeClass(cl) && (cl % kNumBaseClasses) <= kNumSmall) {
// Small object sizes are very heavily used and need very deep caches for
// good performance (well over 90% of malloc calls are for cl <= 10.)
return kSmallObjectDepth;
}
- if (IsExpandedSizeClass(cl)) {
- return 0;
- }
-
+ if (IsExpandedSizeClass(cl)) {
+ return 0;
+ }
+
return kLargeObjectDepth;
}
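As a quick cross-check of the 254 KiB figure quoted in the MaxCapacity() comment above, the expression can be evaluated directly; the constants below are copied from that comment rather than read from the live size map.

// Back-of-the-envelope recomputation of the per-CPU slab footprint bound.
#include <cstdio>

int main() {
  // From the comment: 89 classes, small depth 2048 over 10 slots, large depth
  // 152 over 78 slots, plus the trailing 88 from the same expression.
  const long bytes = 89 * 8 + 8 * ((2048 + 1) * 10 + (152 + 1) * 78 + 88);
  std::printf("%ld bytes (~%ld KiB) of the 256 KiB per-CPU slab\n", bytes,
              bytes / 1024);  // 260808 bytes, i.e. about 254 KiB
  return 0;
}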
@@ -126,38 +126,38 @@ void CPUCache::Activate(ActivationMode mode) {
ASSERT(Static::IsInited());
int num_cpus = absl::base_internal::NumCPUs();
- size_t per_cpu_shift = kPerCpuShift;
- const auto &topology = Static::numa_topology();
- if (topology.numa_aware()) {
- per_cpu_shift += absl::bit_ceil(topology.active_partitions() - 1);
- }
-
- const size_t kBytesAvailable = (1 << per_cpu_shift);
- size_t bytes_required = sizeof(std::atomic<int64_t>) * kNumClasses;
-
- // Deal with size classes that correspond only to NUMA partitions that are in
- // use. If NUMA awareness is disabled then we may have a smaller shift than
- // would suffice for all of the unused size classes.
- for (int cl = 0;
- cl < Static::numa_topology().active_partitions() * kNumBaseClasses;
- ++cl) {
- const uint16_t mc = MaxCapacity(cl);
- max_capacity_[cl] = mc;
- bytes_required += sizeof(void *) * mc;
- }
-
- // Deal with expanded size classes.
- for (int cl = kExpandedClassesStart; cl < kNumClasses; ++cl) {
- const uint16_t mc = MaxCapacity(cl);
- max_capacity_[cl] = mc;
- bytes_required += sizeof(void *) * mc;
+ size_t per_cpu_shift = kPerCpuShift;
+ const auto &topology = Static::numa_topology();
+ if (topology.numa_aware()) {
+ per_cpu_shift += absl::bit_ceil(topology.active_partitions() - 1);
+ }
+
+ const size_t kBytesAvailable = (1 << per_cpu_shift);
+ size_t bytes_required = sizeof(std::atomic<int64_t>) * kNumClasses;
+
+ // Deal with size classes that correspond only to NUMA partitions that are in
+ // use. If NUMA awareness is disabled then we may have a smaller shift than
+ // would suffice for all of the unused size classes.
+ for (int cl = 0;
+ cl < Static::numa_topology().active_partitions() * kNumBaseClasses;
+ ++cl) {
+ const uint16_t mc = MaxCapacity(cl);
+ max_capacity_[cl] = mc;
+ bytes_required += sizeof(void *) * mc;
}
+ // Deal with expanded size classes.
+ for (int cl = kExpandedClassesStart; cl < kNumClasses; ++cl) {
+ const uint16_t mc = MaxCapacity(cl);
+ max_capacity_[cl] = mc;
+ bytes_required += sizeof(void *) * mc;
+ }
+
// As we may make certain size classes no-ops by selecting "0" at runtime,
// using a compile-time calculation overestimates the worst-case memory usage.
- if (ABSL_PREDICT_FALSE(bytes_required > kBytesAvailable)) {
+ if (ABSL_PREDICT_FALSE(bytes_required > kBytesAvailable)) {
Crash(kCrash, __FILE__, __LINE__, "per-CPU memory exceeded, have ",
- kBytesAvailable, " need ", bytes_required);
+ kBytesAvailable, " need ", bytes_required);
}
absl::base_internal::SpinLockHolder h(&pageheap_lock);
@@ -173,11 +173,11 @@ void CPUCache::Activate(ActivationMode mode) {
resize_[cpu].per_class[cl].Init();
}
resize_[cpu].available.store(max_cache_size, std::memory_order_relaxed);
- resize_[cpu].capacity.store(max_cache_size, std::memory_order_relaxed);
+ resize_[cpu].capacity.store(max_cache_size, std::memory_order_relaxed);
resize_[cpu].last_steal.store(1, std::memory_order_relaxed);
}
- freelist_.Init(SlabAlloc, MaxCapacityHelper, lazy_slabs_, per_cpu_shift);
+ freelist_.Init(SlabAlloc, MaxCapacityHelper, lazy_slabs_, per_cpu_shift);
if (mode == ActivationMode::FastPathOn) {
Static::ActivateCPUCache();
}
@@ -228,15 +228,15 @@ void *CPUCache::Refill(int cpu, size_t cl) {
if (i != 0) {
static_assert(ABSL_ARRAYSIZE(batch) >= kMaxObjectsToMove,
"not enough space in batch");
- Static::transfer_cache().InsertRange(cl, absl::Span<void *>(batch, i));
+ Static::transfer_cache().InsertRange(cl, absl::Span<void *>(batch, i));
}
}
- } while (got == batch_length && i == 0 && total < target &&
- cpu == freelist_.GetCurrentVirtualCpuUnsafe());
+ } while (got == batch_length && i == 0 && total < target &&
+ cpu == freelist_.GetCurrentVirtualCpuUnsafe());
for (int i = to_return.count; i < kMaxToReturn; ++i) {
Static::transfer_cache().InsertRange(
- to_return.cl[i], absl::Span<void *>(&(to_return.obj[i]), 1));
+ to_return.cl[i], absl::Span<void *>(&(to_return.obj[i]), 1));
}
return result;
@@ -263,7 +263,7 @@ size_t CPUCache::UpdateCapacity(int cpu, size_t cl, size_t batch_length,
// it again. Also we will shrink it by 1, but grow by a batch. So we should
// have lots of time until we need to grow it again.
- const size_t max_capacity = max_capacity_[cl];
+ const size_t max_capacity = max_capacity_[cl];
size_t capacity = freelist_.Capacity(cpu, cl);
// We assert that the return value, target, is non-zero, so starting from an
// initial capacity of zero means we may be populating this core for the
@@ -273,7 +273,7 @@ size_t CPUCache::UpdateCapacity(int cpu, size_t cl, size_t batch_length,
[](CPUCache *cache, int cpu) {
if (cache->lazy_slabs_) {
absl::base_internal::SpinLockHolder h(&cache->resize_[cpu].lock);
- cache->freelist_.InitCPU(cpu, MaxCapacityHelper);
+ cache->freelist_.InitCPU(cpu, MaxCapacityHelper);
}
// While we could unconditionally store, a lazy slab population
@@ -352,7 +352,7 @@ void CPUCache::Grow(int cpu, size_t cl, size_t desired_increase,
size_t actual_increase = acquired_bytes / size;
actual_increase = std::min(actual_increase, desired_increase);
// Remember, Grow may not give us all we ask for.
- size_t increase = freelist_.Grow(cpu, cl, actual_increase, max_capacity_[cl]);
+ size_t increase = freelist_.Grow(cpu, cl, actual_increase, max_capacity_[cl]);
size_t increased_bytes = increase * size;
if (increased_bytes < acquired_bytes) {
// return whatever we didn't use to the slack.
@@ -361,285 +361,285 @@ void CPUCache::Grow(int cpu, size_t cl, size_t desired_increase,
}
}
-void CPUCache::TryReclaimingCaches() {
- const int num_cpus = absl::base_internal::NumCPUs();
-
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- // Nothing to reclaim if the cpu is not populated.
- if (!HasPopulated(cpu)) {
- continue;
- }
-
- uint64_t used_bytes = UsedBytes(cpu);
- uint64_t prev_used_bytes =
- resize_[cpu].reclaim_used_bytes.load(std::memory_order_relaxed);
-
- // Get reclaim miss and used bytes stats that were captured at the end of
- // the previous interval.
- const CpuCacheMissStats miss_stats = GetReclaimCacheMissStats(cpu);
- uint64_t misses =
- uint64_t{miss_stats.underflows} + uint64_t{miss_stats.overflows};
-
- // Reclaim the cache if the number of used bytes and total number of misses
- // stayed constant since the last interval.
- if (used_bytes != 0 && used_bytes == prev_used_bytes && misses == 0) {
- Reclaim(cpu);
- }
-
- // Takes a snapshot of used bytes in the cache at the end of this interval
- // so that we can calculate if cache usage changed in the next interval.
- //
- // Reclaim occurs on a single thread. So, the relaxed store to used_bytes
- // is safe.
- resize_[cpu].reclaim_used_bytes.store(used_bytes,
- std::memory_order_relaxed);
- }
-}
-
-void CPUCache::ShuffleCpuCaches() {
- // Knobs that we can potentially tune depending on the workloads.
- constexpr double kBytesToStealPercent = 5.0;
- constexpr int kMaxNumStealCpus = 5;
-
- const int num_cpus = absl::base_internal::NumCPUs();
- absl::FixedArray<std::pair<int, uint64_t>> misses(num_cpus);
-
- // Record the cumulative misses for the caches so that we can select the
- // caches with the highest misses as the candidates to steal the cache for.
- int max_populated_cpu = -1;
- int num_populated_cpus = 0;
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- if (!HasPopulated(cpu)) {
- continue;
- }
- const CpuCacheMissStats miss_stats = GetIntervalCacheMissStats(cpu);
- misses[num_populated_cpus] = {
- cpu, uint64_t{miss_stats.underflows} + uint64_t{miss_stats.overflows}};
- max_populated_cpu = cpu;
- ++num_populated_cpus;
- }
- if (max_populated_cpu == -1) {
- return;
- }
-
- // Sorts misses to identify cpus with highest misses.
- //
- // TODO(vgogte): We can potentially sort the entire misses array and use that
- // in StealFromOtherCache to determine cpus to steal from. That is, [0,
- // num_dest_cpus) may be the destination cpus and [num_dest_cpus, num_cpus)
- // may be cpus we may steal from. We can iterate through the array in a
- // descending order to steal from them. The upside of this mechanism is that
- // we would be able to do a more fair stealing, starting with cpus with lowest
- // misses. The downside of this mechanism is that we would have to sort the
- // entire misses array. This might be compute intensive on servers with high
- // number of cpus (eg. Rome, Milan). We need to investigate the compute
- // required to implement this.
- const int num_dest_cpus = std::min(num_populated_cpus, kMaxNumStealCpus);
- std::partial_sort(misses.begin(), misses.begin() + num_dest_cpus,
- misses.end(),
- [](std::pair<int, uint64_t> a, std::pair<int, uint64_t> b) {
- if (a.second == b.second) {
- return a.first < b.first;
- }
- return a.second > b.second;
- });
-
- // Try to steal kBytesToStealPercent percentage of max_per_cpu_cache_size for
- // each destination cpu cache.
- size_t to_steal =
- kBytesToStealPercent / 100.0 * Parameters::max_per_cpu_cache_size();
- for (int i = 0; i < num_dest_cpus; ++i) {
- StealFromOtherCache(misses[i].first, max_populated_cpu, to_steal);
- }
-
- // Takes a snapshot of underflows and overflows at the end of this interval
- // so that we can calculate the misses that occurred in the next interval.
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- size_t underflows =
- resize_[cpu].total_underflows.load(std::memory_order_relaxed);
- size_t overflows =
- resize_[cpu].total_overflows.load(std::memory_order_relaxed);
-
- // Shuffle occurs on a single thread. So, the relaxed stores to
- // prev_underflow and pre_overflow counters are safe.
- resize_[cpu].shuffle_underflows.store(underflows,
- std::memory_order_relaxed);
- resize_[cpu].shuffle_overflows.store(overflows, std::memory_order_relaxed);
- }
-}
-
-static void ShrinkHandler(void *arg, size_t cl, void **batch, size_t count) {
- const size_t batch_length = Static::sizemap().num_objects_to_move(cl);
- for (size_t i = 0; i < count; i += batch_length) {
- size_t n = std::min(batch_length, count - i);
- Static::transfer_cache().InsertRange(cl, absl::Span<void *>(batch + i, n));
- }
-}
-
-void CPUCache::StealFromOtherCache(int cpu, int max_populated_cpu,
- size_t bytes) {
- constexpr double kCacheMissThreshold = 0.80;
-
- const CpuCacheMissStats dest_misses = GetIntervalCacheMissStats(cpu);
-
- // If both underflows and overflows are 0, we should not need to steal.
- if (dest_misses.underflows == 0 && dest_misses.overflows == 0) return;
-
- size_t acquired = 0;
-
- // We use last_cpu_cache_steal_ as a hint to start our search for cpu ids to
- // steal from so that we can iterate through the cpus in a nice round-robin
- // fashion.
- int src_cpu = std::min(last_cpu_cache_steal_.load(std::memory_order_relaxed),
- max_populated_cpu);
-
- // We iterate through max_populate_cpus number of cpus to steal from.
- // max_populate_cpus records the max cpu id that has been populated. Note
- // that, any intermediate changes since the max_populated_cpus was measured
- // may have populated higher cpu ids, but we do not include those in the
- // search. The approximation prevents us from doing another pass through the
- // cpus to just find the latest populated cpu id.
- //
- // We break from the loop once we iterate through all the cpus once, or if the
- // total number of acquired bytes is higher than or equal to the desired bytes
- // we want to steal.
- for (int cpu_offset = 1; cpu_offset <= max_populated_cpu && acquired < bytes;
- ++cpu_offset) {
- if (--src_cpu < 0) {
- src_cpu = max_populated_cpu;
- }
- ASSERT(0 <= src_cpu);
- ASSERT(src_cpu <= max_populated_cpu);
-
- // We do not steal from the same CPU. Maybe we can explore combining this
- // with stealing from the same CPU later.
- if (src_cpu == cpu) continue;
-
- // We do not steal from the cache that hasn't been populated yet.
- if (!HasPopulated(src_cpu)) continue;
-
- // We do not steal from cache that has capacity less than our lower
- // capacity threshold.
- if (Capacity(src_cpu) <
- kCacheCapacityThreshold * Parameters::max_per_cpu_cache_size())
- continue;
-
- const CpuCacheMissStats src_misses = GetIntervalCacheMissStats(src_cpu);
-
- // If underflows and overflows from the source cpu are higher, we do not
- // steal from that cache. We consider the cache as a candidate to steal from
- // only when its misses are lower than 0.8x that of the dest cache.
- if (src_misses.underflows > kCacheMissThreshold * dest_misses.underflows ||
- src_misses.overflows > kCacheMissThreshold * dest_misses.overflows)
- continue;
-
- size_t start_cl =
- resize_[src_cpu].last_steal.load(std::memory_order_relaxed);
-
- ASSERT(start_cl < kNumClasses);
- ASSERT(0 < start_cl);
- size_t source_cl = start_cl;
- for (size_t offset = 1; offset < kNumClasses; ++offset) {
- source_cl = start_cl + offset;
- if (source_cl >= kNumClasses) {
- source_cl -= kNumClasses - 1;
- }
- ASSERT(0 < source_cl);
- ASSERT(source_cl < kNumClasses);
-
- const size_t capacity = freelist_.Capacity(src_cpu, source_cl);
- if (capacity == 0) {
- // Nothing to steal.
- continue;
- }
- const size_t length = freelist_.Length(src_cpu, source_cl);
-
- // TODO(vgogte): Currently, scoring is similar to stealing from the
- // same cpu in CpuCache::Steal(). Revisit this later to tune the
- // knobs.
- const size_t batch_length =
- Static::sizemap().num_objects_to_move(source_cl);
- size_t size = Static::sizemap().class_to_size(source_cl);
-
- // Clock-like algorithm to prioritize size classes for shrinking.
- //
- // Each size class has quiescent ticks counter which is incremented as we
- // pass it, the counter is reset to 0 in UpdateCapacity on grow.
- // If the counter value is 0, then we've just tried to grow the size
- // class, so it makes little sense to shrink it back. The higher counter
- // value the longer ago we grew the list and the more probable it is that
- // the full capacity is unused.
- //
- // Then, we calculate "shrinking score", the higher the score the less we
- // we want to shrink this size class. The score is considerably skewed
- // towards larger size classes: smaller classes are usually used more
- // actively and we also benefit less from shrinking smaller classes (steal
- // less capacity). Then, we also avoid shrinking full freelists as we will
- // need to evict an object and then go to the central freelist to return
- // it. Then, we also avoid shrinking freelists that are just above batch
- // size, because shrinking them will disable transfer cache.
- //
- // Finally, we shrink if the ticks counter is >= the score.
- uint32_t qticks = resize_[src_cpu].per_class[source_cl].Tick();
- uint32_t score = 0;
- // Note: the following numbers are based solely on intuition, common sense
- // and benchmarking results.
- if (size <= 144) {
- score = 2 + (length >= capacity) +
- (length >= batch_length && length < 2 * batch_length);
- } else if (size <= 1024) {
- score = 1 + (length >= capacity) +
- (length >= batch_length && length < 2 * batch_length);
- } else if (size <= (64 << 10)) {
- score = (length >= capacity);
- }
- if (score > qticks) {
- continue;
- }
-
- // Finally, try to shrink (can fail if we were migrated).
- // We always shrink by 1 object. The idea is that inactive lists will be
- // shrunk to zero eventually anyway (or they just would not grow in the
- // first place), but for active lists it does not make sense to
- // aggressively shuffle capacity all the time.
- //
- // If the list is full, ShrinkOtherCache first tries to pop enough items
- // to make space and then shrinks the capacity.
- // TODO(vgogte): Maybe we can steal more from a single list to avoid
- // frequent locking overhead.
- {
- absl::base_internal::SpinLockHolder h(&resize_[src_cpu].lock);
- if (freelist_.ShrinkOtherCache(src_cpu, source_cl, 1, nullptr,
- ShrinkHandler) == 1) {
- acquired += size;
- resize_[src_cpu].capacity.fetch_sub(size, std::memory_order_relaxed);
- }
- }
-
- if (acquired >= bytes) {
- break;
- }
- }
- resize_[cpu].last_steal.store(source_cl, std::memory_order_relaxed);
- }
- // Record the last cpu id we stole from, which would provide a hint to the
- // next time we iterate through the cpus for stealing.
- last_cpu_cache_steal_.store(src_cpu, std::memory_order_relaxed);
-
- // Increment the capacity of the destination cpu cache by the amount of bytes
- // acquired from source caches.
- if (acquired) {
- size_t before = resize_[cpu].available.load(std::memory_order_relaxed);
- size_t bytes_with_stolen;
- do {
- bytes_with_stolen = before + acquired;
- } while (!resize_[cpu].available.compare_exchange_weak(
- before, bytes_with_stolen, std::memory_order_relaxed,
- std::memory_order_relaxed));
- resize_[cpu].capacity.fetch_add(acquired, std::memory_order_relaxed);
- }
-}
-
+void CPUCache::TryReclaimingCaches() {
+ const int num_cpus = absl::base_internal::NumCPUs();
+
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ // Nothing to reclaim if the cpu is not populated.
+ if (!HasPopulated(cpu)) {
+ continue;
+ }
+
+ uint64_t used_bytes = UsedBytes(cpu);
+ uint64_t prev_used_bytes =
+ resize_[cpu].reclaim_used_bytes.load(std::memory_order_relaxed);
+
+ // Get reclaim miss and used bytes stats that were captured at the end of
+ // the previous interval.
+ const CpuCacheMissStats miss_stats = GetReclaimCacheMissStats(cpu);
+ uint64_t misses =
+ uint64_t{miss_stats.underflows} + uint64_t{miss_stats.overflows};
+
+ // Reclaim the cache if the number of used bytes and total number of misses
+ // stayed constant since the last interval.
+ if (used_bytes != 0 && used_bytes == prev_used_bytes && misses == 0) {
+ Reclaim(cpu);
+ }
+
+ // Takes a snapshot of used bytes in the cache at the end of this interval
+ // so that we can calculate if cache usage changed in the next interval.
+ //
+ // Reclaim occurs on a single thread. So, the relaxed store to used_bytes
+ // is safe.
+ resize_[cpu].reclaim_used_bytes.store(used_bytes,
+ std::memory_order_relaxed);
+ }
+}
+
+void CPUCache::ShuffleCpuCaches() {
+ // Knobs that we can potentially tune depending on the workloads.
+ constexpr double kBytesToStealPercent = 5.0;
+ constexpr int kMaxNumStealCpus = 5;
+
+ const int num_cpus = absl::base_internal::NumCPUs();
+ absl::FixedArray<std::pair<int, uint64_t>> misses(num_cpus);
+
+ // Record the cumulative misses for the caches so that we can select the
+ // caches with the highest misses as the candidates to steal the cache for.
+ int max_populated_cpu = -1;
+ int num_populated_cpus = 0;
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ if (!HasPopulated(cpu)) {
+ continue;
+ }
+ const CpuCacheMissStats miss_stats = GetIntervalCacheMissStats(cpu);
+ misses[num_populated_cpus] = {
+ cpu, uint64_t{miss_stats.underflows} + uint64_t{miss_stats.overflows}};
+ max_populated_cpu = cpu;
+ ++num_populated_cpus;
+ }
+ if (max_populated_cpu == -1) {
+ return;
+ }
+
+ // Sorts misses to identify cpus with highest misses.
+ //
+ // TODO(vgogte): We can potentially sort the entire misses array and use that
+ // in StealFromOtherCache to determine cpus to steal from. That is, [0,
+ // num_dest_cpus) may be the destination cpus and [num_dest_cpus, num_cpus)
+ // may be cpus we may steal from. We can iterate through the array in a
+ // descending order to steal from them. The upside of this mechanism is that
+ // we would be able to do a more fair stealing, starting with cpus with lowest
+ // misses. The downside of this mechanism is that we would have to sort the
+ // entire misses array. This might be compute intensive on servers with high
+ // number of cpus (eg. Rome, Milan). We need to investigate the compute
+ // required to implement this.
+ const int num_dest_cpus = std::min(num_populated_cpus, kMaxNumStealCpus);
+ std::partial_sort(misses.begin(), misses.begin() + num_dest_cpus,
+ misses.end(),
+ [](std::pair<int, uint64_t> a, std::pair<int, uint64_t> b) {
+ if (a.second == b.second) {
+ return a.first < b.first;
+ }
+ return a.second > b.second;
+ });
+
+ // Try to steal kBytesToStealPercent percentage of max_per_cpu_cache_size for
+ // each destination cpu cache.
+ size_t to_steal =
+ kBytesToStealPercent / 100.0 * Parameters::max_per_cpu_cache_size();
+ for (int i = 0; i < num_dest_cpus; ++i) {
+ StealFromOtherCache(misses[i].first, max_populated_cpu, to_steal);
+ }
+
+ // Takes a snapshot of underflows and overflows at the end of this interval
+ // so that we can calculate the misses that occurred in the next interval.
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ size_t underflows =
+ resize_[cpu].total_underflows.load(std::memory_order_relaxed);
+ size_t overflows =
+ resize_[cpu].total_overflows.load(std::memory_order_relaxed);
+
+ // Shuffle occurs on a single thread. So, the relaxed stores to
+ // prev_underflow and pre_overflow counters are safe.
+ resize_[cpu].shuffle_underflows.store(underflows,
+ std::memory_order_relaxed);
+ resize_[cpu].shuffle_overflows.store(overflows, std::memory_order_relaxed);
+ }
+}
+
+static void ShrinkHandler(void *arg, size_t cl, void **batch, size_t count) {
+ const size_t batch_length = Static::sizemap().num_objects_to_move(cl);
+ for (size_t i = 0; i < count; i += batch_length) {
+ size_t n = std::min(batch_length, count - i);
+ Static::transfer_cache().InsertRange(cl, absl::Span<void *>(batch + i, n));
+ }
+}
+
+void CPUCache::StealFromOtherCache(int cpu, int max_populated_cpu,
+ size_t bytes) {
+ constexpr double kCacheMissThreshold = 0.80;
+
+ const CpuCacheMissStats dest_misses = GetIntervalCacheMissStats(cpu);
+
+ // If both underflows and overflows are 0, we should not need to steal.
+ if (dest_misses.underflows == 0 && dest_misses.overflows == 0) return;
+
+ size_t acquired = 0;
+
+ // We use last_cpu_cache_steal_ as a hint to start our search for cpu ids to
+ // steal from so that we can iterate through the cpus in a nice round-robin
+ // fashion.
+ int src_cpu = std::min(last_cpu_cache_steal_.load(std::memory_order_relaxed),
+ max_populated_cpu);
+
+  // We iterate through at most max_populated_cpu cpus to steal from.
+  // max_populated_cpu records the highest cpu id that has been populated. Note
+  // that any intermediate changes since max_populated_cpu was measured
+ // may have populated higher cpu ids, but we do not include those in the
+ // search. The approximation prevents us from doing another pass through the
+ // cpus to just find the latest populated cpu id.
+ //
+ // We break from the loop once we iterate through all the cpus once, or if the
+ // total number of acquired bytes is higher than or equal to the desired bytes
+ // we want to steal.
+ for (int cpu_offset = 1; cpu_offset <= max_populated_cpu && acquired < bytes;
+ ++cpu_offset) {
+ if (--src_cpu < 0) {
+ src_cpu = max_populated_cpu;
+ }
+ ASSERT(0 <= src_cpu);
+ ASSERT(src_cpu <= max_populated_cpu);
+
+ // We do not steal from the same CPU. Maybe we can explore combining this
+ // with stealing from the same CPU later.
+ if (src_cpu == cpu) continue;
+
+ // We do not steal from the cache that hasn't been populated yet.
+ if (!HasPopulated(src_cpu)) continue;
+
+ // We do not steal from cache that has capacity less than our lower
+ // capacity threshold.
+ if (Capacity(src_cpu) <
+ kCacheCapacityThreshold * Parameters::max_per_cpu_cache_size())
+ continue;
+
+ const CpuCacheMissStats src_misses = GetIntervalCacheMissStats(src_cpu);
+
+ // If underflows and overflows from the source cpu are higher, we do not
+ // steal from that cache. We consider the cache as a candidate to steal from
+ // only when its misses are lower than 0.8x that of the dest cache.
+ if (src_misses.underflows > kCacheMissThreshold * dest_misses.underflows ||
+ src_misses.overflows > kCacheMissThreshold * dest_misses.overflows)
+ continue;
+
+ size_t start_cl =
+ resize_[src_cpu].last_steal.load(std::memory_order_relaxed);
+
+ ASSERT(start_cl < kNumClasses);
+ ASSERT(0 < start_cl);
+ size_t source_cl = start_cl;
+ for (size_t offset = 1; offset < kNumClasses; ++offset) {
+ source_cl = start_cl + offset;
+ if (source_cl >= kNumClasses) {
+ source_cl -= kNumClasses - 1;
+ }
+ ASSERT(0 < source_cl);
+ ASSERT(source_cl < kNumClasses);
+
+ const size_t capacity = freelist_.Capacity(src_cpu, source_cl);
+ if (capacity == 0) {
+ // Nothing to steal.
+ continue;
+ }
+ const size_t length = freelist_.Length(src_cpu, source_cl);
+
+ // TODO(vgogte): Currently, scoring is similar to stealing from the
+ // same cpu in CpuCache::Steal(). Revisit this later to tune the
+ // knobs.
+ const size_t batch_length =
+ Static::sizemap().num_objects_to_move(source_cl);
+ size_t size = Static::sizemap().class_to_size(source_cl);
+
+ // Clock-like algorithm to prioritize size classes for shrinking.
+ //
+ // Each size class has quiescent ticks counter which is incremented as we
+ // pass it, the counter is reset to 0 in UpdateCapacity on grow.
+ // If the counter value is 0, then we've just tried to grow the size
+ // class, so it makes little sense to shrink it back. The higher counter
+ // value the longer ago we grew the list and the more probable it is that
+ // the full capacity is unused.
+ //
+      // Then, we calculate a "shrinking score"; the higher the score, the less
+ // we want to shrink this size class. The score is considerably skewed
+ // towards larger size classes: smaller classes are usually used more
+ // actively and we also benefit less from shrinking smaller classes (steal
+ // less capacity). Then, we also avoid shrinking full freelists as we will
+ // need to evict an object and then go to the central freelist to return
+ // it. Then, we also avoid shrinking freelists that are just above batch
+ // size, because shrinking them will disable transfer cache.
+ //
+ // Finally, we shrink if the ticks counter is >= the score.
+ uint32_t qticks = resize_[src_cpu].per_class[source_cl].Tick();
+ uint32_t score = 0;
+ // Note: the following numbers are based solely on intuition, common sense
+ // and benchmarking results.
+ if (size <= 144) {
+ score = 2 + (length >= capacity) +
+ (length >= batch_length && length < 2 * batch_length);
+ } else if (size <= 1024) {
+ score = 1 + (length >= capacity) +
+ (length >= batch_length && length < 2 * batch_length);
+ } else if (size <= (64 << 10)) {
+ score = (length >= capacity);
+ }
+ if (score > qticks) {
+ continue;
+ }
+
+ // Finally, try to shrink (can fail if we were migrated).
+ // We always shrink by 1 object. The idea is that inactive lists will be
+ // shrunk to zero eventually anyway (or they just would not grow in the
+ // first place), but for active lists it does not make sense to
+ // aggressively shuffle capacity all the time.
+ //
+ // If the list is full, ShrinkOtherCache first tries to pop enough items
+ // to make space and then shrinks the capacity.
+ // TODO(vgogte): Maybe we can steal more from a single list to avoid
+ // frequent locking overhead.
+ {
+ absl::base_internal::SpinLockHolder h(&resize_[src_cpu].lock);
+ if (freelist_.ShrinkOtherCache(src_cpu, source_cl, 1, nullptr,
+ ShrinkHandler) == 1) {
+ acquired += size;
+ resize_[src_cpu].capacity.fetch_sub(size, std::memory_order_relaxed);
+ }
+ }
+
+ if (acquired >= bytes) {
+ break;
+ }
+ }
+ resize_[cpu].last_steal.store(source_cl, std::memory_order_relaxed);
+ }
+ // Record the last cpu id we stole from, which would provide a hint to the
+ // next time we iterate through the cpus for stealing.
+ last_cpu_cache_steal_.store(src_cpu, std::memory_order_relaxed);
+
+ // Increment the capacity of the destination cpu cache by the amount of bytes
+ // acquired from source caches.
+ if (acquired) {
+ size_t before = resize_[cpu].available.load(std::memory_order_relaxed);
+ size_t bytes_with_stolen;
+ do {
+ bytes_with_stolen = before + acquired;
+ } while (!resize_[cpu].available.compare_exchange_weak(
+ before, bytes_with_stolen, std::memory_order_relaxed,
+ std::memory_order_relaxed));
+ resize_[cpu].capacity.fetch_add(acquired, std::memory_order_relaxed);
+ }
+}
+
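The clock-style scoring restored in StealFromOtherCache() above decides whether a candidate freelist may be shrunk: a list is only shrunk once its quiescent-tick counter reaches the score, so small, heavily used classes (higher scores) are touched last. The snippet below recomputes that score for a couple of hypothetical freelists; the size breakpoints (144, 1024, 64 KiB) match the code above, while the lengths, capacities, and batch sizes are made-up illustration data.

// Standalone recomputation of the "shrinking score" heuristic.
#include <cstddef>
#include <cstdint>
#include <cstdio>

uint32_t ShrinkScore(size_t size, size_t length, size_t capacity,
                     size_t batch_length) {
  uint32_t score = 0;
  if (size <= 144) {
    score = 2 + (length >= capacity) +
            (length >= batch_length && length < 2 * batch_length);
  } else if (size <= 1024) {
    score = 1 + (length >= capacity) +
            (length >= batch_length && length < 2 * batch_length);
  } else if (size <= (64 << 10)) {
    score = (length >= capacity);
  }
  return score;
}

int main() {
  // A small, full freelist is protected by a high score; a large, mostly
  // empty one scores 0 and is shrunk as soon as its tick counter allows.
  std::printf("8-byte full list: score=%u\n",
              ShrinkScore(/*size=*/8, /*length=*/64, /*capacity=*/64,
                          /*batch_length=*/32));
  std::printf("2 KiB sparse list: score=%u\n",
              ShrinkScore(/*size=*/2048, /*length=*/3, /*capacity=*/8,
                          /*batch_length=*/4));
  return 0;
}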
// There are rather a lot of policy knobs we could tweak here.
size_t CPUCache::Steal(int cpu, size_t dest_cl, size_t bytes,
ObjectsToReturn *to_return) {
@@ -734,7 +734,7 @@ size_t CPUCache::Steal(int cpu, size_t dest_cl, size_t bytes,
acquired += size;
}
- if (cpu != freelist_.GetCurrentVirtualCpuUnsafe() || acquired >= bytes) {
+ if (cpu != freelist_.GetCurrentVirtualCpuUnsafe() || acquired >= bytes) {
// can't steal any more or don't need to
break;
}
@@ -762,28 +762,28 @@ int CPUCache::Overflow(void *ptr, size_t cl, int cpu) {
total += count;
static_assert(ABSL_ARRAYSIZE(batch) >= kMaxObjectsToMove,
"not enough space in batch");
- Static::transfer_cache().InsertRange(cl, absl::Span<void *>(batch, count));
+ Static::transfer_cache().InsertRange(cl, absl::Span<void *>(batch, count));
if (count != batch_length) break;
count = 0;
- } while (total < target && cpu == freelist_.GetCurrentVirtualCpuUnsafe());
+ } while (total < target && cpu == freelist_.GetCurrentVirtualCpuUnsafe());
tracking::Report(kFreeTruncations, cl, 1);
return 1;
}
-uint64_t CPUCache::Allocated(int target_cpu) const {
- ASSERT(target_cpu >= 0);
- if (!HasPopulated(target_cpu)) {
- return 0;
- }
-
- uint64_t total = 0;
- for (int cl = 1; cl < kNumClasses; cl++) {
- int size = Static::sizemap().class_to_size(cl);
- total += size * freelist_.Capacity(target_cpu, cl);
- }
- return total;
-}
-
+uint64_t CPUCache::Allocated(int target_cpu) const {
+ ASSERT(target_cpu >= 0);
+ if (!HasPopulated(target_cpu)) {
+ return 0;
+ }
+
+ uint64_t total = 0;
+ for (int cl = 1; cl < kNumClasses; cl++) {
+ int size = Static::sizemap().class_to_size(cl);
+ total += size * freelist_.Capacity(target_cpu, cl);
+ }
+ return total;
+}
+
uint64_t CPUCache::UsedBytes(int target_cpu) const {
ASSERT(target_cpu >= 0);
if (!HasPopulated(target_cpu)) {
@@ -834,10 +834,10 @@ uint64_t CPUCache::Unallocated(int cpu) const {
return resize_[cpu].available.load(std::memory_order_relaxed);
}
-uint64_t CPUCache::Capacity(int cpu) const {
- return resize_[cpu].capacity.load(std::memory_order_relaxed);
-}
-
+uint64_t CPUCache::Capacity(int cpu) const {
+ return resize_[cpu].capacity.load(std::memory_order_relaxed);
+}
+
uint64_t CPUCache::CacheLimit() const {
return Parameters::max_per_cpu_cache_size();
}
@@ -858,7 +858,7 @@ static void DrainHandler(void *arg, size_t cl, void **batch, size_t count,
ctx->available->fetch_add(cap * size, std::memory_order_relaxed);
for (size_t i = 0; i < count; i += batch_length) {
size_t n = std::min(batch_length, count - i);
- Static::transfer_cache().InsertRange(cl, absl::Span<void *>(batch + i, n));
+ Static::transfer_cache().InsertRange(cl, absl::Span<void *>(batch + i, n));
}
}
@@ -874,101 +874,101 @@ uint64_t CPUCache::Reclaim(int cpu) {
DrainContext ctx{&resize_[cpu].available, 0};
freelist_.Drain(cpu, &ctx, DrainHandler);
-
- // Record that the reclaim occurred for this CPU.
- resize_[cpu].num_reclaims.store(
- resize_[cpu].num_reclaims.load(std::memory_order_relaxed) + 1,
- std::memory_order_relaxed);
+
+ // Record that the reclaim occurred for this CPU.
+ resize_[cpu].num_reclaims.store(
+ resize_[cpu].num_reclaims.load(std::memory_order_relaxed) + 1,
+ std::memory_order_relaxed);
return ctx.bytes;
}
-uint64_t CPUCache::GetNumReclaims(int cpu) const {
- return resize_[cpu].num_reclaims.load(std::memory_order_relaxed);
-}
-
-void CPUCache::RecordCacheMissStat(const int cpu, const bool is_malloc) {
- CPUCache &cpu_cache = Static::cpu_cache();
- if (is_malloc) {
- cpu_cache.resize_[cpu].total_underflows.fetch_add(
- 1, std::memory_order_relaxed);
- } else {
- cpu_cache.resize_[cpu].total_overflows.fetch_add(1,
- std::memory_order_relaxed);
- }
-}
-
-CPUCache::CpuCacheMissStats CPUCache::GetReclaimCacheMissStats(int cpu) const {
- CpuCacheMissStats stats;
- size_t total_underflows =
- resize_[cpu].total_underflows.load(std::memory_order_relaxed);
- size_t prev_reclaim_underflows =
- resize_[cpu].reclaim_underflows.load(std::memory_order_relaxed);
- // Takes a snapshot of underflows at the end of this interval so that we can
- // calculate the misses that occurred in the next interval.
- //
- // Reclaim occurs on a single thread. So, a relaxed store to the reclaim
- // underflow stat is safe.
- resize_[cpu].reclaim_underflows.store(total_underflows,
- std::memory_order_relaxed);
-
- // In case of a size_t overflow, we wrap around to 0.
- stats.underflows = total_underflows > prev_reclaim_underflows
- ? total_underflows - prev_reclaim_underflows
- : 0;
-
- size_t total_overflows =
- resize_[cpu].total_overflows.load(std::memory_order_relaxed);
- size_t prev_reclaim_overflows =
- resize_[cpu].reclaim_overflows.load(std::memory_order_relaxed);
- // Takes a snapshot of overflows at the end of this interval so that we can
- // calculate the misses that occurred in the next interval.
- //
- // Reclaim occurs on a single thread. So, a relaxed store to the reclaim
- // overflow stat is safe.
- resize_[cpu].reclaim_overflows.store(total_overflows,
- std::memory_order_relaxed);
-
- // In case of a size_t overflow, we wrap around to 0.
- stats.overflows = total_overflows > prev_reclaim_overflows
- ? total_overflows - prev_reclaim_overflows
- : 0;
-
- return stats;
-}
-
-CPUCache::CpuCacheMissStats CPUCache::GetIntervalCacheMissStats(int cpu) const {
- CpuCacheMissStats stats;
- size_t total_underflows =
- resize_[cpu].total_underflows.load(std::memory_order_relaxed);
- size_t shuffle_underflows =
- resize_[cpu].shuffle_underflows.load(std::memory_order_relaxed);
- // In case of a size_t overflow, we wrap around to 0.
- stats.underflows = total_underflows > shuffle_underflows
- ? total_underflows - shuffle_underflows
- : 0;
-
- size_t total_overflows =
- resize_[cpu].total_overflows.load(std::memory_order_relaxed);
- size_t shuffle_overflows =
- resize_[cpu].shuffle_overflows.load(std::memory_order_relaxed);
- // In case of a size_t overflow, we wrap around to 0.
- stats.overflows = total_overflows > shuffle_overflows
- ? total_overflows - shuffle_overflows
- : 0;
-
- return stats;
-}
-
-CPUCache::CpuCacheMissStats CPUCache::GetTotalCacheMissStats(int cpu) const {
- CpuCacheMissStats stats;
- stats.underflows =
- resize_[cpu].total_underflows.load(std::memory_order_relaxed);
- stats.overflows =
- resize_[cpu].total_overflows.load(std::memory_order_relaxed);
- return stats;
-}
-
-void CPUCache::Print(Printer *out) const {
+uint64_t CPUCache::GetNumReclaims(int cpu) const {
+ return resize_[cpu].num_reclaims.load(std::memory_order_relaxed);
+}
+
+void CPUCache::RecordCacheMissStat(const int cpu, const bool is_malloc) {
+ CPUCache &cpu_cache = Static::cpu_cache();
+ if (is_malloc) {
+ cpu_cache.resize_[cpu].total_underflows.fetch_add(
+ 1, std::memory_order_relaxed);
+ } else {
+ cpu_cache.resize_[cpu].total_overflows.fetch_add(1,
+ std::memory_order_relaxed);
+ }
+}
+
+CPUCache::CpuCacheMissStats CPUCache::GetReclaimCacheMissStats(int cpu) const {
+ CpuCacheMissStats stats;
+ size_t total_underflows =
+ resize_[cpu].total_underflows.load(std::memory_order_relaxed);
+ size_t prev_reclaim_underflows =
+ resize_[cpu].reclaim_underflows.load(std::memory_order_relaxed);
+ // Takes a snapshot of underflows at the end of this interval so that we can
+ // calculate the misses that occurred in the next interval.
+ //
+ // Reclaim occurs on a single thread. So, a relaxed store to the reclaim
+ // underflow stat is safe.
+ resize_[cpu].reclaim_underflows.store(total_underflows,
+ std::memory_order_relaxed);
+
+ // In case of a size_t overflow, we wrap around to 0.
+ stats.underflows = total_underflows > prev_reclaim_underflows
+ ? total_underflows - prev_reclaim_underflows
+ : 0;
+
+ size_t total_overflows =
+ resize_[cpu].total_overflows.load(std::memory_order_relaxed);
+ size_t prev_reclaim_overflows =
+ resize_[cpu].reclaim_overflows.load(std::memory_order_relaxed);
+ // Takes a snapshot of overflows at the end of this interval so that we can
+ // calculate the misses that occurred in the next interval.
+ //
+ // Reclaim occurs on a single thread. So, a relaxed store to the reclaim
+ // overflow stat is safe.
+ resize_[cpu].reclaim_overflows.store(total_overflows,
+ std::memory_order_relaxed);
+
+ // In case of a size_t overflow, we wrap around to 0.
+ stats.overflows = total_overflows > prev_reclaim_overflows
+ ? total_overflows - prev_reclaim_overflows
+ : 0;
+
+ return stats;
+}
+
+CPUCache::CpuCacheMissStats CPUCache::GetIntervalCacheMissStats(int cpu) const {
+ CpuCacheMissStats stats;
+ size_t total_underflows =
+ resize_[cpu].total_underflows.load(std::memory_order_relaxed);
+ size_t shuffle_underflows =
+ resize_[cpu].shuffle_underflows.load(std::memory_order_relaxed);
+ // In case of a size_t overflow, we wrap around to 0.
+ stats.underflows = total_underflows > shuffle_underflows
+ ? total_underflows - shuffle_underflows
+ : 0;
+
+ size_t total_overflows =
+ resize_[cpu].total_overflows.load(std::memory_order_relaxed);
+ size_t shuffle_overflows =
+ resize_[cpu].shuffle_overflows.load(std::memory_order_relaxed);
+ // In case of a size_t overflow, we wrap around to 0.
+ stats.overflows = total_overflows > shuffle_overflows
+ ? total_overflows - shuffle_overflows
+ : 0;
+
+ return stats;
+}
+
+CPUCache::CpuCacheMissStats CPUCache::GetTotalCacheMissStats(int cpu) const {
+ CpuCacheMissStats stats;
+ stats.underflows =
+ resize_[cpu].total_underflows.load(std::memory_order_relaxed);
+ stats.overflows =
+ resize_[cpu].total_overflows.load(std::memory_order_relaxed);
+ return stats;
+}
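GetReclaimCacheMissStats() relies on a snapshot/delta pattern: the total miss counters only ever grow, and each call reports the misses accumulated since the previous call by saving the current total as the new snapshot. A minimal sketch of that pattern follows, assuming a hypothetical MissCounter type (not a tcmalloc API):

    #include <atomic>
    #include <cstddef>

    struct MissCounter {
      std::atomic<size_t> total{0};     // bumped on every miss
      std::atomic<size_t> snapshot{0};  // value of `total` at the last read

      // Returns misses since the previous call and advances the snapshot.
      size_t DeltaSinceLastRead() {
        size_t t = total.load(std::memory_order_relaxed);
        size_t prev = snapshot.load(std::memory_order_relaxed);
        // Only the reclaim path updates the snapshot, and it runs on a single
        // thread, so a relaxed store suffices (mirroring the comment above).
        snapshot.store(t, std::memory_order_relaxed);
        return t > prev ? t - prev : 0;  // clamp to 0 on counter wrap
      }
    };

GetIntervalCacheMissStats() computes the same kind of difference against the shuffle_* snapshots, presumably leaving the snapshot updates to the shuffle path (not shown in this hunk).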
+
+void CPUCache::Print(Printer *out) const {
out->printf("------------------------------------------------\n");
out->printf("Bytes in per-CPU caches (per cpu limit: %" PRIu64 " bytes)\n",
Static::cpu_cache().CacheLimit());
@@ -990,23 +990,23 @@ void CPUCache::Print(Printer *out) const {
CPU_ISSET(cpu, &allowed_cpus) ? " active" : "",
populated ? " populated" : "");
}
-
- out->printf("------------------------------------------------\n");
- out->printf("Number of per-CPU cache underflows, overflows and reclaims\n");
- out->printf("------------------------------------------------\n");
- for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
- ++cpu) {
- CpuCacheMissStats miss_stats = GetTotalCacheMissStats(cpu);
- uint64_t reclaims = GetNumReclaims(cpu);
- out->printf(
- "cpu %3d:"
- "%12" PRIu64
- " underflows,"
- "%12" PRIu64
- " overflows,"
- "%12" PRIu64 " reclaims\n",
- cpu, miss_stats.underflows, miss_stats.overflows, reclaims);
- }
+
+ out->printf("------------------------------------------------\n");
+ out->printf("Number of per-CPU cache underflows, overflows and reclaims\n");
+ out->printf("------------------------------------------------\n");
+ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
+ ++cpu) {
+ CpuCacheMissStats miss_stats = GetTotalCacheMissStats(cpu);
+ uint64_t reclaims = GetNumReclaims(cpu);
+ out->printf(
+ "cpu %3d:"
+ "%12" PRIu64
+ " underflows,"
+ "%12" PRIu64
+ " overflows,"
+ "%12" PRIu64 " reclaims\n",
+ cpu, miss_stats.underflows, miss_stats.overflows, reclaims);
+ }
}
void CPUCache::PrintInPbtxt(PbtxtRegion *region) const {
@@ -1018,33 +1018,33 @@ void CPUCache::PrintInPbtxt(PbtxtRegion *region) const {
uint64_t rbytes = UsedBytes(cpu);
bool populated = HasPopulated(cpu);
uint64_t unallocated = Unallocated(cpu);
- CpuCacheMissStats miss_stats = GetTotalCacheMissStats(cpu);
- uint64_t reclaims = GetNumReclaims(cpu);
+ CpuCacheMissStats miss_stats = GetTotalCacheMissStats(cpu);
+ uint64_t reclaims = GetNumReclaims(cpu);
entry.PrintI64("cpu", uint64_t(cpu));
entry.PrintI64("used", rbytes);
entry.PrintI64("unused", unallocated);
entry.PrintBool("active", CPU_ISSET(cpu, &allowed_cpus));
entry.PrintBool("populated", populated);
- entry.PrintI64("underflows", miss_stats.underflows);
- entry.PrintI64("overflows", miss_stats.overflows);
- entry.PrintI64("reclaims", reclaims);
- }
-}
-
-void CPUCache::AcquireInternalLocks() {
- for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
- ++cpu) {
- resize_[cpu].lock.Lock();
- }
-}
-
-void CPUCache::ReleaseInternalLocks() {
- for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
- ++cpu) {
- resize_[cpu].lock.Unlock();
+ entry.PrintI64("underflows", miss_stats.underflows);
+ entry.PrintI64("overflows", miss_stats.overflows);
+ entry.PrintI64("reclaims", reclaims);
}
}
+void CPUCache::AcquireInternalLocks() {
+ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
+ ++cpu) {
+ resize_[cpu].lock.Lock();
+ }
+}
+
+void CPUCache::ReleaseInternalLocks() {
+ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
+ ++cpu) {
+ resize_[cpu].lock.Unlock();
+ }
+}
+
void CPUCache::PerClassResizeInfo::Init() {
state_.store(0, std::memory_order_relaxed);
}
@@ -1077,17 +1077,17 @@ uint32_t CPUCache::PerClassResizeInfo::Tick() {
return state.quiescent_ticks - 1;
}
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-extern "C" int RunningOnValgrind();
-#endif
-
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+extern "C" int RunningOnValgrind();
+#endif
+
static void ActivatePerCPUCaches() {
- if (tcmalloc::tcmalloc_internal::Static::CPUCacheActive()) {
- // Already active.
- return;
- }
-
-#ifdef ABSL_HAVE_THREAD_SANITIZER
+ if (tcmalloc::tcmalloc_internal::Static::CPUCacheActive()) {
+ // Already active.
+ return;
+ }
+
+#ifdef ABSL_HAVE_THREAD_SANITIZER
// RunningOnValgrind is a proxy for "is something intercepting malloc."
//
// If Valgrind, et. al., are in use, TCMalloc isn't in use and we shouldn't
@@ -1095,7 +1095,7 @@ static void ActivatePerCPUCaches() {
if (RunningOnValgrind()) {
return;
}
-#endif
+#endif
if (Parameters::per_cpu_caches() && subtle::percpu::IsFast()) {
Static::InitIfNecessary();
Static::cpu_cache().Activate(CPUCache::ActivationMode::FastPathOn);
@@ -1114,27 +1114,27 @@ class PerCPUInitializer {
};
static PerCPUInitializer module_enter_exit;
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
-
-extern "C" void TCMalloc_Internal_ForceCpuCacheActivation() {
- tcmalloc::tcmalloc_internal::ActivatePerCPUCaches();
-}
+GOOGLE_MALLOC_SECTION_END
+extern "C" void TCMalloc_Internal_ForceCpuCacheActivation() {
+ tcmalloc::tcmalloc_internal::ActivatePerCPUCaches();
+}
+
extern "C" bool MallocExtension_Internal_GetPerCpuCachesActive() {
- return tcmalloc::tcmalloc_internal::Static::CPUCacheActive();
-}
-
-extern "C" void MallocExtension_Internal_DeactivatePerCpuCaches() {
- tcmalloc::tcmalloc_internal::Parameters::set_per_cpu_caches(false);
- tcmalloc::tcmalloc_internal::Static::DeactivateCPUCache();
+ return tcmalloc::tcmalloc_internal::Static::CPUCacheActive();
}
+extern "C" void MallocExtension_Internal_DeactivatePerCpuCaches() {
+ tcmalloc::tcmalloc_internal::Parameters::set_per_cpu_caches(false);
+ tcmalloc::tcmalloc_internal::Static::DeactivateCPUCache();
+}
+
extern "C" int32_t MallocExtension_Internal_GetMaxPerCpuCacheSize() {
- return tcmalloc::tcmalloc_internal::Parameters::max_per_cpu_cache_size();
+ return tcmalloc::tcmalloc_internal::Parameters::max_per_cpu_cache_size();
}
extern "C" void MallocExtension_Internal_SetMaxPerCpuCacheSize(int32_t value) {
- tcmalloc::tcmalloc_internal::Parameters::set_max_per_cpu_cache_size(value);
+ tcmalloc::tcmalloc_internal::Parameters::set_max_per_cpu_cache_size(value);
}
diff --git a/contrib/libs/tcmalloc/tcmalloc/cpu_cache.h b/contrib/libs/tcmalloc/tcmalloc/cpu_cache.h
index dab7d18910..c66bfa9f9f 100644
--- a/contrib/libs/tcmalloc/tcmalloc/cpu_cache.h
+++ b/contrib/libs/tcmalloc/tcmalloc/cpu_cache.h
@@ -32,14 +32,14 @@
#include "tcmalloc/thread_cache.h"
#include "tcmalloc/tracking.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
class CPUCache {
public:
- constexpr CPUCache() = default;
-
+ constexpr CPUCache() = default;
+
enum class ActivationMode {
FastPathOn,
FastPathOffTestOnly,
@@ -51,11 +51,11 @@ class CPUCache {
void Activate(ActivationMode mode);
// Allocate an object of the given size class. When allocation fails
- // (from this cache and after running Refill), OOMHandler(size) is
+ // (from this cache and after running Refill), OOMHandler(size) is
// called and its return value is returned from
- // Allocate. OOMHandler is used to parameterize out-of-memory
+ // Allocate. OOMHandler is used to parameterize out-of-memory
// handling (raising exception, returning nullptr, calling
- // new_handler or anything else). "Passing" OOMHandler in this way
+ // new_handler or anything else). "Passing" OOMHandler in this way
// allows Allocate to be used in tail-call position in fast-path,
// making Allocate use jump (tail-call) to slow path code.
template <void* OOMHandler(size_t)>
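As a usage sketch of the OOMHandler parameterization described above (mirroring the shape used by cpu_cache_test.cc later in this diff; the names ReturnNullOnOOM and AllocateOrNull are illustrative):

    #include "tcmalloc/cpu_cache.h"
    #include "tcmalloc/static_vars.h"

    namespace tcmalloc {
    namespace tcmalloc_internal {

    // OOM policy: make a failed allocation return nullptr instead of raising.
    void* ReturnNullOnOOM(size_t) { return nullptr; }

    void* AllocateOrNull(size_t cl) {
      // Allocate tail-calls into the slow path; if that also fails, it calls
      // ReturnNullOnOOM(size) and returns its result.
      return Static::cpu_cache().Allocate<ReturnNullOnOOM>(cl);
    }

    }  // namespace tcmalloc_internal
    }  // namespace tcmalloc

The test code below uses the same pattern, with the handler simply named OOMHandler.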
@@ -67,9 +67,9 @@ class CPUCache {
// Give the number of bytes in <cpu>'s cache
uint64_t UsedBytes(int cpu) const;
- // Give the allocated number of bytes in <cpu>'s cache
- uint64_t Allocated(int cpu) const;
-
+  // Give the number of allocated bytes in <cpu>'s cache
+ uint64_t Allocated(int cpu) const;
+
// Whether <cpu>'s cache has ever been populated with objects
bool HasPopulated(int cpu) const;
@@ -84,48 +84,48 @@ class CPUCache {
// Give the number of bytes unallocated to any sizeclass in <cpu>'s cache.
uint64_t Unallocated(int cpu) const;
- // Gives the total capacity of <cpu>'s cache in bytes.
- //
- // The total capacity of <cpu>'s cache should be equal to the sum of allocated
- // and unallocated bytes for that cache.
- uint64_t Capacity(int cpu) const;
-
+ // Gives the total capacity of <cpu>'s cache in bytes.
+ //
+ // The total capacity of <cpu>'s cache should be equal to the sum of allocated
+ // and unallocated bytes for that cache.
+ uint64_t Capacity(int cpu) const;
+
// Give the per-cpu limit of cache size.
uint64_t CacheLimit() const;
- // Shuffles per-cpu caches using the number of underflows and overflows that
- // occurred in the prior interval. It selects the top per-cpu caches
- // with highest misses as candidates, iterates through the other per-cpu
- // caches to steal capacity from them and adds the stolen bytes to the
- // available capacity of the per-cpu caches. May be called from any processor.
- //
- // TODO(vgogte): There are quite a few knobs that we can play around with in
- // ShuffleCpuCaches.
- void ShuffleCpuCaches();
-
- // Sets the lower limit on the capacity that can be stolen from the cpu cache.
- static constexpr double kCacheCapacityThreshold = 0.20;
-
- // Tries to steal <bytes> for the destination <cpu>. It iterates through the
- // the set of populated cpu caches and steals the bytes from them. A cpu is
- // considered a good candidate to steal from if:
- // (1) the cache is populated
- // (2) the numbers of underflows and overflows are both less than 0.8x those
- // of the destination per-cpu cache
- // (3) source cpu is not the same as the destination cpu
- // (4) capacity of the source cpu/cl is non-zero
- //
- // For a given source cpu, we iterate through the size classes to steal from
- // them. Currently, we use a similar clock-like algorithm from Steal() to
- // identify the cl to steal from.
- void StealFromOtherCache(int cpu, int max_populated_cpu, size_t bytes);
-
- // Tries to reclaim inactive per-CPU caches. It iterates through the set of
- // populated cpu caches and reclaims the caches that:
- // (1) had same number of used bytes since the last interval,
- // (2) had no change in the number of misses since the last interval.
- void TryReclaimingCaches();
-
+ // Shuffles per-cpu caches using the number of underflows and overflows that
+ // occurred in the prior interval. It selects the top per-cpu caches
+  // with the highest misses as candidates, iterates through the other per-cpu
+  // caches to steal capacity from them, and adds the stolen bytes to the
+ // available capacity of the per-cpu caches. May be called from any processor.
+ //
+ // TODO(vgogte): There are quite a few knobs that we can play around with in
+ // ShuffleCpuCaches.
+ void ShuffleCpuCaches();
+
+ // Sets the lower limit on the capacity that can be stolen from the cpu cache.
+ static constexpr double kCacheCapacityThreshold = 0.20;
+
+  // Tries to steal <bytes> for the destination <cpu>. It iterates through
+ // the set of populated cpu caches and steals the bytes from them. A cpu is
+ // considered a good candidate to steal from if:
+ // (1) the cache is populated
+ // (2) the numbers of underflows and overflows are both less than 0.8x those
+ // of the destination per-cpu cache
+ // (3) source cpu is not the same as the destination cpu
+ // (4) capacity of the source cpu/cl is non-zero
+ //
+ // For a given source cpu, we iterate through the size classes to steal from
+ // them. Currently, we use a similar clock-like algorithm from Steal() to
+ // identify the cl to steal from.
+ void StealFromOtherCache(int cpu, int max_populated_cpu, size_t bytes);
+
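The candidate criteria (1)-(4) above amount to a simple predicate. A hypothetical sketch is shown below (IsGoodStealCandidate does not exist in tcmalloc; the real checks live inside ShuffleCpuCaches()/StealFromOtherCache(), and criterion (4) is evaluated per size class during the steal itself):

    #include "tcmalloc/cpu_cache.h"

    namespace tcmalloc {
    namespace tcmalloc_internal {

    bool IsGoodStealCandidate(const CPUCache& cache, int src, int dst,
                              CPUCache::CpuCacheMissStats src_misses,
                              CPUCache::CpuCacheMissStats dst_misses) {
      if (src == dst) return false;                // (3)
      if (!cache.HasPopulated(src)) return false;  // (1)
      // (2): both miss counts must be below 0.8x of the destination's.
      if (src_misses.underflows >= 0.8 * dst_misses.underflows) return false;
      if (src_misses.overflows >= 0.8 * dst_misses.overflows) return false;
      return true;
    }

    }  // namespace tcmalloc_internal
    }  // namespace tcmalloc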
+ // Tries to reclaim inactive per-CPU caches. It iterates through the set of
+ // populated cpu caches and reclaims the caches that:
+  // (1) had the same number of used bytes since the last interval,
+ // (2) had no change in the number of misses since the last interval.
+ void TryReclaimingCaches();
+
// Empty out the cache on <cpu>; move all objects to the central
// cache. (If other threads run concurrently on that cpu, we can't
// guarantee it will be fully empty on return, but if the cpu is
@@ -133,9 +133,9 @@ class CPUCache {
// of bytes we sent back. This function is thread safe.
uint64_t Reclaim(int cpu);
- // Reports number of times the <cpu> has been reclaimed.
- uint64_t GetNumReclaims(int cpu) const;
-
+ // Reports number of times the <cpu> has been reclaimed.
+ uint64_t GetNumReclaims(int cpu) const;
+
// Determine number of bits we should use for allocating per-cpu cache
// The amount of per-cpu cache is 2 ^ kPerCpuShift
#if defined(TCMALLOC_SMALL_BUT_SLOW)
@@ -144,29 +144,29 @@ class CPUCache {
static constexpr size_t kPerCpuShift = 18;
#endif
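For reference, per the comment above, in the branch shown here (kPerCpuShift = 18) the amount of per-CPU cache is 2^18 bytes = 262,144 bytes = 256 KiB; the TCMALLOC_SMALL_BUT_SLOW value of the shift is elided by the hunk boundary.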
- struct CpuCacheMissStats {
- size_t underflows;
- size_t overflows;
- };
-
- // Reports total cache underflows and overflows for <cpu>.
- CpuCacheMissStats GetTotalCacheMissStats(int cpu) const;
-
- // Reports the cache underflows and overflows for <cpu> that were recorded at
- // the end of the previous interval. It also records current underflows and
- // overflows in the reclaim underflow and overflow stats.
- CpuCacheMissStats GetReclaimCacheMissStats(int cpu) const;
-
- // Reports cache underflows and overflows for <cpu> this interval.
- CpuCacheMissStats GetIntervalCacheMissStats(int cpu) const;
-
+ struct CpuCacheMissStats {
+ size_t underflows;
+ size_t overflows;
+ };
+
+ // Reports total cache underflows and overflows for <cpu>.
+ CpuCacheMissStats GetTotalCacheMissStats(int cpu) const;
+
+ // Reports the cache underflows and overflows for <cpu> that were recorded at
+ // the end of the previous interval. It also records current underflows and
+ // overflows in the reclaim underflow and overflow stats.
+ CpuCacheMissStats GetReclaimCacheMissStats(int cpu) const;
+
+ // Reports cache underflows and overflows for <cpu> this interval.
+ CpuCacheMissStats GetIntervalCacheMissStats(int cpu) const;
+
// Report statistics
- void Print(Printer* out) const;
+ void Print(Printer* out) const;
void PrintInPbtxt(PbtxtRegion* region) const;
- void AcquireInternalLocks();
- void ReleaseInternalLocks();
-
+ void AcquireInternalLocks();
+ void ReleaseInternalLocks();
+
private:
// Per-size-class freelist resizing info.
class PerClassResizeInfo {
@@ -195,7 +195,7 @@ class CPUCache {
"size mismatch");
};
- subtle::percpu::TcmallocSlab<kNumClasses> freelist_;
+ subtle::percpu::TcmallocSlab<kNumClasses> freelist_;
struct ResizeInfoUnpadded {
// cache space on this CPU we're not using. Modify atomically;
@@ -210,45 +210,45 @@ class CPUCache {
// For cross-cpu operations.
absl::base_internal::SpinLock lock;
PerClassResizeInfo per_class[kNumClasses];
- // tracks number of underflows on allocate.
- std::atomic<size_t> total_underflows;
- // tracks number of overflows on deallocate.
- std::atomic<size_t> total_overflows;
- // tracks number of underflows recorded as of the end of the last shuffle
- // interval.
- std::atomic<size_t> shuffle_underflows;
- // tracks number of overflows recorded as of the end of the last shuffle
- // interval.
- std::atomic<size_t> shuffle_overflows;
- // total cache space available on this CPU. This tracks the total
- // allocated and unallocated bytes on this CPU cache.
- std::atomic<size_t> capacity;
- // Number of underflows as of the end of the last resize interval.
- std::atomic<size_t> reclaim_underflows;
- // Number of overflows as of the end of the last resize interval.
- std::atomic<size_t> reclaim_overflows;
- // Used bytes in the cache as of the end of the last resize interval.
- std::atomic<uint64_t> reclaim_used_bytes;
- // Tracks number of times this CPU has been reclaimed.
- std::atomic<size_t> num_reclaims;
+ // tracks number of underflows on allocate.
+ std::atomic<size_t> total_underflows;
+ // tracks number of overflows on deallocate.
+ std::atomic<size_t> total_overflows;
+ // tracks number of underflows recorded as of the end of the last shuffle
+ // interval.
+ std::atomic<size_t> shuffle_underflows;
+ // tracks number of overflows recorded as of the end of the last shuffle
+ // interval.
+ std::atomic<size_t> shuffle_overflows;
+ // total cache space available on this CPU. This tracks the total
+ // allocated and unallocated bytes on this CPU cache.
+ std::atomic<size_t> capacity;
+ // Number of underflows as of the end of the last resize interval.
+ std::atomic<size_t> reclaim_underflows;
+ // Number of overflows as of the end of the last resize interval.
+ std::atomic<size_t> reclaim_overflows;
+ // Used bytes in the cache as of the end of the last resize interval.
+ std::atomic<uint64_t> reclaim_used_bytes;
+ // Tracks number of times this CPU has been reclaimed.
+ std::atomic<size_t> num_reclaims;
};
struct ResizeInfo : ResizeInfoUnpadded {
char pad[ABSL_CACHELINE_SIZE -
sizeof(ResizeInfoUnpadded) % ABSL_CACHELINE_SIZE];
};
// Tracking data for each CPU's cache resizing efforts.
- ResizeInfo* resize_ = nullptr;
-
+ ResizeInfo* resize_ = nullptr;
+
// Track whether we are lazily initializing slabs. We cannot use the latest
// value in Parameters, as it can change after initialization.
- bool lazy_slabs_ = false;
- // The maximum capacity of each size class within the slab.
- uint16_t max_capacity_[kNumClasses] = {0};
-
- // Provides a hint to StealFromOtherCache() so that we can steal from the
- // caches in a round-robin fashion.
- std::atomic<int> last_cpu_cache_steal_ = 0;
-
+ bool lazy_slabs_ = false;
+ // The maximum capacity of each size class within the slab.
+ uint16_t max_capacity_[kNumClasses] = {0};
+
+ // Provides a hint to StealFromOtherCache() so that we can steal from the
+ // caches in a round-robin fashion.
+ std::atomic<int> last_cpu_cache_steal_ = 0;
+
// Return a set of objects to be returned to the Transfer Cache.
static constexpr int kMaxToReturn = 16;
struct ObjectsToReturn {
@@ -256,17 +256,17 @@ class CPUCache {
int count = kMaxToReturn;
// The size class of the returned object. kNumClasses is the
// largest value that needs to be stored in cl.
- CompactSizeClass cl[kMaxToReturn];
+ CompactSizeClass cl[kMaxToReturn];
void* obj[kMaxToReturn];
};
- static size_t MaxCapacityHelper(size_t cl) {
- CPUCache& cpu_cache = Static::cpu_cache();
- // Heuristic that the CPUCache has been activated.
- ASSERT(cpu_cache.resize_ != nullptr);
- return cpu_cache.max_capacity_[cl];
- }
-
+ static size_t MaxCapacityHelper(size_t cl) {
+ CPUCache& cpu_cache = Static::cpu_cache();
+ // Heuristic that the CPUCache has been activated.
+ ASSERT(cpu_cache.resize_ != nullptr);
+ return cpu_cache.max_capacity_[cl];
+ }
+
void* Refill(int cpu, size_t cl);
// This is called after finding a full freelist when attempting to push <ptr>
@@ -292,12 +292,12 @@ class CPUCache {
// be freed.
size_t Steal(int cpu, size_t cl, size_t bytes, ObjectsToReturn* to_return);
- // Records a cache underflow or overflow on <cpu>, increments underflow or
- // overflow by 1.
- // <is_malloc> determines whether the associated count corresponds to an
- // underflow or overflow.
- void RecordCacheMissStat(const int cpu, const bool is_malloc);
-
+ // Records a cache underflow or overflow on <cpu>, increments underflow or
+ // overflow by 1.
+ // <is_malloc> determines whether the associated count corresponds to an
+ // underflow or overflow.
+ void RecordCacheMissStat(const int cpu, const bool is_malloc);
+
static void* NoopUnderflow(int cpu, size_t cl) { return nullptr; }
static int NoopOverflow(int cpu, size_t cl, void* item) { return -1; }
};
@@ -312,15 +312,15 @@ inline void* ABSL_ATTRIBUTE_ALWAYS_INLINE CPUCache::Allocate(size_t cl) {
// we've optimistically reported hit in Allocate, lets undo it and
// report miss instead.
tracking::Report(kMallocHit, cl, -1);
- void* ret = nullptr;
- if (Static::sharded_transfer_cache().should_use(cl)) {
- ret = Static::sharded_transfer_cache().Pop(cl);
- } else {
- tracking::Report(kMallocMiss, cl, 1);
- CPUCache& cache = Static::cpu_cache();
- cache.RecordCacheMissStat(cpu, true);
- ret = cache.Refill(cpu, cl);
- }
+ void* ret = nullptr;
+ if (Static::sharded_transfer_cache().should_use(cl)) {
+ ret = Static::sharded_transfer_cache().Pop(cl);
+ } else {
+ tracking::Report(kMallocMiss, cl, 1);
+ CPUCache& cache = Static::cpu_cache();
+ cache.RecordCacheMissStat(cpu, true);
+ ret = cache.Refill(cpu, cl);
+ }
if (ABSL_PREDICT_FALSE(ret == nullptr)) {
size_t size = Static::sizemap().class_to_size(cl);
return OOMHandler(size);
@@ -341,14 +341,14 @@ inline void ABSL_ATTRIBUTE_ALWAYS_INLINE CPUCache::Deallocate(void* ptr,
// When we reach here we've already optimistically bumped FreeHits.
// Fix that.
tracking::Report(kFreeHit, cl, -1);
- if (Static::sharded_transfer_cache().should_use(cl)) {
- Static::sharded_transfer_cache().Push(cl, ptr);
- return 1;
- }
+ if (Static::sharded_transfer_cache().should_use(cl)) {
+ Static::sharded_transfer_cache().Push(cl, ptr);
+ return 1;
+ }
tracking::Report(kFreeMiss, cl, 1);
- CPUCache& cache = Static::cpu_cache();
- cache.RecordCacheMissStat(cpu, false);
- return cache.Overflow(ptr, cl, cpu);
+ CPUCache& cache = Static::cpu_cache();
+ cache.RecordCacheMissStat(cpu, false);
+ return cache.Overflow(ptr, cl, cpu);
}
};
freelist_.Push(cl, ptr, Helper::Overflow);
@@ -361,7 +361,7 @@ inline bool UsePerCpuCache() {
return false;
}
- if (ABSL_PREDICT_TRUE(subtle::percpu::IsFastNoInit())) {
+ if (ABSL_PREDICT_TRUE(subtle::percpu::IsFastNoInit())) {
return true;
}
@@ -376,7 +376,7 @@ inline bool UsePerCpuCache() {
// If the per-CPU cache for a thread is not initialized, we push ourselves
// onto the slow path (if !defined(TCMALLOC_DEPRECATED_PERTHREAD)) until this
// occurs. See fast_alloc's use of TryRecordAllocationFast.
- if (ABSL_PREDICT_TRUE(subtle::percpu::IsFast())) {
+ if (ABSL_PREDICT_TRUE(subtle::percpu::IsFast())) {
ThreadCache::BecomeIdle();
return true;
}
@@ -384,7 +384,7 @@ inline bool UsePerCpuCache() {
return false;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_CPU_CACHE_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/cpu_cache_test.cc b/contrib/libs/tcmalloc/tcmalloc/cpu_cache_test.cc
index fd4282b9c3..8cecda36f3 100644
--- a/contrib/libs/tcmalloc/tcmalloc/cpu_cache_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/cpu_cache_test.cc
@@ -14,24 +14,24 @@
#include "tcmalloc/cpu_cache.h"
-#include <thread> // NOLINT(build/c++11)
-
+#include <thread> // NOLINT(build/c++11)
+
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/random/random.h"
-#include "absl/random/seed_sequences.h"
+#include "absl/random/random.h"
+#include "absl/random/seed_sequences.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/optimization.h"
#include "tcmalloc/internal/util.h"
#include "tcmalloc/parameters.h"
#include "tcmalloc/static_vars.h"
-#include "tcmalloc/testing/testutil.h"
+#include "tcmalloc/testing/testutil.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
-constexpr size_t kStressSlabs = 4;
+constexpr size_t kStressSlabs = 4;
void* OOMHandler(size_t) { return nullptr; }
TEST(CpuCacheTest, Metadata) {
@@ -69,9 +69,9 @@ TEST(CpuCacheTest, Metadata) {
int allowed_cpu_id;
const size_t kSizeClass = 3;
const size_t num_to_move = Static::sizemap().num_objects_to_move(kSizeClass);
- const size_t virtual_cpu_id_offset = subtle::percpu::UsingFlatVirtualCpus()
- ? offsetof(kernel_rseq, vcpu_id)
- : offsetof(kernel_rseq, cpu_id);
+ const size_t virtual_cpu_id_offset = subtle::percpu::UsingFlatVirtualCpus()
+ ? offsetof(kernel_rseq, vcpu_id)
+ : offsetof(kernel_rseq, cpu_id);
void* ptr;
{
// Restrict this thread to a single core while allocating and processing the
@@ -82,14 +82,14 @@ TEST(CpuCacheTest, Metadata) {
// pages to be faulted for those cores, leading to test flakiness.
tcmalloc_internal::ScopedAffinityMask mask(
tcmalloc_internal::AllowedCpus()[0]);
- allowed_cpu_id =
- subtle::percpu::GetCurrentVirtualCpuUnsafe(virtual_cpu_id_offset);
+ allowed_cpu_id =
+ subtle::percpu::GetCurrentVirtualCpuUnsafe(virtual_cpu_id_offset);
ptr = cache.Allocate<OOMHandler>(kSizeClass);
if (mask.Tampered() ||
- allowed_cpu_id !=
- subtle::percpu::GetCurrentVirtualCpuUnsafe(virtual_cpu_id_offset)) {
+ allowed_cpu_id !=
+ subtle::percpu::GetCurrentVirtualCpuUnsafe(virtual_cpu_id_offset)) {
return;
}
}
@@ -146,9 +146,9 @@ TEST(CpuCacheTest, Metadata) {
}
EXPECT_LE(cache.Unallocated(cpu), max_cpu_cache_size);
- EXPECT_EQ(cache.Capacity(cpu), max_cpu_cache_size);
- EXPECT_EQ(cache.Allocated(cpu) + cache.Unallocated(cpu),
- cache.Capacity(cpu));
+ EXPECT_EQ(cache.Capacity(cpu), max_cpu_cache_size);
+ EXPECT_EQ(cache.Allocated(cpu) + cache.Unallocated(cpu),
+ cache.Capacity(cpu));
}
for (int cl = 0; cl < kNumClasses; ++cl) {
@@ -180,420 +180,420 @@ TEST(CpuCacheTest, Metadata) {
}
}
-TEST(CpuCacheTest, CacheMissStats) {
- if (!subtle::percpu::IsFast()) {
- return;
- }
-
- const int num_cpus = absl::base_internal::NumCPUs();
-
- CPUCache& cache = Static::cpu_cache();
- // Since this test allocates memory, avoid activating the real fast path to
- // minimize allocations against the per-CPU cache.
- cache.Activate(CPUCache::ActivationMode::FastPathOffTestOnly);
-
- // The number of underflows and overflows must be zero for all the caches.
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- CPUCache::CpuCacheMissStats total_misses =
- cache.GetTotalCacheMissStats(cpu);
- CPUCache::CpuCacheMissStats interval_misses =
- cache.GetIntervalCacheMissStats(cpu);
- EXPECT_EQ(total_misses.underflows, 0);
- EXPECT_EQ(total_misses.overflows, 0);
- EXPECT_EQ(interval_misses.underflows, 0);
- EXPECT_EQ(interval_misses.overflows, 0);
- }
-
- int allowed_cpu_id;
- const size_t kSizeClass = 3;
- const size_t virtual_cpu_id_offset = subtle::percpu::UsingFlatVirtualCpus()
- ? offsetof(kernel_rseq, vcpu_id)
- : offsetof(kernel_rseq, cpu_id);
- void* ptr;
- {
- // Restrict this thread to a single core while allocating and processing the
- // slow path.
- //
- // TODO(b/151313823): Without this restriction, we may access--for reading
- // only--other slabs if we end up being migrated. These may cause huge
- // pages to be faulted for those cores, leading to test flakiness.
- tcmalloc_internal::ScopedAffinityMask mask(
- tcmalloc_internal::AllowedCpus()[0]);
- allowed_cpu_id =
- subtle::percpu::GetCurrentVirtualCpuUnsafe(virtual_cpu_id_offset);
-
- ptr = cache.Allocate<OOMHandler>(kSizeClass);
-
- if (mask.Tampered() ||
- allowed_cpu_id !=
- subtle::percpu::GetCurrentVirtualCpuUnsafe(virtual_cpu_id_offset)) {
- return;
- }
- }
-
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- CPUCache::CpuCacheMissStats total_misses =
- cache.GetTotalCacheMissStats(cpu);
- CPUCache::CpuCacheMissStats interval_misses =
- cache.GetIntervalCacheMissStats(cpu);
- if (cpu == allowed_cpu_id) {
- EXPECT_EQ(total_misses.underflows, 1);
- EXPECT_EQ(interval_misses.underflows, 1);
- } else {
- EXPECT_EQ(total_misses.underflows, 0);
- EXPECT_EQ(interval_misses.underflows, 0);
- }
- EXPECT_EQ(total_misses.overflows, 0);
- EXPECT_EQ(interval_misses.overflows, 0);
- }
-
- // Tear down.
- //
- // TODO(ckennelly): We're interacting with the real TransferCache.
- cache.Deallocate(ptr, kSizeClass);
-
- for (int i = 0; i < num_cpus; i++) {
- cache.Reclaim(i);
- }
-}
-
-static void ShuffleThread(const std::atomic<bool>& stop) {
- if (!subtle::percpu::IsFast()) {
- return;
- }
-
- CPUCache& cache = Static::cpu_cache();
- // Wake up every 10ms to shuffle the caches so that we can allow misses to
- // accumulate during that interval
- while (!stop) {
- cache.ShuffleCpuCaches();
- absl::SleepFor(absl::Milliseconds(10));
- }
-}
-
-static void StressThread(size_t thread_id, const std::atomic<bool>& stop) {
- if (!subtle::percpu::IsFast()) {
- return;
- }
-
- CPUCache& cache = Static::cpu_cache();
- std::vector<std::pair<size_t, void*>> blocks;
- absl::BitGen rnd;
- while (!stop) {
- const int what = absl::Uniform<int32_t>(rnd, 0, 2);
- if (what) {
- // Allocate an object for a class
- size_t cl = absl::Uniform<int32_t>(rnd, 1, kStressSlabs + 1);
- void* ptr = cache.Allocate<OOMHandler>(cl);
- blocks.emplace_back(std::make_pair(cl, ptr));
- } else {
- // Deallocate an object for a class
- if (!blocks.empty()) {
- cache.Deallocate(blocks.back().second, blocks.back().first);
- blocks.pop_back();
- }
- }
- }
-
- // Cleaup. Deallocate rest of the allocated memory.
- for (int i = 0; i < blocks.size(); i++) {
- cache.Deallocate(blocks[i].second, blocks[i].first);
- }
-}
-
-TEST(CpuCacheTest, StealCpuCache) {
- if (!subtle::percpu::IsFast()) {
- return;
- }
-
- CPUCache& cache = Static::cpu_cache();
- // Since this test allocates memory, avoid activating the real fast path to
- // minimize allocations against the per-CPU cache.
- cache.Activate(CPUCache::ActivationMode::FastPathOffTestOnly);
-
- std::vector<std::thread> threads;
- std::thread shuffle_thread;
- const int n_threads = absl::base_internal::NumCPUs();
- std::atomic<bool> stop(false);
-
- for (size_t t = 0; t < n_threads; ++t) {
- threads.push_back(std::thread(StressThread, t, std::ref(stop)));
- }
- shuffle_thread = std::thread(ShuffleThread, std::ref(stop));
-
- absl::SleepFor(absl::Seconds(5));
- stop = true;
- for (auto& t : threads) {
- t.join();
- }
- shuffle_thread.join();
-
- // Check that the total capacity is preserved after the shuffle.
- size_t capacity = 0;
- const int num_cpus = absl::base_internal::NumCPUs();
- const size_t kTotalCapacity = num_cpus * Parameters::max_per_cpu_cache_size();
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- EXPECT_EQ(cache.Allocated(cpu) + cache.Unallocated(cpu),
- cache.Capacity(cpu));
- capacity += cache.Capacity(cpu);
- }
- EXPECT_EQ(capacity, kTotalCapacity);
-
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- cache.Reclaim(cpu);
- }
-}
-
-// Runs a single allocate and deallocate operation to warm up the cache. Once a
-// few objects are allocated in the cold cache, we can shuffle cpu caches to
-// steal that capacity from the cold cache to the hot cache.
-static void ColdCacheOperations(int cpu_id, size_t size_class) {
- // Temporarily fake being on the given CPU.
- ScopedFakeCpuId fake_cpu_id(cpu_id);
-
- CPUCache& cache = Static::cpu_cache();
-#if TCMALLOC_PERCPU_USE_RSEQ
- if (subtle::percpu::UsingFlatVirtualCpus()) {
- subtle::percpu::__rseq_abi.vcpu_id = cpu_id;
- }
-#endif
-
- void* ptr = cache.Allocate<OOMHandler>(size_class);
- cache.Deallocate(ptr, size_class);
-}
-
-// Runs multiple allocate and deallocate operation on the cpu cache to collect
-// misses. Once we collect enough misses on this cache, we can shuffle cpu
-// caches to steal capacity from colder caches to the hot cache.
-static void HotCacheOperations(int cpu_id) {
- // Temporarily fake being on the given CPU.
- ScopedFakeCpuId fake_cpu_id(cpu_id);
-
- CPUCache& cache = Static::cpu_cache();
-#if TCMALLOC_PERCPU_USE_RSEQ
- if (subtle::percpu::UsingFlatVirtualCpus()) {
- subtle::percpu::__rseq_abi.vcpu_id = cpu_id;
- }
-#endif
-
- // Allocate and deallocate objects to make sure we have enough misses on the
- // cache. This will make sure we have sufficient disparity in misses between
- // the hotter and colder cache, and that we may be able to steal bytes from
- // the colder cache.
- for (size_t cl = 1; cl <= kStressSlabs; ++cl) {
- void* ptr = cache.Allocate<OOMHandler>(cl);
- cache.Deallocate(ptr, cl);
- }
-
- // We reclaim the cache to reset it so that we record underflows/overflows the
- // next time we allocate and deallocate objects. Without reclaim, the cache
- // would stay warmed up and it would take more time to drain the colder cache.
- cache.Reclaim(cpu_id);
-}
-
-TEST(CpuCacheTest, ColdHotCacheShuffleTest) {
- if (!subtle::percpu::IsFast()) {
- return;
- }
-
- CPUCache& cache = Static::cpu_cache();
- // Since this test allocates memory, avoid activating the real fast path to
- // minimize allocations against the per-CPU cache.
- cache.Activate(CPUCache::ActivationMode::FastPathOffTestOnly);
-
- constexpr int hot_cpu_id = 0;
- constexpr int cold_cpu_id = 1;
-
- const size_t max_cpu_cache_size = Parameters::max_per_cpu_cache_size();
-
- // Empirical tests suggest that we should be able to steal all the steal-able
- // capacity from colder cache in < 100 tries. Keeping enough buffer here to
- // make sure we steal from colder cache, while at the same time avoid timeouts
- // if something goes bad.
- constexpr int kMaxStealTries = 1000;
-
- // We allocate and deallocate a single highest cl object.
- // This makes sure that we have a single large object in the cache that faster
- // cache can steal.
- const size_t size_class = kNumClasses - 1;
-
- for (int num_tries = 0;
- num_tries < kMaxStealTries &&
- cache.Capacity(cold_cpu_id) >
- CPUCache::kCacheCapacityThreshold * max_cpu_cache_size;
- ++num_tries) {
- ColdCacheOperations(cold_cpu_id, size_class);
- HotCacheOperations(hot_cpu_id);
- cache.ShuffleCpuCaches();
-
- // Check that the capacity is preserved.
- EXPECT_EQ(cache.Allocated(cold_cpu_id) + cache.Unallocated(cold_cpu_id),
- cache.Capacity(cold_cpu_id));
- EXPECT_EQ(cache.Allocated(hot_cpu_id) + cache.Unallocated(hot_cpu_id),
- cache.Capacity(hot_cpu_id));
- }
-
- size_t cold_cache_capacity = cache.Capacity(cold_cpu_id);
- size_t hot_cache_capacity = cache.Capacity(hot_cpu_id);
-
- // Check that we drained cold cache to the lower capacity limit.
- // We also keep some tolerance, up to the largest class size, below the lower
- // capacity threshold that we can drain cold cache to.
- EXPECT_GT(cold_cache_capacity,
- CPUCache::kCacheCapacityThreshold * max_cpu_cache_size -
- Static::sizemap().class_to_size(kNumClasses - 1));
-
- // Check that we have at least stolen some capacity.
- EXPECT_GT(hot_cache_capacity, max_cpu_cache_size);
-
- // Perform a few more shuffles to make sure that lower cache capacity limit
- // has been reached for the cold cache. A few more shuffles should not
- // change the capacity of either of the caches.
- for (int i = 0; i < 100; ++i) {
- ColdCacheOperations(cold_cpu_id, size_class);
- HotCacheOperations(hot_cpu_id);
- cache.ShuffleCpuCaches();
-
- // Check that the capacity is preserved.
- EXPECT_EQ(cache.Allocated(cold_cpu_id) + cache.Unallocated(cold_cpu_id),
- cache.Capacity(cold_cpu_id));
- EXPECT_EQ(cache.Allocated(hot_cpu_id) + cache.Unallocated(hot_cpu_id),
- cache.Capacity(hot_cpu_id));
- }
-
- // Check that the capacity of cold and hot caches is same as before.
- EXPECT_EQ(cache.Capacity(cold_cpu_id), cold_cache_capacity);
- EXPECT_EQ(cache.Capacity(hot_cpu_id), hot_cache_capacity);
-
- // Make sure that the total capacity is preserved.
- EXPECT_EQ(cache.Capacity(cold_cpu_id) + cache.Capacity(hot_cpu_id),
- 2 * max_cpu_cache_size);
-
- // Reclaim caches.
- const int num_cpus = absl::base_internal::NumCPUs();
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- cache.Reclaim(cpu);
- }
-}
-
-TEST(CpuCacheTest, ReclaimCpuCache) {
- if (!subtle::percpu::IsFast()) {
- return;
- }
-
- CPUCache& cache = Static::cpu_cache();
- // Since this test allocates memory, avoid activating the real fast path to
- // minimize allocations against the per-CPU cache.
- cache.Activate(CPUCache::ActivationMode::FastPathOffTestOnly);
-
- // The number of underflows and overflows must be zero for all the caches.
- const int num_cpus = absl::base_internal::NumCPUs();
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
- // Check that reclaim miss metrics are reset.
- CPUCache::CpuCacheMissStats reclaim_misses =
- cache.GetReclaimCacheMissStats(cpu);
- EXPECT_EQ(reclaim_misses.underflows, 0);
- EXPECT_EQ(reclaim_misses.overflows, 0);
-
- // None of the caches should have been reclaimed yet.
- EXPECT_EQ(cache.GetNumReclaims(cpu), 0);
-
- // Check that caches are empty.
- uint64_t used_bytes = cache.UsedBytes(cpu);
- EXPECT_EQ(used_bytes, 0);
- }
-
- const size_t kSizeClass = 3;
-
- // We chose a different size class here so that we can populate different size
- // class slots and change the number of bytes used by the busy cache later in
- // our test.
- const size_t kBusySizeClass = 4;
-
- // Perform some operations to warm up caches and make sure they are populated.
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
- ColdCacheOperations(cpu, kSizeClass);
- EXPECT_TRUE(cache.HasPopulated(cpu));
- }
-
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
- CPUCache::CpuCacheMissStats misses_last_interval =
- cache.GetReclaimCacheMissStats(cpu);
- CPUCache::CpuCacheMissStats total_misses =
- cache.GetTotalCacheMissStats(cpu);
-
- // Misses since the last reclaim (i.e. since we initialized the caches)
- // should match the total miss metrics.
- EXPECT_EQ(misses_last_interval.underflows, total_misses.underflows);
- EXPECT_EQ(misses_last_interval.overflows, total_misses.overflows);
-
- // Caches should have non-zero used bytes.
- EXPECT_GT(cache.UsedBytes(cpu), 0);
- }
-
- cache.TryReclaimingCaches();
-
- // Miss metrics since the last interval were non-zero and the change in used
- // bytes was non-zero, so none of the caches should get reclaimed.
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
- // As no cache operations were performed since the last reclaim
- // operation, the reclaim misses captured during the last interval (i.e.
- // since the last reclaim) should be zero.
- CPUCache::CpuCacheMissStats reclaim_misses =
- cache.GetReclaimCacheMissStats(cpu);
- EXPECT_EQ(reclaim_misses.underflows, 0);
- EXPECT_EQ(reclaim_misses.overflows, 0);
-
- // None of the caches should have been reclaimed as the caches were
- // accessed in the previous interval.
- EXPECT_EQ(cache.GetNumReclaims(cpu), 0);
-
- // Caches should not have been reclaimed; used bytes should be non-zero.
- EXPECT_GT(cache.UsedBytes(cpu), 0);
- }
-
- absl::BitGen rnd;
- const int busy_cpu =
- absl::Uniform<int32_t>(rnd, 0, absl::base_internal::NumCPUs());
- const size_t prev_used = cache.UsedBytes(busy_cpu);
- ColdCacheOperations(busy_cpu, kBusySizeClass);
- EXPECT_GT(cache.UsedBytes(busy_cpu), prev_used);
-
- // Try reclaiming caches again.
- cache.TryReclaimingCaches();
-
- // All caches, except the busy cpu cache against which we performed some
- // operations in the previous interval, should have been reclaimed exactly
- // once.
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
- if (cpu == busy_cpu) {
- EXPECT_GT(cache.UsedBytes(cpu), 0);
- EXPECT_EQ(cache.GetNumReclaims(cpu), 0);
- } else {
- EXPECT_EQ(cache.UsedBytes(cpu), 0);
- EXPECT_EQ(cache.GetNumReclaims(cpu), 1);
- }
- }
-
- // Try reclaiming caches again.
- cache.TryReclaimingCaches();
-
- // All caches, including the busy cache, should have been reclaimed this
- // time. Note that the caches that were reclaimed in the previous interval
- // should not be reclaimed again and the number of reclaims reported for them
- // should still be one.
- for (int cpu = 0; cpu < num_cpus; ++cpu) {
- SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
- EXPECT_EQ(cache.UsedBytes(cpu), 0);
- EXPECT_EQ(cache.GetNumReclaims(cpu), 1);
- }
-}
-
+TEST(CpuCacheTest, CacheMissStats) {
+ if (!subtle::percpu::IsFast()) {
+ return;
+ }
+
+ const int num_cpus = absl::base_internal::NumCPUs();
+
+ CPUCache& cache = Static::cpu_cache();
+ // Since this test allocates memory, avoid activating the real fast path to
+ // minimize allocations against the per-CPU cache.
+ cache.Activate(CPUCache::ActivationMode::FastPathOffTestOnly);
+
+ // The number of underflows and overflows must be zero for all the caches.
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ CPUCache::CpuCacheMissStats total_misses =
+ cache.GetTotalCacheMissStats(cpu);
+ CPUCache::CpuCacheMissStats interval_misses =
+ cache.GetIntervalCacheMissStats(cpu);
+ EXPECT_EQ(total_misses.underflows, 0);
+ EXPECT_EQ(total_misses.overflows, 0);
+ EXPECT_EQ(interval_misses.underflows, 0);
+ EXPECT_EQ(interval_misses.overflows, 0);
+ }
+
+ int allowed_cpu_id;
+ const size_t kSizeClass = 3;
+ const size_t virtual_cpu_id_offset = subtle::percpu::UsingFlatVirtualCpus()
+ ? offsetof(kernel_rseq, vcpu_id)
+ : offsetof(kernel_rseq, cpu_id);
+ void* ptr;
+ {
+ // Restrict this thread to a single core while allocating and processing the
+ // slow path.
+ //
+ // TODO(b/151313823): Without this restriction, we may access--for reading
+ // only--other slabs if we end up being migrated. These may cause huge
+ // pages to be faulted for those cores, leading to test flakiness.
+ tcmalloc_internal::ScopedAffinityMask mask(
+ tcmalloc_internal::AllowedCpus()[0]);
+ allowed_cpu_id =
+ subtle::percpu::GetCurrentVirtualCpuUnsafe(virtual_cpu_id_offset);
+
+ ptr = cache.Allocate<OOMHandler>(kSizeClass);
+
+ if (mask.Tampered() ||
+ allowed_cpu_id !=
+ subtle::percpu::GetCurrentVirtualCpuUnsafe(virtual_cpu_id_offset)) {
+ return;
+ }
+ }
+
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ CPUCache::CpuCacheMissStats total_misses =
+ cache.GetTotalCacheMissStats(cpu);
+ CPUCache::CpuCacheMissStats interval_misses =
+ cache.GetIntervalCacheMissStats(cpu);
+ if (cpu == allowed_cpu_id) {
+ EXPECT_EQ(total_misses.underflows, 1);
+ EXPECT_EQ(interval_misses.underflows, 1);
+ } else {
+ EXPECT_EQ(total_misses.underflows, 0);
+ EXPECT_EQ(interval_misses.underflows, 0);
+ }
+ EXPECT_EQ(total_misses.overflows, 0);
+ EXPECT_EQ(interval_misses.overflows, 0);
+ }
+
+ // Tear down.
+ //
+ // TODO(ckennelly): We're interacting with the real TransferCache.
+ cache.Deallocate(ptr, kSizeClass);
+
+ for (int i = 0; i < num_cpus; i++) {
+ cache.Reclaim(i);
+ }
+}
+
+static void ShuffleThread(const std::atomic<bool>& stop) {
+ if (!subtle::percpu::IsFast()) {
+ return;
+ }
+
+ CPUCache& cache = Static::cpu_cache();
+ // Wake up every 10ms to shuffle the caches so that we can allow misses to
+  // accumulate during that interval.
+ while (!stop) {
+ cache.ShuffleCpuCaches();
+ absl::SleepFor(absl::Milliseconds(10));
+ }
+}
+
+static void StressThread(size_t thread_id, const std::atomic<bool>& stop) {
+ if (!subtle::percpu::IsFast()) {
+ return;
+ }
+
+ CPUCache& cache = Static::cpu_cache();
+ std::vector<std::pair<size_t, void*>> blocks;
+ absl::BitGen rnd;
+ while (!stop) {
+ const int what = absl::Uniform<int32_t>(rnd, 0, 2);
+ if (what) {
+ // Allocate an object for a class
+ size_t cl = absl::Uniform<int32_t>(rnd, 1, kStressSlabs + 1);
+ void* ptr = cache.Allocate<OOMHandler>(cl);
+ blocks.emplace_back(std::make_pair(cl, ptr));
+ } else {
+ // Deallocate an object for a class
+ if (!blocks.empty()) {
+ cache.Deallocate(blocks.back().second, blocks.back().first);
+ blocks.pop_back();
+ }
+ }
+ }
+
+  // Cleanup. Deallocate the rest of the allocated memory.
+ for (int i = 0; i < blocks.size(); i++) {
+ cache.Deallocate(blocks[i].second, blocks[i].first);
+ }
+}
+
+TEST(CpuCacheTest, StealCpuCache) {
+ if (!subtle::percpu::IsFast()) {
+ return;
+ }
+
+ CPUCache& cache = Static::cpu_cache();
+ // Since this test allocates memory, avoid activating the real fast path to
+ // minimize allocations against the per-CPU cache.
+ cache.Activate(CPUCache::ActivationMode::FastPathOffTestOnly);
+
+ std::vector<std::thread> threads;
+ std::thread shuffle_thread;
+ const int n_threads = absl::base_internal::NumCPUs();
+ std::atomic<bool> stop(false);
+
+ for (size_t t = 0; t < n_threads; ++t) {
+ threads.push_back(std::thread(StressThread, t, std::ref(stop)));
+ }
+ shuffle_thread = std::thread(ShuffleThread, std::ref(stop));
+
+ absl::SleepFor(absl::Seconds(5));
+ stop = true;
+ for (auto& t : threads) {
+ t.join();
+ }
+ shuffle_thread.join();
+
+ // Check that the total capacity is preserved after the shuffle.
+ size_t capacity = 0;
+ const int num_cpus = absl::base_internal::NumCPUs();
+ const size_t kTotalCapacity = num_cpus * Parameters::max_per_cpu_cache_size();
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ EXPECT_EQ(cache.Allocated(cpu) + cache.Unallocated(cpu),
+ cache.Capacity(cpu));
+ capacity += cache.Capacity(cpu);
+ }
+ EXPECT_EQ(capacity, kTotalCapacity);
+
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ cache.Reclaim(cpu);
+ }
+}
+
+// Runs a single allocate and deallocate operation to warm up the cache. Once a
+// few objects are allocated in the cold cache, we can shuffle cpu caches to
+// steal that capacity from the cold cache to the hot cache.
+static void ColdCacheOperations(int cpu_id, size_t size_class) {
+ // Temporarily fake being on the given CPU.
+ ScopedFakeCpuId fake_cpu_id(cpu_id);
+
+ CPUCache& cache = Static::cpu_cache();
+#if TCMALLOC_PERCPU_USE_RSEQ
+ if (subtle::percpu::UsingFlatVirtualCpus()) {
+ subtle::percpu::__rseq_abi.vcpu_id = cpu_id;
+ }
+#endif
+
+ void* ptr = cache.Allocate<OOMHandler>(size_class);
+ cache.Deallocate(ptr, size_class);
+}
+
+// Runs multiple allocate and deallocate operation on the cpu cache to collect
+// misses. Once we collect enough misses on this cache, we can shuffle cpu
+// caches to steal capacity from colder caches to the hot cache.
+static void HotCacheOperations(int cpu_id) {
+ // Temporarily fake being on the given CPU.
+ ScopedFakeCpuId fake_cpu_id(cpu_id);
+
+ CPUCache& cache = Static::cpu_cache();
+#if TCMALLOC_PERCPU_USE_RSEQ
+ if (subtle::percpu::UsingFlatVirtualCpus()) {
+ subtle::percpu::__rseq_abi.vcpu_id = cpu_id;
+ }
+#endif
+
+ // Allocate and deallocate objects to make sure we have enough misses on the
+ // cache. This will make sure we have sufficient disparity in misses between
+ // the hotter and colder cache, and that we may be able to steal bytes from
+ // the colder cache.
+ for (size_t cl = 1; cl <= kStressSlabs; ++cl) {
+ void* ptr = cache.Allocate<OOMHandler>(cl);
+ cache.Deallocate(ptr, cl);
+ }
+
+ // We reclaim the cache to reset it so that we record underflows/overflows the
+ // next time we allocate and deallocate objects. Without reclaim, the cache
+ // would stay warmed up and it would take more time to drain the colder cache.
+ cache.Reclaim(cpu_id);
+}
+
+TEST(CpuCacheTest, ColdHotCacheShuffleTest) {
+ if (!subtle::percpu::IsFast()) {
+ return;
+ }
+
+ CPUCache& cache = Static::cpu_cache();
+ // Since this test allocates memory, avoid activating the real fast path to
+ // minimize allocations against the per-CPU cache.
+ cache.Activate(CPUCache::ActivationMode::FastPathOffTestOnly);
+
+ constexpr int hot_cpu_id = 0;
+ constexpr int cold_cpu_id = 1;
+
+ const size_t max_cpu_cache_size = Parameters::max_per_cpu_cache_size();
+
+ // Empirical tests suggest that we should be able to steal all the steal-able
+ // capacity from colder cache in < 100 tries. Keeping enough buffer here to
+ // make sure we steal from colder cache, while at the same time avoid timeouts
+ // if something goes bad.
+ constexpr int kMaxStealTries = 1000;
+
+ // We allocate and deallocate a single highest cl object.
+  // This makes sure that we have a single large object in the cache that the
+  // faster cache can steal.
+ const size_t size_class = kNumClasses - 1;
+
+ for (int num_tries = 0;
+ num_tries < kMaxStealTries &&
+ cache.Capacity(cold_cpu_id) >
+ CPUCache::kCacheCapacityThreshold * max_cpu_cache_size;
+ ++num_tries) {
+ ColdCacheOperations(cold_cpu_id, size_class);
+ HotCacheOperations(hot_cpu_id);
+ cache.ShuffleCpuCaches();
+
+ // Check that the capacity is preserved.
+ EXPECT_EQ(cache.Allocated(cold_cpu_id) + cache.Unallocated(cold_cpu_id),
+ cache.Capacity(cold_cpu_id));
+ EXPECT_EQ(cache.Allocated(hot_cpu_id) + cache.Unallocated(hot_cpu_id),
+ cache.Capacity(hot_cpu_id));
+ }
+
+ size_t cold_cache_capacity = cache.Capacity(cold_cpu_id);
+ size_t hot_cache_capacity = cache.Capacity(hot_cpu_id);
+
+ // Check that we drained cold cache to the lower capacity limit.
+ // We also keep some tolerance, up to the largest class size, below the lower
+ // capacity threshold that we can drain cold cache to.
+ EXPECT_GT(cold_cache_capacity,
+ CPUCache::kCacheCapacityThreshold * max_cpu_cache_size -
+ Static::sizemap().class_to_size(kNumClasses - 1));
+
+ // Check that we have at least stolen some capacity.
+ EXPECT_GT(hot_cache_capacity, max_cpu_cache_size);
+
+  // Perform a few more shuffles to make sure that the lower cache capacity
+  // limit has been reached for the cold cache. These additional shuffles
+  // should not change the capacity of either cache.
+ for (int i = 0; i < 100; ++i) {
+ ColdCacheOperations(cold_cpu_id, size_class);
+ HotCacheOperations(hot_cpu_id);
+ cache.ShuffleCpuCaches();
+
+ // Check that the capacity is preserved.
+ EXPECT_EQ(cache.Allocated(cold_cpu_id) + cache.Unallocated(cold_cpu_id),
+ cache.Capacity(cold_cpu_id));
+ EXPECT_EQ(cache.Allocated(hot_cpu_id) + cache.Unallocated(hot_cpu_id),
+ cache.Capacity(hot_cpu_id));
+ }
+
+  // Check that the capacities of the cold and hot caches are the same as before.
+ EXPECT_EQ(cache.Capacity(cold_cpu_id), cold_cache_capacity);
+ EXPECT_EQ(cache.Capacity(hot_cpu_id), hot_cache_capacity);
+
+ // Make sure that the total capacity is preserved.
+ EXPECT_EQ(cache.Capacity(cold_cpu_id) + cache.Capacity(hot_cpu_id),
+ 2 * max_cpu_cache_size);
+
+ // Reclaim caches.
+ const int num_cpus = absl::base_internal::NumCPUs();
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ cache.Reclaim(cpu);
+ }
+}
+
+TEST(CpuCacheTest, ReclaimCpuCache) {
+ if (!subtle::percpu::IsFast()) {
+ return;
+ }
+
+ CPUCache& cache = Static::cpu_cache();
+ // Since this test allocates memory, avoid activating the real fast path to
+ // minimize allocations against the per-CPU cache.
+ cache.Activate(CPUCache::ActivationMode::FastPathOffTestOnly);
+
+ // The number of underflows and overflows must be zero for all the caches.
+ const int num_cpus = absl::base_internal::NumCPUs();
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
+ // Check that reclaim miss metrics are reset.
+ CPUCache::CpuCacheMissStats reclaim_misses =
+ cache.GetReclaimCacheMissStats(cpu);
+ EXPECT_EQ(reclaim_misses.underflows, 0);
+ EXPECT_EQ(reclaim_misses.overflows, 0);
+
+ // None of the caches should have been reclaimed yet.
+ EXPECT_EQ(cache.GetNumReclaims(cpu), 0);
+
+ // Check that caches are empty.
+ uint64_t used_bytes = cache.UsedBytes(cpu);
+ EXPECT_EQ(used_bytes, 0);
+ }
+
+ const size_t kSizeClass = 3;
+
+  // We choose a different size class here so that we can populate different
+  // size class slots and change the number of bytes used by the busy cache
+  // later in our test.
+ const size_t kBusySizeClass = 4;
+
+ // Perform some operations to warm up caches and make sure they are populated.
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
+ ColdCacheOperations(cpu, kSizeClass);
+ EXPECT_TRUE(cache.HasPopulated(cpu));
+ }
+
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
+ CPUCache::CpuCacheMissStats misses_last_interval =
+ cache.GetReclaimCacheMissStats(cpu);
+ CPUCache::CpuCacheMissStats total_misses =
+ cache.GetTotalCacheMissStats(cpu);
+
+ // Misses since the last reclaim (i.e. since we initialized the caches)
+ // should match the total miss metrics.
+ EXPECT_EQ(misses_last_interval.underflows, total_misses.underflows);
+ EXPECT_EQ(misses_last_interval.overflows, total_misses.overflows);
+
+ // Caches should have non-zero used bytes.
+ EXPECT_GT(cache.UsedBytes(cpu), 0);
+ }
+
+ cache.TryReclaimingCaches();
+
+  // Miss metrics over the last interval were non-zero and the change in used
+  // bytes was non-zero, so none of the caches should be reclaimed.
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
+ // As no cache operations were performed since the last reclaim
+ // operation, the reclaim misses captured during the last interval (i.e.
+ // since the last reclaim) should be zero.
+ CPUCache::CpuCacheMissStats reclaim_misses =
+ cache.GetReclaimCacheMissStats(cpu);
+ EXPECT_EQ(reclaim_misses.underflows, 0);
+ EXPECT_EQ(reclaim_misses.overflows, 0);
+
+ // None of the caches should have been reclaimed as the caches were
+ // accessed in the previous interval.
+ EXPECT_EQ(cache.GetNumReclaims(cpu), 0);
+
+ // Caches should not have been reclaimed; used bytes should be non-zero.
+ EXPECT_GT(cache.UsedBytes(cpu), 0);
+ }
+
+ absl::BitGen rnd;
+ const int busy_cpu =
+ absl::Uniform<int32_t>(rnd, 0, absl::base_internal::NumCPUs());
+ const size_t prev_used = cache.UsedBytes(busy_cpu);
+ ColdCacheOperations(busy_cpu, kBusySizeClass);
+ EXPECT_GT(cache.UsedBytes(busy_cpu), prev_used);
+
+ // Try reclaiming caches again.
+ cache.TryReclaimingCaches();
+
+ // All caches, except the busy cpu cache against which we performed some
+ // operations in the previous interval, should have been reclaimed exactly
+ // once.
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
+ if (cpu == busy_cpu) {
+ EXPECT_GT(cache.UsedBytes(cpu), 0);
+ EXPECT_EQ(cache.GetNumReclaims(cpu), 0);
+ } else {
+ EXPECT_EQ(cache.UsedBytes(cpu), 0);
+ EXPECT_EQ(cache.GetNumReclaims(cpu), 1);
+ }
+ }
+
+ // Try reclaiming caches again.
+ cache.TryReclaimingCaches();
+
+ // All caches, including the busy cache, should have been reclaimed this
+ // time. Note that the caches that were reclaimed in the previous interval
+ // should not be reclaimed again and the number of reclaims reported for them
+ // should still be one.
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ SCOPED_TRACE(absl::StrFormat("Failed CPU: %d", cpu));
+ EXPECT_EQ(cache.UsedBytes(cpu), 0);
+ EXPECT_EQ(cache.GetNumReclaims(cpu), 1);
+ }
+}
+
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
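
Both tests above repeatedly assert the same bookkeeping invariant after every
ShuffleCpuCaches() call: a CPU's allocated and unallocated bytes always sum to
its capacity. A minimal sketch of a helper that could factor out that check
(hypothetical, not part of the patch; it only uses the CPUCache accessors
already exercised in the tests):

    // Hypothetical test helper: per-CPU capacity must be conserved by
    // shuffling, i.e. Allocated(cpu) + Unallocated(cpu) == Capacity(cpu).
    void ExpectCapacityConserved(CPUCache& cache, int cpu) {
      EXPECT_EQ(cache.Allocated(cpu) + cache.Unallocated(cpu),
                cache.Capacity(cpu));
    }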
diff --git a/contrib/libs/tcmalloc/tcmalloc/experiment.cc b/contrib/libs/tcmalloc/tcmalloc/experiment.cc
index 1c425fbf9e..4f6f5dbe31 100644
--- a/contrib/libs/tcmalloc/tcmalloc/experiment.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/experiment.cc
@@ -22,9 +22,9 @@
#include "tcmalloc/internal/environment.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
const char kDelimiter = ',';
@@ -50,9 +50,9 @@ const bool* GetSelectedExperiments() {
static const bool* status = [&]() {
const char* active_experiments = thread_safe_getenv(kExperiments);
const char* disabled_experiments = thread_safe_getenv(kDisableExperiments);
- return SelectExperiments(by_id,
- active_experiments ? active_experiments : "",
- disabled_experiments ? disabled_experiments : "");
+ return SelectExperiments(by_id,
+ active_experiments ? active_experiments : "",
+ disabled_experiments ? disabled_experiments : "");
}();
return status;
}
@@ -106,7 +106,7 @@ const bool* SelectExperiments(bool* buffer, absl::string_view active,
return buffer;
}
-void PrintExperiments(Printer* printer) {
+void PrintExperiments(Printer* printer) {
// Index experiments by their positions in the experiments array, rather than
// by experiment ID.
static bool active[ABSL_ARRAYSIZE(experiments)];
@@ -131,32 +131,32 @@ void PrintExperiments(Printer* printer) {
printer->printf("\n");
}
-void FillExperimentProperties(
- std::map<std::string, MallocExtension::Property>* result) {
- for (const auto& config : experiments) {
- (*result)[absl::StrCat("tcmalloc.experiment.", config.name)].value =
- IsExperimentActive(config.id) ? 1 : 0;
- }
-}
-
-} // namespace tcmalloc_internal
-
-bool IsExperimentActive(Experiment exp) {
- ASSERT(static_cast<int>(exp) >= 0);
- ASSERT(exp < Experiment::kMaxExperimentID);
-
- return tcmalloc_internal::GetSelectedExperiments()[static_cast<int>(exp)];
-}
-
-absl::optional<Experiment> FindExperimentByName(absl::string_view name) {
- for (const auto& config : experiments) {
- if (name == config.name) {
- return config.id;
- }
- }
-
- return absl::nullopt;
-}
-
+void FillExperimentProperties(
+ std::map<std::string, MallocExtension::Property>* result) {
+ for (const auto& config : experiments) {
+ (*result)[absl::StrCat("tcmalloc.experiment.", config.name)].value =
+ IsExperimentActive(config.id) ? 1 : 0;
+ }
+}
+
+} // namespace tcmalloc_internal
+
+bool IsExperimentActive(Experiment exp) {
+ ASSERT(static_cast<int>(exp) >= 0);
+ ASSERT(exp < Experiment::kMaxExperimentID);
+
+ return tcmalloc_internal::GetSelectedExperiments()[static_cast<int>(exp)];
+}
+
+absl::optional<Experiment> FindExperimentByName(absl::string_view name) {
+ for (const auto& config : experiments) {
+ if (name == config.name) {
+ return config.id;
+ }
+ }
+
+ return absl::nullopt;
+}
+
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/experiment.h b/contrib/libs/tcmalloc/tcmalloc/experiment.h
index 90b3049df1..d04387952f 100644
--- a/contrib/libs/tcmalloc/tcmalloc/experiment.h
+++ b/contrib/libs/tcmalloc/tcmalloc/experiment.h
@@ -38,9 +38,9 @@
// BORG_DISABLE_EXPERIMENTS=all *or*
// BORG_DISABLE_EXPERIMENTS=BAD_EXPERIMENT_LABEL
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
constexpr size_t kNumExperiments =
static_cast<size_t>(Experiment::kMaxExperimentID);
@@ -54,18 +54,18 @@ constexpr size_t kNumExperiments =
const bool* SelectExperiments(bool* buffer, absl::string_view active,
absl::string_view disabled);
-void FillExperimentProperties(
- std::map<std::string, MallocExtension::Property>* result);
-
-void PrintExperiments(Printer* printer);
-
-} // namespace tcmalloc_internal
+void FillExperimentProperties(
+ std::map<std::string, MallocExtension::Property>* result);
+void PrintExperiments(Printer* printer);
+
+} // namespace tcmalloc_internal
+
bool IsExperimentActive(Experiment exp);
absl::optional<Experiment> FindExperimentByName(absl::string_view name);
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_EXPERIMENT_H_
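
The experiment.h hunk above only reflows whitespace, but it shows the public
surface: FindExperimentByName() maps a label (as used in the
BORG_DISABLE_EXPERIMENTS environment variable mentioned in the header comment)
to an Experiment id, and IsExperimentActive() reports whether that experiment
was selected. A minimal usage sketch under those assumptions (the label comes
from experiment_config.h below; the helper itself is hypothetical):

    #include "tcmalloc/experiment.h"

    // Returns true if the pow2 size-class experiment was selected via the
    // environment-driven experiment configuration.
    bool Pow2SizeClassExperimentActive() {
      auto exp = tcmalloc::FindExperimentByName(
          "TEST_ONLY_TCMALLOC_POW2_SIZECLASS");
      return exp.has_value() && tcmalloc::IsExperimentActive(*exp);
    }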
diff --git a/contrib/libs/tcmalloc/tcmalloc/experiment_config.h b/contrib/libs/tcmalloc/tcmalloc/experiment_config.h
index 294c0374e4..a34969c4b6 100644
--- a/contrib/libs/tcmalloc/tcmalloc/experiment_config.h
+++ b/contrib/libs/tcmalloc/tcmalloc/experiment_config.h
@@ -23,10 +23,10 @@ namespace tcmalloc {
enum class Experiment : int {
TCMALLOC_TEMERAIRE,
TCMALLOC_SANS_56_SIZECLASS,
- TEST_ONLY_TCMALLOC_POW2_SIZECLASS,
- TEST_ONLY_TCMALLOC_POW2_BELOW64_SIZECLASS,
- TEST_ONLY_TCMALLOC_RING_BUFFER_TRANSFER_CACHE,
- TEST_ONLY_TCMALLOC_SHARDED_TRANSFER_CACHE,
+ TEST_ONLY_TCMALLOC_POW2_SIZECLASS,
+ TEST_ONLY_TCMALLOC_POW2_BELOW64_SIZECLASS,
+ TEST_ONLY_TCMALLOC_RING_BUFFER_TRANSFER_CACHE,
+ TEST_ONLY_TCMALLOC_SHARDED_TRANSFER_CACHE,
kMaxExperimentID,
};
@@ -39,10 +39,10 @@ struct ExperimentConfig {
inline constexpr ExperimentConfig experiments[] = {
{Experiment::TCMALLOC_TEMERAIRE, "TCMALLOC_TEMERAIRE"},
{Experiment::TCMALLOC_SANS_56_SIZECLASS, "TCMALLOC_SANS_56_SIZECLASS"},
- {Experiment::TEST_ONLY_TCMALLOC_POW2_SIZECLASS, "TEST_ONLY_TCMALLOC_POW2_SIZECLASS"},
- {Experiment::TEST_ONLY_TCMALLOC_POW2_BELOW64_SIZECLASS, "TEST_ONLY_TCMALLOC_POW2_BELOW64_SIZECLASS"},
- {Experiment::TEST_ONLY_TCMALLOC_RING_BUFFER_TRANSFER_CACHE, "TEST_ONLY_TCMALLOC_RING_BUFFER_TRANSFER_CACHE"},
- {Experiment::TEST_ONLY_TCMALLOC_SHARDED_TRANSFER_CACHE, "TEST_ONLY_TCMALLOC_SHARDED_TRANSFER_CACHE"},
+ {Experiment::TEST_ONLY_TCMALLOC_POW2_SIZECLASS, "TEST_ONLY_TCMALLOC_POW2_SIZECLASS"},
+ {Experiment::TEST_ONLY_TCMALLOC_POW2_BELOW64_SIZECLASS, "TEST_ONLY_TCMALLOC_POW2_BELOW64_SIZECLASS"},
+ {Experiment::TEST_ONLY_TCMALLOC_RING_BUFFER_TRANSFER_CACHE, "TEST_ONLY_TCMALLOC_RING_BUFFER_TRANSFER_CACHE"},
+ {Experiment::TEST_ONLY_TCMALLOC_SHARDED_TRANSFER_CACHE, "TEST_ONLY_TCMALLOC_SHARDED_TRANSFER_CACHE"},
};
// clang-format on
diff --git a/contrib/libs/tcmalloc/tcmalloc/experiment_fuzz.cc b/contrib/libs/tcmalloc/tcmalloc/experiment_fuzz.cc
index 2a7afe9b85..f392cfba17 100644
--- a/contrib/libs/tcmalloc/tcmalloc/experiment_fuzz.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/experiment_fuzz.cc
@@ -22,7 +22,7 @@
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* d, size_t size) {
const char* data = reinterpret_cast<const char*>(d);
- bool buffer[tcmalloc::tcmalloc_internal::kNumExperiments];
+ bool buffer[tcmalloc::tcmalloc_internal::kNumExperiments];
absl::string_view active, disabled;
const char* split = static_cast<const char*>(memchr(data, ';', size));
@@ -33,6 +33,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* d, size_t size) {
disabled = absl::string_view(split + 1, size - (split - data + 1));
}
- tcmalloc::tcmalloc_internal::SelectExperiments(buffer, active, disabled);
+ tcmalloc::tcmalloc_internal::SelectExperiments(buffer, active, disabled);
return 0;
}
diff --git a/contrib/libs/tcmalloc/tcmalloc/experimental_pow2_below64_size_class.cc b/contrib/libs/tcmalloc/tcmalloc/experimental_pow2_below64_size_class.cc
index c6769f450e..ba41dd2ee3 100755
--- a/contrib/libs/tcmalloc/tcmalloc/experimental_pow2_below64_size_class.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/experimental_pow2_below64_size_class.cc
@@ -1,679 +1,679 @@
-// Copyright 2019 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "tcmalloc/common.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-
-namespace tcmalloc_internal {
-
-// <fixed> is fixed per-size-class overhead due to end-of-span fragmentation
-// and other factors. For instance, if we have a 96 byte size class, and use a
-// single 8KiB page, then we will hold 85 objects per span, and have 32 bytes
-// left over. There is also a fixed component of 48 bytes of TCMalloc metadata
-// per span. Together, the fixed overhead would be wasted/allocated =
-// (32 + 48) / (8192 - 32) ~= 0.98%.
-// There is also a dynamic component to overhead based on mismatches between the
-// number of bytes requested and the number of bytes provided by the size class.
-// Together they sum to the total overhead; for instance if you asked for a
-// 50-byte allocation that rounds up to a 64-byte size class, the dynamic
-// overhead would be 28%, and if <fixed> were 22% it would mean (on average)
-// 25 bytes of overhead for allocations of that size.
-
-// clang-format off
-#if defined(__cpp_aligned_new) && __STDCPP_DEFAULT_NEW_ALIGNMENT__ <= 8
-#if TCMALLOC_PAGE_SHIFT == 13
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 82;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.59%
- { 16, 1, 32}, // 0.59%
- { 32, 1, 32}, // 0.59%
- { 64, 1, 32}, // 0.59%
- { 72, 1, 32}, // 1.28%
- { 80, 1, 32}, // 0.98%
- { 88, 1, 32}, // 0.68%
- { 96, 1, 32}, // 0.98%
- { 104, 1, 32}, // 1.58%
- { 112, 1, 32}, // 0.78%
- { 120, 1, 32}, // 0.98%
- { 128, 1, 32}, // 0.59%
- { 136, 1, 32}, // 0.98%
- { 144, 1, 32}, // 2.18%
- { 160, 1, 32}, // 0.98%
- { 176, 1, 32}, // 1.78%
- { 192, 1, 32}, // 2.18%
- { 208, 1, 32}, // 1.58%
- { 224, 1, 32}, // 2.18%
- { 240, 1, 32}, // 0.98%
- { 256, 1, 32}, // 0.59%
- { 272, 1, 32}, // 0.98%
- { 296, 1, 32}, // 3.10%
- { 312, 1, 32}, // 1.58%
- { 336, 1, 32}, // 2.18%
- { 352, 1, 32}, // 1.78%
- { 368, 1, 32}, // 1.78%
- { 408, 1, 32}, // 0.98%
- { 448, 1, 32}, // 2.18%
- { 480, 1, 32}, // 0.98%
- { 512, 1, 32}, // 0.59%
- { 576, 1, 32}, // 2.18%
- { 640, 1, 32}, // 7.29%
- { 704, 1, 32}, // 6.40%
- { 768, 1, 32}, // 7.29%
- { 896, 1, 32}, // 2.18%
- { 1024, 1, 32}, // 0.59%
- { 1152, 2, 32}, // 1.88%
- { 1280, 2, 32}, // 6.98%
- { 1408, 2, 32}, // 6.10%
- { 1536, 2, 32}, // 6.98%
- { 1792, 2, 32}, // 1.88%
- { 2048, 2, 32}, // 0.29%
- { 2304, 2, 28}, // 1.88%
- { 2688, 2, 24}, // 1.88%
- { 2816, 3, 23}, // 9.30%
- { 3200, 2, 20}, // 2.70%
- { 3456, 3, 18}, // 1.79%
- { 3584, 4, 18}, // 1.74%
- { 4096, 1, 16}, // 0.29%
- { 4736, 3, 13}, // 3.99%
- { 5376, 2, 12}, // 1.88%
- { 6144, 3, 10}, // 0.20%
- { 6528, 4, 10}, // 0.54%
- { 7168, 7, 9}, // 0.08%
- { 8192, 1, 8}, // 0.29%
- { 9472, 5, 6}, // 8.23%
- { 10240, 4, 6}, // 6.82%
- { 12288, 3, 5}, // 0.20%
- { 13568, 5, 4}, // 0.75%
- { 14336, 7, 4}, // 0.08%
- { 16384, 2, 4}, // 0.29%
- { 20480, 5, 3}, // 0.12%
- { 24576, 3, 2}, // 0.20%
- { 28672, 7, 2}, // 0.08%
- { 32768, 4, 2}, // 0.15%
- { 40960, 5, 2}, // 0.12%
- { 49152, 6, 2}, // 0.10%
- { 57344, 7, 2}, // 0.08%
- { 65536, 8, 2}, // 0.07%
- { 73728, 9, 2}, // 0.07%
- { 81920, 10, 2}, // 0.06%
- { 98304, 12, 2}, // 0.05%
- { 114688, 14, 2}, // 0.04%
- { 131072, 16, 2}, // 0.04%
- { 147456, 18, 2}, // 0.03%
- { 163840, 20, 2}, // 0.03%
- { 180224, 22, 2}, // 0.03%
- { 204800, 25, 2}, // 0.02%
- { 237568, 29, 2}, // 0.02%
- { 262144, 32, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 15
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 74;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.15%
- { 16, 1, 32}, // 0.15%
- { 32, 1, 32}, // 0.15%
- { 64, 1, 32}, // 0.15%
- { 72, 1, 32}, // 0.17%
- { 80, 1, 32}, // 0.29%
- { 88, 1, 32}, // 0.24%
- { 96, 1, 32}, // 0.24%
- { 104, 1, 32}, // 0.17%
- { 112, 1, 32}, // 0.34%
- { 128, 1, 32}, // 0.15%
- { 144, 1, 32}, // 0.39%
- { 160, 1, 32}, // 0.54%
- { 176, 1, 32}, // 0.24%
- { 192, 1, 32}, // 0.54%
- { 208, 1, 32}, // 0.49%
- { 224, 1, 32}, // 0.34%
- { 240, 1, 32}, // 0.54%
- { 256, 1, 32}, // 0.15%
- { 280, 1, 32}, // 0.17%
- { 304, 1, 32}, // 0.89%
- { 328, 1, 32}, // 1.06%
- { 352, 1, 32}, // 0.24%
- { 384, 1, 32}, // 0.54%
- { 416, 1, 32}, // 1.13%
- { 448, 1, 32}, // 0.34%
- { 488, 1, 32}, // 0.37%
- { 512, 1, 32}, // 0.15%
- { 576, 1, 32}, // 1.74%
- { 640, 1, 32}, // 0.54%
- { 704, 1, 32}, // 1.33%
- { 832, 1, 32}, // 1.13%
- { 896, 1, 32}, // 1.74%
- { 1024, 1, 32}, // 0.15%
- { 1152, 1, 32}, // 1.74%
- { 1280, 1, 32}, // 2.55%
- { 1536, 1, 32}, // 1.74%
- { 1792, 1, 32}, // 1.74%
- { 2048, 1, 32}, // 0.15%
- { 2176, 1, 30}, // 0.54%
- { 2304, 1, 28}, // 1.74%
- { 2688, 1, 24}, // 1.74%
- { 2944, 1, 22}, // 1.33%
- { 3200, 1, 20}, // 2.55%
- { 3584, 1, 18}, // 1.74%
- { 4096, 1, 16}, // 0.15%
- { 4608, 1, 14}, // 1.74%
- { 5376, 1, 12}, // 1.74%
- { 6528, 1, 10}, // 0.54%
- { 7168, 2, 9}, // 1.66%
- { 8192, 1, 8}, // 0.15%
- { 9344, 2, 7}, // 0.27%
- { 10880, 1, 6}, // 0.54%
- { 13952, 3, 4}, // 0.70%
- { 16384, 1, 4}, // 0.15%
- { 19072, 3, 3}, // 3.14%
- { 21760, 2, 3}, // 0.47%
- { 24576, 3, 2}, // 0.05%
- { 28032, 6, 2}, // 0.22%
- { 32768, 1, 2}, // 0.15%
- { 38144, 5, 2}, // 7.41%
- { 40960, 4, 2}, // 6.71%
- { 49152, 3, 2}, // 0.05%
- { 57344, 7, 2}, // 0.02%
- { 65536, 2, 2}, // 0.07%
- { 81920, 5, 2}, // 0.03%
- { 98304, 3, 2}, // 0.05%
- { 114688, 7, 2}, // 0.02%
- { 131072, 4, 2}, // 0.04%
- { 163840, 5, 2}, // 0.03%
- { 196608, 6, 2}, // 0.02%
- { 229376, 7, 2}, // 0.02%
- { 262144, 8, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 18
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 85;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.02%
- { 16, 1, 32}, // 0.02%
- { 32, 1, 32}, // 0.02%
- { 64, 1, 32}, // 0.02%
- { 72, 1, 32}, // 0.04%
- { 80, 1, 32}, // 0.04%
- { 88, 1, 32}, // 0.05%
- { 96, 1, 32}, // 0.04%
- { 104, 1, 32}, // 0.04%
- { 112, 1, 32}, // 0.04%
- { 128, 1, 32}, // 0.02%
- { 144, 1, 32}, // 0.04%
- { 160, 1, 32}, // 0.04%
- { 176, 1, 32}, // 0.05%
- { 192, 1, 32}, // 0.04%
- { 208, 1, 32}, // 0.04%
- { 240, 1, 32}, // 0.04%
- { 256, 1, 32}, // 0.02%
- { 304, 1, 32}, // 0.05%
- { 336, 1, 32}, // 0.04%
- { 360, 1, 32}, // 0.04%
- { 408, 1, 32}, // 0.10%
- { 456, 1, 32}, // 0.17%
- { 512, 1, 32}, // 0.02%
- { 576, 1, 32}, // 0.04%
- { 640, 1, 32}, // 0.17%
- { 704, 1, 32}, // 0.12%
- { 768, 1, 32}, // 0.12%
- { 832, 1, 32}, // 0.04%
- { 896, 1, 32}, // 0.21%
- { 1024, 1, 32}, // 0.02%
- { 1152, 1, 32}, // 0.26%
- { 1280, 1, 32}, // 0.41%
- { 1536, 1, 32}, // 0.41%
- { 1664, 1, 32}, // 0.36%
- { 1792, 1, 32}, // 0.21%
- { 1920, 1, 32}, // 0.41%
- { 2048, 1, 32}, // 0.02%
- { 2176, 1, 30}, // 0.41%
- { 2304, 1, 28}, // 0.71%
- { 2432, 1, 26}, // 0.76%
- { 2560, 1, 25}, // 0.41%
- { 2688, 1, 24}, // 0.56%
- { 2816, 1, 23}, // 0.12%
- { 2944, 1, 22}, // 0.07%
- { 3072, 1, 21}, // 0.41%
- { 3328, 1, 19}, // 1.00%
- { 3584, 1, 18}, // 0.21%
- { 3840, 1, 17}, // 0.41%
- { 4096, 1, 16}, // 0.02%
- { 4736, 1, 13}, // 0.66%
- { 5504, 1, 11}, // 1.35%
- { 6144, 1, 10}, // 1.61%
- { 6528, 1, 10}, // 0.41%
- { 6784, 1, 9}, // 1.71%
- { 7168, 1, 9}, // 1.61%
- { 7680, 1, 8}, // 0.41%
- { 8192, 1, 8}, // 0.02%
- { 8704, 1, 7}, // 0.41%
- { 9344, 1, 7}, // 0.21%
- { 10880, 1, 6}, // 0.41%
- { 11904, 1, 5}, // 0.12%
- { 13056, 1, 5}, // 0.41%
- { 14464, 1, 4}, // 0.71%
- { 16384, 1, 4}, // 0.02%
- { 18688, 1, 3}, // 0.21%
- { 21760, 1, 3}, // 0.41%
- { 26112, 1, 2}, // 0.41%
- { 29056, 1, 2}, // 0.26%
- { 32768, 1, 2}, // 0.02%
- { 37376, 1, 2}, // 0.21%
- { 43648, 1, 2}, // 0.12%
- { 52352, 1, 2}, // 0.17%
- { 56064, 2, 2}, // 3.92%
- { 65536, 1, 2}, // 0.02%
- { 74880, 2, 2}, // 0.03%
- { 87296, 1, 2}, // 0.12%
- { 104832, 2, 2}, // 0.03%
- { 112256, 3, 2}, // 0.09%
- { 131072, 1, 2}, // 0.02%
- { 149760, 3, 2}, // 5.03%
- { 174720, 2, 2}, // 0.03%
- { 209664, 4, 2}, // 0.03%
- { 262144, 1, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 12
-static_assert(kMaxSize == 8192, "kMaxSize mismatch");
-static const int kCount = 42;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 1.17%
- { 16, 1, 32}, // 1.17%
- { 32, 1, 32}, // 1.17%
- { 64, 1, 32}, // 1.17%
- { 72, 1, 32}, // 2.78%
- { 80, 1, 32}, // 1.57%
- { 88, 1, 32}, // 2.37%
- { 96, 1, 32}, // 2.78%
- { 104, 1, 32}, // 2.17%
- { 120, 1, 32}, // 1.57%
- { 128, 1, 32}, // 1.17%
- { 144, 1, 32}, // 2.78%
- { 160, 1, 32}, // 3.60%
- { 184, 1, 32}, // 2.37%
- { 208, 1, 32}, // 4.86%
- { 240, 1, 32}, // 1.57%
- { 256, 1, 32}, // 1.17%
- { 272, 1, 32}, // 1.57%
- { 312, 1, 32}, // 2.17%
- { 336, 1, 32}, // 2.78%
- { 368, 1, 32}, // 2.37%
- { 408, 1, 32}, // 1.57%
- { 512, 1, 32}, // 1.17%
- { 576, 2, 32}, // 2.18%
- { 704, 2, 32}, // 6.40%
- { 768, 2, 32}, // 7.29%
- { 896, 2, 32}, // 2.18%
- { 1024, 2, 32}, // 0.59%
- { 1152, 3, 32}, // 7.08%
- { 1280, 3, 32}, // 7.08%
- { 1536, 3, 32}, // 0.39%
- { 1792, 4, 32}, // 1.88%
- { 2048, 4, 32}, // 0.29%
- { 2304, 4, 28}, // 1.88%
- { 2688, 4, 24}, // 1.88%
- { 3456, 6, 18}, // 1.79%
- { 4096, 4, 16}, // 0.29%
- { 5376, 4, 12}, // 1.88%
- { 6144, 3, 10}, // 0.39%
- { 7168, 7, 9}, // 0.17%
- { 8192, 4, 8}, // 0.29%
-};
-#else
-#error "Unsupported TCMALLOC_PAGE_SHIFT value!"
-#endif
-#else
-#if TCMALLOC_PAGE_SHIFT == 13
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 82;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.59%
- { 16, 1, 32}, // 0.59%
- { 32, 1, 32}, // 0.59%
- { 64, 1, 32}, // 0.59%
- { 80, 1, 32}, // 0.98%
- { 96, 1, 32}, // 0.98%
- { 112, 1, 32}, // 0.78%
- { 128, 1, 32}, // 0.59%
- { 144, 1, 32}, // 2.18%
- { 160, 1, 32}, // 0.98%
- { 176, 1, 32}, // 1.78%
- { 192, 1, 32}, // 2.18%
- { 208, 1, 32}, // 1.58%
- { 224, 1, 32}, // 2.18%
- { 240, 1, 32}, // 0.98%
- { 256, 1, 32}, // 0.59%
- { 272, 1, 32}, // 0.98%
- { 288, 1, 32}, // 2.18%
- { 304, 1, 32}, // 4.25%
- { 320, 1, 32}, // 3.00%
- { 336, 1, 32}, // 2.18%
- { 352, 1, 32}, // 1.78%
- { 368, 1, 32}, // 1.78%
- { 384, 1, 32}, // 2.18%
- { 400, 1, 32}, // 3.00%
- { 416, 1, 32}, // 4.25%
- { 448, 1, 32}, // 2.18%
- { 480, 1, 32}, // 0.98%
- { 512, 1, 32}, // 0.59%
- { 576, 1, 32}, // 2.18%
- { 640, 1, 32}, // 7.29%
- { 704, 1, 32}, // 6.40%
- { 768, 1, 32}, // 7.29%
- { 896, 1, 32}, // 2.18%
- { 1024, 1, 32}, // 0.59%
- { 1152, 2, 32}, // 1.88%
- { 1280, 2, 32}, // 6.98%
- { 1408, 2, 32}, // 6.10%
- { 1536, 2, 32}, // 6.98%
- { 1792, 2, 32}, // 1.88%
- { 2048, 2, 32}, // 0.29%
- { 2304, 2, 28}, // 1.88%
- { 2688, 2, 24}, // 1.88%
- { 2816, 3, 23}, // 9.30%
- { 3200, 2, 20}, // 2.70%
- { 3456, 3, 18}, // 1.79%
- { 3584, 4, 18}, // 1.74%
- { 4096, 1, 16}, // 0.29%
- { 4736, 3, 13}, // 3.99%
- { 5376, 2, 12}, // 1.88%
- { 6144, 3, 10}, // 0.20%
- { 6528, 4, 10}, // 0.54%
- { 7168, 7, 9}, // 0.08%
- { 8192, 1, 8}, // 0.29%
- { 9472, 5, 6}, // 8.23%
- { 10240, 4, 6}, // 6.82%
- { 12288, 3, 5}, // 0.20%
- { 13568, 5, 4}, // 0.75%
- { 14336, 7, 4}, // 0.08%
- { 16384, 2, 4}, // 0.29%
- { 20480, 5, 3}, // 0.12%
- { 24576, 3, 2}, // 0.20%
- { 28672, 7, 2}, // 0.08%
- { 32768, 4, 2}, // 0.15%
- { 40960, 5, 2}, // 0.12%
- { 49152, 6, 2}, // 0.10%
- { 57344, 7, 2}, // 0.08%
- { 65536, 8, 2}, // 0.07%
- { 73728, 9, 2}, // 0.07%
- { 81920, 10, 2}, // 0.06%
- { 90112, 11, 2}, // 0.05%
- { 98304, 12, 2}, // 0.05%
- { 106496, 13, 2}, // 0.05%
- { 114688, 14, 2}, // 0.04%
- { 131072, 16, 2}, // 0.04%
- { 147456, 18, 2}, // 0.03%
- { 163840, 20, 2}, // 0.03%
- { 180224, 22, 2}, // 0.03%
- { 204800, 25, 2}, // 0.02%
- { 237568, 29, 2}, // 0.02%
- { 262144, 32, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 15
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 74;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.15%
- { 16, 1, 32}, // 0.15%
- { 32, 1, 32}, // 0.15%
- { 64, 1, 32}, // 0.15%
- { 80, 1, 32}, // 0.29%
- { 96, 1, 32}, // 0.24%
- { 112, 1, 32}, // 0.34%
- { 128, 1, 32}, // 0.15%
- { 144, 1, 32}, // 0.39%
- { 160, 1, 32}, // 0.54%
- { 176, 1, 32}, // 0.24%
- { 192, 1, 32}, // 0.54%
- { 208, 1, 32}, // 0.49%
- { 224, 1, 32}, // 0.34%
- { 240, 1, 32}, // 0.54%
- { 256, 1, 32}, // 0.15%
- { 272, 1, 32}, // 0.54%
- { 288, 1, 32}, // 0.84%
- { 304, 1, 32}, // 0.89%
- { 320, 1, 32}, // 0.54%
- { 352, 1, 32}, // 0.24%
- { 384, 1, 32}, // 0.54%
- { 416, 1, 32}, // 1.13%
- { 448, 1, 32}, // 0.34%
- { 480, 1, 32}, // 0.54%
- { 512, 1, 32}, // 0.15%
- { 576, 1, 32}, // 1.74%
- { 640, 1, 32}, // 0.54%
- { 704, 1, 32}, // 1.33%
- { 768, 1, 32}, // 1.74%
- { 832, 1, 32}, // 1.13%
- { 896, 1, 32}, // 1.74%
- { 1024, 1, 32}, // 0.15%
- { 1152, 1, 32}, // 1.74%
- { 1280, 1, 32}, // 2.55%
- { 1408, 1, 32}, // 1.33%
- { 1536, 1, 32}, // 1.74%
- { 1792, 1, 32}, // 1.74%
- { 2048, 1, 32}, // 0.15%
- { 2176, 1, 30}, // 0.54%
- { 2304, 1, 28}, // 1.74%
- { 2688, 1, 24}, // 1.74%
- { 2944, 1, 22}, // 1.33%
- { 3200, 1, 20}, // 2.55%
- { 3584, 1, 18}, // 1.74%
- { 4096, 1, 16}, // 0.15%
- { 4608, 1, 14}, // 1.74%
- { 5376, 1, 12}, // 1.74%
- { 6528, 1, 10}, // 0.54%
- { 7168, 2, 9}, // 1.66%
- { 8192, 1, 8}, // 0.15%
- { 9344, 2, 7}, // 0.27%
- { 10880, 1, 6}, // 0.54%
- { 13952, 3, 4}, // 0.70%
- { 16384, 1, 4}, // 0.15%
- { 19072, 3, 3}, // 3.14%
- { 21760, 2, 3}, // 0.47%
- { 24576, 3, 2}, // 0.05%
- { 28032, 6, 2}, // 0.22%
- { 32768, 1, 2}, // 0.15%
- { 38144, 5, 2}, // 7.41%
- { 40960, 4, 2}, // 6.71%
- { 49152, 3, 2}, // 0.05%
- { 57344, 7, 2}, // 0.02%
- { 65536, 2, 2}, // 0.07%
- { 81920, 5, 2}, // 0.03%
- { 98304, 3, 2}, // 0.05%
- { 114688, 7, 2}, // 0.02%
- { 131072, 4, 2}, // 0.04%
- { 163840, 5, 2}, // 0.03%
- { 196608, 6, 2}, // 0.02%
- { 229376, 7, 2}, // 0.02%
- { 262144, 8, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 18
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 85;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.02%
- { 16, 1, 32}, // 0.02%
- { 32, 1, 32}, // 0.02%
- { 64, 1, 32}, // 0.02%
- { 80, 1, 32}, // 0.04%
- { 96, 1, 32}, // 0.04%
- { 112, 1, 32}, // 0.04%
- { 128, 1, 32}, // 0.02%
- { 144, 1, 32}, // 0.04%
- { 160, 1, 32}, // 0.04%
- { 176, 1, 32}, // 0.05%
- { 192, 1, 32}, // 0.04%
- { 208, 1, 32}, // 0.04%
- { 240, 1, 32}, // 0.04%
- { 256, 1, 32}, // 0.02%
- { 304, 1, 32}, // 0.05%
- { 336, 1, 32}, // 0.04%
- { 368, 1, 32}, // 0.07%
- { 416, 1, 32}, // 0.04%
- { 464, 1, 32}, // 0.19%
- { 512, 1, 32}, // 0.02%
- { 576, 1, 32}, // 0.04%
- { 640, 1, 32}, // 0.17%
- { 704, 1, 32}, // 0.12%
- { 768, 1, 32}, // 0.12%
- { 832, 1, 32}, // 0.04%
- { 896, 1, 32}, // 0.21%
- { 1024, 1, 32}, // 0.02%
- { 1152, 1, 32}, // 0.26%
- { 1280, 1, 32}, // 0.41%
- { 1408, 1, 32}, // 0.12%
- { 1536, 1, 32}, // 0.41%
- { 1664, 1, 32}, // 0.36%
- { 1792, 1, 32}, // 0.21%
- { 1920, 1, 32}, // 0.41%
- { 2048, 1, 32}, // 0.02%
- { 2176, 1, 30}, // 0.41%
- { 2304, 1, 28}, // 0.71%
- { 2432, 1, 26}, // 0.76%
- { 2560, 1, 25}, // 0.41%
- { 2688, 1, 24}, // 0.56%
- { 2816, 1, 23}, // 0.12%
- { 2944, 1, 22}, // 0.07%
- { 3072, 1, 21}, // 0.41%
- { 3200, 1, 20}, // 1.15%
- { 3328, 1, 19}, // 1.00%
- { 3584, 1, 18}, // 0.21%
- { 3840, 1, 17}, // 0.41%
- { 4096, 1, 16}, // 0.02%
- { 4736, 1, 13}, // 0.66%
- { 5504, 1, 11}, // 1.35%
- { 6144, 1, 10}, // 1.61%
- { 6528, 1, 10}, // 0.41%
- { 6784, 1, 9}, // 1.71%
- { 7168, 1, 9}, // 1.61%
- { 7680, 1, 8}, // 0.41%
- { 8192, 1, 8}, // 0.02%
- { 8704, 1, 7}, // 0.41%
- { 9344, 1, 7}, // 0.21%
- { 10368, 1, 6}, // 1.15%
- { 11392, 1, 5}, // 0.07%
- { 12416, 1, 5}, // 0.56%
- { 13696, 1, 4}, // 0.76%
- { 14464, 1, 4}, // 0.71%
- { 16384, 1, 4}, // 0.02%
- { 18688, 1, 3}, // 0.21%
- { 21760, 1, 3}, // 0.41%
- { 26112, 1, 2}, // 0.41%
- { 29056, 1, 2}, // 0.26%
- { 32768, 1, 2}, // 0.02%
- { 37376, 1, 2}, // 0.21%
- { 43648, 1, 2}, // 0.12%
- { 52352, 1, 2}, // 0.17%
- { 56064, 2, 2}, // 3.92%
- { 65536, 1, 2}, // 0.02%
- { 74880, 2, 2}, // 0.03%
- { 87296, 1, 2}, // 0.12%
- { 104832, 2, 2}, // 0.03%
- { 112256, 3, 2}, // 0.09%
- { 131072, 1, 2}, // 0.02%
- { 149760, 3, 2}, // 5.03%
- { 174720, 2, 2}, // 0.03%
- { 209664, 4, 2}, // 0.03%
- { 262144, 1, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 12
-static_assert(kMaxSize == 8192, "kMaxSize mismatch");
-static const int kCount = 42;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 1.17%
- { 16, 1, 32}, // 1.17%
- { 32, 1, 32}, // 1.17%
- { 64, 1, 32}, // 1.17%
- { 80, 1, 32}, // 1.57%
- { 96, 1, 32}, // 2.78%
- { 112, 1, 32}, // 2.78%
- { 128, 1, 32}, // 1.17%
- { 144, 1, 32}, // 2.78%
- { 160, 1, 32}, // 3.60%
- { 176, 1, 32}, // 2.37%
- { 192, 1, 32}, // 2.78%
- { 208, 1, 32}, // 4.86%
- { 240, 1, 32}, // 1.57%
- { 256, 1, 32}, // 1.17%
- { 272, 1, 32}, // 1.57%
- { 304, 1, 32}, // 4.86%
- { 336, 1, 32}, // 2.78%
- { 368, 1, 32}, // 2.37%
- { 400, 1, 32}, // 3.60%
- { 448, 1, 32}, // 2.78%
- { 512, 1, 32}, // 1.17%
- { 576, 2, 32}, // 2.18%
- { 640, 2, 32}, // 7.29%
- { 704, 2, 32}, // 6.40%
- { 768, 2, 32}, // 7.29%
- { 896, 2, 32}, // 2.18%
- { 1024, 2, 32}, // 0.59%
- { 1152, 3, 32}, // 7.08%
- { 1280, 3, 32}, // 7.08%
- { 1536, 3, 32}, // 0.39%
- { 1792, 4, 32}, // 1.88%
- { 2048, 4, 32}, // 0.29%
- { 2304, 4, 28}, // 1.88%
- { 2688, 4, 24}, // 1.88%
- { 3456, 6, 18}, // 1.79%
- { 4096, 4, 16}, // 0.29%
- { 5376, 4, 12}, // 1.88%
- { 6144, 3, 10}, // 0.39%
- { 7168, 7, 9}, // 0.17%
- { 8192, 4, 8}, // 0.29%
-};
-#else
-#error "Unsupported TCMALLOC_PAGE_SHIFT value!"
-#endif
-#endif
-// clang-format on
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+// Copyright 2019 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tcmalloc/common.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+
+namespace tcmalloc_internal {
+
+// <fixed> is fixed per-size-class overhead due to end-of-span fragmentation
+// and other factors. For instance, if we have a 96 byte size class, and use a
+// single 8KiB page, then we will hold 85 objects per span, and have 32 bytes
+// left over. There is also a fixed component of 48 bytes of TCMalloc metadata
+// per span. Together, the fixed overhead would be wasted/allocated =
+// (32 + 48) / (8192 - 32) ~= 0.98%.
+// There is also a dynamic component to overhead based on mismatches between the
+// number of bytes requested and the number of bytes provided by the size class.
+// Together they sum to the total overhead; for instance if you asked for a
+// 50-byte allocation that rounds up to a 64-byte size class, the dynamic
+// overhead would be 28%, and if <fixed> were 22% it would mean (on average)
+// 25 bytes of overhead for allocations of that size.
+
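+// Worked example of the arithmetic above (illustrative only; the values are
+// taken from the comment, not from the tables below): for the 96-byte class
+// on an 8 KiB page,
+//   objects per span = 8192 / 96 = 85, leftover = 8192 - 85 * 96 = 32 bytes
+//   fixed overhead   = (32 + 48) / (8192 - 32) ~= 0.98%
+// and for a 50-byte request rounded up to the 64-byte class,
+//   dynamic overhead = (64 - 50) / 50 = 28%,
+// so with a 22% <fixed> share the total is ~50%, i.e. ~25 bytes per allocation.
+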
+// clang-format off
+#if defined(__cpp_aligned_new) && __STDCPP_DEFAULT_NEW_ALIGNMENT__ <= 8
+#if TCMALLOC_PAGE_SHIFT == 13
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 82;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.59%
+ { 16, 1, 32}, // 0.59%
+ { 32, 1, 32}, // 0.59%
+ { 64, 1, 32}, // 0.59%
+ { 72, 1, 32}, // 1.28%
+ { 80, 1, 32}, // 0.98%
+ { 88, 1, 32}, // 0.68%
+ { 96, 1, 32}, // 0.98%
+ { 104, 1, 32}, // 1.58%
+ { 112, 1, 32}, // 0.78%
+ { 120, 1, 32}, // 0.98%
+ { 128, 1, 32}, // 0.59%
+ { 136, 1, 32}, // 0.98%
+ { 144, 1, 32}, // 2.18%
+ { 160, 1, 32}, // 0.98%
+ { 176, 1, 32}, // 1.78%
+ { 192, 1, 32}, // 2.18%
+ { 208, 1, 32}, // 1.58%
+ { 224, 1, 32}, // 2.18%
+ { 240, 1, 32}, // 0.98%
+ { 256, 1, 32}, // 0.59%
+ { 272, 1, 32}, // 0.98%
+ { 296, 1, 32}, // 3.10%
+ { 312, 1, 32}, // 1.58%
+ { 336, 1, 32}, // 2.18%
+ { 352, 1, 32}, // 1.78%
+ { 368, 1, 32}, // 1.78%
+ { 408, 1, 32}, // 0.98%
+ { 448, 1, 32}, // 2.18%
+ { 480, 1, 32}, // 0.98%
+ { 512, 1, 32}, // 0.59%
+ { 576, 1, 32}, // 2.18%
+ { 640, 1, 32}, // 7.29%
+ { 704, 1, 32}, // 6.40%
+ { 768, 1, 32}, // 7.29%
+ { 896, 1, 32}, // 2.18%
+ { 1024, 1, 32}, // 0.59%
+ { 1152, 2, 32}, // 1.88%
+ { 1280, 2, 32}, // 6.98%
+ { 1408, 2, 32}, // 6.10%
+ { 1536, 2, 32}, // 6.98%
+ { 1792, 2, 32}, // 1.88%
+ { 2048, 2, 32}, // 0.29%
+ { 2304, 2, 28}, // 1.88%
+ { 2688, 2, 24}, // 1.88%
+ { 2816, 3, 23}, // 9.30%
+ { 3200, 2, 20}, // 2.70%
+ { 3456, 3, 18}, // 1.79%
+ { 3584, 4, 18}, // 1.74%
+ { 4096, 1, 16}, // 0.29%
+ { 4736, 3, 13}, // 3.99%
+ { 5376, 2, 12}, // 1.88%
+ { 6144, 3, 10}, // 0.20%
+ { 6528, 4, 10}, // 0.54%
+ { 7168, 7, 9}, // 0.08%
+ { 8192, 1, 8}, // 0.29%
+ { 9472, 5, 6}, // 8.23%
+ { 10240, 4, 6}, // 6.82%
+ { 12288, 3, 5}, // 0.20%
+ { 13568, 5, 4}, // 0.75%
+ { 14336, 7, 4}, // 0.08%
+ { 16384, 2, 4}, // 0.29%
+ { 20480, 5, 3}, // 0.12%
+ { 24576, 3, 2}, // 0.20%
+ { 28672, 7, 2}, // 0.08%
+ { 32768, 4, 2}, // 0.15%
+ { 40960, 5, 2}, // 0.12%
+ { 49152, 6, 2}, // 0.10%
+ { 57344, 7, 2}, // 0.08%
+ { 65536, 8, 2}, // 0.07%
+ { 73728, 9, 2}, // 0.07%
+ { 81920, 10, 2}, // 0.06%
+ { 98304, 12, 2}, // 0.05%
+ { 114688, 14, 2}, // 0.04%
+ { 131072, 16, 2}, // 0.04%
+ { 147456, 18, 2}, // 0.03%
+ { 163840, 20, 2}, // 0.03%
+ { 180224, 22, 2}, // 0.03%
+ { 204800, 25, 2}, // 0.02%
+ { 237568, 29, 2}, // 0.02%
+ { 262144, 32, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 15
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 74;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.15%
+ { 16, 1, 32}, // 0.15%
+ { 32, 1, 32}, // 0.15%
+ { 64, 1, 32}, // 0.15%
+ { 72, 1, 32}, // 0.17%
+ { 80, 1, 32}, // 0.29%
+ { 88, 1, 32}, // 0.24%
+ { 96, 1, 32}, // 0.24%
+ { 104, 1, 32}, // 0.17%
+ { 112, 1, 32}, // 0.34%
+ { 128, 1, 32}, // 0.15%
+ { 144, 1, 32}, // 0.39%
+ { 160, 1, 32}, // 0.54%
+ { 176, 1, 32}, // 0.24%
+ { 192, 1, 32}, // 0.54%
+ { 208, 1, 32}, // 0.49%
+ { 224, 1, 32}, // 0.34%
+ { 240, 1, 32}, // 0.54%
+ { 256, 1, 32}, // 0.15%
+ { 280, 1, 32}, // 0.17%
+ { 304, 1, 32}, // 0.89%
+ { 328, 1, 32}, // 1.06%
+ { 352, 1, 32}, // 0.24%
+ { 384, 1, 32}, // 0.54%
+ { 416, 1, 32}, // 1.13%
+ { 448, 1, 32}, // 0.34%
+ { 488, 1, 32}, // 0.37%
+ { 512, 1, 32}, // 0.15%
+ { 576, 1, 32}, // 1.74%
+ { 640, 1, 32}, // 0.54%
+ { 704, 1, 32}, // 1.33%
+ { 832, 1, 32}, // 1.13%
+ { 896, 1, 32}, // 1.74%
+ { 1024, 1, 32}, // 0.15%
+ { 1152, 1, 32}, // 1.74%
+ { 1280, 1, 32}, // 2.55%
+ { 1536, 1, 32}, // 1.74%
+ { 1792, 1, 32}, // 1.74%
+ { 2048, 1, 32}, // 0.15%
+ { 2176, 1, 30}, // 0.54%
+ { 2304, 1, 28}, // 1.74%
+ { 2688, 1, 24}, // 1.74%
+ { 2944, 1, 22}, // 1.33%
+ { 3200, 1, 20}, // 2.55%
+ { 3584, 1, 18}, // 1.74%
+ { 4096, 1, 16}, // 0.15%
+ { 4608, 1, 14}, // 1.74%
+ { 5376, 1, 12}, // 1.74%
+ { 6528, 1, 10}, // 0.54%
+ { 7168, 2, 9}, // 1.66%
+ { 8192, 1, 8}, // 0.15%
+ { 9344, 2, 7}, // 0.27%
+ { 10880, 1, 6}, // 0.54%
+ { 13952, 3, 4}, // 0.70%
+ { 16384, 1, 4}, // 0.15%
+ { 19072, 3, 3}, // 3.14%
+ { 21760, 2, 3}, // 0.47%
+ { 24576, 3, 2}, // 0.05%
+ { 28032, 6, 2}, // 0.22%
+ { 32768, 1, 2}, // 0.15%
+ { 38144, 5, 2}, // 7.41%
+ { 40960, 4, 2}, // 6.71%
+ { 49152, 3, 2}, // 0.05%
+ { 57344, 7, 2}, // 0.02%
+ { 65536, 2, 2}, // 0.07%
+ { 81920, 5, 2}, // 0.03%
+ { 98304, 3, 2}, // 0.05%
+ { 114688, 7, 2}, // 0.02%
+ { 131072, 4, 2}, // 0.04%
+ { 163840, 5, 2}, // 0.03%
+ { 196608, 6, 2}, // 0.02%
+ { 229376, 7, 2}, // 0.02%
+ { 262144, 8, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 18
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 85;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.02%
+ { 16, 1, 32}, // 0.02%
+ { 32, 1, 32}, // 0.02%
+ { 64, 1, 32}, // 0.02%
+ { 72, 1, 32}, // 0.04%
+ { 80, 1, 32}, // 0.04%
+ { 88, 1, 32}, // 0.05%
+ { 96, 1, 32}, // 0.04%
+ { 104, 1, 32}, // 0.04%
+ { 112, 1, 32}, // 0.04%
+ { 128, 1, 32}, // 0.02%
+ { 144, 1, 32}, // 0.04%
+ { 160, 1, 32}, // 0.04%
+ { 176, 1, 32}, // 0.05%
+ { 192, 1, 32}, // 0.04%
+ { 208, 1, 32}, // 0.04%
+ { 240, 1, 32}, // 0.04%
+ { 256, 1, 32}, // 0.02%
+ { 304, 1, 32}, // 0.05%
+ { 336, 1, 32}, // 0.04%
+ { 360, 1, 32}, // 0.04%
+ { 408, 1, 32}, // 0.10%
+ { 456, 1, 32}, // 0.17%
+ { 512, 1, 32}, // 0.02%
+ { 576, 1, 32}, // 0.04%
+ { 640, 1, 32}, // 0.17%
+ { 704, 1, 32}, // 0.12%
+ { 768, 1, 32}, // 0.12%
+ { 832, 1, 32}, // 0.04%
+ { 896, 1, 32}, // 0.21%
+ { 1024, 1, 32}, // 0.02%
+ { 1152, 1, 32}, // 0.26%
+ { 1280, 1, 32}, // 0.41%
+ { 1536, 1, 32}, // 0.41%
+ { 1664, 1, 32}, // 0.36%
+ { 1792, 1, 32}, // 0.21%
+ { 1920, 1, 32}, // 0.41%
+ { 2048, 1, 32}, // 0.02%
+ { 2176, 1, 30}, // 0.41%
+ { 2304, 1, 28}, // 0.71%
+ { 2432, 1, 26}, // 0.76%
+ { 2560, 1, 25}, // 0.41%
+ { 2688, 1, 24}, // 0.56%
+ { 2816, 1, 23}, // 0.12%
+ { 2944, 1, 22}, // 0.07%
+ { 3072, 1, 21}, // 0.41%
+ { 3328, 1, 19}, // 1.00%
+ { 3584, 1, 18}, // 0.21%
+ { 3840, 1, 17}, // 0.41%
+ { 4096, 1, 16}, // 0.02%
+ { 4736, 1, 13}, // 0.66%
+ { 5504, 1, 11}, // 1.35%
+ { 6144, 1, 10}, // 1.61%
+ { 6528, 1, 10}, // 0.41%
+ { 6784, 1, 9}, // 1.71%
+ { 7168, 1, 9}, // 1.61%
+ { 7680, 1, 8}, // 0.41%
+ { 8192, 1, 8}, // 0.02%
+ { 8704, 1, 7}, // 0.41%
+ { 9344, 1, 7}, // 0.21%
+ { 10880, 1, 6}, // 0.41%
+ { 11904, 1, 5}, // 0.12%
+ { 13056, 1, 5}, // 0.41%
+ { 14464, 1, 4}, // 0.71%
+ { 16384, 1, 4}, // 0.02%
+ { 18688, 1, 3}, // 0.21%
+ { 21760, 1, 3}, // 0.41%
+ { 26112, 1, 2}, // 0.41%
+ { 29056, 1, 2}, // 0.26%
+ { 32768, 1, 2}, // 0.02%
+ { 37376, 1, 2}, // 0.21%
+ { 43648, 1, 2}, // 0.12%
+ { 52352, 1, 2}, // 0.17%
+ { 56064, 2, 2}, // 3.92%
+ { 65536, 1, 2}, // 0.02%
+ { 74880, 2, 2}, // 0.03%
+ { 87296, 1, 2}, // 0.12%
+ { 104832, 2, 2}, // 0.03%
+ { 112256, 3, 2}, // 0.09%
+ { 131072, 1, 2}, // 0.02%
+ { 149760, 3, 2}, // 5.03%
+ { 174720, 2, 2}, // 0.03%
+ { 209664, 4, 2}, // 0.03%
+ { 262144, 1, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 12
+static_assert(kMaxSize == 8192, "kMaxSize mismatch");
+static const int kCount = 42;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 1.17%
+ { 16, 1, 32}, // 1.17%
+ { 32, 1, 32}, // 1.17%
+ { 64, 1, 32}, // 1.17%
+ { 72, 1, 32}, // 2.78%
+ { 80, 1, 32}, // 1.57%
+ { 88, 1, 32}, // 2.37%
+ { 96, 1, 32}, // 2.78%
+ { 104, 1, 32}, // 2.17%
+ { 120, 1, 32}, // 1.57%
+ { 128, 1, 32}, // 1.17%
+ { 144, 1, 32}, // 2.78%
+ { 160, 1, 32}, // 3.60%
+ { 184, 1, 32}, // 2.37%
+ { 208, 1, 32}, // 4.86%
+ { 240, 1, 32}, // 1.57%
+ { 256, 1, 32}, // 1.17%
+ { 272, 1, 32}, // 1.57%
+ { 312, 1, 32}, // 2.17%
+ { 336, 1, 32}, // 2.78%
+ { 368, 1, 32}, // 2.37%
+ { 408, 1, 32}, // 1.57%
+ { 512, 1, 32}, // 1.17%
+ { 576, 2, 32}, // 2.18%
+ { 704, 2, 32}, // 6.40%
+ { 768, 2, 32}, // 7.29%
+ { 896, 2, 32}, // 2.18%
+ { 1024, 2, 32}, // 0.59%
+ { 1152, 3, 32}, // 7.08%
+ { 1280, 3, 32}, // 7.08%
+ { 1536, 3, 32}, // 0.39%
+ { 1792, 4, 32}, // 1.88%
+ { 2048, 4, 32}, // 0.29%
+ { 2304, 4, 28}, // 1.88%
+ { 2688, 4, 24}, // 1.88%
+ { 3456, 6, 18}, // 1.79%
+ { 4096, 4, 16}, // 0.29%
+ { 5376, 4, 12}, // 1.88%
+ { 6144, 3, 10}, // 0.39%
+ { 7168, 7, 9}, // 0.17%
+ { 8192, 4, 8}, // 0.29%
+};
+#else
+#error "Unsupported TCMALLOC_PAGE_SHIFT value!"
+#endif
+#else
+#if TCMALLOC_PAGE_SHIFT == 13
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 82;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.59%
+ { 16, 1, 32}, // 0.59%
+ { 32, 1, 32}, // 0.59%
+ { 64, 1, 32}, // 0.59%
+ { 80, 1, 32}, // 0.98%
+ { 96, 1, 32}, // 0.98%
+ { 112, 1, 32}, // 0.78%
+ { 128, 1, 32}, // 0.59%
+ { 144, 1, 32}, // 2.18%
+ { 160, 1, 32}, // 0.98%
+ { 176, 1, 32}, // 1.78%
+ { 192, 1, 32}, // 2.18%
+ { 208, 1, 32}, // 1.58%
+ { 224, 1, 32}, // 2.18%
+ { 240, 1, 32}, // 0.98%
+ { 256, 1, 32}, // 0.59%
+ { 272, 1, 32}, // 0.98%
+ { 288, 1, 32}, // 2.18%
+ { 304, 1, 32}, // 4.25%
+ { 320, 1, 32}, // 3.00%
+ { 336, 1, 32}, // 2.18%
+ { 352, 1, 32}, // 1.78%
+ { 368, 1, 32}, // 1.78%
+ { 384, 1, 32}, // 2.18%
+ { 400, 1, 32}, // 3.00%
+ { 416, 1, 32}, // 4.25%
+ { 448, 1, 32}, // 2.18%
+ { 480, 1, 32}, // 0.98%
+ { 512, 1, 32}, // 0.59%
+ { 576, 1, 32}, // 2.18%
+ { 640, 1, 32}, // 7.29%
+ { 704, 1, 32}, // 6.40%
+ { 768, 1, 32}, // 7.29%
+ { 896, 1, 32}, // 2.18%
+ { 1024, 1, 32}, // 0.59%
+ { 1152, 2, 32}, // 1.88%
+ { 1280, 2, 32}, // 6.98%
+ { 1408, 2, 32}, // 6.10%
+ { 1536, 2, 32}, // 6.98%
+ { 1792, 2, 32}, // 1.88%
+ { 2048, 2, 32}, // 0.29%
+ { 2304, 2, 28}, // 1.88%
+ { 2688, 2, 24}, // 1.88%
+ { 2816, 3, 23}, // 9.30%
+ { 3200, 2, 20}, // 2.70%
+ { 3456, 3, 18}, // 1.79%
+ { 3584, 4, 18}, // 1.74%
+ { 4096, 1, 16}, // 0.29%
+ { 4736, 3, 13}, // 3.99%
+ { 5376, 2, 12}, // 1.88%
+ { 6144, 3, 10}, // 0.20%
+ { 6528, 4, 10}, // 0.54%
+ { 7168, 7, 9}, // 0.08%
+ { 8192, 1, 8}, // 0.29%
+ { 9472, 5, 6}, // 8.23%
+ { 10240, 4, 6}, // 6.82%
+ { 12288, 3, 5}, // 0.20%
+ { 13568, 5, 4}, // 0.75%
+ { 14336, 7, 4}, // 0.08%
+ { 16384, 2, 4}, // 0.29%
+ { 20480, 5, 3}, // 0.12%
+ { 24576, 3, 2}, // 0.20%
+ { 28672, 7, 2}, // 0.08%
+ { 32768, 4, 2}, // 0.15%
+ { 40960, 5, 2}, // 0.12%
+ { 49152, 6, 2}, // 0.10%
+ { 57344, 7, 2}, // 0.08%
+ { 65536, 8, 2}, // 0.07%
+ { 73728, 9, 2}, // 0.07%
+ { 81920, 10, 2}, // 0.06%
+ { 90112, 11, 2}, // 0.05%
+ { 98304, 12, 2}, // 0.05%
+ { 106496, 13, 2}, // 0.05%
+ { 114688, 14, 2}, // 0.04%
+ { 131072, 16, 2}, // 0.04%
+ { 147456, 18, 2}, // 0.03%
+ { 163840, 20, 2}, // 0.03%
+ { 180224, 22, 2}, // 0.03%
+ { 204800, 25, 2}, // 0.02%
+ { 237568, 29, 2}, // 0.02%
+ { 262144, 32, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 15
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 74;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.15%
+ { 16, 1, 32}, // 0.15%
+ { 32, 1, 32}, // 0.15%
+ { 64, 1, 32}, // 0.15%
+ { 80, 1, 32}, // 0.29%
+ { 96, 1, 32}, // 0.24%
+ { 112, 1, 32}, // 0.34%
+ { 128, 1, 32}, // 0.15%
+ { 144, 1, 32}, // 0.39%
+ { 160, 1, 32}, // 0.54%
+ { 176, 1, 32}, // 0.24%
+ { 192, 1, 32}, // 0.54%
+ { 208, 1, 32}, // 0.49%
+ { 224, 1, 32}, // 0.34%
+ { 240, 1, 32}, // 0.54%
+ { 256, 1, 32}, // 0.15%
+ { 272, 1, 32}, // 0.54%
+ { 288, 1, 32}, // 0.84%
+ { 304, 1, 32}, // 0.89%
+ { 320, 1, 32}, // 0.54%
+ { 352, 1, 32}, // 0.24%
+ { 384, 1, 32}, // 0.54%
+ { 416, 1, 32}, // 1.13%
+ { 448, 1, 32}, // 0.34%
+ { 480, 1, 32}, // 0.54%
+ { 512, 1, 32}, // 0.15%
+ { 576, 1, 32}, // 1.74%
+ { 640, 1, 32}, // 0.54%
+ { 704, 1, 32}, // 1.33%
+ { 768, 1, 32}, // 1.74%
+ { 832, 1, 32}, // 1.13%
+ { 896, 1, 32}, // 1.74%
+ { 1024, 1, 32}, // 0.15%
+ { 1152, 1, 32}, // 1.74%
+ { 1280, 1, 32}, // 2.55%
+ { 1408, 1, 32}, // 1.33%
+ { 1536, 1, 32}, // 1.74%
+ { 1792, 1, 32}, // 1.74%
+ { 2048, 1, 32}, // 0.15%
+ { 2176, 1, 30}, // 0.54%
+ { 2304, 1, 28}, // 1.74%
+ { 2688, 1, 24}, // 1.74%
+ { 2944, 1, 22}, // 1.33%
+ { 3200, 1, 20}, // 2.55%
+ { 3584, 1, 18}, // 1.74%
+ { 4096, 1, 16}, // 0.15%
+ { 4608, 1, 14}, // 1.74%
+ { 5376, 1, 12}, // 1.74%
+ { 6528, 1, 10}, // 0.54%
+ { 7168, 2, 9}, // 1.66%
+ { 8192, 1, 8}, // 0.15%
+ { 9344, 2, 7}, // 0.27%
+ { 10880, 1, 6}, // 0.54%
+ { 13952, 3, 4}, // 0.70%
+ { 16384, 1, 4}, // 0.15%
+ { 19072, 3, 3}, // 3.14%
+ { 21760, 2, 3}, // 0.47%
+ { 24576, 3, 2}, // 0.05%
+ { 28032, 6, 2}, // 0.22%
+ { 32768, 1, 2}, // 0.15%
+ { 38144, 5, 2}, // 7.41%
+ { 40960, 4, 2}, // 6.71%
+ { 49152, 3, 2}, // 0.05%
+ { 57344, 7, 2}, // 0.02%
+ { 65536, 2, 2}, // 0.07%
+ { 81920, 5, 2}, // 0.03%
+ { 98304, 3, 2}, // 0.05%
+ { 114688, 7, 2}, // 0.02%
+ { 131072, 4, 2}, // 0.04%
+ { 163840, 5, 2}, // 0.03%
+ { 196608, 6, 2}, // 0.02%
+ { 229376, 7, 2}, // 0.02%
+ { 262144, 8, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 18
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 85;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.02%
+ { 16, 1, 32}, // 0.02%
+ { 32, 1, 32}, // 0.02%
+ { 64, 1, 32}, // 0.02%
+ { 80, 1, 32}, // 0.04%
+ { 96, 1, 32}, // 0.04%
+ { 112, 1, 32}, // 0.04%
+ { 128, 1, 32}, // 0.02%
+ { 144, 1, 32}, // 0.04%
+ { 160, 1, 32}, // 0.04%
+ { 176, 1, 32}, // 0.05%
+ { 192, 1, 32}, // 0.04%
+ { 208, 1, 32}, // 0.04%
+ { 240, 1, 32}, // 0.04%
+ { 256, 1, 32}, // 0.02%
+ { 304, 1, 32}, // 0.05%
+ { 336, 1, 32}, // 0.04%
+ { 368, 1, 32}, // 0.07%
+ { 416, 1, 32}, // 0.04%
+ { 464, 1, 32}, // 0.19%
+ { 512, 1, 32}, // 0.02%
+ { 576, 1, 32}, // 0.04%
+ { 640, 1, 32}, // 0.17%
+ { 704, 1, 32}, // 0.12%
+ { 768, 1, 32}, // 0.12%
+ { 832, 1, 32}, // 0.04%
+ { 896, 1, 32}, // 0.21%
+ { 1024, 1, 32}, // 0.02%
+ { 1152, 1, 32}, // 0.26%
+ { 1280, 1, 32}, // 0.41%
+ { 1408, 1, 32}, // 0.12%
+ { 1536, 1, 32}, // 0.41%
+ { 1664, 1, 32}, // 0.36%
+ { 1792, 1, 32}, // 0.21%
+ { 1920, 1, 32}, // 0.41%
+ { 2048, 1, 32}, // 0.02%
+ { 2176, 1, 30}, // 0.41%
+ { 2304, 1, 28}, // 0.71%
+ { 2432, 1, 26}, // 0.76%
+ { 2560, 1, 25}, // 0.41%
+ { 2688, 1, 24}, // 0.56%
+ { 2816, 1, 23}, // 0.12%
+ { 2944, 1, 22}, // 0.07%
+ { 3072, 1, 21}, // 0.41%
+ { 3200, 1, 20}, // 1.15%
+ { 3328, 1, 19}, // 1.00%
+ { 3584, 1, 18}, // 0.21%
+ { 3840, 1, 17}, // 0.41%
+ { 4096, 1, 16}, // 0.02%
+ { 4736, 1, 13}, // 0.66%
+ { 5504, 1, 11}, // 1.35%
+ { 6144, 1, 10}, // 1.61%
+ { 6528, 1, 10}, // 0.41%
+ { 6784, 1, 9}, // 1.71%
+ { 7168, 1, 9}, // 1.61%
+ { 7680, 1, 8}, // 0.41%
+ { 8192, 1, 8}, // 0.02%
+ { 8704, 1, 7}, // 0.41%
+ { 9344, 1, 7}, // 0.21%
+ { 10368, 1, 6}, // 1.15%
+ { 11392, 1, 5}, // 0.07%
+ { 12416, 1, 5}, // 0.56%
+ { 13696, 1, 4}, // 0.76%
+ { 14464, 1, 4}, // 0.71%
+ { 16384, 1, 4}, // 0.02%
+ { 18688, 1, 3}, // 0.21%
+ { 21760, 1, 3}, // 0.41%
+ { 26112, 1, 2}, // 0.41%
+ { 29056, 1, 2}, // 0.26%
+ { 32768, 1, 2}, // 0.02%
+ { 37376, 1, 2}, // 0.21%
+ { 43648, 1, 2}, // 0.12%
+ { 52352, 1, 2}, // 0.17%
+ { 56064, 2, 2}, // 3.92%
+ { 65536, 1, 2}, // 0.02%
+ { 74880, 2, 2}, // 0.03%
+ { 87296, 1, 2}, // 0.12%
+ { 104832, 2, 2}, // 0.03%
+ { 112256, 3, 2}, // 0.09%
+ { 131072, 1, 2}, // 0.02%
+ { 149760, 3, 2}, // 5.03%
+ { 174720, 2, 2}, // 0.03%
+ { 209664, 4, 2}, // 0.03%
+ { 262144, 1, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 12
+static_assert(kMaxSize == 8192, "kMaxSize mismatch");
+static const int kCount = 42;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2Below64SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2Below64SizeClasses[SizeMap::kExperimentalPow2Below64SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 1.17%
+ { 16, 1, 32}, // 1.17%
+ { 32, 1, 32}, // 1.17%
+ { 64, 1, 32}, // 1.17%
+ { 80, 1, 32}, // 1.57%
+ { 96, 1, 32}, // 2.78%
+ { 112, 1, 32}, // 2.78%
+ { 128, 1, 32}, // 1.17%
+ { 144, 1, 32}, // 2.78%
+ { 160, 1, 32}, // 3.60%
+ { 176, 1, 32}, // 2.37%
+ { 192, 1, 32}, // 2.78%
+ { 208, 1, 32}, // 4.86%
+ { 240, 1, 32}, // 1.57%
+ { 256, 1, 32}, // 1.17%
+ { 272, 1, 32}, // 1.57%
+ { 304, 1, 32}, // 4.86%
+ { 336, 1, 32}, // 2.78%
+ { 368, 1, 32}, // 2.37%
+ { 400, 1, 32}, // 3.60%
+ { 448, 1, 32}, // 2.78%
+ { 512, 1, 32}, // 1.17%
+ { 576, 2, 32}, // 2.18%
+ { 640, 2, 32}, // 7.29%
+ { 704, 2, 32}, // 6.40%
+ { 768, 2, 32}, // 7.29%
+ { 896, 2, 32}, // 2.18%
+ { 1024, 2, 32}, // 0.59%
+ { 1152, 3, 32}, // 7.08%
+ { 1280, 3, 32}, // 7.08%
+ { 1536, 3, 32}, // 0.39%
+ { 1792, 4, 32}, // 1.88%
+ { 2048, 4, 32}, // 0.29%
+ { 2304, 4, 28}, // 1.88%
+ { 2688, 4, 24}, // 1.88%
+ { 3456, 6, 18}, // 1.79%
+ { 4096, 4, 16}, // 0.29%
+ { 5376, 4, 12}, // 1.88%
+ { 6144, 3, 10}, // 0.39%
+ { 7168, 7, 9}, // 0.17%
+ { 8192, 4, 8}, // 0.29%
+};
+#else
+#error "Unsupported TCMALLOC_PAGE_SHIFT value!"
+#endif
+#endif
+// clang-format on
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/experimental_pow2_size_class.cc b/contrib/libs/tcmalloc/tcmalloc/experimental_pow2_size_class.cc
index 1e6da051ca..3bd5e54c3c 100755
--- a/contrib/libs/tcmalloc/tcmalloc/experimental_pow2_size_class.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/experimental_pow2_size_class.cc
@@ -1,239 +1,239 @@
-// Copyright 2019 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "tcmalloc/common.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-
-namespace tcmalloc_internal {
-
-// <fixed> is fixed per-size-class overhead due to end-of-span fragmentation
-// and other factors. For instance, if we have a 96 byte size class, and use a
-// single 8KiB page, then we will hold 85 objects per span, and have 32 bytes
-// left over. There is also a fixed component of 48 bytes of TCMalloc metadata
-// per span. Together, the fixed overhead would be wasted/allocated =
-// (32 + 48) / (8192 - 32) ~= 0.98%.
-// There is also a dynamic component to overhead based on mismatches between the
-// number of bytes requested and the number of bytes provided by the size class.
-// Together they sum to the total overhead; for instance if you asked for a
-// 50-byte allocation that rounds up to a 64-byte size class, the dynamic
-// overhead would be 28%, and if <fixed> were 22% it would mean (on average)
-// 25 bytes of overhead for allocations of that size.
-
-// clang-format off
-#if defined(__cpp_aligned_new) && __STDCPP_DEFAULT_NEW_ALIGNMENT__ <= 8
-#if TCMALLOC_PAGE_SHIFT == 13
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 17;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.59%
- { 16, 1, 32}, // 0.59%
- { 32, 1, 32}, // 0.59%
- { 64, 1, 32}, // 0.59%
- { 128, 1, 32}, // 0.59%
- { 256, 1, 32}, // 0.59%
- { 512, 1, 32}, // 0.59%
- { 1024, 1, 32}, // 0.59%
- { 2048, 2, 32}, // 0.29%
- { 4096, 1, 16}, // 0.29%
- { 8192, 1, 8}, // 0.29%
- { 16384, 2, 4}, // 0.29%
- { 32768, 4, 2}, // 0.15%
- { 65536, 8, 2}, // 0.07%
- { 131072, 16, 2}, // 0.04%
- { 262144, 32, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 15
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 17;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.15%
- { 16, 1, 32}, // 0.15%
- { 32, 1, 32}, // 0.15%
- { 64, 1, 32}, // 0.15%
- { 128, 1, 32}, // 0.15%
- { 256, 1, 32}, // 0.15%
- { 512, 1, 32}, // 0.15%
- { 1024, 1, 32}, // 0.15%
- { 2048, 1, 32}, // 0.15%
- { 4096, 1, 16}, // 0.15%
- { 8192, 1, 8}, // 0.15%
- { 16384, 1, 4}, // 0.15%
- { 32768, 1, 2}, // 0.15%
- { 65536, 2, 2}, // 0.07%
- { 131072, 4, 2}, // 0.04%
- { 262144, 8, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 18
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 17;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.02%
- { 16, 1, 32}, // 0.02%
- { 32, 1, 32}, // 0.02%
- { 64, 1, 32}, // 0.02%
- { 128, 1, 32}, // 0.02%
- { 256, 1, 32}, // 0.02%
- { 512, 1, 32}, // 0.02%
- { 1024, 1, 32}, // 0.02%
- { 2048, 1, 32}, // 0.02%
- { 4096, 1, 16}, // 0.02%
- { 8192, 1, 8}, // 0.02%
- { 16384, 1, 4}, // 0.02%
- { 32768, 1, 2}, // 0.02%
- { 65536, 1, 2}, // 0.02%
- { 131072, 1, 2}, // 0.02%
- { 262144, 1, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 12
-static_assert(kMaxSize == 8192, "kMaxSize mismatch");
-static const int kCount = 12;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 1.17%
- { 16, 1, 32}, // 1.17%
- { 32, 1, 32}, // 1.17%
- { 64, 1, 32}, // 1.17%
- { 128, 1, 32}, // 1.17%
- { 256, 1, 32}, // 1.17%
- { 512, 1, 32}, // 1.17%
- { 1024, 2, 32}, // 0.59%
- { 2048, 4, 32}, // 0.29%
- { 4096, 4, 16}, // 0.29%
- { 8192, 4, 8}, // 0.29%
-};
-#else
-#error "Unsupported TCMALLOC_PAGE_SHIFT value!"
-#endif
-#else
-#if TCMALLOC_PAGE_SHIFT == 13
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 17;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.59%
- { 16, 1, 32}, // 0.59%
- { 32, 1, 32}, // 0.59%
- { 64, 1, 32}, // 0.59%
- { 128, 1, 32}, // 0.59%
- { 256, 1, 32}, // 0.59%
- { 512, 1, 32}, // 0.59%
- { 1024, 1, 32}, // 0.59%
- { 2048, 2, 32}, // 0.29%
- { 4096, 1, 16}, // 0.29%
- { 8192, 1, 8}, // 0.29%
- { 16384, 2, 4}, // 0.29%
- { 32768, 4, 2}, // 0.15%
- { 65536, 8, 2}, // 0.07%
- { 131072, 16, 2}, // 0.04%
- { 262144, 32, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 15
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 17;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.15%
- { 16, 1, 32}, // 0.15%
- { 32, 1, 32}, // 0.15%
- { 64, 1, 32}, // 0.15%
- { 128, 1, 32}, // 0.15%
- { 256, 1, 32}, // 0.15%
- { 512, 1, 32}, // 0.15%
- { 1024, 1, 32}, // 0.15%
- { 2048, 1, 32}, // 0.15%
- { 4096, 1, 16}, // 0.15%
- { 8192, 1, 8}, // 0.15%
- { 16384, 1, 4}, // 0.15%
- { 32768, 1, 2}, // 0.15%
- { 65536, 2, 2}, // 0.07%
- { 131072, 4, 2}, // 0.04%
- { 262144, 8, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 18
-static_assert(kMaxSize == 262144, "kMaxSize mismatch");
-static const int kCount = 17;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 0.02%
- { 16, 1, 32}, // 0.02%
- { 32, 1, 32}, // 0.02%
- { 64, 1, 32}, // 0.02%
- { 128, 1, 32}, // 0.02%
- { 256, 1, 32}, // 0.02%
- { 512, 1, 32}, // 0.02%
- { 1024, 1, 32}, // 0.02%
- { 2048, 1, 32}, // 0.02%
- { 4096, 1, 16}, // 0.02%
- { 8192, 1, 8}, // 0.02%
- { 16384, 1, 4}, // 0.02%
- { 32768, 1, 2}, // 0.02%
- { 65536, 1, 2}, // 0.02%
- { 131072, 1, 2}, // 0.02%
- { 262144, 1, 2}, // 0.02%
-};
-#elif TCMALLOC_PAGE_SHIFT == 12
-static_assert(kMaxSize == 8192, "kMaxSize mismatch");
-static const int kCount = 12;
-static_assert(kCount <= kNumClasses);
-const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
-const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
- // <bytes>, <pages>, <batch size> <fixed>
- { 0, 0, 0}, // +Inf%
- { 8, 1, 32}, // 1.17%
- { 16, 1, 32}, // 1.17%
- { 32, 1, 32}, // 1.17%
- { 64, 1, 32}, // 1.17%
- { 128, 1, 32}, // 1.17%
- { 256, 1, 32}, // 1.17%
- { 512, 1, 32}, // 1.17%
- { 1024, 2, 32}, // 0.59%
- { 2048, 4, 32}, // 0.29%
- { 4096, 4, 16}, // 0.29%
- { 8192, 4, 8}, // 0.29%
-};
-#else
-#error "Unsupported TCMALLOC_PAGE_SHIFT value!"
-#endif
-#endif
-// clang-format on
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+// Copyright 2019 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tcmalloc/common.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+
+namespace tcmalloc_internal {
+
+// <fixed> is fixed per-size-class overhead due to end-of-span fragmentation
+// and other factors. For instance, if we have a 96 byte size class, and use a
+// single 8KiB page, then we will hold 85 objects per span, and have 32 bytes
+// left over. There is also a fixed component of 48 bytes of TCMalloc metadata
+// per span. Together, the fixed overhead would be wasted/allocated =
+// (32 + 48) / (8192 - 32) ~= 0.98%.
+// There is also a dynamic component to overhead based on mismatches between the
+// number of bytes requested and the number of bytes provided by the size class.
+// Together they sum to the total overhead; for instance if you asked for a
+// 50-byte allocation that rounds up to a 64-byte size class, the dynamic
+// overhead would be 28%, and if <fixed> were 22% it would mean (on average)
+// 25 bytes of overhead for allocations of that size.
+
+// clang-format off
+#if defined(__cpp_aligned_new) && __STDCPP_DEFAULT_NEW_ALIGNMENT__ <= 8
+#if TCMALLOC_PAGE_SHIFT == 13
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 17;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.59%
+ { 16, 1, 32}, // 0.59%
+ { 32, 1, 32}, // 0.59%
+ { 64, 1, 32}, // 0.59%
+ { 128, 1, 32}, // 0.59%
+ { 256, 1, 32}, // 0.59%
+ { 512, 1, 32}, // 0.59%
+ { 1024, 1, 32}, // 0.59%
+ { 2048, 2, 32}, // 0.29%
+ { 4096, 1, 16}, // 0.29%
+ { 8192, 1, 8}, // 0.29%
+ { 16384, 2, 4}, // 0.29%
+ { 32768, 4, 2}, // 0.15%
+ { 65536, 8, 2}, // 0.07%
+ { 131072, 16, 2}, // 0.04%
+ { 262144, 32, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 15
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 17;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.15%
+ { 16, 1, 32}, // 0.15%
+ { 32, 1, 32}, // 0.15%
+ { 64, 1, 32}, // 0.15%
+ { 128, 1, 32}, // 0.15%
+ { 256, 1, 32}, // 0.15%
+ { 512, 1, 32}, // 0.15%
+ { 1024, 1, 32}, // 0.15%
+ { 2048, 1, 32}, // 0.15%
+ { 4096, 1, 16}, // 0.15%
+ { 8192, 1, 8}, // 0.15%
+ { 16384, 1, 4}, // 0.15%
+ { 32768, 1, 2}, // 0.15%
+ { 65536, 2, 2}, // 0.07%
+ { 131072, 4, 2}, // 0.04%
+ { 262144, 8, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 18
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 17;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.02%
+ { 16, 1, 32}, // 0.02%
+ { 32, 1, 32}, // 0.02%
+ { 64, 1, 32}, // 0.02%
+ { 128, 1, 32}, // 0.02%
+ { 256, 1, 32}, // 0.02%
+ { 512, 1, 32}, // 0.02%
+ { 1024, 1, 32}, // 0.02%
+ { 2048, 1, 32}, // 0.02%
+ { 4096, 1, 16}, // 0.02%
+ { 8192, 1, 8}, // 0.02%
+ { 16384, 1, 4}, // 0.02%
+ { 32768, 1, 2}, // 0.02%
+ { 65536, 1, 2}, // 0.02%
+ { 131072, 1, 2}, // 0.02%
+ { 262144, 1, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 12
+static_assert(kMaxSize == 8192, "kMaxSize mismatch");
+static const int kCount = 12;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 1.17%
+ { 16, 1, 32}, // 1.17%
+ { 32, 1, 32}, // 1.17%
+ { 64, 1, 32}, // 1.17%
+ { 128, 1, 32}, // 1.17%
+ { 256, 1, 32}, // 1.17%
+ { 512, 1, 32}, // 1.17%
+ { 1024, 2, 32}, // 0.59%
+ { 2048, 4, 32}, // 0.29%
+ { 4096, 4, 16}, // 0.29%
+ { 8192, 4, 8}, // 0.29%
+};
+#else
+#error "Unsupported TCMALLOC_PAGE_SHIFT value!"
+#endif
+#else
+#if TCMALLOC_PAGE_SHIFT == 13
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 17;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.59%
+ { 16, 1, 32}, // 0.59%
+ { 32, 1, 32}, // 0.59%
+ { 64, 1, 32}, // 0.59%
+ { 128, 1, 32}, // 0.59%
+ { 256, 1, 32}, // 0.59%
+ { 512, 1, 32}, // 0.59%
+ { 1024, 1, 32}, // 0.59%
+ { 2048, 2, 32}, // 0.29%
+ { 4096, 1, 16}, // 0.29%
+ { 8192, 1, 8}, // 0.29%
+ { 16384, 2, 4}, // 0.29%
+ { 32768, 4, 2}, // 0.15%
+ { 65536, 8, 2}, // 0.07%
+ { 131072, 16, 2}, // 0.04%
+ { 262144, 32, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 15
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 17;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.15%
+ { 16, 1, 32}, // 0.15%
+ { 32, 1, 32}, // 0.15%
+ { 64, 1, 32}, // 0.15%
+ { 128, 1, 32}, // 0.15%
+ { 256, 1, 32}, // 0.15%
+ { 512, 1, 32}, // 0.15%
+ { 1024, 1, 32}, // 0.15%
+ { 2048, 1, 32}, // 0.15%
+ { 4096, 1, 16}, // 0.15%
+ { 8192, 1, 8}, // 0.15%
+ { 16384, 1, 4}, // 0.15%
+ { 32768, 1, 2}, // 0.15%
+ { 65536, 2, 2}, // 0.07%
+ { 131072, 4, 2}, // 0.04%
+ { 262144, 8, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 18
+static_assert(kMaxSize == 262144, "kMaxSize mismatch");
+static const int kCount = 17;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 0.02%
+ { 16, 1, 32}, // 0.02%
+ { 32, 1, 32}, // 0.02%
+ { 64, 1, 32}, // 0.02%
+ { 128, 1, 32}, // 0.02%
+ { 256, 1, 32}, // 0.02%
+ { 512, 1, 32}, // 0.02%
+ { 1024, 1, 32}, // 0.02%
+ { 2048, 1, 32}, // 0.02%
+ { 4096, 1, 16}, // 0.02%
+ { 8192, 1, 8}, // 0.02%
+ { 16384, 1, 4}, // 0.02%
+ { 32768, 1, 2}, // 0.02%
+ { 65536, 1, 2}, // 0.02%
+ { 131072, 1, 2}, // 0.02%
+ { 262144, 1, 2}, // 0.02%
+};
+#elif TCMALLOC_PAGE_SHIFT == 12
+static_assert(kMaxSize == 8192, "kMaxSize mismatch");
+static const int kCount = 12;
+static_assert(kCount <= kNumClasses);
+const int SizeMap::kExperimentalPow2SizeClassesCount = kCount;
+const SizeClassInfo SizeMap::kExperimentalPow2SizeClasses[SizeMap::kExperimentalPow2SizeClassesCount] = {
+ // <bytes>, <pages>, <batch size> <fixed>
+ { 0, 0, 0}, // +Inf%
+ { 8, 1, 32}, // 1.17%
+ { 16, 1, 32}, // 1.17%
+ { 32, 1, 32}, // 1.17%
+ { 64, 1, 32}, // 1.17%
+ { 128, 1, 32}, // 1.17%
+ { 256, 1, 32}, // 1.17%
+ { 512, 1, 32}, // 1.17%
+ { 1024, 2, 32}, // 0.59%
+ { 2048, 4, 32}, // 0.29%
+ { 4096, 4, 16}, // 0.29%
+ { 8192, 4, 8}, // 0.29%
+};
+#else
+#error "Unsupported TCMALLOC_PAGE_SHIFT value!"
+#endif
+#endif
+// clang-format on
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
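The <fixed> column in the size-class tables above is plain span arithmetic that is easy to lose in the prose: end-of-span waste plus per-span metadata, amortized over the usable bytes. The standalone sketch below redoes the two examples quoted in the comment (a 96-byte class on an 8 KiB page, and a 50-byte request rounding up to the 64-byte class); the 48-byte metadata figure and the page size are taken from the comment, not from any real build configuration.

// overhead_sketch.cc: the arithmetic behind the <fixed> column above.
#include <cstddef>
#include <cstdio>

int main() {
  // Fixed component: end-of-span waste plus per-span metadata, spread over the
  // usable bytes of a one-page (8 KiB) span holding 96-byte objects.
  constexpr size_t kPageBytes = 8192;
  constexpr size_t kObjectSize = 96;
  constexpr size_t kSpanMetadata = 48;  // figure quoted in the comment
  constexpr size_t kObjectsPerSpan = kPageBytes / kObjectSize;              // 85
  constexpr size_t kLeftover = kPageBytes - kObjectsPerSpan * kObjectSize;  // 32
  const double fixed = static_cast<double>(kLeftover + kSpanMetadata) /
                       static_cast<double>(kPageBytes - kLeftover);
  std::printf("fixed overhead  ~= %.2f%%\n", fixed * 100);  // ~0.98%

  // Dynamic component: a 50-byte request served from the 64-byte class.
  const double dynamic = (64.0 - 50.0) / 50.0;
  std::printf("dynamic overhead = %.0f%%\n", dynamic * 100);  // 28%
  return 0;
}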
diff --git a/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator.cc b/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator.cc
index cc02ed7a05..817fc52324 100644
--- a/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator.cc
@@ -20,7 +20,7 @@
#include <unistd.h>
#include <algorithm>
-#include <array>
+#include <array>
#include <cmath>
#include <csignal>
#include <tuple>
@@ -30,7 +30,7 @@
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/debugging/stacktrace.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/strings/string_view.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/environment.h"
@@ -41,9 +41,9 @@
#include "tcmalloc/static_vars.h"
#include "tcmalloc/system-alloc.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
const size_t GuardedPageAllocator::kMagicSize; // NOLINT
@@ -82,7 +82,7 @@ void *GuardedPageAllocator::Allocate(size_t size, size_t alignment) {
ASSERT(size <= page_size_);
ASSERT(alignment <= page_size_);
- ASSERT(alignment == 0 || absl::has_single_bit(alignment));
+ ASSERT(alignment == 0 || absl::has_single_bit(alignment));
void *result = reinterpret_cast<void *>(SlotToAddr(free_slot));
if (mprotect(result, page_size_, PROT_READ | PROT_WRITE) == -1) {
ASSERT(false && "mprotect failed");
@@ -175,7 +175,7 @@ static int GetChainedRate() {
}
}
-void GuardedPageAllocator::Print(Printer *out) {
+void GuardedPageAllocator::Print(Printer *out) {
absl::base_internal::SpinLockHolder h(&guarded_page_lock);
out->printf(
"\n"
@@ -360,14 +360,14 @@ void GuardedPageAllocator::MaybeRightAlign(size_t slot, size_t size,
// If alignment == 0, the necessary alignment is never larger than the size
// rounded up to the next power of 2. We use this fact to minimize alignment
- // padding between the end of small allocations and their guard pages.
- //
- // For allocations larger than the greater of kAlignment and
- // __STDCPP_DEFAULT_NEW_ALIGNMENT__, we're safe aligning to that value.
- size_t default_alignment =
- std::min(absl::bit_ceil(size),
- std::max(kAlignment,
- static_cast<size_t>(__STDCPP_DEFAULT_NEW_ALIGNMENT__)));
+ // padding between the end of small allocations and their guard pages.
+ //
+ // For allocations larger than the greater of kAlignment and
+ // __STDCPP_DEFAULT_NEW_ALIGNMENT__, we're safe aligning to that value.
+ size_t default_alignment =
+ std::min(absl::bit_ceil(size),
+ std::max(kAlignment,
+ static_cast<size_t>(__STDCPP_DEFAULT_NEW_ALIGNMENT__)));
// Ensure valid alignment.
alignment = std::max(alignment, default_alignment);
@@ -384,7 +384,7 @@ void GuardedPageAllocator::MaybeRightAlign(size_t slot, size_t size,
// If this failure occurs during "bazel test", writes a warning for Bazel to
// display.
static void RecordBazelWarning(absl::string_view error) {
- const char *warning_file = thread_safe_getenv("TEST_WARNINGS_OUTPUT_FILE");
+ const char *warning_file = thread_safe_getenv("TEST_WARNINGS_OUTPUT_FILE");
if (!warning_file) return; // Not a bazel test.
constexpr char warning[] = "GWP-ASan error detected: ";
@@ -402,7 +402,7 @@ static void RecordBazelWarning(absl::string_view error) {
// do here). So we write directly to the XML file instead.
//
static void RecordTestFailure(absl::string_view error) {
- const char *xml_file = thread_safe_getenv("XML_OUTPUT_FILE");
+ const char *xml_file = thread_safe_getenv("XML_OUTPUT_FILE");
if (!xml_file) return; // Not a gUnit test.
// Record test failure for Sponge.
@@ -467,9 +467,9 @@ static void SegvHandler(int signo, siginfo_t *info, void *context) {
Static::guardedpage_allocator().GetAllocationOffsetAndSize(fault);
Log(kLog, __FILE__, __LINE__,
- "*** GWP-ASan "
- "(https://google.github.io/tcmalloc/gwp-asan.html) "
- "has detected a memory error ***");
+ "*** GWP-ASan "
+ "(https://google.github.io/tcmalloc/gwp-asan.html) "
+ "has detected a memory error ***");
Log(kLog, __FILE__, __LINE__, ">>> Access at offset", offset,
"into buffer of length", size);
Log(kLog, __FILE__, __LINE__,
@@ -557,6 +557,6 @@ extern "C" void MallocExtension_Internal_ActivateGuardedSampling() {
});
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
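MaybeRightAlign above picks a default alignment of min(bit_ceil(size), max(kAlignment, __STDCPP_DEFAULT_NEW_ALIGNMENT__)) so small allocations can sit close to the trailing guard page with minimal padding. A minimal restatement of that expression, assuming C++20 <bit> in place of absl::bit_ceil and an illustrative kAlignment of 8; the real value comes from tcmalloc/common.h.

// default_alignment_sketch.cc: the alignment choice in MaybeRightAlign above.
#include <algorithm>
#include <bit>
#include <cstddef>
#include <cstdio>

constexpr size_t kAlignment = 8;  // assumption for illustration only

size_t DefaultAlignment(size_t size) {
  return std::min(std::bit_ceil(size),
                  std::max(kAlignment,
                           static_cast<size_t>(__STDCPP_DEFAULT_NEW_ALIGNMENT__)));
}

int main() {
  // Small allocations get at most their size rounded up to a power of two;
  // larger ones get the default operator new alignment.
  for (size_t s : {3, 8, 24, 100, 4096})
    std::printf("size %4zu -> default alignment %zu\n", s, DefaultAlignment(s));
  return 0;
}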
diff --git a/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator.h b/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator.h
index e5a6118c08..bd45c7da48 100644
--- a/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator.h
+++ b/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator.h
@@ -27,9 +27,9 @@
#include "tcmalloc/common.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
ABSL_CONST_INIT extern absl::base_internal::SpinLock guarded_page_lock;
@@ -172,7 +172,7 @@ class GuardedPageAllocator {
// Writes a human-readable summary of GuardedPageAllocator's internal state to
// *out.
- void Print(Printer *out) ABSL_LOCKS_EXCLUDED(guarded_page_lock);
+ void Print(Printer *out) ABSL_LOCKS_EXCLUDED(guarded_page_lock);
void PrintInPbtxt(PbtxtRegion *gwp_asan) const
ABSL_LOCKS_EXCLUDED(guarded_page_lock);
@@ -304,8 +304,8 @@ struct ConstexprCheck {
"GuardedPageAllocator must have a constexpr constructor");
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_GUARDED_PAGE_ALLOCATOR_H_
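The Print declaration above carries ABSL_LOCKS_EXCLUDED(guarded_page_lock), which lets clang's -Wthread-safety analysis flag callers that already hold the lock Print is about to take. A minimal sketch of the same annotation pattern; it uses absl::Mutex so the example is self-contained, whereas the real guarded_page_lock is an absl SpinLock.

// locks_excluded_sketch.cc: absl thread-safety annotations in isolation.
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Stats {
 public:
  // Callers must not hold mu_; Print() acquires it itself.
  void Print() ABSL_LOCKS_EXCLUDED(mu_) {
    absl::MutexLock l(&mu_);
    ++prints_;
  }

 private:
  absl::Mutex mu_;
  int prints_ ABSL_GUARDED_BY(mu_) = 0;
};

int main() {
  Stats s;
  s.Print();  // calling this while holding mu_ would trip -Wthread-safety
  return 0;
}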
diff --git a/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator_benchmark.cc b/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator_benchmark.cc
index fb6d0ea265..eace78815b 100644
--- a/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator_benchmark.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator_benchmark.cc
@@ -19,12 +19,12 @@
#include "tcmalloc/guarded_page_allocator.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
-static constexpr size_t kMaxGpaPages = GuardedPageAllocator::kGpaMaxPages;
+static constexpr size_t kMaxGpaPages = GuardedPageAllocator::kGpaMaxPages;
// Size of pages used by GuardedPageAllocator.
static size_t PageSize() {
@@ -34,9 +34,9 @@ static size_t PageSize() {
}
void BM_AllocDealloc(benchmark::State& state) {
- static GuardedPageAllocator* gpa = []() {
- auto gpa = new GuardedPageAllocator;
- absl::base_internal::SpinLockHolder h(&pageheap_lock);
+ static GuardedPageAllocator* gpa = []() {
+ auto gpa = new GuardedPageAllocator;
+ absl::base_internal::SpinLockHolder h(&pageheap_lock);
gpa->Init(kMaxGpaPages, kMaxGpaPages);
gpa->AllowAllocations();
return gpa;
@@ -55,6 +55,6 @@ BENCHMARK(BM_AllocDealloc)->Range(1, PageSize());
BENCHMARK(BM_AllocDealloc)->Arg(1)->ThreadRange(1, kMaxGpaPages);
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
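BM_AllocDealloc above wraps GuardedPageAllocator in a Google Benchmark loop, constructing the allocator once through a static lambda so Init and AllowAllocations run exactly once across all benchmark threads. Below is the same benchmark shape against plain malloc/free, so it builds without tcmalloc internals (link with -lbenchmark -lpthread); the size range is illustrative only.

// bm_alloc_sketch.cc: the benchmark skeleton, minus the guarded allocator.
#include <cstdlib>

#include "benchmark/benchmark.h"

static void BM_MallocFree(benchmark::State& state) {
  const size_t size = state.range(0);
  for (auto _ : state) {
    void* p = std::malloc(size);
    benchmark::DoNotOptimize(p);  // keep the pair from being optimized away
    std::free(p);
  }
}
BENCHMARK(BM_MallocFree)->Range(1, 8192);

BENCHMARK_MAIN();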
diff --git a/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator_test.cc b/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator_test.cc
index 0d603de690..463af9b8bc 100644
--- a/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/guarded_page_allocator_test.cc
@@ -27,12 +27,12 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/casts.h"
+#include "absl/base/casts.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
@@ -41,10 +41,10 @@
#include "tcmalloc/static_vars.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
-static constexpr size_t kMaxGpaPages = GuardedPageAllocator::kGpaMaxPages;
+static constexpr size_t kMaxGpaPages = GuardedPageAllocator::kGpaMaxPages;
// Size of pages used by GuardedPageAllocator.
static size_t PageSize() {
@@ -56,20 +56,20 @@ static size_t PageSize() {
class GuardedPageAllocatorTest : public testing::Test {
protected:
GuardedPageAllocatorTest() {
- absl::base_internal::SpinLockHolder h(&pageheap_lock);
+ absl::base_internal::SpinLockHolder h(&pageheap_lock);
gpa_.Init(kMaxGpaPages, kMaxGpaPages);
gpa_.AllowAllocations();
}
explicit GuardedPageAllocatorTest(size_t num_pages) {
- absl::base_internal::SpinLockHolder h(&pageheap_lock);
+ absl::base_internal::SpinLockHolder h(&pageheap_lock);
gpa_.Init(num_pages, kMaxGpaPages);
gpa_.AllowAllocations();
}
~GuardedPageAllocatorTest() override { gpa_.Destroy(); }
- GuardedPageAllocator gpa_;
+ GuardedPageAllocator gpa_;
};
class GuardedPageAllocatorParamTest
@@ -92,36 +92,36 @@ TEST_F(GuardedPageAllocatorTest, SingleAllocDealloc) {
EXPECT_DEATH(buf[PageSize() - 1] = 'B', "");
}
-TEST_F(GuardedPageAllocatorTest, NoAlignmentProvided) {
- constexpr size_t kLargeObjectAlignment = std::max(
- kAlignment, static_cast<size_t>(__STDCPP_DEFAULT_NEW_ALIGNMENT__));
-
- for (size_t base_size = 1; base_size <= 64; base_size <<= 1) {
- for (size_t size : {base_size, base_size + 1}) {
- SCOPED_TRACE(size);
-
- constexpr int kElements = 10;
- std::array<void *, kElements> ptrs;
-
- // Make several allocation attempts to encounter left/right-alignment in
- // the guarded region.
- for (int i = 0; i < kElements; i++) {
- ptrs[i] = gpa_.Allocate(size, 0);
- EXPECT_NE(ptrs[i], nullptr);
- EXPECT_TRUE(gpa_.PointerIsMine(ptrs[i]));
-
- size_t observed_alignment =
- 1 << absl::countr_zero(absl::bit_cast<uintptr_t>(ptrs[i]));
- EXPECT_GE(observed_alignment, std::min(size, kLargeObjectAlignment));
- }
-
- for (void *ptr : ptrs) {
- gpa_.Deallocate(ptr);
- }
- }
- }
-}
-
+TEST_F(GuardedPageAllocatorTest, NoAlignmentProvided) {
+ constexpr size_t kLargeObjectAlignment = std::max(
+ kAlignment, static_cast<size_t>(__STDCPP_DEFAULT_NEW_ALIGNMENT__));
+
+ for (size_t base_size = 1; base_size <= 64; base_size <<= 1) {
+ for (size_t size : {base_size, base_size + 1}) {
+ SCOPED_TRACE(size);
+
+ constexpr int kElements = 10;
+ std::array<void *, kElements> ptrs;
+
+ // Make several allocation attempts to encounter left/right-alignment in
+ // the guarded region.
+ for (int i = 0; i < kElements; i++) {
+ ptrs[i] = gpa_.Allocate(size, 0);
+ EXPECT_NE(ptrs[i], nullptr);
+ EXPECT_TRUE(gpa_.PointerIsMine(ptrs[i]));
+
+ size_t observed_alignment =
+ 1 << absl::countr_zero(absl::bit_cast<uintptr_t>(ptrs[i]));
+ EXPECT_GE(observed_alignment, std::min(size, kLargeObjectAlignment));
+ }
+
+ for (void *ptr : ptrs) {
+ gpa_.Deallocate(ptr);
+ }
+ }
+ }
+}
+
TEST_F(GuardedPageAllocatorTest, AllocDeallocAligned) {
for (size_t align = 1; align <= PageSize(); align <<= 1) {
constexpr size_t alloc_size = 1;
@@ -164,7 +164,7 @@ TEST_F(GuardedPageAllocatorTest, PointerIsMine) {
TEST_F(GuardedPageAllocatorTest, Print) {
char buf[1024] = {};
- Printer out(buf, sizeof(buf));
+ Printer out(buf, sizeof(buf));
gpa_.Print(&out);
EXPECT_THAT(buf, testing::ContainsRegex("GWP-ASan Status"));
}
@@ -239,5 +239,5 @@ TEST_F(GuardedPageAllocatorTest, ThreadedHighContention) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
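The NoAlignmentProvided test above recovers a pointer's observed alignment from the number of trailing zero bits in its address and compares it against min(size, kLargeObjectAlignment). The check in isolation, using the C++20 <bit> equivalents of the absl::countr_zero and absl::bit_cast calls in the test:

// observed_alignment_sketch.cc: alignment actually provided by a pointer.
#include <bit>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Largest power of two dividing the address, i.e. the alignment the pointer
// really has (always at least the alignment that was requested).
size_t ObservedAlignment(const void* p) {
  const uintptr_t addr = reinterpret_cast<uintptr_t>(p);
  if (addr == 0) return 0;  // avoid shifting by the full bit width
  return size_t{1} << std::countr_zero(addr);
}

int main() {
  void* p = std::aligned_alloc(64, 128);
  if (p == nullptr) return 1;
  std::printf("%p is aligned to at least %zu bytes\n", p, ObservedAlignment(p));
  std::free(p);
  return 0;
}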
diff --git a/contrib/libs/tcmalloc/tcmalloc/heap_profiling_test.cc b/contrib/libs/tcmalloc/tcmalloc/heap_profiling_test.cc
index 5c2473ffed..88172e6657 100644
--- a/contrib/libs/tcmalloc/tcmalloc/heap_profiling_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/heap_profiling_test.cc
@@ -19,7 +19,7 @@
#include <new>
#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/parameter_accessors.h"
#include "tcmalloc/malloc_extension.h"
@@ -62,23 +62,23 @@ TEST(HeapProfilingTest, PeakHeapTracking) {
// make a large allocation to force a new peak heap sample
// (total live: 50MiB)
void *first = ::operator new(50 << 20);
- // TODO(b/183453911): Remove workaround for GCC 10.x deleting operator new,
- // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94295.
- benchmark::DoNotOptimize(first);
+ // TODO(b/183453911): Remove workaround for GCC 10.x deleting operator new,
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94295.
+ benchmark::DoNotOptimize(first);
int64_t peak_after_first = ProfileSize(ProfileType::kPeakHeap);
EXPECT_NEAR(peak_after_first, start_peak_sz + (50 << 20), 10 << 20);
// a small allocation shouldn't increase the peak
// (total live: 54MiB)
void *second = ::operator new(4 << 20);
- benchmark::DoNotOptimize(second);
+ benchmark::DoNotOptimize(second);
int64_t peak_after_second = ProfileSize(ProfileType::kPeakHeap);
EXPECT_EQ(peak_after_second, peak_after_first);
// but a large one should
// (total live: 254MiB)
void *third = ::operator new(200 << 20);
- benchmark::DoNotOptimize(third);
+ benchmark::DoNotOptimize(third);
int64_t peak_after_third = ProfileSize(ProfileType::kPeakHeap);
EXPECT_NEAR(peak_after_third, peak_after_second + (200 << 20), 10 << 20);
@@ -96,9 +96,9 @@ TEST(HeapProfilingTest, PeakHeapTracking) {
// going back up less than previous peak shouldn't affect the peak
// (total live: 200MiB)
void *fourth = ::operator new(100 << 20);
- benchmark::DoNotOptimize(fourth);
+ benchmark::DoNotOptimize(fourth);
void *fifth = ::operator new(100 << 20);
- benchmark::DoNotOptimize(fifth);
+ benchmark::DoNotOptimize(fifth);
EXPECT_EQ(ProfileSize(ProfileType::kPeakHeap), peak_after_third);
// passing the old peak significantly, even with many small allocations,
@@ -107,7 +107,7 @@ TEST(HeapProfilingTest, PeakHeapTracking) {
void *bitsy[1 << 10];
for (int i = 0; i < 1 << 10; i++) {
bitsy[i] = ::operator new(1 << 18);
- benchmark::DoNotOptimize(bitsy[i]);
+ benchmark::DoNotOptimize(bitsy[i]);
}
EXPECT_GT(ProfileSize(ProfileType::kPeakHeap), peak_after_third);
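The benchmark::DoNotOptimize calls added throughout the test above are the workaround named in the TODO: GCC 10 may delete an unused operator new/operator delete pair (gcc.gnu.org bug 94295), which would stop the allocations from ever reaching the profiler and leave the peak-heap samples empty. The idiom in isolation; the benchmark library is pulled in only for DoNotOptimize, and the 50 MiB size simply mirrors the test.

// do_not_optimize_sketch.cc: pinning an allocation the optimizer could elide.
#include <new>

#include "benchmark/benchmark.h"

int main() {
  void* block = ::operator new(50 << 20);  // 50 MiB, as in the test above
  benchmark::DoNotOptimize(block);         // compiler must assume it escapes
  ::operator delete(block);
  return 0;
}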
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_address_map.cc b/contrib/libs/tcmalloc/tcmalloc/huge_address_map.cc
index 898c6d934a..fca1125532 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_address_map.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_address_map.cc
@@ -22,9 +22,9 @@
#include "absl/base/internal/cycleclock.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
const HugeAddressMap::Node *HugeAddressMap::Node::next() const {
const Node *n = right_;
@@ -116,7 +116,7 @@ size_t HugeAddressMap::nranges() const { return used_nodes_; }
HugeLength HugeAddressMap::total_mapped() const { return total_size_; }
-void HugeAddressMap::Print(Printer *out) const {
+void HugeAddressMap::Print(Printer *out) const {
out->printf("HugeAddressMap: treap %zu / %zu nodes used / created\n",
used_nodes_, total_nodes_);
const size_t longest = root_ ? root_->longest_.raw_num() : 0;
@@ -369,6 +369,6 @@ HugeAddressMap::Node *HugeAddressMap::Get(HugeRange r) {
HugeAddressMap::Node::Node(HugeRange r, int prio)
: range_(r), prio_(prio), when_(absl::base_internal::CycleClock::Now()) {}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_address_map.h b/contrib/libs/tcmalloc/tcmalloc/huge_address_map.h
index 3c71f19a3f..4a9889e765 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_address_map.h
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_address_map.h
@@ -20,9 +20,9 @@
#include "tcmalloc/huge_pages.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Maintains a set of disjoint HugeRanges, merging adjacent ranges into one.
// Exposes a balanced (somehow) binary tree of free ranges on address,
@@ -93,7 +93,7 @@ class HugeAddressMap {
// Statistics
size_t nranges() const;
HugeLength total_mapped() const;
- void Print(Printer *out) const;
+ void Print(Printer *out) const;
void PrintInPbtxt(PbtxtRegion *hpaa) const;
// Add <r> to the map, merging with adjacent ranges as needed.
@@ -141,8 +141,8 @@ inline const HugeAddressMap::Node *HugeAddressMap::root() const {
return root_;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_HUGE_ADDRESS_MAP_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_address_map_test.cc b/contrib/libs/tcmalloc/tcmalloc/huge_address_map_test.cc
index 455cd63809..801c797c11 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_address_map_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_address_map_test.cc
@@ -22,7 +22,7 @@
#include "gtest/gtest.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class HugeAddressMapTest : public ::testing::Test {
@@ -81,5 +81,5 @@ TEST_F(HugeAddressMapTest, Merging) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_allocator.cc b/contrib/libs/tcmalloc/tcmalloc/huge_allocator.cc
index c77f4522ad..552d4f51b7 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_allocator.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_allocator.cc
@@ -19,11 +19,11 @@
#include "tcmalloc/huge_address_map.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
-void HugeAllocator::Print(Printer *out) {
+void HugeAllocator::Print(Printer *out) {
out->printf("HugeAllocator: contiguous, unbacked hugepage(s)\n");
free_.Print(out);
out->printf(
@@ -170,6 +170,6 @@ void HugeAllocator::AddSpanStats(SmallSpanStats *small, LargeSpanStats *large,
}
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_allocator.h b/contrib/libs/tcmalloc/tcmalloc/huge_allocator.h
index 6242805c49..7e3936832d 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_allocator.h
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_allocator.h
@@ -24,10 +24,10 @@
#include "tcmalloc/huge_pages.h"
#include "tcmalloc/stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
-
+namespace tcmalloc_internal {
+
// these typedefs allow replacement of tcmalloc::System* for tests.
typedef void *(*MemoryAllocFunction)(size_t bytes, size_t *actual,
size_t align);
@@ -68,7 +68,7 @@ class HugeAllocator {
return s;
}
- void Print(Printer *out);
+ void Print(Printer *out);
void PrintInPbtxt(PbtxtRegion *hpaa) const;
private:
@@ -101,8 +101,8 @@ class HugeAllocator {
HugeRange AllocateRange(HugeLength n);
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_HUGE_ALLOCATOR_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_allocator_test.cc b/contrib/libs/tcmalloc/tcmalloc/huge_allocator_test.cc
index 150075b88e..32fe91c3b5 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_allocator_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_allocator_test.cc
@@ -32,7 +32,7 @@
#include "tcmalloc/internal/logging.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class HugeAllocatorTest : public testing::TestWithParam<bool> {
@@ -445,5 +445,5 @@ INSTANTIATE_TEST_SUITE_P(
});
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_cache.cc b/contrib/libs/tcmalloc/tcmalloc/huge_cache.cc
index 0d25da2983..1d39783efc 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_cache.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_cache.cc
@@ -23,9 +23,9 @@
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
template <size_t kEpochs>
void MinMaxTracker<kEpochs>::Report(HugeLength val) {
@@ -53,7 +53,7 @@ HugeLength MinMaxTracker<kEpochs>::MinOverTime(absl::Duration t) const {
}
template <size_t kEpochs>
-void MinMaxTracker<kEpochs>::Print(Printer *out) const {
+void MinMaxTracker<kEpochs>::Print(Printer *out) const {
// Prints timestamp:min_pages:max_pages for each window with records.
// Timestamp == kEpochs - 1 is the most recent measurement.
const int64_t millis = absl::ToInt64Milliseconds(kEpochLength);
@@ -372,7 +372,7 @@ HugeAddressMap::Node *HugeCache::Find(HugeLength n) {
return best;
}
-void HugeCache::Print(Printer *out) {
+void HugeCache::Print(Printer *out) {
const int64_t millis = absl::ToInt64Milliseconds(kCacheTime);
out->printf(
"HugeCache: contains unused, backed hugepage(s) "
@@ -439,9 +439,9 @@ void HugeCache::PrintInPbtxt(PbtxtRegion *hpaa) {
const double overflow_rate = safe_ratio(overflows_, fills_);
// number of bytes in HugeCache
- hpaa->PrintI64("cached_huge_page_bytes", size_.in_bytes());
+ hpaa->PrintI64("cached_huge_page_bytes", size_.in_bytes());
// max allowed bytes in HugeCache
- hpaa->PrintI64("max_cached_huge_page_bytes", limit().in_bytes());
+ hpaa->PrintI64("max_cached_huge_page_bytes", limit().in_bytes());
// lifetime cache hit rate
hpaa->PrintDouble("huge_cache_hit_rate", hit_rate);
// lifetime cache overflow rate
@@ -489,6 +489,6 @@ void HugeCache::PrintInPbtxt(PbtxtRegion *hpaa) {
detailed_tracker_.PrintInPbtxt(hpaa);
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
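The HugeCache statistics above are built on MinMaxTracker, which slices a trailing time window into kEpochs fixed-length epochs and keeps one minimum and one maximum per epoch, so MinOverTime and MaxOverTime reduce to scanning a small array. A simplified sketch of that idea with an explicit epoch counter in place of the injected Clock; kEpochs and the uint64_t value type are arbitrary choices here, not the real template parameters.

// minmax_tracker_sketch.cc: per-epoch min/max over a trailing window.
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>
#include <limits>

template <size_t kEpochs>
class MinMaxSketch {
 public:
  MinMaxSketch() { mins_.fill(kNoMin); }

  // Close the current epoch and start a fresh one, discarding the oldest.
  void Advance() {
    cur_ = (cur_ + 1) % kEpochs;
    mins_[cur_] = kNoMin;
    maxes_[cur_] = 0;
  }

  void Report(uint64_t val) {
    mins_[cur_] = std::min(mins_[cur_], val);
    maxes_[cur_] = std::max(maxes_[cur_], val);
  }

  uint64_t MaxOverWindow() const {
    uint64_t m = 0;
    for (uint64_t v : maxes_) m = std::max(m, v);
    return m;
  }

 private:
  static constexpr uint64_t kNoMin = std::numeric_limits<uint64_t>::max();
  size_t cur_ = 0;
  std::array<uint64_t, kEpochs> mins_{};
  std::array<uint64_t, kEpochs> maxes_{};  // zero-initialized
};

int main() {
  MinMaxSketch<16> tracker;
  tracker.Report(3);
  tracker.Advance();
  tracker.Report(8);
  std::printf("max over window: %ju\n",
              static_cast<uintmax_t>(tracker.MaxOverWindow()));  // 8
  return 0;
}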
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_cache.h b/contrib/libs/tcmalloc/tcmalloc/huge_cache.h
index 2ffda26cb2..c225834a96 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_cache.h
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_cache.h
@@ -28,14 +28,14 @@
#include "tcmalloc/experiment_config.h"
#include "tcmalloc/huge_allocator.h"
#include "tcmalloc/huge_pages.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/timeseries_tracker.h"
#include "tcmalloc/stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
typedef void (*MemoryModifyFunction)(void *start, size_t len);
@@ -48,7 +48,7 @@ class MinMaxTracker {
: kEpochLength(w / kEpochs), timeseries_(clock, w) {}
void Report(HugeLength val);
- void Print(Printer *out) const;
+ void Print(Printer *out) const;
void PrintInPbtxt(PbtxtRegion *hpaa) const;
// If t < kEpochLength, these functions return statistics for last epoch. The
@@ -151,7 +151,7 @@ class HugeCache {
return s;
}
- void Print(Printer *out);
+ void Print(Printer *out);
void PrintInPbtxt(PbtxtRegion *hpaa);
private:
@@ -221,8 +221,8 @@ class HugeCache {
MemoryModifyFunction unback_;
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_HUGE_CACHE_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_cache_test.cc b/contrib/libs/tcmalloc/tcmalloc/huge_cache_test.cc
index 2699b44303..41a6427519 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_cache_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_cache_test.cc
@@ -36,7 +36,7 @@
#include "tcmalloc/stats.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class HugeCacheTest : public testing::Test {
@@ -55,10 +55,10 @@ class HugeCacheTest : public testing::Test {
// Use a tiny fraction of actual size so we can test aggressively.
static void* AllocateFake(size_t bytes, size_t* actual, size_t align) {
if (bytes % kHugePageSize != 0) {
- Crash(kCrash, __FILE__, __LINE__, "not aligned", bytes, kHugePageSize);
+ Crash(kCrash, __FILE__, __LINE__, "not aligned", bytes, kHugePageSize);
}
if (align % kHugePageSize != 0) {
- Crash(kCrash, __FILE__, __LINE__, "not aligned", align, kHugePageSize);
+ Crash(kCrash, __FILE__, __LINE__, "not aligned", align, kHugePageSize);
}
*actual = bytes;
// we'll actually provide hidden backing, one word per hugepage.
@@ -127,7 +127,7 @@ class HugeCacheTest : public testing::Test {
clock_offset_ += absl::ToInt64Nanoseconds(d);
}
- HugeAllocator alloc_{AllocateFake, MallocMetadata};
+ HugeAllocator alloc_{AllocateFake, MallocMetadata};
HugeCache cache_{&alloc_, MallocMetadata, MockUnback,
Clock{.now = GetClock, .freq = GetClockFrequency}};
};
@@ -213,7 +213,7 @@ TEST_F(HugeCacheTest, Regret) {
absl::Duration d = absl::Seconds(20);
Advance(d);
char buf[512];
- Printer out(buf, 512);
+ Printer out(buf, 512);
cache_.Print(&out); // To update the regret
uint64_t expected_regret = absl::ToInt64Nanoseconds(d) * cached.raw_num();
// Not exactly accurate since the mock clock advances with real time, and
@@ -511,7 +511,7 @@ int64_t MinMaxTrackerTest::clock_{0};
TEST_F(MinMaxTrackerTest, Works) {
const absl::Duration kDuration = absl::Seconds(2);
- MinMaxTracker<> tracker{
+ MinMaxTracker<> tracker{
Clock{.now = FakeClock, .freq = GetFakeClockFrequency}, kDuration};
tracker.Report(NHugePages(0));
@@ -559,5 +559,5 @@ TEST_F(MinMaxTrackerTest, Works) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
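HugeCacheTest above never sleeps: it hands the cache a Clock made of two function pointers (now, freq) and advances time by bumping a static offset, so cache aging and regret accounting can be tested deterministically. A sketch of the same injection pattern; the Clock struct below is a stand-in for the tcmalloc-internal one, and the designated initializers assume C++20.

// fake_clock_sketch.cc: driving time-based code with an injected clock.
#include <cstdint>
#include <cstdio>

struct Clock {
  int64_t (*now)();
  double (*freq)();
};

namespace {
int64_t fake_ticks = 0;
int64_t FakeNow() { return fake_ticks; }
double FakeFreq() { return 1e9; }  // pretend ticks are nanoseconds
void AdvanceNanos(int64_t ns) { fake_ticks += ns; }
}  // namespace

// Something time-based that takes the injected clock, as HugeCache does.
double SecondsSince(const Clock& clock, int64_t start) {
  return (clock.now() - start) / clock.freq();
}

int main() {
  Clock clock{.now = FakeNow, .freq = FakeFreq};
  int64_t start = clock.now();
  AdvanceNanos(2'000'000'000);  // "sleep" two seconds without sleeping
  std::printf("elapsed: %.1f s\n", SecondsSince(clock, start));  // 2.0
  return 0;
}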
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator.cc b/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator.cc
index e662456df6..f9aa10d134 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator.cc
@@ -36,12 +36,12 @@
#include "tcmalloc/static_vars.h"
#include "tcmalloc/stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
bool decide_want_hpaa();
-ABSL_ATTRIBUTE_WEAK int default_want_hpaa();
+ABSL_ATTRIBUTE_WEAK int default_want_hpaa();
ABSL_ATTRIBUTE_WEAK int default_subrelease();
bool decide_subrelease() {
@@ -50,34 +50,34 @@ bool decide_subrelease() {
return false;
}
- const char *e = thread_safe_getenv("TCMALLOC_HPAA_CONTROL");
+ const char *e = thread_safe_getenv("TCMALLOC_HPAA_CONTROL");
if (e) {
- switch (e[0]) {
- case '0':
- if (kPageShift <= 12) {
- return false;
- }
-
- if (default_want_hpaa != nullptr) {
- int default_hpaa = default_want_hpaa();
- if (default_hpaa < 0) {
- return false;
- }
- }
-
- Log(kLog, __FILE__, __LINE__,
- "Runtime opt-out from HPAA requires building with "
- "//tcmalloc:want_no_hpaa."
- );
- break;
- case '1':
- return false;
- case '2':
- return true;
- default:
- Crash(kCrash, __FILE__, __LINE__, "bad env var", e);
- return false;
- }
+ switch (e[0]) {
+ case '0':
+ if (kPageShift <= 12) {
+ return false;
+ }
+
+ if (default_want_hpaa != nullptr) {
+ int default_hpaa = default_want_hpaa();
+ if (default_hpaa < 0) {
+ return false;
+ }
+ }
+
+ Log(kLog, __FILE__, __LINE__,
+ "Runtime opt-out from HPAA requires building with "
+ "//tcmalloc:want_no_hpaa."
+ );
+ break;
+ case '1':
+ return false;
+ case '2':
+ return true;
+ default:
+ Crash(kCrash, __FILE__, __LINE__, "bad env var", e);
+ return false;
+ }
}
if (default_subrelease != nullptr) {
@@ -95,7 +95,7 @@ bool decide_subrelease() {
}
FillerPartialRerelease decide_partial_rerelease() {
- const char *e = thread_safe_getenv("TCMALLOC_PARTIAL_RELEASE_CONTROL");
+ const char *e = thread_safe_getenv("TCMALLOC_PARTIAL_RELEASE_CONTROL");
if (e) {
if (e[0] == '0') {
return FillerPartialRerelease::Return;
@@ -126,8 +126,8 @@ HugePageAwareAllocator::HugePageAwareAllocator(MemoryTag tag)
switch (tag) {
case MemoryTag::kNormal:
return AllocAndReport<MemoryTag::kNormal>;
- case MemoryTag::kNormalP1:
- return AllocAndReport<MemoryTag::kNormalP1>;
+ case MemoryTag::kNormalP1:
+ return AllocAndReport<MemoryTag::kNormalP1>;
case MemoryTag::kSampled:
return AllocAndReport<MemoryTag::kSampled>;
default:
@@ -184,7 +184,7 @@ PageId HugePageAwareAllocator::RefillFiller(Length n, bool *from_released) {
Span *HugePageAwareAllocator::Finalize(Length n, PageId page)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) {
- ASSERT(page != PageId{0});
+ ASSERT(page != PageId{0});
Span *ret = Span::New(page, n);
Static::pagemap().Set(page, ret);
ASSERT(!ret->sampled());
@@ -196,16 +196,16 @@ Span *HugePageAwareAllocator::Finalize(Length n, PageId page)
// For anything <= half a huge page, we will unconditionally use the filler
// to pack it into a single page. If we need another page, that's fine.
Span *HugePageAwareAllocator::AllocSmall(Length n, bool *from_released) {
- auto [pt, page] = filler_.TryGet(n);
- if (ABSL_PREDICT_TRUE(pt != nullptr)) {
+ auto [pt, page] = filler_.TryGet(n);
+ if (ABSL_PREDICT_TRUE(pt != nullptr)) {
*from_released = false;
return Finalize(n, page);
}
page = RefillFiller(n, from_released);
- if (ABSL_PREDICT_FALSE(page == PageId{0})) {
- return nullptr;
- }
+ if (ABSL_PREDICT_FALSE(page == PageId{0})) {
+ return nullptr;
+ }
return Finalize(n, page);
}
@@ -219,8 +219,8 @@ Span *HugePageAwareAllocator::AllocLarge(Length n, bool *from_released) {
PageId page;
// If we fit in a single hugepage, try the Filler first.
if (n < kPagesPerHugePage) {
- auto [pt, page] = filler_.TryGet(n);
- if (ABSL_PREDICT_TRUE(pt != nullptr)) {
+ auto [pt, page] = filler_.TryGet(n);
+ if (ABSL_PREDICT_TRUE(pt != nullptr)) {
*from_released = false;
return Finalize(n, page);
}
@@ -307,11 +307,11 @@ Span *HugePageAwareAllocator::New(Length n) {
CHECK_CONDITION(n > Length(0));
bool from_released;
Span *s = LockAndAlloc(n, &from_released);
- if (s) {
- // Prefetch for writing, as we anticipate using the memory soon.
- __builtin_prefetch(s->start_address(), 1, 3);
- if (from_released) BackSpan(s);
- }
+ if (s) {
+ // Prefetch for writing, as we anticipate using the memory soon.
+ __builtin_prefetch(s->start_address(), 1, 3);
+ if (from_released) BackSpan(s);
+ }
ASSERT(!s || GetMemoryTag(s->start_address()) == tag_);
return s;
}
@@ -326,7 +326,7 @@ Span *HugePageAwareAllocator::LockAndAlloc(Length n, bool *from_released) {
// For anything too big for the filler, we use either a direct hugepage
// allocation, or possibly the regions if we are worried about slack.
- if (n <= HugeRegion::size().in_pages()) {
+ if (n <= HugeRegion::size().in_pages()) {
return AllocLarge(n, from_released);
}
@@ -357,7 +357,7 @@ Span *HugePageAwareAllocator::NewAligned(Length n, Length align) {
void HugePageAwareAllocator::DeleteFromHugepage(FillerType::Tracker *pt,
PageId p, Length n) {
- if (ABSL_PREDICT_TRUE(filler_.Put(pt, p, n) == nullptr)) return;
+ if (ABSL_PREDICT_TRUE(filler_.Put(pt, p, n) == nullptr)) return;
if (pt->donated()) {
--donated_huge_pages_;
}
@@ -365,10 +365,10 @@ void HugePageAwareAllocator::DeleteFromHugepage(FillerType::Tracker *pt,
}
bool HugePageAwareAllocator::AddRegion() {
- HugeRange r = alloc_.Get(HugeRegion::size());
+ HugeRange r = alloc_.Get(HugeRegion::size());
if (!r.valid()) return false;
- HugeRegion *region = region_allocator_.New();
- new (region) HugeRegion(r, SystemRelease);
+ HugeRegion *region = region_allocator_.New();
+ new (region) HugeRegion(r, SystemRelease);
regions_.Contribute(region);
return true;
}
@@ -387,7 +387,7 @@ void HugePageAwareAllocator::Delete(Span *span) {
FillerType::Tracker *pt = GetTracker(hp);
// a) We got packed by the filler onto a single hugepage - return our
// allocation to that hugepage in the filler.
- if (ABSL_PREDICT_TRUE(pt != nullptr)) {
+ if (ABSL_PREDICT_TRUE(pt != nullptr)) {
ASSERT(hp == HugePageContaining(p + n - Length(1)));
DeleteFromHugepage(pt, p, n);
return;
@@ -522,7 +522,7 @@ static double BytesToMiB(size_t bytes) {
return bytes / MiB;
}
-static void BreakdownStats(Printer *out, const BackingStats &s,
+static void BreakdownStats(Printer *out, const BackingStats &s,
const char *label) {
out->printf("%s %6.1f MiB used, %6.1f MiB free, %6.1f MiB unmapped\n", label,
BytesToMiB(s.system_bytes - s.free_bytes - s.unmapped_bytes),
@@ -538,9 +538,9 @@ static void BreakdownStatsInPbtxt(PbtxtRegion *hpaa, const BackingStats &s,
}
// public
-void HugePageAwareAllocator::Print(Printer *out) { Print(out, true); }
+void HugePageAwareAllocator::Print(Printer *out) { Print(out, true); }
-void HugePageAwareAllocator::Print(Printer *out, bool everything) {
+void HugePageAwareAllocator::Print(Printer *out, bool everything) {
SmallSpanStats small;
LargeSpanStats large;
BackingStats bstats;
@@ -671,6 +671,6 @@ void HugePageAwareAllocator::UnbackWithoutLock(void *start, size_t length) {
pageheap_lock.Lock();
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
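decide_subrelease() above routes its answer through the TCMALLOC_HPAA_CONTROL environment variable: '1' forces subrelease off, '2' forces it on, '0' is a request to opt out of HPAA that only takes effect when the binary was built with the matching opt-out hook, and anything else crashes. Below is a reduced restatement of that control flow with plain getenv and without the weak-symbol hooks; the built-in default is a parameter here rather than the default_subrelease() lookup.

// hpaa_control_sketch.cc: the shape of the TCMALLOC_HPAA_CONTROL handling.
#include <cstdio>
#include <cstdlib>

bool DecideSubrelease(bool built_in_default) {
  const char* e = std::getenv("TCMALLOC_HPAA_CONTROL");
  if (e != nullptr) {
    switch (e[0]) {
      case '0':
        break;  // HPAA opt-out is handled elsewhere; keep the default here
      case '1':
        return false;
      case '2':
        return true;
      default:
        std::fprintf(stderr, "bad TCMALLOC_HPAA_CONTROL value: %s\n", e);
        std::abort();
    }
  }
  return built_in_default;
}

int main() {
  std::printf("subrelease: %s\n", DecideSubrelease(true) ? "on" : "off");
  return 0;
}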
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator.h b/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator.h
index c36a1e515e..ee0d0c93a5 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator.h
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator.h
@@ -25,7 +25,7 @@
#include "tcmalloc/huge_page_filler.h"
#include "tcmalloc/huge_pages.h"
#include "tcmalloc/huge_region.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/page_allocator_interface.h"
#include "tcmalloc/page_heap_allocator.h"
@@ -33,9 +33,9 @@
#include "tcmalloc/stats.h"
#include "tcmalloc/system-alloc.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
bool decide_subrelease();
@@ -45,7 +45,7 @@ bool decide_subrelease();
class HugePageAwareAllocator final : public PageAllocatorInterface {
public:
explicit HugePageAwareAllocator(MemoryTag tag);
- ~HugePageAwareAllocator() override = default;
+ ~HugePageAwareAllocator() override = default;
// Allocate a run of "n" pages. Returns zero if out of memory.
// Caller should not pass "n == 0" -- instead, n should have
@@ -84,11 +84,11 @@ class HugePageAwareAllocator final : public PageAllocatorInterface {
ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
// Prints stats about the page heap to *out.
- void Print(Printer* out) ABSL_LOCKS_EXCLUDED(pageheap_lock) override;
+ void Print(Printer* out) ABSL_LOCKS_EXCLUDED(pageheap_lock) override;
// Print stats to *out, excluding long/likely uninteresting things
// unless <everything> is true.
- void Print(Printer* out, bool everything) ABSL_LOCKS_EXCLUDED(pageheap_lock);
+ void Print(Printer* out, bool everything) ABSL_LOCKS_EXCLUDED(pageheap_lock);
void PrintInPbtxt(PbtxtRegion* region)
ABSL_LOCKS_EXCLUDED(pageheap_lock) override;
@@ -108,10 +108,10 @@ class HugePageAwareAllocator final : public PageAllocatorInterface {
static void UnbackWithoutLock(void* start, size_t length)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
- HugeRegionSet<HugeRegion> regions_;
+ HugeRegionSet<HugeRegion> regions_;
PageHeapAllocator<FillerType::Tracker> tracker_allocator_;
- PageHeapAllocator<HugeRegion> region_allocator_;
+ PageHeapAllocator<HugeRegion> region_allocator_;
FillerType::Tracker* GetTracker(HugePage p);
@@ -168,8 +168,8 @@ class HugePageAwareAllocator final : public PageAllocatorInterface {
Span* Finalize(Length n, PageId page);
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_HUGE_PAGE_AWARE_ALLOCATOR_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator_test.cc b/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator_test.cc
index 83ae930e44..90e179c939 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_page_aware_allocator_test.cc
@@ -44,7 +44,7 @@
#include "absl/synchronization/barrier.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
-#include "benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
#include "tcmalloc/common.h"
#include "tcmalloc/huge_pages.h"
#include "tcmalloc/internal/logging.h"
@@ -62,7 +62,7 @@ ABSL_FLAG(uint64_t, limit, 0, "");
ABSL_FLAG(bool, always_check_usage, false, "enable expensive memory checks");
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
using testing::HasSubstr;
@@ -181,7 +181,7 @@ class HugePageAwareAllocatorTest : public ::testing::Test {
std::string ret;
const size_t kSize = 1 << 20;
ret.resize(kSize);
- Printer p(&ret[0], kSize);
+ Printer p(&ret[0], kSize);
allocator_->Print(&p);
ret.erase(p.SpaceRequired());
return ret;
@@ -191,7 +191,7 @@ class HugePageAwareAllocatorTest : public ::testing::Test {
std::string ret;
const size_t kSize = 1 << 20;
ret.resize(kSize);
- Printer p(&ret[0], kSize);
+ Printer p(&ret[0], kSize);
{
PbtxtRegion region(&p, kNested, 0);
allocator_->PrintInPbtxt(&region);
@@ -473,7 +473,7 @@ TEST_F(HugePageAwareAllocatorTest, LargeSmall) {
constexpr size_t kBufferSize = 1024 * 1024;
char buffer[kBufferSize];
- Printer printer(buffer, kBufferSize);
+ Printer printer(buffer, kBufferSize);
allocator_->Print(&printer);
// Verify that we have less free memory than we allocated in total. We have
// to account for bytes tied up in the cache.
@@ -953,5 +953,5 @@ TEST_F(HugePageAwareAllocatorTest, ParallelRelease) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
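The tests above capture allocator stats by handing Print a fixed-size buffer wrapped in a std::string and then trimming the string to Printer::SpaceRequired(). The same capture pattern with std::snprintf standing in for the internal Printer, since Printer is not visible outside tcmalloc:

// print_to_string_sketch.cc: print into a fixed buffer, trim to bytes used.
#include <algorithm>
#include <cstdio>
#include <string>

std::string StatsToString(int value) {
  std::string ret;
  const size_t kSize = 1 << 10;
  ret.resize(kSize);
  int written = std::snprintf(&ret[0], kSize, "stat value: %d\n", value);
  // Trim to the bytes actually produced (snprintf may report truncation).
  size_t used = written < 0 ? 0 : std::min<size_t>(written, kSize - 1);
  ret.erase(used);
  return ret;
}

int main() {
  std::printf("%s", StatsToString(42).c_str());
  return 0;
}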
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_page_filler.h b/contrib/libs/tcmalloc/tcmalloc/huge_page_filler.h
index 2f72b43881..8a35158298 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_page_filler.h
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_page_filler.h
@@ -24,26 +24,26 @@
#include "absl/algorithm/container.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/time/time.h"
-#include "tcmalloc/common.h"
+#include "tcmalloc/common.h"
#include "tcmalloc/huge_allocator.h"
#include "tcmalloc/huge_cache.h"
#include "tcmalloc/huge_pages.h"
#include "tcmalloc/internal/linked_list.h"
-#include "tcmalloc/internal/optimization.h"
+#include "tcmalloc/internal/optimization.h"
#include "tcmalloc/internal/range_tracker.h"
#include "tcmalloc/internal/timeseries_tracker.h"
#include "tcmalloc/span.h"
#include "tcmalloc/stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
-
-// This and the following classes implement the adaptive hugepage subrelease
-// mechanism and realized fragmentation metric described in "Adaptive Hugepage
-// Subrelease for Non-moving Memory Allocators in Warehouse-Scale Computers"
-// (ISMM 2021).
+namespace tcmalloc_internal {
+// This and the following classes implement the adaptive hugepage subrelease
+// mechanism and realized fragmentation metric described in "Adaptive Hugepage
+// Subrelease for Non-moving Memory Allocators in Warehouse-Scale Computers"
+// (ISMM 2021).
+
// Tracks correctness of skipped subrelease decisions over time.
template <size_t kEpochs = 16>
class SkippedSubreleaseCorrectnessTracker {
@@ -284,7 +284,7 @@ class FillerStatsTracker {
}
}
- void Print(Printer* out) const;
+ void Print(Printer* out) const;
void PrintInPbtxt(PbtxtRegion* hpaa) const;
// Calculates recent peaks for skipping subrelease decisions. If our allocated
@@ -457,15 +457,15 @@ inline double safe_div(Length a, Length b) {
}
template <size_t kEpochs>
-void FillerStatsTracker<kEpochs>::Print(Printer* out) const {
+void FillerStatsTracker<kEpochs>::Print(Printer* out) const {
NumberOfFreePages free_pages = min_free_pages(summary_interval_);
out->printf("HugePageFiller: time series over %d min interval\n\n",
absl::ToInt64Minutes(summary_interval_));
-
- // Realized fragmentation is equivalent to backed minimum free pages over a
- // 5-min interval. It is printed for convenience but not included in pbtxt.
- out->printf("HugePageFiller: realized fragmentation: %.1f MiB\n",
- free_pages.free_backed.in_mib());
+
+ // Realized fragmentation is equivalent to backed minimum free pages over a
+ // 5-min interval. It is printed for convenience but not included in pbtxt.
+ out->printf("HugePageFiller: realized fragmentation: %.1f MiB\n",
+ free_pages.free_backed.in_mib());
out->printf("HugePageFiller: minimum free pages: %zu (%zu backed)\n",
free_pages.free.raw_num(), free_pages.free_backed.raw_num());
@@ -632,56 +632,56 @@ class PageTracker : public TList<PageTracker<Unback>>::Elem {
public:
static void UnbackImpl(void* p, size_t size) { Unback(p, size); }
- constexpr PageTracker(HugePage p, uint64_t when)
+ constexpr PageTracker(HugePage p, uint64_t when)
: location_(p),
released_count_(0),
donated_(false),
- unbroken_(true),
- free_{} {
- init_when(when);
-
-#ifndef __ppc64__
-#if defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Winvalid-offsetof"
-#endif
- // Verify fields are structured so commonly accessed members (as part of
- // Put) are on the first two cache lines. This allows the CentralFreeList
- // to accelerate deallocations by prefetching PageTracker instances before
- // taking the pageheap_lock.
- //
- // On PPC64, kHugePageSize / kPageSize is typically ~2K (16MB / 8KB),
- // requiring 512 bytes for representing free_. While its cache line size is
- // larger, the entirety of free_ will not fit on two cache lines.
- static_assert(
- offsetof(PageTracker<Unback>, location_) + sizeof(location_) <=
- 2 * ABSL_CACHELINE_SIZE,
- "location_ should fall within the first two cachelines of "
- "PageTracker.");
- static_assert(offsetof(PageTracker<Unback>, when_numerator_) +
- sizeof(when_numerator_) <=
- 2 * ABSL_CACHELINE_SIZE,
- "when_numerator_ should fall within the first two cachelines "
- "of PageTracker.");
- static_assert(offsetof(PageTracker<Unback>, when_denominator_) +
- sizeof(when_denominator_) <=
- 2 * ABSL_CACHELINE_SIZE,
- "when_denominator_ should fall within the first two "
- "cachelines of PageTracker.");
- static_assert(
- offsetof(PageTracker<Unback>, donated_) + sizeof(donated_) <=
- 2 * ABSL_CACHELINE_SIZE,
- "donated_ should fall within the first two cachelines of PageTracker.");
- static_assert(
- offsetof(PageTracker<Unback>, free_) + sizeof(free_) <=
- 2 * ABSL_CACHELINE_SIZE,
- "free_ should fall within the first two cachelines of PageTracker.");
-#if defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
-#endif // __ppc64__
- }
-
+ unbroken_(true),
+ free_{} {
+ init_when(when);
+
+#ifndef __ppc64__
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+#endif
+ // Verify fields are structured so commonly accessed members (as part of
+ // Put) are on the first two cache lines. This allows the CentralFreeList
+ // to accelerate deallocations by prefetching PageTracker instances before
+ // taking the pageheap_lock.
+ //
+ // On PPC64, kHugePageSize / kPageSize is typically ~2K (16MB / 8KB),
+ // requiring 512 bytes for representing free_. While its cache line size is
+ // larger, the entirety of free_ will not fit on two cache lines.
+ static_assert(
+ offsetof(PageTracker<Unback>, location_) + sizeof(location_) <=
+ 2 * ABSL_CACHELINE_SIZE,
+ "location_ should fall within the first two cachelines of "
+ "PageTracker.");
+ static_assert(offsetof(PageTracker<Unback>, when_numerator_) +
+ sizeof(when_numerator_) <=
+ 2 * ABSL_CACHELINE_SIZE,
+ "when_numerator_ should fall within the first two cachelines "
+ "of PageTracker.");
+ static_assert(offsetof(PageTracker<Unback>, when_denominator_) +
+ sizeof(when_denominator_) <=
+ 2 * ABSL_CACHELINE_SIZE,
+ "when_denominator_ should fall within the first two "
+ "cachelines of PageTracker.");
+ static_assert(
+ offsetof(PageTracker<Unback>, donated_) + sizeof(donated_) <=
+ 2 * ABSL_CACHELINE_SIZE,
+ "donated_ should fall within the first two cachelines of PageTracker.");
+ static_assert(
+ offsetof(PageTracker<Unback>, free_) + sizeof(free_) <=
+ 2 * ABSL_CACHELINE_SIZE,
+ "free_ should fall within the first two cachelines of PageTracker.");
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#endif // __ppc64__
+ }
+
struct PageAllocation {
PageId page;
Length previously_unbacked;
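
The layout contract described in the comments above can be illustrated with a toy standard-layout struct. The field names mirror PageTracker, but the struct itself, the 32-byte bitmap stand-in, and the hard-coded 64-byte cache line (instead of ABSL_CACHELINE_SIZE) are assumptions of this sketch, not the real definitions:

#include <cstddef>
#include <cstdint>

// Hot members first, so anything touched on the deallocation path sits in the
// first two (assumed 64-byte) cache lines and can be prefetched cheaply.
struct ToyTracker {
  uintptr_t location;
  uint64_t when_numerator;
  uint64_t when_denominator;
  uint16_t released_count;
  bool donated;
  bool unbroken;
  uint8_t free_bitmap[32];  // stand-in for the RangeTracker free_ member
};

constexpr size_t kAssumedCachelineSize = 64;
static_assert(offsetof(ToyTracker, donated) + sizeof(ToyTracker::donated) <=
                  2 * kAssumedCachelineSize,
              "donated should fall within the first two cachelines");
static_assert(offsetof(ToyTracker, free_bitmap) +
                      sizeof(ToyTracker::free_bitmap) <=
                  2 * kAssumedCachelineSize,
              "free_bitmap should fall within the first two cachelines");

int main() { return 0; }
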
@@ -754,26 +754,26 @@ class PageTracker : public TList<PageTracker<Unback>>::Elem {
PageAgeHistograms* ages) const;
private:
- void init_when(uint64_t w) {
- const Length before = Length(free_.total_free());
- when_numerator_ = w * before.raw_num();
- when_denominator_ = before.raw_num();
- }
-
+ void init_when(uint64_t w) {
+ const Length before = Length(free_.total_free());
+ when_numerator_ = w * before.raw_num();
+ when_denominator_ = before.raw_num();
+ }
+
HugePage location_;
- // We keep track of an average time weighted by Length::raw_num. In order to
- // avoid doing division on fast path, store the numerator and denominator and
- // only do the division when we need the average.
- uint64_t when_numerator_;
- uint64_t when_denominator_;
-
-  // Cached value of released_by_page_.CountBits(0, kPagesPerHugePage)
- //
- // TODO(b/151663108): Logically, this is guarded by pageheap_lock.
- uint16_t released_count_;
- bool donated_;
- bool unbroken_;
-
+ // We keep track of an average time weighted by Length::raw_num. In order to
+ // avoid doing division on fast path, store the numerator and denominator and
+ // only do the division when we need the average.
+ uint64_t when_numerator_;
+ uint64_t when_denominator_;
+
+  // Cached value of released_by_page_.CountBits(0, kPagesPerHugePage)
+ //
+ // TODO(b/151663108): Logically, this is guarded by pageheap_lock.
+ uint16_t released_count_;
+ bool donated_;
+ bool unbroken_;
+
RangeTracker<kPagesPerHugePage.raw_num()> free_;
// Bitmap of pages based on them being released to the OS.
// * Not yet released pages are unset (considered "free")
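
A minimal sketch of the division-free bookkeeping described above: the fast path only accumulates weight-scaled sums, and the division is deferred until the average is actually read (as AddSpanStats does). The class and method names are illustrative, not part of tcmalloc:

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the when_numerator_/when_denominator_ scheme.
class WeightedWhen {
 public:
  void Reset(uint64_t when, uint64_t pages) {   // analogous to init_when()
    numerator_ = when * pages;
    denominator_ = pages;
  }
  void Add(uint64_t when, uint64_t pages) {     // analogous to the bump in Put()
    numerator_ += when * pages;
    denominator_ += pages;
  }
  uint64_t Average() const {                    // division only on the slow path
    return denominator_ == 0 ? numerator_ : numerator_ / denominator_;
  }

 private:
  uint64_t numerator_ = 0;
  uint64_t denominator_ = 0;
};

int main() {
  WeightedWhen w;
  w.Reset(/*when=*/1000, /*pages=*/256);
  w.Add(/*when=*/2000, /*pages=*/256);
  std::printf("%llu\n", static_cast<unsigned long long>(w.Average()));  // 1500
  return 0;
}
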
@@ -837,19 +837,19 @@ class HugePageFiller {
typedef TrackerType Tracker;
- struct TryGetResult {
- TrackerType* pt;
- PageId page;
- };
-
- // Our API is simple, but note that it does not include an unconditional
- // allocation, only a "try"; we expect callers to allocate new hugepages if
- // needed. This simplifies using it in a few different contexts (and improves
- // the testing story - no dependencies.)
- //
- // On failure, returns nullptr/PageId{0}.
- TryGetResult TryGet(Length n) ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
-
+ struct TryGetResult {
+ TrackerType* pt;
+ PageId page;
+ };
+
+ // Our API is simple, but note that it does not include an unconditional
+ // allocation, only a "try"; we expect callers to allocate new hugepages if
+ // needed. This simplifies using it in a few different contexts (and improves
+ // the testing story - no dependencies.)
+ //
+ // On failure, returns nullptr/PageId{0}.
+ TryGetResult TryGet(Length n) ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
+
// Marks [p, p + n) as usable by new allocations into *pt; returns pt
// if that hugepage is now empty (nullptr otherwise.)
// REQUIRES: pt is owned by this object (has been Contribute()), and
@@ -903,7 +903,7 @@ class HugePageFiller {
BackingStats stats() const;
SubreleaseStats subrelease_stats() const { return subrelease_stats_; }
- void Print(Printer* out, bool everything) const;
+ void Print(Printer* out, bool everything) const;
void PrintInPbtxt(PbtxtRegion* hpaa) const;
private:
@@ -1070,18 +1070,18 @@ inline typename PageTracker<Unback>::PageAllocation PageTracker<Unback>::Get(
ASSERT(released_by_page_.CountBits(0, kPagesPerHugePage.raw_num()) ==
released_count_);
- size_t unbacked = 0;
-  // If released_count_ == 0, CountBits will return 0 and ClearRange will be a
- // no-op (but will touch cachelines) due to the invariants guaranteed by
- // CountBits() == released_count_.
- //
- // This is a performance optimization, not a logical requirement.
- if (ABSL_PREDICT_FALSE(released_count_ > 0)) {
- unbacked = released_by_page_.CountBits(index, n.raw_num());
- released_by_page_.ClearRange(index, n.raw_num());
- ASSERT(released_count_ >= unbacked);
- released_count_ -= unbacked;
- }
+ size_t unbacked = 0;
+  // If released_count_ == 0, CountBits will return 0 and ClearRange will be a
+ // no-op (but will touch cachelines) due to the invariants guaranteed by
+ // CountBits() == released_count_.
+ //
+ // This is a performance optimization, not a logical requirement.
+ if (ABSL_PREDICT_FALSE(released_count_ > 0)) {
+ unbacked = released_by_page_.CountBits(index, n.raw_num());
+ released_by_page_.ClearRange(index, n.raw_num());
+ ASSERT(released_count_ >= unbacked);
+ released_count_ -= unbacked;
+ }
ASSERT(released_by_page_.CountBits(0, kPagesPerHugePage.raw_num()) ==
released_count_);
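
The shortcut described in that comment can be shown with a toy bitmap plus a cached count. std::bitset, the 256-page size, and the ClearRange helper are assumptions of the sketch, and the ABSL_PREDICT_FALSE hint is replaced by a plain branch:

#include <bitset>
#include <cstddef>

// Toy version of the released_count_ shortcut: a cached population count lets
// the common case (nothing released) skip scanning and clearing the bitmap.
struct ReleasedPages {
  std::bitset<256> released;   // assumed 256 pages per hugepage
  size_t released_count = 0;   // cached released.count()

  // Clears [index, index + n) and returns how many bits were actually set.
  size_t ClearRange(size_t index, size_t n) {
    size_t cleared = 0;
    if (released_count > 0) {  // fast path: avoid touching the bitset at all
      for (size_t i = index; i < index + n; ++i) {
        cleared += released.test(i);
        released.reset(i);
      }
      released_count -= cleared;
    }
    return cleared;
  }
};

int main() {
  ReleasedPages r;
  r.released.set(3);
  r.released_count = 1;
  return r.ClearRange(0, 8) == 1 ? 0 : 1;
}
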
@@ -1094,8 +1094,8 @@ inline void PageTracker<Unback>::Put(PageId p, Length n) {
Length index = p - location_.first_page();
free_.Unmark(index.raw_num(), n.raw_num());
- when_numerator_ += n.raw_num() * absl::base_internal::CycleClock::Now();
- when_denominator_ += n.raw_num();
+ when_numerator_ += n.raw_num() * absl::base_internal::CycleClock::Now();
+ when_denominator_ += n.raw_num();
}
template <MemoryModifyFunction Unback>
@@ -1145,7 +1145,7 @@ inline Length PageTracker<Unback>::ReleaseFree() {
ASSERT(Length(released_count_) <= kPagesPerHugePage);
ASSERT(released_by_page_.CountBits(0, kPagesPerHugePage.raw_num()) ==
released_count_);
- init_when(absl::base_internal::CycleClock::Now());
+ init_when(absl::base_internal::CycleClock::Now());
return Length(count);
}
@@ -1155,8 +1155,8 @@ inline void PageTracker<Unback>::AddSpanStats(SmallSpanStats* small,
PageAgeHistograms* ages) const {
size_t index = 0, n;
- uint64_t w = when_denominator_ == 0 ? when_numerator_
- : when_numerator_ / when_denominator_;
+ uint64_t w = when_denominator_ == 0 ? when_numerator_
+ : when_numerator_ / when_denominator_;
while (free_.NextFreeRange(index, &index, &n)) {
bool is_released = released_by_page_.GetBit(index);
// Find the last bit in the run with the same state (set or cleared) as
@@ -1224,8 +1224,8 @@ inline HugePageFiller<TrackerType>::HugePageFiller(
fillerstats_tracker_(clock, absl::Minutes(10), absl::Minutes(5)) {}
template <class TrackerType>
-inline typename HugePageFiller<TrackerType>::TryGetResult
-HugePageFiller<TrackerType>::TryGet(Length n) {
+inline typename HugePageFiller<TrackerType>::TryGetResult
+HugePageFiller<TrackerType>::TryGet(Length n) {
ASSERT(n > Length(0));
// How do we choose which hugepage to allocate from (among those with
@@ -1292,7 +1292,7 @@ HugePageFiller<TrackerType>::TryGet(Length n) {
// So all we have to do is find the first nonempty freelist in the regular
// HintedTrackerList that *could* support our allocation, and it will be our
// best choice. If there is none we repeat with the donated HintedTrackerList.
- ASSUME(n < kPagesPerHugePage);
+ ASSUME(n < kPagesPerHugePage);
TrackerType* pt;
bool was_released = false;
@@ -1325,11 +1325,11 @@ HugePageFiller<TrackerType>::TryGet(Length n) {
break;
}
- return {nullptr, PageId{0}};
+ return {nullptr, PageId{0}};
} while (false);
- ASSUME(pt != nullptr);
+ ASSUME(pt != nullptr);
ASSERT(pt->longest_free_range() >= n);
- const auto page_allocation = pt->Get(n);
+ const auto page_allocation = pt->Get(n);
AddToFillerList(pt);
allocated_ += n;
@@ -1341,7 +1341,7 @@ HugePageFiller<TrackerType>::TryGet(Length n) {
// donated by this point.
ASSERT(!pt->donated());
UpdateFillerStatsTracker();
- return {pt, page_allocation.page};
+ return {pt, page_allocation.page};
}
// Marks [p, p + n) as usable by new allocations into *pt; returns pt
@@ -1668,7 +1668,7 @@ inline BackingStats HugePageFiller<TrackerType>::stats() const {
return s;
}
-namespace huge_page_filler_internal {
+namespace huge_page_filler_internal {
// Computes some histograms of fullness. Because nearly empty/full huge pages
// are much more interesting, we calculate 4 buckets at each of the beginning
// and end of size one, and then divide the overall space by 16 to have 16
@@ -1719,7 +1719,7 @@ class UsageInfo {
nalloc_histo_[which][BucketNum(nalloc - 1)]++;
}
- void Print(Printer* out) {
+ void Print(Printer* out) {
PrintHisto(out, free_page_histo_[kRegular],
"# of regular hps with a<= # of free pages <b", 0);
PrintHisto(out, free_page_histo_[kDonated],
@@ -1769,7 +1769,7 @@ class UsageInfo {
return it - bucket_bounds_ - 1;
}
- void PrintHisto(Printer* out, Histo h, const char blurb[], size_t offset) {
+ void PrintHisto(Printer* out, Histo h, const char blurb[], size_t offset) {
out->printf("\nHugePageFiller: %s", blurb);
for (size_t i = 0; i < buckets_size_; ++i) {
if (i % 6 == 0) {
@@ -1799,10 +1799,10 @@ class UsageInfo {
size_t bucket_bounds_[kBucketCapacity];
int buckets_size_ = 0;
};
-} // namespace huge_page_filler_internal
+} // namespace huge_page_filler_internal
template <class TrackerType>
-inline void HugePageFiller<TrackerType>::Print(Printer* out,
+inline void HugePageFiller<TrackerType>::Print(Printer* out,
bool everything) const {
out->printf("HugePageFiller: densely pack small requests into hugepages\n");
@@ -1864,7 +1864,7 @@ inline void HugePageFiller<TrackerType>::Print(Printer* out,
if (!everything) return;
// Compute some histograms of fullness.
- using huge_page_filler_internal::UsageInfo;
+ using huge_page_filler_internal::UsageInfo;
UsageInfo usage;
regular_alloc_.Iter(
[&](const TrackerType* pt) { usage.Record(pt, UsageInfo::kRegular); }, 0);
@@ -1942,7 +1942,7 @@ inline void HugePageFiller<TrackerType>::PrintInPbtxt(PbtxtRegion* hpaa) const {
"filler_num_hugepages_broken_due_to_limit",
subrelease_stats_.total_hugepages_broken_due_to_limit.raw_num());
// Compute some histograms of fullness.
- using huge_page_filler_internal::UsageInfo;
+ using huge_page_filler_internal::UsageInfo;
UsageInfo usage;
regular_alloc_.Iter(
[&](const TrackerType* pt) { usage.Record(pt, UsageInfo::kRegular); }, 0);
@@ -2106,8 +2106,8 @@ inline Length HugePageFiller<TrackerType>::free_pages() const {
return size().in_pages() - used_pages() - unmapped_pages();
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_HUGE_PAGE_FILLER_H_
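
A compact sketch of the calling convention documented for TryGet(): the filler only "tries", returning {nullptr, PageId{0}} on failure, and the caller is expected to contribute a fresh hugepage and retry. ToyFiller, Tracker and Contribute below are hypothetical stand-ins; the real pattern is what the test fixture below does with FakeTracker:

#include <cstdio>

// Hypothetical minimal shapes; not the real tcmalloc classes.
struct Tracker { int id; };
struct TryGetResult { Tracker* pt; int page; };

struct ToyFiller {
  Tracker* only = nullptr;  // pretend capacity for a single tracker
  TryGetResult TryGet(int n) {
    if (only == nullptr) return {nullptr, 0};  // failure: nullptr/PageId{0}
    return {only, 100 + n};
  }
  void Contribute(Tracker* pt) { only = pt; }
};

int main() {
  ToyFiller filler;
  Tracker fresh{1};
  auto [pt, page] = filler.TryGet(4);
  if (pt == nullptr) {
    // The filler never allocates hugepages itself; the caller donates one and
    // retries, mirroring the fallback in the test fixture that follows.
    filler.Contribute(&fresh);
    auto retry = filler.TryGet(4);
    pt = retry.pt;
    page = retry.page;
  }
  std::printf("page=%d from tracker %d\n", page, pt->id);
  return 0;
}
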
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_page_filler_test.cc b/contrib/libs/tcmalloc/tcmalloc/huge_page_filler_test.cc
index 9879d41d79..7695f0d140 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_page_filler_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_page_filler_test.cc
@@ -45,21 +45,21 @@
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
-#include "benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
#include "tcmalloc/common.h"
#include "tcmalloc/huge_pages.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/pages.h"
#include "tcmalloc/stats.h"
-using tcmalloc::tcmalloc_internal::Length;
-
-ABSL_FLAG(Length, page_tracker_defrag_lim, Length(32),
+using tcmalloc::tcmalloc_internal::Length;
+
+ABSL_FLAG(Length, page_tracker_defrag_lim, Length(32),
"Max allocation size for defrag test");
-ABSL_FLAG(Length, frag_req_limit, Length(32),
+ABSL_FLAG(Length, frag_req_limit, Length(32),
"request size limit for frag test");
-ABSL_FLAG(Length, frag_size, Length(512 * 1024),
+ABSL_FLAG(Length, frag_size, Length(512 * 1024),
"target number of pages for frag test");
ABSL_FLAG(uint64_t, frag_iters, 10 * 1000 * 1000, "iterations for frag test");
@@ -69,7 +69,7 @@ ABSL_FLAG(uint64_t, bytes, 1024 * 1024 * 1024, "baseline usage");
ABSL_FLAG(double, growth_factor, 2.0, "growth over baseline");
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
// This is an arbitrary distribution taken from page requests from
@@ -719,15 +719,15 @@ class FillerTest : public testing::TestWithParam<FillerPartialRerelease> {
EXPECT_LT(n, kPagesPerHugePage);
PAlloc ret;
ret.n = n;
- ret.pt = nullptr;
+ ret.pt = nullptr;
ret.mark = ++next_mark_;
if (!donated) { // Donated means always create a new hugepage
absl::base_internal::SpinLockHolder l(&pageheap_lock);
- auto [pt, page] = filler_.TryGet(n);
- ret.pt = pt;
- ret.p = page;
+ auto [pt, page] = filler_.TryGet(n);
+ ret.pt = pt;
+ ret.p = page;
}
- if (ret.pt == nullptr) {
+ if (ret.pt == nullptr) {
ret.pt =
new FakeTracker(GetBacking(), absl::base_internal::CycleClock::Now());
{
@@ -940,7 +940,7 @@ TEST_P(FillerTest, PrintFreeRatio) {
std::string buffer(1024 * 1024, '\0');
{
- Printer printer(&*buffer.begin(), buffer.size());
+ Printer printer(&*buffer.begin(), buffer.size());
filler_.Print(&printer, /*everything=*/true);
buffer.erase(printer.SpaceRequired());
}
@@ -1429,7 +1429,7 @@ TEST_P(FillerTest, SkipSubrelease) {
std::string buffer(1024 * 1024, '\0');
{
- Printer printer(&*buffer.begin(), buffer.size());
+ Printer printer(&*buffer.begin(), buffer.size());
filler_.Print(&printer, true);
}
buffer.resize(strlen(buffer.c_str()));
@@ -1451,7 +1451,7 @@ class FillerStatsTrackerTest : public testing::Test {
protected:
static constexpr absl::Duration kWindow = absl::Minutes(10);
- using StatsTrackerType = FillerStatsTracker<16>;
+ using StatsTrackerType = FillerStatsTracker<16>;
StatsTrackerType tracker_{
Clock{.now = FakeClock, .freq = GetFakeClockFrequency}, kWindow,
absl::Minutes(5)};
@@ -1528,7 +1528,7 @@ TEST_F(FillerStatsTrackerTest, Works) {
// Test text output (time series summary).
{
std::string buffer(1024 * 1024, '\0');
- Printer printer(&*buffer.begin(), buffer.size());
+ Printer printer(&*buffer.begin(), buffer.size());
{
tracker_.Print(&printer);
buffer.erase(printer.SpaceRequired());
@@ -1536,7 +1536,7 @@ TEST_F(FillerStatsTrackerTest, Works) {
EXPECT_THAT(buffer, StrEq(R"(HugePageFiller: time series over 5 min interval
-HugePageFiller: realized fragmentation: 0.8 MiB
+HugePageFiller: realized fragmentation: 0.8 MiB
HugePageFiller: minimum free pages: 110 (100 backed)
HugePageFiller: at peak demand: 208 pages (and 111 free, 10 unmapped)
HugePageFiller: at peak demand: 26 hps (14 regular, 10 donated, 1 partial, 1 released)
@@ -1552,7 +1552,7 @@ HugePageFiller: Subrelease stats last 10 min: total 0 pages subreleased, 0 hugep
// Test pbtxt output (full time series).
{
std::string buffer(1024 * 1024, '\0');
- Printer printer(&*buffer.begin(), buffer.size());
+ Printer printer(&*buffer.begin(), buffer.size());
{
PbtxtRegion region(&printer, kTop, /*indent=*/0);
tracker_.PrintInPbtxt(&region);
@@ -1870,7 +1870,7 @@ TEST_P(FillerTest, Print) {
std::string buffer(1024 * 1024, '\0');
{
- Printer printer(&*buffer.begin(), buffer.size());
+ Printer printer(&*buffer.begin(), buffer.size());
filler_.Print(&printer, /*everything=*/true);
buffer.erase(printer.SpaceRequired());
}
@@ -1950,7 +1950,7 @@ HugePageFiller: <225<= 0 <241<= 0 <253<= 0 <254<= 0 <255<= 0
HugePageFiller: time series over 5 min interval
-HugePageFiller: realized fragmentation: 0.0 MiB
+HugePageFiller: realized fragmentation: 0.0 MiB
HugePageFiller: minimum free pages: 0 (0 backed)
HugePageFiller: at peak demand: 1774 pages (and 261 free, 13 unmapped)
HugePageFiller: at peak demand: 8 hps (5 regular, 1 donated, 0 partial, 2 released)
@@ -1977,7 +1977,7 @@ TEST_P(FillerTest, PrintInPbtxt) {
auto allocs = GenerateInterestingAllocs();
std::string buffer(1024 * 1024, '\0');
- Printer printer(&*buffer.begin(), buffer.size());
+ Printer printer(&*buffer.begin(), buffer.size());
{
PbtxtRegion region(&printer, kTop, /*indent=*/0);
filler_.PrintInPbtxt(&region);
@@ -3577,7 +3577,7 @@ TEST_P(FillerTest, CheckSubreleaseStats) {
std::string buffer(1024 * 1024, '\0');
{
- Printer printer(&*buffer.begin(), buffer.size());
+ Printer printer(&*buffer.begin(), buffer.size());
filler_.Print(&printer, /*everything=*/true);
buffer.erase(printer.SpaceRequired());
}
@@ -3629,7 +3629,7 @@ TEST_P(FillerTest, ConstantBrokenHugePages) {
std::string buffer(1024 * 1024, '\0');
{
- Printer printer(&*buffer.begin(), buffer.size());
+ Printer printer(&*buffer.begin(), buffer.size());
filler_.Print(&printer, /*everything=*/false);
buffer.erase(printer.SpaceRequired());
}
@@ -3677,7 +3677,7 @@ TEST_P(FillerTest, CheckBufferSize) {
Delete(big);
std::string buffer(1024 * 1024, '\0');
- Printer printer(&*buffer.begin(), buffer.size());
+ Printer printer(&*buffer.begin(), buffer.size());
{
PbtxtRegion region(&printer, kTop, /*indent=*/0);
filler_.PrintInPbtxt(&region);
@@ -3795,5 +3795,5 @@ INSTANTIATE_TEST_SUITE_P(All, FillerTest,
FillerPartialRerelease::Retain));
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_pages.h b/contrib/libs/tcmalloc/tcmalloc/huge_pages.h
index 4498994f75..e58cb2df5e 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_pages.h
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_pages.h
@@ -28,12 +28,12 @@
#include "tcmalloc/common.h"
#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/internal/optimization.h"
+#include "tcmalloc/internal/optimization.h"
#include "tcmalloc/pages.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
inline constexpr Length kPagesPerHugePage =
Length(1 << (kHugePageShift - kPageShift));
@@ -41,23 +41,23 @@ inline constexpr Length kPagesPerHugePage =
// A single aligned huge page.
struct HugePage {
void *start_addr() const {
- ASSERT(pn <= kMaxPageNumber);
+ ASSERT(pn <= kMaxPageNumber);
return reinterpret_cast<void *>(pn << kHugePageShift);
}
- PageId first_page() const {
- ASSERT(pn <= kMaxPageNumber);
- return PageId(pn << (kHugePageShift - kPageShift));
- }
-
- size_t index() const {
- ASSERT(pn <= kMaxPageNumber);
- return pn;
- }
+ PageId first_page() const {
+ ASSERT(pn <= kMaxPageNumber);
+ return PageId(pn << (kHugePageShift - kPageShift));
+ }
- static constexpr uintptr_t kMaxPageNumber =
- std::numeric_limits<uintptr_t>::max() >> kHugePageShift;
+ size_t index() const {
+ ASSERT(pn <= kMaxPageNumber);
+ return pn;
+ }
+ static constexpr uintptr_t kMaxPageNumber =
+ std::numeric_limits<uintptr_t>::max() >> kHugePageShift;
+
uintptr_t pn;
};
@@ -85,16 +85,16 @@ struct HugeLength {
// Literal constructors (made explicit to avoid accidental uses when
// another unit was meant.)
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugeLength NHugePages(size_t n) { return HugeLength(n); }
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugeLength HLFromBytes(size_t bytes) {
return NHugePages(bytes / kHugePageSize);
}
// Rounds *up* to the nearest hugepage.
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugeLength HLFromPages(Length pages) {
return NHugePages((pages + kPagesPerHugePage - Length(1)) /
kPagesPerHugePage);
@@ -106,7 +106,7 @@ inline HugeLength &operator++(HugeLength &len) { // NOLINT(runtime/references)
}
inline HugePage &operator++(HugePage &p) { // NOLINT(runtime/references)
- ASSERT(p.pn + 1 <= HugePage::kMaxPageNumber);
+ ASSERT(p.pn + 1 <= HugePage::kMaxPageNumber);
p.pn++;
return p;
}
@@ -121,72 +121,72 @@ inline constexpr bool operator<(HugeLength lhs, HugeLength rhs) {
return lhs.n < rhs.n;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator>(HugeLength lhs, HugeLength rhs) {
return lhs.n > rhs.n;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator<=(HugeLength lhs, HugeLength rhs) {
return lhs.n <= rhs.n;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator<(HugePage lhs, HugePage rhs) {
return lhs.pn < rhs.pn;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator>(HugePage lhs, HugePage rhs) {
return lhs.pn > rhs.pn;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator>=(HugeLength lhs, HugeLength rhs) {
return lhs.n >= rhs.n;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator<=(HugePage lhs, HugePage rhs) {
return lhs.pn <= rhs.pn;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator>=(HugePage lhs, HugePage rhs) {
return lhs.pn >= rhs.pn;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator==(HugePage lhs, HugePage rhs) {
return lhs.pn == rhs.pn;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator!=(HugePage lhs, HugePage rhs) {
return !(lhs == rhs);
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator==(HugeLength lhs, HugeLength rhs) {
return lhs.n == rhs.n;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator!=(HugeLength lhs, HugeLength rhs) {
return lhs.n != rhs.n;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr size_t operator/(HugeLength lhs, HugeLength rhs) {
return lhs.n / rhs.n;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugeLength operator*(HugeLength lhs, size_t rhs) {
return NHugePages(lhs.n * rhs);
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugeLength operator/(HugeLength lhs, size_t rhs) {
return NHugePages(lhs.n / rhs);
}
@@ -196,39 +196,39 @@ inline HugeLength &operator*=(HugeLength &lhs, size_t rhs) {
return lhs;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugeLength operator%(HugeLength lhs, HugeLength rhs) {
return NHugePages(lhs.n % rhs.n);
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugePage operator+(HugePage lhs, HugeLength rhs) {
- ASSERT(lhs.pn + rhs.n <= HugePage::kMaxPageNumber);
+ ASSERT(lhs.pn + rhs.n <= HugePage::kMaxPageNumber);
return HugePage{lhs.pn + rhs.n};
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugePage operator+(HugeLength lhs, HugePage rhs) {
return rhs + lhs;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugePage operator-(HugePage lhs, HugeLength rhs) {
return ASSERT(lhs.pn >= rhs.n), HugePage{lhs.pn - rhs.n};
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugeLength operator-(HugePage lhs, HugePage rhs) {
return ASSERT(lhs.pn >= rhs.pn), NHugePages(lhs.pn - rhs.pn);
}
inline HugePage &operator+=(HugePage &lhs, HugeLength rhs) {
- ASSERT(lhs.pn + rhs.n <= HugePage::kMaxPageNumber);
+ ASSERT(lhs.pn + rhs.n <= HugePage::kMaxPageNumber);
lhs.pn += rhs.n;
return lhs;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugeLength operator+(HugeLength lhs, HugeLength rhs) {
return NHugePages(lhs.n + rhs.n);
}
@@ -238,7 +238,7 @@ inline HugeLength &operator+=(HugeLength &lhs, HugeLength rhs) {
return lhs;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr HugeLength operator-(HugeLength lhs, HugeLength rhs) {
return ASSERT(lhs.n >= rhs.n), NHugePages(lhs.n - rhs.n);
}
@@ -257,12 +257,12 @@ inline void PrintTo(const HugeLength &n, ::std::ostream *os) {
*os << n.raw_num() << "hps";
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline HugePage HugePageContaining(PageId p) {
return {p.index() >> (kHugePageShift - kPageShift)};
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline HugePage HugePageContaining(void *p) {
return HugePageContaining(PageIdContaining(p));
}
@@ -337,7 +337,7 @@ inline std::pair<HugeRange, HugeRange> Split(HugeRange r, HugeLength n) {
}
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_HUGE_PAGES_H_
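
A worked example of the constants and rounding above, assuming 2 MiB hugepages (kHugePageShift = 21) and the default 8 KiB TCMalloc page (kPageShift = 13); the local constexpr helper only mirrors the formula shown in this header:

#include <cstddef>

constexpr size_t kHugePageShift = 21;  // assumed 2 MiB hugepages
constexpr size_t kPageShift = 13;      // assumed 8 KiB TCMalloc pages
constexpr size_t kPagesPerHugePage = size_t{1} << (kHugePageShift - kPageShift);
static_assert(kPagesPerHugePage == 256, "256 small pages per hugepage");

// HLFromPages rounds *up* to the nearest hugepage.
constexpr size_t HLFromPages(size_t pages) {
  return (pages + kPagesPerHugePage - 1) / kPagesPerHugePage;
}
static_assert(HLFromPages(256) == 1, "exact fit stays at one hugepage");
static_assert(HLFromPages(300) == 2, "300 small pages round up to two hugepages");

int main() { return 0; }
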
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_region.h b/contrib/libs/tcmalloc/tcmalloc/huge_region.h
index 0262c007b2..4d7195642a 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_region.h
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_region.h
@@ -27,9 +27,9 @@
#include "tcmalloc/pages.h"
#include "tcmalloc/stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Track allocations from a fixed-size multiple huge page region.
// Similar to PageTracker but a few important differences:
@@ -43,16 +43,16 @@ namespace tcmalloc_internal {
// available gaps (1.75 MiB), and lengths that don't fit, but would
// introduce unacceptable fragmentation (2.1 MiB).
//
-class HugeRegion : public TList<HugeRegion>::Elem {
+class HugeRegion : public TList<HugeRegion>::Elem {
public:
// We could template this if there was any need.
- static constexpr HugeLength kRegionSize = HLFromBytes(1024 * 1024 * 1024);
- static constexpr size_t kNumHugePages = kRegionSize.raw_num();
- static constexpr HugeLength size() { return kRegionSize; }
+ static constexpr HugeLength kRegionSize = HLFromBytes(1024 * 1024 * 1024);
+ static constexpr size_t kNumHugePages = kRegionSize.raw_num();
+ static constexpr HugeLength size() { return kRegionSize; }
// REQUIRES: r.len() == size(); r unbacked.
- HugeRegion(HugeRange r, MemoryModifyFunction unback);
- HugeRegion() = delete;
+ HugeRegion(HugeRange r, MemoryModifyFunction unback);
+ HugeRegion() = delete;
// If available, return a range of n free pages, setting *from_released =
// true iff the returned range is currently unbacked.
@@ -82,7 +82,7 @@ class HugeRegion : public TList<HugeRegion>::Elem {
HugeLength backed() const;
- void Print(Printer *out) const;
+ void Print(Printer *out) const;
void PrintInPbtxt(PbtxtRegion *detail) const;
BackingStats stats() const;
@@ -97,7 +97,7 @@ class HugeRegion : public TList<HugeRegion>::Elem {
void append_it(HugeRegion *other) { this->append(other); }
private:
- RangeTracker<kRegionSize.in_pages().raw_num()> tracker_;
+ RangeTracker<kRegionSize.in_pages().raw_num()> tracker_;
HugeRange location_;
@@ -126,8 +126,8 @@ class HugeRegion : public TList<HugeRegion>::Elem {
HugeLength nbacked_;
int64_t whens_[kNumHugePages];
HugeLength total_unbacked_{NHugePages(0)};
-
- MemoryModifyFunction unback_;
+
+ MemoryModifyFunction unback_;
};
// Manage a set of regions from which we allocate.
@@ -152,7 +152,7 @@ class HugeRegionSet {
// we managed to release.
HugeLength Release();
- void Print(Printer *out) const;
+ void Print(Printer *out) const;
void PrintInPbtxt(PbtxtRegion *hpaa) const;
void AddSpanStats(SmallSpanStats *small, LargeSpanStats *large,
PageAgeHistograms *ages) const;
@@ -217,13 +217,13 @@ class HugeRegionSet {
};
// REQUIRES: r.len() == size(); r unbacked.
-inline HugeRegion::HugeRegion(HugeRange r, MemoryModifyFunction unback)
+inline HugeRegion::HugeRegion(HugeRange r, MemoryModifyFunction unback)
: tracker_{},
location_(r),
pages_used_{},
backed_{},
- nbacked_(NHugePages(0)),
- unback_(unback) {
+ nbacked_(NHugePages(0)),
+ unback_(unback) {
int64_t now = absl::base_internal::CycleClock::Now();
for (int i = 0; i < kNumHugePages; ++i) {
whens_[i] = now;
@@ -233,7 +233,7 @@ inline HugeRegion::HugeRegion(HugeRange r, MemoryModifyFunction unback)
}
}
-inline bool HugeRegion::MaybeGet(Length n, PageId *p, bool *from_released) {
+inline bool HugeRegion::MaybeGet(Length n, PageId *p, bool *from_released) {
if (n > longest_free()) return false;
auto index = Length(tracker_.FindAndMark(n.raw_num()));
@@ -246,7 +246,7 @@ inline bool HugeRegion::MaybeGet(Length n, PageId *p, bool *from_released) {
}
// If release=true, release any hugepages made empty as a result.
-inline void HugeRegion::Put(PageId p, Length n, bool release) {
+inline void HugeRegion::Put(PageId p, Length n, bool release) {
Length index = p - location_.start().first_page();
tracker_.Unmark(index.raw_num(), n.raw_num());
@@ -254,7 +254,7 @@ inline void HugeRegion::Put(PageId p, Length n, bool release) {
}
// Release any hugepages that are unused but backed.
-inline HugeLength HugeRegion::Release() {
+inline HugeLength HugeRegion::Release() {
HugeLength r = NHugePages(0);
bool should_unback_[kNumHugePages] = {};
for (size_t i = 0; i < kNumHugePages; ++i) {
@@ -267,9 +267,9 @@ inline HugeLength HugeRegion::Release() {
return r;
}
-inline void HugeRegion::AddSpanStats(SmallSpanStats *small,
- LargeSpanStats *large,
- PageAgeHistograms *ages) const {
+inline void HugeRegion::AddSpanStats(SmallSpanStats *small,
+ LargeSpanStats *large,
+ PageAgeHistograms *ages) const {
size_t index = 0, n;
Length f, u;
// This is complicated a bit by the backed/unbacked status of pages.
@@ -329,7 +329,7 @@ inline void HugeRegion::AddSpanStats(SmallSpanStats *small,
CHECK_CONDITION(u == unmapped_pages());
}
-inline HugeLength HugeRegion::backed() const {
+inline HugeLength HugeRegion::backed() const {
HugeLength b;
for (int i = 0; i < kNumHugePages; ++i) {
if (backed_[i]) {
@@ -340,7 +340,7 @@ inline HugeLength HugeRegion::backed() const {
return b;
}
-inline void HugeRegion::Print(Printer *out) const {
+inline void HugeRegion::Print(Printer *out) const {
const size_t kib_used = used_pages().in_bytes() / 1024;
const size_t kib_free = free_pages().in_bytes() / 1024;
const size_t kib_longest_free = longest_free().in_bytes() / 1024;
@@ -354,7 +354,7 @@ inline void HugeRegion::Print(Printer *out) const {
total_unbacked_.in_bytes() / 1024 / 1024);
}
-inline void HugeRegion::PrintInPbtxt(PbtxtRegion *detail) const {
+inline void HugeRegion::PrintInPbtxt(PbtxtRegion *detail) const {
detail->PrintI64("used_bytes", used_pages().in_bytes());
detail->PrintI64("free_bytes", free_pages().in_bytes());
detail->PrintI64("longest_free_range_bytes", longest_free().in_bytes());
@@ -363,7 +363,7 @@ inline void HugeRegion::PrintInPbtxt(PbtxtRegion *detail) const {
detail->PrintI64("total_unbacked_bytes", total_unbacked_.in_bytes());
}
-inline BackingStats HugeRegion::stats() const {
+inline BackingStats HugeRegion::stats() const {
BackingStats s;
s.system_bytes = location_.len().in_bytes();
s.free_bytes = free_pages().in_bytes();
@@ -371,7 +371,7 @@ inline BackingStats HugeRegion::stats() const {
return s;
}
-inline void HugeRegion::Inc(PageId p, Length n, bool *from_released) {
+inline void HugeRegion::Inc(PageId p, Length n, bool *from_released) {
bool should_back = false;
const int64_t now = absl::base_internal::CycleClock::Now();
while (n > Length(0)) {
@@ -393,7 +393,7 @@ inline void HugeRegion::Inc(PageId p, Length n, bool *from_released) {
*from_released = should_back;
}
-inline void HugeRegion::Dec(PageId p, Length n, bool release) {
+inline void HugeRegion::Dec(PageId p, Length n, bool release) {
const int64_t now = absl::base_internal::CycleClock::Now();
bool should_unback_[kNumHugePages] = {};
while (n > Length(0)) {
@@ -418,7 +418,7 @@ inline void HugeRegion::Dec(PageId p, Length n, bool release) {
}
}
-inline void HugeRegion::UnbackHugepages(bool should[kNumHugePages]) {
+inline void HugeRegion::UnbackHugepages(bool should[kNumHugePages]) {
const int64_t now = absl::base_internal::CycleClock::Now();
size_t i = 0;
while (i < kNumHugePages) {
@@ -436,7 +436,7 @@ inline void HugeRegion::UnbackHugepages(bool should[kNumHugePages]) {
HugeLength hl = NHugePages(j - i);
nbacked_ -= hl;
HugePage p = location_.start() + NHugePages(i);
- unback_(p.start_addr(), hl.in_bytes());
+ unback_(p.start_addr(), hl.in_bytes());
total_unbacked_ += hl;
i = j;
}
@@ -491,7 +491,7 @@ inline HugeLength HugeRegionSet<Region>::Release() {
}
template <typename Region>
-inline void HugeRegionSet<Region>::Print(Printer *out) const {
+inline void HugeRegionSet<Region>::Print(Printer *out) const {
out->printf("HugeRegionSet: 1 MiB+ allocations best-fit into %zu MiB slabs\n",
Region::size().in_bytes() / 1024 / 1024);
out->printf("HugeRegionSet: %zu total regions\n", n_);
@@ -544,8 +544,8 @@ inline BackingStats HugeRegionSet<Region>::stats() const {
return stats;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_HUGE_REGION_H_
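
A quick check of the sizing above under the same assumption of 2 MiB hugepages: a 1 GiB region (kRegionSize = HLFromBytes(1024 * 1024 * 1024)) holds 512 hugepages, so per-region arrays such as whens_[kNumHugePages] cost about 4 KiB each. The constants below are illustrative restatements, not the header's own:

#include <cstdint>

constexpr uint64_t kAssumedHugePageSize = uint64_t{1} << 21;  // 2 MiB
constexpr uint64_t kRegionBytes = uint64_t{1} << 30;          // 1 GiB
constexpr uint64_t kNumHugePages = kRegionBytes / kAssumedHugePageSize;
static_assert(kNumHugePages == 512, "512 hugepages per 1 GiB region");
// whens_[kNumHugePages] is an int64_t per hugepage: 512 * 8 bytes = 4 KiB.
static_assert(kNumHugePages * sizeof(int64_t) == 4096, "4 KiB of timestamps");

int main() { return 0; }
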
diff --git a/contrib/libs/tcmalloc/tcmalloc/huge_region_test.cc b/contrib/libs/tcmalloc/tcmalloc/huge_region_test.cc
index 4370b92762..aab450b3d0 100644
--- a/contrib/libs/tcmalloc/tcmalloc/huge_region_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/huge_region_test.cc
@@ -32,7 +32,7 @@
#include "tcmalloc/stats.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
using testing::NiceMock;
@@ -43,7 +43,7 @@ class HugeRegionTest : public ::testing::Test {
HugeRegionTest()
: // an unlikely magic page
p_(HugePageContaining(reinterpret_cast<void *>(0x1faced200000))),
- region_({p_, region_.size()}, MockUnback) {
+ region_({p_, region_.size()}, MockUnback) {
// we usually don't care about backing calls, unless testing that
// specifically.
mock_ = absl::make_unique<NiceMock<MockBackingInterface>>();
@@ -82,7 +82,7 @@ class HugeRegionTest : public ::testing::Test {
};
HugePage p_;
- typedef HugeRegion Region;
+ typedef HugeRegion Region;
Region region_;
size_t next_mark_{0};
size_t marks_[Region::size().in_pages().raw_num()];
@@ -454,13 +454,13 @@ static void NilUnback(void *p, size_t bytes) {}
class HugeRegionSetTest : public testing::Test {
protected:
- typedef HugeRegion Region;
+ typedef HugeRegion Region;
HugeRegionSetTest() { next_ = HugePageContaining(nullptr); }
std::unique_ptr<Region> GetRegion() {
- // These regions are backed by "real" memory, but we don't touch it.
- std::unique_ptr<Region> r(new Region({next_, Region::size()}, NilUnback));
+ // These regions are backed by "real" memory, but we don't touch it.
+ std::unique_ptr<Region> r(new Region({next_, Region::size()}, NilUnback));
next_ += Region::size();
return r;
}
@@ -528,9 +528,9 @@ TEST_F(HugeRegionSetTest, Set) {
});
for (int i = 0; i < regions.size(); i++) {
- Log(kLog, __FILE__, __LINE__, i, regions[i]->used_pages().raw_num(),
- regions[i]->free_pages().raw_num(),
- regions[i]->unmapped_pages().raw_num());
+ Log(kLog, __FILE__, __LINE__, i, regions[i]->used_pages().raw_num(),
+ regions[i]->free_pages().raw_num(),
+ regions[i]->unmapped_pages().raw_num());
}
// Now first two should be "full" (ish)
EXPECT_LE(Region::size().in_pages().raw_num() * 0.9,
@@ -555,11 +555,11 @@ TEST_F(HugeRegionSetTest, Set) {
// Print out the stats for inspection of formats.
std::vector<char> buf(64 * 1024);
- Printer out(&buf[0], buf.size());
+ Printer out(&buf[0], buf.size());
set_.Print(&out);
printf("%s\n", &buf[0]);
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/atomic_danger.h b/contrib/libs/tcmalloc/tcmalloc/internal/atomic_danger.h
index 49c95d66cb..2b83981257 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/atomic_danger.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/atomic_danger.h
@@ -20,9 +20,9 @@
#include <atomic>
#include <type_traits>
-#include "tcmalloc/internal/config.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
+#include "tcmalloc/internal/config.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
namespace atomic_danger {
@@ -55,6 +55,6 @@ IntType* CastToIntegral(std::atomic<IntType>* atomic_for_syscall) {
} // namespace atomic_danger
} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_ATOMIC_DANGER_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/atomic_stats_counter.h b/contrib/libs/tcmalloc/tcmalloc/internal/atomic_stats_counter.h
index da7f30646d..45cdaa6a17 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/atomic_stats_counter.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/atomic_stats_counter.h
@@ -18,9 +18,9 @@
#include <atomic>
#include "absl/base/macros.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
@@ -69,6 +69,6 @@ class StatsCounter {
} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_ATOMIC_STATS_COUNTER_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology.cc b/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology.cc
index 12a1709b34..1f1ff6986c 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology.cc
@@ -1,88 +1,88 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "tcmalloc/internal/cache_topology.h"
-
-#include <fcntl.h>
-#include <string.h>
-
-#include "absl/strings/numbers.h"
-#include "absl/strings/string_view.h"
-#include "tcmalloc/internal/config.h"
-#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/internal/util.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-
-namespace {
-int OpenSysfsCacheList(size_t cpu) {
- char path[PATH_MAX];
- snprintf(path, sizeof(path),
- "/sys/devices/system/cpu/cpu%zu/cache/index3/shared_cpu_list", cpu);
- return signal_safe_open(path, O_RDONLY | O_CLOEXEC);
-}
-} // namespace
-
-int BuildCpuToL3CacheMap_FindFirstNumberInBuf(absl::string_view current) {
- // Remove all parts coming after a dash or comma.
- const size_t dash = current.find('-');
- if (dash != absl::string_view::npos) current = current.substr(0, dash);
- const size_t comma = current.find(',');
- if (comma != absl::string_view::npos) current = current.substr(0, comma);
-
- int first_cpu;
- CHECK_CONDITION(absl::SimpleAtoi(current, &first_cpu));
- CHECK_CONDITION(first_cpu < CPU_SETSIZE);
- return first_cpu;
-}
-
-int BuildCpuToL3CacheMap(uint8_t l3_cache_index[CPU_SETSIZE]) {
- int index = 0;
- // Set to a sane value.
- memset(l3_cache_index, 0, CPU_SETSIZE);
- for (int cpu = 0; cpu < CPU_SETSIZE; ++cpu) {
- const int fd = OpenSysfsCacheList(cpu);
- if (fd == -1) {
-      // At some point we reach the number of CPUs on the system, and
- // we should exit. We verify that there was no other problem.
- CHECK_CONDITION(errno == ENOENT);
- return index;
- }
- // The file contains something like:
- // 0-11,22-33
- // we are looking for the first number in that file.
- char buf[10];
- const size_t bytes_read =
- signal_safe_read(fd, buf, 10, /*bytes_read=*/nullptr);
- signal_safe_close(fd);
- CHECK_CONDITION(bytes_read >= 0);
-
- const int first_cpu =
- BuildCpuToL3CacheMap_FindFirstNumberInBuf({buf, bytes_read});
- CHECK_CONDITION(first_cpu < CPU_SETSIZE);
- CHECK_CONDITION(first_cpu <= cpu);
- if (cpu == first_cpu) {
- l3_cache_index[cpu] = index++;
- } else {
- l3_cache_index[cpu] = l3_cache_index[first_cpu];
- }
- }
- return index;
-}
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tcmalloc/internal/cache_topology.h"
+
+#include <fcntl.h>
+#include <string.h>
+
+#include "absl/strings/numbers.h"
+#include "absl/strings/string_view.h"
+#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/logging.h"
+#include "tcmalloc/internal/util.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
+namespace {
+int OpenSysfsCacheList(size_t cpu) {
+ char path[PATH_MAX];
+ snprintf(path, sizeof(path),
+ "/sys/devices/system/cpu/cpu%zu/cache/index3/shared_cpu_list", cpu);
+ return signal_safe_open(path, O_RDONLY | O_CLOEXEC);
+}
+} // namespace
+
+int BuildCpuToL3CacheMap_FindFirstNumberInBuf(absl::string_view current) {
+ // Remove all parts coming after a dash or comma.
+ const size_t dash = current.find('-');
+ if (dash != absl::string_view::npos) current = current.substr(0, dash);
+ const size_t comma = current.find(',');
+ if (comma != absl::string_view::npos) current = current.substr(0, comma);
+
+ int first_cpu;
+ CHECK_CONDITION(absl::SimpleAtoi(current, &first_cpu));
+ CHECK_CONDITION(first_cpu < CPU_SETSIZE);
+ return first_cpu;
+}
+
+int BuildCpuToL3CacheMap(uint8_t l3_cache_index[CPU_SETSIZE]) {
+ int index = 0;
+ // Set to a sane value.
+ memset(l3_cache_index, 0, CPU_SETSIZE);
+ for (int cpu = 0; cpu < CPU_SETSIZE; ++cpu) {
+ const int fd = OpenSysfsCacheList(cpu);
+ if (fd == -1) {
+      // At some point we reach the number of CPUs on the system, and
+ // we should exit. We verify that there was no other problem.
+ CHECK_CONDITION(errno == ENOENT);
+ return index;
+ }
+ // The file contains something like:
+ // 0-11,22-33
+ // we are looking for the first number in that file.
+ char buf[10];
+ const size_t bytes_read =
+ signal_safe_read(fd, buf, 10, /*bytes_read=*/nullptr);
+ signal_safe_close(fd);
+ CHECK_CONDITION(bytes_read >= 0);
+
+ const int first_cpu =
+ BuildCpuToL3CacheMap_FindFirstNumberInBuf({buf, bytes_read});
+ CHECK_CONDITION(first_cpu < CPU_SETSIZE);
+ CHECK_CONDITION(first_cpu <= cpu);
+ if (cpu == first_cpu) {
+ l3_cache_index[cpu] = index++;
+ } else {
+ l3_cache_index[cpu] = l3_cache_index[first_cpu];
+ }
+ }
+ return index;
+}
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
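
A hypothetical consumer of the map built above, compiled against the header from this diff: data is sharded per L3 cache, so CPUs that share a cache index into the same shard. The counter array and the fixed cpu id are illustrative only:

#include <sched.h>

#include <cstdint>
#include <cstdio>

#include "tcmalloc/internal/cache_topology.h"

int main() {
  uint8_t l3_cache_index[CPU_SETSIZE];
  const int num_l3 =
      tcmalloc::tcmalloc_internal::BuildCpuToL3CacheMap(l3_cache_index);
  static long per_l3_counter[CPU_SETSIZE] = {};  // at most one shard per CPU
  const int cpu = 0;  // a real caller would use the current CPU id here
  per_l3_counter[l3_cache_index[cpu]]++;
  std::printf("detected %d L3 caches, shard for cpu0 = %ld\n", num_l3,
              per_l3_counter[l3_cache_index[cpu]]);
  return 0;
}
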
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology.h b/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology.h
index 292f175470..0058f23de9 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology.h
@@ -1,36 +1,36 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef TCMALLOC_INTERNAL_CACHE_TOPOLOGY_H_
-#define TCMALLOC_INTERNAL_CACHE_TOPOLOGY_H_
-
-#include "tcmalloc/internal/config.h"
-#include "tcmalloc/internal/util.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-
-// Build a mapping from cpuid to the index of the L3 cache used by that cpu.
-// Returns the number of caches detected.
-int BuildCpuToL3CacheMap(uint8_t l3_cache_index[CPU_SETSIZE]);
-
-// Helper function exposed to permit testing it.
-int BuildCpuToL3CacheMap_FindFirstNumberInBuf(absl::string_view current);
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
-
-#endif // TCMALLOC_INTERNAL_CACHE_TOPOLOGY_H_
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TCMALLOC_INTERNAL_CACHE_TOPOLOGY_H_
+#define TCMALLOC_INTERNAL_CACHE_TOPOLOGY_H_
+
+#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/util.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
+// Build a mapping from cpuid to the index of the L3 cache used by that cpu.
+// Returns the number of caches detected.
+int BuildCpuToL3CacheMap(uint8_t l3_cache_index[CPU_SETSIZE]);
+
+// Helper function exposed to permit testing it.
+int BuildCpuToL3CacheMap_FindFirstNumberInBuf(absl::string_view current);
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
+
+#endif // TCMALLOC_INTERNAL_CACHE_TOPOLOGY_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology_test.cc b/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology_test.cc
index 927ecace94..3145f28584 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/cache_topology_test.cc
@@ -1,51 +1,51 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "tcmalloc/internal/cache_topology.h"
-
-#include <sched.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-
-namespace {
-
-TEST(CacheTopology, ComputesSomethingReasonable) {
-  // This test verifies that each L3 cache serves the same number of CPUs. This
- // is not a strict requirement for the correct operation of this code, but a
- // sign of sanity.
- uint8_t l3_cache_index[CPU_SETSIZE];
- const int num_nodes =
- tcmalloc::tcmalloc_internal::BuildCpuToL3CacheMap(l3_cache_index);
- EXPECT_EQ(absl::base_internal::NumCPUs() % num_nodes, 0);
- ASSERT_GT(num_nodes, 0);
- static const int kMaxNodes = 256 / 8;
- int count_per_node[kMaxNodes] = {0};
- for (int i = 0; i < absl::base_internal::NumCPUs(); ++i) {
- count_per_node[l3_cache_index[i]]++;
- }
- for (int i = 0; i < num_nodes; ++i) {
- EXPECT_EQ(count_per_node[i], absl::base_internal::NumCPUs() / num_nodes);
- }
-}
-
-TEST(CacheTopology, FindFirstNumberInBuf) {
- using tcmalloc::tcmalloc_internal::BuildCpuToL3CacheMap_FindFirstNumberInBuf;
- EXPECT_EQ(7, BuildCpuToL3CacheMap_FindFirstNumberInBuf("7,-787"));
- EXPECT_EQ(5, BuildCpuToL3CacheMap_FindFirstNumberInBuf("5"));
- EXPECT_EQ(5, BuildCpuToL3CacheMap_FindFirstNumberInBuf("5-9"));
- EXPECT_EQ(5, BuildCpuToL3CacheMap_FindFirstNumberInBuf("5,9"));
-}
-
-} // namespace
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tcmalloc/internal/cache_topology.h"
+
+#include <sched.h>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+TEST(CacheTopology, ComputesSomethingReasonable) {
+  // This test verifies that each L3 cache serves the same number of CPUs. This
+ // is not a strict requirement for the correct operation of this code, but a
+ // sign of sanity.
+ uint8_t l3_cache_index[CPU_SETSIZE];
+ const int num_nodes =
+ tcmalloc::tcmalloc_internal::BuildCpuToL3CacheMap(l3_cache_index);
+ EXPECT_EQ(absl::base_internal::NumCPUs() % num_nodes, 0);
+ ASSERT_GT(num_nodes, 0);
+ static const int kMaxNodes = 256 / 8;
+ int count_per_node[kMaxNodes] = {0};
+ for (int i = 0; i < absl::base_internal::NumCPUs(); ++i) {
+ count_per_node[l3_cache_index[i]]++;
+ }
+ for (int i = 0; i < num_nodes; ++i) {
+ EXPECT_EQ(count_per_node[i], absl::base_internal::NumCPUs() / num_nodes);
+ }
+}
+
+TEST(CacheTopology, FindFirstNumberInBuf) {
+ using tcmalloc::tcmalloc_internal::BuildCpuToL3CacheMap_FindFirstNumberInBuf;
+ EXPECT_EQ(7, BuildCpuToL3CacheMap_FindFirstNumberInBuf("7,-787"));
+ EXPECT_EQ(5, BuildCpuToL3CacheMap_FindFirstNumberInBuf("5"));
+ EXPECT_EQ(5, BuildCpuToL3CacheMap_FindFirstNumberInBuf("5-9"));
+ EXPECT_EQ(5, BuildCpuToL3CacheMap_FindFirstNumberInBuf("5,9"));
+}
+
+} // namespace
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/clock.h b/contrib/libs/tcmalloc/tcmalloc/internal/clock.h
index 65c765203c..7d1782177d 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/clock.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/clock.h
@@ -1,41 +1,41 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef TCMALLOC_INTERNAL_CLOCK_H_
-#define TCMALLOC_INTERNAL_CLOCK_H_
-
-#include <stdint.h>
-
-#include "tcmalloc/internal/config.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-
-// Represents an abstract clock. The now and freq functions are analogous to
-// CycleClock::Now and CycleClock::Frequency, which will be the most commonly
-// used implementations. Tests can use this interface to mock out the clock.
-struct Clock {
- // Returns the current time in ticks (relative to an arbitrary time base).
- int64_t (*now)();
-
- // Returns the number of ticks per second.
- double (*freq)();
-};
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
-
-#endif // TCMALLOC_INTERNAL_CLOCK_H_
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TCMALLOC_INTERNAL_CLOCK_H_
+#define TCMALLOC_INTERNAL_CLOCK_H_
+
+#include <stdint.h>
+
+#include "tcmalloc/internal/config.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
+// Represents an abstract clock. The now and freq functions are analogous to
+// CycleClock::Now and CycleClock::Frequency, which will be the most commonly
+// used implementations. Tests can use this interface to mock out the clock.
+struct Clock {
+ // Returns the current time in ticks (relative to an arbitrary time base).
+ int64_t (*now)();
+
+ // Returns the number of ticks per second.
+ double (*freq)();
+};
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
+
+#endif // TCMALLOC_INTERNAL_CLOCK_H_
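
A minimal sketch of the mocking pattern the comment mentions, compiled against this header: a test supplies its own now/freq function pointers, much as FillerStatsTrackerTest above wires FakeClock and GetFakeClockFrequency into the tracker. FakeNow, FakeFrequency and the tick values are made up for the example:

#include <cstdint>

#include "tcmalloc/internal/clock.h"

namespace {
int64_t fake_ticks = 0;
int64_t FakeNow() { return fake_ticks; }
double FakeFrequency() { return 1000.0; }  // 1000 ticks per second
}  // namespace

int main() {
  tcmalloc::tcmalloc_internal::Clock clock{.now = FakeNow,
                                           .freq = FakeFrequency};
  fake_ticks += 500;  // pretend half a second elapsed
  const double seconds = clock.now() / clock.freq();
  return seconds == 0.5 ? 0 : 1;
}
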
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/config.h b/contrib/libs/tcmalloc/tcmalloc/internal/config.h
index 73dbab06aa..b83fd4a13b 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/config.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/config.h
@@ -17,8 +17,8 @@
#include <stddef.h>
-#include "absl/base/policy_checks.h"
-
+#include "absl/base/policy_checks.h"
+
// TCMALLOC_HAVE_SCHED_GETCPU is defined when the system implements
// sched_getcpu(3) as by glibc and it's imitators.
#if defined(__linux__) || defined(__ros__)
@@ -27,63 +27,63 @@
#undef TCMALLOC_HAVE_SCHED_GETCPU
#endif
-// TCMALLOC_HAVE_STRUCT_MALLINFO is defined when we know that the system has
-// `struct mallinfo` available.
-//
-// The FreeBSD libc, and subsequently macOS, does not provide the `mallopt`
-// interfaces. We know that bionic, glibc (and variants), newlib, and uclibc do
-// provide the `mallopt` interface. The musl libc is known to not provide the
-// interface, nor does it provide a macro for checking. As a result, we
-// conservatively state that `struct mallinfo` is only available on these
-// environments.
-#if !defined(OS_FREEBSD) && !defined(OS_MACOSX) && \
- (defined(__BIONIC__) || defined(__GLIBC__) || defined(__NEWLIB__) || \
- defined(__UCLIBC__))
-#define TCMALLOC_HAVE_STRUCT_MALLINFO 1
-#else
-#undef TCMALLOC_HAVE_STRUCT_MALLINFO
-#endif
-
-// When possible, name the text section as google_malloc. This macro should not
-// be added to header files as that may move unrelated code to google_malloc
-// section.
-#if defined(__clang__) && defined(__linux__)
-#define GOOGLE_MALLOC_SECTION_BEGIN \
- _Pragma("clang section text = \"google_malloc\"")
-#define GOOGLE_MALLOC_SECTION_END _Pragma("clang section text = \"\"")
-#else
-#define GOOGLE_MALLOC_SECTION_BEGIN
-#define GOOGLE_MALLOC_SECTION_END
-#endif
-
-#if defined(__GNUC__) && !defined(__clang__)
-#if __GNUC__ < 9 || (__GNUC__ == 9 && __GNUC_MINOR__ < 2)
-#error "GCC 9.2 or higher is required."
-#endif
-#endif
-
-#if defined(__clang__)
-#if __clang_major__ < 9
-#error "Clang 9 or higher is required."
-#endif
-#endif
-
-#if !defined(__x86_64__) && !defined(__ppc64__) && !defined(__arm__) && \
- !defined(__aarch64__) && !defined(__riscv)
-#error "Unsupported architecture."
-#endif
-
-#if !defined(__cplusplus) || __cplusplus < 201703L
-#error "TCMalloc requires C++17 or later."
-#else
-// Also explicitly use some C++17 syntax, so that restrictive flags like
-// `-Wc++14-compat` are detected early.
-namespace tcmalloc::google3_requires_cpp17_or_later {}
-#endif
-
-GOOGLE_MALLOC_SECTION_BEGIN
+// TCMALLOC_HAVE_STRUCT_MALLINFO is defined when we know that the system has
+// `struct mallinfo` available.
+//
+// The FreeBSD libc, and subsequently macOS, does not provide the `mallopt`
+// interfaces. We know that bionic, glibc (and variants), newlib, and uclibc do
+// provide the `mallopt` interface. The musl libc is known to not provide the
+// interface, nor does it provide a macro for checking. As a result, we
+// conservatively state that `struct mallinfo` is only available on these
+// environments.
+#if !defined(OS_FREEBSD) && !defined(OS_MACOSX) && \
+ (defined(__BIONIC__) || defined(__GLIBC__) || defined(__NEWLIB__) || \
+ defined(__UCLIBC__))
+#define TCMALLOC_HAVE_STRUCT_MALLINFO 1
+#else
+#undef TCMALLOC_HAVE_STRUCT_MALLINFO
+#endif
+
+// When possible, name the text section as google_malloc. This macro should not
+// be added to header files as that may move unrelated code to google_malloc
+// section.
+#if defined(__clang__) && defined(__linux__)
+#define GOOGLE_MALLOC_SECTION_BEGIN \
+ _Pragma("clang section text = \"google_malloc\"")
+#define GOOGLE_MALLOC_SECTION_END _Pragma("clang section text = \"\"")
+#else
+#define GOOGLE_MALLOC_SECTION_BEGIN
+#define GOOGLE_MALLOC_SECTION_END
+#endif
+
+#if defined(__GNUC__) && !defined(__clang__)
+#if __GNUC__ < 9 || (__GNUC__ == 9 && __GNUC_MINOR__ < 2)
+#error "GCC 9.2 or higher is required."
+#endif
+#endif
+
+#if defined(__clang__)
+#if __clang_major__ < 9
+#error "Clang 9 or higher is required."
+#endif
+#endif
+
+#if !defined(__x86_64__) && !defined(__ppc64__) && !defined(__arm__) && \
+ !defined(__aarch64__) && !defined(__riscv)
+#error "Unsupported architecture."
+#endif
+
+#if !defined(__cplusplus) || __cplusplus < 201703L
+#error "TCMalloc requires C++17 or later."
+#else
+// Also explicitly use some C++17 syntax, to prevent detection flags like
+// `-Wc++14-compat`.
+namespace tcmalloc::google3_requires_cpp17_or_later {}
+#endif
+
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
#if defined __x86_64__
// All current and planned x86_64 processors only look at the lower 48 bits
@@ -105,9 +105,9 @@ inline constexpr int kAddressBits =
// AARCH64 kernel supports 48-bit virtual addresses for both user and kernel.
inline constexpr int kAddressBits =
(sizeof(void*) < 8 ? (8 * sizeof(void*)) : 48);
-#elif defined __riscv && defined __linux__
-inline constexpr int kAddressBits =
- (sizeof(void *) < 8 ? (8 * sizeof(void *)) : 48);
+#elif defined __riscv && defined __linux__
+inline constexpr int kAddressBits =
+ (sizeof(void *) < 8 ? (8 * sizeof(void *)) : 48);
#else
inline constexpr int kAddressBits = 8 * sizeof(void*);
#endif
@@ -119,8 +119,8 @@ static constexpr size_t kHugePageShift = 21;
static constexpr size_t kHugePageShift = 24;
#elif defined __aarch64__ && defined __linux__
static constexpr size_t kHugePageShift = 21;
-#elif defined __riscv && defined __linux__
-static constexpr size_t kHugePageShift = 21;
+#elif defined __riscv && defined __linux__
+static constexpr size_t kHugePageShift = 21;
#else
// ...whatever, guess something big-ish
static constexpr size_t kHugePageShift = 21;
@@ -129,8 +129,8 @@ static constexpr size_t kHugePageShift = 21;
static constexpr size_t kHugePageSize = static_cast<size_t>(1)
<< kHugePageShift;
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_CONFIG_H_
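
The constants restored in this hunk fit together simply: kHugePageShift fixes the huge-page size as a power of two, and kAddressBits bounds the virtual addresses the allocator expects to handle. A minimal standalone sketch, assuming nothing beyond the C++ standard library (the constants mirror the riscv/linux branch above; the main() driver is purely illustrative):

#include <cstddef>
#include <cstdio>

// Mirrors the riscv/linux branch restored above: pointers narrower than
// 8 bytes use their full width, otherwise 48 bits of virtual address space.
inline constexpr int kAddressBits =
    (sizeof(void*) < 8 ? (8 * sizeof(void*)) : 48);

// 2^21 bytes = 2 MiB huge pages, the value chosen for riscv/linux above.
static constexpr std::size_t kHugePageShift = 21;
static constexpr std::size_t kHugePageSize = static_cast<std::size_t>(1)
                                             << kHugePageShift;

int main() {
  std::printf("address bits: %d\n", kAddressBits);
  std::printf("huge page size: %zu bytes (%zu MiB)\n", kHugePageSize,
              kHugePageSize >> 20);
  // Largest virtual address the allocator expects to deal with.
  std::printf("max address: %#llx\n", (1ULL << kAddressBits) - 1);
  return 0;
}
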
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/environment.cc b/contrib/libs/tcmalloc/tcmalloc/internal/environment.cc
index e786dd7a96..4f7e2698e3 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/environment.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/environment.cc
@@ -15,7 +15,7 @@
#include <string.h>
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
@@ -42,4 +42,4 @@ const char* thread_safe_getenv(const char* env_var) {
} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/environment.h b/contrib/libs/tcmalloc/tcmalloc/internal/environment.h
index f54840e8d7..30d160cbff 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/environment.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/environment.h
@@ -15,9 +15,9 @@
#ifndef TCMALLOC_INTERNAL_ENVIRONMENT_H_
#define TCMALLOC_INTERNAL_ENVIRONMENT_H_
-#include "tcmalloc/internal/config.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
+#include "tcmalloc/internal/config.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
@@ -37,6 +37,6 @@ const char* thread_safe_getenv(const char* env_var);
} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_ENVIRONMENT_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_predictions.h b/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_predictions.h
index 514dd4a73e..936409ca7d 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_predictions.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_predictions.h
@@ -1,252 +1,252 @@
-// Copyright 2020 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef TCMALLOC_INTERNAL_LIFETIME_PREDICTIONS_H_
-#define TCMALLOC_INTERNAL_LIFETIME_PREDICTIONS_H_
-
-#include <algorithm>
-#include <cstdlib>
-#include <functional>
-
-#include "absl/algorithm/container.h"
-#include "absl/base/const_init.h"
-#include "absl/base/internal/low_level_alloc.h"
-#include "absl/base/internal/spinlock.h"
-#include "absl/debugging/stacktrace.h"
-#include "absl/hash/hash.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "tcmalloc/internal/linked_list.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-
-// Counts how many times we observed objects with a particular stack trace
-// that were short lived/long lived. Each LifetimeStats object is associated
-// with a particular allocation site (i.e., allocation stack trace) and each
-// allocation site has at most one LifetimeStats object. All accesses to
-// LifetimeStats objects need to be synchronized via the page heap lock.
-class LifetimeStats : public TList<LifetimeStats>::Elem {
- public:
- enum class Certainty { kLowCertainty, kHighCertainty };
- enum class Prediction { kShortLived, kLongLived };
-
- void Update(Prediction prediction) {
- if (prediction == Prediction::kShortLived) {
- short_lived_++;
- } else {
- long_lived_++;
- }
- }
-
- Prediction Predict(Certainty certainty) {
- if (certainty == Certainty::kLowCertainty) {
- return (short_lived_ > long_lived_) ? Prediction::kShortLived
- : Prediction::kLongLived;
- } else {
- // If little data was collected, predict as long-lived (current behavior).
- return (short_lived_ > (long_lived_ + 10)) ? Prediction::kShortLived
- : Prediction::kLongLived;
- }
- }
-
- // Reference counts are protected by LifetimeDatabase::table_lock_.
-
- // Increments the reference count of this entry.
- void IncRef() { ++refcount_; }
-
- // Returns true if and only if the reference count reaches 0.
- bool DecRef() { return --refcount_ == 0; }
-
- private:
- uint64_t refcount_ = 1;
- uint64_t short_lived_ = 0;
- uint64_t long_lived_ = 0;
-};
-
-// Manages stack traces and statistics about their associated lifetimes. Since
-// the database can fill up, old entries are evicted. Evicted entries need to
-// survive as long as the last lifetime tracker referencing them and are thus
-// reference-counted.
-class LifetimeDatabase {
- public:
- struct Key {
- int depth; // Number of PC values stored in array below
- void* stack[kMaxStackDepth];
-
- // Statically instantiate at the start of the allocation to acquire
- // the allocation stack trace.
- Key() { depth = absl::GetStackTrace(stack, kMaxStackDepth, 1); }
-
- template <typename H>
- friend H AbslHashValue(H h, const Key& c) {
- return H::combine(H::combine_contiguous(std::move(h), c.stack, c.depth),
- c.depth);
- }
-
- bool operator==(const Key& other) const {
- if (depth != other.depth) {
- return false;
- }
- return std::equal(stack, stack + depth, other.stack);
- }
- };
-
- // Captures statistics associated with the low-level allocator backing the
- // memory used by the database.
- struct ArenaStats {
- uint64_t bytes_allocated;
- };
-
- static constexpr int kMaxDatabaseSize = 1024;
-
- LifetimeDatabase() {}
- ~LifetimeDatabase() {}
-
- // Not copyable or movable
- LifetimeDatabase(const LifetimeDatabase&) = delete;
- LifetimeDatabase& operator=(const LifetimeDatabase&) = delete;
-
- // Identifies the current stack trace and returns a handle to the lifetime
- // statistics associated with this stack trace. May run outside the page heap
- // lock -- we therefore need to do our own locking. This increments the
- // reference count of the lifetime stats object and the caller is responsible
- // for calling RemoveLifetimeStatsReference when finished with the object.
- LifetimeStats* LookupOrAddLifetimeStats(Key* k) {
- absl::base_internal::SpinLockHolder h(&table_lock_);
- auto it = table_.find(*k);
- LifetimeStats* s;
- if (it == table_.end()) {
- MaybeEvictLRU();
- // Allocate a new entry using the low-level allocator, which is safe
- // to call from within TCMalloc.
- s = stats_allocator_.allocate(1);
- new (s) LifetimeStats();
- table_.insert(std::make_pair(*k, s));
- stats_fifo_.append(s);
- } else {
- s = it->second;
- UpdateLRU(s);
- }
- s->IncRef();
- return s;
- }
-
- void RemoveLifetimeStatsReference(LifetimeStats* s) {
- absl::base_internal::SpinLockHolder h(&table_lock_);
- if (s->DecRef()) {
- stats_allocator_.deallocate(s, 1);
- }
- }
-
- size_t size() const {
- absl::base_internal::SpinLockHolder h(&table_lock_);
- return table_.size();
- }
-
- size_t evictions() const {
- absl::base_internal::SpinLockHolder h(&table_lock_);
- return n_evictions_;
- }
-
- static ArenaStats* arena_stats() {
- static ArenaStats stats = {0};
- return &stats;
- }
-
- protected:
- static const int kMaxStackDepth = 64;
-
- static absl::base_internal::LowLevelAlloc::Arena* GetArena() {
- static absl::base_internal::LowLevelAlloc::Arena* arena =
- absl::base_internal::LowLevelAlloc::NewArena(0);
- return arena;
- }
-
- static uint64_t bytes_allocated_ ABSL_GUARDED_BY(table_lock_);
-
- void UpdateLRU(LifetimeStats* stats)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(table_lock_) {
- stats_fifo_.remove(stats);
- stats_fifo_.append(stats);
- }
-
-  // Evicts the least recently used entry when the table is full.
- void MaybeEvictLRU() ABSL_EXCLUSIVE_LOCKS_REQUIRED(table_lock_) {
- if (table_.size() < kMaxDatabaseSize) {
- return;
- }
- n_evictions_++;
- LifetimeStats* evict = stats_fifo_.first();
- stats_fifo_.remove(evict);
- for (auto it = table_.begin(); it != table_.end(); ++it) {
- if (it->second == evict) {
- table_.erase(it);
- if (evict->DecRef()) {
- stats_allocator_.deallocate(evict, 1);
- }
- return;
- }
- }
- CHECK_CONDITION(false); // Should not happen
- }
-
- private:
- template <typename T>
- class MyAllocator : public std::allocator<T> {
- public:
- template <typename U>
- struct rebind {
- using other = MyAllocator<U>;
- };
-
- MyAllocator() noexcept {}
-
- template <typename U>
- explicit MyAllocator(const MyAllocator<U>&) noexcept {}
-
- T* allocate(size_t num_objects, const void* = nullptr) {
- size_t bytes = num_objects * sizeof(T);
- arena_stats()->bytes_allocated += bytes;
- return static_cast<T*>(absl::base_internal::LowLevelAlloc::AllocWithArena(
- bytes, GetArena()));
- }
-
- void deallocate(T* p, size_t num_objects) {
- size_t bytes = num_objects * sizeof(T);
- arena_stats()->bytes_allocated -= bytes;
- absl::base_internal::LowLevelAlloc::Free(p);
- }
- };
-
- MyAllocator<LifetimeStats> stats_allocator_ ABSL_GUARDED_BY(table_lock_);
- mutable absl::base_internal::SpinLock table_lock_{
- absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY};
-
- // Stores the current mapping from allocation site to LifetimeStats.
- std::unordered_map<Key, LifetimeStats*, absl::Hash<Key>, std::equal_to<Key>,
- MyAllocator<std::pair<const Key, LifetimeStats*>>>
- table_ ABSL_GUARDED_BY(table_lock_);
-
-  // Stores the entries in least-recently-used order (oldest access first).
- TList<LifetimeStats> stats_fifo_ ABSL_GUARDED_BY(table_lock_);
- size_t n_evictions_ ABSL_GUARDED_BY(table_lock_) = 0;
-};
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
-
-#endif // TCMALLOC_INTERNAL_LIFETIME_PREDICTIONS_H_
+// Copyright 2020 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TCMALLOC_INTERNAL_LIFETIME_PREDICTIONS_H_
+#define TCMALLOC_INTERNAL_LIFETIME_PREDICTIONS_H_
+
+#include <algorithm>
+#include <cstdlib>
+#include <functional>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/const_init.h"
+#include "absl/base/internal/low_level_alloc.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/hash/hash.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "tcmalloc/internal/linked_list.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
+// Counts how many times we observed objects with a particular stack trace
+// that were short lived/long lived. Each LifetimeStats object is associated
+// with a particular allocation site (i.e., allocation stack trace) and each
+// allocation site has at most one LifetimeStats object. All accesses to
+// LifetimeStats objects need to be synchronized via the page heap lock.
+class LifetimeStats : public TList<LifetimeStats>::Elem {
+ public:
+ enum class Certainty { kLowCertainty, kHighCertainty };
+ enum class Prediction { kShortLived, kLongLived };
+
+ void Update(Prediction prediction) {
+ if (prediction == Prediction::kShortLived) {
+ short_lived_++;
+ } else {
+ long_lived_++;
+ }
+ }
+
+ Prediction Predict(Certainty certainty) {
+ if (certainty == Certainty::kLowCertainty) {
+ return (short_lived_ > long_lived_) ? Prediction::kShortLived
+ : Prediction::kLongLived;
+ } else {
+ // If little data was collected, predict as long-lived (current behavior).
+ return (short_lived_ > (long_lived_ + 10)) ? Prediction::kShortLived
+ : Prediction::kLongLived;
+ }
+ }
+
+ // Reference counts are protected by LifetimeDatabase::table_lock_.
+
+ // Increments the reference count of this entry.
+ void IncRef() { ++refcount_; }
+
+ // Returns true if and only if the reference count reaches 0.
+ bool DecRef() { return --refcount_ == 0; }
+
+ private:
+ uint64_t refcount_ = 1;
+ uint64_t short_lived_ = 0;
+ uint64_t long_lived_ = 0;
+};
+
+// Manages stack traces and statistics about their associated lifetimes. Since
+// the database can fill up, old entries are evicted. Evicted entries need to
+// survive as long as the last lifetime tracker referencing them and are thus
+// reference-counted.
+class LifetimeDatabase {
+ public:
+ struct Key {
+ int depth; // Number of PC values stored in array below
+ void* stack[kMaxStackDepth];
+
+ // Statically instantiate at the start of the allocation to acquire
+ // the allocation stack trace.
+ Key() { depth = absl::GetStackTrace(stack, kMaxStackDepth, 1); }
+
+ template <typename H>
+ friend H AbslHashValue(H h, const Key& c) {
+ return H::combine(H::combine_contiguous(std::move(h), c.stack, c.depth),
+ c.depth);
+ }
+
+ bool operator==(const Key& other) const {
+ if (depth != other.depth) {
+ return false;
+ }
+ return std::equal(stack, stack + depth, other.stack);
+ }
+ };
+
+ // Captures statistics associated with the low-level allocator backing the
+ // memory used by the database.
+ struct ArenaStats {
+ uint64_t bytes_allocated;
+ };
+
+ static constexpr int kMaxDatabaseSize = 1024;
+
+ LifetimeDatabase() {}
+ ~LifetimeDatabase() {}
+
+ // Not copyable or movable
+ LifetimeDatabase(const LifetimeDatabase&) = delete;
+ LifetimeDatabase& operator=(const LifetimeDatabase&) = delete;
+
+ // Identifies the current stack trace and returns a handle to the lifetime
+ // statistics associated with this stack trace. May run outside the page heap
+ // lock -- we therefore need to do our own locking. This increments the
+ // reference count of the lifetime stats object and the caller is responsible
+ // for calling RemoveLifetimeStatsReference when finished with the object.
+ LifetimeStats* LookupOrAddLifetimeStats(Key* k) {
+ absl::base_internal::SpinLockHolder h(&table_lock_);
+ auto it = table_.find(*k);
+ LifetimeStats* s;
+ if (it == table_.end()) {
+ MaybeEvictLRU();
+ // Allocate a new entry using the low-level allocator, which is safe
+ // to call from within TCMalloc.
+ s = stats_allocator_.allocate(1);
+ new (s) LifetimeStats();
+ table_.insert(std::make_pair(*k, s));
+ stats_fifo_.append(s);
+ } else {
+ s = it->second;
+ UpdateLRU(s);
+ }
+ s->IncRef();
+ return s;
+ }
+
+ void RemoveLifetimeStatsReference(LifetimeStats* s) {
+ absl::base_internal::SpinLockHolder h(&table_lock_);
+ if (s->DecRef()) {
+ stats_allocator_.deallocate(s, 1);
+ }
+ }
+
+ size_t size() const {
+ absl::base_internal::SpinLockHolder h(&table_lock_);
+ return table_.size();
+ }
+
+ size_t evictions() const {
+ absl::base_internal::SpinLockHolder h(&table_lock_);
+ return n_evictions_;
+ }
+
+ static ArenaStats* arena_stats() {
+ static ArenaStats stats = {0};
+ return &stats;
+ }
+
+ protected:
+ static const int kMaxStackDepth = 64;
+
+ static absl::base_internal::LowLevelAlloc::Arena* GetArena() {
+ static absl::base_internal::LowLevelAlloc::Arena* arena =
+ absl::base_internal::LowLevelAlloc::NewArena(0);
+ return arena;
+ }
+
+ static uint64_t bytes_allocated_ ABSL_GUARDED_BY(table_lock_);
+
+ void UpdateLRU(LifetimeStats* stats)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(table_lock_) {
+ stats_fifo_.remove(stats);
+ stats_fifo_.append(stats);
+ }
+
+  // Evicts the least recently used entry when the table is full.
+ void MaybeEvictLRU() ABSL_EXCLUSIVE_LOCKS_REQUIRED(table_lock_) {
+ if (table_.size() < kMaxDatabaseSize) {
+ return;
+ }
+ n_evictions_++;
+ LifetimeStats* evict = stats_fifo_.first();
+ stats_fifo_.remove(evict);
+ for (auto it = table_.begin(); it != table_.end(); ++it) {
+ if (it->second == evict) {
+ table_.erase(it);
+ if (evict->DecRef()) {
+ stats_allocator_.deallocate(evict, 1);
+ }
+ return;
+ }
+ }
+ CHECK_CONDITION(false); // Should not happen
+ }
+
+ private:
+ template <typename T>
+ class MyAllocator : public std::allocator<T> {
+ public:
+ template <typename U>
+ struct rebind {
+ using other = MyAllocator<U>;
+ };
+
+ MyAllocator() noexcept {}
+
+ template <typename U>
+ explicit MyAllocator(const MyAllocator<U>&) noexcept {}
+
+ T* allocate(size_t num_objects, const void* = nullptr) {
+ size_t bytes = num_objects * sizeof(T);
+ arena_stats()->bytes_allocated += bytes;
+ return static_cast<T*>(absl::base_internal::LowLevelAlloc::AllocWithArena(
+ bytes, GetArena()));
+ }
+
+ void deallocate(T* p, size_t num_objects) {
+ size_t bytes = num_objects * sizeof(T);
+ arena_stats()->bytes_allocated -= bytes;
+ absl::base_internal::LowLevelAlloc::Free(p);
+ }
+ };
+
+ MyAllocator<LifetimeStats> stats_allocator_ ABSL_GUARDED_BY(table_lock_);
+ mutable absl::base_internal::SpinLock table_lock_{
+ absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY};
+
+ // Stores the current mapping from allocation site to LifetimeStats.
+ std::unordered_map<Key, LifetimeStats*, absl::Hash<Key>, std::equal_to<Key>,
+ MyAllocator<std::pair<const Key, LifetimeStats*>>>
+ table_ ABSL_GUARDED_BY(table_lock_);
+
+  // Stores the entries in least-recently-used order (oldest access first).
+ TList<LifetimeStats> stats_fifo_ ABSL_GUARDED_BY(table_lock_);
+ size_t n_evictions_ ABSL_GUARDED_BY(table_lock_) = 0;
+};
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
+
+#endif // TCMALLOC_INTERNAL_LIFETIME_PREDICTIONS_H_
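
The intended call pattern for the restored LifetimeDatabase is: capture a Key at the allocation site (its constructor records the current stack), look up or create the per-site LifetimeStats, record observations, and drop the reference when done. A sketch assuming it is compiled inside this source tree; ObserveAllocationSite is a hypothetical helper, not part of TCMalloc, and the page-heap-lock rule for LifetimeStats accesses is glossed over:

#include "tcmalloc/internal/lifetime_predictions.h"

namespace tcmalloc {
namespace tcmalloc_internal {

// Hypothetical allocation-site instrumentation: capture the stack once,
// look up or create the per-site stats, record an observation, and drop
// the reference when done. Real callers must also honor the
// synchronization rules described in the header.
LifetimeStats::Prediction ObserveAllocationSite(LifetimeDatabase& db,
                                                bool was_short_lived) {
  LifetimeDatabase::Key key;  // records the current stack trace
  LifetimeStats* stats = db.LookupOrAddLifetimeStats(&key);
  stats->Update(was_short_lived ? LifetimeStats::Prediction::kShortLived
                                : LifetimeStats::Prediction::kLongLived);
  LifetimeStats::Prediction prediction =
      stats->Predict(LifetimeStats::Certainty::kLowCertainty);
  db.RemoveLifetimeStatsReference(stats);  // balances the implicit IncRef
  return prediction;
}

}  // namespace tcmalloc_internal
}  // namespace tcmalloc
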
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_predictions_test.cc b/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_predictions_test.cc
index 4280890afe..7562c97858 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_predictions_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_predictions_test.cc
@@ -1,156 +1,156 @@
-// Copyright 2019 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "tcmalloc/internal/lifetime_predictions.h"
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "tcmalloc/testing/testutil.h"
-
-namespace tcmalloc {
-namespace tcmalloc_internal {
-namespace {
-
-class LifetimeDatabaseTest : public testing::Test {
- protected:
- LifetimeDatabase lifetime_database_;
-
- ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL LifetimeStats*
- AllocateA() {
- LifetimeDatabase::Key key;
- return lifetime_database_.LookupOrAddLifetimeStats(&key);
- }
-
- ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL LifetimeStats*
- AllocateB() {
- LifetimeDatabase::Key key;
- return lifetime_database_.LookupOrAddLifetimeStats(&key);
- }
-
- ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL LifetimeStats*
- AllocateWithStacktraceId(int id) {
- if (id == 0) {
- LifetimeDatabase::Key key;
- return lifetime_database_.LookupOrAddLifetimeStats(&key);
- } else if (id % 2 == 0) {
- return AllocateWithStacktraceId(id / 2);
- } else {
- return AllocateWithStacktraceId_2(id / 2);
- }
- }
-
- // Record a sufficiently large number of short-lived allocations to make
- // a prediction short-lived, absent any long-lived allocations.
- void MakeShortLived(LifetimeStats* stats, bool high_certainty) {
- for (int i = 0; i < (high_certainty ? 100 : 2); i++) {
- stats->Update(LifetimeStats::Prediction::kShortLived);
- }
- }
-
- private:
- ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL LifetimeStats*
- AllocateWithStacktraceId_2(int id) {
- if (id == 0) {
- LifetimeDatabase::Key key;
- return lifetime_database_.LookupOrAddLifetimeStats(&key);
- } else if (id % 2 == 0) {
- return AllocateWithStacktraceId(id / 2);
- } else {
- return AllocateWithStacktraceId_2(id / 2);
- }
- }
-};
-
-TEST_F(LifetimeDatabaseTest, Basic) {
- PRAGMA_NO_UNROLL
- for (int i = 0; i < 2; i++) {
- LifetimeStats* r1 = AllocateA();
- LifetimeStats* r2 = AllocateB();
- LifetimeStats* r3 = AllocateB();
- ASSERT_NE(nullptr, r1);
- ASSERT_NE(nullptr, r2);
- ASSERT_NE(nullptr, r3);
-
- // First iteration: set short-lived count.
- if (i == 0) {
- MakeShortLived(r1, false);
- MakeShortLived(r2, true);
- } else {
- EXPECT_EQ(LifetimeStats::Prediction::kShortLived,
- r1->Predict(LifetimeStats::Certainty::kLowCertainty));
- EXPECT_EQ(LifetimeStats::Prediction::kLongLived,
- r1->Predict(LifetimeStats::Certainty::kHighCertainty));
- EXPECT_EQ(LifetimeStats::Prediction::kShortLived,
- r2->Predict(LifetimeStats::Certainty::kLowCertainty));
- EXPECT_EQ(LifetimeStats::Prediction::kShortLived,
- r2->Predict(LifetimeStats::Certainty::kHighCertainty));
- EXPECT_EQ(LifetimeStats::Prediction::kLongLived,
- r3->Predict(LifetimeStats::Certainty::kLowCertainty));
- EXPECT_EQ(LifetimeStats::Prediction::kLongLived,
- r3->Predict(LifetimeStats::Certainty::kHighCertainty));
- }
-
- lifetime_database_.RemoveLifetimeStatsReference(r1);
- lifetime_database_.RemoveLifetimeStatsReference(r2);
- lifetime_database_.RemoveLifetimeStatsReference(r3);
- }
-}
-
-TEST_F(LifetimeDatabaseTest, Eviction) {
- const int kEntries = 5 * LifetimeDatabase::kMaxDatabaseSize;
-
- std::vector<LifetimeStats*> refs;
-
- PRAGMA_NO_UNROLL
- for (int i = 0; i < kEntries; i++) {
- LifetimeStats* r = AllocateWithStacktraceId(i);
- refs.push_back(r);
-
- ASSERT_NE(nullptr, r);
- if (i < LifetimeDatabase::kMaxDatabaseSize) {
- MakeShortLived(r, true);
- }
- }
-
- // Check that even evicted entries are still accessible due to refcounts.
- for (int i = 0; i < kEntries; i++) {
- if (i < LifetimeDatabase::kMaxDatabaseSize) {
- EXPECT_EQ(LifetimeStats::Prediction::kShortLived,
- refs[i]->Predict(LifetimeStats::Certainty::kLowCertainty));
- } else {
- EXPECT_EQ(LifetimeStats::Prediction::kLongLived,
- refs[i]->Predict(LifetimeStats::Certainty::kLowCertainty));
- }
- }
-
- EXPECT_EQ(LifetimeDatabase::kMaxDatabaseSize, lifetime_database_.size());
- EXPECT_EQ(kEntries - LifetimeDatabase::kMaxDatabaseSize,
- lifetime_database_.evictions());
-
- uint64_t before_bytes = lifetime_database_.arena_stats()->bytes_allocated;
-
- // Return all of the references, which should drop the remaining refcounts.
- for (int i = 0; i < kEntries; i++) {
- lifetime_database_.RemoveLifetimeStatsReference(refs[i]);
- }
-
- uint64_t after_bytes = lifetime_database_.arena_stats()->bytes_allocated;
-
- // Check that this freed up memory
- EXPECT_LT(after_bytes, before_bytes);
-}
-
-} // namespace
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
+// Copyright 2019 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tcmalloc/internal/lifetime_predictions.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "tcmalloc/testing/testutil.h"
+
+namespace tcmalloc {
+namespace tcmalloc_internal {
+namespace {
+
+class LifetimeDatabaseTest : public testing::Test {
+ protected:
+ LifetimeDatabase lifetime_database_;
+
+ ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL LifetimeStats*
+ AllocateA() {
+ LifetimeDatabase::Key key;
+ return lifetime_database_.LookupOrAddLifetimeStats(&key);
+ }
+
+ ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL LifetimeStats*
+ AllocateB() {
+ LifetimeDatabase::Key key;
+ return lifetime_database_.LookupOrAddLifetimeStats(&key);
+ }
+
+ ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL LifetimeStats*
+ AllocateWithStacktraceId(int id) {
+ if (id == 0) {
+ LifetimeDatabase::Key key;
+ return lifetime_database_.LookupOrAddLifetimeStats(&key);
+ } else if (id % 2 == 0) {
+ return AllocateWithStacktraceId(id / 2);
+ } else {
+ return AllocateWithStacktraceId_2(id / 2);
+ }
+ }
+
+ // Record a sufficiently large number of short-lived allocations to make
+ // a prediction short-lived, absent any long-lived allocations.
+ void MakeShortLived(LifetimeStats* stats, bool high_certainty) {
+ for (int i = 0; i < (high_certainty ? 100 : 2); i++) {
+ stats->Update(LifetimeStats::Prediction::kShortLived);
+ }
+ }
+
+ private:
+ ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL LifetimeStats*
+ AllocateWithStacktraceId_2(int id) {
+ if (id == 0) {
+ LifetimeDatabase::Key key;
+ return lifetime_database_.LookupOrAddLifetimeStats(&key);
+ } else if (id % 2 == 0) {
+ return AllocateWithStacktraceId(id / 2);
+ } else {
+ return AllocateWithStacktraceId_2(id / 2);
+ }
+ }
+};
+
+TEST_F(LifetimeDatabaseTest, Basic) {
+ PRAGMA_NO_UNROLL
+ for (int i = 0; i < 2; i++) {
+ LifetimeStats* r1 = AllocateA();
+ LifetimeStats* r2 = AllocateB();
+ LifetimeStats* r3 = AllocateB();
+ ASSERT_NE(nullptr, r1);
+ ASSERT_NE(nullptr, r2);
+ ASSERT_NE(nullptr, r3);
+
+ // First iteration: set short-lived count.
+ if (i == 0) {
+ MakeShortLived(r1, false);
+ MakeShortLived(r2, true);
+ } else {
+ EXPECT_EQ(LifetimeStats::Prediction::kShortLived,
+ r1->Predict(LifetimeStats::Certainty::kLowCertainty));
+ EXPECT_EQ(LifetimeStats::Prediction::kLongLived,
+ r1->Predict(LifetimeStats::Certainty::kHighCertainty));
+ EXPECT_EQ(LifetimeStats::Prediction::kShortLived,
+ r2->Predict(LifetimeStats::Certainty::kLowCertainty));
+ EXPECT_EQ(LifetimeStats::Prediction::kShortLived,
+ r2->Predict(LifetimeStats::Certainty::kHighCertainty));
+ EXPECT_EQ(LifetimeStats::Prediction::kLongLived,
+ r3->Predict(LifetimeStats::Certainty::kLowCertainty));
+ EXPECT_EQ(LifetimeStats::Prediction::kLongLived,
+ r3->Predict(LifetimeStats::Certainty::kHighCertainty));
+ }
+
+ lifetime_database_.RemoveLifetimeStatsReference(r1);
+ lifetime_database_.RemoveLifetimeStatsReference(r2);
+ lifetime_database_.RemoveLifetimeStatsReference(r3);
+ }
+}
+
+TEST_F(LifetimeDatabaseTest, Eviction) {
+ const int kEntries = 5 * LifetimeDatabase::kMaxDatabaseSize;
+
+ std::vector<LifetimeStats*> refs;
+
+ PRAGMA_NO_UNROLL
+ for (int i = 0; i < kEntries; i++) {
+ LifetimeStats* r = AllocateWithStacktraceId(i);
+ refs.push_back(r);
+
+ ASSERT_NE(nullptr, r);
+ if (i < LifetimeDatabase::kMaxDatabaseSize) {
+ MakeShortLived(r, true);
+ }
+ }
+
+ // Check that even evicted entries are still accessible due to refcounts.
+ for (int i = 0; i < kEntries; i++) {
+ if (i < LifetimeDatabase::kMaxDatabaseSize) {
+ EXPECT_EQ(LifetimeStats::Prediction::kShortLived,
+ refs[i]->Predict(LifetimeStats::Certainty::kLowCertainty));
+ } else {
+ EXPECT_EQ(LifetimeStats::Prediction::kLongLived,
+ refs[i]->Predict(LifetimeStats::Certainty::kLowCertainty));
+ }
+ }
+
+ EXPECT_EQ(LifetimeDatabase::kMaxDatabaseSize, lifetime_database_.size());
+ EXPECT_EQ(kEntries - LifetimeDatabase::kMaxDatabaseSize,
+ lifetime_database_.evictions());
+
+ uint64_t before_bytes = lifetime_database_.arena_stats()->bytes_allocated;
+
+ // Return all of the references, which should drop the remaining refcounts.
+ for (int i = 0; i < kEntries; i++) {
+ lifetime_database_.RemoveLifetimeStatsReference(refs[i]);
+ }
+
+ uint64_t after_bytes = lifetime_database_.arena_stats()->bytes_allocated;
+
+ // Check that this freed up memory
+ EXPECT_LT(after_bytes, before_bytes);
+}
+
+} // namespace
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_tracker.h b/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_tracker.h
index d348dbe609..d84c08d287 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_tracker.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_tracker.h
@@ -1,172 +1,172 @@
-// Copyright 2020 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef TCMALLOC_INTERNAL_LIFETIME_TRACKER_H_
-#define TCMALLOC_INTERNAL_LIFETIME_TRACKER_H_
-
-#include "absl/base/internal/cycleclock.h"
-#include "absl/time/time.h"
-#include "tcmalloc/internal/clock.h"
-#include "tcmalloc/internal/lifetime_predictions.h"
-#include "tcmalloc/internal/linked_list.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-
-template <typename LifetimeDatabaseT, typename LifetimeStatsT>
-class LifetimeTrackerImpl {
- public:
- // A tracker is attached to an individual allocation and tracks its lifetime.
- // This allocation can either be in a region or in the filler. It contains
- // a pointer back to the LifetimeStats of the allocation site that generated
- // this allocation, so that statistics can be updated.
- struct Tracker : public TList<Tracker>::Elem {
- // The deadline after which the object is considered long-lived.
- uint64_t deadline = 0;
-
- // If the allocation is associated with a counterfactual, this contains
- // the hypothetical location in the short-lived region (null otherwise).
- void* counterfactual_ptr = nullptr;
-
- // Lifetime statistics associated with this allocation (will be updated when
- // the lifetime is known).
- LifetimeStatsT* lifetime;
-
- // The allocation this stat belongs to was predicted short-lived.
- bool predicted_short_lived = false;
-
- // Is this element currently tracked by the lifetime tracker?
- bool is_tracked() { return deadline != 0; }
-
- // Reset the element (implies not tracked).
- void reset() {
- deadline = 0;
- counterfactual_ptr = nullptr;
- }
- };
-
- struct Stats {
- uint64_t expired_lifetimes = 0;
- uint64_t overestimated_lifetimes = 0;
- uint64_t short_lived_predictions = 0;
- uint64_t long_lived_predictions = 0;
- };
-
- explicit LifetimeTrackerImpl(
- LifetimeDatabaseT* lifetime_database, absl::Duration timeout,
- Clock clock = Clock{.now = absl::base_internal::CycleClock::Now,
- .freq = absl::base_internal::CycleClock::Frequency})
- : timeout_(absl::ToDoubleSeconds(timeout) * clock.freq()),
- lifetime_database_(*lifetime_database),
- clock_(clock) {}
-
- // Registers a donated allocation with the tracker.
- void AddAllocation(Tracker* tracker, LifetimeStatsT* lifetime,
- bool predicted_short_lived) {
- CheckForLifetimeExpirations();
-
- if (predicted_short_lived) {
- stats_.short_lived_predictions++;
- } else {
- stats_.long_lived_predictions++;
- }
-
- ASSERT(tracker != nullptr);
- ASSERT(lifetime != nullptr);
- tracker->deadline = clock_.now() + timeout_;
- tracker->lifetime = lifetime;
- tracker->predicted_short_lived = predicted_short_lived;
- list_.append(tracker);
- }
-
- // Remove an allocation from the tracker. This will stop tracking the
- // allocation and record whether it was correctly predicted.
- void RemoveAllocation(Tracker* tracker) {
- CheckForLifetimeExpirations();
-
- // This is not tracked anymore.
- if (!tracker->is_tracked()) {
- return;
- }
-
- if (!tracker->predicted_short_lived) {
- stats_.overestimated_lifetimes++;
- }
-
- if (tracker->lifetime != nullptr) {
- tracker->lifetime->Update(LifetimeStatsT::Prediction::kShortLived);
- lifetime_database_.RemoveLifetimeStatsReference(tracker->lifetime);
- }
-
- tracker->reset();
-
- list_.remove(tracker);
- }
-
- // Check whether any lifetimes in the tracker have passed the threshold after
- // which they are not short-lived anymore.
- void CheckForLifetimeExpirations() {
- // TODO(mmaas): Expirations are fairly cheap, but there is a theoretical
- // possibility of having an arbitrary number of expirations at once, which
- // could affect tail latency. We may want to limit the number of pages we
- // let expire per unit time.
- uint64_t now = clock_.now();
- Tracker* tracker = TryGetExpired(now);
- while (tracker != nullptr) {
- ASSERT(tracker->is_tracked());
-
- // A page that was predicted short-lived was actually long-lived.
- if (tracker->predicted_short_lived) {
- stats_.expired_lifetimes++;
- }
-
- if (tracker->lifetime != nullptr) {
- tracker->lifetime->Update(LifetimeStatsT::Prediction::kLongLived);
- lifetime_database_.RemoveLifetimeStatsReference(tracker->lifetime);
- }
-
- tracker->reset();
- tracker = TryGetExpired(now);
- }
- }
-
- Stats stats() const { return stats_; }
-
- private:
- // Returns the earliest expiring entry, or nullptr if none expired.
- Tracker* TryGetExpired(uint64_t now) {
- if (!list_.empty() && list_.first()->deadline < now) {
- Tracker* s = list_.first();
- list_.remove(s);
- return s;
- }
- return nullptr;
- }
-
- const uint64_t timeout_;
-
- TList<Tracker> list_;
- Stats stats_;
- LifetimeDatabaseT& lifetime_database_;
- Clock clock_;
-};
-
-using LifetimeTracker = LifetimeTrackerImpl<LifetimeDatabase, LifetimeStats>;
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
-
-#endif // TCMALLOC_INTERNAL_LIFETIME_TRACKER_H_
+// Copyright 2020 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TCMALLOC_INTERNAL_LIFETIME_TRACKER_H_
+#define TCMALLOC_INTERNAL_LIFETIME_TRACKER_H_
+
+#include "absl/base/internal/cycleclock.h"
+#include "absl/time/time.h"
+#include "tcmalloc/internal/clock.h"
+#include "tcmalloc/internal/lifetime_predictions.h"
+#include "tcmalloc/internal/linked_list.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
+template <typename LifetimeDatabaseT, typename LifetimeStatsT>
+class LifetimeTrackerImpl {
+ public:
+ // A tracker is attached to an individual allocation and tracks its lifetime.
+ // This allocation can either be in a region or in the filler. It contains
+ // a pointer back to the LifetimeStats of the allocation site that generated
+ // this allocation, so that statistics can be updated.
+ struct Tracker : public TList<Tracker>::Elem {
+ // The deadline after which the object is considered long-lived.
+ uint64_t deadline = 0;
+
+ // If the allocation is associated with a counterfactual, this contains
+ // the hypothetical location in the short-lived region (null otherwise).
+ void* counterfactual_ptr = nullptr;
+
+ // Lifetime statistics associated with this allocation (will be updated when
+ // the lifetime is known).
+ LifetimeStatsT* lifetime;
+
+ // The allocation this stat belongs to was predicted short-lived.
+ bool predicted_short_lived = false;
+
+ // Is this element currently tracked by the lifetime tracker?
+ bool is_tracked() { return deadline != 0; }
+
+ // Reset the element (implies not tracked).
+ void reset() {
+ deadline = 0;
+ counterfactual_ptr = nullptr;
+ }
+ };
+
+ struct Stats {
+ uint64_t expired_lifetimes = 0;
+ uint64_t overestimated_lifetimes = 0;
+ uint64_t short_lived_predictions = 0;
+ uint64_t long_lived_predictions = 0;
+ };
+
+ explicit LifetimeTrackerImpl(
+ LifetimeDatabaseT* lifetime_database, absl::Duration timeout,
+ Clock clock = Clock{.now = absl::base_internal::CycleClock::Now,
+ .freq = absl::base_internal::CycleClock::Frequency})
+ : timeout_(absl::ToDoubleSeconds(timeout) * clock.freq()),
+ lifetime_database_(*lifetime_database),
+ clock_(clock) {}
+
+ // Registers a donated allocation with the tracker.
+ void AddAllocation(Tracker* tracker, LifetimeStatsT* lifetime,
+ bool predicted_short_lived) {
+ CheckForLifetimeExpirations();
+
+ if (predicted_short_lived) {
+ stats_.short_lived_predictions++;
+ } else {
+ stats_.long_lived_predictions++;
+ }
+
+ ASSERT(tracker != nullptr);
+ ASSERT(lifetime != nullptr);
+ tracker->deadline = clock_.now() + timeout_;
+ tracker->lifetime = lifetime;
+ tracker->predicted_short_lived = predicted_short_lived;
+ list_.append(tracker);
+ }
+
+ // Remove an allocation from the tracker. This will stop tracking the
+ // allocation and record whether it was correctly predicted.
+ void RemoveAllocation(Tracker* tracker) {
+ CheckForLifetimeExpirations();
+
+ // This is not tracked anymore.
+ if (!tracker->is_tracked()) {
+ return;
+ }
+
+ if (!tracker->predicted_short_lived) {
+ stats_.overestimated_lifetimes++;
+ }
+
+ if (tracker->lifetime != nullptr) {
+ tracker->lifetime->Update(LifetimeStatsT::Prediction::kShortLived);
+ lifetime_database_.RemoveLifetimeStatsReference(tracker->lifetime);
+ }
+
+ tracker->reset();
+
+ list_.remove(tracker);
+ }
+
+ // Check whether any lifetimes in the tracker have passed the threshold after
+ // which they are not short-lived anymore.
+ void CheckForLifetimeExpirations() {
+ // TODO(mmaas): Expirations are fairly cheap, but there is a theoretical
+ // possibility of having an arbitrary number of expirations at once, which
+ // could affect tail latency. We may want to limit the number of pages we
+ // let expire per unit time.
+ uint64_t now = clock_.now();
+ Tracker* tracker = TryGetExpired(now);
+ while (tracker != nullptr) {
+ ASSERT(tracker->is_tracked());
+
+ // A page that was predicted short-lived was actually long-lived.
+ if (tracker->predicted_short_lived) {
+ stats_.expired_lifetimes++;
+ }
+
+ if (tracker->lifetime != nullptr) {
+ tracker->lifetime->Update(LifetimeStatsT::Prediction::kLongLived);
+ lifetime_database_.RemoveLifetimeStatsReference(tracker->lifetime);
+ }
+
+ tracker->reset();
+ tracker = TryGetExpired(now);
+ }
+ }
+
+ Stats stats() const { return stats_; }
+
+ private:
+ // Returns the earliest expiring entry, or nullptr if none expired.
+ Tracker* TryGetExpired(uint64_t now) {
+ if (!list_.empty() && list_.first()->deadline < now) {
+ Tracker* s = list_.first();
+ list_.remove(s);
+ return s;
+ }
+ return nullptr;
+ }
+
+ const uint64_t timeout_;
+
+ TList<Tracker> list_;
+ Stats stats_;
+ LifetimeDatabaseT& lifetime_database_;
+ Clock clock_;
+};
+
+using LifetimeTracker = LifetimeTrackerImpl<LifetimeDatabase, LifetimeStats>;
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
+
+#endif // TCMALLOC_INTERNAL_LIFETIME_TRACKER_H_
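
How the restored LifetimeTrackerImpl is meant to be driven: ask the database for a prediction, hand the stats reference to the tracker via AddAllocation, and call RemoveAllocation when the object dies (expirations are handled internally). A sketch assuming the in-tree headers; TrackDonatedAllocation and OnAllocationFreed are hypothetical glue functions, not the allocator's real wiring:

#include "tcmalloc/internal/lifetime_predictions.h"
#include "tcmalloc/internal/lifetime_tracker.h"

namespace tcmalloc {
namespace tcmalloc_internal {

// Hypothetical glue: when a donated allocation is created, consult the
// database for a prediction and start tracking it; when it is freed, stop
// tracking it. The tracker takes over the LifetimeStats reference and
// releases it once the outcome is known.
void TrackDonatedAllocation(LifetimeDatabase& db, LifetimeTracker& tracker,
                            LifetimeTracker::Tracker& page_tracker) {
  LifetimeDatabase::Key key;
  LifetimeStats* stats = db.LookupOrAddLifetimeStats(&key);
  const bool predicted_short =
      stats->Predict(LifetimeStats::Certainty::kHighCertainty) ==
      LifetimeStats::Prediction::kShortLived;
  tracker.AddAllocation(&page_tracker, stats, predicted_short);
}

void OnAllocationFreed(LifetimeTracker& tracker,
                       LifetimeTracker::Tracker& page_tracker) {
  tracker.RemoveAllocation(&page_tracker);
}

}  // namespace tcmalloc_internal
}  // namespace tcmalloc
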
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_tracker_test.cc b/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_tracker_test.cc
index 78ed38ecae..6435a04e69 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_tracker_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/lifetime_tracker_test.cc
@@ -1,129 +1,129 @@
-// Copyright 2019 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "tcmalloc/internal/lifetime_tracker.h"
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/time/time.h"
-#include "tcmalloc/internal/lifetime_predictions.h"
-
-namespace tcmalloc {
-namespace tcmalloc_internal {
-namespace {
-
-class MockLifetimeStats {
- public:
- enum class Prediction { kShortLived, kLongLived };
- MOCK_METHOD(void, Update, (Prediction prediction), ());
-};
-
-class MockLifetimeDatabase {
- public:
- MOCK_METHOD(void, RemoveLifetimeStatsReference, (MockLifetimeStats*), ());
-};
-
-using LifetimeTrackerUnderTest =
- LifetimeTrackerImpl<MockLifetimeDatabase, MockLifetimeStats>;
-
-class LifetimeTrackerTest : public testing::Test {
- protected:
- const Clock kFakeClock =
- Clock{.now = FakeClock, .freq = GetFakeClockFrequency};
-
- void Advance(absl::Duration d) {
- clock_ += absl::ToDoubleSeconds(d) * GetFakeClockFrequency();
- }
-
- private:
- static int64_t FakeClock() { return clock_; }
-
- static double GetFakeClockFrequency() {
- return absl::ToDoubleNanoseconds(absl::Seconds(2));
- }
-
- static int64_t clock_;
-};
-
-int64_t LifetimeTrackerTest::clock_{0};
-
-TEST_F(LifetimeTrackerTest, Basic) {
- MockLifetimeDatabase database;
- LifetimeTrackerUnderTest tracker(&database, absl::Seconds(0.5), kFakeClock);
- MockLifetimeStats stats;
-
- LifetimeTrackerUnderTest::Tracker tracker1;
- tracker.AddAllocation(&tracker1, &stats, false);
- Advance(absl::Seconds(1));
-
- EXPECT_CALL(stats, Update(MockLifetimeStats::Prediction::kLongLived));
- EXPECT_CALL(database, RemoveLifetimeStatsReference(&stats));
-
- LifetimeTrackerUnderTest::Tracker tracker2;
- tracker.AddAllocation(&tracker2, &stats, false);
-
- EXPECT_CALL(stats, Update(MockLifetimeStats::Prediction::kShortLived));
- EXPECT_CALL(database, RemoveLifetimeStatsReference(&stats));
-
- Advance(absl::Seconds(0.1));
- tracker.RemoveAllocation(&tracker2);
-
- EXPECT_EQ(tracker.stats().expired_lifetimes, 0);
- EXPECT_EQ(tracker.stats().overestimated_lifetimes, 1);
- EXPECT_EQ(tracker.stats().short_lived_predictions, 0);
- EXPECT_EQ(tracker.stats().long_lived_predictions, 2);
-}
-
-TEST_F(LifetimeTrackerTest, ExpirationLogic) {
- MockLifetimeDatabase database;
- LifetimeTrackerUnderTest tracker(&database, absl::Seconds(0.5), kFakeClock);
-
- // Create 100 trackers, all predicted short-lived. Every second tracker will
- // be long-lived and therefore expire.
- const int kNumTrackers = 100;
- std::vector<LifetimeTrackerUnderTest::Tracker> trackers(kNumTrackers);
- MockLifetimeStats stats[] = {MockLifetimeStats(), MockLifetimeStats()};
-
- for (int i = 0; i < kNumTrackers; ++i) {
- tracker.AddAllocation(&trackers[i], &stats[i % 2], true);
- Advance(absl::Milliseconds(1));
- }
-
- EXPECT_CALL(stats[0], Update(MockLifetimeStats::Prediction::kShortLived))
- .Times(kNumTrackers / 2);
- EXPECT_CALL(database, RemoveLifetimeStatsReference(&stats[0]))
- .Times(kNumTrackers / 2);
-
- for (int i = 0; i < kNumTrackers; i += 2) {
- tracker.RemoveAllocation(&trackers[i]);
- }
-
- // After an additional 450ms, 1/4 of the allocations should have expired.
- EXPECT_CALL(stats[1], Update(MockLifetimeStats::Prediction::kLongLived))
- .Times(kNumTrackers / 4);
- EXPECT_CALL(database, RemoveLifetimeStatsReference(&stats[1]))
- .Times(kNumTrackers / 4);
-
- Advance(absl::Milliseconds(450));
- tracker.CheckForLifetimeExpirations();
-
- EXPECT_EQ(tracker.stats().expired_lifetimes, kNumTrackers / 4);
- EXPECT_EQ(tracker.stats().overestimated_lifetimes, 0);
- EXPECT_EQ(tracker.stats().short_lived_predictions, kNumTrackers);
- EXPECT_EQ(tracker.stats().long_lived_predictions, 0);
-}
-
-} // namespace
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
+// Copyright 2019 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tcmalloc/internal/lifetime_tracker.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/time/time.h"
+#include "tcmalloc/internal/lifetime_predictions.h"
+
+namespace tcmalloc {
+namespace tcmalloc_internal {
+namespace {
+
+class MockLifetimeStats {
+ public:
+ enum class Prediction { kShortLived, kLongLived };
+ MOCK_METHOD(void, Update, (Prediction prediction), ());
+};
+
+class MockLifetimeDatabase {
+ public:
+ MOCK_METHOD(void, RemoveLifetimeStatsReference, (MockLifetimeStats*), ());
+};
+
+using LifetimeTrackerUnderTest =
+ LifetimeTrackerImpl<MockLifetimeDatabase, MockLifetimeStats>;
+
+class LifetimeTrackerTest : public testing::Test {
+ protected:
+ const Clock kFakeClock =
+ Clock{.now = FakeClock, .freq = GetFakeClockFrequency};
+
+ void Advance(absl::Duration d) {
+ clock_ += absl::ToDoubleSeconds(d) * GetFakeClockFrequency();
+ }
+
+ private:
+ static int64_t FakeClock() { return clock_; }
+
+ static double GetFakeClockFrequency() {
+ return absl::ToDoubleNanoseconds(absl::Seconds(2));
+ }
+
+ static int64_t clock_;
+};
+
+int64_t LifetimeTrackerTest::clock_{0};
+
+TEST_F(LifetimeTrackerTest, Basic) {
+ MockLifetimeDatabase database;
+ LifetimeTrackerUnderTest tracker(&database, absl::Seconds(0.5), kFakeClock);
+ MockLifetimeStats stats;
+
+ LifetimeTrackerUnderTest::Tracker tracker1;
+ tracker.AddAllocation(&tracker1, &stats, false);
+ Advance(absl::Seconds(1));
+
+ EXPECT_CALL(stats, Update(MockLifetimeStats::Prediction::kLongLived));
+ EXPECT_CALL(database, RemoveLifetimeStatsReference(&stats));
+
+ LifetimeTrackerUnderTest::Tracker tracker2;
+ tracker.AddAllocation(&tracker2, &stats, false);
+
+ EXPECT_CALL(stats, Update(MockLifetimeStats::Prediction::kShortLived));
+ EXPECT_CALL(database, RemoveLifetimeStatsReference(&stats));
+
+ Advance(absl::Seconds(0.1));
+ tracker.RemoveAllocation(&tracker2);
+
+ EXPECT_EQ(tracker.stats().expired_lifetimes, 0);
+ EXPECT_EQ(tracker.stats().overestimated_lifetimes, 1);
+ EXPECT_EQ(tracker.stats().short_lived_predictions, 0);
+ EXPECT_EQ(tracker.stats().long_lived_predictions, 2);
+}
+
+TEST_F(LifetimeTrackerTest, ExpirationLogic) {
+ MockLifetimeDatabase database;
+ LifetimeTrackerUnderTest tracker(&database, absl::Seconds(0.5), kFakeClock);
+
+ // Create 100 trackers, all predicted short-lived. Every second tracker will
+ // be long-lived and therefore expire.
+ const int kNumTrackers = 100;
+ std::vector<LifetimeTrackerUnderTest::Tracker> trackers(kNumTrackers);
+ MockLifetimeStats stats[] = {MockLifetimeStats(), MockLifetimeStats()};
+
+ for (int i = 0; i < kNumTrackers; ++i) {
+ tracker.AddAllocation(&trackers[i], &stats[i % 2], true);
+ Advance(absl::Milliseconds(1));
+ }
+
+ EXPECT_CALL(stats[0], Update(MockLifetimeStats::Prediction::kShortLived))
+ .Times(kNumTrackers / 2);
+ EXPECT_CALL(database, RemoveLifetimeStatsReference(&stats[0]))
+ .Times(kNumTrackers / 2);
+
+ for (int i = 0; i < kNumTrackers; i += 2) {
+ tracker.RemoveAllocation(&trackers[i]);
+ }
+
+ // After an additional 450ms, 1/4 of the allocations should have expired.
+ EXPECT_CALL(stats[1], Update(MockLifetimeStats::Prediction::kLongLived))
+ .Times(kNumTrackers / 4);
+ EXPECT_CALL(database, RemoveLifetimeStatsReference(&stats[1]))
+ .Times(kNumTrackers / 4);
+
+ Advance(absl::Milliseconds(450));
+ tracker.CheckForLifetimeExpirations();
+
+ EXPECT_EQ(tracker.stats().expired_lifetimes, kNumTrackers / 4);
+ EXPECT_EQ(tracker.stats().overestimated_lifetimes, 0);
+ EXPECT_EQ(tracker.stats().short_lived_predictions, kNumTrackers);
+ EXPECT_EQ(tracker.stats().long_lived_predictions, 0);
+}
+
+} // namespace
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/linked_list.h b/contrib/libs/tcmalloc/tcmalloc/internal/linked_list.h
index 181a480275..eba71c70c2 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/linked_list.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/linked_list.h
@@ -25,9 +25,9 @@
#include "absl/base/optimization.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* SLL_Next(void* t) {
return *(reinterpret_cast<void**>(t));
@@ -189,16 +189,16 @@ class TList {
}
// Returns first element in the list. The list must not be empty.
- ABSL_ATTRIBUTE_RETURNS_NONNULL T* first() const {
+ ABSL_ATTRIBUTE_RETURNS_NONNULL T* first() const {
ASSERT(!empty());
- ASSERT(head_.next_ != nullptr);
+ ASSERT(head_.next_ != nullptr);
return static_cast<T*>(head_.next_);
}
// Returns last element in the list. The list must not be empty.
- ABSL_ATTRIBUTE_RETURNS_NONNULL T* last() const {
+ ABSL_ATTRIBUTE_RETURNS_NONNULL T* last() const {
ASSERT(!empty());
- ASSERT(head_.prev_ != nullptr);
+ ASSERT(head_.prev_ != nullptr);
return static_cast<T*>(head_.prev_);
}
@@ -247,8 +247,8 @@ class TList {
Elem head_;
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_LINKED_LIST_H_
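
TList, whose first()/last() accessors are touched in this hunk, is an intrusive doubly linked list: elements inherit their links from TList<T>::Elem, so linking and unlinking never allocate. A small sketch assuming the in-tree header; Job, Enqueue, and DrainOldest are illustrative names only, mirroring the stats_fifo_ usage earlier in this diff:

#include "tcmalloc/internal/linked_list.h"

namespace tcmalloc {
namespace tcmalloc_internal {

// Illustrative element type: the links live inside the object itself.
struct Job : public TList<Job>::Elem {
  int id = 0;
};

void Enqueue(TList<Job>& queue, Job* job) { queue.append(job); }

// Pops the oldest element; returns -1 if the queue is empty.
int DrainOldest(TList<Job>& queue) {
  if (queue.empty()) return -1;
  Job* oldest = queue.first();
  queue.remove(oldest);
  return oldest->id;
}

}  // namespace tcmalloc_internal
}  // namespace tcmalloc
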
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/linked_list_benchmark.cc b/contrib/libs/tcmalloc/tcmalloc/internal/linked_list_benchmark.cc
index 505b1b62c2..613eccc5c0 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/linked_list_benchmark.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/linked_list_benchmark.cc
@@ -23,9 +23,9 @@
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/mock_span.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
void BM_PushPop(benchmark::State& state) {
@@ -141,6 +141,6 @@ static void BM_AppendRemove(benchmark::State& state) {
BENCHMARK(BM_AppendRemove)->Range(32, 32 * 1024);
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/linked_list_test.cc b/contrib/libs/tcmalloc/tcmalloc/internal/linked_list_test.cc
index 3299bca8d8..bfc1a73966 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/linked_list_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/linked_list_test.cc
@@ -20,14 +20,14 @@
#include <vector>
#include "gtest/gtest.h"
-#include "absl/container/flat_hash_set.h"
+#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_set.h"
#include "absl/random/random.h"
-#include "benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
#include "tcmalloc/internal/mock_span.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class LinkedListTest : public ::testing::Test {
@@ -76,7 +76,7 @@ TEST_F(LinkedListTest, PushPop) {
// the batch is maintained.
TEST_F(LinkedListTest, PushPopBatch) {
const std::vector<int> batch_sizes{1, 3, 5, 7, 10, 16};
- absl::flat_hash_set<void*> pushed;
+ absl::flat_hash_set<void*> pushed;
size_t length = 0;
for (int batch_size : batch_sizes) {
@@ -96,7 +96,7 @@ TEST_F(LinkedListTest, PushPopBatch) {
EXPECT_EQ(length == 0, list_.empty());
}
- absl::flat_hash_set<void*> popped;
+ absl::flat_hash_set<void*> popped;
for (int batch_size : batch_sizes) {
std::vector<void*> batch(batch_size, nullptr);
list_.PopBatch(batch_size, batch.data());
@@ -235,5 +235,5 @@ TEST_F(TListTest, AppendRandomRemove) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/logging.cc b/contrib/libs/tcmalloc/tcmalloc/internal/logging.cc
index 2b70bc1502..b90ab85f7a 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/logging.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/logging.cc
@@ -31,10 +31,10 @@
#include "tcmalloc/internal/parameter_accessors.h"
#include "tcmalloc/malloc_extension.h"
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
// Variables for storing crash output. Allocated statically since we
// may not be able to heap-allocate while crashing.
ABSL_CONST_INIT static absl::base_internal::SpinLock crash_lock(
@@ -82,7 +82,7 @@ static Logger FormatLog(bool with_stack, const char* filename, int line,
if (with_stack) {
state.trace.depth =
- absl::GetStackTrace(state.trace.stack, kMaxStackDepth, 1);
+ absl::GetStackTrace(state.trace.stack, kMaxStackDepth, 1);
state.Add(LogItem("@"));
for (int i = 0; i < state.trace.depth; i++) {
state.Add(LogItem(state.trace.stack[i]));
@@ -210,7 +210,7 @@ bool Logger::AddNum(uint64_t num, int base) {
return AddStr(pos, end - pos);
}
-PbtxtRegion::PbtxtRegion(Printer* out, PbtxtRegionType type, int indent)
+PbtxtRegion::PbtxtRegion(Printer* out, PbtxtRegionType type, int indent)
: out_(out), type_(type), indent_(indent) {
switch (type_) {
case kTop:
@@ -270,7 +270,7 @@ PbtxtRegion PbtxtRegion::CreateSubRegion(absl::string_view key) {
PbtxtRegion sub(out_, kNested, indent_);
return sub;
}
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/logging.h b/contrib/libs/tcmalloc/tcmalloc/internal/logging.h
index 4d42aa40a9..252568fda2 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/logging.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/logging.h
@@ -24,7 +24,7 @@
#include "absl/base/optimization.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
//-------------------------------------------------------------------
// Utility routines
@@ -37,9 +37,9 @@
// Example:
// Log(kLog, __FILE__, __LINE__, "error", bytes);
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
static constexpr int kMaxStackDepth = 64;
@@ -59,7 +59,7 @@ struct StackTrace {
uintptr_t requested_size;
uintptr_t requested_alignment;
uintptr_t allocated_size; // size after sizeclass/page rounding
-
+
uintptr_t depth; // Number of PC values stored in array below
void* stack[kMaxStackDepth];
@@ -75,8 +75,8 @@ struct StackTrace {
// produce a hasher for the fields used as keys.
return H::combine(H::combine_contiguous(std::move(h), t.stack, t.depth),
t.depth, t.requested_size, t.requested_alignment,
- t.allocated_size
- );
+ t.allocated_size
+ );
}
};
@@ -130,11 +130,11 @@ extern void (*log_message_writer)(const char* msg, int length);
// Like assert(), but executed even in NDEBUG mode
#undef CHECK_CONDITION
-#define CHECK_CONDITION(cond) \
- (ABSL_PREDICT_TRUE(cond) ? (void)0 \
- : (::tcmalloc::tcmalloc_internal::Crash( \
- ::tcmalloc::tcmalloc_internal::kCrash, \
- __FILE__, __LINE__, #cond)))
+#define CHECK_CONDITION(cond) \
+ (ABSL_PREDICT_TRUE(cond) ? (void)0 \
+ : (::tcmalloc::tcmalloc_internal::Crash( \
+ ::tcmalloc::tcmalloc_internal::kCrash, \
+ __FILE__, __LINE__, #cond)))
// Our own version of assert() so we can avoid hanging by trying to do
// all kinds of goofy printing while holding the malloc lock.
@@ -145,7 +145,7 @@ extern void (*log_message_writer)(const char* msg, int length);
#endif
// Print into buffer
-class Printer {
+class Printer {
private:
char* buf_; // Where should we write next
int left_; // Space left in buffer (including space for \0)
@@ -154,7 +154,7 @@ class Printer {
public:
// REQUIRES: "length > 0"
- Printer(char* buf, int length) : buf_(buf), left_(length), required_(0) {
+ Printer(char* buf, int length) : buf_(buf), left_(length), required_(0) {
ASSERT(length > 0);
buf[0] = '\0';
}
@@ -191,7 +191,7 @@ enum PbtxtRegionType { kTop, kNested };
// brackets).
class PbtxtRegion {
public:
- PbtxtRegion(Printer* out, PbtxtRegionType type, int indent);
+ PbtxtRegion(Printer* out, PbtxtRegionType type, int indent);
~PbtxtRegion();
PbtxtRegion(const PbtxtRegion&) = delete;
@@ -210,13 +210,13 @@ class PbtxtRegion {
private:
void NewLineAndIndent();
- Printer* out_;
+ Printer* out_;
PbtxtRegionType type_;
int indent_;
};
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
-
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
+
#endif // TCMALLOC_INTERNAL_LOGGING_H_
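
Note on the logging.h hunks above: a minimal sketch of how the Printer buffer writer and the CHECK_CONDITION macro declared there are typically used. It assumes only what is visible in this diff (the Printer(char*, int) constructor, the printf-style member exercised in logging_test.cc below, and CHECK_CONDITION firing even in NDEBUG builds); the helper name and the checked condition are illustrative.

    #include <stddef.h>
    #include "tcmalloc/internal/logging.h"

    // Format a status line into a fixed, stack-allocated buffer. Printer never
    // writes past the given length and keeps the buffer NUL-terminated.
    void FormatStatusLine(size_t bytes_in_use) {
      char buf[256];
      tcmalloc::tcmalloc_internal::Printer printer(buf, static_cast<int>(sizeof(buf)));
      printer.printf("bytes in use: %zu\n", bytes_in_use);

      // Unlike ASSERT, CHECK_CONDITION is evaluated even in NDEBUG builds.
      CHECK_CONDITION(buf[0] != '\0');
    }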
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/logging_test.cc b/contrib/libs/tcmalloc/tcmalloc/internal/logging_test.cc
index c7b58de40f..2d3ae00436 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/logging_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/logging_test.cc
@@ -20,12 +20,12 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-
-namespace tcmalloc {
-namespace tcmalloc_internal {
-namespace {
+#include "absl/flags/flag.h"
+namespace tcmalloc {
+namespace tcmalloc_internal {
+namespace {
+
static std::string* log_buffer;
static void RecordLogMessage(const char* msg, int length) {
@@ -94,7 +94,7 @@ TEST(Printer, RequiredSpace) {
for (int i = 0; i < 10; i++) {
int length = strlen(kChunk) * i + 1;
std::unique_ptr<char[]> buf(new char[length]);
- Printer printer(buf.get(), length);
+ Printer printer(buf.get(), length);
for (int j = 0; j < i; j++) {
printer.printf("%s", kChunk);
@@ -112,6 +112,6 @@ TEST(Printer, RequiredSpace) {
}
}
-} // namespace
-} // namespace tcmalloc_internal
+} // namespace
+} // namespace tcmalloc_internal
} // namespace tcmalloc
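
The test above installs RecordLogMessage through the log_message_writer hook whose extern declaration appears in the logging.h hunk headers. A sketch of the same capture pattern outside the test, assuming only that function-pointer declaration; the capture buffer and installer function are illustrative.

    #include <string>
    #include "tcmalloc/internal/logging.h"

    namespace {
    std::string captured_log;  // test-only sink, mirroring logging_test.cc

    void CaptureLogMessage(const char* msg, int length) {
      captured_log.assign(msg, length);
    }
    }  // namespace

    // Redirect TCMalloc's internal log output into the capture buffer.
    void InstallLogCapture() {
      tcmalloc::tcmalloc_internal::log_message_writer = CaptureLogMessage;
    }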
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/logging_test_helper.cc b/contrib/libs/tcmalloc/tcmalloc/internal/logging_test_helper.cc
index 36c2b38771..96af48c34c 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/logging_test_helper.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/logging_test_helper.cc
@@ -1,18 +1,18 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This is a trivial program. When run with a virtual address size rlimit,
-// TCMalloc should crash cleanly, rather than hang.
-
-int main() { return 0; }
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This is a trivial program. When run with a virtual address size rlimit,
+// TCMalloc should crash cleanly, rather than hang.
+
+int main() { return 0; }
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/memory_stats.cc b/contrib/libs/tcmalloc/tcmalloc/internal/memory_stats.cc
index 71591834d4..009799d0ec 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/memory_stats.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/memory_stats.cc
@@ -20,11 +20,11 @@
#include <unistd.h>
#include "absl/strings/numbers.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/util.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
@@ -129,4 +129,4 @@ bool GetMemoryStats(MemoryStats* stats) {
} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/memory_stats.h b/contrib/libs/tcmalloc/tcmalloc/internal/memory_stats.h
index a65f5b03d3..1d7dc8d28a 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/memory_stats.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/memory_stats.h
@@ -17,9 +17,9 @@
#include <stdint.h>
-#include "tcmalloc/internal/config.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
+#include "tcmalloc/internal/config.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
@@ -36,6 +36,6 @@ bool GetMemoryStats(MemoryStats* stats);
} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_MEMORY_STATS_H_
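
A minimal usage sketch for GetMemoryStats() as declared above. Only the bool-returning signature visible in this diff is assumed; the fields of MemoryStats are not shown here, so the consuming code is left as a comment rather than guessed at.

    #include "tcmalloc/internal/memory_stats.h"

    void MaybeReportMemoryStats() {
      tcmalloc::tcmalloc_internal::MemoryStats stats;
      if (!tcmalloc::tcmalloc_internal::GetMemoryStats(&stats)) {
        return;  // stats could not be collected on this system
      }
      // ... read the fields of `stats` here (names omitted; not visible in
      // this diff).
    }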
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/mincore.cc b/contrib/libs/tcmalloc/tcmalloc/internal/mincore.cc
index e4120bcf5a..2baf01beaa 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/mincore.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/mincore.cc
@@ -20,20 +20,20 @@
#include <algorithm>
#include <cstdint>
-#include "tcmalloc/internal/config.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
+#include "tcmalloc/internal/config.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Class that implements the call into the OS provided mincore() function.
class OsMInCore final : public MInCoreInterface {
- public:
+ public:
int mincore(void* addr, size_t length, unsigned char* result) final {
return ::mincore(addr, length, result);
}
-
- ~OsMInCore() override = default;
+
+ ~OsMInCore() override = default;
};
// Returns the number of resident bytes for a range of memory of arbitrary
@@ -124,6 +124,6 @@ size_t MInCore::residence(void* addr, size_t size) {
return residence_impl(addr, size, &mc);
}
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
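
OsMInCore above is the production implementation of MInCoreInterface; the interface exists so tests can substitute their own (see mincore_test.cc below). A sketch of such a substitute, assuming only the mincore() signature shown in OsMInCore; ZeroMInCore and its "nothing is resident" behavior are illustrative.

    #include <string.h>
    #include <unistd.h>
    #include "tcmalloc/internal/mincore.h"

    // Illustrative stub: reports every page as non-resident without calling
    // into the OS, following the same override pattern as OsMInCore above.
    class ZeroMInCore final : public tcmalloc::tcmalloc_internal::MInCoreInterface {
     public:
      int mincore(void* addr, size_t length, unsigned char* result) final {
        const size_t page_size = getpagesize();
        const size_t pages = (length + page_size - 1) / page_size;
        memset(result, 0, pages);  // one status byte per page, all zero
        return 0;
      }

      ~ZeroMInCore() override = default;
    };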
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/mincore.h b/contrib/libs/tcmalloc/tcmalloc/internal/mincore.h
index c353bdac87..de23932032 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/mincore.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/mincore.h
@@ -17,11 +17,11 @@
#include <stddef.h>
-#include "tcmalloc/internal/config.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
+#include "tcmalloc/internal/config.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Class to wrap mincore so that we can replace it for testing.
class MInCoreInterface {
@@ -58,8 +58,8 @@ class MInCore {
friend class MInCoreTest;
};
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_MINCORE_H_
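
A short sketch of querying residence through MInCore, as defined in mincore.cc above. It assumes residence() is callable as a static member with the (void*, size_t) signature shown in that definition; the buffer setup is illustrative.

    #include <unistd.h>
    #include <vector>
    #include "tcmalloc/internal/mincore.h"

    // How many bytes of a freshly written buffer does the OS report resident?
    size_t ResidentBytesOfTouchedBuffer() {
      const size_t page_size = getpagesize();
      std::vector<unsigned char> buf(8 * page_size, 1);  // writing faults pages in
      return tcmalloc::tcmalloc_internal::MInCore::residence(buf.data(), buf.size());
    }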
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/mincore_benchmark.cc b/contrib/libs/tcmalloc/tcmalloc/internal/mincore_benchmark.cc
index 02c8ead48d..cd42e8f440 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/mincore_benchmark.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/mincore_benchmark.cc
@@ -25,7 +25,7 @@
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/mincore.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace {
@@ -58,4 +58,4 @@ BENCHMARK(BM_mincore)->Range(1, 16 * 1024);
} // namespace
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/mincore_test.cc b/contrib/libs/tcmalloc/tcmalloc/internal/mincore_test.cc
index daa1178b25..e04dc60a94 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/mincore_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/mincore_test.cc
@@ -24,17 +24,17 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
#include "tcmalloc/internal/logging.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
using ::testing::Eq;
// Mock interface to mincore() which reports residence based on
// an array provided at construction.
-class MInCoreMock : public MInCoreInterface {
+class MInCoreMock : public MInCoreInterface {
public:
MInCoreMock() : mapped_() {}
~MInCoreMock() override {}
@@ -77,10 +77,10 @@ class MInCoreTest {
// Expose the internal size of array that we use to call mincore() so
// that we can be sure to need multiple calls to cover large memory regions.
- const size_t chunkSize() { return MInCore::kArrayLength; }
+ const size_t chunkSize() { return MInCore::kArrayLength; }
private:
- MInCoreMock mcm_;
+ MInCoreMock mcm_;
};
namespace {
@@ -88,7 +88,7 @@ namespace {
using ::testing::Eq;
TEST(StaticVarsTest, TestResidence) {
- MInCoreTest mct;
+ MInCoreTest mct;
const size_t kPageSize = getpagesize();
// Set up a pattern with a few resident pages.
@@ -142,7 +142,7 @@ TEST(StaticVarsTest, TestResidence) {
// Test whether we are correctly handling multiple calls to mincore.
TEST(StaticVarsTest, TestLargeResidence) {
- MInCoreTest mct;
+ MInCoreTest mct;
uintptr_t uAddress = 0;
const size_t kPageSize = getpagesize();
// Set up a pattern covering 6 * page size * MInCore::kArrayLength to
@@ -189,5 +189,5 @@ TEST(StaticVarsTest, UnmappedMemory) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/numa.cc b/contrib/libs/tcmalloc/tcmalloc/internal/numa.cc
index 1639bd1b6d..45161e3085 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/numa.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/numa.cc
@@ -1,220 +1,220 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "tcmalloc/internal/numa.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <limits.h>
-#include <sched.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <unistd.h>
-
-#include <array>
-#include <cstring>
-
-#include "absl/base/attributes.h"
-#include "absl/base/internal/sysinfo.h"
-#include "absl/functional/function_ref.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/string_view.h"
-#include "tcmalloc/internal/config.h"
-#include "tcmalloc/internal/environment.h"
-#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/internal/util.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-
-// Returns true iff NUMA awareness should be enabled by default (i.e. in the
-// absence of the TCMALLOC_NUMA_AWARE environment variable). This weak
-// implementation may be overridden by the one in want_numa_aware.cc.
-ABSL_ATTRIBUTE_WEAK bool default_want_numa_aware() { return false; }
-
-int OpenSysfsCpulist(size_t node) {
- char path[PATH_MAX];
- snprintf(path, sizeof(path), "/sys/devices/system/node/node%zu/cpulist",
- node);
- return signal_safe_open(path, O_RDONLY | O_CLOEXEC);
-}
-
-cpu_set_t ParseCpulist(absl::FunctionRef<ssize_t(char *, size_t)> read) {
- cpu_set_t set;
- CPU_ZERO(&set);
-
- std::array<char, 16> buf;
- size_t carry_over = 0;
- int cpu_from = -1;
-
- while (true) {
- const ssize_t rc = read(buf.data() + carry_over, buf.size() - carry_over);
- CHECK_CONDITION(rc >= 0);
-
- const absl::string_view current(buf.data(), carry_over + rc);
-
- // If we have no more data to parse & couldn't read any then we've reached
- // the end of the input & are done.
- if (current.empty() && rc == 0) {
- break;
- }
-
- size_t consumed;
- const size_t dash = current.find('-');
- const size_t comma = current.find(',');
- if (dash != absl::string_view::npos && dash < comma) {
- CHECK_CONDITION(absl::SimpleAtoi(current.substr(0, dash), &cpu_from));
- consumed = dash + 1;
- } else if (comma != absl::string_view::npos || rc == 0) {
- int cpu;
- CHECK_CONDITION(absl::SimpleAtoi(current.substr(0, comma), &cpu));
- if (comma == absl::string_view::npos) {
- consumed = current.size();
- } else {
- consumed = comma + 1;
- }
- if (cpu_from != -1) {
- for (int c = cpu_from; c <= cpu; c++) {
- CPU_SET(c, &set);
- }
- cpu_from = -1;
- } else {
- CPU_SET(cpu, &set);
- }
- } else {
- consumed = 0;
- }
-
- carry_over = current.size() - consumed;
- memmove(buf.data(), buf.data() + consumed, carry_over);
- }
-
- return set;
-}
-
-bool InitNumaTopology(size_t cpu_to_scaled_partition[CPU_SETSIZE],
- uint64_t *const partition_to_nodes,
- NumaBindMode *const bind_mode,
- const size_t num_partitions, const size_t scale_by,
- absl::FunctionRef<int(size_t)> open_node_cpulist) {
- // Node 0 will always map to partition 0; record it here in case the system
- // doesn't support NUMA or the user opts out of our awareness of it - in
- // either case we'll record nothing in the loop below.
- partition_to_nodes[NodeToPartition(0, num_partitions)] |= 1 << 0;
-
- // If we only compiled in support for one partition then we're trivially
- // done; NUMA awareness is unavailable.
- if (num_partitions == 1) return false;
-
- // We rely on rseq to quickly obtain a CPU ID & lookup the appropriate
- // partition in NumaTopology::GetCurrentPartition(). If rseq is unavailable,
- // disable NUMA awareness.
- if (!subtle::percpu::IsFast()) return false;
-
- // Honor default_want_numa_aware() to allow compile time configuration of
- // whether to enable NUMA awareness by default, and allow the user to
- // override that either way by setting TCMALLOC_NUMA_AWARE in the
- // environment.
- //
- // In cases where we don't enable NUMA awareness we simply return. Since the
- // cpu_to_scaled_partition & partition_to_nodes arrays are zero initialized
- // we're trivially done - CPUs all map to partition 0, which contains only
- // CPU 0 added above.
- const char *e =
- tcmalloc::tcmalloc_internal::thread_safe_getenv("TCMALLOC_NUMA_AWARE");
- if (e == nullptr) {
- // Enable NUMA awareness iff default_want_numa_aware().
- if (!default_want_numa_aware()) return false;
- } else if (!strcmp(e, "no-binding")) {
- // Enable NUMA awareness with no memory binding behavior.
- *bind_mode = NumaBindMode::kNone;
- } else if (!strcmp(e, "advisory-binding") || !strcmp(e, "1")) {
- // Enable NUMA awareness with advisory memory binding behavior.
- *bind_mode = NumaBindMode::kAdvisory;
- } else if (!strcmp(e, "strict-binding")) {
- // Enable NUMA awareness with strict memory binding behavior.
- *bind_mode = NumaBindMode::kStrict;
- } else if (!strcmp(e, "0")) {
- // Disable NUMA awareness.
- return false;
- } else {
- Crash(kCrash, __FILE__, __LINE__, "bad TCMALLOC_NUMA_AWARE env var", e);
- }
-
- // The cpu_to_scaled_partition array has a fixed size so that we can
- // statically allocate it & avoid the need to check whether it has been
- // allocated prior to lookups. It has CPU_SETSIZE entries which ought to be
- // sufficient, but sanity check that indexing it by CPU number shouldn't
- // exceed its bounds.
- int num_cpus = absl::base_internal::NumCPUs();
- CHECK_CONDITION(num_cpus <= CPU_SETSIZE);
-
- // We could just always report that we're NUMA aware, but if a NUMA-aware
- // binary runs on a system that doesn't include multiple NUMA nodes then our
- // NUMA awareness will offer no benefit whilst incurring the cost of
- // redundant work & stats. As such we only report that we're NUMA aware if
- // there's actually NUMA to be aware of, which we track here.
- bool numa_aware = false;
-
- for (size_t node = 0;; node++) {
- // Detect NUMA nodes by opening their cpulist files from sysfs.
- const int fd = open_node_cpulist(node);
- if (fd == -1) {
- // We expect to encounter ENOENT once node surpasses the actual number of
- // nodes present in the system. Any other error is a problem.
- CHECK_CONDITION(errno == ENOENT);
- break;
- }
-
- // Record this node in partition_to_nodes.
- const size_t partition = NodeToPartition(node, num_partitions);
- partition_to_nodes[partition] |= 1 << node;
-
- // cpu_to_scaled_partition_ entries are default initialized to zero, so
- // skip redundantly parsing CPU lists for nodes that map to partition 0.
- if (partition == 0) {
- signal_safe_close(fd);
- continue;
- }
-
- // Parse the cpulist file to determine which CPUs are local to this node.
- const cpu_set_t node_cpus =
- ParseCpulist([&](char *const buf, const size_t count) {
- return signal_safe_read(fd, buf, count, /*bytes_read=*/nullptr);
- });
-
- // Assign local CPUs to the appropriate partition.
- for (size_t cpu = 0; cpu < CPU_SETSIZE; cpu++) {
- if (CPU_ISSET(cpu, &node_cpus)) {
- cpu_to_scaled_partition[cpu + kNumaCpuFudge] = partition * scale_by;
- }
- }
-
- // If we observed any CPUs for this node then we've now got CPUs assigned
- // to a non-zero partition; report that we're NUMA aware.
- if (CPU_COUNT(&node_cpus) != 0) {
- numa_aware = true;
- }
-
- signal_safe_close(fd);
- }
-
- return numa_aware;
-}
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tcmalloc/internal/numa.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <array>
+#include <cstring>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/functional/function_ref.h"
+#include "absl/strings/numbers.h"
+#include "absl/strings/string_view.h"
+#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/environment.h"
+#include "tcmalloc/internal/logging.h"
+#include "tcmalloc/internal/util.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
+// Returns true iff NUMA awareness should be enabled by default (i.e. in the
+// absence of the TCMALLOC_NUMA_AWARE environment variable). This weak
+// implementation may be overridden by the one in want_numa_aware.cc.
+ABSL_ATTRIBUTE_WEAK bool default_want_numa_aware() { return false; }
+
+int OpenSysfsCpulist(size_t node) {
+ char path[PATH_MAX];
+ snprintf(path, sizeof(path), "/sys/devices/system/node/node%zu/cpulist",
+ node);
+ return signal_safe_open(path, O_RDONLY | O_CLOEXEC);
+}
+
+cpu_set_t ParseCpulist(absl::FunctionRef<ssize_t(char *, size_t)> read) {
+ cpu_set_t set;
+ CPU_ZERO(&set);
+
+ std::array<char, 16> buf;
+ size_t carry_over = 0;
+ int cpu_from = -1;
+
+ while (true) {
+ const ssize_t rc = read(buf.data() + carry_over, buf.size() - carry_over);
+ CHECK_CONDITION(rc >= 0);
+
+ const absl::string_view current(buf.data(), carry_over + rc);
+
+ // If we have no more data to parse & couldn't read any then we've reached
+ // the end of the input & are done.
+ if (current.empty() && rc == 0) {
+ break;
+ }
+
+ size_t consumed;
+ const size_t dash = current.find('-');
+ const size_t comma = current.find(',');
+ if (dash != absl::string_view::npos && dash < comma) {
+ CHECK_CONDITION(absl::SimpleAtoi(current.substr(0, dash), &cpu_from));
+ consumed = dash + 1;
+ } else if (comma != absl::string_view::npos || rc == 0) {
+ int cpu;
+ CHECK_CONDITION(absl::SimpleAtoi(current.substr(0, comma), &cpu));
+ if (comma == absl::string_view::npos) {
+ consumed = current.size();
+ } else {
+ consumed = comma + 1;
+ }
+ if (cpu_from != -1) {
+ for (int c = cpu_from; c <= cpu; c++) {
+ CPU_SET(c, &set);
+ }
+ cpu_from = -1;
+ } else {
+ CPU_SET(cpu, &set);
+ }
+ } else {
+ consumed = 0;
+ }
+
+ carry_over = current.size() - consumed;
+ memmove(buf.data(), buf.data() + consumed, carry_over);
+ }
+
+ return set;
+}
+
+bool InitNumaTopology(size_t cpu_to_scaled_partition[CPU_SETSIZE],
+ uint64_t *const partition_to_nodes,
+ NumaBindMode *const bind_mode,
+ const size_t num_partitions, const size_t scale_by,
+ absl::FunctionRef<int(size_t)> open_node_cpulist) {
+ // Node 0 will always map to partition 0; record it here in case the system
+ // doesn't support NUMA or the user opts out of our awareness of it - in
+ // either case we'll record nothing in the loop below.
+ partition_to_nodes[NodeToPartition(0, num_partitions)] |= 1 << 0;
+
+ // If we only compiled in support for one partition then we're trivially
+ // done; NUMA awareness is unavailable.
+ if (num_partitions == 1) return false;
+
+ // We rely on rseq to quickly obtain a CPU ID & lookup the appropriate
+ // partition in NumaTopology::GetCurrentPartition(). If rseq is unavailable,
+ // disable NUMA awareness.
+ if (!subtle::percpu::IsFast()) return false;
+
+ // Honor default_want_numa_aware() to allow compile time configuration of
+ // whether to enable NUMA awareness by default, and allow the user to
+ // override that either way by setting TCMALLOC_NUMA_AWARE in the
+ // environment.
+ //
+ // In cases where we don't enable NUMA awareness we simply return. Since the
+ // cpu_to_scaled_partition & partition_to_nodes arrays are zero initialized
+ // we're trivially done - CPUs all map to partition 0, which contains only
+ // CPU 0 added above.
+ const char *e =
+ tcmalloc::tcmalloc_internal::thread_safe_getenv("TCMALLOC_NUMA_AWARE");
+ if (e == nullptr) {
+ // Enable NUMA awareness iff default_want_numa_aware().
+ if (!default_want_numa_aware()) return false;
+ } else if (!strcmp(e, "no-binding")) {
+ // Enable NUMA awareness with no memory binding behavior.
+ *bind_mode = NumaBindMode::kNone;
+ } else if (!strcmp(e, "advisory-binding") || !strcmp(e, "1")) {
+ // Enable NUMA awareness with advisory memory binding behavior.
+ *bind_mode = NumaBindMode::kAdvisory;
+ } else if (!strcmp(e, "strict-binding")) {
+ // Enable NUMA awareness with strict memory binding behavior.
+ *bind_mode = NumaBindMode::kStrict;
+ } else if (!strcmp(e, "0")) {
+ // Disable NUMA awareness.
+ return false;
+ } else {
+ Crash(kCrash, __FILE__, __LINE__, "bad TCMALLOC_NUMA_AWARE env var", e);
+ }
+
+ // The cpu_to_scaled_partition array has a fixed size so that we can
+ // statically allocate it & avoid the need to check whether it has been
+ // allocated prior to lookups. It has CPU_SETSIZE entries which ought to be
+ // sufficient, but sanity check that indexing it by CPU number shouldn't
+ // exceed its bounds.
+ int num_cpus = absl::base_internal::NumCPUs();
+ CHECK_CONDITION(num_cpus <= CPU_SETSIZE);
+
+ // We could just always report that we're NUMA aware, but if a NUMA-aware
+ // binary runs on a system that doesn't include multiple NUMA nodes then our
+ // NUMA awareness will offer no benefit whilst incurring the cost of
+ // redundant work & stats. As such we only report that we're NUMA aware if
+ // there's actually NUMA to be aware of, which we track here.
+ bool numa_aware = false;
+
+ for (size_t node = 0;; node++) {
+ // Detect NUMA nodes by opening their cpulist files from sysfs.
+ const int fd = open_node_cpulist(node);
+ if (fd == -1) {
+ // We expect to encounter ENOENT once node surpasses the actual number of
+ // nodes present in the system. Any other error is a problem.
+ CHECK_CONDITION(errno == ENOENT);
+ break;
+ }
+
+ // Record this node in partition_to_nodes.
+ const size_t partition = NodeToPartition(node, num_partitions);
+ partition_to_nodes[partition] |= 1 << node;
+
+ // cpu_to_scaled_partition_ entries are default initialized to zero, so
+ // skip redundantly parsing CPU lists for nodes that map to partition 0.
+ if (partition == 0) {
+ signal_safe_close(fd);
+ continue;
+ }
+
+ // Parse the cpulist file to determine which CPUs are local to this node.
+ const cpu_set_t node_cpus =
+ ParseCpulist([&](char *const buf, const size_t count) {
+ return signal_safe_read(fd, buf, count, /*bytes_read=*/nullptr);
+ });
+
+ // Assign local CPUs to the appropriate partition.
+ for (size_t cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (CPU_ISSET(cpu, &node_cpus)) {
+ cpu_to_scaled_partition[cpu + kNumaCpuFudge] = partition * scale_by;
+ }
+ }
+
+ // If we observed any CPUs for this node then we've now got CPUs assigned
+ // to a non-zero partition; report that we're NUMA aware.
+ if (CPU_COUNT(&node_cpus) != 0) {
+ numa_aware = true;
+ }
+
+ signal_safe_close(fd);
+ }
+
+ return numa_aware;
+}
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
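
ParseCpulist above takes a read callback rather than a file descriptor, which is what lets numa_test.cc (further down in this diff) feed it synthetic data. A minimal sketch of that usage, parsing a cpulist held in memory; only the callback contract described in numa.h is assumed, and the example string is arbitrary.

    #include <string.h>
    #include <sys/types.h>
    #include <algorithm>
    #include "absl/strings/string_view.h"
    #include "tcmalloc/internal/numa.h"

    // Parse "0-3,8" from memory instead of from a sysfs cpulist file.
    cpu_set_t ParseCpulistFromString() {
      absl::string_view remaining = "0-3,8";
      return tcmalloc::tcmalloc_internal::ParseCpulist(
          [&](char* buf, size_t count) -> ssize_t {
            const size_t n = std::min(count, remaining.size());
            memcpy(buf, remaining.data(), n);
            remaining.remove_prefix(n);
            return static_cast<ssize_t>(n);  // returning 0 signals end of input
          });
    }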
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/numa.h b/contrib/libs/tcmalloc/tcmalloc/internal/numa.h
index bf04c65c21..d6e5b34d5a 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/numa.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/numa.h
@@ -1,227 +1,227 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef TCMALLOC_INTERNAL_NUMA_H_
-#define TCMALLOC_INTERNAL_NUMA_H_
-
-#include <sched.h>
-#include <stddef.h>
-#include <sys/types.h>
-
-#include "absl/functional/function_ref.h"
-#include "tcmalloc/internal/config.h"
-#include "tcmalloc/internal/percpu.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-
-// Indicates how TCMalloc should handle binding memory regions to nodes within
-// particular NUMA partitions.
-enum class NumaBindMode {
- // Don't bind memory at all. Note that this does not make NUMA awareness
- // pointless so long as the NUMA memory policy of threads performing
- // allocations favors the local node. It does mean that we won't be certain
- // that memory is local to any particular partition, it will just be likely.
- kNone,
- // Attempt to bind memory but don't treat failure as fatal. If binding fails
- // then a warning will be logged & we'll then be in much the same state as
- // kNone.
- kAdvisory,
- // Strictly bind memory to nodes within the partition we expect - any error
- // in doing so is fatal & the program will crash. This allows a program to
- // ensure that memory is definitely bound to the set of nodes we expect.
- kStrict,
-};
-
-// We use the result of RseqCpuId() in GetCurrentPartition() to avoid branching
-// in the fast path, but this means that the CPU number we look up in
-// cpu_to_scaled_partition_ might equal kCpuIdUninitialized or
-// kCpuIdUnsupported. We add this fudge factor to the value to compensate,
-// ensuring that our accesses to the cpu_to_scaled_partition_ array are always
-// in bounds.
-static constexpr size_t kNumaCpuFudge = -subtle::percpu::kCpuIdUnsupported;
-
-// Provides information about the topology of a NUMA system.
-//
-// In general we cannot know at compile time how many NUMA nodes the system
-// that we run upon will include, but we also cannot size our data structures
-// arbitrarily at runtime in the name of efficiency. In order to resolve the
-// conflict between these two constraints we define the concept of a NUMA
-// 'partition' as being an arbitrary set of NUMA nodes, disjoint from all other
-// partitions. At compile time we select a fixed number of partitions to
-// support, and at runtime we map each NUMA node in the system to a partition.
-// If the number of supported partitions is greater than or equal to the number
-// of NUMA nodes in the system then partition & node are effectively identical.
-// If however the system has more nodes than we do partitions then nodes
-// assigned to the same partition will share size classes & thus memory. This
-// may incur a performance hit, but allows us to at least run on any system.
-template <size_t NumPartitions, size_t ScaleBy = 1>
-class NumaTopology {
- public:
- // Trivially zero initialize data members.
- constexpr NumaTopology() = default;
-
- // Initialize topology information. This must be called only once, before any
- // of the functions below.
- void Init();
-
- // Like Init(), but allows a test to specify a different `open_node_cpulist`
- // function in order to provide NUMA topology information that doesn't
- // reflect the system we're running upon.
- void InitForTest(absl::FunctionRef<int(size_t)> open_node_cpulist);
-
- // Returns true if NUMA awareness is available & enabled, otherwise false.
- bool numa_aware() const {
- // Explicitly checking NumPartitions here provides a compile time constant
- // false in cases where NumPartitions==1, allowing NUMA awareness to be
- // optimized away.
- return (NumPartitions > 1) && numa_aware_;
- }
-
- // Returns the number of NUMA partitions deemed 'active' - i.e. the number of
- // partitions that other parts of TCMalloc need to concern themselves with.
- // Checking this rather than using kNumaPartitions allows users to avoid work
- // on non-zero partitions when NUMA awareness is disabled.
- size_t active_partitions() const { return numa_aware() ? NumPartitions : 1; }
-
- // Return a value indicating how we should behave with regards to binding
- // memory regions to NUMA nodes.
- NumaBindMode bind_mode() const { return bind_mode_; }
-
- // Return the NUMA partition number to which the CPU we're currently
- // executing upon belongs. Note that whilst the CPU->partition mapping is
- // fixed, the return value of this function may change at arbitrary times as
- // this thread migrates between CPUs.
- size_t GetCurrentPartition() const;
-
- // Like GetCurrentPartition(), but returns a partition number multiplied by
- // ScaleBy.
- size_t GetCurrentScaledPartition() const;
-
- // Return the NUMA partition number to which `cpu` belongs.
- //
- // It is valid for cpu to equal subtle::percpu::kCpuIdUninitialized or
- // subtle::percpu::kCpuIdUnsupported. In either case partition 0 will be
- // returned.
- size_t GetCpuPartition(int cpu) const;
-
- // Like GetCpuPartition(), but returns a partition number multiplied by
- // ScaleBy.
- size_t GetCpuScaledPartition(int cpu) const;
-
- // Return a bitmap in which set bits identify the nodes that belong to the
- // specified NUMA `partition`.
- uint64_t GetPartitionNodes(int partition) const;
-
- private:
- // Maps from CPU number (plus kNumaCpuFudge) to NUMA partition.
- size_t cpu_to_scaled_partition_[CPU_SETSIZE + kNumaCpuFudge] = {0};
- // Maps from NUMA partition to a bitmap of NUMA nodes within the partition.
- uint64_t partition_to_nodes_[NumPartitions] = {0};
- // Indicates whether NUMA awareness is available & enabled.
- bool numa_aware_ = false;
- // Desired memory binding behavior.
- NumaBindMode bind_mode_ = NumaBindMode::kAdvisory;
-};
-
-// Opens a /sys/devices/system/node/nodeX/cpulist file for read only access &
-// returns the file descriptor.
-int OpenSysfsCpulist(size_t node);
-
-// Parse a CPU list in the format used by
-// /sys/devices/system/node/nodeX/cpulist files - that is, individual CPU
-// numbers or ranges in the format <start>-<end> inclusive all joined by comma
-// characters.
-//
-// The read function is expected to operate much like the read syscall. It
-// should read up to `count` bytes into `buf` and return the number of bytes
-// actually read. If an error occurs during reading it should return -1 with
-// errno set to an appropriate error code.
-cpu_set_t ParseCpulist(
- absl::FunctionRef<ssize_t(char *buf, size_t count)> read);
-
-// Initialize the data members of a NumaTopology<> instance.
-//
-// This function must only be called once per NumaTopology<> instance, and
-// relies upon the data members of that instance being default initialized.
-//
-// The `open_node_cpulist` function is typically OpenSysfsCpulist but tests may
-// use a different implementation.
-//
-// Returns true if we're actually NUMA aware; i.e. if we have CPUs mapped to
-// multiple partitions.
-bool InitNumaTopology(size_t cpu_to_scaled_partition[CPU_SETSIZE],
- uint64_t *partition_to_nodes, NumaBindMode *bind_mode,
- size_t num_partitions, size_t scale_by,
- absl::FunctionRef<int(size_t)> open_node_cpulist);
-
-// Returns the NUMA partition to which `node` belongs.
-inline size_t NodeToPartition(const size_t node, const size_t num_partitions) {
- return node % num_partitions;
-}
-
-template <size_t NumPartitions, size_t ScaleBy>
-inline void NumaTopology<NumPartitions, ScaleBy>::Init() {
- numa_aware_ =
- InitNumaTopology(cpu_to_scaled_partition_, partition_to_nodes_,
- &bind_mode_, NumPartitions, ScaleBy, OpenSysfsCpulist);
-}
-
-template <size_t NumPartitions, size_t ScaleBy>
-inline void NumaTopology<NumPartitions, ScaleBy>::InitForTest(
- absl::FunctionRef<int(size_t)> open_node_cpulist) {
- numa_aware_ =
- InitNumaTopology(cpu_to_scaled_partition_, partition_to_nodes_,
- &bind_mode_, NumPartitions, ScaleBy, open_node_cpulist);
-}
-
-template <size_t NumPartitions, size_t ScaleBy>
-inline size_t NumaTopology<NumPartitions, ScaleBy>::GetCurrentPartition()
- const {
- if constexpr (NumPartitions == 1) return 0;
- return GetCpuPartition(subtle::percpu::RseqCpuId());
-}
-
-template <size_t NumPartitions, size_t ScaleBy>
-inline size_t NumaTopology<NumPartitions, ScaleBy>::GetCurrentScaledPartition()
- const {
- if constexpr (NumPartitions == 1) return 0;
- return GetCpuScaledPartition(subtle::percpu::RseqCpuId());
-}
-
-template <size_t NumPartitions, size_t ScaleBy>
-inline size_t NumaTopology<NumPartitions, ScaleBy>::GetCpuPartition(
- const int cpu) const {
- return GetCpuScaledPartition(cpu) / ScaleBy;
-}
-
-template <size_t NumPartitions, size_t ScaleBy>
-inline size_t NumaTopology<NumPartitions, ScaleBy>::GetCpuScaledPartition(
- const int cpu) const {
- if constexpr (NumPartitions == 1) return 0;
- return cpu_to_scaled_partition_[cpu + kNumaCpuFudge];
-}
-
-template <size_t NumPartitions, size_t ScaleBy>
-inline uint64_t NumaTopology<NumPartitions, ScaleBy>::GetPartitionNodes(
- const int partition) const {
- return partition_to_nodes_[partition];
-}
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
-
-#endif // TCMALLOC_INTERNAL_NUMA_H_
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TCMALLOC_INTERNAL_NUMA_H_
+#define TCMALLOC_INTERNAL_NUMA_H_
+
+#include <sched.h>
+#include <stddef.h>
+#include <sys/types.h>
+
+#include "absl/functional/function_ref.h"
+#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/percpu.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
+// Indicates how TCMalloc should handle binding memory regions to nodes within
+// particular NUMA partitions.
+enum class NumaBindMode {
+ // Don't bind memory at all. Note that this does not make NUMA awareness
+ // pointless so long as the NUMA memory policy of threads performing
+ // allocations favors the local node. It does mean that we won't be certain
+ // that memory is local to any particular partition, it will just be likely.
+ kNone,
+ // Attempt to bind memory but don't treat failure as fatal. If binding fails
+ // then a warning will be logged & we'll then be in much the same state as
+ // kNone.
+ kAdvisory,
+ // Strictly bind memory to nodes within the partition we expect - any error
+ // in doing so is fatal & the program will crash. This allows a program to
+ // ensure that memory is definitely bound to the set of nodes we expect.
+ kStrict,
+};
+
+// We use the result of RseqCpuId() in GetCurrentPartition() to avoid branching
+// in the fast path, but this means that the CPU number we look up in
+// cpu_to_scaled_partition_ might equal kCpuIdUninitialized or
+// kCpuIdUnsupported. We add this fudge factor to the value to compensate,
+// ensuring that our accesses to the cpu_to_scaled_partition_ array are always
+// in bounds.
+static constexpr size_t kNumaCpuFudge = -subtle::percpu::kCpuIdUnsupported;
+
+// Provides information about the topology of a NUMA system.
+//
+// In general we cannot know at compile time how many NUMA nodes the system
+// that we run upon will include, but we also cannot size our data structures
+// arbitrarily at runtime in the name of efficiency. In order to resolve the
+// conflict between these two constraints we define the concept of a NUMA
+// 'partition' as being an arbitrary set of NUMA nodes, disjoint from all other
+// partitions. At compile time we select a fixed number of partitions to
+// support, and at runtime we map each NUMA node in the system to a partition.
+// If the number of supported partitions is greater than or equal to the number
+// of NUMA nodes in the system then partition & node are effectively identical.
+// If however the system has more nodes than we do partitions then nodes
+// assigned to the same partition will share size classes & thus memory. This
+// may incur a performance hit, but allows us to at least run on any system.
+template <size_t NumPartitions, size_t ScaleBy = 1>
+class NumaTopology {
+ public:
+ // Trivially zero initialize data members.
+ constexpr NumaTopology() = default;
+
+ // Initialize topology information. This must be called only once, before any
+ // of the functions below.
+ void Init();
+
+ // Like Init(), but allows a test to specify a different `open_node_cpulist`
+ // function in order to provide NUMA topology information that doesn't
+ // reflect the system we're running upon.
+ void InitForTest(absl::FunctionRef<int(size_t)> open_node_cpulist);
+
+ // Returns true if NUMA awareness is available & enabled, otherwise false.
+ bool numa_aware() const {
+ // Explicitly checking NumPartitions here provides a compile time constant
+ // false in cases where NumPartitions==1, allowing NUMA awareness to be
+ // optimized away.
+ return (NumPartitions > 1) && numa_aware_;
+ }
+
+ // Returns the number of NUMA partitions deemed 'active' - i.e. the number of
+ // partitions that other parts of TCMalloc need to concern themselves with.
+ // Checking this rather than using kNumaPartitions allows users to avoid work
+ // on non-zero partitions when NUMA awareness is disabled.
+ size_t active_partitions() const { return numa_aware() ? NumPartitions : 1; }
+
+ // Return a value indicating how we should behave with regards to binding
+ // memory regions to NUMA nodes.
+ NumaBindMode bind_mode() const { return bind_mode_; }
+
+ // Return the NUMA partition number to which the CPU we're currently
+ // executing upon belongs. Note that whilst the CPU->partition mapping is
+ // fixed, the return value of this function may change at arbitrary times as
+ // this thread migrates between CPUs.
+ size_t GetCurrentPartition() const;
+
+ // Like GetCurrentPartition(), but returns a partition number multiplied by
+ // ScaleBy.
+ size_t GetCurrentScaledPartition() const;
+
+ // Return the NUMA partition number to which `cpu` belongs.
+ //
+ // It is valid for cpu to equal subtle::percpu::kCpuIdUninitialized or
+ // subtle::percpu::kCpuIdUnsupported. In either case partition 0 will be
+ // returned.
+ size_t GetCpuPartition(int cpu) const;
+
+ // Like GetCpuPartition(), but returns a partition number multiplied by
+ // ScaleBy.
+ size_t GetCpuScaledPartition(int cpu) const;
+
+ // Return a bitmap in which set bits identify the nodes that belong to the
+ // specified NUMA `partition`.
+ uint64_t GetPartitionNodes(int partition) const;
+
+ private:
+ // Maps from CPU number (plus kNumaCpuFudge) to NUMA partition.
+ size_t cpu_to_scaled_partition_[CPU_SETSIZE + kNumaCpuFudge] = {0};
+ // Maps from NUMA partition to a bitmap of NUMA nodes within the partition.
+ uint64_t partition_to_nodes_[NumPartitions] = {0};
+ // Indicates whether NUMA awareness is available & enabled.
+ bool numa_aware_ = false;
+ // Desired memory binding behavior.
+ NumaBindMode bind_mode_ = NumaBindMode::kAdvisory;
+};
+
+// Opens a /sys/devices/system/node/nodeX/cpulist file for read only access &
+// returns the file descriptor.
+int OpenSysfsCpulist(size_t node);
+
+// Parse a CPU list in the format used by
+// /sys/devices/system/node/nodeX/cpulist files - that is, individual CPU
+// numbers or ranges in the format <start>-<end> inclusive all joined by comma
+// characters.
+//
+// The read function is expected to operate much like the read syscall. It
+// should read up to `count` bytes into `buf` and return the number of bytes
+// actually read. If an error occurs during reading it should return -1 with
+// errno set to an appropriate error code.
+cpu_set_t ParseCpulist(
+ absl::FunctionRef<ssize_t(char *buf, size_t count)> read);
+
+// Initialize the data members of a NumaTopology<> instance.
+//
+// This function must only be called once per NumaTopology<> instance, and
+// relies upon the data members of that instance being default initialized.
+//
+// The `open_node_cpulist` function is typically OpenSysfsCpulist but tests may
+// use a different implementation.
+//
+// Returns true if we're actually NUMA aware; i.e. if we have CPUs mapped to
+// multiple partitions.
+bool InitNumaTopology(size_t cpu_to_scaled_partition[CPU_SETSIZE],
+ uint64_t *partition_to_nodes, NumaBindMode *bind_mode,
+ size_t num_partitions, size_t scale_by,
+ absl::FunctionRef<int(size_t)> open_node_cpulist);
+
+// Returns the NUMA partition to which `node` belongs.
+inline size_t NodeToPartition(const size_t node, const size_t num_partitions) {
+ return node % num_partitions;
+}
+
+template <size_t NumPartitions, size_t ScaleBy>
+inline void NumaTopology<NumPartitions, ScaleBy>::Init() {
+ numa_aware_ =
+ InitNumaTopology(cpu_to_scaled_partition_, partition_to_nodes_,
+ &bind_mode_, NumPartitions, ScaleBy, OpenSysfsCpulist);
+}
+
+template <size_t NumPartitions, size_t ScaleBy>
+inline void NumaTopology<NumPartitions, ScaleBy>::InitForTest(
+ absl::FunctionRef<int(size_t)> open_node_cpulist) {
+ numa_aware_ =
+ InitNumaTopology(cpu_to_scaled_partition_, partition_to_nodes_,
+ &bind_mode_, NumPartitions, ScaleBy, open_node_cpulist);
+}
+
+template <size_t NumPartitions, size_t ScaleBy>
+inline size_t NumaTopology<NumPartitions, ScaleBy>::GetCurrentPartition()
+ const {
+ if constexpr (NumPartitions == 1) return 0;
+ return GetCpuPartition(subtle::percpu::RseqCpuId());
+}
+
+template <size_t NumPartitions, size_t ScaleBy>
+inline size_t NumaTopology<NumPartitions, ScaleBy>::GetCurrentScaledPartition()
+ const {
+ if constexpr (NumPartitions == 1) return 0;
+ return GetCpuScaledPartition(subtle::percpu::RseqCpuId());
+}
+
+template <size_t NumPartitions, size_t ScaleBy>
+inline size_t NumaTopology<NumPartitions, ScaleBy>::GetCpuPartition(
+ const int cpu) const {
+ return GetCpuScaledPartition(cpu) / ScaleBy;
+}
+
+template <size_t NumPartitions, size_t ScaleBy>
+inline size_t NumaTopology<NumPartitions, ScaleBy>::GetCpuScaledPartition(
+ const int cpu) const {
+ if constexpr (NumPartitions == 1) return 0;
+ return cpu_to_scaled_partition_[cpu + kNumaCpuFudge];
+}
+
+template <size_t NumPartitions, size_t ScaleBy>
+inline uint64_t NumaTopology<NumPartitions, ScaleBy>::GetPartitionNodes(
+ const int partition) const {
+ return partition_to_nodes_[partition];
+}
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
+
+#endif // TCMALLOC_INTERNAL_NUMA_H_
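
To close out the numa.h hunk: a brief sketch of how a NumaTopology<> instance is meant to be consumed, based on the member functions declared above. The two-partition instantiation, the global variable, and the wrapper functions are illustrative choices, not part of the library.

    #include <stddef.h>
    #include "tcmalloc/internal/numa.h"

    using tcmalloc::tcmalloc_internal::NumaTopology;

    // constexpr default constructor, so a namespace-scope instance is safe;
    // Init() must still be called exactly once before any queries.
    static NumaTopology<2> numa_topology;

    void InitNuma() { numa_topology.Init(); }

    size_t CurrentPartition() {
      // Returns 0 whenever NUMA awareness is compiled out, disabled via
      // TCMALLOC_NUMA_AWARE, or unavailable (e.g. no rseq support).
      return numa_topology.GetCurrentPartition();
    }

    size_t ActivePartitions() { return numa_topology.active_partitions(); }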
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/numa_test.cc b/contrib/libs/tcmalloc/tcmalloc/internal/numa_test.cc
index bbd86a3f7d..29dbeffd71 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/numa_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/numa_test.cc
@@ -1,284 +1,284 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "tcmalloc/internal/numa.h"
-
-#include <errno.h>
-#include <linux/memfd.h>
-#include <sched.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <string.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <new>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/random/random.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_join.h"
-#include "absl/strings/string_view.h"
-#include "absl/types/span.h"
-#include "tcmalloc/internal/logging.h"
-
-namespace tcmalloc {
-namespace tcmalloc_internal {
-namespace {
-
-int memfd_create(const char *name, unsigned int flags) {
-#ifdef __NR_memfd_create
- return syscall(__NR_memfd_create, name, flags);
-#else
- errno = ENOSYS;
- return -1;
-#endif
-}
-
-// A synthetic cpulist that can be read from a file descriptor.
-class SyntheticCpuList {
- public:
- explicit SyntheticCpuList(const absl::string_view content) {
- fd_ = memfd_create("cpulist", MFD_CLOEXEC);
- CHECK_CONDITION(fd_ != -1);
-
- CHECK_CONDITION(write(fd_, content.data(), content.size()) ==
- content.size());
- CHECK_CONDITION(write(fd_, "\n", 1) == 1);
- CHECK_CONDITION(lseek(fd_, 0, SEEK_SET) == 0);
- }
-
- ~SyntheticCpuList() { close(fd_); }
-
- // Disallow copies, which would require reference counting to know when
- // we should close fd_.
- SyntheticCpuList(const SyntheticCpuList &) = delete;
- SyntheticCpuList &operator=(const SyntheticCpuList &) = delete;
-
- // Moves are fine - only one instance at a time holds the fd.
- SyntheticCpuList(SyntheticCpuList &&other)
- : fd_(std::exchange(other.fd_, -1)) {}
- SyntheticCpuList &operator=(SyntheticCpuList &&other) {
- new (this) SyntheticCpuList(std::move(other));
- return *this;
- }
-
- int fd() const { return fd_; }
-
- private:
- // The underlying memfd.
- int fd_;
-};
-
-class NumaTopologyTest : public ::testing::Test {
- protected:
- void SetUp() override {
- // We use memfd to create synthetic cpulist files, and can't run without
- // it. Skip all affected tests if memfd is not supported (i.e. Linux <
- // 3.17).
- const int fd = memfd_create("test", MFD_CLOEXEC);
- if (fd == -1 && errno == ENOSYS) {
- GTEST_SKIP() << "Test requires memfd support";
- }
- close(fd);
-
- // If rseq is unavailable the NumaTopology never enables NUMA awareness.
- if (!subtle::percpu::IsFast()) {
- GTEST_SKIP() << "Test requires rseq support";
- }
- }
-};
-
-template <size_t NumPartitions>
-NumaTopology<NumPartitions> CreateNumaTopology(
- const absl::Span<const SyntheticCpuList> cpu_lists) {
- NumaTopology<NumPartitions> nt;
- nt.InitForTest([&](const size_t node) {
- if (node >= cpu_lists.size()) {
- errno = ENOENT;
- return -1;
- }
- return cpu_lists[node].fd();
- });
- return nt;
-}
-
-// Ensure that if we set NumPartitions=1 then NUMA awareness is disabled even
-// in the presence of a system with multiple NUMA nodes.
-TEST_F(NumaTopologyTest, NoCompileTimeNuma) {
- std::vector<SyntheticCpuList> nodes;
- nodes.emplace_back("0");
- nodes.emplace_back("1");
-
- const auto nt = CreateNumaTopology<1>(nodes);
-
- EXPECT_EQ(nt.numa_aware(), false);
- EXPECT_EQ(nt.active_partitions(), 1);
-}
-
-// Ensure that if we run on a system with no NUMA support at all (i.e. no
-// /sys/devices/system/node/nodeX/cpulist files) we correctly disable NUMA
-// awareness.
-TEST_F(NumaTopologyTest, NoRunTimeNuma) {
- const auto nt = CreateNumaTopology<2>({});
-
- EXPECT_EQ(nt.numa_aware(), false);
- EXPECT_EQ(nt.active_partitions(), 1);
-}
-
-// Ensure that if we run on a system with only 1 node then we disable NUMA
-// awareness.
-TEST_F(NumaTopologyTest, SingleNode) {
- std::vector<SyntheticCpuList> nodes;
- nodes.emplace_back("0-27");
-
- const auto nt = CreateNumaTopology<4>(nodes);
-
- EXPECT_EQ(nt.numa_aware(), false);
- EXPECT_EQ(nt.active_partitions(), 1);
-}
-
-// Basic sanity test modelling a simple 2 node system.
-TEST_F(NumaTopologyTest, TwoNode) {
- std::vector<SyntheticCpuList> nodes;
- nodes.emplace_back("0-5");
- nodes.emplace_back("6-11");
-
- const auto nt = CreateNumaTopology<2>(nodes);
-
- EXPECT_EQ(nt.numa_aware(), true);
- EXPECT_EQ(nt.active_partitions(), 2);
-
- for (int cpu = 0; cpu <= 5; cpu++) {
- EXPECT_EQ(nt.GetCpuPartition(cpu), 0);
- }
- for (int cpu = 6; cpu <= 11; cpu++) {
- EXPECT_EQ(nt.GetCpuPartition(cpu), 1);
- }
-}
-
-// Test that cpulists too long to fit into the 16 byte buffer used by
-// InitNumaTopology() parse successfully.
-TEST_F(NumaTopologyTest, LongCpuLists) {
- std::vector<SyntheticCpuList> nodes;
-
- // Content from here onwards lies |
- // beyond the 16 byte buffer. |
- // v
- nodes.emplace_back("0-1,2-3,4-5,6-7,8"); // Right after a comma
- nodes.emplace_back("9,10,11,12,13,14,15-19"); // Right before a comma
- nodes.emplace_back("20-21,22-23,24-25,26-29"); // Within range end
- nodes.emplace_back("30-32,33,34,35,36-38,39"); // Within range start
- nodes.emplace_back("40-43,44,45-49");
-
- const auto nt = CreateNumaTopology<3>(nodes);
-
- EXPECT_EQ(nt.numa_aware(), true);
- EXPECT_EQ(nt.active_partitions(), 3);
-
- for (int cpu = 0; cpu <= 8; cpu++) {
- EXPECT_EQ(nt.GetCpuPartition(cpu), 0);
- }
- for (int cpu = 9; cpu <= 19; cpu++) {
- EXPECT_EQ(nt.GetCpuPartition(cpu), 1);
- }
- for (int cpu = 20; cpu <= 29; cpu++) {
- EXPECT_EQ(nt.GetCpuPartition(cpu), 2);
- }
- for (int cpu = 30; cpu <= 39; cpu++) {
- EXPECT_EQ(nt.GetCpuPartition(cpu), 0);
- }
- for (int cpu = 40; cpu <= 49; cpu++) {
- EXPECT_EQ(nt.GetCpuPartition(cpu), 1);
- }
-}
-
-// Ensure we can initialize using the host system's real NUMA topology
-// information.
-TEST_F(NumaTopologyTest, Host) {
- NumaTopology<4> nt;
- nt.Init();
-
- // We don't actually know anything about the host, so there's not much more
- // we can do beyond checking that we didn't crash.
-}
-
-// Ensure that we can parse randomized cpulists correctly.
-TEST(ParseCpulistTest, Random) {
- absl::BitGen gen;
-
- static constexpr int kIterations = 100;
- for (int i = 0; i < kIterations; i++) {
- cpu_set_t reference;
- CPU_ZERO(&reference);
-
- // Set a random number of CPUs within the reference set.
- const double density = absl::Uniform(gen, 0.0, 1.0);
- for (int cpu = 0; cpu < CPU_SETSIZE; cpu++) {
- if (absl::Bernoulli(gen, density)) {
- CPU_SET(cpu, &reference);
- }
- }
-
- // Serialize the reference set into a cpulist-style string.
- std::vector<std::string> components;
- for (int cpu = 0; cpu < CPU_SETSIZE; cpu++) {
- if (!CPU_ISSET(cpu, &reference)) continue;
-
- const int start = cpu;
- int next = cpu + 1;
- while (next < CPU_SETSIZE && CPU_ISSET(next, &reference)) {
- cpu = next;
- next = cpu + 1;
- }
-
- if (cpu == start) {
- components.push_back(absl::StrCat(cpu));
- } else {
- components.push_back(absl::StrCat(start, "-", cpu));
- }
- }
- const std::string serialized = absl::StrJoin(components, ",");
-
- // Now parse that string using our ParseCpulist function, randomizing the
- // amount of data we provide to it from each read.
- absl::string_view remaining(serialized);
- const cpu_set_t parsed =
- ParseCpulist([&](char *const buf, const size_t count) -> ssize_t {
- // Calculate how much data we have left to provide.
- const size_t max = std::min(count, remaining.size());
-
- // If none, we have no choice but to provide nothing.
- if (max == 0) return 0;
-
- // If we do have data, return a randomly sized subset of it to stress
- // the logic around reading partial values.
- const size_t copy = absl::Uniform(gen, static_cast<size_t>(1), max);
- memcpy(buf, remaining.data(), copy);
- remaining.remove_prefix(copy);
- return copy;
- });
-
- // We ought to have parsed the same set of CPUs that we serialized.
- EXPECT_TRUE(CPU_EQUAL(&parsed, &reference));
- }
-}
-
-} // namespace
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tcmalloc/internal/numa.h"
+
+#include <errno.h>
+#include <linux/memfd.h>
+#include <sched.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <syscall.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <new>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/random/random.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_join.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/span.h"
+#include "tcmalloc/internal/logging.h"
+
+namespace tcmalloc {
+namespace tcmalloc_internal {
+namespace {
+
+int memfd_create(const char *name, unsigned int flags) {
+#ifdef __NR_memfd_create
+ return syscall(__NR_memfd_create, name, flags);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+// A synthetic cpulist that can be read from a file descriptor.
+class SyntheticCpuList {
+ public:
+ explicit SyntheticCpuList(const absl::string_view content) {
+ fd_ = memfd_create("cpulist", MFD_CLOEXEC);
+ CHECK_CONDITION(fd_ != -1);
+
+ CHECK_CONDITION(write(fd_, content.data(), content.size()) ==
+ content.size());
+ CHECK_CONDITION(write(fd_, "\n", 1) == 1);
+ CHECK_CONDITION(lseek(fd_, 0, SEEK_SET) == 0);
+ }
+
+ ~SyntheticCpuList() { close(fd_); }
+
+  // Disallow copies, which would require reference counting to know when
+ // we should close fd_.
+ SyntheticCpuList(const SyntheticCpuList &) = delete;
+ SyntheticCpuList &operator=(const SyntheticCpuList &) = delete;
+
+ // Moves are fine - only one instance at a time holds the fd.
+ SyntheticCpuList(SyntheticCpuList &&other)
+ : fd_(std::exchange(other.fd_, -1)) {}
+ SyntheticCpuList &operator=(SyntheticCpuList &&other) {
+ new (this) SyntheticCpuList(std::move(other));
+ return *this;
+ }
+
+ int fd() const { return fd_; }
+
+ private:
+ // The underlying memfd.
+ int fd_;
+};
+
+class NumaTopologyTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ // We use memfd to create synthetic cpulist files, and can't run without
+ // it. Skip all affected tests if memfd is not supported (i.e. Linux <
+ // 3.17).
+ const int fd = memfd_create("test", MFD_CLOEXEC);
+ if (fd == -1 && errno == ENOSYS) {
+ GTEST_SKIP() << "Test requires memfd support";
+ }
+ close(fd);
+
+ // If rseq is unavailable the NumaTopology never enables NUMA awareness.
+ if (!subtle::percpu::IsFast()) {
+ GTEST_SKIP() << "Test requires rseq support";
+ }
+ }
+};
+
+template <size_t NumPartitions>
+NumaTopology<NumPartitions> CreateNumaTopology(
+ const absl::Span<const SyntheticCpuList> cpu_lists) {
+ NumaTopology<NumPartitions> nt;
+ nt.InitForTest([&](const size_t node) {
+ if (node >= cpu_lists.size()) {
+ errno = ENOENT;
+ return -1;
+ }
+ return cpu_lists[node].fd();
+ });
+ return nt;
+}
+
+// Ensure that if we set NumPartitions=1 then NUMA awareness is disabled even
+// in the presence of a system with multiple NUMA nodes.
+TEST_F(NumaTopologyTest, NoCompileTimeNuma) {
+ std::vector<SyntheticCpuList> nodes;
+ nodes.emplace_back("0");
+ nodes.emplace_back("1");
+
+ const auto nt = CreateNumaTopology<1>(nodes);
+
+ EXPECT_EQ(nt.numa_aware(), false);
+ EXPECT_EQ(nt.active_partitions(), 1);
+}
+
+// Ensure that if we run on a system with no NUMA support at all (i.e. no
+// /sys/devices/system/node/nodeX/cpulist files) we correctly disable NUMA
+// awareness.
+TEST_F(NumaTopologyTest, NoRunTimeNuma) {
+ const auto nt = CreateNumaTopology<2>({});
+
+ EXPECT_EQ(nt.numa_aware(), false);
+ EXPECT_EQ(nt.active_partitions(), 1);
+}
+
+// Ensure that if we run on a system with only 1 node then we disable NUMA
+// awareness.
+TEST_F(NumaTopologyTest, SingleNode) {
+ std::vector<SyntheticCpuList> nodes;
+ nodes.emplace_back("0-27");
+
+ const auto nt = CreateNumaTopology<4>(nodes);
+
+ EXPECT_EQ(nt.numa_aware(), false);
+ EXPECT_EQ(nt.active_partitions(), 1);
+}
+
+// Basic sanity test modelling a simple 2 node system.
+TEST_F(NumaTopologyTest, TwoNode) {
+ std::vector<SyntheticCpuList> nodes;
+ nodes.emplace_back("0-5");
+ nodes.emplace_back("6-11");
+
+ const auto nt = CreateNumaTopology<2>(nodes);
+
+ EXPECT_EQ(nt.numa_aware(), true);
+ EXPECT_EQ(nt.active_partitions(), 2);
+
+ for (int cpu = 0; cpu <= 5; cpu++) {
+ EXPECT_EQ(nt.GetCpuPartition(cpu), 0);
+ }
+ for (int cpu = 6; cpu <= 11; cpu++) {
+ EXPECT_EQ(nt.GetCpuPartition(cpu), 1);
+ }
+}
+
+// Test that cpulists too long to fit into the 16 byte buffer used by
+// InitNumaTopology() parse successfully.
+TEST_F(NumaTopologyTest, LongCpuLists) {
+ std::vector<SyntheticCpuList> nodes;
+
+ // Content from here onwards lies |
+ // beyond the 16 byte buffer. |
+ // v
+ nodes.emplace_back("0-1,2-3,4-5,6-7,8"); // Right after a comma
+ nodes.emplace_back("9,10,11,12,13,14,15-19"); // Right before a comma
+ nodes.emplace_back("20-21,22-23,24-25,26-29"); // Within range end
+ nodes.emplace_back("30-32,33,34,35,36-38,39"); // Within range start
+ nodes.emplace_back("40-43,44,45-49");
+
+ const auto nt = CreateNumaTopology<3>(nodes);
+
+ EXPECT_EQ(nt.numa_aware(), true);
+ EXPECT_EQ(nt.active_partitions(), 3);
+
+ for (int cpu = 0; cpu <= 8; cpu++) {
+ EXPECT_EQ(nt.GetCpuPartition(cpu), 0);
+ }
+ for (int cpu = 9; cpu <= 19; cpu++) {
+ EXPECT_EQ(nt.GetCpuPartition(cpu), 1);
+ }
+ for (int cpu = 20; cpu <= 29; cpu++) {
+ EXPECT_EQ(nt.GetCpuPartition(cpu), 2);
+ }
+ for (int cpu = 30; cpu <= 39; cpu++) {
+ EXPECT_EQ(nt.GetCpuPartition(cpu), 0);
+ }
+ for (int cpu = 40; cpu <= 49; cpu++) {
+ EXPECT_EQ(nt.GetCpuPartition(cpu), 1);
+ }
+}
+
+// Ensure we can initialize using the host system's real NUMA topology
+// information.
+TEST_F(NumaTopologyTest, Host) {
+ NumaTopology<4> nt;
+ nt.Init();
+
+ // We don't actually know anything about the host, so there's not much more
+ // we can do beyond checking that we didn't crash.
+}
+
+// Ensure that we can parse randomized cpulists correctly.
+TEST(ParseCpulistTest, Random) {
+ absl::BitGen gen;
+
+ static constexpr int kIterations = 100;
+ for (int i = 0; i < kIterations; i++) {
+ cpu_set_t reference;
+ CPU_ZERO(&reference);
+
+ // Set a random number of CPUs within the reference set.
+ const double density = absl::Uniform(gen, 0.0, 1.0);
+ for (int cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (absl::Bernoulli(gen, density)) {
+ CPU_SET(cpu, &reference);
+ }
+ }
+
+ // Serialize the reference set into a cpulist-style string.
+ std::vector<std::string> components;
+ for (int cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (!CPU_ISSET(cpu, &reference)) continue;
+
+ const int start = cpu;
+ int next = cpu + 1;
+ while (next < CPU_SETSIZE && CPU_ISSET(next, &reference)) {
+ cpu = next;
+ next = cpu + 1;
+ }
+
+ if (cpu == start) {
+ components.push_back(absl::StrCat(cpu));
+ } else {
+ components.push_back(absl::StrCat(start, "-", cpu));
+ }
+ }
+ const std::string serialized = absl::StrJoin(components, ",");
+
+ // Now parse that string using our ParseCpulist function, randomizing the
+ // amount of data we provide to it from each read.
+ absl::string_view remaining(serialized);
+ const cpu_set_t parsed =
+ ParseCpulist([&](char *const buf, const size_t count) -> ssize_t {
+ // Calculate how much data we have left to provide.
+ const size_t max = std::min(count, remaining.size());
+
+ // If none, we have no choice but to provide nothing.
+ if (max == 0) return 0;
+
+ // If we do have data, return a randomly sized subset of it to stress
+ // the logic around reading partial values.
+ const size_t copy = absl::Uniform(gen, static_cast<size_t>(1), max);
+ memcpy(buf, remaining.data(), copy);
+ remaining.remove_prefix(copy);
+ return copy;
+ });
+
+ // We ought to have parsed the same set of CPUs that we serialized.
+ EXPECT_TRUE(CPU_EQUAL(&parsed, &reference));
+ }
+}
+
+} // namespace
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
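A quick usage sketch for the ParseCpulist interface exercised by the test above (illustration only, not part of the diff): the function takes a read callback of the shape (char *buf, size_t count) -> ssize_t and tolerates partial reads, with a return of 0 signalling end of input. Feeding it a fixed in-memory string could look roughly like this; the ParseFromString name is invented for the example.

    // Sketch only; assumes tcmalloc/internal/numa.h as included by the test above.
    #include <sched.h>
    #include <sys/types.h>
    #include <algorithm>
    #include <cstring>
    #include "absl/strings/string_view.h"
    #include "tcmalloc/internal/numa.h"

    cpu_set_t ParseFromString(absl::string_view cpulist) {
      return tcmalloc::tcmalloc_internal::ParseCpulist(
          [&](char *buf, size_t count) -> ssize_t {
            const size_t n = std::min(count, cpulist.size());
            memcpy(buf, cpulist.data(), n);
            cpulist.remove_prefix(n);
            return n;  // 0 once the string is exhausted, as in the test above.
          });
    }

    // e.g. ParseFromString("0-5,9") yields a cpu_set_t with CPUs 0..5 and 9 set.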
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/optimization.h b/contrib/libs/tcmalloc/tcmalloc/internal/optimization.h
index 6380183a50..22878aacda 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/optimization.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/optimization.h
@@ -34,12 +34,12 @@
#endif
#endif
-// Annotations for functions that are not affected by nor affect observable
-// state of the program.
-#if ABSL_HAVE_ATTRIBUTE(const)
-#define TCMALLOC_ATTRIBUTE_CONST __attribute__((const))
-#else
-#define TCMALLOC_ATTRIBUTE_CONST
-#endif
-
+// Annotations for functions that are not affected by nor affect observable
+// state of the program.
+#if ABSL_HAVE_ATTRIBUTE(const)
+#define TCMALLOC_ATTRIBUTE_CONST __attribute__((const))
+#else
+#define TCMALLOC_ATTRIBUTE_CONST
+#endif
+
#endif // TCMALLOC_INTERNAL_OPTIMIZATION_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/parameter_accessors.h b/contrib/libs/tcmalloc/tcmalloc/internal/parameter_accessors.h
index f14798fe74..76161f89b8 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/parameter_accessors.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/parameter_accessors.h
@@ -26,8 +26,8 @@ ABSL_ATTRIBUTE_WEAK uint64_t TCMalloc_Internal_GetHeapSizeHardLimit();
ABSL_ATTRIBUTE_WEAK bool TCMalloc_Internal_GetHPAASubrelease();
ABSL_ATTRIBUTE_WEAK void
TCMalloc_Internal_GetHugePageFillerSkipSubreleaseInterval(absl::Duration* v);
-ABSL_ATTRIBUTE_WEAK bool TCMalloc_Internal_GetShufflePerCpuCachesEnabled();
-ABSL_ATTRIBUTE_WEAK bool TCMalloc_Internal_GetReclaimIdlePerCpuCachesEnabled();
+ABSL_ATTRIBUTE_WEAK bool TCMalloc_Internal_GetShufflePerCpuCachesEnabled();
+ABSL_ATTRIBUTE_WEAK bool TCMalloc_Internal_GetReclaimIdlePerCpuCachesEnabled();
ABSL_ATTRIBUTE_WEAK bool TCMalloc_Internal_GetLazyPerCpuCachesEnabled();
ABSL_ATTRIBUTE_WEAK double
TCMalloc_Internal_GetPeakSamplingHeapGrowthFraction();
@@ -37,10 +37,10 @@ ABSL_ATTRIBUTE_WEAK size_t TCMalloc_Internal_GetStats(char* buffer,
ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_SetGuardedSamplingRate(int64_t v);
ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_SetHeapSizeHardLimit(uint64_t v);
ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_SetHPAASubrelease(bool v);
-ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_SetShufflePerCpuCachesEnabled(
- bool v);
-ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_SetReclaimIdlePerCpuCachesEnabled(
- bool v);
+ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_SetShufflePerCpuCachesEnabled(
+ bool v);
+ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_SetReclaimIdlePerCpuCachesEnabled(
+ bool v);
ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_SetLazyPerCpuCachesEnabled(bool v);
ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_SetMaxPerCpuCacheSize(int32_t v);
ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_SetMaxTotalThreadCacheBytes(
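For orientation (not part of the change): these accessors are declared ABSL_ATTRIBUTE_WEAK so a caller can detect at run time whether the strong definitions were linked in, and the conventional guard is a null check on the function before calling it. A minimal sketch, with MaybeSetMaxPerCpuCacheSize and value invented for the example and the declarations above assumed visible:

    #include <cstdint>

    // Sketch only: guard a weak accessor before use.
    void MaybeSetMaxPerCpuCacheSize(int32_t value) {
      if (TCMalloc_Internal_SetMaxPerCpuCacheSize != nullptr) {
        TCMalloc_Internal_SetMaxPerCpuCacheSize(value);
      }
    }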
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/percpu.cc b/contrib/libs/tcmalloc/tcmalloc/internal/percpu.cc
index f8706f0f21..770367f05b 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/percpu.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/percpu.cc
@@ -18,7 +18,7 @@
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
-#include <syscall.h>
+#include <syscall.h>
#include <unistd.h>
#include <atomic>
@@ -28,12 +28,12 @@
#include "absl/base/internal/sysinfo.h"
#include "tcmalloc/internal/linux_syscall_support.h"
#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/internal/optimization.h"
+#include "tcmalloc/internal/optimization.h"
#include "tcmalloc/internal/util.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace subtle {
namespace percpu {
@@ -81,22 +81,22 @@ enum PerCpuInitStatus {
ABSL_CONST_INIT static PerCpuInitStatus init_status = kSlowMode;
ABSL_CONST_INIT static absl::once_flag init_per_cpu_once;
-#if TCMALLOC_PERCPU_USE_RSEQ
-ABSL_CONST_INIT static std::atomic<bool> using_upstream_fence{false};
-#endif // TCMALLOC_PERCPU_USE_RSEQ
-
-// Is this thread's __rseq_abi struct currently registered with the kernel?
-static bool ThreadRegistered() { return RseqCpuId() >= kCpuIdInitialized; }
+#if TCMALLOC_PERCPU_USE_RSEQ
+ABSL_CONST_INIT static std::atomic<bool> using_upstream_fence{false};
+#endif // TCMALLOC_PERCPU_USE_RSEQ
+// Is this thread's __rseq_abi struct currently registered with the kernel?
+static bool ThreadRegistered() { return RseqCpuId() >= kCpuIdInitialized; }
+
static bool InitThreadPerCpu() {
- // If we're already registered, there's nothing further for us to do.
- if (ThreadRegistered()) {
+ // If we're already registered, there's nothing further for us to do.
+ if (ThreadRegistered()) {
return true;
}
#ifdef __NR_rseq
- return 0 == syscall(__NR_rseq, &__rseq_abi, sizeof(__rseq_abi), 0,
- TCMALLOC_PERCPU_RSEQ_SIGNATURE);
+ return 0 == syscall(__NR_rseq, &__rseq_abi, sizeof(__rseq_abi), 0,
+ TCMALLOC_PERCPU_RSEQ_SIGNATURE);
#endif // __NR_rseq
return false;
}
@@ -115,12 +115,12 @@ static void InitPerCpu() {
init_status = kFastMode;
#if TCMALLOC_PERCPU_USE_RSEQ
- constexpr int kMEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = (1 << 8);
- // It is safe to make the syscall below multiple times.
- using_upstream_fence.store(
- 0 == syscall(__NR_membarrier,
- kMEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0, 0),
- std::memory_order_relaxed);
+ constexpr int kMEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = (1 << 8);
+ // It is safe to make the syscall below multiple times.
+ using_upstream_fence.store(
+ 0 == syscall(__NR_membarrier,
+ kMEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0, 0),
+ std::memory_order_relaxed);
#endif // TCMALLOC_PERCPU_USE_RSEQ
}
}
@@ -262,22 +262,22 @@ static void SlowFence(const cpu_set_t* cpus) {
}
}
-#if TCMALLOC_PERCPU_USE_RSEQ
-static void UpstreamRseqFenceCpu(int cpu) {
- ABSL_RAW_CHECK(using_upstream_fence.load(std::memory_order_relaxed),
- "upstream fence unavailable.");
-
- constexpr int kMEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = (1 << 7);
- constexpr int kMEMBARRIER_CMD_FLAG_CPU = (1 << 0);
-
- int64_t res = syscall(__NR_membarrier, kMEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
- kMEMBARRIER_CMD_FLAG_CPU, cpu);
-
- ABSL_RAW_CHECK(res == 0 || res == -ENXIO /* missing CPU */,
- "Upstream fence failed.");
-}
-#endif // TCMALLOC_PERCPU_USE_RSEQ
-
+#if TCMALLOC_PERCPU_USE_RSEQ
+static void UpstreamRseqFenceCpu(int cpu) {
+ ABSL_RAW_CHECK(using_upstream_fence.load(std::memory_order_relaxed),
+ "upstream fence unavailable.");
+
+ constexpr int kMEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = (1 << 7);
+ constexpr int kMEMBARRIER_CMD_FLAG_CPU = (1 << 0);
+
+ int64_t res = syscall(__NR_membarrier, kMEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
+ kMEMBARRIER_CMD_FLAG_CPU, cpu);
+
+ ABSL_RAW_CHECK(res == 0 || res == -ENXIO /* missing CPU */,
+ "Upstream fence failed.");
+}
+#endif // TCMALLOC_PERCPU_USE_RSEQ
+
// Interrupt every concurrently running sibling thread on any cpu in
// "cpus", and guarantee our writes up til now are visible to every
// other CPU. (cpus == NULL is equivalent to all CPUs.)
@@ -295,17 +295,17 @@ void Fence() {
// Other operations (or all in RSEQ mode) might just be running on another
// CPU. Do something about that: use RSEQ::Fence() to just send interrupts
// and restart any such operation.
-#if TCMALLOC_PERCPU_USE_RSEQ
- if (using_upstream_fence.load(std::memory_order_relaxed)) {
- UpstreamRseqFenceCpu(-1);
- return;
- }
-#endif // TCMALLOC_PERCPU_USE_RSEQ
-
+#if TCMALLOC_PERCPU_USE_RSEQ
+ if (using_upstream_fence.load(std::memory_order_relaxed)) {
+ UpstreamRseqFenceCpu(-1);
+ return;
+ }
+#endif // TCMALLOC_PERCPU_USE_RSEQ
+
FenceInterruptCPUs(nullptr);
}
-void FenceCpu(int cpu, const size_t virtual_cpu_id_offset) {
+void FenceCpu(int cpu, const size_t virtual_cpu_id_offset) {
// Prevent compiler re-ordering of code below. In particular, the call to
// GetCurrentCpu must not appear in assembly program order until after any
// code that comes before FenceCpu in C++ program order.
@@ -313,32 +313,32 @@ void FenceCpu(int cpu, const size_t virtual_cpu_id_offset) {
// A useful fast path: nothing needs doing at all to order us with respect
// to our own CPU.
- if (GetCurrentVirtualCpu(virtual_cpu_id_offset) == cpu) {
+ if (GetCurrentVirtualCpu(virtual_cpu_id_offset) == cpu) {
return;
}
- if (virtual_cpu_id_offset == offsetof(kernel_rseq, vcpu_id)) {
- ASSUME(false);
-
+ if (virtual_cpu_id_offset == offsetof(kernel_rseq, vcpu_id)) {
+ ASSUME(false);
+
// With virtual CPUs, we cannot identify the true physical core we need to
// interrupt.
-#if TCMALLOC_PERCPU_USE_RSEQ
- if (using_upstream_fence.load(std::memory_order_relaxed)) {
- UpstreamRseqFenceCpu(-1);
- return;
- }
-#endif // TCMALLOC_PERCPU_USE_RSEQ
+#if TCMALLOC_PERCPU_USE_RSEQ
+ if (using_upstream_fence.load(std::memory_order_relaxed)) {
+ UpstreamRseqFenceCpu(-1);
+ return;
+ }
+#endif // TCMALLOC_PERCPU_USE_RSEQ
FenceInterruptCPUs(nullptr);
return;
}
-#if TCMALLOC_PERCPU_USE_RSEQ
- if (using_upstream_fence.load(std::memory_order_relaxed)) {
- UpstreamRseqFenceCpu(cpu);
- return;
- }
-#endif // TCMALLOC_PERCPU_USE_RSEQ
-
+#if TCMALLOC_PERCPU_USE_RSEQ
+ if (using_upstream_fence.load(std::memory_order_relaxed)) {
+ UpstreamRseqFenceCpu(cpu);
+ return;
+ }
+#endif // TCMALLOC_PERCPU_USE_RSEQ
+
cpu_set_t set;
CPU_ZERO(&set);
CPU_SET(cpu, &set);
@@ -347,6 +347,6 @@ void FenceCpu(int cpu, const size_t virtual_cpu_id_offset) {
} // namespace percpu
} // namespace subtle
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
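The hunks above wire per-CPU fencing to the membarrier(2) expedited-rseq commands when the kernel offers them (registration in InitPerCpu, the per-CPU fence in UpstreamRseqFenceCpu) and otherwise fall back to interrupting CPUs. A standalone sketch of that kernel interface follows, reusing the raw constants from the code above rather than <linux/membarrier.h>; it assumes a kernel recent enough to support these commands (roughly Linux 5.10+).

    #include <errno.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Constants as defined in the hunks above.
    constexpr int kRegisterPrivateExpeditedRseq = (1 << 8);
    constexpr int kPrivateExpeditedRseq = (1 << 7);
    constexpr int kFlagCpu = (1 << 0);

    // Call once at startup; success means the fast per-CPU fence is usable.
    bool RegisterRseqFence() {
      return syscall(__NR_membarrier, kRegisterPrivateExpeditedRseq, 0, 0) == 0;
    }

    // Restart any rseq critical section currently running on `cpu`.
    bool FenceOneCpu(int cpu) {
      const long res = syscall(__NR_membarrier, kPrivateExpeditedRseq, kFlagCpu, cpu);
      return res == 0 || (res == -1 && errno == ENXIO);  // ENXIO: cpu not present/offline.
    }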
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/percpu.h b/contrib/libs/tcmalloc/tcmalloc/internal/percpu.h
index ad2124e0d1..c5f26c0b92 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/percpu.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/percpu.h
@@ -71,9 +71,9 @@
#endif
#endif // !defined(TCMALLOC_PERCPU_USE_RSEQ)
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace subtle {
namespace percpu {
@@ -89,23 +89,23 @@ extern "C" ABSL_PER_THREAD_TLS_KEYWORD volatile kernel_rseq __rseq_abi;
static inline int RseqCpuId() { return __rseq_abi.cpu_id; }
-static inline int VirtualRseqCpuId(const size_t virtual_cpu_id_offset) {
+static inline int VirtualRseqCpuId(const size_t virtual_cpu_id_offset) {
#ifdef __x86_64__
- ASSERT(virtual_cpu_id_offset == offsetof(kernel_rseq, cpu_id) ||
- virtual_cpu_id_offset == offsetof(kernel_rseq, vcpu_id));
+ ASSERT(virtual_cpu_id_offset == offsetof(kernel_rseq, cpu_id) ||
+ virtual_cpu_id_offset == offsetof(kernel_rseq, vcpu_id));
return *reinterpret_cast<short *>(reinterpret_cast<uintptr_t>(&__rseq_abi) +
- virtual_cpu_id_offset);
+ virtual_cpu_id_offset);
#else
- ASSERT(virtual_cpu_id_offset == offsetof(kernel_rseq, cpu_id));
+ ASSERT(virtual_cpu_id_offset == offsetof(kernel_rseq, cpu_id));
return RseqCpuId();
#endif
}
#else // !TCMALLOC_PERCPU_USE_RSEQ
static inline int RseqCpuId() { return kCpuIdUnsupported; }
-static inline int VirtualRseqCpuId(const size_t virtual_cpu_id_offset) {
- return kCpuIdUnsupported;
-}
+static inline int VirtualRseqCpuId(const size_t virtual_cpu_id_offset) {
+ return kCpuIdUnsupported;
+}
#endif
typedef int (*OverflowHandler)(int cpu, size_t cl, void *item);
@@ -114,39 +114,39 @@ typedef void *(*UnderflowHandler)(int cpu, size_t cl);
// Functions below are implemented in the architecture-specific percpu_rseq_*.S
// files.
extern "C" {
-int TcmallocSlab_Internal_PerCpuCmpxchg64(int target_cpu, intptr_t *p,
- intptr_t old_val, intptr_t new_val);
+int TcmallocSlab_Internal_PerCpuCmpxchg64(int target_cpu, intptr_t *p,
+ intptr_t old_val, intptr_t new_val);
#ifndef __x86_64__
-int TcmallocSlab_Internal_Push(void *ptr, size_t cl, void *item, size_t shift,
- OverflowHandler f);
-int TcmallocSlab_Internal_Push_FixedShift(void *ptr, size_t cl, void *item,
- OverflowHandler f);
-void *TcmallocSlab_Internal_Pop(void *ptr, size_t cl, UnderflowHandler f,
- size_t shift);
-void *TcmallocSlab_Internal_Pop_FixedShift(void *ptr, size_t cl,
- UnderflowHandler f);
+int TcmallocSlab_Internal_Push(void *ptr, size_t cl, void *item, size_t shift,
+ OverflowHandler f);
+int TcmallocSlab_Internal_Push_FixedShift(void *ptr, size_t cl, void *item,
+ OverflowHandler f);
+void *TcmallocSlab_Internal_Pop(void *ptr, size_t cl, UnderflowHandler f,
+ size_t shift);
+void *TcmallocSlab_Internal_Pop_FixedShift(void *ptr, size_t cl,
+ UnderflowHandler f);
#endif // __x86_64__
 // Push a batch for a slab whose Shift is equal to
// TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT
-size_t TcmallocSlab_Internal_PushBatch_FixedShift(void *ptr, size_t cl,
- void **batch, size_t len);
+size_t TcmallocSlab_Internal_PushBatch_FixedShift(void *ptr, size_t cl,
+ void **batch, size_t len);
 // Pop a batch for a slab whose Shift is equal to
// TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT
-size_t TcmallocSlab_Internal_PopBatch_FixedShift(void *ptr, size_t cl,
- void **batch, size_t len);
+size_t TcmallocSlab_Internal_PopBatch_FixedShift(void *ptr, size_t cl,
+ void **batch, size_t len);
#ifdef __x86_64__
-int TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU(int target_cpu, intptr_t *p,
- intptr_t old_val,
- intptr_t new_val);
-size_t TcmallocSlab_Internal_PushBatch_FixedShift_VCPU(void *ptr, size_t cl,
- void **batch,
- size_t len);
-size_t TcmallocSlab_Internal_PopBatch_FixedShift_VCPU(void *ptr, size_t cl,
- void **batch, size_t len);
+int TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU(int target_cpu, intptr_t *p,
+ intptr_t old_val,
+ intptr_t new_val);
+size_t TcmallocSlab_Internal_PushBatch_FixedShift_VCPU(void *ptr, size_t cl,
+ void **batch,
+ size_t len);
+size_t TcmallocSlab_Internal_PopBatch_FixedShift_VCPU(void *ptr, size_t cl,
+ void **batch, size_t len);
#endif
}
@@ -207,15 +207,15 @@ inline int GetCurrentCpu() {
return cpu;
}
-inline int GetCurrentVirtualCpuUnsafe(const size_t virtual_cpu_id_offset) {
- return VirtualRseqCpuId(virtual_cpu_id_offset);
-}
+inline int GetCurrentVirtualCpuUnsafe(const size_t virtual_cpu_id_offset) {
+ return VirtualRseqCpuId(virtual_cpu_id_offset);
+}
-inline int GetCurrentVirtualCpu(const size_t virtual_cpu_id_offset) {
+inline int GetCurrentVirtualCpu(const size_t virtual_cpu_id_offset) {
// We can't use the unsafe version unless we have the appropriate version of
// the rseq extension. This also allows us a convenient escape hatch if the
// kernel changes the way it uses special-purpose registers for CPU IDs.
- int cpu = VirtualRseqCpuId(virtual_cpu_id_offset);
+ int cpu = VirtualRseqCpuId(virtual_cpu_id_offset);
// We open-code the check for fast-cpu availability since we do not want to
   // force initialization in the first-call case. This is done so that we can
@@ -307,18 +307,18 @@ inline void TSANMemoryBarrierOn(void *p) {
// These methods may *only* be called if IsFast() has been called by the current
// thread (and it returned true).
inline int CompareAndSwapUnsafe(int target_cpu, std::atomic<intptr_t> *p,
- intptr_t old_val, intptr_t new_val,
- const size_t virtual_cpu_id_offset) {
+ intptr_t old_val, intptr_t new_val,
+ const size_t virtual_cpu_id_offset) {
TSANMemoryBarrierOn(p);
#if TCMALLOC_PERCPU_USE_RSEQ
- switch (virtual_cpu_id_offset) {
+ switch (virtual_cpu_id_offset) {
case offsetof(kernel_rseq, cpu_id):
- return TcmallocSlab_Internal_PerCpuCmpxchg64(
+ return TcmallocSlab_Internal_PerCpuCmpxchg64(
target_cpu, tcmalloc_internal::atomic_danger::CastToIntegral(p),
old_val, new_val);
#ifdef __x86_64__
case offsetof(kernel_rseq, vcpu_id):
- return TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU(
+ return TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU(
target_cpu, tcmalloc_internal::atomic_danger::CastToIntegral(p),
old_val, new_val);
#endif // __x86_64__
@@ -330,13 +330,13 @@ inline int CompareAndSwapUnsafe(int target_cpu, std::atomic<intptr_t> *p,
#endif // !TCMALLOC_PERCPU_USE_RSEQ
}
-void FenceCpu(int cpu, const size_t virtual_cpu_id_offset);
+void FenceCpu(int cpu, const size_t virtual_cpu_id_offset);
} // namespace percpu
} // namespace subtle
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // !__ASSEMBLER__
#endif // TCMALLOC_INTERNAL_PERCPU_H_
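To tie the declarations in this header together, here is a minimal sketch of driving the raw per-CPU compare-and-swap. It is written as if inside namespace tcmalloc::tcmalloc_internal::subtle::percpu with the header's own includes, the TryPerCpuSwap name is invented, the caller must already have seen IsFast() return true on this thread, and the return convention is taken from the assembly comments later in this diff (the value-mismatch path returns -1, otherwise the cpu the sequence actually ran on).

    // Illustrative sketch only.
    bool TryPerCpuSwap(int target_cpu, std::atomic<intptr_t> *slot,
                       intptr_t old_val, intptr_t new_val) {
      const size_t offset = offsetof(kernel_rseq, cpu_id);
      const int ran_on =
          CompareAndSwapUnsafe(target_cpu, slot, old_val, new_val, offset);
      // -1 means *slot no longer held old_val; any other value is the cpu we
      // actually ran on, and the store only committed if that was target_cpu.
      return ran_on == target_cpu;
    }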
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_aarch64.S b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_aarch64.S
index 3cdaf17835..8abe7c9a08 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_aarch64.S
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_aarch64.S
@@ -70,20 +70,20 @@
#define PINSECTION(label)
#endif
-// A function within a guarded memory region must start with a BTI C
-// instruction.
-// So per ABI that includes any externally visible code label.
-// Using hint to make sure we can use this on targets that support BTI and
-// targets that don't. It will behave as a no-op on targets that do not
-// support BTI or outside a guarded memory region.
-#ifdef __ARM_FEATURE_BTI_DEFAULT
-#define BTI_C hint 34
-#define TAILCALL(x) mov x16, x; br x16
-#else
-#define BTI_C
-#define TAILCALL(x) br x
-#endif
-
+// A function within a guarded memory region must start with a BTI C
+// instruction; per the ABI, that includes any externally visible code label.
+// We use the hint encoding so this assembles on targets with and without BTI
+// support: it behaves as a no-op on targets that do not support BTI or
+// outside a guarded memory region.
+#ifdef __ARM_FEATURE_BTI_DEFAULT
+#define BTI_C hint 34
+#define TAILCALL(x) mov x16, x; br x16
+#else
+#define BTI_C
+#define TAILCALL(x) br x
+#endif
+
// This macro defines:
// * the rseq_cs instance that we'll use for label's critical section.
// * a trampoline to return to when we abort. This label_trampoline is
@@ -115,7 +115,7 @@
.type label##_trampoline, @function; \
label##_trampoline: \
.cfi_startproc; \
- BTI_C; \
+ BTI_C; \
b .L##label##_abort; \
.cfi_endproc; \
.size label##_trampoline, . - label##_trampoline; \
@@ -169,7 +169,7 @@ label##_trampoline: \
* we can not guarantee it will we must save and restore the registers used to
* store the arguments of our functions. The function with most arguments has 5
* arguments, so we save x0-x4 and lr.
- * TODO: Add PAC support because we are spiling LR.
+ * TODO: Add PAC support because we are spilling LR.
*/
#define START_RSEQ(src) \
.L##src##_abort: \
@@ -199,7 +199,7 @@ label##_trampoline: \
/* start of atomic restartable sequences */
/*
- * int TcmallocSlab_Internal_PerCpuCmpxchg64(int target_cpu, long *p,
+ * int TcmallocSlab_Internal_PerCpuCmpxchg64(int target_cpu, long *p,
* long old_val, long new_val)
* w0: target_cpu
* x1: p
@@ -207,30 +207,30 @@ label##_trampoline: \
* x3: new_val
*/
.p2align 6 /* aligns to 2^6 with NOP filling */
- .globl TcmallocSlab_Internal_PerCpuCmpxchg64
- .type TcmallocSlab_Internal_PerCpuCmpxchg64, @function
-TcmallocSlab_Internal_PerCpuCmpxchg64:
+ .globl TcmallocSlab_Internal_PerCpuCmpxchg64
+ .type TcmallocSlab_Internal_PerCpuCmpxchg64, @function
+TcmallocSlab_Internal_PerCpuCmpxchg64:
.cfi_startproc
- BTI_C
- START_RSEQ(TcmallocSlab_Internal_PerCpuCmpxchg64)
+ BTI_C
+ START_RSEQ(TcmallocSlab_Internal_PerCpuCmpxchg64)
FETCH_CPU(w4)
cmp w0, w4 /* check cpu vs current_cpu */
- bne .LTcmallocSlab_Internal_PerCpuCmpxchg64_commit
+ bne .LTcmallocSlab_Internal_PerCpuCmpxchg64_commit
ldr x6, [x1]
cmp x6, x2 /* verify *p == old */
- bne .LTcmallocSlab_Internal_PerCpuCmpxchg64_mismatch
+ bne .LTcmallocSlab_Internal_PerCpuCmpxchg64_mismatch
str x3, [x1]
-.LTcmallocSlab_Internal_PerCpuCmpxchg64_commit:
+.LTcmallocSlab_Internal_PerCpuCmpxchg64_commit:
mov x0, x4
ret /* return current cpu, indicating mismatch OR success */
-.LTcmallocSlab_Internal_PerCpuCmpxchg64_mismatch:
+.LTcmallocSlab_Internal_PerCpuCmpxchg64_mismatch:
mov x0, #-1 /* mismatch versus "old" or "check", return -1 */
ret
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PerCpuCmpxchg64)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PerCpuCmpxchg64)
+ENCODE_SIZE(TcmallocSlab_Internal_PerCpuCmpxchg64)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PerCpuCmpxchg64)
-/* size_t TcmallocSlab_Internal_PushBatch_FixedShift(
+/* size_t TcmallocSlab_Internal_PushBatch_FixedShift(
* void *ptr (x0),
* size_t cl (w1),
* void** batch (x2),
@@ -255,12 +255,12 @@ DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PerCpuCmpxchg64)
* }
*/
.p2align 6 /* aligns to 2^6 with NOP filling */
- .globl TcmallocSlab_Internal_PushBatch_FixedShift
- .type TcmallocSlab_Internal_PushBatch_FixedShift, @function
-TcmallocSlab_Internal_PushBatch_FixedShift:
+ .globl TcmallocSlab_Internal_PushBatch_FixedShift
+ .type TcmallocSlab_Internal_PushBatch_FixedShift, @function
+TcmallocSlab_Internal_PushBatch_FixedShift:
.cfi_startproc
- BTI_C
- START_RSEQ(TcmallocSlab_Internal_PushBatch_FixedShift)
+ BTI_C
+ START_RSEQ(TcmallocSlab_Internal_PushBatch_FixedShift)
FETCH_CPU(w8)
lsl x8, x8, #TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT /* multiply cpu by 256k */
add x8, x0, x8
@@ -268,7 +268,7 @@ TcmallocSlab_Internal_PushBatch_FixedShift:
ldrh w9, [x4] /* r9 = current */
ldrh w10, [x4, #6] /* r10 = end */
cmp w9, w10
- bge .LTcmallocSlab_Internal_PushBatch_FixedShift_no_capacity
+ bge .LTcmallocSlab_Internal_PushBatch_FixedShift_no_capacity
add x11, x2, x3, LSL #3 /* r11 = batch + len * 8 */
sub w10, w10, w9 /* r10 = free capacity */
cmp w3, w10
@@ -277,24 +277,24 @@ TcmallocSlab_Internal_PushBatch_FixedShift:
add x13, x9, x10 /* r13 = current + amount we are pushing. */
add x9, x8, x9, LSL #3 /* r9 = current cpu slab stack */
add x14, x8, x13, LSL #3 /* r14 = new current address */
-.LTcmallocSlab_Internal_PushBatch_FixedShift_loop:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_loop:
ldr x12, [x11, #-8]! /* r12 = [--r11] */
str x12, [x9], #8 /* [r9++] = r12 */
cmp x9, x14 /* if current cpu slab address == new current
address */
- bne .LTcmallocSlab_Internal_PushBatch_FixedShift_loop
+ bne .LTcmallocSlab_Internal_PushBatch_FixedShift_loop
strh w13, [x4] /* store new current index */
-.LTcmallocSlab_Internal_PushBatch_FixedShift_commit:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_commit:
mov x0, x10
ret
-.LTcmallocSlab_Internal_PushBatch_FixedShift_no_capacity:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_no_capacity:
mov x0, #0
ret
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PushBatch_FixedShift)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PushBatch_FixedShift)
+ENCODE_SIZE(TcmallocSlab_Internal_PushBatch_FixedShift)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PushBatch_FixedShift)
-/* size_t TcmallocSlab_Internal_PopBatch_FixedShift(
+/* size_t TcmallocSlab_Internal_PopBatch_FixedShift(
* void *ptr (x0),
* size_t cl (w1),
* void** batch (x2),
@@ -319,12 +319,12 @@ DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PushBatch_FixedShift)
* }
*/
.p2align 6 /* aligns to 2^6 with NOP filling */
- .globl TcmallocSlab_Internal_PopBatch_FixedShift
- .type TcmallocSlab_Internal_PopBatch_FixedShift, @function
-TcmallocSlab_Internal_PopBatch_FixedShift:
+ .globl TcmallocSlab_Internal_PopBatch_FixedShift
+ .type TcmallocSlab_Internal_PopBatch_FixedShift, @function
+TcmallocSlab_Internal_PopBatch_FixedShift:
.cfi_startproc
- BTI_C
- START_RSEQ(TcmallocSlab_Internal_PopBatch_FixedShift)
+ BTI_C
+ START_RSEQ(TcmallocSlab_Internal_PopBatch_FixedShift)
FETCH_CPU(w8)
lsl x8, x8, #TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT /* multiply cpu by 256k */
add x8, x0, x8
@@ -332,7 +332,7 @@ TcmallocSlab_Internal_PopBatch_FixedShift:
ldrh w9, [x4] /* current */
ldrh w10, [x4, #4] /* begin */
cmp w10, w9
- bhs .LTcmallocSlab_Internal_PopBatch_FixedShift_no_items
+ bhs .LTcmallocSlab_Internal_PopBatch_FixedShift_no_items
sub w11, w9, w10 /* r11 = available items */
cmp w3, w11
csel w11, w3, w11, ls /* r11 = min(len, available items), amount we are
@@ -341,27 +341,27 @@ TcmallocSlab_Internal_PopBatch_FixedShift:
sub x9, x9, x11 /* update new current */
mov x12, x2 /* r12 = batch */
add x14, x2, x11, LSL #3 /* r14 = batch + amount we are popping*8 */
-.LTcmallocSlab_Internal_PopBatch_FixedShift_loop:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_loop:
ldr x10, [x13, #-8]! /* r10 = [--r13] */
str x10, [x12], #8 /* [r12++] = r10 */
cmp x12, x14 /* if current batch == batch + amount we are
popping */
- bne .LTcmallocSlab_Internal_PopBatch_FixedShift_loop
+ bne .LTcmallocSlab_Internal_PopBatch_FixedShift_loop
strh w9, [x4] /* store new current */
-.LTcmallocSlab_Internal_PopBatch_FixedShift_commit:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_commit:
mov x0, x11
ret
-.LTcmallocSlab_Internal_PopBatch_FixedShift_no_items:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_no_items:
mov x0, #0
ret
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PopBatch_FixedShift)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PopBatch_FixedShift)
+ENCODE_SIZE(TcmallocSlab_Internal_PopBatch_FixedShift)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PopBatch_FixedShift)
- .globl TcmallocSlab_Internal_Push
- .type TcmallocSlab_Internal_Push, @function
-TcmallocSlab_Internal_Push:
-.LTcmallocSlab_Internal_Push_entry:
+ .globl TcmallocSlab_Internal_Push
+ .type TcmallocSlab_Internal_Push, @function
+TcmallocSlab_Internal_Push:
+.LTcmallocSlab_Internal_Push_entry:
.cfi_startproc
// Arguments use:
// * x0: (Argument: Slabs*) cpu_0_slab_ptr
@@ -372,8 +372,8 @@ TcmallocSlab_Internal_Push:
// Return value: current CPU
// Available x5-x15
- BTI_C
- START_RSEQ(TcmallocSlab_Internal_Push)
+ BTI_C
+ START_RSEQ(TcmallocSlab_Internal_Push)
FETCH_CPU(w8)
lsl x9, x8, x3
add x9, x0, x9
@@ -381,25 +381,25 @@ TcmallocSlab_Internal_Push:
ldrh w12, [x10] /* current */
ldrh w11, [x10, #6] /* end */
cmp w11, w12
- ble .LTcmallocSlab_Internal_Push_no_capacity
+ ble .LTcmallocSlab_Internal_Push_no_capacity
str x2, [x9, x12, LSL #3]
add w12, w12, #1
strh w12, [x10]
-.LTcmallocSlab_Internal_Push_commit:
+.LTcmallocSlab_Internal_Push_commit:
mov x0, x8
ret
-.LTcmallocSlab_Internal_Push_no_capacity:
+.LTcmallocSlab_Internal_Push_no_capacity:
mov x0, x8
- TAILCALL(x4)
-.LTcmallocSlab_Internal_Push_region3:
+ TAILCALL(x4)
+.LTcmallocSlab_Internal_Push_region3:
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_Push)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Push)
+ENCODE_SIZE(TcmallocSlab_Internal_Push)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Push)
- .globl TcmallocSlab_Internal_Push_FixedShift
- .type TcmallocSlab_Internal_Push_FixedShift, @function
-TcmallocSlab_Internal_Push_FixedShift:
+ .globl TcmallocSlab_Internal_Push_FixedShift
+ .type TcmallocSlab_Internal_Push_FixedShift, @function
+TcmallocSlab_Internal_Push_FixedShift:
.cfi_startproc
// Arguments use:
// * x0: (Argument: Slabs*) cpu_0_slab_ptr
@@ -409,8 +409,8 @@ TcmallocSlab_Internal_Push_FixedShift:
// Return value: current CPU
// Available x4-x15
- BTI_C
- START_RSEQ(TcmallocSlab_Internal_Push_FixedShift)
+ BTI_C
+ START_RSEQ(TcmallocSlab_Internal_Push_FixedShift)
FETCH_CPU(w8)
lsl x9, x8, #TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT
add x9, x0, x9
@@ -418,23 +418,23 @@ TcmallocSlab_Internal_Push_FixedShift:
ldrh w12, [x10] /* current */
ldrh w11, [x10, #6] /* end */
cmp w11, w12
- ble .LTcmallocSlab_Internal_Push_FixedShift_no_capacity
+ ble .LTcmallocSlab_Internal_Push_FixedShift_no_capacity
str x2, [x9, x12, LSL #3]
add w12, w12, #1
strh w12, [x10]
-.LTcmallocSlab_Internal_Push_FixedShift_commit:
+.LTcmallocSlab_Internal_Push_FixedShift_commit:
mov x0, x8
ret
-.LTcmallocSlab_Internal_Push_FixedShift_no_capacity:
+.LTcmallocSlab_Internal_Push_FixedShift_no_capacity:
mov x0, x8
- TAILCALL(x3)
+ TAILCALL(x3)
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_Push_FixedShift)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Push_FixedShift)
+ENCODE_SIZE(TcmallocSlab_Internal_Push_FixedShift)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Push_FixedShift)
- .globl TcmallocSlab_Internal_Pop_FixedShift
- .type TcmallocSlab_Internal_Pop_FixedShift, @function
-TcmallocSlab_Internal_Pop_FixedShift:
+ .globl TcmallocSlab_Internal_Pop_FixedShift
+ .type TcmallocSlab_Internal_Pop_FixedShift, @function
+TcmallocSlab_Internal_Pop_FixedShift:
.cfi_startproc
// Arguments use:
// * x0: (Argument: Slabs*) cpu_0_slab_ptr
@@ -443,8 +443,8 @@ TcmallocSlab_Internal_Pop_FixedShift:
// Return value: current CPU
// Available x3-x15
- BTI_C
- START_RSEQ(TcmallocSlab_Internal_Pop_FixedShift)
+ BTI_C
+ START_RSEQ(TcmallocSlab_Internal_Pop_FixedShift)
FETCH_CPU(w8) /* r8 = CPU */
lsl x9, x8, #TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT
/* r9 = CPU shifted */
@@ -453,23 +453,23 @@ TcmallocSlab_Internal_Pop_FixedShift:
ldrh w12, [x10] /* r12 = current index */
ldrh w11, [x10, #4] /* r11 = begin index */
cmp w11, w12 /* if begin >= current */
- bge .LTcmallocSlab_Internal_Pop_FixedShift_no_items
+ bge .LTcmallocSlab_Internal_Pop_FixedShift_no_items
sub w12, w12, #1 /* r12 = current-- */
ldr x3, [x9, x12, LSL #3] /* r3 = [start + current * 8] */
strh w12, [x10] /* store new current index */
-.LTcmallocSlab_Internal_Pop_FixedShift_commit:
+.LTcmallocSlab_Internal_Pop_FixedShift_commit:
mov x0, x3 /* return popped item */
ret
-.LTcmallocSlab_Internal_Pop_FixedShift_no_items:
+.LTcmallocSlab_Internal_Pop_FixedShift_no_items:
mov x0, x8 /* call overflow handler with CPU ID */
- TAILCALL(x2)
+ TAILCALL(x2)
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_Pop_FixedShift)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Pop_FixedShift)
+ENCODE_SIZE(TcmallocSlab_Internal_Pop_FixedShift)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Pop_FixedShift)
- .globl TcmallocSlab_Internal_Pop
- .type TcmallocSlab_Internal_Pop, @function
-TcmallocSlab_Internal_Pop:
+ .globl TcmallocSlab_Internal_Pop
+ .type TcmallocSlab_Internal_Pop, @function
+TcmallocSlab_Internal_Pop:
.cfi_startproc
// Arguments use:
// * x0: (Argument: Slabs*) cpu_0_slab_ptr
@@ -479,8 +479,8 @@ TcmallocSlab_Internal_Pop:
// Return value: Value
// Available x4-x15
- BTI_C
- START_RSEQ(TcmallocSlab_Internal_Pop)
+ BTI_C
+ START_RSEQ(TcmallocSlab_Internal_Pop)
FETCH_CPU(w8) /* r8 = CPU ID */
lsl x9, x8, x3 /* x9 = CPU shifted by (r3) */
add x9, x0, x9 /* x9 = start of this CPU region */
@@ -488,37 +488,37 @@ TcmallocSlab_Internal_Pop:
ldrh w12, [x10] /* r12 = current index */
ldrh w11, [x10, #4] /* x11 = begin index */
cmp w11, w12 /* if begin >= current */
- bge .LTcmallocSlab_Internal_Pop_no_items
+ bge .LTcmallocSlab_Internal_Pop_no_items
sub w12, w12, #1 /* r12 = current-- */
ldr x4, [x9, x12, LSL #3] /* r4 = [start + current * 8] */
strh w12, [x10] /* update current index */
-.LTcmallocSlab_Internal_Pop_commit:
+.LTcmallocSlab_Internal_Pop_commit:
mov x0, x4 /* return popped item */
ret
-.LTcmallocSlab_Internal_Pop_no_items:
+.LTcmallocSlab_Internal_Pop_no_items:
mov x0, x8 /* call overflow handler with CPU ID */
- TAILCALL(x2)
+ TAILCALL(x2)
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_Pop)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Pop)
+ENCODE_SIZE(TcmallocSlab_Internal_Pop)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Pop)
.section .note.GNU-stack,"",@progbits
-
-/* Add a NT_GNU_PROPERTY_TYPE_0 note. */
-#define GNU_PROPERTY(type, value) \
- .section .note.gnu.property, "a"; \
- .p2align 3; \
- .word 4; \
- .word 16; \
- .word 5; \
- .asciz "GNU"; \
- .word type; \
- .word 4; \
- .word value; \
- .word 0;
-
-/* Add GNU property note if built with branch protection. */
-
-#if defined(__ARM_FEATURE_BTI_DEFAULT)
-GNU_PROPERTY (0xc0000000, 1)
-#endif
+
+/* Add a NT_GNU_PROPERTY_TYPE_0 note. */
+#define GNU_PROPERTY(type, value) \
+ .section .note.gnu.property, "a"; \
+ .p2align 3; \
+ .word 4; \
+ .word 16; \
+ .word 5; \
+ .asciz "GNU"; \
+ .word type; \
+ .word 4; \
+ .word value; \
+ .word 0;
+
+/* Add GNU property note if built with branch protection. */
+
+#if defined(__ARM_FEATURE_BTI_DEFAULT)
+GNU_PROPERTY (0xc0000000, 1)
+#endif
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_ppc.S b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_ppc.S
index 234f28c2e7..4a63738446 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_ppc.S
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_ppc.S
@@ -223,13 +223,13 @@ label##_trampoline: \
#endif
////////////////////////////////////////////////////////////////////////
-// TcmallocSlab_Internal_PerCpuCmpxchg64
+// TcmallocSlab_Internal_PerCpuCmpxchg64
////////////////////////////////////////////////////////////////////////
-.globl TcmallocSlab_Internal_PerCpuCmpxchg64
-.type TcmallocSlab_Internal_PerCpuCmpxchg64, @function
-TcmallocSlab_Internal_PerCpuCmpxchg64:
-.LTcmallocSlab_Internal_PerCpuCmpxchg64_entry:
+.globl TcmallocSlab_Internal_PerCpuCmpxchg64
+.type TcmallocSlab_Internal_PerCpuCmpxchg64, @function
+TcmallocSlab_Internal_PerCpuCmpxchg64:
+.LTcmallocSlab_Internal_PerCpuCmpxchg64_entry:
.cfi_startproc
// Register use:
//
@@ -241,7 +241,7 @@ TcmallocSlab_Internal_PerCpuCmpxchg64:
// * r8: The current value of *p.
//
- START_RSEQ(TcmallocSlab_Internal_PerCpuCmpxchg64)
+ START_RSEQ(TcmallocSlab_Internal_PerCpuCmpxchg64)
// Are we running on the target CPU?
GET_CPU(%r7)
@@ -257,7 +257,7 @@ TcmallocSlab_Internal_PerCpuCmpxchg64:
// Store the new value, committing the operation.
std %r6, 0(%r4)
-.LTcmallocSlab_Internal_PerCpuCmpxchg64_critical_limit:
+.LTcmallocSlab_Internal_PerCpuCmpxchg64_critical_limit:
// Return the target CPU, which is already in r3.
blr
@@ -272,20 +272,20 @@ TcmallocSlab_Internal_PerCpuCmpxchg64:
li %r3, -1
blr
-.LTcmallocSlab_Internal_PerCpuCmpxchg64_function_limit:
+.LTcmallocSlab_Internal_PerCpuCmpxchg64_function_limit:
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PerCpuCmpxchg64);
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PerCpuCmpxchg64);
+ENCODE_SIZE(TcmallocSlab_Internal_PerCpuCmpxchg64);
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PerCpuCmpxchg64);
////////////////////////////////////////////////////////////////////////
-// TcmallocSlab_Internal_Push
+// TcmallocSlab_Internal_Push
////////////////////////////////////////////////////////////////////////
-.globl TcmallocSlab_Internal_Push
-.type TcmallocSlab_Internal_Push, @function
-TcmallocSlab_Internal_Push:
-.LTcmallocSlab_Internal_Push_entry:
+.globl TcmallocSlab_Internal_Push
+.type TcmallocSlab_Internal_Push, @function
+TcmallocSlab_Internal_Push:
+.LTcmallocSlab_Internal_Push_entry:
.cfi_startproc
// Arguments use:
// * r3: (Argument: Slabs*) cpu_0_slab_ptr
@@ -298,7 +298,7 @@ TcmallocSlab_Internal_Push:
// Note that r12 may be overwritten in rseq_restart_address_internal so
// cannot be relied upon across restartable sequence boundaries.
- START_RSEQ(TcmallocSlab_Internal_Push)
+ START_RSEQ(TcmallocSlab_Internal_Push)
GET_CPU(%r8) // r8 = current CPU, includes MASK operation
sld %r9, %r8, %r6 // r9 = r8 << shift (r6)
@@ -308,34 +308,34 @@ TcmallocSlab_Internal_Push:
lhz %r12, 0(%r10) // r12 = current index
lhz %r11, 6(%r10) // r11 = length
cmpld %cr7, %r11, %r12 // compare current index with length
- ble %cr7, .LTcmallocSlab_Internal_Push_no_capacity
+ ble %cr7, .LTcmallocSlab_Internal_Push_no_capacity
rldicr %r11, %r12, 3, 60 // r11 = offset of current index
addi %r12, %r12, 1 // current index += 1
stdx %r5, %r9, %r11 // store pointer p (r5) into current offset
sth %r12, 0(%r10) // update current index
-.LTcmallocSlab_Internal_Push_critical_limit:
+.LTcmallocSlab_Internal_Push_critical_limit:
mr %r3, %r8 // Return current CPU in r3
blr
-.LTcmallocSlab_Internal_Push_no_capacity:
+.LTcmallocSlab_Internal_Push_no_capacity:
mr %r3, %r8 // Place current CPU in r3
// r7 already contains target function
b .LPushOverflowTrampoline
-.LTcmallocSlab_Internal_Push_function_limit:
+.LTcmallocSlab_Internal_Push_function_limit:
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_Push);
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Push);
+ENCODE_SIZE(TcmallocSlab_Internal_Push);
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Push);
////////////////////////////////////////////////////////////////////////
-// TcmallocSlab_Internal_Push_FixedShift
+// TcmallocSlab_Internal_Push_FixedShift
////////////////////////////////////////////////////////////////////////
-.globl TcmallocSlab_Internal_Push_FixedShift
-.type TcmallocSlab_Internal_Push_FixedShift, @function
-TcmallocSlab_Internal_Push_FixedShift:
-.LTcmallocSlab_Internal_Push_FixedShift_entry:
+.globl TcmallocSlab_Internal_Push_FixedShift
+.type TcmallocSlab_Internal_Push_FixedShift, @function
+TcmallocSlab_Internal_Push_FixedShift:
+.LTcmallocSlab_Internal_Push_FixedShift_entry:
.cfi_startproc
// Arguments use:
// * r3: (Argument: Slabs*) cpu_0_slab_ptr
@@ -343,7 +343,7 @@ TcmallocSlab_Internal_Push_FixedShift:
// * r5: (Argument: uintptr_t) p
// * r6: (Argument: uintptr_t) f
- START_RSEQ(TcmallocSlab_Internal_Push_FixedShift)
+ START_RSEQ(TcmallocSlab_Internal_Push_FixedShift)
GET_CPU_UNMASKED(%r7) // r7 = unmasked CPU
// Mask upper 52 bits of %r7 and shift left in single
@@ -356,35 +356,35 @@ TcmallocSlab_Internal_Push_FixedShift:
lhz %r10, 0(%r9) // r10 = current index
lhz %r11, 6(%r9) // r11 = end index
cmpld %cr7, %r11, %r10 // Check for space
- ble %cr7, .LTcmallocSlab_Internal_Push_FixedShift_no_capacity
+ ble %cr7, .LTcmallocSlab_Internal_Push_FixedShift_no_capacity
rldicr %r11, %r10, 3, 60 // r11 = offset of current index
addi %r10, %r10, 1 // current index ++
stdx %r5, %r8, %r11 // store the item (from r5)
sth %r10, 0(%r9) // store current index
-.LTcmallocSlab_Internal_Push_FixedShift_critical_limit:
+.LTcmallocSlab_Internal_Push_FixedShift_critical_limit:
MASK_CPU(%r3, %r7) // Return and mask CPU into %r3
blr
-.LTcmallocSlab_Internal_Push_FixedShift_no_capacity:
+.LTcmallocSlab_Internal_Push_FixedShift_no_capacity:
MASK_CPU(%r3, %r7) // Move and mask CPU into %r3
mr %r7, %r6 // Move target function into r7
b .LPushOverflowTrampoline
-.LTcmallocSlab_Internal_Push_FixedShift_function_limit:
+.LTcmallocSlab_Internal_Push_FixedShift_function_limit:
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_Push_FixedShift);
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Push_FixedShift);
+ENCODE_SIZE(TcmallocSlab_Internal_Push_FixedShift);
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Push_FixedShift);
////////////////////////////////////////////////////////////////////////
-// TcmallocSlab_Internal_Pop
+// TcmallocSlab_Internal_Pop
////////////////////////////////////////////////////////////////////////
-.globl TcmallocSlab_Internal_Pop
-.type TcmallocSlab_Internal_Pop, @function
-TcmallocSlab_Internal_Pop:
-.LTcmallocSlab_Internal_Pop_entry:
+.globl TcmallocSlab_Internal_Pop
+.type TcmallocSlab_Internal_Pop, @function
+TcmallocSlab_Internal_Pop:
+.LTcmallocSlab_Internal_Pop_entry:
.cfi_startproc
// Arguments use:
// * r3: (Argument: Slabs*) cpu_0_slab_ptr
@@ -394,7 +394,7 @@ TcmallocSlab_Internal_Pop:
// Available r7 r8 r9 r10 r11
// r12 can be used as a temporary within rseq
- START_RSEQ(TcmallocSlab_Internal_Pop)
+ START_RSEQ(TcmallocSlab_Internal_Pop)
GET_CPU(%r7) // r7 = CPU, includes mask operation
sld %r12, %r7, %r6 // r12 = CPU shifted by shift (r6)
@@ -404,41 +404,41 @@ TcmallocSlab_Internal_Pop:
lhz %r9, 0(%r8) // r9 = current index
lhz %r10, 4(%r8) // r10 = begin
cmpld %cr7, %r10, %r9 // Check that we have items to pop
- bge %cr7, .LTcmallocSlab_Internal_Pop_no_item
+ bge %cr7, .LTcmallocSlab_Internal_Pop_no_item
subi %r9, %r9, 1 // r9 = current index --
rldicr %r10, %r9, 3, 60 // r10 = offset to current item
ldx %r11, %r12, %r10 // load the item from base + index
sth %r9, 0(%r8) // store current index
-.LTcmallocSlab_Internal_Pop_critical_limit:
+.LTcmallocSlab_Internal_Pop_critical_limit:
// Move the item into r3, now that it's safe to do so.
mr %r3, %r11
blr
-.LTcmallocSlab_Internal_Pop_no_item:
+.LTcmallocSlab_Internal_Pop_no_item:
mr %r3, %r7 // Place CPU into r3
b .LPopUnderflowTrampoline
-.LTcmallocSlab_Internal_Pop_function_limit:
+.LTcmallocSlab_Internal_Pop_function_limit:
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_Pop);
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Pop);
+ENCODE_SIZE(TcmallocSlab_Internal_Pop);
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Pop);
////////////////////////////////////////////////////////////////////////
-// TcmallocSlab_Internal_Pop_FixedShift
+// TcmallocSlab_Internal_Pop_FixedShift
////////////////////////////////////////////////////////////////////////
-.globl TcmallocSlab_Internal_Pop_FixedShift
-.type TcmallocSlab_Internal_Pop_FixedShift, @function
-TcmallocSlab_Internal_Pop_FixedShift:
-.LTcmallocSlab_Internal_Pop_FixedShift_entry:
+.globl TcmallocSlab_Internal_Pop_FixedShift
+.type TcmallocSlab_Internal_Pop_FixedShift, @function
+TcmallocSlab_Internal_Pop_FixedShift:
+.LTcmallocSlab_Internal_Pop_FixedShift_entry:
.cfi_startproc
// Arguments use:
// * r3: (Argument: Slabs*) cpu_0_slab_ptr
// * r4: (Argument: uintptr_t) cl
// * r5: (Argument: uintptr_t) f
- START_RSEQ(TcmallocSlab_Internal_Pop_FixedShift)
+ START_RSEQ(TcmallocSlab_Internal_Pop_FixedShift)
GET_CPU_UNMASKED(%r6) // r6 = current CPU
// Following instruction combines mask and shift
@@ -450,34 +450,34 @@ TcmallocSlab_Internal_Pop_FixedShift:
lhz %r9, 0(%r8) // r9 = current index
lhz %r10, 4(%r8) // r10 = begin index
cmpld %cr7, %r10, %r9 // Check that there are elements available
- bge %cr7, .LTcmallocSlab_Internal_Pop_FixedShift_no_item
+ bge %cr7, .LTcmallocSlab_Internal_Pop_FixedShift_no_item
subi %r9, %r9, 1 // current index --
rldicr %r10, %r9, 3, 60 // r10 = offset of current index
ldx %r11, %r7, %r10 // r11 = load the item
sth %r9, 0(%r8) // update current index
-.LTcmallocSlab_Internal_Pop_FixedShift_critical_limit:
+.LTcmallocSlab_Internal_Pop_FixedShift_critical_limit:
// Move the item into r3, now that it's safe to do so.
mr %r3, %r11
blr
-.LTcmallocSlab_Internal_Pop_FixedShift_no_item:
+.LTcmallocSlab_Internal_Pop_FixedShift_no_item:
MASK_CPU(%r3, %r6) // Extract CPU from unmasked value in %r6
b .LPopUnderflowTrampoline
-.LTcmallocSlab_Internal_Pop_FixedShift_function_limit:
+.LTcmallocSlab_Internal_Pop_FixedShift_function_limit:
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_Pop_FixedShift);
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Pop_FixedShift);
+ENCODE_SIZE(TcmallocSlab_Internal_Pop_FixedShift);
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_Pop_FixedShift);
////////////////////////////////////////////////////////////////////////
-// TcmallocSlab_Internal_PushBatch_FixedShift
+// TcmallocSlab_Internal_PushBatch_FixedShift
////////////////////////////////////////////////////////////////////////
-.globl TcmallocSlab_Internal_PushBatch_FixedShift
-.type TcmallocSlab_Internal_PushBatch_FixedShift, @function
-TcmallocSlab_Internal_PushBatch_FixedShift:
-.LTcmallocSlab_Internal_PushBatch_FixedShift_entry:
+.globl TcmallocSlab_Internal_PushBatch_FixedShift
+.type TcmallocSlab_Internal_PushBatch_FixedShift, @function
+TcmallocSlab_Internal_PushBatch_FixedShift:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_entry:
.cfi_startproc
// Arguments use:
// * r3: (Argument: Slabs*) cpu_0_slab_ptr
@@ -485,7 +485,7 @@ TcmallocSlab_Internal_PushBatch_FixedShift:
// * r5: (Argument: uintptr_t) batch
// * r6: (Argument: uintptr_t) len
- START_RSEQ(TcmallocSlab_Internal_PushBatch_FixedShift)
+ START_RSEQ(TcmallocSlab_Internal_PushBatch_FixedShift)
GET_CPU_UNMASKED(%r7)
clrlsldi %r8, %r7, 52, TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT
@@ -496,13 +496,13 @@ TcmallocSlab_Internal_PushBatch_FixedShift:
lhz %r11, 6(%r9) // r11 - end
sldi %r7, %r6, 3 // r7 - len * 8
cmpld %cr7, %r11, %r10 // current < end?
- ble %cr7, .LTcmallocSlab_Internal_PushBatch_FixedShift_critical_limit
+ ble %cr7, .LTcmallocSlab_Internal_PushBatch_FixedShift_critical_limit
sub %r11, %r11, %r10 // r11 - available capacity
// r11 = min(r11, r6)
cmpld %cr7, %r6, %r11
- bge %cr7, .LTcmallocSlab_Internal_PushBatch_FixedShift_min
+ bge %cr7, .LTcmallocSlab_Internal_PushBatch_FixedShift_min
mr %r11, %r6
-.LTcmallocSlab_Internal_PushBatch_FixedShift_min:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_min:
add %r11, %r10, %r11
sldi %r11, %r11, 3
sldi %r10, %r10, 3
@@ -510,35 +510,35 @@ TcmallocSlab_Internal_PushBatch_FixedShift:
// At this point:
// r5 - batch, r7 - offset in the batch
// r8 - cpu region, r10 - offset into the cpu region, r11 - limit of offset
-.LTcmallocSlab_Internal_PushBatch_FixedShift_loop:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_loop:
subi %r7, %r7, 8
ldx %r12, %r5, %r7 // load the item
stdx %r12, %r8, %r10 // store the item
addi %r10, %r10, 8
cmpld %cr7, %r10, %r11
- bne %cr7, .LTcmallocSlab_Internal_PushBatch_FixedShift_loop
+ bne %cr7, .LTcmallocSlab_Internal_PushBatch_FixedShift_loop
rotrdi %r10, %r10, 3
sth %r10, 0(%r9) // update current
-.LTcmallocSlab_Internal_PushBatch_FixedShift_critical_limit:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_critical_limit:
// return r6 - r7 / 8
rotrdi %r7, %r7, 3
sub %r3, %r6, %r7
blr
-.LTcmallocSlab_Internal_PushBatch_FixedShift_function_limit:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_function_limit:
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PushBatch_FixedShift);
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PushBatch_FixedShift);
+ENCODE_SIZE(TcmallocSlab_Internal_PushBatch_FixedShift);
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PushBatch_FixedShift);
////////////////////////////////////////////////////////////////////////
-// TcmallocSlab_Internal_PopBatch_FixedShift
+// TcmallocSlab_Internal_PopBatch_FixedShift
////////////////////////////////////////////////////////////////////////
-.globl TcmallocSlab_Internal_PopBatch_FixedShift
-.type TcmallocSlab_Internal_PopBatch_FixedShift, @function
-TcmallocSlab_Internal_PopBatch_FixedShift:
-.LTcmallocSlab_Internal_PopBatch_FixedShift_entry:
+.globl TcmallocSlab_Internal_PopBatch_FixedShift
+.type TcmallocSlab_Internal_PopBatch_FixedShift, @function
+TcmallocSlab_Internal_PopBatch_FixedShift:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_entry:
.cfi_startproc
// Arguments use:
// * r3: (Argument: Slabs*) cpu_0_slab_ptr
@@ -546,7 +546,7 @@ TcmallocSlab_Internal_PopBatch_FixedShift:
// * r5: (Argument: uintptr_t) batch
// * r6: (Argument: uintptr_t) len
- START_RSEQ(TcmallocSlab_Internal_PopBatch_FixedShift)
+ START_RSEQ(TcmallocSlab_Internal_PopBatch_FixedShift)
GET_CPU_UNMASKED(%r7)
clrlsldi %r7, %r7, 52, TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT
@@ -557,13 +557,13 @@ TcmallocSlab_Internal_PopBatch_FixedShift:
lhz %r10, 4(%r8) // r10 - begin
li %r11, 0 // current position in batch
cmpld %cr7, %r10, %r9
- bge %cr7, .LTcmallocSlab_Internal_PopBatch_FixedShift_critical_limit
+ bge %cr7, .LTcmallocSlab_Internal_PopBatch_FixedShift_critical_limit
sub %r10, %r9, %r10 // r10 - available items
// r10 = min(r10, r6)
cmpld %cr7, %r6, %r10
- bge %cr7, .LTcmallocSlab_Internal_PopBatch_FixedShift_min
+ bge %cr7, .LTcmallocSlab_Internal_PopBatch_FixedShift_min
mr %r10, %r6
-.LTcmallocSlab_Internal_PopBatch_FixedShift_min:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_min:
sub %r10, %r9, %r10
sldi %r10, %r10, 3
sldi %r9, %r9, 3
@@ -571,24 +571,24 @@ TcmallocSlab_Internal_PopBatch_FixedShift:
// At this point:
// r5 - batch, r11 - offset in the batch
// r7 - cpu region, r9 - offset into the cpu region, r10 - limit of offset
-.LTcmallocSlab_Internal_PopBatch_FixedShift_loop:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_loop:
subi %r9, %r9, 8
ldx %r12, %r7, %r9 // load the item
stdx %r12, %r5, %r11 // store the item
addi %r11, %r11, 8
cmpld %cr7, %r9, %r10
- bne %cr7, .LTcmallocSlab_Internal_PopBatch_FixedShift_loop
+ bne %cr7, .LTcmallocSlab_Internal_PopBatch_FixedShift_loop
rotrdi %r9, %r9, 3
sth %r9, 0(%r8) // update current
-.LTcmallocSlab_Internal_PopBatch_FixedShift_critical_limit:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_critical_limit:
rotrdi %r3, %r11, 3
blr
-.LTcmallocSlab_Internal_PopBatch_FixedShift_function_limit:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_function_limit:
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PopBatch_FixedShift);
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PopBatch_FixedShift);
+ENCODE_SIZE(TcmallocSlab_Internal_PopBatch_FixedShift);
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PopBatch_FixedShift);
// Input: r7 points to the function to tail call. r3...r6 are args for it.
.LPushOverflowTrampoline:
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_unsupported.cc b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_unsupported.cc
index 1438d8c3d8..1616086b1f 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_unsupported.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_unsupported.cc
@@ -20,9 +20,9 @@
#if !TCMALLOC_PERCPU_RSEQ_SUPPORTED_PLATFORM
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace subtle {
namespace percpu {
@@ -31,44 +31,44 @@ static void Unsupported() {
"RSEQ function called on unsupported platform.");
}
-int TcmallocSlab_Internal_PerCpuCmpxchg64(int target_cpu, intptr_t *p,
- intptr_t old_val, intptr_t new_val) {
+int TcmallocSlab_Internal_PerCpuCmpxchg64(int target_cpu, intptr_t *p,
+ intptr_t old_val, intptr_t new_val) {
Unsupported();
return -1;
}
-int TcmallocSlab_Internal_Push(void *ptr, size_t cl, void *item, size_t shift,
- OverflowHandler f) {
+int TcmallocSlab_Internal_Push(void *ptr, size_t cl, void *item, size_t shift,
+ OverflowHandler f) {
Unsupported();
return -1;
}
-int TcmallocSlab_Internal_Push_FixedShift(void *ptr, size_t cl, void *item,
- OverflowHandler f) {
+int TcmallocSlab_Internal_Push_FixedShift(void *ptr, size_t cl, void *item,
+ OverflowHandler f) {
Unsupported();
return -1;
}
-void *TcmallocSlab_Internal_Pop(void *ptr, size_t cl, UnderflowHandler f,
- size_t shift) {
+void *TcmallocSlab_Internal_Pop(void *ptr, size_t cl, UnderflowHandler f,
+ size_t shift) {
Unsupported();
return nullptr;
}
-void *TcmallocSlab_Internal_Pop_FixedShift(void *ptr, size_t cl,
- UnderflowHandler f) {
+void *TcmallocSlab_Internal_Pop_FixedShift(void *ptr, size_t cl,
+ UnderflowHandler f) {
Unsupported();
return nullptr;
}
-size_t TcmallocSlab_Internal_PushBatch_FixedShift(void *ptr, size_t cl,
- void **batch, size_t len) {
+size_t TcmallocSlab_Internal_PushBatch_FixedShift(void *ptr, size_t cl,
+ void **batch, size_t len) {
Unsupported();
return 0;
}
-size_t TcmallocSlab_Internal_PopBatch_FixedShift(void *ptr, size_t cl,
- void **batch, size_t len) {
+size_t TcmallocSlab_Internal_PopBatch_FixedShift(void *ptr, size_t cl,
+ void **batch, size_t len) {
Unsupported();
return 0;
}
@@ -80,8 +80,8 @@ int PerCpuReadCycleCounter(int64_t *cycles) {
} // namespace percpu
} // namespace subtle
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // !TCMALLOC_PERCPU_RSEQ_SUPPORTED_PLATFORM
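The stubs above exist only so that builds on platforms without restartable sequences still link; reaching them is a logic error and crashes. A minimal sketch of the runtime gate callers are expected to use; the exact spelling and location of IsFast() are assumptions based on the percpu comments in percpu_tcmalloc.h below.

namespace tcmalloc { namespace tcmalloc_internal { namespace subtle { namespace percpu {
bool IsFast();  // assumption: declared by tcmalloc's per-CPU support header
}}}}  // namespaces

// True when the per-CPU slab fast path may be used; otherwise callers stay on
// the central free-list path and the Unsupported() stubs are never invoked.
inline bool UsePerCpuSlabs() {
  return tcmalloc::tcmalloc_internal::subtle::percpu::IsFast();
}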
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_x86_64.S b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_x86_64.S
index 866f4f90ca..fb9c311033 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_x86_64.S
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_rseq_x86_64.S
@@ -155,11 +155,11 @@ label##_trampoline: \
movl 4(%rax), dest; /* cpuid is 32-bits */
#define FETCH_VCPU(dest) \
movzwl 30(%rax), dest; /* vcpu_id is 16-bits */
-#define START_RSEQ(src) \
- .L##src##_abort: \
- call tcmalloc_internal_tls_fetch_pic@PLT; \
- leaq __rseq_cs_##src(%rip), %r11; \
- movq %r11, 8(%rax); \
+#define START_RSEQ(src) \
+ .L##src##_abort: \
+ call tcmalloc_internal_tls_fetch_pic@PLT; \
+ leaq __rseq_cs_##src(%rip), %r11; \
+ movq %r11, 8(%rax); \
.L##src##_start:
/*
@@ -167,9 +167,9 @@ label##_trampoline: \
* generates a thread-local address which will not change across a missed
* restart. This must precede the construction of any preparatory state.
*/
- .local tcmalloc_internal_tls_fetch_pic
- .type tcmalloc_internal_tls_fetch_pic, @function
-tcmalloc_internal_tls_fetch_pic:
+ .local tcmalloc_internal_tls_fetch_pic
+ .type tcmalloc_internal_tls_fetch_pic, @function
+tcmalloc_internal_tls_fetch_pic:
.cfi_startproc
push %rbp
.cfi_def_cfa_offset 16
@@ -205,7 +205,7 @@ tcmalloc_internal_tls_fetch_pic:
.cfi_def_cfa_offset 8
ret; /* &__rseq_abi in %rax */
.cfi_endproc
-ENCODE_SIZE(tcmalloc_internal_tls_fetch_pic)
+ENCODE_SIZE(tcmalloc_internal_tls_fetch_pic)
#endif /* !defined(__PIC__) || defined(__PIE__) */
/* ---------------- end helper macros ---------------- */
@@ -221,52 +221,52 @@ ENCODE_SIZE(tcmalloc_internal_tls_fetch_pic)
*/
/*
- * int TcmallocSlab_Internal_PerCpuCmpxchg64(int target_cpu, long *p,
+ * int TcmallocSlab_Internal_PerCpuCmpxchg64(int target_cpu, long *p,
* long old_val, long new_val)
*/
.p2align 6; /* aligns to 2^6 with NOP filling */
- .globl TcmallocSlab_Internal_PerCpuCmpxchg64
- .type TcmallocSlab_Internal_PerCpuCmpxchg64, @function
-TcmallocSlab_Internal_PerCpuCmpxchg64:
+ .globl TcmallocSlab_Internal_PerCpuCmpxchg64
+ .type TcmallocSlab_Internal_PerCpuCmpxchg64, @function
+TcmallocSlab_Internal_PerCpuCmpxchg64:
.cfi_startproc
- START_RSEQ(TcmallocSlab_Internal_PerCpuCmpxchg64);
+ START_RSEQ(TcmallocSlab_Internal_PerCpuCmpxchg64);
FETCH_CPU(%eax);
cmp %eax, %edi; /* check cpu vs current_cpu */
- jne .LTcmallocSlab_Internal_PerCpuCmpxchg64_commit;
+ jne .LTcmallocSlab_Internal_PerCpuCmpxchg64_commit;
cmp %rdx, (%rsi); /* verify *p == old */
- jne .LTcmallocSlab_Internal_PerCpuCmpxchg64_value_mismatch;
+ jne .LTcmallocSlab_Internal_PerCpuCmpxchg64_value_mismatch;
mov %rcx, (%rsi);
-.LTcmallocSlab_Internal_PerCpuCmpxchg64_commit:
+.LTcmallocSlab_Internal_PerCpuCmpxchg64_commit:
ret; /* return current cpu, indicating mismatch OR success */
-.LTcmallocSlab_Internal_PerCpuCmpxchg64_value_mismatch:
+.LTcmallocSlab_Internal_PerCpuCmpxchg64_value_mismatch:
mov $-1, %eax; /* mismatch versus "old" or "check", return -1 */
ret;
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PerCpuCmpxchg64)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PerCpuCmpxchg64)
+ENCODE_SIZE(TcmallocSlab_Internal_PerCpuCmpxchg64)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PerCpuCmpxchg64)
.p2align 6; /* aligns to 2^6 with NOP filling */
- .globl TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU
- .type TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU, @function
-TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU:
+ .globl TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU
+ .type TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU, @function
+TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU:
.cfi_startproc
- START_RSEQ(TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU);
+ START_RSEQ(TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU);
FETCH_VCPU(%eax);
cmp %eax, %edi; /* check cpu vs current_cpu */
- jne .LTcmallocSlab_Internal_PerCpuCmpxchg64_VCPU_commit;
+ jne .LTcmallocSlab_Internal_PerCpuCmpxchg64_VCPU_commit;
cmp %rdx, (%rsi); /* verify *p == old */
- jne .LTcmallocSlab_Internal_PerCpuCmpxchg64_VCPU_value_mismatch;
+ jne .LTcmallocSlab_Internal_PerCpuCmpxchg64_VCPU_value_mismatch;
mov %rcx, (%rsi);
-.LTcmallocSlab_Internal_PerCpuCmpxchg64_VCPU_commit:
+.LTcmallocSlab_Internal_PerCpuCmpxchg64_VCPU_commit:
ret; /* return current cpu, indicating mismatch OR success */
-.LTcmallocSlab_Internal_PerCpuCmpxchg64_VCPU_value_mismatch:
+.LTcmallocSlab_Internal_PerCpuCmpxchg64_VCPU_value_mismatch:
mov $-1, %eax; /* mismatch versus "old" or "check", return -1 */
ret;
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU)
+ENCODE_SIZE(TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU)
-/* size_t TcmallocSlab_Internal_PushBatch_FixedShift(
+/* size_t TcmallocSlab_Internal_PushBatch_FixedShift(
* void *ptr (%rdi),
* size_t cl (%rsi),
* void** batch (%rdx),
@@ -290,11 +290,11 @@ DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU)
* }
*/
.p2align 6; /* aligns to 2^6 with NOP filling */
- .globl TcmallocSlab_Internal_PushBatch_FixedShift
- .type TcmallocSlab_Internal_PushBatch_FixedShift, @function
-TcmallocSlab_Internal_PushBatch_FixedShift:
+ .globl TcmallocSlab_Internal_PushBatch_FixedShift
+ .type TcmallocSlab_Internal_PushBatch_FixedShift, @function
+TcmallocSlab_Internal_PushBatch_FixedShift:
.cfi_startproc
- START_RSEQ(TcmallocSlab_Internal_PushBatch_FixedShift);
+ START_RSEQ(TcmallocSlab_Internal_PushBatch_FixedShift);
FETCH_CPU(%r8d);
shl $TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT, %r8;
/* multiply cpu by 256k */
@@ -302,37 +302,37 @@ TcmallocSlab_Internal_PushBatch_FixedShift:
movzwq (%r8, %rsi, 8), %r9; /* current */
movzwq 6(%r8, %rsi, 8), %r10; /* end */
cmpq %r10, %r9;
- jae .LTcmallocSlab_Internal_PushBatch_FixedShift_full;
+ jae .LTcmallocSlab_Internal_PushBatch_FixedShift_full;
movq %rcx, %r11; /* r11 = copy of len */
subq %r9, %r10; /* r10 = free capacity */
cmpq %rcx, %r10;
cmovaq %rcx, %r10; /* r10 = min(len, free capacity) */
addq %r9, %r10;
-.LTcmallocSlab_Internal_PushBatch_FixedShift_loop:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_loop:
decq %r11;
movq (%rdx, %r11, 8), %rax;
movq %rax, (%r8, %r9, 8);
incq %r9;
cmpq %r9, %r10;
- jne .LTcmallocSlab_Internal_PushBatch_FixedShift_loop
+ jne .LTcmallocSlab_Internal_PushBatch_FixedShift_loop
movw %r9w, (%r8, %rsi, 8);
-.LTcmallocSlab_Internal_PushBatch_FixedShift_commit:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_commit:
movq %rcx, %rax;
subq %r11, %rax;
ret;
-.LTcmallocSlab_Internal_PushBatch_FixedShift_full:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_full:
xor %rax, %rax;
ret;
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PushBatch_FixedShift)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PushBatch_FixedShift)
+ENCODE_SIZE(TcmallocSlab_Internal_PushBatch_FixedShift)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PushBatch_FixedShift)
.p2align 6; /* aligns to 2^6 with NOP filling */
- .globl TcmallocSlab_Internal_PushBatch_FixedShift_VCPU
- .type TcmallocSlab_Internal_PushBatch_FixedShift_VCPU, @function
-TcmallocSlab_Internal_PushBatch_FixedShift_VCPU:
+ .globl TcmallocSlab_Internal_PushBatch_FixedShift_VCPU
+ .type TcmallocSlab_Internal_PushBatch_FixedShift_VCPU, @function
+TcmallocSlab_Internal_PushBatch_FixedShift_VCPU:
.cfi_startproc
- START_RSEQ(TcmallocSlab_Internal_PushBatch_FixedShift_VCPU);
+ START_RSEQ(TcmallocSlab_Internal_PushBatch_FixedShift_VCPU);
FETCH_VCPU(%r8d);
shl $TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT, %r8;
/* multiply cpu by 256k */
@@ -340,32 +340,32 @@ TcmallocSlab_Internal_PushBatch_FixedShift_VCPU:
movzwq (%r8, %rsi, 8), %r9; /* current */
movzwq 6(%r8, %rsi, 8), %r10; /* end */
cmpq %r10, %r9;
- jae .LTcmallocSlab_Internal_PushBatch_FixedShift_VCPU_full;
+ jae .LTcmallocSlab_Internal_PushBatch_FixedShift_VCPU_full;
movq %rcx, %r11; /* r11 = copy of len */
subq %r9, %r10; /* r10 = free capacity */
cmpq %rcx, %r10;
cmovaq %rcx, %r10; /* r10 = min(len, free capacity) */
addq %r9, %r10;
-.LTcmallocSlab_Internal_PushBatch_FixedShift_VCPU_loop:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_VCPU_loop:
decq %r11;
movq (%rdx, %r11, 8), %rax;
movq %rax, (%r8, %r9, 8);
incq %r9;
cmpq %r9, %r10;
- jne .LTcmallocSlab_Internal_PushBatch_FixedShift_VCPU_loop
+ jne .LTcmallocSlab_Internal_PushBatch_FixedShift_VCPU_loop
movw %r9w, (%r8, %rsi, 8);
-.LTcmallocSlab_Internal_PushBatch_FixedShift_VCPU_commit:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_VCPU_commit:
movq %rcx, %rax;
subq %r11, %rax;
ret;
-.LTcmallocSlab_Internal_PushBatch_FixedShift_VCPU_full:
+.LTcmallocSlab_Internal_PushBatch_FixedShift_VCPU_full:
xor %rax, %rax;
ret;
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PushBatch_FixedShift_VCPU)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PushBatch_FixedShift_VCPU)
+ENCODE_SIZE(TcmallocSlab_Internal_PushBatch_FixedShift_VCPU)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PushBatch_FixedShift_VCPU)
-/* size_t TcmallocSlab_Internal_PopBatch_FixedShift(
+/* size_t TcmallocSlab_Internal_PopBatch_FixedShift(
* void *ptr (%rdi),
* size_t cl (%rsi),
* void** batch (%rdx),
@@ -389,11 +389,11 @@ DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PushBatch_FixedShift_VCPU)
* }
*/
.p2align 6; /* aligns to 2^6 with NOP filling */
- .globl TcmallocSlab_Internal_PopBatch_FixedShift
- .type TcmallocSlab_Internal_PopBatch_FixedShift, @function
-TcmallocSlab_Internal_PopBatch_FixedShift:
+ .globl TcmallocSlab_Internal_PopBatch_FixedShift
+ .type TcmallocSlab_Internal_PopBatch_FixedShift, @function
+TcmallocSlab_Internal_PopBatch_FixedShift:
.cfi_startproc
- START_RSEQ(TcmallocSlab_Internal_PopBatch_FixedShift);
+ START_RSEQ(TcmallocSlab_Internal_PopBatch_FixedShift);
FETCH_CPU(%r8d);
shl $TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT, %r8;
/* multiply cpu by 256k */
@@ -401,35 +401,35 @@ TcmallocSlab_Internal_PopBatch_FixedShift:
movzwq (%r8, %rsi, 8), %r9; /* current */
movzwq 4(%r8, %rsi, 8), %r10; /* begin */
cmp %r10, %r9;
- jbe .LTcmallocSlab_Internal_PopBatch_FixedShift_empty;
+ jbe .LTcmallocSlab_Internal_PopBatch_FixedShift_empty;
movq %r9, %r11;
subq %r10, %r11; /* r11 = available items */
cmpq %rcx, %r11;
cmovaq %rcx, %r11; /* r11 = min(len, available items) */
xorq %rax, %rax;
-.LTcmallocSlab_Internal_PopBatch_FixedShift_loop:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_loop:
decq %r9;
movq (%r8, %r9, 8), %r10;
movq %r10, (%rdx, %rax, 8);
incq %rax;
cmpq %rax, %r11;
- jne .LTcmallocSlab_Internal_PopBatch_FixedShift_loop
+ jne .LTcmallocSlab_Internal_PopBatch_FixedShift_loop
movw %r9w, (%r8, %rsi, 8);
-.LTcmallocSlab_Internal_PopBatch_FixedShift_commit:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_commit:
ret;
-.LTcmallocSlab_Internal_PopBatch_FixedShift_empty:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_empty:
xor %rax, %rax;
ret;
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PopBatch_FixedShift)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PopBatch_FixedShift)
+ENCODE_SIZE(TcmallocSlab_Internal_PopBatch_FixedShift)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PopBatch_FixedShift)
.p2align 6; /* aligns to 2^6 with NOP filling */
- .globl TcmallocSlab_Internal_PopBatch_FixedShift_VCPU
- .type TcmallocSlab_Internal_PopBatch_FixedShift_VCPU, @function
-TcmallocSlab_Internal_PopBatch_FixedShift_VCPU:
+ .globl TcmallocSlab_Internal_PopBatch_FixedShift_VCPU
+ .type TcmallocSlab_Internal_PopBatch_FixedShift_VCPU, @function
+TcmallocSlab_Internal_PopBatch_FixedShift_VCPU:
.cfi_startproc
- START_RSEQ(TcmallocSlab_Internal_PopBatch_FixedShift_VCPU);
+ START_RSEQ(TcmallocSlab_Internal_PopBatch_FixedShift_VCPU);
FETCH_VCPU(%r8d);
shl $TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT, %r8;
/* multiply cpu by 256k */
@@ -437,27 +437,27 @@ TcmallocSlab_Internal_PopBatch_FixedShift_VCPU:
movzwq (%r8, %rsi, 8), %r9; /* current */
movzwq 4(%r8, %rsi, 8), %r10; /* begin */
cmp %r10, %r9;
- jbe .LTcmallocSlab_Internal_PopBatch_FixedShift_VCPU_empty;
+ jbe .LTcmallocSlab_Internal_PopBatch_FixedShift_VCPU_empty;
movq %r9, %r11;
subq %r10, %r11; /* r11 = available items */
cmpq %rcx, %r11;
cmovaq %rcx, %r11; /* r11 = min(len, available items) */
xorq %rax, %rax;
-.LTcmallocSlab_Internal_PopBatch_FixedShift_VCPU_loop:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_VCPU_loop:
decq %r9;
movq (%r8, %r9, 8), %r10;
movq %r10, (%rdx, %rax, 8);
incq %rax;
cmpq %rax, %r11;
- jne .LTcmallocSlab_Internal_PopBatch_FixedShift_VCPU_loop
+ jne .LTcmallocSlab_Internal_PopBatch_FixedShift_VCPU_loop
movw %r9w, (%r8, %rsi, 8);
-.LTcmallocSlab_Internal_PopBatch_FixedShift_VCPU_commit:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_VCPU_commit:
ret;
-.LTcmallocSlab_Internal_PopBatch_FixedShift_VCPU_empty:
+.LTcmallocSlab_Internal_PopBatch_FixedShift_VCPU_empty:
xor %rax, %rax;
ret;
.cfi_endproc
-ENCODE_SIZE(TcmallocSlab_Internal_PopBatch_FixedShift_VCPU)
-DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PopBatch_FixedShift_VCPU)
+ENCODE_SIZE(TcmallocSlab_Internal_PopBatch_FixedShift_VCPU)
+DEFINE_UPSTREAM_CS(TcmallocSlab_Internal_PopBatch_FixedShift_VCPU)
.section .note.GNU-stack,"",@progbits
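Each START_RSEQ expansion above installs a critical-section descriptor in the __rseq_cs section before entering the sequence. A minimal C++ sketch of that descriptor's layout, following the Linux rseq ABI; the field names are an assumption taken from linux/rseq.h, and the per-field comments map to the .long/.quad emission pattern visible in the inline assembly of percpu_tcmalloc.h further down.

#include <cstdint>

// One 32-byte descriptor per critical section; the kernel consults it to
// decide whether an interrupted thread must resume at abort_ip.
struct RseqCriticalSection {
  uint32_t version;             // emitted as ".long 0x0"
  uint32_t flags;               // emitted as ".long 0x0"
  uint64_t start_ip;            // first instruction of the sequence (label 4:)
  uint64_t post_commit_offset;  // length up to the committing store (5f - 4f)
  uint64_t abort_ip;            // trampoline preceded by the 4-byte rseq signature
};
static_assert(sizeof(RseqCriticalSection) == 32,
              "matches the \".size __rseq_cs_...,32\" directives");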
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_tcmalloc.h b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_tcmalloc.h
index 91d15ba908..5264075f1b 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_tcmalloc.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_tcmalloc.h
@@ -18,7 +18,7 @@
#include <atomic>
#include <cstring>
-#include "absl/base/casts.h"
+#include "absl/base/casts.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/sysinfo.h"
#include "tcmalloc/internal/mincore.h"
@@ -46,9 +46,9 @@
#define TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO 0
#endif
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
struct PerCPUMetadataState {
size_t virtual_size;
@@ -66,24 +66,24 @@ namespace percpu {
// Methods of this type must only be used in threads where it is known that the
// percpu primitives are available and percpu::IsFast() has previously returned
// 'true'.
-template <size_t NumClasses>
+template <size_t NumClasses>
class TcmallocSlab {
public:
- constexpr TcmallocSlab() = default;
+ constexpr TcmallocSlab() = default;
// Init must be called before any other methods.
// <alloc> is memory allocation callback (e.g. malloc).
// <capacity> callback returns max capacity for size class <cl>.
// <lazy> indicates that per-CPU slabs should be populated on demand
- // <shift> indicates the number of bits to shift the CPU ID in order to
- // obtain the location of the per-CPU slab. If this parameter matches
- // TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT as set in
-  //          percpu_internal.h then the assembly language versions of push/pop
- // batch can be used; otherwise batch operations are emulated.
+ // <shift> indicates the number of bits to shift the CPU ID in order to
+ // obtain the location of the per-CPU slab. If this parameter matches
+ // TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT as set in
+  //          percpu_internal.h then the assembly language versions of push/pop
+ // batch can be used; otherwise batch operations are emulated.
//
// Initial capacity is 0 for all slabs.
- void Init(void*(alloc)(size_t size), size_t (*capacity)(size_t cl), bool lazy,
- size_t shift);
+ void Init(void*(alloc)(size_t size), size_t (*capacity)(size_t cl), bool lazy,
+ size_t shift);
// Only may be called if Init(..., lazy = true) was used.
void InitCPU(int cpu, size_t (*capacity)(size_t cl));
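To make the <shift> parameter documented above concrete: Init() reserves NumCPUs() << shift bytes of backing memory and each CPU's slab region starts at cpu << shift within it, so with the fixed shift the assembly assumes (the "multiply cpu by 256k" comments imply shift == 18, i.e. 256 KiB per CPU) a 64-CPU machine needs a 16 MiB backing allocation. A minimal sketch with hypothetical names, mirroring CpuMemoryStart() below:

#include <cstddef>

// Start of one CPU's slab region inside the backing allocation.
// shift == 18 is an assumption (256 KiB per CPU).
inline void* CpuRegionSketch(void* backing, int cpu, size_t shift = 18) {
  return static_cast<char*>(backing) + (static_cast<size_t>(cpu) << shift);
}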
@@ -129,18 +129,18 @@ class TcmallocSlab {
// REQUIRES: len > 0.
size_t PopBatch(size_t cl, void** batch, size_t len);
- // Decrements the cpu/cl slab's capacity to no less than max(capacity-len, 0)
- // and returns the actual decrement applied. It attempts to shrink any
-  // unused capacity (i.e. end-current) in cpu/cl's slab; if it does not have
- // enough unused items, it pops up to <len> items from cpu/cl slab and then
- // shrinks the freed capacity.
- //
- // May be called from another processor, not just the <cpu>.
- // REQUIRES: len > 0.
- typedef void (*ShrinkHandler)(void* arg, size_t cl, void** batch, size_t n);
- size_t ShrinkOtherCache(int cpu, size_t cl, size_t len, void* shrink_ctx,
- ShrinkHandler f);
-
+ // Decrements the cpu/cl slab's capacity to no less than max(capacity-len, 0)
+ // and returns the actual decrement applied. It attempts to shrink any
+  // unused capacity (i.e. end-current) in cpu/cl's slab; if it does not have
+ // enough unused items, it pops up to <len> items from cpu/cl slab and then
+ // shrinks the freed capacity.
+ //
+ // May be called from another processor, not just the <cpu>.
+ // REQUIRES: len > 0.
+ typedef void (*ShrinkHandler)(void* arg, size_t cl, void** batch, size_t n);
+ size_t ShrinkOtherCache(int cpu, size_t cl, size_t len, void* shrink_ctx,
+ ShrinkHandler f);
+
// Remove all items (of all classes) from <cpu>'s slab; reset capacity for all
// classes to zero. Then, for each sizeclass, invoke
// DrainHandler(drain_ctx, cl, <items from slab>, <previous slab capacity>);
@@ -159,13 +159,13 @@ class TcmallocSlab {
// headers (Header struct). The remaining memory contain slab arrays.
struct Slabs {
std::atomic<int64_t> header[NumClasses];
- void* mem[];
+ void* mem[];
};
- inline int GetCurrentVirtualCpuUnsafe() {
- return VirtualRseqCpuId(virtual_cpu_id_offset_);
- }
-
+ inline int GetCurrentVirtualCpuUnsafe() {
+ return VirtualRseqCpuId(virtual_cpu_id_offset_);
+ }
+
private:
// Slab header (packed, atomically updated 64-bit).
struct Header {
@@ -175,13 +175,13 @@ class TcmallocSlab {
// Copy of end. Updated by Shrink/Grow, but is not overwritten by Drain.
uint16_t end_copy;
// Lock updates only begin and end with a 32-bit write.
- union {
- struct {
- uint16_t begin;
- uint16_t end;
- };
- uint32_t lock_update;
- };
+ union {
+ struct {
+ uint16_t begin;
+ uint16_t end;
+ };
+ uint32_t lock_update;
+ };
// Lock is used by Drain to stop concurrent mutations of the Header.
// Lock sets begin to 0xffff and end to 0, which makes Push and Pop fail
@@ -194,36 +194,36 @@ class TcmallocSlab {
static_assert(sizeof(Header) == sizeof(std::atomic<int64_t>),
"bad Header size");
- Slabs* slabs_ = nullptr;
- size_t shift_ = 0;
- // This is in units of bytes.
- size_t virtual_cpu_id_offset_ = offsetof(kernel_rseq, cpu_id);
+ Slabs* slabs_ = nullptr;
+ size_t shift_ = 0;
+ // This is in units of bytes.
+ size_t virtual_cpu_id_offset_ = offsetof(kernel_rseq, cpu_id);
Slabs* CpuMemoryStart(int cpu) const;
std::atomic<int64_t>* GetHeader(int cpu, size_t cl) const;
static Header LoadHeader(std::atomic<int64_t>* hdrp);
static void StoreHeader(std::atomic<int64_t>* hdrp, Header hdr);
static int CompareAndSwapHeader(int cpu, std::atomic<int64_t>* hdrp,
- Header old, Header hdr,
- size_t virtual_cpu_id_offset);
+ Header old, Header hdr,
+ size_t virtual_cpu_id_offset);
};
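Because the Header members are split across diff hunks, here is a minimal sketch of the packed 64-bit layout the assembly indexes at cpu_region + cl * 8. The current field is reconstructed from the hdr.current uses below (its exact name is an assumption), and the 0/4/6 byte offsets match the movzwq loads in percpu_rseq_x86_64.S and the #4/#6 offsets in the aarch64 inline assembly.

#include <atomic>
#include <cstdint>

struct HeaderSketch {
  uint16_t current;   // offset 0: index one past the top element
  uint16_t end_copy;  // offset 2: copy of end, preserved across Drain
  uint16_t begin;     // offset 4: begin/end together form the 32-bit lock_update word
  uint16_t end;       // offset 6
};
static_assert(sizeof(HeaderSketch) == sizeof(std::atomic<int64_t>),
              "fits one atomically updated 64-bit slot");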
-template <size_t NumClasses>
-inline size_t TcmallocSlab<NumClasses>::Length(int cpu, size_t cl) const {
+template <size_t NumClasses>
+inline size_t TcmallocSlab<NumClasses>::Length(int cpu, size_t cl) const {
Header hdr = LoadHeader(GetHeader(cpu, cl));
return hdr.IsLocked() ? 0 : hdr.current - hdr.begin;
}
-template <size_t NumClasses>
-inline size_t TcmallocSlab<NumClasses>::Capacity(int cpu, size_t cl) const {
+template <size_t NumClasses>
+inline size_t TcmallocSlab<NumClasses>::Capacity(int cpu, size_t cl) const {
Header hdr = LoadHeader(GetHeader(cpu, cl));
return hdr.IsLocked() ? 0 : hdr.end - hdr.begin;
}
-template <size_t NumClasses>
-inline size_t TcmallocSlab<NumClasses>::Grow(int cpu, size_t cl, size_t len,
- size_t max_cap) {
- const size_t virtual_cpu_id_offset = virtual_cpu_id_offset_;
+template <size_t NumClasses>
+inline size_t TcmallocSlab<NumClasses>::Grow(int cpu, size_t cl, size_t len,
+ size_t max_cap) {
+ const size_t virtual_cpu_id_offset = virtual_cpu_id_offset_;
std::atomic<int64_t>* hdrp = GetHeader(cpu, cl);
for (;;) {
Header old = LoadHeader(hdrp);
@@ -234,8 +234,8 @@ inline size_t TcmallocSlab<NumClasses>::Grow(int cpu, size_t cl, size_t len,
Header hdr = old;
hdr.end += n;
hdr.end_copy += n;
- const int ret =
- CompareAndSwapHeader(cpu, hdrp, old, hdr, virtual_cpu_id_offset);
+ const int ret =
+ CompareAndSwapHeader(cpu, hdrp, old, hdr, virtual_cpu_id_offset);
if (ret == cpu) {
return n;
} else if (ret >= 0) {
@@ -244,9 +244,9 @@ inline size_t TcmallocSlab<NumClasses>::Grow(int cpu, size_t cl, size_t len,
}
}
-template <size_t NumClasses>
-inline size_t TcmallocSlab<NumClasses>::Shrink(int cpu, size_t cl, size_t len) {
- const size_t virtual_cpu_id_offset = virtual_cpu_id_offset_;
+template <size_t NumClasses>
+inline size_t TcmallocSlab<NumClasses>::Shrink(int cpu, size_t cl, size_t len) {
+ const size_t virtual_cpu_id_offset = virtual_cpu_id_offset_;
std::atomic<int64_t>* hdrp = GetHeader(cpu, cl);
for (;;) {
Header old = LoadHeader(hdrp);
@@ -257,8 +257,8 @@ inline size_t TcmallocSlab<NumClasses>::Shrink(int cpu, size_t cl, size_t len) {
Header hdr = old;
hdr.end -= n;
hdr.end_copy -= n;
- const int ret =
- CompareAndSwapHeader(cpu, hdrp, old, hdr, virtual_cpu_id_offset);
+ const int ret =
+ CompareAndSwapHeader(cpu, hdrp, old, hdr, virtual_cpu_id_offset);
if (ret == cpu) {
return n;
} else if (ret >= 0) {
@@ -268,10 +268,10 @@ inline size_t TcmallocSlab<NumClasses>::Shrink(int cpu, size_t cl, size_t len) {
}
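Grow() and Shrink() above share one commit protocol: edit a copy of the packed header, then install it with a per-CPU compare-and-swap that also verifies the thread is still on the intended CPU. A self-contained sketch of that retry loop; CompareAndSwapSketch is a hypothetical stand-in for CompareAndSwapHeader/CompareAndSwapUnsafe, modelled here as a plain CAS that always reports cpu, and old + delta stands in for the real end/end_copy adjustment.

#include <atomic>
#include <cstdint>

// Returns the CPU the operation ran on (success iff it equals `cpu`), or -1
// when the stored value no longer matches `expected`.
inline int CompareAndSwapSketch(int cpu, std::atomic<int64_t>* hdrp,
                                int64_t expected, int64_t desired) {
  return hdrp->compare_exchange_strong(expected, desired,
                                       std::memory_order_relaxed) ? cpu : -1;
}

inline int64_t GrowLikeLoop(int cpu, std::atomic<int64_t>* hdrp, int64_t delta) {
  for (;;) {
    int64_t old = hdrp->load(std::memory_order_relaxed);
    int ret = CompareAndSwapSketch(cpu, hdrp, old, old + delta);
    if (ret == cpu) return delta;  // committed on the intended CPU
    if (ret >= 0) return 0;        // running on a different CPU: give up
    // ret < 0: the header changed concurrently; reload and retry.
  }
}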
#if defined(__x86_64__)
-template <size_t NumClasses>
-static inline ABSL_ATTRIBUTE_ALWAYS_INLINE int TcmallocSlab_Internal_Push(
- typename TcmallocSlab<NumClasses>::Slabs* slabs, size_t cl, void* item,
- const size_t shift, OverflowHandler f, const size_t virtual_cpu_id_offset) {
+template <size_t NumClasses>
+static inline ABSL_ATTRIBUTE_ALWAYS_INLINE int TcmallocSlab_Internal_Push(
+ typename TcmallocSlab<NumClasses>::Slabs* slabs, size_t cl, void* item,
+ const size_t shift, OverflowHandler f, const size_t virtual_cpu_id_offset) {
#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
asm goto(
#else
@@ -282,10 +282,10 @@ static inline ABSL_ATTRIBUTE_ALWAYS_INLINE int TcmallocSlab_Internal_Push(
// relocations, but could be read-only for non-PIE builds.
".pushsection __rseq_cs, \"aw?\"\n"
".balign 32\n"
- ".local __rseq_cs_TcmallocSlab_Internal_Push_%=\n"
- ".type __rseq_cs_TcmallocSlab_Internal_Push_%=,@object\n"
- ".size __rseq_cs_TcmallocSlab_Internal_Push_%=,32\n"
- "__rseq_cs_TcmallocSlab_Internal_Push_%=:\n"
+ ".local __rseq_cs_TcmallocSlab_Internal_Push_%=\n"
+ ".type __rseq_cs_TcmallocSlab_Internal_Push_%=,@object\n"
+ ".size __rseq_cs_TcmallocSlab_Internal_Push_%=,32\n"
+ "__rseq_cs_TcmallocSlab_Internal_Push_%=:\n"
".long 0x0\n"
".long 0x0\n"
".quad 4f\n"
@@ -298,20 +298,20 @@ static inline ABSL_ATTRIBUTE_ALWAYS_INLINE int TcmallocSlab_Internal_Push(
".pushsection __rseq_cs_ptr_array, \"aw?\"\n"
"1:\n"
".balign 8;"
- ".quad __rseq_cs_TcmallocSlab_Internal_Push_%=\n"
+ ".quad __rseq_cs_TcmallocSlab_Internal_Push_%=\n"
// Force this section to be retained. It is for debugging, but is
// otherwise not referenced.
".popsection\n"
".pushsection .text.unlikely, \"ax?\"\n"
".byte 0x0f, 0x1f, 0x05\n"
".long %c[rseq_sig]\n"
- ".local TcmallocSlab_Internal_Push_trampoline_%=\n"
- ".type TcmallocSlab_Internal_Push_trampoline_%=,@function\n"
- "TcmallocSlab_Internal_Push_trampoline_%=:\n"
+ ".local TcmallocSlab_Internal_Push_trampoline_%=\n"
+ ".type TcmallocSlab_Internal_Push_trampoline_%=,@function\n"
+ "TcmallocSlab_Internal_Push_trampoline_%=:\n"
"2:\n"
"jmp 3f\n"
- ".size TcmallocSlab_Internal_Push_trampoline_%=, . - "
- "TcmallocSlab_Internal_Push_trampoline_%=;\n"
+ ".size TcmallocSlab_Internal_Push_trampoline_%=, . - "
+ "TcmallocSlab_Internal_Push_trampoline_%=;\n"
".popsection\n"
// Prepare
//
@@ -325,14 +325,14 @@ static inline ABSL_ATTRIBUTE_ALWAYS_INLINE int TcmallocSlab_Internal_Push(
// r10: Scratch
// r11: Current
"3:\n"
- "lea __rseq_cs_TcmallocSlab_Internal_Push_%=(%%rip), %%r10\n"
+ "lea __rseq_cs_TcmallocSlab_Internal_Push_%=(%%rip), %%r10\n"
"mov %%r10, %c[rseq_cs_offset](%[rseq_abi])\n"
// Start
"4:\n"
// scratch = __rseq_abi.cpu_id;
"movzwl (%[rseq_abi], %[rseq_cpu_offset]), %%r10d\n"
// scratch = slabs + scratch
- "shlq %b[shift], %%r10\n"
+ "shlq %b[shift], %%r10\n"
"add %[slabs], %%r10\n"
// r11 = slabs->current;
"movzwq (%%r10, %[cl], 8), %%r11\n"
@@ -356,8 +356,8 @@ static inline ABSL_ATTRIBUTE_ALWAYS_INLINE int TcmallocSlab_Internal_Push(
#endif
: [rseq_abi] "r"(&__rseq_abi),
[rseq_cs_offset] "n"(offsetof(kernel_rseq, rseq_cs)),
- [rseq_cpu_offset] "r"(virtual_cpu_id_offset),
- [rseq_sig] "in"(TCMALLOC_PERCPU_RSEQ_SIGNATURE), [shift] "c"(shift),
+ [rseq_cpu_offset] "r"(virtual_cpu_id_offset),
+ [rseq_sig] "in"(TCMALLOC_PERCPU_RSEQ_SIGNATURE), [shift] "c"(shift),
[slabs] "r"(slabs), [cl] "r"(cl), [item] "r"(item)
: "cc", "memory", "r10", "r11"
#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
@@ -374,168 +374,168 @@ overflow_label:
// As of 3/2020, LLVM's asm goto (even with output constraints) only provides
// values for the fallthrough path. The values on the taken branches are
// undefined.
- int cpu = VirtualRseqCpuId(virtual_cpu_id_offset);
+ int cpu = VirtualRseqCpuId(virtual_cpu_id_offset);
return f(cpu, cl, item);
}
#endif // defined(__x86_64__)
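Stripped of the rseq bookkeeping, the Push fast path above (and its aarch64 twin below) commits the following logic. A minimal sketch with hypothetical names; the final index store is what the restartable sequence makes safe against CPU migration.

#include <cstdint>

// Returns false on overflow, in which case the caller invokes its OverflowHandler.
inline bool PushSketch(void** slab, uint16_t* hdr_current, uint16_t hdr_end,
                       void* item) {
  if (*hdr_current >= hdr_end) return false;   // no room left in this size class
  slab[*hdr_current] = item;                   // place the element
  *hdr_current = *hdr_current + 1;             // the committing store
  return true;
}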
-#if defined(__aarch64__)
-
-template <size_t NumClasses>
-static inline ABSL_ATTRIBUTE_ALWAYS_INLINE int TcmallocSlab_Internal_Push(
- typename TcmallocSlab<NumClasses>::Slabs* slabs, size_t cl, void* item,
- const size_t shift, OverflowHandler f, const size_t virtual_cpu_id_offset) {
- void* region_start;
- uint64_t cpu_id;
- void* end_ptr;
- uintptr_t current;
- uintptr_t end;
- // Multiply cl by the bytesize of each header
- size_t cl_lsl3 = cl * 8;
-#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
- asm goto(
-#else
- bool overflow;
- asm volatile(
-#endif
- // TODO(b/141629158): __rseq_cs only needs to be writeable to allow for
- // relocations, but could be read-only for non-PIE builds.
- ".pushsection __rseq_cs, \"aw?\"\n"
- ".balign 32\n"
- ".local __rseq_cs_TcmallocSlab_Internal_Push_%=\n"
- ".type __rseq_cs_TcmallocSlab_Internal_Push_%=,@object\n"
- ".size __rseq_cs_TcmallocSlab_Internal_Push_%=,32\n"
- "__rseq_cs_TcmallocSlab_Internal_Push_%=:\n"
- ".long 0x0\n"
- ".long 0x0\n"
- ".quad 4f\n"
- ".quad 5f - 4f\n"
- ".quad 2f\n"
- ".popsection\n"
-#if !defined(__clang_major__) || __clang_major__ >= 9
- ".reloc 0, R_AARCH64_NONE, 1f\n"
-#endif
- ".pushsection __rseq_cs_ptr_array, \"aw?\"\n"
- "1:\n"
- ".balign 8;"
- ".quad __rseq_cs_TcmallocSlab_Internal_Push_%=\n"
- // Force this section to be retained. It is for debugging, but is
- // otherwise not referenced.
- ".popsection\n"
- ".pushsection .text.unlikely, \"ax?\"\n"
- ".long %c[rseq_sig]\n"
- ".local TcmallocSlab_Internal_Push_trampoline_%=\n"
- ".type TcmallocSlab_Internal_Push_trampoline_%=,@function\n"
- "TcmallocSlab_Internal_Push_trampoline_%=:\n"
- "2:\n"
- "b 3f\n"
- ".popsection\n"
- // Prepare
- //
- // TODO(b/151503411): Pending widespread availability of LLVM's asm
-      // goto with output constraints
- // (https://github.com/llvm/llvm-project/commit/23c2a5ce33f0), we can
- // return the register allocations to the compiler rather than using
- // explicit clobbers. Prior to this, blocks which use asm goto cannot
- // also specify outputs.
- "3:\n"
- // Use current as scratch here to hold address of this function's
- // critical section
- "adrp %[current], __rseq_cs_TcmallocSlab_Internal_Push_%=\n"
- "add %[current], %[current], "
- ":lo12:__rseq_cs_TcmallocSlab_Internal_Push_%=\n"
- "str %[current], [%[rseq_abi], %c[rseq_cs_offset]]\n"
- // Start
- "4:\n"
- // cpu_id = __rseq_abi.cpu_id;
- "ldr %w[cpu_id], [%[rseq_abi], %[rseq_cpu_offset]]\n"
- // region_start = Start of cpu region
- "lsl %[region_start], %[cpu_id], %[shift]\n"
- "add %[region_start], %[region_start], %[slabs]\n"
- // end_ptr = &(slab_headers[0]->end)
- "add %[end_ptr], %[region_start], #6\n"
- // current = slab_headers[cl]->current (current index)
- "ldrh %w[current], [%[region_start], %[cl_lsl3]]\n"
- // end = slab_headers[cl]->end (end index)
- "ldrh %w[end], [%[end_ptr], %[cl_lsl3]]\n"
- // if (ABSL_PREDICT_FALSE(current >= end)) { goto overflow; }
- "cmp %[end], %[current]\n"
-#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
- "b.le %l[overflow_label]\n"
-#else
- "b.le 5f\n"
- // Important! code below this must not affect any flags (i.e.: ccae)
- // If so, the above code needs to explicitly set a ccae return value.
-#endif
- "str %[item], [%[region_start], %[current], LSL #3]\n"
- "add %w[current], %w[current], #1\n"
- "strh %w[current], [%[region_start], %[cl_lsl3]]\n"
- // Commit
- "5:\n"
- : [end_ptr] "=&r"(end_ptr), [cpu_id] "=&r"(cpu_id),
- [current] "=&r"(current), [end] "=&r"(end),
- [region_start] "=&r"(region_start)
-
-#if !TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
- ,
- [overflow] "=@ccae"(overflow)
-#endif
- : [rseq_cpu_offset] "r"(virtual_cpu_id_offset), [slabs] "r"(slabs),
- [cl_lsl3] "r"(cl_lsl3), [item] "r"(item), [rseq_abi] "r"(&__rseq_abi),
- [shift] "r"(shift),
- // Constants
- [rseq_cs_offset] "n"(offsetof(kernel_rseq, rseq_cs)),
- [rseq_sig] "in"(TCMALLOC_PERCPU_RSEQ_SIGNATURE)
- : "cc", "memory"
-#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
- : overflow_label
-#endif
- );
-#if !TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
- if (ABSL_PREDICT_FALSE(overflow)) {
- goto overflow_label;
- }
-#endif
- return 0;
-overflow_label:
-#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
- // As of 3/2020, LLVM's asm goto (even with output constraints) only provides
- // values for the fallthrough path. The values on the taken branches are
- // undefined.
- int cpu = VirtualRseqCpuId(virtual_cpu_id_offset);
-#else
- // With asm goto--without output constraints--the value of scratch is
- // well-defined by the compiler and our implementation. As an optimization on
- // this case, we can avoid looking up cpu_id again, by undoing the
- // transformation of cpu_id to the value of scratch.
- int cpu = cpu_id;
-#endif
- return f(cpu, cl, item);
-}
-#endif // defined (__aarch64__)
-
-template <size_t NumClasses>
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE bool TcmallocSlab<NumClasses>::Push(
+#if defined(__aarch64__)
+
+template <size_t NumClasses>
+static inline ABSL_ATTRIBUTE_ALWAYS_INLINE int TcmallocSlab_Internal_Push(
+ typename TcmallocSlab<NumClasses>::Slabs* slabs, size_t cl, void* item,
+ const size_t shift, OverflowHandler f, const size_t virtual_cpu_id_offset) {
+ void* region_start;
+ uint64_t cpu_id;
+ void* end_ptr;
+ uintptr_t current;
+ uintptr_t end;
+ // Multiply cl by the bytesize of each header
+ size_t cl_lsl3 = cl * 8;
+#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
+ asm goto(
+#else
+ bool overflow;
+ asm volatile(
+#endif
+ // TODO(b/141629158): __rseq_cs only needs to be writeable to allow for
+ // relocations, but could be read-only for non-PIE builds.
+ ".pushsection __rseq_cs, \"aw?\"\n"
+ ".balign 32\n"
+ ".local __rseq_cs_TcmallocSlab_Internal_Push_%=\n"
+ ".type __rseq_cs_TcmallocSlab_Internal_Push_%=,@object\n"
+ ".size __rseq_cs_TcmallocSlab_Internal_Push_%=,32\n"
+ "__rseq_cs_TcmallocSlab_Internal_Push_%=:\n"
+ ".long 0x0\n"
+ ".long 0x0\n"
+ ".quad 4f\n"
+ ".quad 5f - 4f\n"
+ ".quad 2f\n"
+ ".popsection\n"
+#if !defined(__clang_major__) || __clang_major__ >= 9
+ ".reloc 0, R_AARCH64_NONE, 1f\n"
+#endif
+ ".pushsection __rseq_cs_ptr_array, \"aw?\"\n"
+ "1:\n"
+ ".balign 8;"
+ ".quad __rseq_cs_TcmallocSlab_Internal_Push_%=\n"
+ // Force this section to be retained. It is for debugging, but is
+ // otherwise not referenced.
+ ".popsection\n"
+ ".pushsection .text.unlikely, \"ax?\"\n"
+ ".long %c[rseq_sig]\n"
+ ".local TcmallocSlab_Internal_Push_trampoline_%=\n"
+ ".type TcmallocSlab_Internal_Push_trampoline_%=,@function\n"
+ "TcmallocSlab_Internal_Push_trampoline_%=:\n"
+ "2:\n"
+ "b 3f\n"
+ ".popsection\n"
+ // Prepare
+ //
+ // TODO(b/151503411): Pending widespread availability of LLVM's asm
+      // goto with output constraints
+ // (https://github.com/llvm/llvm-project/commit/23c2a5ce33f0), we can
+ // return the register allocations to the compiler rather than using
+ // explicit clobbers. Prior to this, blocks which use asm goto cannot
+ // also specify outputs.
+ "3:\n"
+ // Use current as scratch here to hold address of this function's
+ // critical section
+ "adrp %[current], __rseq_cs_TcmallocSlab_Internal_Push_%=\n"
+ "add %[current], %[current], "
+ ":lo12:__rseq_cs_TcmallocSlab_Internal_Push_%=\n"
+ "str %[current], [%[rseq_abi], %c[rseq_cs_offset]]\n"
+ // Start
+ "4:\n"
+ // cpu_id = __rseq_abi.cpu_id;
+ "ldr %w[cpu_id], [%[rseq_abi], %[rseq_cpu_offset]]\n"
+ // region_start = Start of cpu region
+ "lsl %[region_start], %[cpu_id], %[shift]\n"
+ "add %[region_start], %[region_start], %[slabs]\n"
+ // end_ptr = &(slab_headers[0]->end)
+ "add %[end_ptr], %[region_start], #6\n"
+ // current = slab_headers[cl]->current (current index)
+ "ldrh %w[current], [%[region_start], %[cl_lsl3]]\n"
+ // end = slab_headers[cl]->end (end index)
+ "ldrh %w[end], [%[end_ptr], %[cl_lsl3]]\n"
+ // if (ABSL_PREDICT_FALSE(current >= end)) { goto overflow; }
+ "cmp %[end], %[current]\n"
+#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
+ "b.le %l[overflow_label]\n"
+#else
+ "b.le 5f\n"
+ // Important! code below this must not affect any flags (i.e.: ccae)
+ // If so, the above code needs to explicitly set a ccae return value.
+#endif
+ "str %[item], [%[region_start], %[current], LSL #3]\n"
+ "add %w[current], %w[current], #1\n"
+ "strh %w[current], [%[region_start], %[cl_lsl3]]\n"
+ // Commit
+ "5:\n"
+ : [end_ptr] "=&r"(end_ptr), [cpu_id] "=&r"(cpu_id),
+ [current] "=&r"(current), [end] "=&r"(end),
+ [region_start] "=&r"(region_start)
+
+#if !TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
+ ,
+ [overflow] "=@ccae"(overflow)
+#endif
+ : [rseq_cpu_offset] "r"(virtual_cpu_id_offset), [slabs] "r"(slabs),
+ [cl_lsl3] "r"(cl_lsl3), [item] "r"(item), [rseq_abi] "r"(&__rseq_abi),
+ [shift] "r"(shift),
+ // Constants
+ [rseq_cs_offset] "n"(offsetof(kernel_rseq, rseq_cs)),
+ [rseq_sig] "in"(TCMALLOC_PERCPU_RSEQ_SIGNATURE)
+ : "cc", "memory"
+#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
+ : overflow_label
+#endif
+ );
+#if !TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO
+ if (ABSL_PREDICT_FALSE(overflow)) {
+ goto overflow_label;
+ }
+#endif
+ return 0;
+overflow_label:
+#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
+ // As of 3/2020, LLVM's asm goto (even with output constraints) only provides
+ // values for the fallthrough path. The values on the taken branches are
+ // undefined.
+ int cpu = VirtualRseqCpuId(virtual_cpu_id_offset);
+#else
+ // With asm goto--without output constraints--the value of scratch is
+ // well-defined by the compiler and our implementation. As an optimization on
+ // this case, we can avoid looking up cpu_id again, by undoing the
+ // transformation of cpu_id to the value of scratch.
+ int cpu = cpu_id;
+#endif
+ return f(cpu, cl, item);
+}
+#endif // defined (__aarch64__)
+
+template <size_t NumClasses>
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE bool TcmallocSlab<NumClasses>::Push(
size_t cl, void* item, OverflowHandler f) {
ASSERT(item != nullptr);
-#if defined(__x86_64__) || defined(__aarch64__)
- return TcmallocSlab_Internal_Push<NumClasses>(slabs_, cl, item, shift_, f,
- virtual_cpu_id_offset_) >= 0;
+#if defined(__x86_64__) || defined(__aarch64__)
+ return TcmallocSlab_Internal_Push<NumClasses>(slabs_, cl, item, shift_, f,
+ virtual_cpu_id_offset_) >= 0;
#else
- if (shift_ == TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT) {
- return TcmallocSlab_Internal_Push_FixedShift(slabs_, cl, item, f) >= 0;
+ if (shift_ == TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT) {
+ return TcmallocSlab_Internal_Push_FixedShift(slabs_, cl, item, f) >= 0;
} else {
- return TcmallocSlab_Internal_Push(slabs_, cl, item, shift_, f) >= 0;
+ return TcmallocSlab_Internal_Push(slabs_, cl, item, shift_, f) >= 0;
}
#endif
}
#if defined(__x86_64__)
-template <size_t NumClasses>
-static inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* TcmallocSlab_Internal_Pop(
- typename TcmallocSlab<NumClasses>::Slabs* slabs, size_t cl,
- UnderflowHandler f, const size_t shift,
- const size_t virtual_cpu_id_offset) {
+template <size_t NumClasses>
+static inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* TcmallocSlab_Internal_Pop(
+ typename TcmallocSlab<NumClasses>::Slabs* slabs, size_t cl,
+ UnderflowHandler f, const size_t shift,
+ const size_t virtual_cpu_id_offset) {
void* result;
void* scratch;
uintptr_t current;
@@ -550,10 +550,10 @@ static inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* TcmallocSlab_Internal_Pop(
// for relocations, but could be read-only for non-PIE builds.
".pushsection __rseq_cs, \"aw?\"\n"
".balign 32\n"
- ".local __rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
- ".type __rseq_cs_TcmallocSlab_Internal_Pop_%=,@object\n"
- ".size __rseq_cs_TcmallocSlab_Internal_Pop_%=,32\n"
- "__rseq_cs_TcmallocSlab_Internal_Pop_%=:\n"
+ ".local __rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
+ ".type __rseq_cs_TcmallocSlab_Internal_Pop_%=,@object\n"
+ ".size __rseq_cs_TcmallocSlab_Internal_Pop_%=,32\n"
+ "__rseq_cs_TcmallocSlab_Internal_Pop_%=:\n"
".long 0x0\n"
".long 0x0\n"
".quad 4f\n"
@@ -566,31 +566,31 @@ static inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* TcmallocSlab_Internal_Pop(
".pushsection __rseq_cs_ptr_array, \"aw?\"\n"
"1:\n"
".balign 8;"
- ".quad __rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
+ ".quad __rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
// Force this section to be retained. It is for debugging, but is
// otherwise not referenced.
".popsection\n"
".pushsection .text.unlikely, \"ax?\"\n"
".byte 0x0f, 0x1f, 0x05\n"
".long %c[rseq_sig]\n"
- ".local TcmallocSlab_Internal_Pop_trampoline_%=\n"
- ".type TcmallocSlab_Internal_Pop_trampoline_%=,@function\n"
- "TcmallocSlab_Internal_Pop_trampoline_%=:\n"
+ ".local TcmallocSlab_Internal_Pop_trampoline_%=\n"
+ ".type TcmallocSlab_Internal_Pop_trampoline_%=,@function\n"
+ "TcmallocSlab_Internal_Pop_trampoline_%=:\n"
"2:\n"
"jmp 3f\n"
- ".size TcmallocSlab_Internal_Pop_trampoline_%=, . - "
- "TcmallocSlab_Internal_Pop_trampoline_%=;\n"
+ ".size TcmallocSlab_Internal_Pop_trampoline_%=, . - "
+ "TcmallocSlab_Internal_Pop_trampoline_%=;\n"
".popsection\n"
// Prepare
"3:\n"
- "lea __rseq_cs_TcmallocSlab_Internal_Pop_%=(%%rip), %[scratch];\n"
+ "lea __rseq_cs_TcmallocSlab_Internal_Pop_%=(%%rip), %[scratch];\n"
"mov %[scratch], %c[rseq_cs_offset](%[rseq_abi])\n"
// Start
"4:\n"
// scratch = __rseq_abi.cpu_id;
"movzwl (%[rseq_abi], %[rseq_cpu_offset]), %k[scratch]\n"
// scratch = slabs + scratch
- "shlq %b[shift], %[scratch]\n"
+ "shlq %b[shift], %[scratch]\n"
"add %[slabs], %[scratch]\n"
// current = scratch->header[cl].current;
"movzwq (%[scratch], %[cl], 8), %[current]\n"
@@ -623,8 +623,8 @@ static inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* TcmallocSlab_Internal_Pop(
[scratch] "=&r"(scratch), [current] "=&r"(current)
: [rseq_abi] "r"(&__rseq_abi),
[rseq_cs_offset] "n"(offsetof(kernel_rseq, rseq_cs)),
- [rseq_cpu_offset] "r"(virtual_cpu_id_offset),
- [rseq_sig] "n"(TCMALLOC_PERCPU_RSEQ_SIGNATURE), [shift] "c"(shift),
+ [rseq_cpu_offset] "r"(virtual_cpu_id_offset),
+ [rseq_sig] "n"(TCMALLOC_PERCPU_RSEQ_SIGNATURE), [shift] "c"(shift),
[slabs] "r"(slabs), [cl] "r"(cl)
: "cc", "memory"
#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
@@ -643,166 +643,166 @@ underflow_path:
// As of 3/2020, LLVM's asm goto (even with output constraints) only provides
// values for the fallthrough path. The values on the taken branches are
// undefined.
- int cpu = VirtualRseqCpuId(virtual_cpu_id_offset);
+ int cpu = VirtualRseqCpuId(virtual_cpu_id_offset);
#else
// With asm goto--without output constraints--the value of scratch is
// well-defined by the compiler and our implementation. As an optimization on
// this case, we can avoid looking up cpu_id again, by undoing the
// transformation of cpu_id to the value of scratch.
- int cpu =
- (reinterpret_cast<char*>(scratch) - reinterpret_cast<char*>(slabs)) >>
- shift;
+ int cpu =
+ (reinterpret_cast<char*>(scratch) - reinterpret_cast<char*>(slabs)) >>
+ shift;
#endif
return f(cpu, cl);
}
#endif // defined(__x86_64__)
-#if defined(__aarch64__)
-template <size_t NumClasses>
-static inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* TcmallocSlab_Internal_Pop(
- typename TcmallocSlab<NumClasses>::Slabs* slabs, size_t cl,
- UnderflowHandler f, const size_t shift,
- const size_t virtual_cpu_id_offset) {
- void* result;
- void* region_start;
- uint64_t cpu_id;
- void* begin_ptr;
- uintptr_t current;
- uintptr_t new_current;
- uintptr_t begin;
- // Multiply cl by the bytesize of each header
- size_t cl_lsl3 = cl * 8;
-#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
- asm goto
-#else
- bool underflow;
- asm
-#endif
- (
- // TODO(b/141629158): __rseq_cs only needs to be writeable to allow
- // for relocations, but could be read-only for non-PIE builds.
- ".pushsection __rseq_cs, \"aw?\"\n"
- ".balign 32\n"
- ".local __rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
- ".type __rseq_cs_TcmallocSlab_Internal_Pop_%=,@object\n"
- ".size __rseq_cs_TcmallocSlab_Internal_Pop_%=,32\n"
- "__rseq_cs_TcmallocSlab_Internal_Pop_%=:\n"
- ".long 0x0\n"
- ".long 0x0\n"
- ".quad 4f\n"
- ".quad 5f - 4f\n"
- ".quad 2f\n"
- ".popsection\n"
-#if !defined(__clang_major__) || __clang_major__ >= 9
- ".reloc 0, R_AARCH64_NONE, 1f\n"
-#endif
- ".pushsection __rseq_cs_ptr_array, \"aw?\"\n"
- "1:\n"
- ".balign 8;"
- ".quad __rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
- // Force this section to be retained. It is for debugging, but is
- // otherwise not referenced.
- ".popsection\n"
- ".pushsection .text.unlikely, \"ax?\"\n"
- ".long %c[rseq_sig]\n"
- ".local TcmallocSlab_Internal_Pop_trampoline_%=\n"
- ".type TcmallocSlab_Internal_Pop_trampoline_%=,@function\n"
- "TcmallocSlab_Internal_Pop_trampoline_%=:\n"
- "2:\n"
- "b 3f\n"
- ".popsection\n"
- // Prepare
- "3:\n"
- // Use current as scratch here to hold address of this function's
- // critical section
- "adrp %[current], __rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
- "add %[current], %[current], "
- ":lo12:__rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
- "str %[current], [%[rseq_abi], %c[rseq_cs_offset]]\n"
- // Start
- "4:\n"
- // cpu_id = __rseq_abi.cpu_id;
- "ldr %w[cpu_id], [%[rseq_abi], %[rseq_cpu_offset]]\n"
- // region_start = Start of cpu region
- "lsl %[region_start], %[cpu_id], %[shift]\n"
- "add %[region_start], %[region_start], %[slabs]\n"
- // begin_ptr = &(slab_headers[0]->begin)
- "add %[begin_ptr], %[region_start], #4\n"
- // current = slab_headers[cl]->current (current index)
- "ldrh %w[current], [%[region_start], %[cl_lsl3]]\n"
- // begin = slab_headers[cl]->begin (begin index)
- "ldrh %w[begin], [%[begin_ptr], %[cl_lsl3]]\n"
-        // if (ABSL_PREDICT_FALSE(begin >= current)) { goto underflow; }
- "cmp %w[begin], %w[current]\n"
- "sub %w[new_current], %w[current], #1\n"
-#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
- "b.ge %l[underflow_path]\n"
-#else
- "b.ge 5f\n"
- // Important! code below this must not affect any flags (i.e.: ccbe)
- // If so, the above code needs to explicitly set a ccbe return value.
-#endif
- // current--
- "ldr %[result], [%[region_start], %[new_current], LSL #3]\n"
- "strh %w[new_current], [%[region_start], %[cl_lsl3]]\n"
- // Commit
- "5:\n"
- :
-#if !TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
- [underflow] "=@ccbe"(underflow),
-#endif
- [result] "=&r"(result),
- // Temps
- [cpu_id] "=&r"(cpu_id), [region_start] "=&r"(region_start),
- [begin] "=&r"(begin), [current] "=&r"(current),
- [new_current] "=&r"(new_current), [begin_ptr] "=&r"(begin_ptr)
- // Real inputs
- : [rseq_cpu_offset] "r"(virtual_cpu_id_offset), [slabs] "r"(slabs),
- [cl_lsl3] "r"(cl_lsl3), [rseq_abi] "r"(&__rseq_abi),
- [shift] "r"(shift),
- // constants
- [rseq_cs_offset] "in"(offsetof(kernel_rseq, rseq_cs)),
- [rseq_sig] "in"(TCMALLOC_PERCPU_RSEQ_SIGNATURE)
- : "cc", "memory"
-#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
- : underflow_path
-#endif
- );
-#if !TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
- if (ABSL_PREDICT_FALSE(underflow)) {
- goto underflow_path;
- }
-#endif
-
- return result;
-underflow_path:
-#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
- // As of 3/2020, LLVM's asm goto (even with output constraints) only provides
- // values for the fallthrough path. The values on the taken branches are
- // undefined.
- int cpu = VirtualRseqCpuId(virtual_cpu_id_offset);
-#else
- // With asm goto--without output constraints--the value of scratch is
- // well-defined by the compiler and our implementation. As an optimization on
- // this case, we can avoid looking up cpu_id again, by undoing the
- // transformation of cpu_id to the value of scratch.
- int cpu = cpu_id;
-#endif
- return f(cpu, cl);
-}
-#endif // defined(__aarch64__)
-
-template <size_t NumClasses>
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* TcmallocSlab<NumClasses>::Pop(
+#if defined(__aarch64__)
+template <size_t NumClasses>
+static inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* TcmallocSlab_Internal_Pop(
+ typename TcmallocSlab<NumClasses>::Slabs* slabs, size_t cl,
+ UnderflowHandler f, const size_t shift,
+ const size_t virtual_cpu_id_offset) {
+ void* result;
+ void* region_start;
+ uint64_t cpu_id;
+ void* begin_ptr;
+ uintptr_t current;
+ uintptr_t new_current;
+ uintptr_t begin;
+ // Multiply cl by the bytesize of each header
+ size_t cl_lsl3 = cl * 8;
+#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
+ asm goto
+#else
+ bool underflow;
+ asm
+#endif
+ (
+ // TODO(b/141629158): __rseq_cs only needs to be writeable to allow
+ // for relocations, but could be read-only for non-PIE builds.
+ ".pushsection __rseq_cs, \"aw?\"\n"
+ ".balign 32\n"
+ ".local __rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
+ ".type __rseq_cs_TcmallocSlab_Internal_Pop_%=,@object\n"
+ ".size __rseq_cs_TcmallocSlab_Internal_Pop_%=,32\n"
+ "__rseq_cs_TcmallocSlab_Internal_Pop_%=:\n"
+ ".long 0x0\n"
+ ".long 0x0\n"
+ ".quad 4f\n"
+ ".quad 5f - 4f\n"
+ ".quad 2f\n"
+ ".popsection\n"
+#if !defined(__clang_major__) || __clang_major__ >= 9
+ ".reloc 0, R_AARCH64_NONE, 1f\n"
+#endif
+ ".pushsection __rseq_cs_ptr_array, \"aw?\"\n"
+ "1:\n"
+ ".balign 8;"
+ ".quad __rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
+ // Force this section to be retained. It is for debugging, but is
+ // otherwise not referenced.
+ ".popsection\n"
+ ".pushsection .text.unlikely, \"ax?\"\n"
+ ".long %c[rseq_sig]\n"
+ ".local TcmallocSlab_Internal_Pop_trampoline_%=\n"
+ ".type TcmallocSlab_Internal_Pop_trampoline_%=,@function\n"
+ "TcmallocSlab_Internal_Pop_trampoline_%=:\n"
+ "2:\n"
+ "b 3f\n"
+ ".popsection\n"
+ // Prepare
+ "3:\n"
+ // Use current as scratch here to hold address of this function's
+ // critical section
+ "adrp %[current], __rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
+ "add %[current], %[current], "
+ ":lo12:__rseq_cs_TcmallocSlab_Internal_Pop_%=\n"
+ "str %[current], [%[rseq_abi], %c[rseq_cs_offset]]\n"
+ // Start
+ "4:\n"
+ // cpu_id = __rseq_abi.cpu_id;
+ "ldr %w[cpu_id], [%[rseq_abi], %[rseq_cpu_offset]]\n"
+ // region_start = Start of cpu region
+ "lsl %[region_start], %[cpu_id], %[shift]\n"
+ "add %[region_start], %[region_start], %[slabs]\n"
+ // begin_ptr = &(slab_headers[0]->begin)
+ "add %[begin_ptr], %[region_start], #4\n"
+ // current = slab_headers[cl]->current (current index)
+ "ldrh %w[current], [%[region_start], %[cl_lsl3]]\n"
+ // begin = slab_headers[cl]->begin (begin index)
+ "ldrh %w[begin], [%[begin_ptr], %[cl_lsl3]]\n"
+        // if (ABSL_PREDICT_FALSE(begin >= current)) { goto underflow; }
+ "cmp %w[begin], %w[current]\n"
+ "sub %w[new_current], %w[current], #1\n"
+#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
+ "b.ge %l[underflow_path]\n"
+#else
+ "b.ge 5f\n"
+ // Important! code below this must not affect any flags (i.e.: ccbe)
+ // If so, the above code needs to explicitly set a ccbe return value.
+#endif
+ // current--
+ "ldr %[result], [%[region_start], %[new_current], LSL #3]\n"
+ "strh %w[new_current], [%[region_start], %[cl_lsl3]]\n"
+ // Commit
+ "5:\n"
+ :
+#if !TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
+ [underflow] "=@ccbe"(underflow),
+#endif
+ [result] "=&r"(result),
+ // Temps
+ [cpu_id] "=&r"(cpu_id), [region_start] "=&r"(region_start),
+ [begin] "=&r"(begin), [current] "=&r"(current),
+ [new_current] "=&r"(new_current), [begin_ptr] "=&r"(begin_ptr)
+ // Real inputs
+ : [rseq_cpu_offset] "r"(virtual_cpu_id_offset), [slabs] "r"(slabs),
+ [cl_lsl3] "r"(cl_lsl3), [rseq_abi] "r"(&__rseq_abi),
+ [shift] "r"(shift),
+ // constants
+ [rseq_cs_offset] "in"(offsetof(kernel_rseq, rseq_cs)),
+ [rseq_sig] "in"(TCMALLOC_PERCPU_RSEQ_SIGNATURE)
+ : "cc", "memory"
+#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
+ : underflow_path
+#endif
+ );
+#if !TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
+ if (ABSL_PREDICT_FALSE(underflow)) {
+ goto underflow_path;
+ }
+#endif
+
+ return result;
+underflow_path:
+#if TCMALLOC_PERCPU_USE_RSEQ_ASM_GOTO_OUTPUT
+ // As of 3/2020, LLVM's asm goto (even with output constraints) only provides
+ // values for the fallthrough path. The values on the taken branches are
+ // undefined.
+ int cpu = VirtualRseqCpuId(virtual_cpu_id_offset);
+#else
+ // With asm goto--without output constraints--the value of scratch is
+ // well-defined by the compiler and our implementation. As an optimization on
+ // this case, we can avoid looking up cpu_id again, by undoing the
+ // transformation of cpu_id to the value of scratch.
+ int cpu = cpu_id;
+#endif
+ return f(cpu, cl);
+}
+#endif // defined(__aarch64__)
+
+template <size_t NumClasses>
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* TcmallocSlab<NumClasses>::Pop(
size_t cl, UnderflowHandler f) {
-#if defined(__x86_64__) || defined(__aarch64__)
- return TcmallocSlab_Internal_Pop<NumClasses>(slabs_, cl, f, shift_,
- virtual_cpu_id_offset_);
+#if defined(__x86_64__) || defined(__aarch64__)
+ return TcmallocSlab_Internal_Pop<NumClasses>(slabs_, cl, f, shift_,
+ virtual_cpu_id_offset_);
#else
- if (shift_ == TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT) {
- return TcmallocSlab_Internal_Pop_FixedShift(slabs_, cl, f);
+ if (shift_ == TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT) {
+ return TcmallocSlab_Internal_Pop_FixedShift(slabs_, cl, f);
} else {
- return TcmallocSlab_Internal_Pop(slabs_, cl, f, shift_);
+ return TcmallocSlab_Internal_Pop(slabs_, cl, f, shift_);
}
#endif
}
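Pop() mirrors Push(): the assembly reads the top element and then publishes the decremented index as its commit. A minimal sketch with hypothetical names; it also shows why the NoopUnderflow handler in the next hunk can simply report nullptr.

#include <cstdint>

// Returns nullptr on underflow, matching what NoopUnderflow reports to callers.
inline void* PopSketch(void** slab, uint16_t* hdr_current, uint16_t hdr_begin) {
  if (*hdr_current <= hdr_begin) return nullptr;  // this size class is empty
  void* result = slab[*hdr_current - 1];          // read the top element
  *hdr_current = *hdr_current - 1;                // the committing store
  return result;
}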
@@ -811,24 +811,24 @@ static inline void* NoopUnderflow(int cpu, size_t cl) { return nullptr; }
static inline int NoopOverflow(int cpu, size_t cl, void* item) { return -1; }
-template <size_t NumClasses>
-inline size_t TcmallocSlab<NumClasses>::PushBatch(size_t cl, void** batch,
- size_t len) {
+template <size_t NumClasses>
+inline size_t TcmallocSlab<NumClasses>::PushBatch(size_t cl, void** batch,
+ size_t len) {
ASSERT(len != 0);
- if (shift_ == TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT) {
+ if (shift_ == TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT) {
#if TCMALLOC_PERCPU_USE_RSEQ
- // TODO(b/159923407): TcmallocSlab_Internal_PushBatch_FixedShift needs to be
- // refactored to take a 5th parameter (virtual_cpu_id_offset) to avoid
- // needing to dispatch on two separate versions of the same function with
- // only minor differences between them.
- switch (virtual_cpu_id_offset_) {
+ // TODO(b/159923407): TcmallocSlab_Internal_PushBatch_FixedShift needs to be
+ // refactored to take a 5th parameter (virtual_cpu_id_offset) to avoid
+ // needing to dispatch on two separate versions of the same function with
+ // only minor differences between them.
+ switch (virtual_cpu_id_offset_) {
case offsetof(kernel_rseq, cpu_id):
- return TcmallocSlab_Internal_PushBatch_FixedShift(slabs_, cl, batch,
- len);
+ return TcmallocSlab_Internal_PushBatch_FixedShift(slabs_, cl, batch,
+ len);
#ifdef __x86_64__
case offsetof(kernel_rseq, vcpu_id):
- return TcmallocSlab_Internal_PushBatch_FixedShift_VCPU(slabs_, cl,
- batch, len);
+ return TcmallocSlab_Internal_PushBatch_FixedShift_VCPU(slabs_, cl,
+ batch, len);
#endif // __x86_64__
default:
__builtin_unreachable();
@@ -846,25 +846,25 @@ inline size_t TcmallocSlab<NumClasses>::PushBatch(size_t cl, void** batch,
}
}
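For completeness, the batch counterpart of the sketches above: the PushBatch_FixedShift assembly earlier fills free slots from the tail of the caller's batch and commits the new top index once. A minimal sketch with hypothetical names; `current` and `end` are element indices from the slab header.

#include <algorithm>
#include <cstddef>

// Returns the number of items actually pushed (0 when the size class is full).
inline size_t PushBatchSketch(void** slab, size_t* current, size_t end,
                              void** batch, size_t len) {
  if (*current >= end) return 0;               // full: nothing fits
  size_t n = std::min(len, end - *current);    // how many items fit
  for (size_t i = 0; i < n; ++i) {
    slab[*current + i] = batch[len - 1 - i];   // consume the batch from its tail
  }
  *current += n;                               // the committing store
  return n;
}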
-template <size_t NumClasses>
-inline size_t TcmallocSlab<NumClasses>::PopBatch(size_t cl, void** batch,
- size_t len) {
+template <size_t NumClasses>
+inline size_t TcmallocSlab<NumClasses>::PopBatch(size_t cl, void** batch,
+ size_t len) {
ASSERT(len != 0);
size_t n = 0;
- if (shift_ == TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT) {
+ if (shift_ == TCMALLOC_PERCPU_TCMALLOC_FIXED_SLAB_SHIFT) {
#if TCMALLOC_PERCPU_USE_RSEQ
- // TODO(b/159923407): TcmallocSlab_Internal_PopBatch_FixedShift needs to be
- // refactored to take a 5th parameter (virtual_cpu_id_offset) to avoid
- // needing to dispatch on two separate versions of the same function with
- // only minor differences between them.
- switch (virtual_cpu_id_offset_) {
+ // TODO(b/159923407): TcmallocSlab_Internal_PopBatch_FixedShift needs to be
+ // refactored to take a 5th parameter (virtual_cpu_id_offset) to avoid
+ // needing to dispatch on two separate versions of the same function with
+ // only minor differences between them.
+ switch (virtual_cpu_id_offset_) {
case offsetof(kernel_rseq, cpu_id):
- n = TcmallocSlab_Internal_PopBatch_FixedShift(slabs_, cl, batch, len);
+ n = TcmallocSlab_Internal_PopBatch_FixedShift(slabs_, cl, batch, len);
break;
#ifdef __x86_64__
case offsetof(kernel_rseq, vcpu_id):
- n = TcmallocSlab_Internal_PopBatch_FixedShift_VCPU(slabs_, cl, batch,
- len);
+ n = TcmallocSlab_Internal_PopBatch_FixedShift_VCPU(slabs_, cl, batch,
+ len);
break;
#endif // __x86_64__
default:
@@ -886,77 +886,77 @@ inline size_t TcmallocSlab<NumClasses>::PopBatch(size_t cl, void** batch,
return n;
}
-template <size_t NumClasses>
-inline typename TcmallocSlab<NumClasses>::Slabs*
-TcmallocSlab<NumClasses>::CpuMemoryStart(int cpu) const {
- char* const bytes = reinterpret_cast<char*>(slabs_);
- return reinterpret_cast<Slabs*>(&bytes[cpu << shift_]);
+template <size_t NumClasses>
+inline typename TcmallocSlab<NumClasses>::Slabs*
+TcmallocSlab<NumClasses>::CpuMemoryStart(int cpu) const {
+ char* const bytes = reinterpret_cast<char*>(slabs_);
+ return reinterpret_cast<Slabs*>(&bytes[cpu << shift_]);
}
-template <size_t NumClasses>
-inline std::atomic<int64_t>* TcmallocSlab<NumClasses>::GetHeader(
+template <size_t NumClasses>
+inline std::atomic<int64_t>* TcmallocSlab<NumClasses>::GetHeader(
int cpu, size_t cl) const {
return &CpuMemoryStart(cpu)->header[cl];
}
-template <size_t NumClasses>
-inline typename TcmallocSlab<NumClasses>::Header
-TcmallocSlab<NumClasses>::LoadHeader(std::atomic<int64_t>* hdrp) {
- return absl::bit_cast<Header>(hdrp->load(std::memory_order_relaxed));
+template <size_t NumClasses>
+inline typename TcmallocSlab<NumClasses>::Header
+TcmallocSlab<NumClasses>::LoadHeader(std::atomic<int64_t>* hdrp) {
+ return absl::bit_cast<Header>(hdrp->load(std::memory_order_relaxed));
}
-template <size_t NumClasses>
-inline void TcmallocSlab<NumClasses>::StoreHeader(std::atomic<int64_t>* hdrp,
- Header hdr) {
- hdrp->store(absl::bit_cast<int64_t>(hdr), std::memory_order_relaxed);
+template <size_t NumClasses>
+inline void TcmallocSlab<NumClasses>::StoreHeader(std::atomic<int64_t>* hdrp,
+ Header hdr) {
+ hdrp->store(absl::bit_cast<int64_t>(hdr), std::memory_order_relaxed);
}
-template <size_t NumClasses>
-inline int TcmallocSlab<NumClasses>::CompareAndSwapHeader(
- int cpu, std::atomic<int64_t>* hdrp, Header old, Header hdr,
- const size_t virtual_cpu_id_offset) {
-#if __SIZEOF_POINTER__ == 8
- const int64_t old_raw = absl::bit_cast<int64_t>(old);
- const int64_t new_raw = absl::bit_cast<int64_t>(hdr);
+template <size_t NumClasses>
+inline int TcmallocSlab<NumClasses>::CompareAndSwapHeader(
+ int cpu, std::atomic<int64_t>* hdrp, Header old, Header hdr,
+ const size_t virtual_cpu_id_offset) {
+#if __SIZEOF_POINTER__ == 8
+ const int64_t old_raw = absl::bit_cast<int64_t>(old);
+ const int64_t new_raw = absl::bit_cast<int64_t>(hdr);
return CompareAndSwapUnsafe(cpu, hdrp, static_cast<intptr_t>(old_raw),
- static_cast<intptr_t>(new_raw),
- virtual_cpu_id_offset);
+ static_cast<intptr_t>(new_raw),
+ virtual_cpu_id_offset);
#else
Crash(kCrash, __FILE__, __LINE__, "This architecture is not supported.");
#endif
}
-template <size_t NumClasses>
-inline bool TcmallocSlab<NumClasses>::Header::IsLocked() const {
+template <size_t NumClasses>
+inline bool TcmallocSlab<NumClasses>::Header::IsLocked() const {
return begin == 0xffffu;
}
-template <size_t NumClasses>
-inline void TcmallocSlab<NumClasses>::Header::Lock() {
+template <size_t NumClasses>
+inline void TcmallocSlab<NumClasses>::Header::Lock() {
// Write 0xffff to begin and 0 to end. This blocks new Push'es and Pop's.
// Note: we write only 4 bytes. The first 4 bytes are left intact.
// See Drain method for details. tl;dr: C++ does not allow us to legally
// express this without undefined behavior.
- std::atomic<int32_t>* p =
- reinterpret_cast<std::atomic<int32_t>*>(&lock_update);
+ std::atomic<int32_t>* p =
+ reinterpret_cast<std::atomic<int32_t>*>(&lock_update);
Header hdr;
hdr.begin = 0xffffu;
hdr.end = 0;
- p->store(absl::bit_cast<int32_t>(hdr.lock_update), std::memory_order_relaxed);
+ p->store(absl::bit_cast<int32_t>(hdr.lock_update), std::memory_order_relaxed);
}
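
A note on the Lock() trick above: only the begin/end half of the 8-byte header is rewritten, so current and end_copy survive the lock and IsLocked() reduces to begin == 0xffff. A minimal standalone model of that idea (the field names and layout are assumptions inferred from the comments; the real code performs the 4-byte write with a relaxed 32-bit atomic store):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct Header {          // toy stand-in for the per-class header
      uint16_t current;      // first 4 bytes: untouched by Lock()
      uint16_t end_copy;
      uint16_t begin;        // last 4 bytes: the "lock_update" word
      uint16_t end;
    };

    int main() {
      Header h{12, 48, 8, 48};
      const uint16_t locked[2] = {0xffffu, 0};        // begin = 0xffff, end = 0
      std::memcpy(&h.begin, locked, sizeof(locked));  // models the 4-byte store
      assert(h.begin == 0xffffu && h.end == 0);       // IsLocked() now holds
      assert(h.current == 12 && h.end_copy == 48);    // first half left intact
    }
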
-template <size_t NumClasses>
-void TcmallocSlab<NumClasses>::Init(void*(alloc)(size_t size),
- size_t (*capacity)(size_t cl), bool lazy,
- size_t shift) {
-#ifdef __x86_64__
- if (UsingFlatVirtualCpus()) {
- virtual_cpu_id_offset_ = offsetof(kernel_rseq, vcpu_id);
- }
-#endif // __x86_64__
-
- shift_ = shift;
- size_t mem_size = absl::base_internal::NumCPUs() * (1ul << shift);
+template <size_t NumClasses>
+void TcmallocSlab<NumClasses>::Init(void*(alloc)(size_t size),
+ size_t (*capacity)(size_t cl), bool lazy,
+ size_t shift) {
+#ifdef __x86_64__
+ if (UsingFlatVirtualCpus()) {
+ virtual_cpu_id_offset_ = offsetof(kernel_rseq, vcpu_id);
+ }
+#endif // __x86_64__
+
+ shift_ = shift;
+ size_t mem_size = absl::base_internal::NumCPUs() * (1ul << shift);
void* backing = alloc(mem_size);
// MSan does not see writes in assembly.
ANNOTATE_MEMORY_IS_INITIALIZED(backing, mem_size);
@@ -967,7 +967,7 @@ void TcmallocSlab<NumClasses>::Init(void*(alloc)(size_t size),
size_t bytes_used = 0;
for (int cpu = 0; cpu < absl::base_internal::NumCPUs(); ++cpu) {
bytes_used += sizeof(std::atomic<int64_t>) * NumClasses;
- void** elems = CpuMemoryStart(cpu)->mem;
+ void** elems = CpuMemoryStart(cpu)->mem;
for (size_t cl = 0; cl < NumClasses; ++cl) {
size_t cap = capacity(cl);
@@ -1009,7 +1009,7 @@ void TcmallocSlab<NumClasses>::Init(void*(alloc)(size_t size),
elems += cap;
CHECK_CONDITION(reinterpret_cast<char*>(elems) -
reinterpret_cast<char*>(CpuMemoryStart(cpu)) <=
- (1 << shift_));
+ (1 << shift_));
}
}
// Check for less than 90% usage of the reserved memory
@@ -1019,10 +1019,10 @@ void TcmallocSlab<NumClasses>::Init(void*(alloc)(size_t size),
}
}
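
For scale, Init() above reserves NumCPUs * (1 << shift) bytes of virtual address space and then sanity-checks how much of that reservation the size-class arrays actually occupy (the 90% comment). A small back-of-the-envelope, using the test's kShift = 18 and an assumed 64-CPU machine:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t shift = 18, num_cpus = 64;          // example values
      const std::size_t per_cpu = std::size_t{1} << shift;  // 256 KiB per CPU
      std::printf("%zu MiB reserved\n", (num_cpus * per_cpu) >> 20);  // prints 16
    }
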
-template <size_t NumClasses>
-void TcmallocSlab<NumClasses>::InitCPU(int cpu, size_t (*capacity)(size_t cl)) {
- const size_t virtual_cpu_id_offset = virtual_cpu_id_offset_;
-
+template <size_t NumClasses>
+void TcmallocSlab<NumClasses>::InitCPU(int cpu, size_t (*capacity)(size_t cl)) {
+ const size_t virtual_cpu_id_offset = virtual_cpu_id_offset_;
+
// TODO(ckennelly): Consolidate this logic with Drain.
// Phase 1: verify no header is locked
for (size_t cl = 0; cl < NumClasses; ++cl) {
@@ -1040,7 +1040,7 @@ void TcmallocSlab<NumClasses>::InitCPU(int cpu, size_t (*capacity)(size_t cl)) {
// of different sizes.
reinterpret_cast<Header*>(GetHeader(cpu, cl))->Lock();
}
- FenceCpu(cpu, virtual_cpu_id_offset);
+ FenceCpu(cpu, virtual_cpu_id_offset);
done = true;
for (size_t cl = 0; cl < NumClasses; ++cl) {
Header hdr = LoadHeader(GetHeader(cpu, cl));
@@ -1054,7 +1054,7 @@ void TcmallocSlab<NumClasses>::InitCPU(int cpu, size_t (*capacity)(size_t cl)) {
// Phase 3: Initialize prefetch target and compute the offsets for the
// boundaries of each size class' cache.
- void** elems = CpuMemoryStart(cpu)->mem;
+ void** elems = CpuMemoryStart(cpu)->mem;
uint16_t begin[NumClasses];
for (size_t cl = 0; cl < NumClasses; ++cl) {
size_t cap = capacity(cl);
@@ -1076,7 +1076,7 @@ void TcmallocSlab<NumClasses>::InitCPU(int cpu, size_t (*capacity)(size_t cl)) {
elems += cap;
CHECK_CONDITION(reinterpret_cast<char*>(elems) -
reinterpret_cast<char*>(CpuMemoryStart(cpu)) <=
- (1 << shift_));
+ (1 << shift_));
}
// Phase 4: Store current. No restartable sequence will proceed
@@ -1087,7 +1087,7 @@ void TcmallocSlab<NumClasses>::InitCPU(int cpu, size_t (*capacity)(size_t cl)) {
hdr.current = begin[cl];
StoreHeader(hdrp, hdr);
}
- FenceCpu(cpu, virtual_cpu_id_offset);
+ FenceCpu(cpu, virtual_cpu_id_offset);
// Phase 5: Allow access to this cache.
for (size_t cl = 0; cl < NumClasses; ++cl) {
@@ -1100,84 +1100,84 @@ void TcmallocSlab<NumClasses>::InitCPU(int cpu, size_t (*capacity)(size_t cl)) {
}
}
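
InitCPU above (and Drain further down) use the same lock-fence-verify pattern: headers are locked one class at a time, a CPU fence lets any operation already in flight on that CPU observe the locks, and the whole pass repeats if a concurrent Grow/Shrink overwrote one of them. A simplified single-threaded sketch of just that control flow (the names and the simulated lost race are assumptions, not tcmalloc code):

    #include <array>
    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr std::size_t kNumClasses = 4;
      std::array<bool, kNumClasses> locked{};
      int simulated_races = 1;            // pretend one class loses its lock once

      for (bool done = false; !done;) {
        for (auto& l : locked) l = true;  // lock every header, class by class
        // FenceCpu(cpu) would run here so in-flight ops observe the locks.
        if (simulated_races-- > 0) locked[2] = false;  // a Grow/Shrink raced us
        done = true;
        for (bool l : locked) {
          if (!l) done = false;           // a lock was overwritten: retry the pass
        }
      }
      std::puts("all size classes locked");
    }
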
-template <size_t NumClasses>
-void TcmallocSlab<NumClasses>::Destroy(void(free)(void*)) {
+template <size_t NumClasses>
+void TcmallocSlab<NumClasses>::Destroy(void(free)(void*)) {
free(slabs_);
slabs_ = nullptr;
}
-template <size_t NumClasses>
-size_t TcmallocSlab<NumClasses>::ShrinkOtherCache(int cpu, size_t cl,
- size_t len, void* ctx,
- ShrinkHandler f) {
- ASSERT(cpu >= 0);
- ASSERT(cpu < absl::base_internal::NumCPUs());
- const size_t virtual_cpu_id_offset = virtual_cpu_id_offset_;
-
- // Phase 1: Collect begin as it will be overwritten by the lock.
- std::atomic<int64_t>* hdrp = GetHeader(cpu, cl);
- Header hdr = LoadHeader(hdrp);
- CHECK_CONDITION(!hdr.IsLocked());
- const uint16_t begin = hdr.begin;
-
- // Phase 2: stop concurrent mutations.
- for (bool done = false; !done;) {
- reinterpret_cast<Header*>(GetHeader(cpu, cl))->Lock();
- FenceCpu(cpu, virtual_cpu_id_offset);
- done = true;
-
- hdr = LoadHeader(GetHeader(cpu, cl));
- if (!hdr.IsLocked()) {
- // Header was overwritten by Grow/Shrink. Retry.
- done = false;
- }
- }
-
- // Phase 3: If we do not have len number of items to shrink, we try
- // to pop items from the list first to create enough capacity that can be
- // shrunk. If we pop items, we also execute callbacks.
- //
- // We can't write all 4 fields at once with a single write, because Pop does
- // several non-atomic loads of the fields. Consider that a concurrent Pop
- // loads old current (still pointing somewhere in the middle of the region);
- // then we update all fields with a single write; then Pop loads the updated
- // begin which allows it to proceed; then it decrements current below begin.
- //
- // So we instead first just update current--our locked begin/end guarantee
- // no Push/Pop will make progress. Once we Fence below, we know no Push/Pop
- // is using the old current, and can safely update begin/end to be an empty
- // slab.
-
- const uint16_t unused = hdr.end_copy - hdr.current;
- if (unused < len) {
- const uint16_t expected_pop = len - unused;
- const uint16_t actual_pop =
- std::min<uint16_t>(expected_pop, hdr.current - begin);
- void** batch =
- reinterpret_cast<void**>(GetHeader(cpu, 0) + hdr.current - actual_pop);
- f(ctx, cl, batch, actual_pop);
- hdr.current -= actual_pop;
- StoreHeader(hdrp, hdr);
- FenceCpu(cpu, virtual_cpu_id_offset);
- }
-
- // Phase 4: Shrink the capacity. Use a copy of begin and end_copy to
- // restore the header, shrink it, and return the length by which the
- // region was shrunk.
- hdr.begin = begin;
- const uint16_t to_shrink =
- std::min<uint16_t>(len, hdr.end_copy - hdr.current);
- hdr.end_copy -= to_shrink;
- hdr.end = hdr.end_copy;
- StoreHeader(hdrp, hdr);
- return to_shrink;
-}
-
-template <size_t NumClasses>
-void TcmallocSlab<NumClasses>::Drain(int cpu, void* ctx, DrainHandler f) {
+template <size_t NumClasses>
+size_t TcmallocSlab<NumClasses>::ShrinkOtherCache(int cpu, size_t cl,
+ size_t len, void* ctx,
+ ShrinkHandler f) {
+ ASSERT(cpu >= 0);
+ ASSERT(cpu < absl::base_internal::NumCPUs());
+ const size_t virtual_cpu_id_offset = virtual_cpu_id_offset_;
+
+ // Phase 1: Collect begin as it will be overwritten by the lock.
+ std::atomic<int64_t>* hdrp = GetHeader(cpu, cl);
+ Header hdr = LoadHeader(hdrp);
+ CHECK_CONDITION(!hdr.IsLocked());
+ const uint16_t begin = hdr.begin;
+
+ // Phase 2: stop concurrent mutations.
+ for (bool done = false; !done;) {
+ reinterpret_cast<Header*>(GetHeader(cpu, cl))->Lock();
+ FenceCpu(cpu, virtual_cpu_id_offset);
+ done = true;
+
+ hdr = LoadHeader(GetHeader(cpu, cl));
+ if (!hdr.IsLocked()) {
+ // Header was overwritten by Grow/Shrink. Retry.
+ done = false;
+ }
+ }
+
+ // Phase 3: If we do not have len number of items to shrink, we try
+ // to pop items from the list first to create enough capacity that can be
+ // shrunk. If we pop items, we also execute callbacks.
+ //
+ // We can't write all 4 fields at once with a single write, because Pop does
+ // several non-atomic loads of the fields. Consider that a concurrent Pop
+ // loads old current (still pointing somewhere in the middle of the region);
+ // then we update all fields with a single write; then Pop loads the updated
+ // begin which allows it to proceed; then it decrements current below begin.
+ //
+ // So we instead first just update current--our locked begin/end guarantee
+ // no Push/Pop will make progress. Once we Fence below, we know no Push/Pop
+ // is using the old current, and can safely update begin/end to be an empty
+ // slab.
+
+ const uint16_t unused = hdr.end_copy - hdr.current;
+ if (unused < len) {
+ const uint16_t expected_pop = len - unused;
+ const uint16_t actual_pop =
+ std::min<uint16_t>(expected_pop, hdr.current - begin);
+ void** batch =
+ reinterpret_cast<void**>(GetHeader(cpu, 0) + hdr.current - actual_pop);
+ f(ctx, cl, batch, actual_pop);
+ hdr.current -= actual_pop;
+ StoreHeader(hdrp, hdr);
+ FenceCpu(cpu, virtual_cpu_id_offset);
+ }
+
+ // Phase 4: Shrink the capacity. Use a copy of begin and end_copy to
+ // restore the header, shrink it, and return the length by which the
+ // region was shrunk.
+ hdr.begin = begin;
+ const uint16_t to_shrink =
+ std::min<uint16_t>(len, hdr.end_copy - hdr.current);
+ hdr.end_copy -= to_shrink;
+ hdr.end = hdr.end_copy;
+ StoreHeader(hdrp, hdr);
+ return to_shrink;
+}
+
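
A worked example of the ShrinkOtherCache arithmetic above, with assumed numbers: capacity [begin, end_copy) = [8, 48) and current = 44, so only 4 slots are unused; shrinking by len = 10 therefore pops 6 live objects to the handler first, then removes 10 slots of capacity:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint16_t begin = 8, end_copy = 48, len = 10;
      uint16_t current = 44;

      const uint16_t unused = end_copy - current;               // 4 free slots
      const uint16_t actual_pop =
          std::min<uint16_t>(len - unused, current - begin);    // pop 6 objects
      current -= actual_pop;                                    // current = 38
      const uint16_t to_shrink =
          std::min<uint16_t>(len, end_copy - current);          // shrink by 10
      assert(actual_pop == 6 && to_shrink == 10);
    }
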
+template <size_t NumClasses>
+void TcmallocSlab<NumClasses>::Drain(int cpu, void* ctx, DrainHandler f) {
CHECK_CONDITION(cpu >= 0);
CHECK_CONDITION(cpu < absl::base_internal::NumCPUs());
- const size_t virtual_cpu_id_offset = virtual_cpu_id_offset_;
+ const size_t virtual_cpu_id_offset = virtual_cpu_id_offset_;
// Push/Pop/Grow/Shrink can be executed concurrently with Drain.
// That's not an expected case, but it must be handled for correctness.
@@ -1207,7 +1207,7 @@ void TcmallocSlab<NumClasses>::Drain(int cpu, void* ctx, DrainHandler f) {
// of different sizes.
reinterpret_cast<Header*>(GetHeader(cpu, cl))->Lock();
}
- FenceCpu(cpu, virtual_cpu_id_offset);
+ FenceCpu(cpu, virtual_cpu_id_offset);
done = true;
for (size_t cl = 0; cl < NumClasses; ++cl) {
Header hdr = LoadHeader(GetHeader(cpu, cl));
@@ -1250,7 +1250,7 @@ void TcmallocSlab<NumClasses>::Drain(int cpu, void* ctx, DrainHandler f) {
// Phase 5: fence and reset the remaining fields to beginning of the region.
// This allows concurrent mutations again.
- FenceCpu(cpu, virtual_cpu_id_offset);
+ FenceCpu(cpu, virtual_cpu_id_offset);
for (size_t cl = 0; cl < NumClasses; ++cl) {
std::atomic<int64_t>* hdrp = GetHeader(cpu, cl);
Header hdr;
@@ -1262,18 +1262,18 @@ void TcmallocSlab<NumClasses>::Drain(int cpu, void* ctx, DrainHandler f) {
}
}
-template <size_t NumClasses>
-PerCPUMetadataState TcmallocSlab<NumClasses>::MetadataMemoryUsage() const {
+template <size_t NumClasses>
+PerCPUMetadataState TcmallocSlab<NumClasses>::MetadataMemoryUsage() const {
PerCPUMetadataState result;
- result.virtual_size = absl::base_internal::NumCPUs() * (1ul << shift_);
+ result.virtual_size = absl::base_internal::NumCPUs() * (1ul << shift_);
result.resident_size = MInCore::residence(slabs_, result.virtual_size);
return result;
}
} // namespace percpu
} // namespace subtle
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_PERCPU_TCMALLOC_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_tcmalloc_test.cc b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_tcmalloc_test.cc
index 39f07fbe67..d10ca98dd0 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/percpu_tcmalloc_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/percpu_tcmalloc_test.cc
@@ -36,15 +36,15 @@
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
-#include "benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
#include "tcmalloc/internal/config.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/util.h"
#include "tcmalloc/malloc_extension.h"
-#include "tcmalloc/testing/testutil.h"
+#include "tcmalloc/testing/testutil.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace subtle {
namespace percpu {
namespace {
@@ -96,8 +96,8 @@ void RunOnSingleCpu(std::function<bool(int)> test) {
constexpr size_t kStressSlabs = 4;
constexpr size_t kStressCapacity = 4;
-constexpr size_t kShift = 18;
-typedef class TcmallocSlab<kStressSlabs> TcmallocSlab;
+constexpr size_t kShift = 18;
+typedef class TcmallocSlab<kStressSlabs> TcmallocSlab;
enum class SlabInit {
kEager,
@@ -110,12 +110,12 @@ class TcmallocSlabTest : public testing::TestWithParam<SlabInit> {
slab_test_ = &slab_;
metadata_bytes_ = 0;
-// Ignore false-positive warning in GCC. For more information, see:
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96003
-#pragma GCC diagnostic ignored "-Wnonnull"
+// Ignore false-positive warning in GCC. For more information, see:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96003
+#pragma GCC diagnostic ignored "-Wnonnull"
slab_.Init(
&ByteCountingMalloc, [](size_t cl) { return kCapacity; },
- GetParam() == SlabInit::kLazy, kShift);
+ GetParam() == SlabInit::kLazy, kShift);
for (int i = 0; i < kCapacity; ++i) {
object_ptrs_[i] = &objects_[i];
@@ -267,14 +267,14 @@ TEST_P(TcmallocSlabTest, Unit) {
for (auto cpu : AllowedCpus()) {
SCOPED_TRACE(cpu);
- // Temporarily fake being on the given CPU.
- ScopedFakeCpuId fake_cpu_id(cpu);
-
-#if !defined(__ppc__)
+ // Temporarily fake being on the given CPU.
+ ScopedFakeCpuId fake_cpu_id(cpu);
+
+#if !defined(__ppc__)
if (UsingFlatVirtualCpus()) {
-#if TCMALLOC_PERCPU_USE_RSEQ
+#if TCMALLOC_PERCPU_USE_RSEQ
__rseq_abi.vcpu_id = cpu ^ 1;
-#endif
+#endif
cpu = cpu ^ 1;
}
#endif
@@ -288,7 +288,7 @@ TEST_P(TcmallocSlabTest, Unit) {
// This is imperfect but the window between operations below is small. We
// can make this more precise around individual operations if we see
// measurable flakiness as a result.
- if (fake_cpu_id.Tampered()) break;
+ if (fake_cpu_id.Tampered()) break;
#endif
// Check new slab state.
@@ -296,7 +296,7 @@ TEST_P(TcmallocSlabTest, Unit) {
ASSERT_EQ(slab_.Capacity(cpu, cl), 0);
if (!initialized[cpu]) {
-#pragma GCC diagnostic ignored "-Wnonnull"
+#pragma GCC diagnostic ignored "-Wnonnull"
void* ptr = slab_.Pop(cl, [](int cpu, size_t cl) {
slab_test_->InitCPU(cpu, [](size_t cl) { return kCapacity; });
@@ -506,7 +506,7 @@ static void StressThread(size_t thread_id, TcmallocSlab* slab,
absl::BitGen rnd(absl::SeedSeq({thread_id}));
while (!*stop) {
size_t cl = absl::Uniform<int32_t>(rnd, 0, kStressSlabs);
- const int what = absl::Uniform<int32_t>(rnd, 0, 91);
+ const int what = absl::Uniform<int32_t>(rnd, 0, 91);
if (what < 10) {
if (!block->empty()) {
if (slab->Push(cl, block->back(), &Handler::Overflow)) {
@@ -554,14 +554,14 @@ static void StressThread(size_t thread_id, TcmallocSlab* slab,
}
}
if (n != 0) {
- size_t res = slab->Grow(slab->GetCurrentVirtualCpuUnsafe(), cl, n,
- kStressCapacity);
+ size_t res = slab->Grow(slab->GetCurrentVirtualCpuUnsafe(), cl, n,
+ kStressCapacity);
EXPECT_LE(res, n);
capacity->fetch_add(n - res);
}
} else if (what < 60) {
size_t n =
- slab->Shrink(slab->GetCurrentVirtualCpuUnsafe(), cl,
+ slab->Shrink(slab->GetCurrentVirtualCpuUnsafe(), cl,
absl::Uniform<int32_t>(rnd, 0, kStressCapacity) + 1);
capacity->fetch_add(n);
} else if (what < 70) {
@@ -572,37 +572,37 @@ static void StressThread(size_t thread_id, TcmallocSlab* slab,
size_t cap = slab->Capacity(
absl::Uniform<int32_t>(rnd, 0, absl::base_internal::NumCPUs()), cl);
EXPECT_LE(cap, kStressCapacity);
- } else if (what < 90) {
- struct Context {
- std::vector<void*>* block;
- std::atomic<size_t>* capacity;
- };
- Context ctx = {block, capacity};
- int cpu = absl::Uniform<int32_t>(rnd, 0, absl::base_internal::NumCPUs());
- if (mutexes->at(cpu).TryLock()) {
- size_t to_shrink = absl::Uniform<int32_t>(rnd, 0, kStressCapacity) + 1;
- size_t total_shrunk = slab->ShrinkOtherCache(
- cpu, cl, to_shrink, &ctx,
- [](void* arg, size_t cl, void** batch, size_t n) {
- Context* ctx = static_cast<Context*>(arg);
- EXPECT_LT(cl, kStressSlabs);
- EXPECT_LE(n, kStressCapacity);
- for (size_t i = 0; i < n; ++i) {
- EXPECT_NE(batch[i], nullptr);
- ctx->block->push_back(batch[i]);
- }
- });
- EXPECT_LE(total_shrunk, to_shrink);
- EXPECT_LE(0, total_shrunk);
- capacity->fetch_add(total_shrunk);
- mutexes->at(cpu).Unlock();
- }
+ } else if (what < 90) {
+ struct Context {
+ std::vector<void*>* block;
+ std::atomic<size_t>* capacity;
+ };
+ Context ctx = {block, capacity};
+ int cpu = absl::Uniform<int32_t>(rnd, 0, absl::base_internal::NumCPUs());
+ if (mutexes->at(cpu).TryLock()) {
+ size_t to_shrink = absl::Uniform<int32_t>(rnd, 0, kStressCapacity) + 1;
+ size_t total_shrunk = slab->ShrinkOtherCache(
+ cpu, cl, to_shrink, &ctx,
+ [](void* arg, size_t cl, void** batch, size_t n) {
+ Context* ctx = static_cast<Context*>(arg);
+ EXPECT_LT(cl, kStressSlabs);
+ EXPECT_LE(n, kStressCapacity);
+ for (size_t i = 0; i < n; ++i) {
+ EXPECT_NE(batch[i], nullptr);
+ ctx->block->push_back(batch[i]);
+ }
+ });
+ EXPECT_LE(total_shrunk, to_shrink);
+ EXPECT_LE(0, total_shrunk);
+ capacity->fetch_add(total_shrunk);
+ mutexes->at(cpu).Unlock();
+ }
} else {
struct Context {
std::vector<void*>* block;
std::atomic<size_t>* capacity;
};
- Context ctx = {block, capacity};
+ Context ctx = {block, capacity};
int cpu = absl::Uniform<int32_t>(rnd, 0, absl::base_internal::NumCPUs());
if (mutexes->at(cpu).TryLock()) {
slab->Drain(
@@ -646,8 +646,8 @@ TEST(TcmallocSlab, Stress) {
TcmallocSlab slab;
slab.Init(
allocator,
- [](size_t cl) { return cl < kStressSlabs ? kStressCapacity : 0; }, false,
- kShift);
+ [](size_t cl) { return cl < kStressSlabs ? kStressCapacity : 0; }, false,
+ kShift);
std::vector<std::thread> threads;
const int n_threads = 2 * absl::base_internal::NumCPUs();
@@ -799,12 +799,12 @@ static void BM_PushPop(benchmark::State& state) {
RunOnSingleCpu([&](int this_cpu) {
const int kBatchSize = 32;
TcmallocSlab slab;
-
-#pragma GCC diagnostic ignored "-Wnonnull"
+
+#pragma GCC diagnostic ignored "-Wnonnull"
slab.Init(
- allocator, [](size_t cl) -> size_t { return kBatchSize; }, false,
- kShift);
-
+ allocator, [](size_t cl) -> size_t { return kBatchSize; }, false,
+ kShift);
+
CHECK_CONDITION(slab.Grow(this_cpu, 0, kBatchSize, kBatchSize) ==
kBatchSize);
void* batch[kBatchSize];
@@ -831,8 +831,8 @@ static void BM_PushPopBatch(benchmark::State& state) {
const int kBatchSize = 32;
TcmallocSlab slab;
slab.Init(
- allocator, [](size_t cl) -> size_t { return kBatchSize; }, false,
- kShift);
+ allocator, [](size_t cl) -> size_t { return kBatchSize; }, false,
+ kShift);
CHECK_CONDITION(slab.Grow(this_cpu, 0, kBatchSize, kBatchSize) ==
kBatchSize);
void* batch[kBatchSize];
@@ -851,5 +851,5 @@ BENCHMARK(BM_PushPopBatch);
} // namespace
} // namespace percpu
} // namespace subtle
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/proc_maps.cc b/contrib/libs/tcmalloc/tcmalloc/internal/proc_maps.cc
index 5a5586cfff..415b8302ab 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/proc_maps.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/proc_maps.cc
@@ -23,9 +23,9 @@
#include "absl/strings/str_format.h"
#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/internal/util.h"
+#include "tcmalloc/internal/util.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
@@ -69,7 +69,7 @@ void ProcMapsIterator::Init(pid_t pid, Buffer* buffer) {
// No error logging since this can be called from the crash dump
// handler at awkward moments. Users should call Valid() before
// using.
- TCMALLOC_RETRY_ON_TEMP_FAILURE(fd_ = open(ibuf_, O_RDONLY));
+ TCMALLOC_RETRY_ON_TEMP_FAILURE(fd_ = open(ibuf_, O_RDONLY));
#else
fd_ = -1; // so Valid() is always false
#endif
@@ -107,8 +107,8 @@ bool ProcMapsIterator::NextExt(uint64_t* start, uint64_t* end, char** flags,
int nread = 0; // fill up buffer with text
while (etext_ < ebuf_) {
- TCMALLOC_RETRY_ON_TEMP_FAILURE(nread =
- read(fd_, etext_, ebuf_ - etext_));
+ TCMALLOC_RETRY_ON_TEMP_FAILURE(nread =
+ read(fd_, etext_, ebuf_ - etext_));
if (nread > 0)
etext_ += nread;
else
@@ -168,4 +168,4 @@ bool ProcMapsIterator::NextExt(uint64_t* start, uint64_t* end, char** flags,
} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/proc_maps.h b/contrib/libs/tcmalloc/tcmalloc/internal/proc_maps.h
index c5c763a1e8..81e54bae3b 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/proc_maps.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/proc_maps.h
@@ -19,9 +19,9 @@
#include <stdint.h>
#include <sys/types.h>
-#include "tcmalloc/internal/config.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
+#include "tcmalloc/internal/config.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
@@ -65,6 +65,6 @@ class ProcMapsIterator {
} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_PROC_MAPS_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker.h b/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker.h
index 25b863934f..86ba100d2b 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker.h
@@ -23,13 +23,13 @@
#include <limits>
#include <type_traits>
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/optimization.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Keeps a bitmap of some fixed size (N bits).
template <size_t N>
@@ -46,16 +46,16 @@ class Bitmap {
// Returns the number of set bits [index, ..., index + n - 1].
size_t CountBits(size_t index, size_t n) const;
- // Returns whether the bitmap is entirely zero or not.
- bool IsZero() const;
-
+ // Returns whether the bitmap is entirely zero or not.
+ bool IsZero() const;
+
// Equivalent to SetBit on bits [index, index + 1, ... index + n - 1].
void SetRange(size_t index, size_t n);
void ClearRange(size_t index, size_t n);
- // Clears the lowest set bit. Special case is faster than more flexible code.
- void ClearLowestBit();
-
+ // Clears the lowest set bit. Special case is faster than more flexible code.
+ void ClearLowestBit();
+
// If there is at least one free range at or after <start>,
// put it in *index, *length and return true; else return false.
bool NextFreeRange(size_t start, size_t *index, size_t *length) const;
@@ -280,8 +280,8 @@ inline size_t Bitmap<N>::CountWordBits(size_t i, size_t from, size_t to) const {
ASSERT(0 < n && n <= kWordSize);
const size_t mask = (all_ones >> (kWordSize - n)) << from;
- ASSUME(i < kWords);
- return absl::popcount(bits_[i] & mask);
+ ASSUME(i < kWords);
+ return absl::popcount(bits_[i] & mask);
}
// Set the bits [from, to) in the i-th word to Value.
@@ -305,34 +305,34 @@ inline void Bitmap<N>::SetWordBits(size_t i, size_t from, size_t to) {
template <size_t N>
inline bool Bitmap<N>::GetBit(size_t i) const {
- ASSERT(i < N);
+ ASSERT(i < N);
size_t word = i / kWordSize;
size_t offset = i % kWordSize;
- ASSUME(word < kWords);
+ ASSUME(word < kWords);
return bits_[word] & (size_t{1} << offset);
}
template <size_t N>
inline void Bitmap<N>::SetBit(size_t i) {
- ASSERT(i < N);
+ ASSERT(i < N);
size_t word = i / kWordSize;
size_t offset = i % kWordSize;
- ASSUME(word < kWords);
+ ASSUME(word < kWords);
bits_[word] |= (size_t{1} << offset);
}
template <size_t N>
inline void Bitmap<N>::ClearBit(size_t i) {
- ASSERT(i < N);
+ ASSERT(i < N);
size_t word = i / kWordSize;
size_t offset = i % kWordSize;
- ASSUME(word < kWords);
+ ASSUME(word < kWords);
bits_[word] &= ~(size_t{1} << offset);
}
template <size_t N>
inline size_t Bitmap<N>::CountBits(size_t index, size_t n) const {
- ASSUME(index + n <= N);
+ ASSUME(index + n <= N);
size_t count = 0;
if (n == 0) {
return count;
@@ -354,16 +354,16 @@ inline size_t Bitmap<N>::CountBits(size_t index, size_t n) const {
}
template <size_t N>
-inline bool Bitmap<N>::IsZero() const {
- for (int i = 0; i < kWords; ++i) {
- if (bits_[i] != 0) {
- return false;
- }
- }
- return true;
-}
-
-template <size_t N>
+inline bool Bitmap<N>::IsZero() const {
+ for (int i = 0; i < kWords; ++i) {
+ if (bits_[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <size_t N>
inline void Bitmap<N>::SetRange(size_t index, size_t n) {
SetRangeValue<true>(index, n);
}
@@ -374,16 +374,16 @@ inline void Bitmap<N>::ClearRange(size_t index, size_t n) {
}
template <size_t N>
-inline void Bitmap<N>::ClearLowestBit() {
- for (int i = 0; i < kWords; ++i) {
- if (bits_[i] != 0) {
- bits_[i] &= bits_[i] - 1;
- break;
- }
- }
-}
-
-template <size_t N>
+inline void Bitmap<N>::ClearLowestBit() {
+ for (int i = 0; i < kWords; ++i) {
+ if (bits_[i] != 0) {
+ bits_[i] &= bits_[i] - 1;
+ break;
+ }
+ }
+}
+
+template <size_t N>
template <bool Value>
inline void Bitmap<N>::SetRangeValue(size_t index, size_t n) {
ASSERT(index + n <= N);
@@ -444,10 +444,10 @@ inline void Bitmap<N>::Clear() {
template <size_t N>
template <bool Goal>
inline size_t Bitmap<N>::FindValue(size_t index) const {
- ASSERT(index < N);
+ ASSERT(index < N);
size_t offset = index % kWordSize;
size_t word = index / kWordSize;
- ASSUME(word < kWords);
+ ASSUME(word < kWords);
size_t here = bits_[word];
if (!Goal) here = ~here;
size_t mask = ~static_cast<size_t>(0) << offset;
@@ -462,8 +462,8 @@ inline size_t Bitmap<N>::FindValue(size_t index) const {
}
word *= kWordSize;
- ASSUME(here != 0);
- size_t ret = absl::countr_zero(here) + word;
+ ASSUME(here != 0);
+ size_t ret = absl::countr_zero(here) + word;
if (kDeadBits > 0) {
if (ret > N) ret = N;
}
@@ -473,10 +473,10 @@ inline size_t Bitmap<N>::FindValue(size_t index) const {
template <size_t N>
template <bool Goal>
inline ssize_t Bitmap<N>::FindValueBackwards(size_t index) const {
- ASSERT(index < N);
+ ASSERT(index < N);
size_t offset = index % kWordSize;
ssize_t word = index / kWordSize;
- ASSUME(word < kWords);
+ ASSUME(word < kWords);
size_t here = bits_[word];
if (!Goal) here = ~here;
size_t mask = (static_cast<size_t>(2) << offset) - 1;
@@ -491,13 +491,13 @@ inline ssize_t Bitmap<N>::FindValueBackwards(size_t index) const {
}
word *= kWordSize;
- ASSUME(here != 0);
- size_t ret = absl::bit_width(here) - 1 + word;
+ ASSUME(here != 0);
+ size_t ret = absl::bit_width(here) - 1 + word;
return ret;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_RANGE_TRACKER_H_
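
The Bitmap helpers restored above (IsZero, ClearLowestBit, FindValue) lean on two classic word tricks: x & (x - 1) clears exactly the lowest set bit, and countr_zero/bit_width locate a set bit once a non-zero word is found. A tiny standalone check of the first trick:

    #include <cassert>
    #include <cstddef>

    int main() {
      std::size_t x = 0b101100;
      x &= x - 1;                // drops bit 2, the lowest set bit
      assert(x == 0b101000);
      x &= x - 1;                // drops bit 3
      assert(x == 0b100000);
    }
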
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker_benchmark.cc b/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker_benchmark.cc
index 278fc9ef1e..04dfc46c55 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker_benchmark.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker_benchmark.cc
@@ -22,9 +22,9 @@
#include "benchmark/benchmark.h"
#include "tcmalloc/internal/range_tracker.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
template <size_t N>
@@ -382,6 +382,6 @@ BENCHMARK_TEMPLATE(BM_ScanChunks, 256);
BENCHMARK_TEMPLATE(BM_ScanChunks, 256 * 32);
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker_test.cc b/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker_test.cc
index 4f9202e221..8557c8e3b2 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/range_tracker_test.cc
@@ -26,7 +26,7 @@
#include "absl/random/random.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
using testing::ElementsAre;
@@ -98,29 +98,29 @@ TEST_F(BitmapTest, GetBitEmpty) {
}
}
-TEST_F(BitmapTest, CheckIsZero) {
- Bitmap<253> map;
- EXPECT_EQ(map.IsZero(), true);
- for (size_t i = 0; i < map.size(); ++i) {
- map.Clear();
- EXPECT_EQ(map.IsZero(), true);
- map.SetBit(i);
- EXPECT_EQ(map.IsZero(), false);
- }
-}
-
-TEST_F(BitmapTest, CheckClearLowestBit) {
- Bitmap<253> map;
- for (size_t i = 0; i < map.size(); ++i) {
- map.SetBit(i);
- }
- for (size_t i = 0; i < map.size(); ++i) {
- size_t index = map.FindSet(0);
- EXPECT_EQ(index, i);
- map.ClearLowestBit();
- }
-}
-
+TEST_F(BitmapTest, CheckIsZero) {
+ Bitmap<253> map;
+ EXPECT_EQ(map.IsZero(), true);
+ for (size_t i = 0; i < map.size(); ++i) {
+ map.Clear();
+ EXPECT_EQ(map.IsZero(), true);
+ map.SetBit(i);
+ EXPECT_EQ(map.IsZero(), false);
+ }
+}
+
+TEST_F(BitmapTest, CheckClearLowestBit) {
+ Bitmap<253> map;
+ for (size_t i = 0; i < map.size(); ++i) {
+ map.SetBit(i);
+ }
+ for (size_t i = 0; i < map.size(); ++i) {
+ size_t index = map.FindSet(0);
+ EXPECT_EQ(index, i);
+ map.ClearLowestBit();
+ }
+}
+
TEST_F(BitmapTest, GetBitOneSet) {
const size_t N = 251;
for (size_t s = 0; s < N; s++) {
@@ -290,5 +290,5 @@ TEST_F(RangeTrackerTest, Trivial) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/timeseries_tracker.h b/contrib/libs/tcmalloc/tcmalloc/internal/timeseries_tracker.h
index f1b6d3375f..053f9863e9 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/timeseries_tracker.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/timeseries_tracker.h
@@ -23,16 +23,16 @@
#include "absl/base/internal/cycleclock.h"
#include "absl/functional/function_ref.h"
-#include "absl/numeric/bits.h"
-#include "absl/numeric/int128.h"
+#include "absl/numeric/bits.h"
+#include "absl/numeric/int128.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
-#include "tcmalloc/internal/clock.h"
+#include "tcmalloc/internal/clock.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Aggregates a series of reported values of type S in a set of entries of type
// T, one entry per epoch. This class factors out common functionality of
@@ -44,16 +44,16 @@ class TimeSeriesTracker {
enum SkipEntriesSetting { kSkipEmptyEntries, kDoNotSkipEmptyEntries };
explicit constexpr TimeSeriesTracker(Clock clock, absl::Duration w)
- : window_(w), epoch_length_(window_ / kEpochs), clock_(clock) {
- // See comment in GetCurrentEpoch().
- auto d = static_cast<uint64_t>(absl::ToDoubleSeconds(epoch_length_) *
- clock.freq());
- div_precision_ = 63 + absl::bit_width(d);
- epoch_ticks_m_ =
- static_cast<uint64_t>(
- (static_cast<absl::uint128>(1) << div_precision_) / d) +
- 1;
- }
+ : window_(w), epoch_length_(window_ / kEpochs), clock_(clock) {
+ // See comment in GetCurrentEpoch().
+ auto d = static_cast<uint64_t>(absl::ToDoubleSeconds(epoch_length_) *
+ clock.freq());
+ div_precision_ = 63 + absl::bit_width(d);
+ epoch_ticks_m_ =
+ static_cast<uint64_t>(
+ (static_cast<absl::uint128>(1) << div_precision_) / d) +
+ 1;
+ }
bool Report(S val);
@@ -66,7 +66,7 @@ class TimeSeriesTracker {
// Iterates over the last num_epochs data points (if -1, iterate to the
// oldest entry). Offsets are relative to the end of the buffer.
void IterBackwards(absl::FunctionRef<void(size_t, int64_t, const T&)> f,
- int64_t num_epochs = -1) const;
+ int64_t num_epochs = -1) const;
// This retrieves a particular data point (if offset is outside the valid
// range, the default data point will be returned).
@@ -82,21 +82,21 @@ class TimeSeriesTracker {
bool UpdateClock();
// Returns the current epoch based on the clock.
- int64_t GetCurrentEpoch() {
- // This is equivalent to
- // `clock_.now() / (absl::ToDoubleSeconds(epoch_length_) * clock_.freq())`.
- // We basically follow the technique from
- // https://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html,
- // except that we use one fewer bit of precision than necessary to always
- // get the correct answer if the numerator were a 64-bit unsigned number. In
- // this case, because clock_.now() returns a signed 64-bit number (i.e. max
- // is <2^63), it shouldn't cause a problem. This way, we don't need to
- // handle overflow so it's simpler. See also:
- // https://lemire.me/blog/2019/02/20/more-fun-with-fast-remainders-when-the-divisor-is-a-constant/.
- return static_cast<int64_t>(static_cast<absl::uint128>(epoch_ticks_m_) *
- clock_.now() >>
- div_precision_);
- }
+ int64_t GetCurrentEpoch() {
+ // This is equivalent to
+ // `clock_.now() / (absl::ToDoubleSeconds(epoch_length_) * clock_.freq())`.
+ // We basically follow the technique from
+ // https://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html,
+ // except that we use one fewer bit of precision than necessary to always
+ // get the correct answer if the numerator were a 64-bit unsigned number. In
+ // this case, because clock_.now() returns a signed 64-bit number (i.e. max
+ // is <2^63), it shouldn't cause a problem. This way, we don't need to
+ // handle overflow so it's simpler. See also:
+ // https://lemire.me/blog/2019/02/20/more-fun-with-fast-remainders-when-the-divisor-is-a-constant/.
+ return static_cast<int64_t>(static_cast<absl::uint128>(epoch_ticks_m_) *
+ clock_.now() >>
+ div_precision_);
+ }
const absl::Duration window_;
const absl::Duration epoch_length_;
@@ -104,10 +104,10 @@ class TimeSeriesTracker {
T entries_[kEpochs]{};
size_t last_epoch_{0};
size_t current_epoch_{0};
- // This is the magic constant from
- // https://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html.
- uint64_t epoch_ticks_m_;
- uint8_t div_precision_;
+ // This is the magic constant from
+ // https://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html.
+ uint64_t epoch_ticks_m_;
+ uint8_t div_precision_;
Clock clock_;
};
@@ -158,7 +158,7 @@ void TimeSeriesTracker<T, S, kEpochs>::Iter(
template <class T, class S, size_t kEpochs>
void TimeSeriesTracker<T, S, kEpochs>::IterBackwards(
absl::FunctionRef<void(size_t, int64_t, const T&)> f,
- int64_t num_epochs) const {
+ int64_t num_epochs) const {
// -1 means that we are outputting all epochs.
num_epochs = (num_epochs == -1) ? kEpochs : num_epochs;
size_t j = current_epoch_;
@@ -188,8 +188,8 @@ bool TimeSeriesTracker<T, S, kEpochs>::Report(S val) {
return updated_clock;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_TIMESERIES_TRACKER_H_
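
The multiply-and-shift division restored above in the TimeSeriesTracker constructor and GetCurrentEpoch() can be checked in isolation: for a divisor d, precompute m = 2^p / d + 1 with p = 63 + bit_width(d), and then (m * n) >> p equals n / d for any n below 2^63. The divisor below (a 3 GHz clock with 100 ms epochs) and the use of the compiler's unsigned __int128 are assumptions made only to keep the check self-contained:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t d = 300'000'000;                       // ticks per epoch
      unsigned width = 0;
      for (uint64_t v = d; v != 0; v >>= 1) ++width;        // width = bit_width(d)
      const unsigned p = 63 + width;
      const uint64_t m = static_cast<uint64_t>(
          ((static_cast<unsigned __int128>(1) << p) / d) + 1);

      for (uint64_t n : {uint64_t{0}, uint64_t{1}, d - 1, d, d + 1,
                         uint64_t{123456789012345}, uint64_t{1} << 62}) {
        const uint64_t q = static_cast<uint64_t>(
            (static_cast<unsigned __int128>(m) * n) >> p);
        assert(q == n / d);                                 // exact for n < 2^63
      }
    }
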
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/timeseries_tracker_test.cc b/contrib/libs/tcmalloc/tcmalloc/internal/timeseries_tracker_test.cc
index 1f75306161..c5f647cf1e 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/timeseries_tracker_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/timeseries_tracker_test.cc
@@ -20,7 +20,7 @@
using ::testing::ElementsAre;
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class TimeSeriesTrackerTest : public testing::Test {
@@ -42,7 +42,7 @@ class TimeSeriesTrackerTest : public testing::Test {
static constexpr absl::Duration kDuration = absl::Seconds(2);
- TimeSeriesTracker<TestEntry, int, 8> tracker_{
+ TimeSeriesTracker<TestEntry, int, 8> tracker_{
Clock{.now = FakeClock, .freq = GetFakeClockFrequency}, kDuration};
private:
@@ -59,10 +59,10 @@ int64_t TimeSeriesTrackerTest::clock_{0};
// Test that frequency conversion in the cycle clock works correctly
TEST(TimeSeriesTest, CycleClock) {
- TimeSeriesTracker<TimeSeriesTrackerTest::TestEntry, int, 100> tracker{
- Clock{absl::base_internal::CycleClock::Now,
- absl::base_internal::CycleClock::Frequency},
- absl::Seconds(10)}; // 100ms epochs
+ TimeSeriesTracker<TimeSeriesTrackerTest::TestEntry, int, 100> tracker{
+ Clock{absl::base_internal::CycleClock::Now,
+ absl::base_internal::CycleClock::Frequency},
+ absl::Seconds(10)}; // 100ms epochs
tracker.Report(1);
absl::SleepFor(absl::Milliseconds(100));
@@ -187,5 +187,5 @@ TEST_F(TimeSeriesTrackerTest, Works) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/util.cc b/contrib/libs/tcmalloc/tcmalloc/internal/util.cc
index ef705b02e3..ff36e00985 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/util.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/util.cc
@@ -27,7 +27,7 @@
#include "absl/time/time.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
@@ -192,4 +192,4 @@ bool ScopedAffinityMask::Tampered() {
} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/util.h b/contrib/libs/tcmalloc/tcmalloc/internal/util.h
index b43e322257..23139b09b9 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/util.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/util.h
@@ -29,20 +29,20 @@
#include "absl/base/internal/sysinfo.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
-#include "tcmalloc/internal/config.h"
-
-#define TCMALLOC_RETRY_ON_TEMP_FAILURE(expression) \
- (__extension__({ \
- long int _temp_failure_retry_result; \
- do _temp_failure_retry_result = (long int)(expression); \
- while (_temp_failure_retry_result == -1L && errno == EINTR); \
- _temp_failure_retry_result; \
- }))
-
+#include "tcmalloc/internal/config.h"
+
+#define TCMALLOC_RETRY_ON_TEMP_FAILURE(expression) \
+ (__extension__({ \
+ long int _temp_failure_retry_result; \
+ do _temp_failure_retry_result = (long int)(expression); \
+ while (_temp_failure_retry_result == -1L && errno == EINTR); \
+ _temp_failure_retry_result; \
+ }))
+
// Useful internal utility functions. These calls are async-signal safe
// provided the signal handler saves errno at entry and restores it before
// return.
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
@@ -133,6 +133,6 @@ class ScopedAffinityMask {
} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_INTERNAL_UTIL_H_
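
TCMALLOC_RETRY_ON_TEMP_FAILURE above is a GNU statement expression: it keeps re-evaluating its argument while the result is -1 with errno == EINTR, then yields the last result, which is how proc_maps.cc wraps open() and read() earlier in this diff. A minimal usage sketch (the include path and the file being opened are assumptions for illustration):

    #include <cerrno>
    #include <fcntl.h>
    #include <unistd.h>

    #include "tcmalloc/internal/util.h"

    int main() {
      int fd;
      // Transparently retried if open() is interrupted by a signal.
      TCMALLOC_RETRY_ON_TEMP_FAILURE(fd = open("/proc/self/maps", O_RDONLY));
      if (fd >= 0) close(fd);
    }
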
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h b/contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h
index 66027418ed..83f9b91242 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h
@@ -54,8 +54,8 @@ class ProfileAccessor {
extern "C" {
-ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_ForceCpuCacheActivation();
-
+ABSL_ATTRIBUTE_WEAK void TCMalloc_Internal_ForceCpuCacheActivation();
+
ABSL_ATTRIBUTE_WEAK tcmalloc::AddressRegionFactory*
MallocExtension_Internal_GetRegionFactory();
ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetRegionFactory(
@@ -75,17 +75,17 @@ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetMemoryLimit(
ABSL_ATTRIBUTE_WEAK bool MallocExtension_Internal_GetNumericProperty(
const char* name_data, size_t name_size, size_t* value);
ABSL_ATTRIBUTE_WEAK bool MallocExtension_Internal_GetPerCpuCachesActive();
-ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_DeactivatePerCpuCaches();
+ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_DeactivatePerCpuCaches();
ABSL_ATTRIBUTE_WEAK int32_t MallocExtension_Internal_GetMaxPerCpuCacheSize();
-ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetSkipSubreleaseInterval(
- absl::Duration* ret);
+ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetSkipSubreleaseInterval(
+ absl::Duration* ret);
ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetProperties(
std::map<std::string, tcmalloc::MallocExtension::Property>* ret);
ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetStats(std::string* ret);
ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetMaxPerCpuCacheSize(
int32_t value);
-ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetSkipSubreleaseInterval(
- absl::Duration value);
+ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetSkipSubreleaseInterval(
+ absl::Duration value);
ABSL_ATTRIBUTE_WEAK size_t MallocExtension_Internal_ReleaseCpuMemory(int cpu);
ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_ReleaseMemoryToSystem(
size_t bytes);
@@ -116,10 +116,10 @@ ABSL_ATTRIBUTE_WEAK int64_t
MallocExtension_Internal_GetMaxTotalThreadCacheBytes();
ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetMaxTotalThreadCacheBytes(
int64_t value);
-
-ABSL_ATTRIBUTE_WEAK void
-MallocExtension_EnableForkSupport();
-
+
+ABSL_ATTRIBUTE_WEAK void
+MallocExtension_EnableForkSupport();
+
ABSL_ATTRIBUTE_WEAK void
MallocExtension_SetSampleUserDataCallbacks(
tcmalloc::MallocExtension::CreateSampleUserDataCallback create,
diff --git a/contrib/libs/tcmalloc/tcmalloc/legacy_size_classes.cc b/contrib/libs/tcmalloc/tcmalloc/legacy_size_classes.cc
index 5395252719..11c05c08da 100644
--- a/contrib/libs/tcmalloc/tcmalloc/legacy_size_classes.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/legacy_size_classes.cc
@@ -14,11 +14,11 @@
#include "tcmalloc/common.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
-
+namespace tcmalloc_internal {
+
// <fixed> is fixed per-size-class overhead due to end-of-span fragmentation
// and other factors. For instance, if we have a 96 byte size class, and use a
// single 8KiB page, then we will hold 85 objects per span, and have 32 bytes
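
A quick compile-time check of the 96-byte example in the comment above:

    // 8 KiB page of 96-byte objects: 85 objects fit, 32 bytes are left over.
    static_assert(8192 / 96 == 85, "objects per span");
    static_assert(8192 - 85 * 96 == 32, "leftover bytes");
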
@@ -68,10 +68,10 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 240, 1, 32}, // 0.98%
{ 256, 1, 32}, // 0.59%
{ 272, 1, 32}, // 0.98%
- { 296, 1, 32}, // 3.10%
+ { 296, 1, 32}, // 3.10%
{ 312, 1, 32}, // 1.58%
{ 336, 1, 32}, // 2.18%
- { 352, 1, 32}, // 1.78%
+ { 352, 1, 32}, // 1.78%
{ 368, 1, 32}, // 1.78%
{ 408, 1, 32}, // 0.98%
{ 448, 1, 32}, // 2.18%
@@ -105,7 +105,7 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 9472, 5, 6}, // 8.23%
{ 10240, 4, 6}, // 6.82%
{ 12288, 3, 5}, // 0.20%
- { 13568, 5, 4}, // 0.75%
+ { 13568, 5, 4}, // 0.75%
{ 14336, 7, 4}, // 0.08%
{ 16384, 2, 4}, // 0.29%
{ 20480, 5, 3}, // 0.12%
@@ -119,13 +119,13 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 73728, 9, 2}, // 0.07%
{ 81920, 10, 2}, // 0.06%
{ 98304, 12, 2}, // 0.05%
- { 114688, 14, 2}, // 0.04%
+ { 114688, 14, 2}, // 0.04%
{ 131072, 16, 2}, // 0.04%
{ 147456, 18, 2}, // 0.03%
{ 163840, 20, 2}, // 0.03%
{ 180224, 22, 2}, // 0.03%
{ 204800, 25, 2}, // 0.02%
- { 237568, 29, 2}, // 0.02%
+ { 237568, 29, 2}, // 0.02%
{ 262144, 32, 2}, // 0.02%
};
#elif TCMALLOC_PAGE_SHIFT == 15
@@ -156,16 +156,16 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 176, 1, 32}, // 0.24%
{ 192, 1, 32}, // 0.54%
{ 208, 1, 32}, // 0.49%
- { 224, 1, 32}, // 0.34%
- { 240, 1, 32}, // 0.54%
+ { 224, 1, 32}, // 0.34%
+ { 240, 1, 32}, // 0.54%
{ 256, 1, 32}, // 0.15%
{ 280, 1, 32}, // 0.17%
{ 304, 1, 32}, // 0.89%
- { 328, 1, 32}, // 1.06%
- { 352, 1, 32}, // 0.24%
- { 384, 1, 32}, // 0.54%
+ { 328, 1, 32}, // 1.06%
+ { 352, 1, 32}, // 0.24%
+ { 384, 1, 32}, // 0.54%
{ 416, 1, 32}, // 1.13%
- { 448, 1, 32}, // 0.34%
+ { 448, 1, 32}, // 0.34%
{ 488, 1, 32}, // 0.37%
{ 512, 1, 32}, // 0.15%
{ 576, 1, 32}, // 1.74%
@@ -176,8 +176,8 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 1024, 1, 32}, // 0.15%
{ 1152, 1, 32}, // 1.74%
{ 1280, 1, 32}, // 2.55%
- { 1536, 1, 32}, // 1.74%
- { 1792, 1, 32}, // 1.74%
+ { 1536, 1, 32}, // 1.74%
+ { 1792, 1, 32}, // 1.74%
{ 2048, 1, 32}, // 0.15%
{ 2176, 1, 30}, // 0.54%
{ 2304, 1, 28}, // 1.74%
@@ -189,7 +189,7 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 4608, 1, 14}, // 1.74%
{ 5376, 1, 12}, // 1.74%
{ 6528, 1, 10}, // 0.54%
- { 7168, 2, 9}, // 1.66%
+ { 7168, 2, 9}, // 1.66%
{ 8192, 1, 8}, // 0.15%
{ 9344, 2, 7}, // 0.27%
{ 10880, 1, 6}, // 0.54%
@@ -200,7 +200,7 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 24576, 3, 2}, // 0.05%
{ 28032, 6, 2}, // 0.22%
{ 32768, 1, 2}, // 0.15%
- { 38144, 5, 2}, // 7.41%
+ { 38144, 5, 2}, // 7.41%
{ 40960, 4, 2}, // 6.71%
{ 49152, 3, 2}, // 0.05%
{ 57344, 7, 2}, // 0.02%
@@ -234,32 +234,32 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 80, 1, 32}, // 0.04%
{ 88, 1, 32}, // 0.05%
{ 96, 1, 32}, // 0.04%
- { 104, 1, 32}, // 0.04%
+ { 104, 1, 32}, // 0.04%
{ 112, 1, 32}, // 0.04%
{ 128, 1, 32}, // 0.02%
{ 144, 1, 32}, // 0.04%
{ 160, 1, 32}, // 0.04%
{ 176, 1, 32}, // 0.05%
{ 192, 1, 32}, // 0.04%
- { 208, 1, 32}, // 0.04%
+ { 208, 1, 32}, // 0.04%
{ 240, 1, 32}, // 0.04%
{ 256, 1, 32}, // 0.02%
- { 304, 1, 32}, // 0.05%
- { 336, 1, 32}, // 0.04%
+ { 304, 1, 32}, // 0.05%
+ { 336, 1, 32}, // 0.04%
{ 360, 1, 32}, // 0.04%
- { 408, 1, 32}, // 0.10%
- { 456, 1, 32}, // 0.17%
+ { 408, 1, 32}, // 0.10%
+ { 456, 1, 32}, // 0.17%
{ 512, 1, 32}, // 0.02%
{ 576, 1, 32}, // 0.04%
{ 640, 1, 32}, // 0.17%
{ 704, 1, 32}, // 0.12%
- { 768, 1, 32}, // 0.12%
+ { 768, 1, 32}, // 0.12%
{ 832, 1, 32}, // 0.04%
- { 896, 1, 32}, // 0.21%
+ { 896, 1, 32}, // 0.21%
{ 1024, 1, 32}, // 0.02%
{ 1152, 1, 32}, // 0.26%
{ 1280, 1, 32}, // 0.41%
- { 1536, 1, 32}, // 0.41%
+ { 1536, 1, 32}, // 0.41%
{ 1664, 1, 32}, // 0.36%
{ 1792, 1, 32}, // 0.21%
{ 1920, 1, 32}, // 0.41%
@@ -267,24 +267,24 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 2176, 1, 30}, // 0.41%
{ 2304, 1, 28}, // 0.71%
{ 2432, 1, 26}, // 0.76%
- { 2560, 1, 25}, // 0.41%
+ { 2560, 1, 25}, // 0.41%
{ 2688, 1, 24}, // 0.56%
- { 2816, 1, 23}, // 0.12%
+ { 2816, 1, 23}, // 0.12%
{ 2944, 1, 22}, // 0.07%
{ 3072, 1, 21}, // 0.41%
{ 3328, 1, 19}, // 1.00%
{ 3584, 1, 18}, // 0.21%
{ 3840, 1, 17}, // 0.41%
{ 4096, 1, 16}, // 0.02%
- { 4736, 1, 13}, // 0.66%
+ { 4736, 1, 13}, // 0.66%
{ 5504, 1, 11}, // 1.35%
{ 6144, 1, 10}, // 1.61%
- { 6528, 1, 10}, // 0.41%
- { 6784, 1, 9}, // 1.71%
+ { 6528, 1, 10}, // 0.41%
+ { 6784, 1, 9}, // 1.71%
{ 7168, 1, 9}, // 1.61%
{ 7680, 1, 8}, // 0.41%
{ 8192, 1, 8}, // 0.02%
- { 8704, 1, 7}, // 0.41%
+ { 8704, 1, 7}, // 0.41%
{ 9344, 1, 7}, // 0.21%
{ 10880, 1, 6}, // 0.41%
{ 11904, 1, 5}, // 0.12%
@@ -332,11 +332,11 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 88, 1, 32}, // 2.37%
{ 96, 1, 32}, // 2.78%
{ 104, 1, 32}, // 2.17%
- { 120, 1, 32}, // 1.57%
+ { 120, 1, 32}, // 1.57%
{ 128, 1, 32}, // 1.17%
{ 144, 1, 32}, // 2.78%
{ 160, 1, 32}, // 3.60%
- { 184, 1, 32}, // 2.37%
+ { 184, 1, 32}, // 2.37%
{ 208, 1, 32}, // 4.86%
{ 240, 1, 32}, // 1.57%
{ 256, 1, 32}, // 1.17%
@@ -347,22 +347,22 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 408, 1, 32}, // 1.57%
{ 512, 1, 32}, // 1.17%
{ 576, 2, 32}, // 2.18%
- { 704, 2, 32}, // 6.40%
+ { 704, 2, 32}, // 6.40%
{ 768, 2, 32}, // 7.29%
{ 896, 2, 32}, // 2.18%
{ 1024, 2, 32}, // 0.59%
{ 1152, 3, 32}, // 7.08%
{ 1280, 3, 32}, // 7.08%
{ 1536, 3, 32}, // 0.39%
- { 1792, 4, 32}, // 1.88%
+ { 1792, 4, 32}, // 1.88%
{ 2048, 4, 32}, // 0.29%
{ 2304, 4, 28}, // 1.88%
{ 2688, 4, 24}, // 1.88%
- { 3456, 6, 18}, // 1.79%
+ { 3456, 6, 18}, // 1.79%
{ 4096, 4, 16}, // 0.29%
- { 5376, 4, 12}, // 1.88%
+ { 5376, 4, 12}, // 1.88%
{ 6144, 3, 10}, // 0.39%
- { 7168, 7, 9}, // 0.17%
+ { 7168, 7, 9}, // 0.17%
{ 8192, 4, 8}, // 0.29%
};
#else
@@ -452,12 +452,12 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 90112, 11, 2}, // 0.05%
{ 98304, 12, 2}, // 0.05%
{ 106496, 13, 2}, // 0.05%
- { 114688, 14, 2}, // 0.04%
+ { 114688, 14, 2}, // 0.04%
{ 131072, 16, 2}, // 0.04%
{ 139264, 17, 2}, // 0.03%
{ 155648, 19, 2}, // 0.03%
- { 172032, 21, 2}, // 0.03%
- { 188416, 23, 2}, // 0.03%
+ { 172032, 21, 2}, // 0.03%
+ { 188416, 23, 2}, // 0.03%
{ 204800, 25, 2}, // 0.02%
{ 221184, 27, 2}, // 0.02%
{ 237568, 29, 2}, // 0.02%
@@ -491,10 +491,10 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 272, 1, 32}, // 0.54%
{ 288, 1, 32}, // 0.84%
{ 304, 1, 32}, // 0.89%
- { 320, 1, 32}, // 0.54%
+ { 320, 1, 32}, // 0.54%
{ 336, 1, 32}, // 0.69%
- { 352, 1, 32}, // 0.24%
- { 384, 1, 32}, // 0.54%
+ { 352, 1, 32}, // 0.24%
+ { 384, 1, 32}, // 0.54%
{ 416, 1, 32}, // 1.13%
{ 448, 1, 32}, // 0.34%
{ 480, 1, 32}, // 0.54%
@@ -510,7 +510,7 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 1280, 1, 32}, // 2.55%
{ 1408, 1, 32}, // 1.33%
{ 1536, 1, 32}, // 1.74%
- { 1792, 1, 32}, // 1.74%
+ { 1792, 1, 32}, // 1.74%
{ 2048, 1, 32}, // 0.15%
{ 2176, 1, 30}, // 0.54%
{ 2304, 1, 28}, // 1.74%
@@ -570,11 +570,11 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 160, 1, 32}, // 0.04%
{ 176, 1, 32}, // 0.05%
{ 192, 1, 32}, // 0.04%
- { 208, 1, 32}, // 0.04%
+ { 208, 1, 32}, // 0.04%
{ 240, 1, 32}, // 0.04%
{ 256, 1, 32}, // 0.02%
- { 304, 1, 32}, // 0.05%
- { 336, 1, 32}, // 0.04%
+ { 304, 1, 32}, // 0.05%
+ { 336, 1, 32}, // 0.04%
{ 368, 1, 32}, // 0.07%
{ 416, 1, 32}, // 0.04%
{ 464, 1, 32}, // 0.19%
@@ -582,7 +582,7 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 576, 1, 32}, // 0.04%
{ 640, 1, 32}, // 0.17%
{ 704, 1, 32}, // 0.12%
- { 768, 1, 32}, // 0.12%
+ { 768, 1, 32}, // 0.12%
{ 832, 1, 32}, // 0.04%
{ 896, 1, 32}, // 0.21%
{ 1024, 1, 32}, // 0.02%
@@ -597,30 +597,30 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 2176, 1, 30}, // 0.41%
{ 2304, 1, 28}, // 0.71%
{ 2432, 1, 26}, // 0.76%
- { 2560, 1, 25}, // 0.41%
+ { 2560, 1, 25}, // 0.41%
{ 2688, 1, 24}, // 0.56%
- { 2816, 1, 23}, // 0.12%
+ { 2816, 1, 23}, // 0.12%
{ 2944, 1, 22}, // 0.07%
{ 3072, 1, 21}, // 0.41%
- { 3200, 1, 20}, // 1.15%
+ { 3200, 1, 20}, // 1.15%
{ 3328, 1, 19}, // 1.00%
{ 3584, 1, 18}, // 0.21%
{ 3840, 1, 17}, // 0.41%
{ 4096, 1, 16}, // 0.02%
- { 4736, 1, 13}, // 0.66%
+ { 4736, 1, 13}, // 0.66%
{ 5504, 1, 11}, // 1.35%
{ 6144, 1, 10}, // 1.61%
{ 6528, 1, 10}, // 0.41%
- { 6784, 1, 9}, // 1.71%
+ { 6784, 1, 9}, // 1.71%
{ 7168, 1, 9}, // 1.61%
{ 7680, 1, 8}, // 0.41%
{ 8192, 1, 8}, // 0.02%
{ 8704, 1, 7}, // 0.41%
{ 9344, 1, 7}, // 0.21%
- { 10368, 1, 6}, // 1.15%
- { 11392, 1, 5}, // 0.07%
- { 12416, 1, 5}, // 0.56%
- { 13696, 1, 4}, // 0.76%
+ { 10368, 1, 6}, // 1.15%
+ { 11392, 1, 5}, // 0.07%
+ { 12416, 1, 5}, // 0.56%
+ { 13696, 1, 4}, // 0.76%
{ 14464, 1, 4}, // 0.71%
{ 16384, 1, 4}, // 0.02%
{ 17408, 1, 3}, // 0.41%
@@ -695,7 +695,7 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
{ 3200, 4, 20}, // 2.70%
{ 3584, 7, 18}, // 0.17%
{ 4096, 4, 16}, // 0.29%
- { 5376, 4, 12}, // 1.88%
+ { 5376, 4, 12}, // 1.88%
{ 6144, 3, 10}, // 0.39%
{ 7168, 7, 9}, // 0.17%
{ 8192, 4, 8}, // 0.29%
@@ -706,6 +706,6 @@ const SizeClassInfo SizeMap::kLegacySizeClasses[SizeMap::kLegacySizeClassesCount
#endif
// clang-format on
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/libc_override.h b/contrib/libs/tcmalloc/tcmalloc/libc_override.h
index 89f8e4e5c8..97c282895d 100644
--- a/contrib/libs/tcmalloc/tcmalloc/libc_override.h
+++ b/contrib/libs/tcmalloc/tcmalloc/libc_override.h
@@ -30,8 +30,8 @@
#if defined(__GLIBC__)
#include "tcmalloc/libc_override_glibc.h"
-
-#else
+
+#else
#include "tcmalloc/libc_override_redefine.h"
#endif
diff --git a/contrib/libs/tcmalloc/tcmalloc/libc_override_gcc_and_weak.h b/contrib/libs/tcmalloc/tcmalloc/libc_override_gcc_and_weak.h
index 709bcb727f..f66dd7f05b 100644
--- a/contrib/libs/tcmalloc/tcmalloc/libc_override_gcc_and_weak.h
+++ b/contrib/libs/tcmalloc/tcmalloc/libc_override_gcc_and_weak.h
@@ -102,10 +102,10 @@ int posix_memalign(void** r, size_t a, size_t s) noexcept
void malloc_stats(void) noexcept TCMALLOC_ALIAS(TCMallocInternalMallocStats);
int mallopt(int cmd, int value) noexcept
TCMALLOC_ALIAS(TCMallocInternalMallOpt);
-#ifdef TCMALLOC_HAVE_STRUCT_MALLINFO
+#ifdef TCMALLOC_HAVE_STRUCT_MALLINFO
struct mallinfo mallinfo(void) noexcept
TCMALLOC_ALIAS(TCMallocInternalMallocInfo);
-#endif
+#endif
size_t malloc_size(void* p) noexcept TCMALLOC_ALIAS(TCMallocInternalMallocSize);
size_t malloc_usable_size(void* p) noexcept
TCMALLOC_ALIAS(TCMallocInternalMallocSize);
diff --git a/contrib/libs/tcmalloc/tcmalloc/libc_override_redefine.h b/contrib/libs/tcmalloc/tcmalloc/libc_override_redefine.h
index b1655461c3..19be8bc470 100644
--- a/contrib/libs/tcmalloc/tcmalloc/libc_override_redefine.h
+++ b/contrib/libs/tcmalloc/tcmalloc/libc_override_redefine.h
@@ -44,57 +44,57 @@ void operator delete(void* ptr, const std::nothrow_t& nt) noexcept {
void operator delete[](void* ptr, const std::nothrow_t& nt) noexcept {
return TCMallocInternalDeleteArrayNothrow(ptr, nt);
}
-
+
extern "C" {
-void* malloc(size_t s) { return TCMallocInternalMalloc(s); }
-void* calloc(size_t n, size_t s) { return TCMallocInternalCalloc(n, s); }
-void* realloc(void* p, size_t s) { return TCMallocInternalRealloc(p, s); }
-void free(void* p) { TCMallocInternalFree(p); }
-void* memalign(size_t a, size_t s) { return TCMallocInternalMemalign(a, s); }
-int posix_memalign(void** r, size_t a, size_t s) {
- return TCMallocInternalPosixMemalign(r, a, s);
-}
-size_t malloc_usable_size(void* p) { return TCMallocInternalMallocSize(p); }
-
-// tcmalloc extension
+void* malloc(size_t s) { return TCMallocInternalMalloc(s); }
+void* calloc(size_t n, size_t s) { return TCMallocInternalCalloc(n, s); }
+void* realloc(void* p, size_t s) { return TCMallocInternalRealloc(p, s); }
+void free(void* p) { TCMallocInternalFree(p); }
+void* memalign(size_t a, size_t s) { return TCMallocInternalMemalign(a, s); }
+int posix_memalign(void** r, size_t a, size_t s) {
+ return TCMallocInternalPosixMemalign(r, a, s);
+}
+size_t malloc_usable_size(void* p) { return TCMallocInternalMallocSize(p); }
+
+// tcmalloc extension
void sdallocx(void* p, size_t s, int flags) noexcept {
TCMallocInternalSdallocx(p, s, flags);
}
-
-#if defined(__GLIBC__) || defined(__NEWLIB__)
-// SunOS extension
-void cfree(void* p) { TCMallocInternalCfree(p); }
-#endif
-
-#if defined(OS_MACOSX) || defined(__BIONIC__) || defined(__GLIBC__) || \
- defined(__NEWLIB__) || defined(__UCLIBC__)
-// Obsolete memalign
-void* valloc(size_t s) { return TCMallocInternalValloc(s); }
-#endif
-
-#if defined(__BIONIC__) || defined(__GLIBC__) || defined(__NEWLIB__)
-// Obsolete memalign
-void* pvalloc(size_t s) { return TCMallocInternalPvalloc(s); }
-#endif
-
-#if defined(__GLIBC__) || defined(__NEWLIB__) || defined(__UCLIBC__)
-void malloc_stats(void) { TCMallocInternalMallocStats(); }
-#endif
-
-#if defined(__BIONIC__) || defined(__GLIBC__) || defined(__NEWLIB__) || \
- defined(__UCLIBC__)
-int mallopt(int cmd, int v) { return TCMallocInternalMallOpt(cmd, v); }
-#endif
-
-#ifdef TCMALLOC_HAVE_STRUCT_MALLINFO
-struct mallinfo mallinfo(void) {
+
+#if defined(__GLIBC__) || defined(__NEWLIB__)
+// SunOS extension
+void cfree(void* p) { TCMallocInternalCfree(p); }
+#endif
+
+#if defined(OS_MACOSX) || defined(__BIONIC__) || defined(__GLIBC__) || \
+ defined(__NEWLIB__) || defined(__UCLIBC__)
+// Obsolete memalign
+void* valloc(size_t s) { return TCMallocInternalValloc(s); }
+#endif
+
+#if defined(__BIONIC__) || defined(__GLIBC__) || defined(__NEWLIB__)
+// Obsolete memalign
+void* pvalloc(size_t s) { return TCMallocInternalPvalloc(s); }
+#endif
+
+#if defined(__GLIBC__) || defined(__NEWLIB__) || defined(__UCLIBC__)
+void malloc_stats(void) { TCMallocInternalMallocStats(); }
+#endif
+
+#if defined(__BIONIC__) || defined(__GLIBC__) || defined(__NEWLIB__) || \
+ defined(__UCLIBC__)
+int mallopt(int cmd, int v) { return TCMallocInternalMallOpt(cmd, v); }
+#endif
+
+#ifdef TCMALLOC_HAVE_STRUCT_MALLINFO
+struct mallinfo mallinfo(void) {
return TCMallocInternalMallocInfo();
}
#endif
-
-#if defined(__GLIBC__)
-size_t malloc_size(void* p) { return TCMallocInternalMallocSize(p); }
-#endif
+
+#if defined(__GLIBC__)
+size_t malloc_size(void* p) { return TCMallocInternalMallocSize(p); }
+#endif
} // extern "C"
#endif // TCMALLOC_LIBC_OVERRIDE_REDEFINE_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc b/contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc
index ad3205fcdc..4ccf813bff 100644
--- a/contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc
@@ -26,7 +26,7 @@
#include "absl/base/attributes.h"
#include "absl/base/internal/low_level_alloc.h"
#include "absl/memory/memory.h"
-#include "absl/time/time.h"
+#include "absl/time/time.h"
#include "tcmalloc/internal/parameter_accessors.h"
#include "tcmalloc/internal_malloc_extension.h"
@@ -287,16 +287,16 @@ bool MallocExtension::PerCpuCachesActive() {
#endif
}
-void MallocExtension::DeactivatePerCpuCaches() {
-#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
- if (MallocExtension_Internal_DeactivatePerCpuCaches == nullptr) {
- return;
- }
-
- MallocExtension_Internal_DeactivatePerCpuCaches();
-#endif
-}
-
+void MallocExtension::DeactivatePerCpuCaches() {
+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
+ if (MallocExtension_Internal_DeactivatePerCpuCaches == nullptr) {
+ return;
+ }
+
+ MallocExtension_Internal_DeactivatePerCpuCaches();
+#endif
+}
+
int32_t MallocExtension::GetMaxPerCpuCacheSize() {
#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
if (MallocExtension_Internal_GetMaxPerCpuCacheSize == nullptr) {
@@ -345,32 +345,32 @@ void MallocExtension::SetMaxTotalThreadCacheBytes(int64_t value) {
#endif
}
-absl::Duration MallocExtension::GetSkipSubreleaseInterval() {
-#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
- if (MallocExtension_Internal_GetSkipSubreleaseInterval == nullptr) {
- return absl::ZeroDuration();
- }
-
- absl::Duration value;
- MallocExtension_Internal_GetSkipSubreleaseInterval(&value);
- return value;
-#else
- return absl::ZeroDuration();
-#endif
-}
-
-void MallocExtension::SetSkipSubreleaseInterval(absl::Duration value) {
-#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
- if (MallocExtension_Internal_SetSkipSubreleaseInterval == nullptr) {
- return;
- }
-
- MallocExtension_Internal_SetSkipSubreleaseInterval(value);
-#else
- (void)value;
-#endif
-}
-
+absl::Duration MallocExtension::GetSkipSubreleaseInterval() {
+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
+ if (MallocExtension_Internal_GetSkipSubreleaseInterval == nullptr) {
+ return absl::ZeroDuration();
+ }
+
+ absl::Duration value;
+ MallocExtension_Internal_GetSkipSubreleaseInterval(&value);
+ return value;
+#else
+ return absl::ZeroDuration();
+#endif
+}
+
+void MallocExtension::SetSkipSubreleaseInterval(absl::Duration value) {
+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
+ if (MallocExtension_Internal_SetSkipSubreleaseInterval == nullptr) {
+ return;
+ }
+
+ MallocExtension_Internal_SetSkipSubreleaseInterval(value);
+#else
+ (void)value;
+#endif
+}
+
absl::optional<size_t> MallocExtension::GetNumericProperty(
absl::string_view property) {
#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
@@ -429,20 +429,20 @@ size_t MallocExtension::ReleaseCpuMemory(int cpu) {
void MallocExtension::ProcessBackgroundActions() {
#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
- if (NeedsProcessBackgroundActions()) {
+ if (NeedsProcessBackgroundActions()) {
MallocExtension_Internal_ProcessBackgroundActions();
}
#endif
}
-bool MallocExtension::NeedsProcessBackgroundActions() {
-#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
- return &MallocExtension_Internal_ProcessBackgroundActions != nullptr;
-#else
- return false;
-#endif
-}
-
+bool MallocExtension::NeedsProcessBackgroundActions() {
+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
+ return &MallocExtension_Internal_ProcessBackgroundActions != nullptr;
+#else
+ return false;
+#endif
+}
+
MallocExtension::BytesPerSecond MallocExtension::GetBackgroundReleaseRate() {
#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
if (&MallocExtension_Internal_GetBackgroundReleaseRate != nullptr) {
@@ -460,14 +460,14 @@ void MallocExtension::SetBackgroundReleaseRate(BytesPerSecond rate) {
#endif
}
-void MallocExtension::EnableForkSupport() {
-#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
- if (&MallocExtension_EnableForkSupport != nullptr) {
- MallocExtension_EnableForkSupport();
- }
-#endif
-}
-
+void MallocExtension::EnableForkSupport() {
+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
+ if (&MallocExtension_EnableForkSupport != nullptr) {
+ MallocExtension_EnableForkSupport();
+ }
+#endif
+}
+
void MallocExtension::SetSampleUserDataCallbacks(
CreateSampleUserDataCallback create,
CopySampleUserDataCallback copy,
diff --git a/contrib/libs/tcmalloc/tcmalloc/malloc_extension.h b/contrib/libs/tcmalloc/tcmalloc/malloc_extension.h
index fcbd347ca1..a55be1850a 100644
--- a/contrib/libs/tcmalloc/tcmalloc/malloc_extension.h
+++ b/contrib/libs/tcmalloc/tcmalloc/malloc_extension.h
@@ -35,11 +35,11 @@
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
-#include "absl/base/policy_checks.h"
+#include "absl/base/policy_checks.h"
#include "absl/base/port.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/string_view.h"
-#include "absl/time/time.h"
+#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
@@ -299,9 +299,9 @@ class MallocExtension final {
// Note: limit=SIZE_T_MAX implies no limit.
size_t limit = std::numeric_limits<size_t>::max();
bool hard = false;
-
- // Explicitly declare the ctor to put it in the google_malloc section.
- MemoryLimit() = default;
+
+ // Explicitly declare the ctor to put it in the google_malloc section.
+ MemoryLimit() = default;
};
static MemoryLimit GetMemoryLimit();
@@ -315,13 +315,13 @@ class MallocExtension final {
// Gets the guarded sampling rate. Returns a value < 0 if unknown.
static int64_t GetGuardedSamplingRate();
- // Sets the guarded sampling rate for sampled allocations. TCMalloc samples
- // approximately every rate bytes allocated, subject to implementation
- // limitations in GWP-ASan.
- //
- // Guarded samples provide probablistic protections against buffer underflow,
- // overflow, and use-after-free when GWP-ASan is active (via calling
- // ActivateGuardedSampling).
+ // Sets the guarded sampling rate for sampled allocations. TCMalloc samples
+ // approximately every rate bytes allocated, subject to implementation
+ // limitations in GWP-ASan.
+ //
+ // Guarded samples provide probablistic protections against buffer underflow,
+ // overflow, and use-after-free when GWP-ASan is active (via calling
+ // ActivateGuardedSampling).
static void SetGuardedSamplingRate(int64_t rate);
// Switches TCMalloc to guard sampled allocations for underflow, overflow, and
@@ -331,11 +331,11 @@ class MallocExtension final {
// Gets whether TCMalloc is using per-CPU caches.
static bool PerCpuCachesActive();
- // Extension for unified agent.
- //
- // Should be removed in the future https://st.yandex-team.ru/UNIFIEDAGENT-321
- static void DeactivatePerCpuCaches();
-
+ // Extension for unified agent.
+ //
+ // Should be removed in the future https://st.yandex-team.ru/UNIFIEDAGENT-321
+ static void DeactivatePerCpuCaches();
+
// Gets the current maximum cache size per CPU cache.
static int32_t GetMaxPerCpuCacheSize();
// Sets the maximum cache size per CPU cache. This is a per-core limit.
@@ -346,11 +346,11 @@ class MallocExtension final {
// Sets the maximum thread cache size. This is a whole-process limit.
static void SetMaxTotalThreadCacheBytes(int64_t value);
- // Gets the delayed subrelease interval (0 if delayed subrelease is disabled)
- static absl::Duration GetSkipSubreleaseInterval();
- // Sets the delayed subrelease interval (0 to disable delayed subrelease)
- static void SetSkipSubreleaseInterval(absl::Duration value);
-
+ // Gets the delayed subrelease interval (0 if delayed subrelease is disabled)
+ static absl::Duration GetSkipSubreleaseInterval();
+ // Sets the delayed subrelease interval (0 to disable delayed subrelease)
+ static void SetSkipSubreleaseInterval(absl::Duration value);
+
// Returns the estimated number of bytes that will be allocated for a request
// of "size" bytes. This is an estimate: an allocation of "size" bytes may
// reserve more bytes, but will never reserve fewer.
@@ -454,11 +454,11 @@ class MallocExtension final {
// When linked against TCMalloc, this method does not return.
static void ProcessBackgroundActions();
- // Return true if ProcessBackgroundActions should be called on this platform.
- // Not all platforms need/support background actions. As of 2021 this
- // includes Apple and Emscripten.
- static bool NeedsProcessBackgroundActions();
-
+ // Return true if ProcessBackgroundActions should be called on this platform.
+ // Not all platforms need/support background actions. As of 2021 this
+ // includes Apple and Emscripten.
+ static bool NeedsProcessBackgroundActions();
+
// Specifies a rate in bytes per second.
//
// The enum is used to provide strong-typing for the value.
@@ -470,10 +470,10 @@ class MallocExtension final {
// Specifies the release rate from the page heap. ProcessBackgroundActions
// must be called for this to be operative.
static void SetBackgroundReleaseRate(BytesPerSecond rate);
-
- // Enables fork support.
- // Allocator will continue to function correctly in the child, after calling fork().
- static void EnableForkSupport();
+
+ // Enables fork support.
+ // Allocator will continue to function correctly in the child, after calling fork().
+ static void EnableForkSupport();
using CreateSampleUserDataCallback = void*();
using CopySampleUserDataCallback = void*(void*);
@@ -550,7 +550,7 @@ tcmalloc::sized_ptr_t tcmalloc_size_returning_operator_new_nothrow(
// Aligned size returning new is only supported for libc++ because of issues
// with libstdcxx.so linkage. See http://b/110969867 for background.
-#if defined(__cpp_aligned_new)
+#if defined(__cpp_aligned_new)
// Identical to `tcmalloc_size_returning_operator_new` except that the returned
// memory is aligned according to the `alignment` argument.
@@ -559,7 +559,7 @@ tcmalloc::sized_ptr_t tcmalloc_size_returning_operator_new_aligned(
tcmalloc::sized_ptr_t tcmalloc_size_returning_operator_new_aligned_nothrow(
size_t size, std::align_val_t alignment) noexcept;
-#endif // __cpp_aligned_new
+#endif // __cpp_aligned_new
} // extern "C"
@@ -578,9 +578,9 @@ namespace tcmalloc_internal {
// while allowing the library to compile and link.
class AllocationProfilingTokenBase {
public:
- // Explicitly declare the ctor to put it in the google_malloc section.
- AllocationProfilingTokenBase() = default;
-
+ // Explicitly declare the ctor to put it in the google_malloc section.
+ AllocationProfilingTokenBase() = default;
+
virtual ~AllocationProfilingTokenBase() = default;
// Finish recording started during construction of this object.
diff --git a/contrib/libs/tcmalloc/tcmalloc/malloc_extension_test.cc b/contrib/libs/tcmalloc/tcmalloc/malloc_extension_test.cc
index 5088806ff8..ce5fb0501a 100644
--- a/contrib/libs/tcmalloc/tcmalloc/malloc_extension_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/malloc_extension_test.cc
@@ -18,7 +18,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/time/time.h"
+#include "absl/time/time.h"
namespace tcmalloc {
namespace {
@@ -39,17 +39,17 @@ TEST(MallocExtension, BackgroundReleaseRate) {
0);
}
-TEST(MallocExtension, SkipSubreleaseInterval) {
-
- // Mutate via MallocExtension.
- MallocExtension::SetSkipSubreleaseInterval(absl::Seconds(10));
- EXPECT_EQ(MallocExtension::GetSkipSubreleaseInterval(), absl::Seconds(10));
-
- // Disable skip subrelease
- MallocExtension::SetSkipSubreleaseInterval(absl::ZeroDuration());
- EXPECT_EQ(MallocExtension::GetSkipSubreleaseInterval(), absl::ZeroDuration());
-}
-
+TEST(MallocExtension, SkipSubreleaseInterval) {
+
+ // Mutate via MallocExtension.
+ MallocExtension::SetSkipSubreleaseInterval(absl::Seconds(10));
+ EXPECT_EQ(MallocExtension::GetSkipSubreleaseInterval(), absl::Seconds(10));
+
+ // Disable skip subrelease
+ MallocExtension::SetSkipSubreleaseInterval(absl::ZeroDuration());
+ EXPECT_EQ(MallocExtension::GetSkipSubreleaseInterval(), absl::ZeroDuration());
+}
+
TEST(MallocExtension, Properties) {
// Verify that every property under GetProperties also works with
// GetNumericProperty.
diff --git a/contrib/libs/tcmalloc/tcmalloc/mock_central_freelist.cc b/contrib/libs/tcmalloc/tcmalloc/mock_central_freelist.cc
index 13308b947a..600cbbf18f 100644
--- a/contrib/libs/tcmalloc/tcmalloc/mock_central_freelist.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/mock_central_freelist.cc
@@ -18,19 +18,19 @@
#include "tcmalloc/internal/logging.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
void MinimalFakeCentralFreeList::AllocateBatch(void** batch, int n) {
for (int i = 0; i < n; ++i) batch[i] = &batch[i];
}
-void MinimalFakeCentralFreeList::FreeBatch(absl::Span<void*> batch) {
- for (void* x : batch) CHECK_CONDITION(x != nullptr);
+void MinimalFakeCentralFreeList::FreeBatch(absl::Span<void*> batch) {
+ for (void* x : batch) CHECK_CONDITION(x != nullptr);
}
-void MinimalFakeCentralFreeList::InsertRange(absl::Span<void*> batch) {
+void MinimalFakeCentralFreeList::InsertRange(absl::Span<void*> batch) {
absl::base_internal::SpinLockHolder h(&lock_);
- FreeBatch(batch);
+ FreeBatch(batch);
}
int MinimalFakeCentralFreeList::RemoveRange(void** batch, int n) {
@@ -45,14 +45,14 @@ void FakeCentralFreeList::AllocateBatch(void** batch, int n) {
}
}
-void FakeCentralFreeList::FreeBatch(absl::Span<void*> batch) {
- for (void* x : batch) {
- ::operator delete(x);
+void FakeCentralFreeList::FreeBatch(absl::Span<void*> batch) {
+ for (void* x : batch) {
+ ::operator delete(x);
}
}
-void FakeCentralFreeList::InsertRange(absl::Span<void*> batch) {
- FreeBatch(batch);
+void FakeCentralFreeList::InsertRange(absl::Span<void*> batch) {
+ FreeBatch(batch);
}
int FakeCentralFreeList::RemoveRange(void** batch, int n) {
@@ -60,5 +60,5 @@ int FakeCentralFreeList::RemoveRange(void** batch, int n) {
return n;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/mock_central_freelist.h b/contrib/libs/tcmalloc/tcmalloc/mock_central_freelist.h
index c2a56c0c60..0eb3c8dbfc 100644
--- a/contrib/libs/tcmalloc/tcmalloc/mock_central_freelist.h
+++ b/contrib/libs/tcmalloc/tcmalloc/mock_central_freelist.h
@@ -19,18 +19,18 @@
#include "gmock/gmock.h"
#include "absl/base/internal/spinlock.h"
-#include "absl/types/span.h"
+#include "absl/types/span.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
class FakeCentralFreeListBase {
public:
- FakeCentralFreeListBase() {}
+ FakeCentralFreeListBase() {}
FakeCentralFreeListBase(const FakeCentralFreeListBase&) = delete;
FakeCentralFreeListBase& operator=(const FakeCentralFreeListBase&) = delete;
- static constexpr void Init(size_t) {}
+ static constexpr void Init(size_t) {}
};
// CentralFreeList implementation that backs onto the system's malloc.
@@ -39,11 +39,11 @@ class FakeCentralFreeListBase {
// is important.
class FakeCentralFreeList : public FakeCentralFreeListBase {
public:
- void InsertRange(absl::Span<void*> batch);
+ void InsertRange(absl::Span<void*> batch);
int RemoveRange(void** batch, int N);
void AllocateBatch(void** batch, int n);
- void FreeBatch(absl::Span<void*> batch);
+ void FreeBatch(absl::Span<void*> batch);
};
// CentralFreeList implementation that does minimal work but no correctness
@@ -52,11 +52,11 @@ class FakeCentralFreeList : public FakeCentralFreeListBase {
// Useful for benchmarks where you want to avoid unrelated expensive operations.
class MinimalFakeCentralFreeList : public FakeCentralFreeListBase {
public:
- void InsertRange(absl::Span<void*> batch);
+ void InsertRange(absl::Span<void*> batch);
int RemoveRange(void** batch, int N);
void AllocateBatch(void** batch, int n);
- void FreeBatch(absl::Span<void*> batch);
+ void FreeBatch(absl::Span<void*> batch);
private:
absl::base_internal::SpinLock lock_;
@@ -69,21 +69,21 @@ class MinimalFakeCentralFreeList : public FakeCentralFreeListBase {
class RawMockCentralFreeList : public FakeCentralFreeList {
public:
RawMockCentralFreeList() : FakeCentralFreeList() {
- ON_CALL(*this, InsertRange).WillByDefault([this](absl::Span<void*> batch) {
- return static_cast<FakeCentralFreeList*>(this)->InsertRange(batch);
+ ON_CALL(*this, InsertRange).WillByDefault([this](absl::Span<void*> batch) {
+ return static_cast<FakeCentralFreeList*>(this)->InsertRange(batch);
});
ON_CALL(*this, RemoveRange).WillByDefault([this](void** batch, int n) {
return static_cast<FakeCentralFreeList*>(this)->RemoveRange(batch, n);
});
}
- MOCK_METHOD(void, InsertRange, (absl::Span<void*> batch));
+ MOCK_METHOD(void, InsertRange, (absl::Span<void*> batch));
MOCK_METHOD(int, RemoveRange, (void** batch, int N));
};
using MockCentralFreeList = testing::NiceMock<RawMockCentralFreeList>;
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
#endif // TCMALLOC_MOCK_CENTRAL_FREELIST_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/mock_transfer_cache.cc b/contrib/libs/tcmalloc/tcmalloc/mock_transfer_cache.cc
index b8b2bcf131..b8216d22e2 100644
--- a/contrib/libs/tcmalloc/tcmalloc/mock_transfer_cache.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/mock_transfer_cache.cc
@@ -15,10 +15,10 @@
#include "tcmalloc/mock_transfer_cache.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
int FakeTransferCacheManager::DetermineSizeClassToEvict() { return 3; }
bool FakeTransferCacheManager::ShrinkCache(int) { return true; }
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/mock_transfer_cache.h b/contrib/libs/tcmalloc/tcmalloc/mock_transfer_cache.h
index 5b5192f6dc..72d3d79802 100644
--- a/contrib/libs/tcmalloc/tcmalloc/mock_transfer_cache.h
+++ b/contrib/libs/tcmalloc/tcmalloc/mock_transfer_cache.h
@@ -17,8 +17,8 @@
#include <stddef.h>
-#include <algorithm>
-#include <memory>
+#include <algorithm>
+#include <memory>
#include <random>
#include "gmock/gmock.h"
@@ -26,14 +26,14 @@
#include "absl/random/random.h"
#include "tcmalloc/common.h"
#include "tcmalloc/mock_central_freelist.h"
-#include "tcmalloc/transfer_cache_internals.h"
+#include "tcmalloc/transfer_cache_internals.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
inline constexpr size_t kClassSize = 8;
inline constexpr size_t kNumToMove = 32;
-inline constexpr int kSizeClass = 0;
+inline constexpr int kSizeClass = 0;
class FakeTransferCacheManagerBase {
public:
@@ -110,16 +110,16 @@ class FakeTransferCacheEnvironment {
using Manager = typename TransferCache::Manager;
using FreeList = typename TransferCache::FreeList;
- static constexpr int kMaxObjectsToMove =
- ::tcmalloc::tcmalloc_internal::kMaxObjectsToMove;
+ static constexpr int kMaxObjectsToMove =
+ ::tcmalloc::tcmalloc_internal::kMaxObjectsToMove;
static constexpr int kBatchSize = Manager::num_objects_to_move(1);
- FakeTransferCacheEnvironment() : manager_(), cache_(&manager_, 1) {}
+ FakeTransferCacheEnvironment() : manager_(), cache_(&manager_, 1) {}
~FakeTransferCacheEnvironment() { Drain(); }
- void Shrink() { cache_.ShrinkCache(kSizeClass); }
- void Grow() { cache_.GrowCache(kSizeClass); }
+ void Shrink() { cache_.ShrinkCache(kSizeClass); }
+ void Grow() { cache_.GrowCache(kSizeClass); }
void Insert(int n) {
std::vector<void*> bufs;
@@ -127,7 +127,7 @@ class FakeTransferCacheEnvironment {
int b = std::min(n, kBatchSize);
bufs.resize(b);
central_freelist().AllocateBatch(&bufs[0], b);
- cache_.InsertRange(kSizeClass, absl::MakeSpan(bufs));
+ cache_.InsertRange(kSizeClass, absl::MakeSpan(bufs));
n -= b;
}
}
@@ -137,11 +137,11 @@ class FakeTransferCacheEnvironment {
while (n > 0) {
int b = std::min(n, kBatchSize);
bufs.resize(b);
- int removed = cache_.RemoveRange(kSizeClass, &bufs[0], b);
+ int removed = cache_.RemoveRange(kSizeClass, &bufs[0], b);
// Ensure we make progress.
ASSERT_GT(removed, 0);
ASSERT_LE(removed, b);
- central_freelist().FreeBatch({&bufs[0], static_cast<size_t>(removed)});
+ central_freelist().FreeBatch({&bufs[0], static_cast<size_t>(removed)});
n -= removed;
}
}
@@ -158,9 +158,9 @@ class FakeTransferCacheEnvironment {
Shrink();
} else if (choice < 0.2) {
Grow();
- } else if (choice < 0.3) {
- cache_.HasSpareCapacity(kSizeClass);
- } else if (choice < 0.65) {
+ } else if (choice < 0.3) {
+ cache_.HasSpareCapacity(kSizeClass);
+ } else if (choice < 0.65) {
Insert(absl::Uniform(gen, 1, kBatchSize));
} else {
Remove(absl::Uniform(gen, 1, kBatchSize));
@@ -178,133 +178,133 @@ class FakeTransferCacheEnvironment {
TransferCache cache_;
};
-// A fake transfer cache manager class which supports two size classes instead
-// of just the one. To make this work, we have to store the transfer caches
-// inside the cache manager, like in production code.
-template <typename FreeListT,
- template <typename FreeList, typename Manager> class TransferCacheT>
-class TwoSizeClassManager : public FakeTransferCacheManagerBase {
- public:
- using FreeList = FreeListT;
- using TransferCache = TransferCacheT<FreeList, TwoSizeClassManager>;
-
- // This is 3 instead of 2 because we hard code cl == 0 to be invalid in many
- // places. We only use cl 1 and 2 here.
- static constexpr int kSizeClasses = 3;
- static constexpr size_t kClassSize1 = 8;
- static constexpr size_t kClassSize2 = 16;
- static constexpr size_t kNumToMove1 = 32;
- static constexpr size_t kNumToMove2 = 16;
-
- TwoSizeClassManager() {
- caches_.push_back(absl::make_unique<TransferCache>(this, 0));
- caches_.push_back(absl::make_unique<TransferCache>(this, 1));
- caches_.push_back(absl::make_unique<TransferCache>(this, 2));
- }
-
- constexpr static size_t class_to_size(int size_class) {
- switch (size_class) {
- case 1:
- return kClassSize1;
- case 2:
- return kClassSize2;
- default:
- return 0;
- }
- }
- constexpr static size_t num_objects_to_move(int size_class) {
- switch (size_class) {
- case 1:
- return kNumToMove1;
- case 2:
- return kNumToMove2;
- default:
- return 0;
- }
- }
-
- int DetermineSizeClassToEvict() { return evicting_from_; }
-
- bool ShrinkCache(int size_class) {
- return caches_[size_class]->ShrinkCache(size_class);
- }
-
- FreeList& central_freelist(int cl) { return caches_[cl]->freelist(); }
-
- void InsertRange(int cl, absl::Span<void*> batch) {
- caches_[cl]->InsertRange(cl, batch);
- }
-
- int RemoveRange(int cl, void** batch, int N) {
- return caches_[cl]->RemoveRange(cl, batch, N);
- }
-
- bool HasSpareCapacity(int cl) { return caches_[cl]->HasSpareCapacity(cl); }
-
- size_t tc_length(int cl) { return caches_[cl]->tc_length(); }
-
- std::vector<std::unique_ptr<TransferCache>> caches_;
-
- // From which size class to evict.
- int evicting_from_ = 1;
-};
-
-template <template <typename FreeList, typename Manager> class TransferCacheT>
-class TwoSizeClassEnv {
- public:
- using FreeList = MockCentralFreeList;
- using Manager = TwoSizeClassManager<FreeList, TransferCacheT>;
- using TransferCache = typename Manager::TransferCache;
-
- static constexpr int kMaxObjectsToMove =
- ::tcmalloc::tcmalloc_internal::kMaxObjectsToMove;
-
- explicit TwoSizeClassEnv() = default;
-
- ~TwoSizeClassEnv() { Drain(); }
-
- void Insert(int cl, int n) {
- const size_t batch_size = Manager::num_objects_to_move(cl);
- std::vector<void*> bufs;
- while (n > 0) {
- int b = std::min<int>(n, batch_size);
- bufs.resize(b);
- central_freelist(cl).AllocateBatch(&bufs[0], b);
- manager_.InsertRange(cl, absl::MakeSpan(bufs));
- n -= b;
- }
- }
-
- void Remove(int cl, int n) {
- const size_t batch_size = Manager::num_objects_to_move(cl);
- std::vector<void*> bufs;
- while (n > 0) {
- const int b = std::min<int>(n, batch_size);
- bufs.resize(b);
- const int removed = manager_.RemoveRange(cl, &bufs[0], b);
- // Ensure we make progress.
- ASSERT_GT(removed, 0);
- ASSERT_LE(removed, b);
- central_freelist(cl).FreeBatch({&bufs[0], static_cast<size_t>(removed)});
- n -= removed;
- }
- }
-
- void Drain() {
- for (int i = 0; i < Manager::kSizeClasses; ++i) {
- Remove(i, manager_.tc_length(i));
- }
- }
-
- Manager& transfer_cache_manager() { return manager_; }
-
- FreeList& central_freelist(int cl) { return manager_.central_freelist(cl); }
-
- private:
- Manager manager_;
-};
-
-} // namespace tcmalloc_internal
+// A fake transfer cache manager class which supports two size classes instead
+// of just the one. To make this work, we have to store the transfer caches
+// inside the cache manager, like in production code.
+template <typename FreeListT,
+ template <typename FreeList, typename Manager> class TransferCacheT>
+class TwoSizeClassManager : public FakeTransferCacheManagerBase {
+ public:
+ using FreeList = FreeListT;
+ using TransferCache = TransferCacheT<FreeList, TwoSizeClassManager>;
+
+ // This is 3 instead of 2 because we hard code cl == 0 to be invalid in many
+ // places. We only use cl 1 and 2 here.
+ static constexpr int kSizeClasses = 3;
+ static constexpr size_t kClassSize1 = 8;
+ static constexpr size_t kClassSize2 = 16;
+ static constexpr size_t kNumToMove1 = 32;
+ static constexpr size_t kNumToMove2 = 16;
+
+ TwoSizeClassManager() {
+ caches_.push_back(absl::make_unique<TransferCache>(this, 0));
+ caches_.push_back(absl::make_unique<TransferCache>(this, 1));
+ caches_.push_back(absl::make_unique<TransferCache>(this, 2));
+ }
+
+ constexpr static size_t class_to_size(int size_class) {
+ switch (size_class) {
+ case 1:
+ return kClassSize1;
+ case 2:
+ return kClassSize2;
+ default:
+ return 0;
+ }
+ }
+ constexpr static size_t num_objects_to_move(int size_class) {
+ switch (size_class) {
+ case 1:
+ return kNumToMove1;
+ case 2:
+ return kNumToMove2;
+ default:
+ return 0;
+ }
+ }
+
+ int DetermineSizeClassToEvict() { return evicting_from_; }
+
+ bool ShrinkCache(int size_class) {
+ return caches_[size_class]->ShrinkCache(size_class);
+ }
+
+ FreeList& central_freelist(int cl) { return caches_[cl]->freelist(); }
+
+ void InsertRange(int cl, absl::Span<void*> batch) {
+ caches_[cl]->InsertRange(cl, batch);
+ }
+
+ int RemoveRange(int cl, void** batch, int N) {
+ return caches_[cl]->RemoveRange(cl, batch, N);
+ }
+
+ bool HasSpareCapacity(int cl) { return caches_[cl]->HasSpareCapacity(cl); }
+
+ size_t tc_length(int cl) { return caches_[cl]->tc_length(); }
+
+ std::vector<std::unique_ptr<TransferCache>> caches_;
+
+ // From which size class to evict.
+ int evicting_from_ = 1;
+};
+
+template <template <typename FreeList, typename Manager> class TransferCacheT>
+class TwoSizeClassEnv {
+ public:
+ using FreeList = MockCentralFreeList;
+ using Manager = TwoSizeClassManager<FreeList, TransferCacheT>;
+ using TransferCache = typename Manager::TransferCache;
+
+ static constexpr int kMaxObjectsToMove =
+ ::tcmalloc::tcmalloc_internal::kMaxObjectsToMove;
+
+ explicit TwoSizeClassEnv() = default;
+
+ ~TwoSizeClassEnv() { Drain(); }
+
+ void Insert(int cl, int n) {
+ const size_t batch_size = Manager::num_objects_to_move(cl);
+ std::vector<void*> bufs;
+ while (n > 0) {
+ int b = std::min<int>(n, batch_size);
+ bufs.resize(b);
+ central_freelist(cl).AllocateBatch(&bufs[0], b);
+ manager_.InsertRange(cl, absl::MakeSpan(bufs));
+ n -= b;
+ }
+ }
+
+ void Remove(int cl, int n) {
+ const size_t batch_size = Manager::num_objects_to_move(cl);
+ std::vector<void*> bufs;
+ while (n > 0) {
+ const int b = std::min<int>(n, batch_size);
+ bufs.resize(b);
+ const int removed = manager_.RemoveRange(cl, &bufs[0], b);
+ // Ensure we make progress.
+ ASSERT_GT(removed, 0);
+ ASSERT_LE(removed, b);
+ central_freelist(cl).FreeBatch({&bufs[0], static_cast<size_t>(removed)});
+ n -= removed;
+ }
+ }
+
+ void Drain() {
+ for (int i = 0; i < Manager::kSizeClasses; ++i) {
+ Remove(i, manager_.tc_length(i));
+ }
+ }
+
+ Manager& transfer_cache_manager() { return manager_; }
+
+ FreeList& central_freelist(int cl) { return manager_.central_freelist(cl); }
+
+ private:
+ Manager manager_;
+};
+
+} // namespace tcmalloc_internal
} // namespace tcmalloc
#endif // TCMALLOC_MOCK_TRANSFER_CACHE_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/noruntime_size_classes.cc b/contrib/libs/tcmalloc/tcmalloc/noruntime_size_classes.cc
index c6dc90adcc..898fda1adb 100644
--- a/contrib/libs/tcmalloc/tcmalloc/noruntime_size_classes.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/noruntime_size_classes.cc
@@ -16,9 +16,9 @@
#include "tcmalloc/runtime_size_classes.h"
#include "tcmalloc/size_class_info.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Default implementation doesn't load runtime size classes.
// To enable runtime size classes, link with :runtime_size_classes.
@@ -28,6 +28,6 @@ ABSL_ATTRIBUTE_WEAK ABSL_ATTRIBUTE_NOINLINE int MaybeSizeClassesFromEnv(
return -1;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/page_allocator.cc b/contrib/libs/tcmalloc/tcmalloc/page_allocator.cc
index e9599ef46a..b379935fc1 100644
--- a/contrib/libs/tcmalloc/tcmalloc/page_allocator.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/page_allocator.cc
@@ -25,9 +25,9 @@
#include "tcmalloc/parameters.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
int ABSL_ATTRIBUTE_WEAK default_want_hpaa();
@@ -44,32 +44,32 @@ bool decide_want_hpaa() {
const char *e =
tcmalloc::tcmalloc_internal::thread_safe_getenv("TCMALLOC_HPAA_CONTROL");
if (e) {
- switch (e[0]) {
- case '0':
- if (kPageShift <= 12) {
- return false;
- }
-
- if (default_want_hpaa != nullptr) {
- int default_hpaa = default_want_hpaa();
- if (default_hpaa < 0) {
- return false;
- }
- }
-
- Log(kLog, __FILE__, __LINE__,
- "Runtime opt-out from HPAA requires building with "
- "//tcmalloc:want_no_hpaa."
- );
- break;
- case '1':
- return true;
- case '2':
- return true;
- default:
- Crash(kCrash, __FILE__, __LINE__, "bad env var", e);
- return false;
- }
+ switch (e[0]) {
+ case '0':
+ if (kPageShift <= 12) {
+ return false;
+ }
+
+ if (default_want_hpaa != nullptr) {
+ int default_hpaa = default_want_hpaa();
+ if (default_hpaa < 0) {
+ return false;
+ }
+ }
+
+ Log(kLog, __FILE__, __LINE__,
+ "Runtime opt-out from HPAA requires building with "
+ "//tcmalloc:want_no_hpaa."
+ );
+ break;
+ case '1':
+ return true;
+ case '2':
+ return true;
+ default:
+ Crash(kCrash, __FILE__, __LINE__, "bad env var", e);
+ return false;
+ }
}
if (default_want_hpaa != nullptr) {
@@ -96,22 +96,22 @@ bool want_hpaa() {
PageAllocator::PageAllocator() {
const bool kUseHPAA = want_hpaa();
if (kUseHPAA) {
- normal_impl_[0] =
+ normal_impl_[0] =
new (&choices_[0].hpaa) HugePageAwareAllocator(MemoryTag::kNormal);
- if (Static::numa_topology().numa_aware()) {
- normal_impl_[1] =
- new (&choices_[1].hpaa) HugePageAwareAllocator(MemoryTag::kNormalP1);
- }
- sampled_impl_ = new (&choices_[kNumaPartitions + 0].hpaa)
- HugePageAwareAllocator(MemoryTag::kSampled);
+ if (Static::numa_topology().numa_aware()) {
+ normal_impl_[1] =
+ new (&choices_[1].hpaa) HugePageAwareAllocator(MemoryTag::kNormalP1);
+ }
+ sampled_impl_ = new (&choices_[kNumaPartitions + 0].hpaa)
+ HugePageAwareAllocator(MemoryTag::kSampled);
alg_ = HPAA;
} else {
- normal_impl_[0] = new (&choices_[0].ph) PageHeap(MemoryTag::kNormal);
- if (Static::numa_topology().numa_aware()) {
- normal_impl_[1] = new (&choices_[1].ph) PageHeap(MemoryTag::kNormalP1);
- }
- sampled_impl_ =
- new (&choices_[kNumaPartitions + 0].ph) PageHeap(MemoryTag::kSampled);
+ normal_impl_[0] = new (&choices_[0].ph) PageHeap(MemoryTag::kNormal);
+ if (Static::numa_topology().numa_aware()) {
+ normal_impl_[1] = new (&choices_[1].ph) PageHeap(MemoryTag::kNormalP1);
+ }
+ sampled_impl_ =
+ new (&choices_[kNumaPartitions + 0].ph) PageHeap(MemoryTag::kSampled);
alg_ = PAGE_HEAP;
}
}
@@ -172,12 +172,12 @@ bool PageAllocator::ShrinkHardBy(Length pages) {
limit_, "without breaking hugepages - performance will drop");
warned_hugepages = true;
}
- for (int partition = 0; partition < active_numa_partitions(); partition++) {
- ret += static_cast<HugePageAwareAllocator *>(normal_impl_[partition])
- ->ReleaseAtLeastNPagesBreakingHugepages(pages - ret);
- if (ret >= pages) {
- return true;
- }
+ for (int partition = 0; partition < active_numa_partitions(); partition++) {
+ ret += static_cast<HugePageAwareAllocator *>(normal_impl_[partition])
+ ->ReleaseAtLeastNPagesBreakingHugepages(pages - ret);
+ if (ret >= pages) {
+ return true;
+ }
}
ret += static_cast<HugePageAwareAllocator *>(sampled_impl_)
@@ -187,10 +187,10 @@ bool PageAllocator::ShrinkHardBy(Length pages) {
return (pages <= ret);
}
-size_t PageAllocator::active_numa_partitions() const {
- return Static::numa_topology().active_partitions();
-}
-
-} // namespace tcmalloc_internal
+size_t PageAllocator::active_numa_partitions() const {
+ return Static::numa_topology().active_partitions();
+}
+
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/page_allocator.h b/contrib/libs/tcmalloc/tcmalloc/page_allocator.h
index 611482f999..174fb791cd 100644
--- a/contrib/libs/tcmalloc/tcmalloc/page_allocator.h
+++ b/contrib/libs/tcmalloc/tcmalloc/page_allocator.h
@@ -31,9 +31,9 @@
#include "tcmalloc/span.h"
#include "tcmalloc/stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
class PageAllocator {
public:
@@ -76,7 +76,7 @@ class PageAllocator {
ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
// Prints stats about the page heap to *out.
- void Print(Printer* out, MemoryTag tag) ABSL_LOCKS_EXCLUDED(pageheap_lock);
+ void Print(Printer* out, MemoryTag tag) ABSL_LOCKS_EXCLUDED(pageheap_lock);
void PrintInPbtxt(PbtxtRegion* region, MemoryTag tag)
ABSL_LOCKS_EXCLUDED(pageheap_lock);
@@ -104,10 +104,10 @@ class PageAllocator {
ABSL_ATTRIBUTE_RETURNS_NONNULL PageAllocatorInterface* impl(
MemoryTag tag) const;
- size_t active_numa_partitions() const;
-
- static constexpr size_t kNumHeaps = kNumaPartitions + 1;
+ size_t active_numa_partitions() const;
+ static constexpr size_t kNumHeaps = kNumaPartitions + 1;
+
union Choices {
Choices() : dummy(0) {}
~Choices() {}
@@ -115,7 +115,7 @@ class PageAllocator {
PageHeap ph;
HugePageAwareAllocator hpaa;
} choices_[kNumHeaps];
- std::array<PageAllocatorInterface*, kNumaPartitions> normal_impl_;
+ std::array<PageAllocatorInterface*, kNumaPartitions> normal_impl_;
PageAllocatorInterface* sampled_impl_;
Algorithm alg_;
@@ -128,10 +128,10 @@ class PageAllocator {
inline PageAllocatorInterface* PageAllocator::impl(MemoryTag tag) const {
switch (tag) {
- case MemoryTag::kNormalP0:
- return normal_impl_[0];
- case MemoryTag::kNormalP1:
- return normal_impl_[1];
+ case MemoryTag::kNormalP0:
+ return normal_impl_[0];
+ case MemoryTag::kNormalP1:
+ return normal_impl_[1];
case MemoryTag::kSampled:
return sampled_impl_;
default:
@@ -153,51 +153,51 @@ inline void PageAllocator::Delete(Span* span, MemoryTag tag) {
}
inline BackingStats PageAllocator::stats() const {
- BackingStats ret = normal_impl_[0]->stats();
- for (int partition = 1; partition < active_numa_partitions(); partition++) {
- ret += normal_impl_[partition]->stats();
- }
- ret += sampled_impl_->stats();
- return ret;
+ BackingStats ret = normal_impl_[0]->stats();
+ for (int partition = 1; partition < active_numa_partitions(); partition++) {
+ ret += normal_impl_[partition]->stats();
+ }
+ ret += sampled_impl_->stats();
+ return ret;
}
inline void PageAllocator::GetSmallSpanStats(SmallSpanStats* result) {
SmallSpanStats normal, sampled;
- for (int partition = 0; partition < active_numa_partitions(); partition++) {
- SmallSpanStats part_stats;
- normal_impl_[partition]->GetSmallSpanStats(&part_stats);
- normal += part_stats;
- }
+ for (int partition = 0; partition < active_numa_partitions(); partition++) {
+ SmallSpanStats part_stats;
+ normal_impl_[partition]->GetSmallSpanStats(&part_stats);
+ normal += part_stats;
+ }
sampled_impl_->GetSmallSpanStats(&sampled);
*result = normal + sampled;
}
inline void PageAllocator::GetLargeSpanStats(LargeSpanStats* result) {
LargeSpanStats normal, sampled;
- for (int partition = 0; partition < active_numa_partitions(); partition++) {
- LargeSpanStats part_stats;
- normal_impl_[partition]->GetLargeSpanStats(&part_stats);
- normal += part_stats;
- }
+ for (int partition = 0; partition < active_numa_partitions(); partition++) {
+ LargeSpanStats part_stats;
+ normal_impl_[partition]->GetLargeSpanStats(&part_stats);
+ normal += part_stats;
+ }
sampled_impl_->GetLargeSpanStats(&sampled);
*result = normal + sampled;
}
inline Length PageAllocator::ReleaseAtLeastNPages(Length num_pages) {
- Length released;
- for (int partition = 0; partition < active_numa_partitions(); partition++) {
- released +=
- normal_impl_[partition]->ReleaseAtLeastNPages(num_pages - released);
- if (released >= num_pages) {
- return released;
- }
+ Length released;
+ for (int partition = 0; partition < active_numa_partitions(); partition++) {
+ released +=
+ normal_impl_[partition]->ReleaseAtLeastNPages(num_pages - released);
+ if (released >= num_pages) {
+ return released;
+ }
}
released += sampled_impl_->ReleaseAtLeastNPages(num_pages - released);
return released;
}
-inline void PageAllocator::Print(Printer* out, MemoryTag tag) {
+inline void PageAllocator::Print(Printer* out, MemoryTag tag) {
const absl::string_view label = MemoryTagToLabel(tag);
if (tag != MemoryTag::kNormal) {
out->printf("\n>>>>>>> Begin %s page allocator <<<<<<<\n", label);
@@ -234,8 +234,8 @@ inline const PageAllocInfo& PageAllocator::info(MemoryTag tag) const {
return impl(tag)->info();
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_PAGE_ALLOCATOR_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/page_allocator_interface.cc b/contrib/libs/tcmalloc/tcmalloc/page_allocator_interface.cc
index 3173247acb..5707fbf4f3 100644
--- a/contrib/libs/tcmalloc/tcmalloc/page_allocator_interface.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/page_allocator_interface.cc
@@ -27,17 +27,17 @@
#include "tcmalloc/internal/util.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
static int OpenLog(MemoryTag tag) {
const char *fname = [&]() {
switch (tag) {
case MemoryTag::kNormal:
return thread_safe_getenv("TCMALLOC_PAGE_LOG_FILE");
- case MemoryTag::kNormalP1:
- return thread_safe_getenv("TCMALLOC_PAGE_LOG_FILE_P1");
+ case MemoryTag::kNormalP1:
+ return thread_safe_getenv("TCMALLOC_PAGE_LOG_FILE_P1");
case MemoryTag::kSampled:
return thread_safe_getenv("TCMALLOC_SAMPLED_PAGE_LOG_FILE");
default:
@@ -84,6 +84,6 @@ PageAllocatorInterface::~PageAllocatorInterface() {
Crash(kCrash, __FILE__, __LINE__, "should never destroy this");
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/page_allocator_interface.h b/contrib/libs/tcmalloc/tcmalloc/page_allocator_interface.h
index cf1dc67897..3dd7436586 100644
--- a/contrib/libs/tcmalloc/tcmalloc/page_allocator_interface.h
+++ b/contrib/libs/tcmalloc/tcmalloc/page_allocator_interface.h
@@ -26,9 +26,9 @@
#include "tcmalloc/span.h"
#include "tcmalloc/stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
class PageMap;
@@ -73,7 +73,7 @@ class PageAllocatorInterface {
ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) = 0;
// Prints stats about the page heap to *out.
- virtual void Print(Printer* out) ABSL_LOCKS_EXCLUDED(pageheap_lock) = 0;
+ virtual void Print(Printer* out) ABSL_LOCKS_EXCLUDED(pageheap_lock) = 0;
// Prints stats about the page heap in pbtxt format.
//
@@ -90,8 +90,8 @@ class PageAllocatorInterface {
MemoryTag tag_; // The type of tagged memory this heap manages
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_PAGE_ALLOCATOR_INTERFACE_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/page_allocator_test.cc b/contrib/libs/tcmalloc/tcmalloc/page_allocator_test.cc
index d302c085a9..af7b215050 100644
--- a/contrib/libs/tcmalloc/tcmalloc/page_allocator_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/page_allocator_test.cc
@@ -39,7 +39,7 @@
#include "tcmalloc/stats.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class PageAllocatorTest : public testing::Test {
@@ -79,7 +79,7 @@ class PageAllocatorTest : public testing::Test {
std::string Print() {
std::vector<char> buf(1024 * 1024);
- Printer out(&buf[0], buf.size());
+ Printer out(&buf[0], buf.size());
allocator_->Print(&out, MemoryTag::kNormal);
return std::string(&buf[0]);
@@ -141,5 +141,5 @@ TEST_F(PageAllocatorTest, PrintIt) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/page_allocator_test_util.h b/contrib/libs/tcmalloc/tcmalloc/page_allocator_test_util.h
index 55f134bfdd..8cfe7507ed 100644
--- a/contrib/libs/tcmalloc/tcmalloc/page_allocator_test_util.h
+++ b/contrib/libs/tcmalloc/tcmalloc/page_allocator_test_util.h
@@ -23,9 +23,9 @@
// TODO(b/116000878): Remove dependency on common.h if it causes ODR issues.
#include "tcmalloc/common.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// AddressRegion that adds some padding on either side of each
// allocation. This prevents multiple PageAllocators in the system
@@ -72,8 +72,8 @@ class ExtraRegionFactory : public AddressRegionFactory {
AddressRegionFactory *under_;
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_HUGE_PAGE_AWARE_ALLOCATOR_TEST_UTIL_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/page_heap.cc b/contrib/libs/tcmalloc/tcmalloc/page_heap.cc
index c6b4c6dbd1..9bd8a7573c 100644
--- a/contrib/libs/tcmalloc/tcmalloc/page_heap.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/page_heap.cc
@@ -20,7 +20,7 @@
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/spinlock.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/page_heap_allocator.h"
@@ -30,9 +30,9 @@
#include "tcmalloc/static_vars.h"
#include "tcmalloc/system-alloc.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Helper function to record span address into pageheap
void PageHeap::RecordSpan(Span* span) {
@@ -132,7 +132,7 @@ static bool IsSpanBetter(Span* span, Span* best, Length n) {
// don't bother.
Span* PageHeap::NewAligned(Length n, Length align) {
ASSERT(n > Length(0));
- ASSERT(absl::has_single_bit(align.raw_num()));
+ ASSERT(absl::has_single_bit(align.raw_num()));
if (align <= Length(1)) {
return New(n);
@@ -493,7 +493,7 @@ void PageHeap::PrintInPbtxt(PbtxtRegion* region) {
// We do not collect info_.PrintInPbtxt for now.
}
-void PageHeap::Print(Printer* out) {
+void PageHeap::Print(Printer* out) {
absl::base_internal::SpinLockHolder h(&pageheap_lock);
SmallSpanStats small;
GetSmallSpanStats(&small);
@@ -523,6 +523,6 @@ void PageHeap::Print(Printer* out) {
info_.Print(out);
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/page_heap.h b/contrib/libs/tcmalloc/tcmalloc/page_heap.h
index 86cf5d01df..1a5ec27a59 100644
--- a/contrib/libs/tcmalloc/tcmalloc/page_heap.h
+++ b/contrib/libs/tcmalloc/tcmalloc/page_heap.h
@@ -23,9 +23,9 @@
#include "tcmalloc/span.h"
#include "tcmalloc/stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// -------------------------------------------------------------------------
// Page-level allocator
@@ -40,7 +40,7 @@ class PageHeap final : public PageAllocatorInterface {
explicit PageHeap(MemoryTag tag);
// for testing
PageHeap(PageMap* map, MemoryTag tag);
- ~PageHeap() override = default;
+ ~PageHeap() override = default;
// Allocate a run of "n" pages. Returns zero if out of memory.
// Caller should not pass "n == 0" -- instead, n should have
@@ -79,7 +79,7 @@ class PageHeap final : public PageAllocatorInterface {
ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) override;
// Prints stats about the page heap to *out.
- void Print(Printer* out) ABSL_LOCKS_EXCLUDED(pageheap_lock) override;
+ void Print(Printer* out) ABSL_LOCKS_EXCLUDED(pageheap_lock) override;
void PrintInPbtxt(PbtxtRegion* region)
ABSL_LOCKS_EXCLUDED(pageheap_lock) override;
@@ -154,8 +154,8 @@ class PageHeap final : public PageAllocatorInterface {
void RecordSpan(Span* span) ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_PAGE_HEAP_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/page_heap_allocator.h b/contrib/libs/tcmalloc/tcmalloc/page_heap_allocator.h
index 5d2bbfe92c..63a80a4bda 100644
--- a/contrib/libs/tcmalloc/tcmalloc/page_heap_allocator.h
+++ b/contrib/libs/tcmalloc/tcmalloc/page_heap_allocator.h
@@ -17,16 +17,16 @@
#include <stddef.h>
-#include "absl/base/attributes.h"
+#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "tcmalloc/arena.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
struct AllocatorStats {
// Number of allocated but unfreed objects
@@ -52,8 +52,8 @@ class PageHeapAllocator {
Delete(New());
}
- ABSL_ATTRIBUTE_RETURNS_NONNULL T* New()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) {
+ ABSL_ATTRIBUTE_RETURNS_NONNULL T* New()
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) {
// Consult free list
T* result = free_list_;
stats_.in_use++;
@@ -65,8 +65,8 @@ class PageHeapAllocator {
return result;
}
- void Delete(T* p) ABSL_ATTRIBUTE_NONNULL()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) {
+ void Delete(T* p) ABSL_ATTRIBUTE_NONNULL()
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) {
*(reinterpret_cast<void**>(p)) = free_list_;
free_list_ = p;
stats_.in_use--;
@@ -86,8 +86,8 @@ class PageHeapAllocator {
AllocatorStats stats_ ABSL_GUARDED_BY(pageheap_lock);
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_PAGE_HEAP_ALLOCATOR_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/page_heap_test.cc b/contrib/libs/tcmalloc/tcmalloc/page_heap_test.cc
index dc13a60cb7..249a91f7d0 100644
--- a/contrib/libs/tcmalloc/tcmalloc/page_heap_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/page_heap_test.cc
@@ -28,18 +28,18 @@
#include "tcmalloc/static_vars.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
// PageHeap expands by kMinSystemAlloc by default, so use this as the minimum
// Span length to not get more memory than expected.
constexpr Length kMinSpanLength = BytesToLengthFloor(kMinSystemAlloc);
-void CheckStats(const PageHeap* ph, Length system_pages, Length free_pages,
- Length unmapped_pages) ABSL_LOCKS_EXCLUDED(pageheap_lock) {
- BackingStats stats;
+void CheckStats(const PageHeap* ph, Length system_pages, Length free_pages,
+ Length unmapped_pages) ABSL_LOCKS_EXCLUDED(pageheap_lock) {
+ BackingStats stats;
{
- absl::base_internal::SpinLockHolder h(&pageheap_lock);
+ absl::base_internal::SpinLockHolder h(&pageheap_lock);
stats = ph->stats();
}
@@ -48,15 +48,15 @@ void CheckStats(const PageHeap* ph, Length system_pages, Length free_pages,
ASSERT_EQ(unmapped_pages.in_bytes(), stats.unmapped_bytes);
}
-static void Delete(PageHeap* ph, Span* s) ABSL_LOCKS_EXCLUDED(pageheap_lock) {
+static void Delete(PageHeap* ph, Span* s) ABSL_LOCKS_EXCLUDED(pageheap_lock) {
{
- absl::base_internal::SpinLockHolder h(&pageheap_lock);
+ absl::base_internal::SpinLockHolder h(&pageheap_lock);
ph->Delete(s);
}
}
-static Length Release(PageHeap* ph, Length n) {
- absl::base_internal::SpinLockHolder h(&pageheap_lock);
+static Length Release(PageHeap* ph, Length n) {
+ absl::base_internal::SpinLockHolder h(&pageheap_lock);
return ph->ReleaseAtLeastNPages(n);
}
@@ -71,20 +71,20 @@ class PageHeapTest : public ::testing::Test {
// TODO(b/36484267): replace this test wholesale.
TEST_F(PageHeapTest, Stats) {
- auto pagemap = absl::make_unique<PageMap>();
- void* memory = calloc(1, sizeof(PageHeap));
- PageHeap* ph = new (memory) PageHeap(pagemap.get(), MemoryTag::kNormal);
+ auto pagemap = absl::make_unique<PageMap>();
+ void* memory = calloc(1, sizeof(PageHeap));
+ PageHeap* ph = new (memory) PageHeap(pagemap.get(), MemoryTag::kNormal);
// Empty page heap
CheckStats(ph, Length(0), Length(0), Length(0));
// Allocate a span 's1'
- Span* s1 = ph->New(kMinSpanLength);
+ Span* s1 = ph->New(kMinSpanLength);
CheckStats(ph, kMinSpanLength, Length(0), Length(0));
// Allocate an aligned span 's2'
static const Length kHalf = kMinSpanLength / 2;
- Span* s2 = ph->NewAligned(kHalf, kHalf);
+ Span* s2 = ph->NewAligned(kHalf, kHalf);
ASSERT_EQ(s2->first_page().index() % kHalf.raw_num(), 0);
CheckStats(ph, kMinSpanLength * 2, Length(0), kHalf);
@@ -105,5 +105,5 @@ TEST_F(PageHeapTest, Stats) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/pagemap.cc b/contrib/libs/tcmalloc/tcmalloc/pagemap.cc
index 25962302c3..4270f58d12 100644
--- a/contrib/libs/tcmalloc/tcmalloc/pagemap.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/pagemap.cc
@@ -20,9 +20,9 @@
#include "tcmalloc/span.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
void PageMap::RegisterSizeClass(Span* span, size_t sc) {
ASSERT(span->location() == Span::IN_USE);
@@ -68,6 +68,6 @@ void* MetaDataAlloc(size_t bytes) ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) {
return Static::arena().Alloc(bytes);
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/pagemap.h b/contrib/libs/tcmalloc/tcmalloc/pagemap.h
index 0cafa8a38d..e6da30b938 100644
--- a/contrib/libs/tcmalloc/tcmalloc/pagemap.h
+++ b/contrib/libs/tcmalloc/tcmalloc/pagemap.h
@@ -37,9 +37,9 @@
#include "tcmalloc/span.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Two-level radix tree
typedef void* (*PagemapAllocator)(size_t);
@@ -69,8 +69,8 @@ class PageMap2 {
// information. The size class information is kept segregated
// since small object deallocations are so frequent and do not
// need the other information kept in a Span.
- CompactSizeClass sizeclass[kLeafLength];
- Span* span[kLeafLength];
+ CompactSizeClass sizeclass[kLeafLength];
+ Span* span[kLeafLength];
void* hugepage[kLeafHugepages];
};
@@ -94,7 +94,7 @@ class PageMap2 {
// No locks required. See SYNCHRONIZATION explanation at top of tcmalloc.cc.
// Requires that the span is known to already exist.
- Span* get_existing(Number k) const ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ Span* get_existing(Number k) const ABSL_NO_THREAD_SAFETY_ANALYSIS {
const Number i1 = k >> kLeafBits;
const Number i2 = k & (kLeafLength - 1);
ASSERT((k >> BITS) == 0);
@@ -104,7 +104,7 @@ class PageMap2 {
// No locks required. See SYNCHRONIZATION explanation at top of tcmalloc.cc.
// REQUIRES: Must be a valid page number previously Ensure()d.
- CompactSizeClass ABSL_ATTRIBUTE_ALWAYS_INLINE
+ CompactSizeClass ABSL_ATTRIBUTE_ALWAYS_INLINE
sizeclass(Number k) const ABSL_NO_THREAD_SAFETY_ANALYSIS {
const Number i1 = k >> kLeafBits;
const Number i2 = k & (kLeafLength - 1);
@@ -113,19 +113,19 @@ class PageMap2 {
return root_[i1]->sizeclass[i2];
}
- void set(Number k, Span* s) {
+ void set(Number k, Span* s) {
ASSERT(k >> BITS == 0);
const Number i1 = k >> kLeafBits;
const Number i2 = k & (kLeafLength - 1);
- root_[i1]->span[i2] = s;
+ root_[i1]->span[i2] = s;
}
- void set_with_sizeclass(Number k, Span* s, CompactSizeClass sc) {
+ void set_with_sizeclass(Number k, Span* s, CompactSizeClass sc) {
ASSERT(k >> BITS == 0);
const Number i1 = k >> kLeafBits;
const Number i2 = k & (kLeafLength - 1);
Leaf* leaf = root_[i1];
- leaf->span[i2] = s;
+ leaf->span[i2] = s;
leaf->sizeclass[i2] = sc;
}
@@ -140,9 +140,9 @@ class PageMap2 {
ASSERT(k >> BITS == 0);
const Number i1 = k >> kLeafBits;
const Number i2 = k & (kLeafLength - 1);
- const Leaf* leaf = root_[i1];
- ASSERT(leaf != nullptr);
- return leaf->hugepage[i2 >> (kLeafBits - kLeafHugeBits)];
+ const Leaf* leaf = root_[i1];
+ ASSERT(leaf != nullptr);
+ return leaf->hugepage[i2 >> (kLeafBits - kLeafHugeBits)];
}
void set_hugepage(Number k, void* v) {
@@ -216,8 +216,8 @@ class PageMap3 {
// information. The size class information is kept segregated
// since small object deallocations are so frequent and do not
// need the other information kept in a Span.
- CompactSizeClass sizeclass[kLeafLength];
- Span* span[kLeafLength];
+ CompactSizeClass sizeclass[kLeafLength];
+ Span* span[kLeafLength];
void* hugepage[kLeafHugepages];
};
@@ -248,7 +248,7 @@ class PageMap3 {
// No locks required. See SYNCHRONIZATION explanation at top of tcmalloc.cc.
// Requires that the span is known to already exist.
- Span* get_existing(Number k) const ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ Span* get_existing(Number k) const ABSL_NO_THREAD_SAFETY_ANALYSIS {
const Number i1 = k >> (kLeafBits + kMidBits);
const Number i2 = (k >> kLeafBits) & (kMidLength - 1);
const Number i3 = k & (kLeafLength - 1);
@@ -260,7 +260,7 @@ class PageMap3 {
// No locks required. See SYNCHRONIZATION explanation at top of tcmalloc.cc.
// REQUIRES: Must be a valid page number previously Ensure()d.
- CompactSizeClass ABSL_ATTRIBUTE_ALWAYS_INLINE
+ CompactSizeClass ABSL_ATTRIBUTE_ALWAYS_INLINE
sizeclass(Number k) const ABSL_NO_THREAD_SAFETY_ANALYSIS {
const Number i1 = k >> (kLeafBits + kMidBits);
const Number i2 = (k >> kLeafBits) & (kMidLength - 1);
@@ -271,21 +271,21 @@ class PageMap3 {
return root_[i1]->leafs[i2]->sizeclass[i3];
}
- void set(Number k, Span* s) {
+ void set(Number k, Span* s) {
ASSERT(k >> BITS == 0);
const Number i1 = k >> (kLeafBits + kMidBits);
const Number i2 = (k >> kLeafBits) & (kMidLength - 1);
const Number i3 = k & (kLeafLength - 1);
- root_[i1]->leafs[i2]->span[i3] = s;
+ root_[i1]->leafs[i2]->span[i3] = s;
}
- void set_with_sizeclass(Number k, Span* s, CompactSizeClass sc) {
+ void set_with_sizeclass(Number k, Span* s, CompactSizeClass sc) {
ASSERT(k >> BITS == 0);
const Number i1 = k >> (kLeafBits + kMidBits);
const Number i2 = (k >> kLeafBits) & (kMidLength - 1);
const Number i3 = k & (kLeafLength - 1);
Leaf* leaf = root_[i1]->leafs[i2];
- leaf->span[i3] = s;
+ leaf->span[i3] = s;
leaf->sizeclass[i3] = sc;
}
@@ -302,11 +302,11 @@ class PageMap3 {
const Number i1 = k >> (kLeafBits + kMidBits);
const Number i2 = (k >> kLeafBits) & (kMidLength - 1);
const Number i3 = k & (kLeafLength - 1);
- const Node* node = root_[i1];
- ASSERT(node != nullptr);
- const Leaf* leaf = node->leafs[i2];
- ASSERT(leaf != nullptr);
- return leaf->hugepage[i3 >> (kLeafBits - kLeafHugeBits)];
+ const Node* node = root_[i1];
+ ASSERT(node != nullptr);
+ const Leaf* leaf = node->leafs[i2];
+ ASSERT(leaf != nullptr);
+ return leaf->hugepage[i3 >> (kLeafBits - kLeafHugeBits)];
}
void set_hugepage(Number k, void* v) {
@@ -362,7 +362,7 @@ class PageMap {
// Return the size class for p, or 0 if it is not known to tcmalloc
// or is a page containing large objects.
// No locks required. See SYNCHRONIZATION explanation at top of tcmalloc.cc.
- CompactSizeClass sizeclass(PageId p) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ CompactSizeClass sizeclass(PageId p) ABSL_NO_THREAD_SAFETY_ANALYSIS {
return map_.sizeclass(p.index());
}
@@ -397,7 +397,7 @@ class PageMap {
// No locks required. See SYNCHRONIZATION explanation at top of tcmalloc.cc.
ABSL_ATTRIBUTE_RETURNS_NONNULL inline Span* GetExistingDescriptor(
PageId p) const ABSL_NO_THREAD_SAFETY_ANALYSIS {
- Span* span = map_.get_existing(p.index());
+ Span* span = map_.get_existing(p.index());
ASSERT(span != nullptr);
return span;
}
@@ -424,8 +424,8 @@ class PageMap {
#endif
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_PAGEMAP_H_
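The PageMap2 hunks above all follow the same indexing pattern: a page number k is split into a root index (high bits) and a leaf index (low bits). Below is a minimal, self-contained sketch of such a two-level radix map; the constants, struct names, and the calloc-based leaf allocation are illustrative assumptions, not tcmalloc's metadata-allocation path.

#include <cstdint>
#include <cstdio>
#include <cstdlib>

constexpr int kBits = 20;      // total page-number bits covered (assumed)
constexpr int kLeafBits = 12;  // low bits indexing within a leaf (assumed)
constexpr size_t kLeafLength = size_t{1} << kLeafBits;
constexpr size_t kRootLength = size_t{1} << (kBits - kLeafBits);

struct Leaf {
  void* value[kLeafLength];  // in tcmalloc this would be a Span* per page
};

struct TwoLevelMap {
  Leaf* root[kRootLength] = {};

  void set(uint64_t k, void* v) {
    const uint64_t i1 = k >> kLeafBits;         // which leaf
    const uint64_t i2 = k & (kLeafLength - 1);  // slot within the leaf
    if (root[i1] == nullptr) {
      root[i1] = static_cast<Leaf*>(calloc(1, sizeof(Leaf)));
    }
    root[i1]->value[i2] = v;
  }

  void* get(uint64_t k) const {
    const uint64_t i1 = k >> kLeafBits;
    const uint64_t i2 = k & (kLeafLength - 1);
    return root[i1] ? root[i1]->value[i2] : nullptr;
  }
};

int main() {
  TwoLevelMap map;
  int dummy;
  map.set(12345, &dummy);
  std::printf("found: %d\n", map.get(12345) == &dummy);  // prints 1
}

PageMap3 in the same hunks extends this idea with a middle level (the i1/i2/i3 split), trading one more indirection for a smaller root table.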
diff --git a/contrib/libs/tcmalloc/tcmalloc/pagemap_test.cc b/contrib/libs/tcmalloc/tcmalloc/pagemap_test.cc
index 49ef5477d8..ba31e36943 100644
--- a/contrib/libs/tcmalloc/tcmalloc/pagemap_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/pagemap_test.cc
@@ -32,11 +32,11 @@
// create too many maps.
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
// Pick span pointer to use for page numbered i
-Span* span(intptr_t i) { return reinterpret_cast<Span*>(i + 1); }
+Span* span(intptr_t i) { return reinterpret_cast<Span*>(i + 1); }
// Pick sizeclass to use for page numbered i
uint8_t sc(intptr_t i) { return i % 16; }
@@ -69,7 +69,7 @@ class PageMapTest : public ::testing::TestWithParam<int> {
}
public:
- using Map = PageMap2<20, alloc>;
+ using Map = PageMap2<20, alloc>;
Map* map;
private:
@@ -139,22 +139,22 @@ INSTANTIATE_TEST_SUITE_P(Limits, PageMapTest, ::testing::Values(100, 1 << 20));
// that this is true even if this structure is mapped with huge pages.
static struct PaddedPageMap {
constexpr PaddedPageMap() : padding_before{}, pagemap{}, padding_after{} {}
- uint64_t padding_before[kHugePageSize / sizeof(uint64_t)];
- PageMap pagemap;
- uint64_t padding_after[kHugePageSize / sizeof(uint64_t)];
+ uint64_t padding_before[kHugePageSize / sizeof(uint64_t)];
+ PageMap pagemap;
+ uint64_t padding_after[kHugePageSize / sizeof(uint64_t)];
} padded_pagemap_;
TEST(TestMemoryFootprint, Test) {
uint64_t pagesize = sysconf(_SC_PAGESIZE);
ASSERT_NE(pagesize, 0);
- size_t pages = sizeof(PageMap) / pagesize + 1;
+ size_t pages = sizeof(PageMap) / pagesize + 1;
std::vector<unsigned char> present(pages);
// mincore needs the address rounded to the start page
uint64_t basepage =
reinterpret_cast<uintptr_t>(&padded_pagemap_.pagemap) & ~(pagesize - 1);
- ASSERT_EQ(mincore(reinterpret_cast<void*>(basepage), sizeof(PageMap),
- present.data()),
+ ASSERT_EQ(mincore(reinterpret_cast<void*>(basepage), sizeof(PageMap),
+ present.data()),
0);
for (int i = 0; i < pages; i++) {
EXPECT_EQ(present[i], 0);
@@ -162,5 +162,5 @@ TEST(TestMemoryFootprint, Test) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
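TestMemoryFootprint above relies on mincore(2) to assert that an untouched, lazily initialized PageMap stays non-resident. Below is a standalone, Linux-only sketch of that residency check, run against an anonymous mapping rather than the pagemap itself.

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>
#include <vector>

int main() {
  const size_t pagesize = sysconf(_SC_PAGESIZE);
  const size_t len = 16 * pagesize;
  void* p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return 1;
  static_cast<char*>(p)[0] = 1;  // touch only the first page

  std::vector<unsigned char> present(len / pagesize);
  if (mincore(p, len, present.data()) == 0) {
    for (size_t i = 0; i < present.size(); ++i) {
      // Lowest bit of each vector entry is set if the page is resident.
      std::printf("page %zu resident: %d\n", i, present[i] & 1);
    }
  }
  munmap(p, len);
}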
diff --git a/contrib/libs/tcmalloc/tcmalloc/pages.h b/contrib/libs/tcmalloc/tcmalloc/pages.h
index e674c9c9c8..0ff8fa3d5a 100644
--- a/contrib/libs/tcmalloc/tcmalloc/pages.h
+++ b/contrib/libs/tcmalloc/tcmalloc/pages.h
@@ -23,11 +23,11 @@
#include "absl/strings/string_view.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/internal/optimization.h"
+#include "tcmalloc/internal/optimization.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Type that can hold the length of a run of pages
class Length {
@@ -144,20 +144,20 @@ class PageId {
uintptr_t pn_;
};
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr Length LengthFromBytes(size_t bytes) {
return Length(bytes >> kPageShift);
}
// Convert byte size into pages. This won't overflow, but may return
// an unreasonably large value if bytes is huge enough.
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr Length BytesToLengthCeil(size_t bytes) {
return Length((bytes >> kPageShift) +
((bytes & (kPageSize - 1)) > 0 ? 1 : 0));
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr Length BytesToLengthFloor(size_t bytes) {
return Length(bytes >> kPageShift);
}
@@ -170,82 +170,82 @@ inline PageId& operator++(PageId& p) { // NOLINT(runtime/references)
return p += Length(1);
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator<(PageId lhs, PageId rhs) {
return lhs.pn_ < rhs.pn_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator>(PageId lhs, PageId rhs) {
return lhs.pn_ > rhs.pn_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator<=(PageId lhs, PageId rhs) {
return lhs.pn_ <= rhs.pn_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator>=(PageId lhs, PageId rhs) {
return lhs.pn_ >= rhs.pn_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator==(PageId lhs, PageId rhs) {
return lhs.pn_ == rhs.pn_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator!=(PageId lhs, PageId rhs) {
return lhs.pn_ != rhs.pn_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr PageId operator+(PageId lhs, Length rhs) { return lhs += rhs; }
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr PageId operator+(Length lhs, PageId rhs) { return rhs += lhs; }
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr PageId operator-(PageId lhs, Length rhs) { return lhs -= rhs; }
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr Length operator-(PageId lhs, PageId rhs) {
ASSERT(lhs.pn_ >= rhs.pn_);
return Length(lhs.pn_ - rhs.pn_);
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline PageId PageIdContaining(const void* p) {
return PageId(reinterpret_cast<uintptr_t>(p) >> kPageShift);
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator<(Length lhs, Length rhs) {
return lhs.n_ < rhs.n_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator>(Length lhs, Length rhs) {
return lhs.n_ > rhs.n_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator<=(Length lhs, Length rhs) {
return lhs.n_ <= rhs.n_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator>=(Length lhs, Length rhs) {
return lhs.n_ >= rhs.n_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator==(Length lhs, Length rhs) {
return lhs.n_ == rhs.n_;
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr bool operator!=(Length lhs, Length rhs) {
return lhs.n_ != rhs.n_;
}
@@ -254,45 +254,45 @@ inline Length& operator++(Length& l) { return l += Length(1); }
inline Length& operator--(Length& l) { return l -= Length(1); }
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr Length operator+(Length lhs, Length rhs) {
return Length(lhs.raw_num() + rhs.raw_num());
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr Length operator-(Length lhs, Length rhs) {
return Length(lhs.raw_num() - rhs.raw_num());
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr Length operator*(Length lhs, size_t rhs) {
return Length(lhs.raw_num() * rhs);
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr Length operator*(size_t lhs, Length rhs) {
return Length(lhs * rhs.raw_num());
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr size_t operator/(Length lhs, Length rhs) {
return lhs.raw_num() / rhs.raw_num();
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr Length operator/(Length lhs, size_t rhs) {
ASSERT(rhs != 0);
return Length(lhs.raw_num() / rhs);
}
-TCMALLOC_ATTRIBUTE_CONST
+TCMALLOC_ATTRIBUTE_CONST
inline constexpr Length operator%(Length lhs, Length rhs) {
ASSERT(rhs.raw_num() != 0);
return Length(lhs.raw_num() % rhs.raw_num());
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_PAGES_H_
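The BytesToLengthCeil/BytesToLengthFloor pair above is plain shift-and-mask arithmetic. A minimal sketch, assuming an 8 KiB page (kPageShift = 13; the real shift is build-configuration dependent):

#include <cstddef>
#include <cstdio>

constexpr size_t kPageShift = 13;
constexpr size_t kPageSize = size_t{1} << kPageShift;

constexpr size_t BytesToPagesCeil(size_t bytes) {
  return (bytes >> kPageShift) + ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}

int main() {
  std::printf("%zu\n", BytesToPagesCeil(1));              // 1 page
  std::printf("%zu\n", BytesToPagesCeil(kPageSize));      // exactly 1 page
  std::printf("%zu\n", BytesToPagesCeil(kPageSize + 1));  // rounds up to 2
}

The ceil variant adds one page whenever any low-order bits are set, so kPageSize + 1 bytes rounds up to two pages while an exact multiple does not.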
diff --git a/contrib/libs/tcmalloc/tcmalloc/parameters.cc b/contrib/libs/tcmalloc/tcmalloc/parameters.cc
index 3f8e6e1ef8..3eb9a1efb0 100644
--- a/contrib/libs/tcmalloc/tcmalloc/parameters.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/parameters.cc
@@ -22,9 +22,9 @@
#include "tcmalloc/static_vars.h"
#include "tcmalloc/thread_cache.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// As decide_subrelease() is determined at runtime, we cannot require constant
// initialization for the atomic. This avoids an initialization order fiasco.
@@ -69,10 +69,10 @@ ABSL_CONST_INIT std::atomic<MallocExtension::BytesPerSecond>
});
ABSL_CONST_INIT std::atomic<int64_t> Parameters::guarded_sampling_rate_(
50 * kDefaultProfileSamplingRate);
-ABSL_CONST_INIT std::atomic<bool> Parameters::shuffle_per_cpu_caches_enabled_(
- false);
-ABSL_CONST_INIT std::atomic<bool>
- Parameters::reclaim_idle_per_cpu_caches_enabled_(true);
+ABSL_CONST_INIT std::atomic<bool> Parameters::shuffle_per_cpu_caches_enabled_(
+ false);
+ABSL_CONST_INIT std::atomic<bool>
+ Parameters::reclaim_idle_per_cpu_caches_enabled_(true);
ABSL_CONST_INIT std::atomic<bool> Parameters::lazy_per_cpu_caches_enabled_(
true);
ABSL_CONST_INIT std::atomic<int32_t> Parameters::max_per_cpu_cache_size_(
@@ -97,94 +97,94 @@ absl::Duration Parameters::filler_skip_subrelease_interval() {
skip_subrelease_interval_ns().load(std::memory_order_relaxed));
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
-
-using tcmalloc::tcmalloc_internal::kLog;
-using tcmalloc::tcmalloc_internal::Log;
-using tcmalloc::tcmalloc_internal::Parameters;
-using tcmalloc::tcmalloc_internal::Static;
+GOOGLE_MALLOC_SECTION_END
+using tcmalloc::tcmalloc_internal::kLog;
+using tcmalloc::tcmalloc_internal::Log;
+using tcmalloc::tcmalloc_internal::Parameters;
+using tcmalloc::tcmalloc_internal::Static;
+
extern "C" {
int64_t MallocExtension_Internal_GetProfileSamplingRate() {
- return Parameters::profile_sampling_rate();
+ return Parameters::profile_sampling_rate();
}
void MallocExtension_Internal_SetProfileSamplingRate(int64_t value) {
- Parameters::set_profile_sampling_rate(value);
+ Parameters::set_profile_sampling_rate(value);
}
int64_t MallocExtension_Internal_GetGuardedSamplingRate() {
- return Parameters::guarded_sampling_rate();
+ return Parameters::guarded_sampling_rate();
}
void MallocExtension_Internal_SetGuardedSamplingRate(int64_t value) {
- Parameters::set_guarded_sampling_rate(value);
+ Parameters::set_guarded_sampling_rate(value);
}
int64_t MallocExtension_Internal_GetMaxTotalThreadCacheBytes() {
- return Parameters::max_total_thread_cache_bytes();
+ return Parameters::max_total_thread_cache_bytes();
}
void MallocExtension_Internal_SetMaxTotalThreadCacheBytes(int64_t value) {
- Parameters::set_max_total_thread_cache_bytes(value);
-}
-
-void MallocExtension_Internal_GetSkipSubreleaseInterval(absl::Duration* ret) {
- *ret = Parameters::filler_skip_subrelease_interval();
-}
-
-void MallocExtension_Internal_SetSkipSubreleaseInterval(absl::Duration value) {
- Parameters::set_filler_skip_subrelease_interval(value);
+ Parameters::set_max_total_thread_cache_bytes(value);
}
+void MallocExtension_Internal_GetSkipSubreleaseInterval(absl::Duration* ret) {
+ *ret = Parameters::filler_skip_subrelease_interval();
+}
+
+void MallocExtension_Internal_SetSkipSubreleaseInterval(absl::Duration value) {
+ Parameters::set_filler_skip_subrelease_interval(value);
+}
+
tcmalloc::MallocExtension::BytesPerSecond
MallocExtension_Internal_GetBackgroundReleaseRate() {
- return Parameters::background_release_rate();
+ return Parameters::background_release_rate();
}
void MallocExtension_Internal_SetBackgroundReleaseRate(
tcmalloc::MallocExtension::BytesPerSecond rate) {
- Parameters::set_background_release_rate(rate);
+ Parameters::set_background_release_rate(rate);
}
void TCMalloc_Internal_SetBackgroundReleaseRate(size_t value) {
- Parameters::background_release_rate_.store(
+ Parameters::background_release_rate_.store(
static_cast<tcmalloc::MallocExtension::BytesPerSecond>(value));
}
uint64_t TCMalloc_Internal_GetHeapSizeHardLimit() {
- return Parameters::heap_size_hard_limit();
+ return Parameters::heap_size_hard_limit();
}
bool TCMalloc_Internal_GetHPAASubrelease() {
- return Parameters::hpaa_subrelease();
-}
-
-bool TCMalloc_Internal_GetShufflePerCpuCachesEnabled() {
- return Parameters::shuffle_per_cpu_caches();
-}
-
-bool TCMalloc_Internal_GetReclaimIdlePerCpuCachesEnabled() {
- return Parameters::reclaim_idle_per_cpu_caches();
+ return Parameters::hpaa_subrelease();
}
+bool TCMalloc_Internal_GetShufflePerCpuCachesEnabled() {
+ return Parameters::shuffle_per_cpu_caches();
+}
+
+bool TCMalloc_Internal_GetReclaimIdlePerCpuCachesEnabled() {
+ return Parameters::reclaim_idle_per_cpu_caches();
+}
+
bool TCMalloc_Internal_GetLazyPerCpuCachesEnabled() {
- return Parameters::lazy_per_cpu_caches();
+ return Parameters::lazy_per_cpu_caches();
}
double TCMalloc_Internal_GetPeakSamplingHeapGrowthFraction() {
- return Parameters::peak_sampling_heap_growth_fraction();
+ return Parameters::peak_sampling_heap_growth_fraction();
}
bool TCMalloc_Internal_GetPerCpuCachesEnabled() {
- return Parameters::per_cpu_caches();
+ return Parameters::per_cpu_caches();
}
void TCMalloc_Internal_SetGuardedSamplingRate(int64_t v) {
- Parameters::guarded_sampling_rate_.store(v, std::memory_order_relaxed);
+ Parameters::guarded_sampling_rate_.store(v, std::memory_order_relaxed);
}
// update_lock guards changes via SetHeapSizeHardLimit.
@@ -193,7 +193,7 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock update_lock(
void TCMalloc_Internal_SetHeapSizeHardLimit(uint64_t value) {
// Ensure that page allocator is set up.
- Static::InitIfNecessary();
+ Static::InitIfNecessary();
absl::base_internal::SpinLockHolder l(&update_lock);
@@ -204,68 +204,68 @@ void TCMalloc_Internal_SetHeapSizeHardLimit(uint64_t value) {
active = true;
}
- bool currently_hard = Static::page_allocator().limit().second;
+ bool currently_hard = Static::page_allocator().limit().second;
if (active || currently_hard) {
// Avoid resetting limit when current limit is soft.
- Static::page_allocator().set_limit(limit, active /* is_hard */);
- Log(kLog, __FILE__, __LINE__, "[tcmalloc] set page heap hard limit to",
- limit, "bytes");
+ Static::page_allocator().set_limit(limit, active /* is_hard */);
+ Log(kLog, __FILE__, __LINE__, "[tcmalloc] set page heap hard limit to",
+ limit, "bytes");
}
}
void TCMalloc_Internal_SetHPAASubrelease(bool v) {
- tcmalloc::tcmalloc_internal::hpaa_subrelease_ptr()->store(
- v, std::memory_order_relaxed);
-}
-
-void TCMalloc_Internal_SetShufflePerCpuCachesEnabled(bool v) {
- Parameters::shuffle_per_cpu_caches_enabled_.store(v,
- std::memory_order_relaxed);
-}
-
-void TCMalloc_Internal_SetReclaimIdlePerCpuCachesEnabled(bool v) {
- Parameters::reclaim_idle_per_cpu_caches_enabled_.store(
- v, std::memory_order_relaxed);
-}
-
+ tcmalloc::tcmalloc_internal::hpaa_subrelease_ptr()->store(
+ v, std::memory_order_relaxed);
+}
+
+void TCMalloc_Internal_SetShufflePerCpuCachesEnabled(bool v) {
+ Parameters::shuffle_per_cpu_caches_enabled_.store(v,
+ std::memory_order_relaxed);
+}
+
+void TCMalloc_Internal_SetReclaimIdlePerCpuCachesEnabled(bool v) {
+ Parameters::reclaim_idle_per_cpu_caches_enabled_.store(
+ v, std::memory_order_relaxed);
+}
+
void TCMalloc_Internal_SetLazyPerCpuCachesEnabled(bool v) {
- Parameters::lazy_per_cpu_caches_enabled_.store(v, std::memory_order_relaxed);
+ Parameters::lazy_per_cpu_caches_enabled_.store(v, std::memory_order_relaxed);
}
void TCMalloc_Internal_SetMaxPerCpuCacheSize(int32_t v) {
- Parameters::max_per_cpu_cache_size_.store(v, std::memory_order_relaxed);
+ Parameters::max_per_cpu_cache_size_.store(v, std::memory_order_relaxed);
}
void TCMalloc_Internal_SetMaxTotalThreadCacheBytes(int64_t v) {
- Parameters::max_total_thread_cache_bytes_.store(v, std::memory_order_relaxed);
+ Parameters::max_total_thread_cache_bytes_.store(v, std::memory_order_relaxed);
- absl::base_internal::SpinLockHolder l(
- &tcmalloc::tcmalloc_internal::pageheap_lock);
- tcmalloc::tcmalloc_internal::ThreadCache::set_overall_thread_cache_size(v);
+ absl::base_internal::SpinLockHolder l(
+ &tcmalloc::tcmalloc_internal::pageheap_lock);
+ tcmalloc::tcmalloc_internal::ThreadCache::set_overall_thread_cache_size(v);
}
void TCMalloc_Internal_SetPeakSamplingHeapGrowthFraction(double v) {
- Parameters::peak_sampling_heap_growth_fraction_.store(
+ Parameters::peak_sampling_heap_growth_fraction_.store(
v, std::memory_order_relaxed);
}
void TCMalloc_Internal_SetPerCpuCachesEnabled(bool v) {
- Parameters::per_cpu_caches_enabled_.store(v, std::memory_order_relaxed);
+ Parameters::per_cpu_caches_enabled_.store(v, std::memory_order_relaxed);
}
void TCMalloc_Internal_SetProfileSamplingRate(int64_t v) {
- Parameters::profile_sampling_rate_.store(v, std::memory_order_relaxed);
+ Parameters::profile_sampling_rate_.store(v, std::memory_order_relaxed);
}
void TCMalloc_Internal_GetHugePageFillerSkipSubreleaseInterval(
absl::Duration* v) {
- *v = Parameters::filler_skip_subrelease_interval();
+ *v = Parameters::filler_skip_subrelease_interval();
}
void TCMalloc_Internal_SetHugePageFillerSkipSubreleaseInterval(
absl::Duration v) {
- tcmalloc::tcmalloc_internal::skip_subrelease_interval_ns().store(
- absl::ToInt64Nanoseconds(v), std::memory_order_relaxed);
+ tcmalloc::tcmalloc_internal::skip_subrelease_interval_ns().store(
+ absl::ToInt64Nanoseconds(v), std::memory_order_relaxed);
}
} // extern "C"
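The parameters.cc hunks are largely one pattern repeated: a C-linkage getter/setter pair wrapping a relaxed std::atomic, so malloc-extension code can read and write tuning knobs without taking locks. A hedged sketch of that shape, with illustrative names and a made-up default rather than the real TCMalloc_Internal_* symbols:

#include <atomic>
#include <cstdint>

static std::atomic<int64_t> g_sampling_rate{1 << 21};  // assumed default

extern "C" int64_t Example_GetProfileSamplingRate() {
  return g_sampling_rate.load(std::memory_order_relaxed);
}

extern "C" void Example_SetProfileSamplingRate(int64_t v) {
  g_sampling_rate.store(v, std::memory_order_relaxed);
}

Relaxed ordering is enough here because each parameter is an independent scalar; only the heap-size hard limit above takes a spinlock, since it must also update the page allocator consistently.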
diff --git a/contrib/libs/tcmalloc/tcmalloc/parameters.h b/contrib/libs/tcmalloc/tcmalloc/parameters.h
index 64893f0402..e1786486f9 100644
--- a/contrib/libs/tcmalloc/tcmalloc/parameters.h
+++ b/contrib/libs/tcmalloc/tcmalloc/parameters.h
@@ -22,13 +22,13 @@
#include "absl/base/internal/spinlock.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
#include "tcmalloc/internal/parameter_accessors.h"
#include "tcmalloc/malloc_extension.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
class Parameters {
public:
@@ -79,14 +79,14 @@ class Parameters {
TCMalloc_Internal_SetPeakSamplingHeapGrowthFraction(value);
}
- static bool shuffle_per_cpu_caches() {
- return shuffle_per_cpu_caches_enabled_.load(std::memory_order_relaxed);
- }
-
- static bool reclaim_idle_per_cpu_caches() {
- return reclaim_idle_per_cpu_caches_enabled_.load(std::memory_order_relaxed);
- }
-
+ static bool shuffle_per_cpu_caches() {
+ return shuffle_per_cpu_caches_enabled_.load(std::memory_order_relaxed);
+ }
+
+ static bool reclaim_idle_per_cpu_caches() {
+ return reclaim_idle_per_cpu_caches_enabled_.load(std::memory_order_relaxed);
+ }
+
static bool lazy_per_cpu_caches() {
return lazy_per_cpu_caches_enabled_.load(std::memory_order_relaxed);
}
@@ -121,8 +121,8 @@ class Parameters {
friend void ::TCMalloc_Internal_SetBackgroundReleaseRate(size_t v);
friend void ::TCMalloc_Internal_SetGuardedSamplingRate(int64_t v);
friend void ::TCMalloc_Internal_SetHPAASubrelease(bool v);
- friend void ::TCMalloc_Internal_SetShufflePerCpuCachesEnabled(bool v);
- friend void ::TCMalloc_Internal_SetReclaimIdlePerCpuCachesEnabled(bool v);
+ friend void ::TCMalloc_Internal_SetShufflePerCpuCachesEnabled(bool v);
+ friend void ::TCMalloc_Internal_SetReclaimIdlePerCpuCachesEnabled(bool v);
friend void ::TCMalloc_Internal_SetLazyPerCpuCachesEnabled(bool v);
friend void ::TCMalloc_Internal_SetMaxPerCpuCacheSize(int32_t v);
friend void ::TCMalloc_Internal_SetMaxTotalThreadCacheBytes(int64_t v);
@@ -135,8 +135,8 @@ class Parameters {
static std::atomic<MallocExtension::BytesPerSecond> background_release_rate_;
static std::atomic<int64_t> guarded_sampling_rate_;
- static std::atomic<bool> shuffle_per_cpu_caches_enabled_;
- static std::atomic<bool> reclaim_idle_per_cpu_caches_enabled_;
+ static std::atomic<bool> shuffle_per_cpu_caches_enabled_;
+ static std::atomic<bool> reclaim_idle_per_cpu_caches_enabled_;
static std::atomic<bool> lazy_per_cpu_caches_enabled_;
static std::atomic<int32_t> max_per_cpu_cache_size_;
static std::atomic<int64_t> max_total_thread_cache_bytes_;
@@ -145,8 +145,8 @@ class Parameters {
static std::atomic<int64_t> profile_sampling_rate_;
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_PARAMETERS_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc b/contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc
index 0dcc0df536..d247a16d50 100644
--- a/contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc
@@ -26,9 +26,9 @@
#include "tcmalloc/stack_trace_table.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
bool PeakHeapTracker::IsNewPeak() {
return peak_sampled_heap_size_.value() == 0 ||
@@ -76,18 +76,18 @@ void PeakHeapTracker::MaybeSaveSample() {
peak_sampled_span_stacks_ = t;
}
-std::unique_ptr<ProfileBase> PeakHeapTracker::DumpSample() const {
+std::unique_ptr<ProfileBase> PeakHeapTracker::DumpSample() const {
auto profile = absl::make_unique<StackTraceTable>(
ProfileType::kPeakHeap, Sampler::GetSamplePeriod(), true, true);
absl::base_internal::SpinLockHolder h(&pageheap_lock);
for (StackTrace* t = peak_sampled_span_stacks_; t != nullptr;
- t = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth - 1])) {
+ t = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth - 1])) {
profile->AddTrace(1.0, *t);
}
return profile;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.h b/contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.h
index a9f071d1b5..87d90fc548 100644
--- a/contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.h
+++ b/contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.h
@@ -21,9 +21,9 @@
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/malloc_extension.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
class PeakHeapTracker {
public:
@@ -36,7 +36,7 @@ class PeakHeapTracker {
void MaybeSaveSample() ABSL_LOCKS_EXCLUDED(pageheap_lock);
// Return the saved high-water-mark heap profile, if any.
- std::unique_ptr<ProfileBase> DumpSample() const
+ std::unique_ptr<ProfileBase> DumpSample() const
ABSL_LOCKS_EXCLUDED(pageheap_lock);
size_t CurrentPeakSize() const { return peak_sampled_heap_size_.value(); }
@@ -49,13 +49,13 @@ class PeakHeapTracker {
// Sampled heap size last time peak_sampled_span_stacks_ was saved. Only
// written under pageheap_lock; may be read without it.
- StatsCounter peak_sampled_heap_size_;
+ StatsCounter peak_sampled_heap_size_;
bool IsNewPeak();
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_PEAK_HEAP_TRACKER_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/profile_test.cc b/contrib/libs/tcmalloc/tcmalloc/profile_test.cc
index 0bd62cd428..242d5e2327 100644
--- a/contrib/libs/tcmalloc/tcmalloc/profile_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/profile_test.cc
@@ -28,7 +28,7 @@
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/synchronization/blocking_counter.h"
-#include "benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
#include "tcmalloc/internal/declarations.h"
#include "tcmalloc/internal/linked_list.h"
#include "tcmalloc/malloc_extension.h"
@@ -39,21 +39,21 @@ namespace {
TEST(AllocationSampleTest, TokenAbuse) {
auto token = MallocExtension::StartAllocationProfiling();
- void *ptr = ::operator new(512 * 1024 * 1024);
- // TODO(b/183453911): Remove workaround for GCC 10.x deleting operator new,
- // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94295.
- benchmark::DoNotOptimize(ptr);
- ::operator delete(ptr);
+ void *ptr = ::operator new(512 * 1024 * 1024);
+ // TODO(b/183453911): Remove workaround for GCC 10.x deleting operator new,
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94295.
+ benchmark::DoNotOptimize(ptr);
+ ::operator delete(ptr);
// Repeated Claims should happily return null.
auto profile = std::move(token).Stop();
int count = 0;
profile.Iterate([&](const Profile::Sample &) { count++; });
-
-#if !defined(UNDEFINED_BEHAVIOR_SANITIZER)
- // UBSan does not implement our profiling API, but running the test can
- // validate the correctness of the new/delete pairs.
+
+#if !defined(UNDEFINED_BEHAVIOR_SANITIZER)
+ // UBSan does not implement our profiling API, but running the test can
+ // validate the correctness of the new/delete pairs.
EXPECT_EQ(count, 1);
-#endif
+#endif
auto profile2 = std::move(token).Stop(); // NOLINT: use-after-move intended
int count2 = 0;
@@ -122,8 +122,8 @@ TEST(AllocationSampleTest, SampleAccuracy) {
size_t size;
size_t alignment;
bool keep;
- // objects we don't delete as we go
- void *list = nullptr;
+ // objects we don't delete as we go
+ void *list = nullptr;
};
std::vector<Requests> sizes = {
{8, 0, false}, {16, 16, true}, {1024, 0, false},
@@ -136,7 +136,7 @@ TEST(AllocationSampleTest, SampleAccuracy) {
// We use new/delete to allocate memory, as malloc returns objects aligned to
// std::max_align_t.
- for (auto &s : sizes) {
+ for (auto &s : sizes) {
for (size_t bytes = 0; bytes < kTotalPerSize; bytes += s.size) {
void *obj;
if (s.alignment > 0) {
@@ -145,9 +145,9 @@ TEST(AllocationSampleTest, SampleAccuracy) {
obj = operator new(s.size);
}
if (s.keep) {
- tcmalloc_internal::SLL_Push(&s.list, obj);
- } else if (s.alignment > 0) {
- operator delete(obj, static_cast<std::align_val_t>(s.alignment));
+ tcmalloc_internal::SLL_Push(&s.list, obj);
+ } else if (s.alignment > 0) {
+ operator delete(obj, static_cast<std::align_val_t>(s.alignment));
} else {
operator delete(obj);
}
@@ -166,21 +166,21 @@ TEST(AllocationSampleTest, SampleAccuracy) {
}
profile.Iterate([&](const tcmalloc::Profile::Sample &e) {
- // Skip unexpected sizes. They may have been triggered by a background
- // thread.
- if (sizes_expected.find(e.allocated_size) == sizes_expected.end()) {
- return;
- }
-
+ // Skip unexpected sizes. They may have been triggered by a background
+ // thread.
+ if (sizes_expected.find(e.allocated_size) == sizes_expected.end()) {
+ return;
+ }
+
// Don't check stack traces until we have evidence that's broken, it's
// tedious and done fairly well elsewhere.
m[e.allocated_size] += e.sum;
EXPECT_EQ(alignment[e.requested_size], e.requested_alignment);
});
-#if !defined(UNDEFINED_BEHAVIOR_SANITIZER)
- // UBSan does not implement our profiling API, but running the test can
- // validate the correctness of the new/delete pairs.
+#if !defined(UNDEFINED_BEHAVIOR_SANITIZER)
+ // UBSan does not implement our profiling API, but running the test can
+ // validate the correctness of the new/delete pairs.
size_t max_bytes = 0, min_bytes = std::numeric_limits<size_t>::max();
EXPECT_EQ(m.size(), sizes_expected.size());
for (auto seen : m) {
@@ -194,18 +194,18 @@ TEST(AllocationSampleTest, SampleAccuracy) {
EXPECT_GE((min_bytes * 3) / 2, max_bytes);
EXPECT_LE((min_bytes * 3) / 4, kTotalPerSize);
EXPECT_LE(kTotalPerSize, (max_bytes * 4) / 3);
-#endif
-
+#endif
+
// Remove the objects we left alive
- for (auto &s : sizes) {
- while (s.list != nullptr) {
- void *obj = tcmalloc_internal::SLL_Pop(&s.list);
- if (s.alignment > 0) {
- operator delete(obj, static_cast<std::align_val_t>(s.alignment));
- } else {
- operator delete(obj);
- }
- }
+ for (auto &s : sizes) {
+ while (s.list != nullptr) {
+ void *obj = tcmalloc_internal::SLL_Pop(&s.list);
+ if (s.alignment > 0) {
+ operator delete(obj, static_cast<std::align_val_t>(s.alignment));
+ } else {
+ operator delete(obj);
+ }
+ }
}
}
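The SampleAccuracy test above keeps some allocations alive by threading them onto an intrusive singly linked list via SLL_Push/SLL_Pop, storing the "next" pointer inside the object itself. A toy version of that trick (not the tcmalloc helpers):

#include <cstdio>

void SLL_Push(void** list, void* obj) {
  *static_cast<void**>(obj) = *list;  // write the next pointer into the object
  *list = obj;
}

void* SLL_Pop(void** list) {
  void* obj = *list;
  *list = *static_cast<void**>(obj);  // advance to the stored next pointer
  return obj;
}

int main() {
  void* list = nullptr;
  for (int i = 0; i < 4; ++i) SLL_Push(&list, ::operator new(64));
  while (list != nullptr) ::operator delete(SLL_Pop(&list));
  std::puts("done");
}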
diff --git a/contrib/libs/tcmalloc/tcmalloc/realloc_test.cc b/contrib/libs/tcmalloc/tcmalloc/realloc_test.cc
index e0e6aba606..3bb8bee5b9 100644
--- a/contrib/libs/tcmalloc/tcmalloc/realloc_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/realloc_test.cc
@@ -25,7 +25,7 @@
#include "gtest/gtest.h"
#include "absl/random/random.h"
-#include "benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
namespace tcmalloc {
namespace {
diff --git a/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes.cc b/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes.cc
index 4bca6485ca..4ff76c6caf 100644
--- a/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes.cc
@@ -20,10 +20,10 @@
#include "tcmalloc/internal/environment.h"
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
-namespace runtime_size_classes_internal {
+namespace tcmalloc_internal {
+namespace runtime_size_classes_internal {
int ParseSizeClasses(absl::string_view env, int max_size, int max_classes,
SizeClassInfo* parsed) {
@@ -63,19 +63,19 @@ int ParseSizeClasses(absl::string_view env, int max_size, int max_classes,
return c + 1;
}
-} // namespace runtime_size_classes_internal
+} // namespace runtime_size_classes_internal
int ABSL_ATTRIBUTE_NOINLINE MaybeSizeClassesFromEnv(int max_size,
int max_classes,
SizeClassInfo* parsed) {
- const char* e = thread_safe_getenv("TCMALLOC_SIZE_CLASSES");
+ const char* e = thread_safe_getenv("TCMALLOC_SIZE_CLASSES");
if (!e) {
return 0;
}
- return runtime_size_classes_internal::ParseSizeClasses(e, max_size,
- max_classes, parsed);
+ return runtime_size_classes_internal::ParseSizeClasses(e, max_size,
+ max_classes, parsed);
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes.h b/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes.h
index 42c5aa8859..106058aed8 100644
--- a/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes.h
+++ b/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes.h
@@ -19,10 +19,10 @@
#include "absl/strings/string_view.h"
#include "tcmalloc/size_class_info.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
-namespace runtime_size_classes_internal {
+namespace tcmalloc_internal {
+namespace runtime_size_classes_internal {
// Set size classes from a string.
// Format: "size,pages,num_to_move;"
@@ -33,7 +33,7 @@ namespace runtime_size_classes_internal {
int ParseSizeClasses(absl::string_view env, int max_size, int max_classes,
SizeClassInfo* parsed);
-} // namespace runtime_size_classes_internal
+} // namespace runtime_size_classes_internal
// If the environment variable TCMALLOC_SIZE_CLASSES is defined, its value is
// parsed using ParseSizeClasses and ApplySizeClassDefaults into parsed. The
@@ -42,8 +42,8 @@ int ParseSizeClasses(absl::string_view env, int max_size, int max_classes,
int MaybeSizeClassesFromEnv(int max_size, int max_classes,
SizeClassInfo* parsed);
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_RUNTIME_SIZE_CLASSES_H_
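The header above documents the TCMALLOC_SIZE_CLASSES format as "size,pages,num_to_move;" triples separated by semicolons. A rough, standard-library-only sketch of parsing that format follows; the real ParseSizeClasses also enforces limits such as max_size and max_classes, which this omits.

#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

struct SizeClass { int size, pages, num_to_move; };

std::vector<SizeClass> Parse(const std::string& env) {
  std::vector<SizeClass> out;
  std::stringstream classes(env);
  std::string entry;
  while (std::getline(classes, entry, ';')) {
    SizeClass sc{};
    if (std::sscanf(entry.c_str(), "%d,%d,%d", &sc.size, &sc.pages,
                    &sc.num_to_move) == 3) {
      out.push_back(sc);
    }
  }
  return out;
}

int main() {
  for (const auto& sc : Parse("8,1,32;16,1,32;1024,2,16")) {
    std::printf("size=%d pages=%d num_to_move=%d\n", sc.size, sc.pages,
                sc.num_to_move);
  }
}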
diff --git a/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes_fuzz.cc b/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes_fuzz.cc
index 89a111e3b8..74c0ce5748 100644
--- a/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes_fuzz.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes_fuzz.cc
@@ -21,10 +21,10 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* d, size_t size) {
absl::string_view env =
absl::string_view(reinterpret_cast<const char*>(d), size);
- tcmalloc::tcmalloc_internal::SizeClassInfo
- parsed[tcmalloc::tcmalloc_internal::kNumClasses];
- tcmalloc::tcmalloc_internal::runtime_size_classes_internal::ParseSizeClasses(
- env, tcmalloc::tcmalloc_internal::kMaxSize,
- tcmalloc::tcmalloc_internal::kNumClasses, parsed);
+ tcmalloc::tcmalloc_internal::SizeClassInfo
+ parsed[tcmalloc::tcmalloc_internal::kNumClasses];
+ tcmalloc::tcmalloc_internal::runtime_size_classes_internal::ParseSizeClasses(
+ env, tcmalloc::tcmalloc_internal::kMaxSize,
+ tcmalloc::tcmalloc_internal::kNumClasses, parsed);
return 0;
}
diff --git a/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes_test.cc b/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes_test.cc
index 6a8771f9e2..8d6ccca514 100644
--- a/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/runtime_size_classes_test.cc
@@ -19,11 +19,11 @@
#include "gtest/gtest.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
-using runtime_size_classes_internal::ParseSizeClasses;
-
+using runtime_size_classes_internal::ParseSizeClasses;
+
constexpr int kNumClasses = 4;
constexpr int kMaxSize = 1024 * 1024;
@@ -110,5 +110,5 @@ TEST(RuntimeSizeClassesTest, EnvVariableExamined) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/sampler.cc b/contrib/libs/tcmalloc/tcmalloc/sampler.cc
index 5e89c9e830..d26531487f 100644
--- a/contrib/libs/tcmalloc/tcmalloc/sampler.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/sampler.cc
@@ -23,9 +23,9 @@
#include "tcmalloc/parameters.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
ssize_t Sampler::GetSamplePeriod() {
return Parameters::profile_sampling_rate();
@@ -201,6 +201,6 @@ double AllocatedBytes(const StackTrace& stack, bool unsample) {
}
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/sampler.h b/contrib/libs/tcmalloc/tcmalloc/sampler.h
index d18dd44234..66f2baadf9 100644
--- a/contrib/libs/tcmalloc/tcmalloc/sampler.h
+++ b/contrib/libs/tcmalloc/tcmalloc/sampler.h
@@ -25,9 +25,9 @@
#include "tcmalloc/parameters.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
//-------------------------------------------------------------------
// Sampler to decide when to create a sample trace for an allocation
@@ -291,8 +291,8 @@ inline void Sampler::UpdateFastPathState() {
// If unsample is false, the caller will handle unsampling.
double AllocatedBytes(const StackTrace &stack, bool unsample);
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_SAMPLER_H_
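The Sampler above decides which allocations to profile by bytes allocated rather than by call count. A hedged sketch of that style of sampling, drawing the gap to the next sample from an exponential distribution whose mean is the sampling period; the class name and constants are assumptions, not the real Sampler implementation.

#include <cstdio>
#include <random>

class ToySampler {
 public:
  explicit ToySampler(double mean_bytes) : dist_(1.0 / mean_bytes) {
    bytes_until_sample_ = dist_(rng_);
  }
  // Returns true if this allocation should be sampled.
  bool RecordAllocation(size_t bytes) {
    bytes_until_sample_ -= static_cast<double>(bytes);
    if (bytes_until_sample_ > 0) return false;
    bytes_until_sample_ = dist_(rng_);
    return true;
  }

 private:
  std::mt19937_64 rng_{42};
  std::exponential_distribution<double> dist_;
  double bytes_until_sample_;
};

int main() {
  ToySampler s(1 << 21);  // ~2 MiB mean sampling period (assumed)
  int sampled = 0;
  for (int i = 0; i < 100000; ++i) sampled += s.RecordAllocation(1024);
  std::printf("sampled %d of 100000 1KiB allocations\n", sampled);
}

With a ~2 MiB period, roughly one in every 2048 of these 1 KiB allocations is sampled, so the printed count lands near 49 up to randomness.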
diff --git a/contrib/libs/tcmalloc/tcmalloc/size_class_info.h b/contrib/libs/tcmalloc/tcmalloc/size_class_info.h
index a424432b75..58b3e54159 100644
--- a/contrib/libs/tcmalloc/tcmalloc/size_class_info.h
+++ b/contrib/libs/tcmalloc/tcmalloc/size_class_info.h
@@ -20,9 +20,9 @@
#include "tcmalloc/internal/logging.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// The number of members in SizeClassInfo
static constexpr int kSizeClassInfoMembers = 3;
@@ -72,8 +72,8 @@ struct SizeClassInfo {
size_t num_to_move;
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_size_class_info_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/size_classes.cc b/contrib/libs/tcmalloc/tcmalloc/size_classes.cc
index f4b444994d..7b8ad73459 100644
--- a/contrib/libs/tcmalloc/tcmalloc/size_classes.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/size_classes.cc
@@ -14,11 +14,11 @@
#include "tcmalloc/common.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
-
+namespace tcmalloc_internal {
+
// <fixed> is fixed per-size-class overhead due to end-of-span fragmentation
// and other factors. For instance, if we have a 96 byte size class, and use a
// single 8KiB page, then we will hold 85 objects per span, and have 32 bytes
@@ -68,10 +68,10 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 240, 1, 32}, // 0.98%
{ 256, 1, 32}, // 0.59%
{ 272, 1, 32}, // 0.98%
- { 296, 1, 32}, // 3.10%
+ { 296, 1, 32}, // 3.10%
{ 312, 1, 32}, // 1.58%
{ 336, 1, 32}, // 2.18%
- { 352, 1, 32}, // 1.78%
+ { 352, 1, 32}, // 1.78%
{ 368, 1, 32}, // 1.78%
{ 408, 1, 32}, // 0.98%
{ 448, 1, 32}, // 2.18%
@@ -105,7 +105,7 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 9472, 5, 6}, // 8.23%
{ 10240, 4, 6}, // 6.82%
{ 12288, 3, 5}, // 0.20%
- { 13568, 5, 4}, // 0.75%
+ { 13568, 5, 4}, // 0.75%
{ 14336, 7, 4}, // 0.08%
{ 16384, 2, 4}, // 0.29%
{ 20480, 5, 3}, // 0.12%
@@ -119,13 +119,13 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 73728, 9, 2}, // 0.07%
{ 81920, 10, 2}, // 0.06%
{ 98304, 12, 2}, // 0.05%
- { 114688, 14, 2}, // 0.04%
+ { 114688, 14, 2}, // 0.04%
{ 131072, 16, 2}, // 0.04%
{ 147456, 18, 2}, // 0.03%
{ 163840, 20, 2}, // 0.03%
{ 180224, 22, 2}, // 0.03%
{ 204800, 25, 2}, // 0.02%
- { 237568, 29, 2}, // 0.02%
+ { 237568, 29, 2}, // 0.02%
{ 262144, 32, 2}, // 0.02%
};
#elif TCMALLOC_PAGE_SHIFT == 15
@@ -156,16 +156,16 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 176, 1, 32}, // 0.24%
{ 192, 1, 32}, // 0.54%
{ 208, 1, 32}, // 0.49%
- { 224, 1, 32}, // 0.34%
- { 240, 1, 32}, // 0.54%
+ { 224, 1, 32}, // 0.34%
+ { 240, 1, 32}, // 0.54%
{ 256, 1, 32}, // 0.15%
{ 280, 1, 32}, // 0.17%
{ 304, 1, 32}, // 0.89%
- { 328, 1, 32}, // 1.06%
- { 352, 1, 32}, // 0.24%
- { 384, 1, 32}, // 0.54%
+ { 328, 1, 32}, // 1.06%
+ { 352, 1, 32}, // 0.24%
+ { 384, 1, 32}, // 0.54%
{ 416, 1, 32}, // 1.13%
- { 448, 1, 32}, // 0.34%
+ { 448, 1, 32}, // 0.34%
{ 488, 1, 32}, // 0.37%
{ 512, 1, 32}, // 0.15%
{ 576, 1, 32}, // 1.74%
@@ -176,8 +176,8 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 1024, 1, 32}, // 0.15%
{ 1152, 1, 32}, // 1.74%
{ 1280, 1, 32}, // 2.55%
- { 1536, 1, 32}, // 1.74%
- { 1792, 1, 32}, // 1.74%
+ { 1536, 1, 32}, // 1.74%
+ { 1792, 1, 32}, // 1.74%
{ 2048, 1, 32}, // 0.15%
{ 2176, 1, 30}, // 0.54%
{ 2304, 1, 28}, // 1.74%
@@ -189,7 +189,7 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 4608, 1, 14}, // 1.74%
{ 5376, 1, 12}, // 1.74%
{ 6528, 1, 10}, // 0.54%
- { 7168, 2, 9}, // 1.66%
+ { 7168, 2, 9}, // 1.66%
{ 8192, 1, 8}, // 0.15%
{ 9344, 2, 7}, // 0.27%
{ 10880, 1, 6}, // 0.54%
@@ -200,7 +200,7 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 24576, 3, 2}, // 0.05%
{ 28032, 6, 2}, // 0.22%
{ 32768, 1, 2}, // 0.15%
- { 38144, 5, 2}, // 7.41%
+ { 38144, 5, 2}, // 7.41%
{ 40960, 4, 2}, // 6.71%
{ 49152, 3, 2}, // 0.05%
{ 57344, 7, 2}, // 0.02%
@@ -234,32 +234,32 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 80, 1, 32}, // 0.04%
{ 88, 1, 32}, // 0.05%
{ 96, 1, 32}, // 0.04%
- { 104, 1, 32}, // 0.04%
+ { 104, 1, 32}, // 0.04%
{ 112, 1, 32}, // 0.04%
{ 128, 1, 32}, // 0.02%
{ 144, 1, 32}, // 0.04%
{ 160, 1, 32}, // 0.04%
{ 176, 1, 32}, // 0.05%
{ 192, 1, 32}, // 0.04%
- { 208, 1, 32}, // 0.04%
+ { 208, 1, 32}, // 0.04%
{ 240, 1, 32}, // 0.04%
{ 256, 1, 32}, // 0.02%
- { 304, 1, 32}, // 0.05%
- { 336, 1, 32}, // 0.04%
+ { 304, 1, 32}, // 0.05%
+ { 336, 1, 32}, // 0.04%
{ 360, 1, 32}, // 0.04%
- { 408, 1, 32}, // 0.10%
- { 456, 1, 32}, // 0.17%
+ { 408, 1, 32}, // 0.10%
+ { 456, 1, 32}, // 0.17%
{ 512, 1, 32}, // 0.02%
{ 576, 1, 32}, // 0.04%
{ 640, 1, 32}, // 0.17%
{ 704, 1, 32}, // 0.12%
- { 768, 1, 32}, // 0.12%
+ { 768, 1, 32}, // 0.12%
{ 832, 1, 32}, // 0.04%
- { 896, 1, 32}, // 0.21%
+ { 896, 1, 32}, // 0.21%
{ 1024, 1, 32}, // 0.02%
{ 1152, 1, 32}, // 0.26%
{ 1280, 1, 32}, // 0.41%
- { 1536, 1, 32}, // 0.41%
+ { 1536, 1, 32}, // 0.41%
{ 1664, 1, 32}, // 0.36%
{ 1792, 1, 32}, // 0.21%
{ 1920, 1, 32}, // 0.41%
@@ -267,24 +267,24 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 2176, 1, 30}, // 0.41%
{ 2304, 1, 28}, // 0.71%
{ 2432, 1, 26}, // 0.76%
- { 2560, 1, 25}, // 0.41%
+ { 2560, 1, 25}, // 0.41%
{ 2688, 1, 24}, // 0.56%
- { 2816, 1, 23}, // 0.12%
+ { 2816, 1, 23}, // 0.12%
{ 2944, 1, 22}, // 0.07%
{ 3072, 1, 21}, // 0.41%
{ 3328, 1, 19}, // 1.00%
{ 3584, 1, 18}, // 0.21%
{ 3840, 1, 17}, // 0.41%
{ 4096, 1, 16}, // 0.02%
- { 4736, 1, 13}, // 0.66%
+ { 4736, 1, 13}, // 0.66%
{ 5504, 1, 11}, // 1.35%
{ 6144, 1, 10}, // 1.61%
- { 6528, 1, 10}, // 0.41%
- { 6784, 1, 9}, // 1.71%
+ { 6528, 1, 10}, // 0.41%
+ { 6784, 1, 9}, // 1.71%
{ 7168, 1, 9}, // 1.61%
{ 7680, 1, 8}, // 0.41%
{ 8192, 1, 8}, // 0.02%
- { 8704, 1, 7}, // 0.41%
+ { 8704, 1, 7}, // 0.41%
{ 9344, 1, 7}, // 0.21%
{ 10880, 1, 6}, // 0.41%
{ 11904, 1, 5}, // 0.12%
@@ -332,11 +332,11 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 88, 1, 32}, // 2.37%
{ 96, 1, 32}, // 2.78%
{ 104, 1, 32}, // 2.17%
- { 120, 1, 32}, // 1.57%
+ { 120, 1, 32}, // 1.57%
{ 128, 1, 32}, // 1.17%
{ 144, 1, 32}, // 2.78%
{ 160, 1, 32}, // 3.60%
- { 184, 1, 32}, // 2.37%
+ { 184, 1, 32}, // 2.37%
{ 208, 1, 32}, // 4.86%
{ 240, 1, 32}, // 1.57%
{ 256, 1, 32}, // 1.17%
@@ -347,22 +347,22 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 408, 1, 32}, // 1.57%
{ 512, 1, 32}, // 1.17%
{ 576, 2, 32}, // 2.18%
- { 704, 2, 32}, // 6.40%
+ { 704, 2, 32}, // 6.40%
{ 768, 2, 32}, // 7.29%
{ 896, 2, 32}, // 2.18%
{ 1024, 2, 32}, // 0.59%
{ 1152, 3, 32}, // 7.08%
{ 1280, 3, 32}, // 7.08%
{ 1536, 3, 32}, // 0.39%
- { 1792, 4, 32}, // 1.88%
+ { 1792, 4, 32}, // 1.88%
{ 2048, 4, 32}, // 0.29%
{ 2304, 4, 28}, // 1.88%
{ 2688, 4, 24}, // 1.88%
- { 3456, 6, 18}, // 1.79%
+ { 3456, 6, 18}, // 1.79%
{ 4096, 4, 16}, // 0.29%
- { 5376, 4, 12}, // 1.88%
+ { 5376, 4, 12}, // 1.88%
{ 6144, 3, 10}, // 0.39%
- { 7168, 7, 9}, // 0.17%
+ { 7168, 7, 9}, // 0.17%
{ 8192, 4, 8}, // 0.29%
};
#else
@@ -452,12 +452,12 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 90112, 11, 2}, // 0.05%
{ 98304, 12, 2}, // 0.05%
{ 106496, 13, 2}, // 0.05%
- { 114688, 14, 2}, // 0.04%
+ { 114688, 14, 2}, // 0.04%
{ 131072, 16, 2}, // 0.04%
{ 139264, 17, 2}, // 0.03%
{ 155648, 19, 2}, // 0.03%
- { 172032, 21, 2}, // 0.03%
- { 188416, 23, 2}, // 0.03%
+ { 172032, 21, 2}, // 0.03%
+ { 188416, 23, 2}, // 0.03%
{ 204800, 25, 2}, // 0.02%
{ 221184, 27, 2}, // 0.02%
{ 237568, 29, 2}, // 0.02%
@@ -491,10 +491,10 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 272, 1, 32}, // 0.54%
{ 288, 1, 32}, // 0.84%
{ 304, 1, 32}, // 0.89%
- { 320, 1, 32}, // 0.54%
+ { 320, 1, 32}, // 0.54%
{ 336, 1, 32}, // 0.69%
- { 352, 1, 32}, // 0.24%
- { 384, 1, 32}, // 0.54%
+ { 352, 1, 32}, // 0.24%
+ { 384, 1, 32}, // 0.54%
{ 416, 1, 32}, // 1.13%
{ 448, 1, 32}, // 0.34%
{ 480, 1, 32}, // 0.54%
@@ -510,7 +510,7 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 1280, 1, 32}, // 2.55%
{ 1408, 1, 32}, // 1.33%
{ 1536, 1, 32}, // 1.74%
- { 1792, 1, 32}, // 1.74%
+ { 1792, 1, 32}, // 1.74%
{ 2048, 1, 32}, // 0.15%
{ 2176, 1, 30}, // 0.54%
{ 2304, 1, 28}, // 1.74%
@@ -570,11 +570,11 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 160, 1, 32}, // 0.04%
{ 176, 1, 32}, // 0.05%
{ 192, 1, 32}, // 0.04%
- { 208, 1, 32}, // 0.04%
+ { 208, 1, 32}, // 0.04%
{ 240, 1, 32}, // 0.04%
{ 256, 1, 32}, // 0.02%
- { 304, 1, 32}, // 0.05%
- { 336, 1, 32}, // 0.04%
+ { 304, 1, 32}, // 0.05%
+ { 336, 1, 32}, // 0.04%
{ 368, 1, 32}, // 0.07%
{ 416, 1, 32}, // 0.04%
{ 464, 1, 32}, // 0.19%
@@ -582,7 +582,7 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 576, 1, 32}, // 0.04%
{ 640, 1, 32}, // 0.17%
{ 704, 1, 32}, // 0.12%
- { 768, 1, 32}, // 0.12%
+ { 768, 1, 32}, // 0.12%
{ 832, 1, 32}, // 0.04%
{ 896, 1, 32}, // 0.21%
{ 1024, 1, 32}, // 0.02%
@@ -597,30 +597,30 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 2176, 1, 30}, // 0.41%
{ 2304, 1, 28}, // 0.71%
{ 2432, 1, 26}, // 0.76%
- { 2560, 1, 25}, // 0.41%
+ { 2560, 1, 25}, // 0.41%
{ 2688, 1, 24}, // 0.56%
- { 2816, 1, 23}, // 0.12%
+ { 2816, 1, 23}, // 0.12%
{ 2944, 1, 22}, // 0.07%
{ 3072, 1, 21}, // 0.41%
- { 3200, 1, 20}, // 1.15%
+ { 3200, 1, 20}, // 1.15%
{ 3328, 1, 19}, // 1.00%
{ 3584, 1, 18}, // 0.21%
{ 3840, 1, 17}, // 0.41%
{ 4096, 1, 16}, // 0.02%
- { 4736, 1, 13}, // 0.66%
+ { 4736, 1, 13}, // 0.66%
{ 5504, 1, 11}, // 1.35%
{ 6144, 1, 10}, // 1.61%
{ 6528, 1, 10}, // 0.41%
- { 6784, 1, 9}, // 1.71%
+ { 6784, 1, 9}, // 1.71%
{ 7168, 1, 9}, // 1.61%
{ 7680, 1, 8}, // 0.41%
{ 8192, 1, 8}, // 0.02%
{ 8704, 1, 7}, // 0.41%
{ 9344, 1, 7}, // 0.21%
- { 10368, 1, 6}, // 1.15%
- { 11392, 1, 5}, // 0.07%
- { 12416, 1, 5}, // 0.56%
- { 13696, 1, 4}, // 0.76%
+ { 10368, 1, 6}, // 1.15%
+ { 11392, 1, 5}, // 0.07%
+ { 12416, 1, 5}, // 0.56%
+ { 13696, 1, 4}, // 0.76%
{ 14464, 1, 4}, // 0.71%
{ 16384, 1, 4}, // 0.02%
{ 17408, 1, 3}, // 0.41%
@@ -695,7 +695,7 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
{ 3200, 4, 20}, // 2.70%
{ 3584, 7, 18}, // 0.17%
{ 4096, 4, 16}, // 0.29%
- { 5376, 4, 12}, // 1.88%
+ { 5376, 4, 12}, // 1.88%
{ 6144, 3, 10}, // 0.39%
{ 7168, 7, 9}, // 0.17%
{ 8192, 4, 8}, // 0.29%
@@ -706,6 +706,6 @@ const SizeClassInfo SizeMap::kSizeClasses[SizeMap::kSizeClassesCount] = {
#endif
// clang-format on
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
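The percentages in the size-class tables above include end-of-span fragmentation, which the file's leading comment illustrates with its 96-byte example (85 objects per 8 KiB page, 32 bytes left over). A back-of-envelope sketch of just that end-of-span component, assuming an 8 KiB page; the table's figures fold in other overheads as well, so they will not match this exactly for every row.

#include <cstdio>

int main() {
  const double kPageSize = 8192.0;  // assumed TCMALLOC_PAGE_SHIFT == 13
  struct { int size, pages; } rows[] = {{96, 1}, {240, 1}, {1152, 1}};
  for (const auto& r : rows) {
    const double span = r.pages * kPageSize;
    const int objects = static_cast<int>(span) / r.size;
    const double waste = (span - objects * r.size) / span * 100.0;
    std::printf("size %5d: %3d objects/span, %.2f%% end-of-span waste\n",
                r.size, objects, waste);
  }
}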
diff --git a/contrib/libs/tcmalloc/tcmalloc/size_classes_test.cc b/contrib/libs/tcmalloc/tcmalloc/size_classes_test.cc
index d66ce5b186..de29d57954 100644
--- a/contrib/libs/tcmalloc/tcmalloc/size_classes_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/size_classes_test.cc
@@ -20,26 +20,26 @@
#include "tcmalloc/common.h"
#include "tcmalloc/size_class_info.h"
#include "tcmalloc/span.h"
-#include "tcmalloc/tcmalloc_policy.h"
+#include "tcmalloc/tcmalloc_policy.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
-
-// Moved out of anonymous namespace so that it can be found by friend class in
-// span.h. This allows tests to access span internals so that we can
-// validate that scaling by a reciprocal correctly converts a pointer into
-// an offset within a span.
-class SpanTestPeer {
- public:
- static uint16_t CalcReciprocal(size_t size) {
- return Span::CalcReciprocal(size);
- }
- static Span::ObjIdx TestOffsetToIdx(uintptr_t offset, size_t size,
- uint16_t reciprocal) {
- return Span::TestOffsetToIdx(offset, size, reciprocal);
- }
-};
-
+namespace tcmalloc_internal {
+
+// Moved out of anonymous namespace so that it can be found by friend class in
+// span.h. This allows tests to access span internals so that we can
+// validate that scaling by a reciprocal correctly converts a pointer into
+// an offset within a span.
+class SpanTestPeer {
+ public:
+ static uint16_t CalcReciprocal(size_t size) {
+ return Span::CalcReciprocal(size);
+ }
+ static Span::ObjIdx TestOffsetToIdx(uintptr_t offset, size_t size,
+ uint16_t reciprocal) {
+ return Span::TestOffsetToIdx(offset, size, reciprocal);
+ }
+};
+
namespace {
size_t Alignment(size_t size) {
@@ -92,49 +92,49 @@ TEST_F(SizeClassesTest, SpanPages) {
}
}
-TEST_F(SizeClassesTest, ValidateSufficientBitmapCapacity) {
- // Validate that all the objects in a span can fit into a bitmap.
- // The cut-off for using a bitmap is kBitmapMinObjectSize, so it is
- // theoretically possible that a span could exceed this threshold
- // for object size and contain more than 64 objects.
- for (int c = 1; c < kNumClasses; ++c) {
- const size_t max_size_in_class = m_.class_to_size(c);
- if (max_size_in_class >= kBitmapMinObjectSize) {
- const size_t objects_per_span =
- Length(m_.class_to_pages(c)).in_bytes() / m_.class_to_size(c);
- // Span can hold at most 64 objects of this size.
- EXPECT_LE(objects_per_span, 64);
- }
- }
-}
-
-TEST_F(SizeClassesTest, ValidateCorrectScalingByReciprocal) {
- // Validate that multiplying by the reciprocal works for all size classes.
- // When converting an offset within a span into an index we avoid a
- // division operation by scaling by the reciprocal. The test ensures
- // that this approach works for all objects in a span, for all object
- // sizes.
- for (int c = 1; c < kNumClasses; ++c) {
- const size_t max_size_in_class = m_.class_to_size(c);
- // Only test for sizes where object availability is recorded in a bitmap.
- if (max_size_in_class < kBitmapMinObjectSize) {
- continue;
- }
- size_t reciprocal = SpanTestPeer::CalcReciprocal(max_size_in_class);
- const size_t objects_per_span =
- Length(m_.class_to_pages(c)).in_bytes() / m_.class_to_size(c);
- for (int index = 0; index < objects_per_span; index++) {
- // Calculate the address of the object.
- uintptr_t address = index * max_size_in_class;
- // Calculate the index into the page using the reciprocal method.
- int idx =
- SpanTestPeer::TestOffsetToIdx(address, max_size_in_class, reciprocal);
- // Check that the starting address back is correct.
- ASSERT_EQ(address, idx * max_size_in_class);
- }
- }
-}
-
+TEST_F(SizeClassesTest, ValidateSufficientBitmapCapacity) {
+ // Validate that all the objects in a span can fit into a bitmap.
+ // The cut-off for using a bitmap is kBitmapMinObjectSize, so it is
+ // theoretically possible that a span could exceed this threshold
+ // for object size and contain more than 64 objects.
+ for (int c = 1; c < kNumClasses; ++c) {
+ const size_t max_size_in_class = m_.class_to_size(c);
+ if (max_size_in_class >= kBitmapMinObjectSize) {
+ const size_t objects_per_span =
+ Length(m_.class_to_pages(c)).in_bytes() / m_.class_to_size(c);
+ // Span can hold at most 64 objects of this size.
+ EXPECT_LE(objects_per_span, 64);
+ }
+ }
+}
+
+TEST_F(SizeClassesTest, ValidateCorrectScalingByReciprocal) {
+ // Validate that multiplying by the reciprocal works for all size classes.
+ // When converting an offset within a span into an index we avoid a
+ // division operation by scaling by the reciprocal. The test ensures
+ // that this approach works for all objects in a span, for all object
+ // sizes.
+ for (int c = 1; c < kNumClasses; ++c) {
+ const size_t max_size_in_class = m_.class_to_size(c);
+ // Only test for sizes where object availability is recorded in a bitmap.
+ if (max_size_in_class < kBitmapMinObjectSize) {
+ continue;
+ }
+ size_t reciprocal = SpanTestPeer::CalcReciprocal(max_size_in_class);
+ const size_t objects_per_span =
+ Length(m_.class_to_pages(c)).in_bytes() / m_.class_to_size(c);
+ for (int index = 0; index < objects_per_span; index++) {
+ // Calculate the address of the object.
+ uintptr_t address = index * max_size_in_class;
+ // Calculate the index into the page using the reciprocal method.
+ int idx =
+ SpanTestPeer::TestOffsetToIdx(address, max_size_in_class, reciprocal);
+ // Check that the starting address back is correct.
+ ASSERT_EQ(address, idx * max_size_in_class);
+ }
+ }
+}
+
TEST_F(SizeClassesTest, Aligned) {
// Validate that each size class is properly aligned.
for (int c = 1; c < kNumClasses; c++) {
@@ -152,19 +152,19 @@ TEST_F(SizeClassesTest, Distinguishable) {
// ClassIndexMaybe provides 8 byte granularity below 1024 bytes and 128 byte
// granularity for larger sizes, so our chosen size classes cannot be any
// finer (otherwise they would map to the same entry in the lookup table).
- //
- // We don't check expanded size classes which are intentionally duplicated.
- for (int partition = 0; partition < kNumaPartitions; partition++) {
- for (int c = (partition * kNumBaseClasses) + 1;
- c < (partition + 1) * kNumBaseClasses; c++) {
- const size_t max_size_in_class = m_.class_to_size(c);
- if (max_size_in_class == 0) {
- continue;
- }
- const int class_index = m_.SizeClass(
- CppPolicy().InNumaPartition(partition), max_size_in_class);
-
- EXPECT_EQ(c, class_index) << max_size_in_class;
+ //
+ // We don't check expanded size classes which are intentionally duplicated.
+ for (int partition = 0; partition < kNumaPartitions; partition++) {
+ for (int c = (partition * kNumBaseClasses) + 1;
+ c < (partition + 1) * kNumBaseClasses; c++) {
+ const size_t max_size_in_class = m_.class_to_size(c);
+ if (max_size_in_class == 0) {
+ continue;
+ }
+ const int class_index = m_.SizeClass(
+ CppPolicy().InNumaPartition(partition), max_size_in_class);
+
+ EXPECT_EQ(c, class_index) << max_size_in_class;
}
}
}
@@ -189,11 +189,11 @@ TEST_F(SizeClassesTest, DoubleCheckedConsistency) {
// Validate that every size on [0, kMaxSize] maps to a size class that is
// neither too big nor too small.
for (size_t size = 0; size <= kMaxSize; size++) {
- const int sc = m_.SizeClass(CppPolicy(), size);
+ const int sc = m_.SizeClass(CppPolicy(), size);
EXPECT_GT(sc, 0) << size;
EXPECT_LT(sc, kNumClasses) << size;
- if ((sc % kNumBaseClasses) > 1) {
+ if ((sc % kNumBaseClasses) > 1) {
EXPECT_GT(size, m_.class_to_size(sc - 1))
<< "Allocating unnecessarily large class";
}
@@ -239,9 +239,9 @@ TEST_F(RunTimeSizeClassesTest, ExpandedSizeClasses) {
// Verify that none of the default size classes are considered expanded size
// classes.
for (int i = 0; i < kNumClasses; i++) {
- EXPECT_EQ(i < (m_.DefaultSizeClassesCount() * kNumaPartitions),
- !IsExpandedSizeClass(i))
- << i;
+ EXPECT_EQ(i < (m_.DefaultSizeClassesCount() * kNumaPartitions),
+ !IsExpandedSizeClass(i))
+ << i;
}
}
@@ -346,14 +346,14 @@ TEST(SizeMapTest, GetSizeClass) {
constexpr int kTrials = 1000;
SizeMap m;
- // Before m.Init(), SizeClass should always return 0 or the equivalent in a
- // non-zero NUMA partition.
+ // Before m.Init(), SizeClass should always return 0 or the equivalent in a
+ // non-zero NUMA partition.
for (int i = 0; i < kTrials; ++i) {
const size_t size = absl::LogUniform(rng, 0, 4 << 20);
uint32_t cl;
- if (m.GetSizeClass(CppPolicy(), size, &cl)) {
- EXPECT_EQ(cl % kNumBaseClasses, 0) << size;
- EXPECT_LT(cl, kExpandedClassesStart) << size;
+ if (m.GetSizeClass(CppPolicy(), size, &cl)) {
+ EXPECT_EQ(cl % kNumBaseClasses, 0) << size;
+ EXPECT_LT(cl, kExpandedClassesStart) << size;
} else {
// We should only fail to lookup the size class when size is outside of
// the size classes.
@@ -367,7 +367,7 @@ TEST(SizeMapTest, GetSizeClass) {
for (int i = 0; i < kTrials; ++i) {
const size_t size = absl::LogUniform(rng, 0, 4 << 20);
uint32_t cl;
- if (m.GetSizeClass(CppPolicy(), size, &cl)) {
+ if (m.GetSizeClass(CppPolicy(), size, &cl)) {
const size_t mapped_size = m.class_to_size(cl);
// The size class needs to hold size.
ASSERT_GE(mapped_size, size);
@@ -384,15 +384,15 @@ TEST(SizeMapTest, GetSizeClassWithAlignment) {
constexpr int kTrials = 1000;
SizeMap m;
- // Before m.Init(), SizeClass should always return 0 or the equivalent in a
- // non-zero NUMA partition.
+ // Before m.Init(), SizeClass should always return 0 or the equivalent in a
+ // non-zero NUMA partition.
for (int i = 0; i < kTrials; ++i) {
const size_t size = absl::LogUniform(rng, 0, 4 << 20);
const size_t alignment = 1 << absl::Uniform(rng, 0u, kHugePageShift);
uint32_t cl;
- if (m.GetSizeClass(CppPolicy().AlignAs(alignment), size, &cl)) {
- EXPECT_EQ(cl % kNumBaseClasses, 0) << size << " " << alignment;
- EXPECT_LT(cl, kExpandedClassesStart) << size << " " << alignment;
+ if (m.GetSizeClass(CppPolicy().AlignAs(alignment), size, &cl)) {
+ EXPECT_EQ(cl % kNumBaseClasses, 0) << size << " " << alignment;
+ EXPECT_LT(cl, kExpandedClassesStart) << size << " " << alignment;
} else if (alignment < kPageSize) {
// When alignment > kPageSize, we do not produce a size class.
// TODO(b/172060547): alignment == kPageSize could fit into the size
@@ -410,7 +410,7 @@ TEST(SizeMapTest, GetSizeClassWithAlignment) {
const size_t size = absl::LogUniform(rng, 0, 4 << 20);
const size_t alignment = 1 << absl::Uniform(rng, 0u, kHugePageShift);
uint32_t cl;
- if (m.GetSizeClass(CppPolicy().AlignAs(alignment), size, &cl)) {
+ if (m.GetSizeClass(CppPolicy().AlignAs(alignment), size, &cl)) {
const size_t mapped_size = m.class_to_size(cl);
// The size class needs to hold size.
ASSERT_GE(mapped_size, size);
@@ -432,13 +432,13 @@ TEST(SizeMapTest, SizeClass) {
constexpr int kTrials = 1000;
SizeMap m;
- // Before m.Init(), SizeClass should always return 0 or the equivalent in a
- // non-zero NUMA partition.
+ // Before m.Init(), SizeClass should always return 0 or the equivalent in a
+ // non-zero NUMA partition.
for (int i = 0; i < kTrials; ++i) {
const size_t size = absl::LogUniform<size_t>(rng, 0u, kMaxSize);
- const uint32_t cl = m.SizeClass(CppPolicy(), size);
- EXPECT_EQ(cl % kNumBaseClasses, 0) << size;
- EXPECT_LT(cl, kExpandedClassesStart) << size;
+ const uint32_t cl = m.SizeClass(CppPolicy(), size);
+ EXPECT_EQ(cl % kNumBaseClasses, 0) << size;
+ EXPECT_LT(cl, kExpandedClassesStart) << size;
}
// After m.Init(), SizeClass should return a size class.
@@ -446,7 +446,7 @@ TEST(SizeMapTest, SizeClass) {
for (int i = 0; i < kTrials; ++i) {
const size_t size = absl::LogUniform<size_t>(rng, 0u, kMaxSize);
- uint32_t cl = m.SizeClass(CppPolicy(), size);
+ uint32_t cl = m.SizeClass(CppPolicy(), size);
const size_t mapped_size = m.class_to_size(cl);
// The size class needs to hold size.
@@ -465,5 +465,5 @@ TEST(SizeMapTest, Preinit) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/size_classes_with_runtime_size_classes_test.cc b/contrib/libs/tcmalloc/tcmalloc/size_classes_with_runtime_size_classes_test.cc
index 17badddac9..fffd4b478a 100644
--- a/contrib/libs/tcmalloc/tcmalloc/size_classes_with_runtime_size_classes_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/size_classes_with_runtime_size_classes_test.cc
@@ -24,7 +24,7 @@
#include "tcmalloc/span.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class TestingSizeMap : public SizeMap {
@@ -106,22 +106,22 @@ TEST_F(RunTimeSizeClassesTest, EnvRealClasses) {
// With the runtime_size_classes library linked, the environment variable
// will be parsed.
- for (int c = 0; c < kNumClasses;) {
- for (int end = c + count; c < end; c++) {
- const SizeClassInfo& default_info =
- m_.DefaultSizeClasses()[c % kNumBaseClasses];
- EXPECT_EQ(m_.class_to_size(c), default_info.size) << c;
- EXPECT_EQ(m_.class_to_pages(c), default_info.pages);
- EXPECT_EQ(m_.num_objects_to_move(c), default_info.num_to_move);
- }
- for (; (c % kNumBaseClasses) != 0; c++) {
- EXPECT_EQ(m_.class_to_size(c), 0);
- EXPECT_EQ(m_.class_to_pages(c), 0);
- EXPECT_EQ(m_.num_objects_to_move(c), 0);
- }
+ for (int c = 0; c < kNumClasses;) {
+ for (int end = c + count; c < end; c++) {
+ const SizeClassInfo& default_info =
+ m_.DefaultSizeClasses()[c % kNumBaseClasses];
+ EXPECT_EQ(m_.class_to_size(c), default_info.size) << c;
+ EXPECT_EQ(m_.class_to_pages(c), default_info.pages);
+ EXPECT_EQ(m_.num_objects_to_move(c), default_info.num_to_move);
+ }
+ for (; (c % kNumBaseClasses) != 0; c++) {
+ EXPECT_EQ(m_.class_to_size(c), 0);
+ EXPECT_EQ(m_.class_to_pages(c), 0);
+ EXPECT_EQ(m_.num_objects_to_move(c), 0);
+ }
}
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/span.cc b/contrib/libs/tcmalloc/tcmalloc/span.cc
index 87e6f29244..f6fc842e75 100644
--- a/contrib/libs/tcmalloc/tcmalloc/span.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/span.cc
@@ -18,8 +18,8 @@
#include <algorithm>
-#include "absl/base/optimization.h" // ABSL_INTERNAL_ASSUME
-#include "absl/numeric/bits.h"
+#include "absl/base/optimization.h" // ABSL_INTERNAL_ASSUME
+#include "absl/numeric/bits.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/atomic_stats_counter.h"
#include "tcmalloc/internal/logging.h"
@@ -29,22 +29,22 @@
#include "tcmalloc/sampler.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
void Span::Sample(StackTrace* stack) {
ASSERT(!sampled_ && stack);
sampled_ = 1;
sampled_stack_ = stack;
Static::sampled_objects_.prepend(this);
-
+
// The cast to value matches Unsample.
- tcmalloc_internal::StatsCounter::Value allocated_bytes =
+ tcmalloc_internal::StatsCounter::Value allocated_bytes =
static_cast<tcmalloc_internal::StatsCounter::Value>(
- AllocatedBytes(*stack, true));
- // LossyAdd is ok: writes to sampled_objects_size_ guarded by pageheap_lock.
- Static::sampled_objects_size_.LossyAdd(allocated_bytes);
+ AllocatedBytes(*stack, true));
+ // LossyAdd is ok: writes to sampled_objects_size_ guarded by pageheap_lock.
+ Static::sampled_objects_size_.LossyAdd(allocated_bytes);
}
StackTrace* Span::Unsample() {
@@ -57,11 +57,11 @@ StackTrace* Span::Unsample() {
RemoveFromList(); // from Static::sampled_objects_
// The cast to Value ensures no funny business happens during the negation if
// sizeof(size_t) != sizeof(Value).
- tcmalloc_internal::StatsCounter::Value neg_allocated_bytes =
+ tcmalloc_internal::StatsCounter::Value neg_allocated_bytes =
-static_cast<tcmalloc_internal::StatsCounter::Value>(
- AllocatedBytes(*stack, true));
- // LossyAdd is ok: writes to sampled_objects_size_ guarded by pageheap_lock.
- Static::sampled_objects_size_.LossyAdd(neg_allocated_bytes);
+ AllocatedBytes(*stack, true));
+ // LossyAdd is ok: writes to sampled_objects_size_ guarded by pageheap_lock.
+ Static::sampled_objects_size_.LossyAdd(neg_allocated_bytes);
return stack;
}
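
The "LossyAdd is ok" comments in Sample and Unsample above rely on all writers of sampled_objects_size_ already holding pageheap_lock, so the counter can skip an atomic read-modify-write and do a cheaper relaxed load/add/store; readers may see a slightly stale value but never a torn one. A sketch of such a counter (an assumption about the shape of the internal StatsCounter, not its actual code):

#include <atomic>
#include <cstdint>

class LossyStatsCounter {
 public:
  using Value = int64_t;

  // Cheap add: safe only if all writers are serialized externally
  // (e.g. by pageheap_lock); concurrent LossyAdd calls could lose updates.
  void LossyAdd(Value delta) {
    value_.store(value_.load(std::memory_order_relaxed) + delta,
                 std::memory_order_relaxed);
  }

  // Readers get an untorn snapshot without taking any lock.
  Value value() const { return value_.load(std::memory_order_relaxed); }

 private:
  std::atomic<Value> value_{0};
};

int main() {
  LossyStatsCounter sampled_bytes;
  sampled_bytes.LossyAdd(4096);    // e.g. on Span::Sample
  sampled_bytes.LossyAdd(-4096);   // e.g. on Span::Unsample
  return sampled_bytes.value() == 0 ? 0 : 1;
}
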
@@ -176,109 +176,109 @@ Span::ObjIdx* Span::IdxToPtr(ObjIdx idx, size_t size) const {
return ptr;
}
-Span::ObjIdx* Span::BitmapIdxToPtr(ObjIdx idx, size_t size) const {
- uintptr_t off =
- first_page_.start_uintptr() + (static_cast<uintptr_t>(idx) * size);
- ObjIdx* ptr = reinterpret_cast<ObjIdx*>(off);
- return ptr;
-}
-
-size_t Span::BitmapFreelistPopBatch(void** __restrict batch, size_t N,
- size_t size) {
-#ifndef NDEBUG
- size_t before = bitmap_.CountBits(0, 64);
-#endif // NDEBUG
-
- size_t count = 0;
- // Want to fill the batch either with N objects, or the number of objects
- // remaining in the span.
- while (!bitmap_.IsZero() && count < N) {
- size_t offset = bitmap_.FindSet(0);
- ASSERT(offset < 64);
- batch[count] = BitmapIdxToPtr(offset, size);
- bitmap_.ClearLowestBit();
- count++;
+Span::ObjIdx* Span::BitmapIdxToPtr(ObjIdx idx, size_t size) const {
+ uintptr_t off =
+ first_page_.start_uintptr() + (static_cast<uintptr_t>(idx) * size);
+ ObjIdx* ptr = reinterpret_cast<ObjIdx*>(off);
+ return ptr;
+}
+
+size_t Span::BitmapFreelistPopBatch(void** __restrict batch, size_t N,
+ size_t size) {
+#ifndef NDEBUG
+ size_t before = bitmap_.CountBits(0, 64);
+#endif // NDEBUG
+
+ size_t count = 0;
+ // Want to fill the batch either with N objects, or the number of objects
+ // remaining in the span.
+ while (!bitmap_.IsZero() && count < N) {
+ size_t offset = bitmap_.FindSet(0);
+ ASSERT(offset < 64);
+ batch[count] = BitmapIdxToPtr(offset, size);
+ bitmap_.ClearLowestBit();
+ count++;
}
-#ifndef NDEBUG
- size_t after = bitmap_.CountBits(0, 64);
- ASSERT(after + count == before);
- ASSERT(allocated_ + count == embed_count_ - after);
-#endif // NDEBUG
- allocated_ += count;
- return count;
+#ifndef NDEBUG
+ size_t after = bitmap_.CountBits(0, 64);
+ ASSERT(after + count == before);
+ ASSERT(allocated_ + count == embed_count_ - after);
+#endif // NDEBUG
+ allocated_ += count;
+ return count;
}
size_t Span::FreelistPopBatch(void** __restrict batch, size_t N, size_t size) {
- // Handle spans with 64 or fewer objects using a bitmap. We expect spans
- // to frequently hold smaller objects.
- if (ABSL_PREDICT_FALSE(size >= kBitmapMinObjectSize)) {
- return BitmapFreelistPopBatch(batch, N, size);
- }
- if (ABSL_PREDICT_TRUE(size <= SizeMap::kMultiPageSize)) {
+ // Handle spans with 64 or fewer objects using a bitmap. We expect spans
+ // to frequently hold smaller objects.
+ if (ABSL_PREDICT_FALSE(size >= kBitmapMinObjectSize)) {
+ return BitmapFreelistPopBatch(batch, N, size);
+ }
+ if (ABSL_PREDICT_TRUE(size <= SizeMap::kMultiPageSize)) {
return FreelistPopBatchSized<Align::SMALL>(batch, N, size);
} else {
return FreelistPopBatchSized<Align::LARGE>(batch, N, size);
}
}
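
BitmapFreelistPopBatch above walks a 64-bit availability bitmap: find the lowest set bit, hand out that object, clear the bit, repeat until the batch is full or the bitmap is empty. A self-contained sketch of the same loop with a plain uint64_t standing in for the internal Bitmap<64>:

#include <bit>
#include <cstdint>
#include <cstdio>
#include <vector>

// Pop up to n object indices from a 64-bit availability bitmap, lowest index
// first. Returns how many indices were produced.
size_t PopBatch(uint64_t& bitmap, size_t n, std::vector<size_t>& out) {
  size_t count = 0;
  while (bitmap != 0 && count < n) {
    size_t idx = std::countr_zero(bitmap);  // lowest available object
    out.push_back(idx);
    bitmap &= bitmap - 1;                   // clear the lowest set bit
    ++count;
  }
  return count;
}

int main() {
  uint64_t bitmap = 0;
  for (int i = 0; i < 10; ++i) bitmap |= uint64_t{1} << i;  // 10 free objects
  std::vector<size_t> batch;
  size_t got = PopBatch(bitmap, 4, batch);   // pops indices 0..3
  got += PopBatch(bitmap, 64, batch);        // pops the remaining 6
  std::printf("popped %zu objects, bitmap now %llu\n", got,
              static_cast<unsigned long long>(bitmap));
}
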
-uint16_t Span::CalcReciprocal(size_t size) {
- // Calculate scaling factor. We want to avoid dividing by the size of the
- // object. Instead we'll multiply by a scaled version of the reciprocal.
- // We divide kBitmapScalingDenominator by the object size, so later we can
- // multiply by this reciprocal, and then divide this scaling factor out.
- // TODO(djgove) These divides can be computed once at start up.
- size_t reciprocal = 0;
- // The spans hold objects up to kMaxSize, so it's safe to assume.
- ABSL_INTERNAL_ASSUME(size <= kMaxSize);
- if (size <= SizeMap::kMultiPageSize) {
- reciprocal = kBitmapScalingDenominator / (size >> kAlignmentShift);
- } else {
- reciprocal =
- kBitmapScalingDenominator / (size >> SizeMap::kMultiPageAlignmentShift);
- }
- ASSERT(reciprocal < 65536);
- return static_cast<uint16_t>(reciprocal);
-}
-
-void Span::BitmapBuildFreelist(size_t size, size_t count) {
- // We are using a bitmap to indicate whether objects are used or not. The
- // maximum capacity for the bitmap is 64 objects.
- ASSERT(count <= 64);
-#ifndef NDEBUG
- // For bitmap_ use embed_count_ to record objects per span.
- embed_count_ = count;
-#endif // NDEBUG
- reciprocal_ = CalcReciprocal(size);
+uint16_t Span::CalcReciprocal(size_t size) {
+ // Calculate scaling factor. We want to avoid dividing by the size of the
+ // object. Instead we'll multiply by a scaled version of the reciprocal.
+ // We divide kBitmapScalingDenominator by the object size, so later we can
+ // multiply by this reciprocal, and then divide this scaling factor out.
+ // TODO(djgove) These divides can be computed once at start up.
+ size_t reciprocal = 0;
+ // The spans hold objects up to kMaxSize, so it's safe to assume.
+ ABSL_INTERNAL_ASSUME(size <= kMaxSize);
+ if (size <= SizeMap::kMultiPageSize) {
+ reciprocal = kBitmapScalingDenominator / (size >> kAlignmentShift);
+ } else {
+ reciprocal =
+ kBitmapScalingDenominator / (size >> SizeMap::kMultiPageAlignmentShift);
+ }
+ ASSERT(reciprocal < 65536);
+ return static_cast<uint16_t>(reciprocal);
+}
+
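
CalcReciprocal above implements division-by-multiplication: instead of dividing an object offset by the object size, the span multiplies by reciprocal = kBitmapScalingDenominator / (size >> kAlignmentShift) and later divides the scaling factor back out with rounding (the matching rounded division appears in OffsetToIdx in span.h). A standalone check of that arithmetic under assumed constants (kAlignment = 8, denominator = 65536; the real values are configuration-dependent):

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr uintptr_t kAssumedAlignmentShift = 3;   // 8-byte alignment, assumed
constexpr uintptr_t kScalingDenominator = 65536;

uint16_t CalcReciprocal(size_t size) {
  return static_cast<uint16_t>(kScalingDenominator /
                               (size >> kAssumedAlignmentShift));
}

// Offset of an object within its span -> object index, via the reciprocal.
uint32_t OffsetToIdx(uintptr_t offset, uint16_t reciprocal) {
  // Adding denominator/2 before the divide rounds to the nearest integer,
  // compensating for the truncation in CalcReciprocal.
  return static_cast<uint32_t>(((offset >> kAssumedAlignmentShift) * reciprocal +
                                kScalingDenominator / 2) /
                               kScalingDenominator);
}

int main() {
  // For every object size and index that a bitmap span can hold, the
  // reciprocal path must agree with a plain division by the object size.
  for (size_t size = 128; size <= 2048; size += 8) {
    const uint16_t r = CalcReciprocal(size);
    for (uint32_t idx = 0; idx < 64; ++idx) {
      const uintptr_t offset = idx * size;
      assert(OffsetToIdx(offset, r) == idx);
    }
  }
}
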
+void Span::BitmapBuildFreelist(size_t size, size_t count) {
+ // We are using a bitmap to indicate whether objects are used or not. The
+ // maximum capacity for the bitmap is 64 objects.
+ ASSERT(count <= 64);
+#ifndef NDEBUG
+ // For bitmap_ use embed_count_ to record objects per span.
+ embed_count_ = count;
+#endif // NDEBUG
+ reciprocal_ = CalcReciprocal(size);
allocated_ = 0;
- bitmap_.Clear(); // bitmap_ can be non-zero from a previous use.
- bitmap_.SetRange(0, count);
- ASSERT(bitmap_.CountBits(0, 64) == count);
-}
-
-int Span::BuildFreelist(size_t size, size_t count, void** batch, int N) {
+ bitmap_.Clear(); // bitmap_ can be non-zero from a previous use.
+ bitmap_.SetRange(0, count);
+ ASSERT(bitmap_.CountBits(0, 64) == count);
+}
+
+int Span::BuildFreelist(size_t size, size_t count, void** batch, int N) {
freelist_ = kListEnd;
- if (size >= kBitmapMinObjectSize) {
- BitmapBuildFreelist(size, count);
- return BitmapFreelistPopBatch(batch, N, size);
- }
-
- // First, push as much as we can into the batch.
- char* ptr = static_cast<char*>(start_address());
- int result = N <= count ? N : count;
- for (int i = 0; i < result; ++i) {
- batch[i] = ptr;
- ptr += size;
- }
- allocated_ = result;
-
+ if (size >= kBitmapMinObjectSize) {
+ BitmapBuildFreelist(size, count);
+ return BitmapFreelistPopBatch(batch, N, size);
+ }
+
+ // First, push as much as we can into the batch.
+ char* ptr = static_cast<char*>(start_address());
+ int result = N <= count ? N : count;
+ for (int i = 0; i < result; ++i) {
+ batch[i] = ptr;
+ ptr += size;
+ }
+ allocated_ = result;
+
ObjIdx idxStep = size / kAlignment;
// Valid objects are {0, idxStep, idxStep * 2, ..., idxStep * (count - 1)}.
if (size > SizeMap::kMultiPageSize) {
idxStep = size / SizeMap::kMultiPageAlignment;
}
- ObjIdx idx = idxStep * result;
+ ObjIdx idx = idxStep * result;
// Verify that the end of the useful portion of the span (and the beginning of
// the span waste) has an index that doesn't overflow or risk confusion with
@@ -290,43 +290,43 @@ int Span::BuildFreelist(size_t size, size_t count, void** batch, int N) {
// The index of the end of the useful portion of the span.
ObjIdx idxEnd = count * idxStep;
-
- // Then, push as much as we can into the cache_.
- int cache_size = 0;
- for (; idx < idxEnd && cache_size < kCacheSize; idx += idxStep) {
- cache_[cache_size] = idx;
- cache_size++;
+
+ // Then, push as much as we can into the cache_.
+ int cache_size = 0;
+ for (; idx < idxEnd && cache_size < kCacheSize; idx += idxStep) {
+ cache_[cache_size] = idx;
+ cache_size++;
}
- cache_size_ = cache_size;
-
+ cache_size_ = cache_size;
+
// Now, build freelist and stack other objects onto freelist objects.
// Note: we take freelist objects from the beginning and stacked objects
// from the end. This has the nice property of not paging in the whole span at
// once and not draining the whole cache.
ObjIdx* host = nullptr; // cached first object on freelist
const size_t max_embed = size / sizeof(ObjIdx) - 1;
- int embed_count = 0;
+ int embed_count = 0;
while (idx < idxEnd) {
// Check that no idx can be confused with kListEnd.
ASSERT(idx != kListEnd);
- if (host && embed_count != max_embed) {
+ if (host && embed_count != max_embed) {
// Push onto first object on the freelist.
- embed_count++;
+ embed_count++;
idxEnd -= idxStep;
- host[embed_count] = idxEnd;
+ host[embed_count] = idxEnd;
} else {
// The first object is full, push new object onto freelist.
host = IdxToPtr(idx, size);
host[0] = freelist_;
freelist_ = idx;
- embed_count = 0;
+ embed_count = 0;
idx += idxStep;
}
}
- embed_count_ = embed_count;
- return result;
+ embed_count_ = embed_count;
+ return result;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
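
BuildFreelist above fills the caller's batch first, then parks up to kCacheSize indices in the span's small cache_, and finally threads the remaining objects through a list whose links are 16-bit object indices embedded inside the free objects themselves. The sketch below reproduces that ordering in a deliberately simplified form (single page, one link per free object, no multi-index embedding and no bitmap path); every constant is hypothetical:

#include <cassert>
#include <cstdint>
#include <cstring>

constexpr size_t kPageSize = 8192;    // assumed
constexpr size_t kAlignment = 8;      // assumed
constexpr uint16_t kListEnd = 0xFFFF;
constexpr int kCacheSize = 4;

struct MiniSpan {
  alignas(kAlignment) unsigned char page[kPageSize];
  uint16_t cache[kCacheSize];
  int cache_size = 0;
  uint16_t freelist = kListEnd;

  void* IdxToPtr(uint16_t idx) { return page + size_t{idx} * kAlignment; }

  // Hand out the first min(N, count) objects in `batch`, cache a few more
  // indices, and link the rest through their own first two bytes.
  int BuildFreelist(size_t size, size_t count, void** batch, int N) {
    const uint16_t step = static_cast<uint16_t>(size / kAlignment);
    const int result = (N <= static_cast<int>(count)) ? N : static_cast<int>(count);
    for (int i = 0; i < result; ++i) batch[i] = page + i * size;
    uint16_t idx = static_cast<uint16_t>(step * result);
    const uint16_t end = static_cast<uint16_t>(step * count);
    cache_size = 0;
    for (; idx < end && cache_size < kCacheSize; idx += step)
      cache[cache_size++] = idx;
    freelist = kListEnd;
    for (; idx < end; idx += step) {   // one index-sized link per free object
      std::memcpy(IdxToPtr(idx), &freelist, sizeof(freelist));
      freelist = idx;
    }
    return result;
  }

  bool Pop(void** out) {
    if (cache_size > 0) { *out = IdxToPtr(cache[--cache_size]); return true; }
    if (freelist == kListEnd) return false;
    *out = IdxToPtr(freelist);
    std::memcpy(&freelist, *out, sizeof(freelist));
    return true;
  }
};

int main() {
  static MiniSpan span;
  void* batch[8];
  const size_t size = 256, count = kPageSize / size;     // 32 objects
  const int got = span.BuildFreelist(size, count, batch, 8);
  assert(got == 8);
  void* p;
  int rest = 0;
  while (span.Pop(&p)) ++rest;                           // 4 cached + 20 linked
  assert(got + rest == static_cast<int>(count));
}
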
diff --git a/contrib/libs/tcmalloc/tcmalloc/span.h b/contrib/libs/tcmalloc/tcmalloc/span.h
index c589709094..c11f635bd8 100644
--- a/contrib/libs/tcmalloc/tcmalloc/span.h
+++ b/contrib/libs/tcmalloc/tcmalloc/span.h
@@ -22,29 +22,29 @@
#include <string.h>
#include "absl/base/thread_annotations.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/linked_list.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/optimization.h"
-#include "tcmalloc/internal/range_tracker.h"
+#include "tcmalloc/internal/range_tracker.h"
#include "tcmalloc/pages.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
-
-// Can fit 64 objects into a bitmap, so determine what the minimum object
-// size needs to be in order for that to work. This makes the assumption that
-// we don't increase the number of pages at a point where the object count
-// ends up exceeding 64.
-inline constexpr size_t kBitmapMinObjectSize = kPageSize / 64;
-
-// Denominator for bitmap scaling factor. The idea is that instead of dividing
-// by N we multiply by M = kBitmapScalingDenominator / N and round the resulting
-// value.
-inline constexpr size_t kBitmapScalingDenominator = 65536;
-
+namespace tcmalloc_internal {
+
+// Can fit 64 objects into a bitmap, so determine what the minimum object
+// size needs to be in order for that to work. This makes the assumption that
+// we don't increase the number of pages at a point where the object count
+// ends up exceeding 64.
+inline constexpr size_t kBitmapMinObjectSize = kPageSize / 64;
+
+// Denominator for bitmap scaling factor. The idea is that instead of dividing
+// by N we multiply by M = kBitmapScalingDenominator / N and round the resulting
+// value.
+inline constexpr size_t kBitmapScalingDenominator = 65536;
+
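
kBitmapMinObjectSize is kPageSize / 64 precisely so that a single-page span of such objects never exceeds 64 of them, which is what allows one 64-bit bitmap to describe availability. With an assumed 8 KiB page the arithmetic looks like this:

#include <cstddef>

constexpr std::size_t kAssumedPageSize = 8192;                      // assumption
constexpr std::size_t kBitmapMinObjectSize = kAssumedPageSize / 64; // 128 bytes

// Any single-page span of objects >= kBitmapMinObjectSize has <= 64 objects.
static_assert(kAssumedPageSize / kBitmapMinObjectSize <= 64, "fits in 64 bits");

int main() { return 0; }
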
// Information kept for a span (a contiguous run of pages).
//
// Spans can be in different states. The current state determines set of methods
@@ -163,38 +163,38 @@ class Span : public SpanList::Elem {
// These methods REQUIRE a SMALL_OBJECT span.
// ---------------------------------------------------------------------------
- // Indicates whether the object is considered large or small based on
- // size > SizeMap::kMultiPageSize.
- enum class Align { SMALL, LARGE };
-
- // Indicate whether the Span is empty. Size is used to determine whether
- // the span is using a compressed linked list of objects, or a bitmap
- // to hold available objects.
- bool FreelistEmpty(size_t size) const;
+ // Indicates whether the object is considered large or small based on
+ // size > SizeMap::kMultiPageSize.
+ enum class Align { SMALL, LARGE };
+ // Indicate whether the Span is empty. Size is used to determine whether
+ // the span is using a compressed linked list of objects, or a bitmap
+ // to hold available objects.
+ bool FreelistEmpty(size_t size) const;
+
// Pushes ptr onto freelist unless the freelist becomes full,
// in which case just return false.
- bool FreelistPush(void* ptr, size_t size) {
- ASSERT(allocated_ > 0);
- if (ABSL_PREDICT_FALSE(allocated_ == 1)) {
- return false;
- }
- allocated_--;
- // Bitmaps are used to record object availability when there are fewer than
- // 64 objects in a span.
- if (ABSL_PREDICT_FALSE(size >= kBitmapMinObjectSize)) {
- if (ABSL_PREDICT_TRUE(size <= SizeMap::kMultiPageSize)) {
- return BitmapFreelistPush<Align::SMALL>(ptr, size);
- } else {
- return BitmapFreelistPush<Align::LARGE>(ptr, size);
- }
- }
- if (ABSL_PREDICT_TRUE(size <= SizeMap::kMultiPageSize)) {
- return FreelistPushSized<Align::SMALL>(ptr, size);
- } else {
- return FreelistPushSized<Align::LARGE>(ptr, size);
- }
- }
+ bool FreelistPush(void* ptr, size_t size) {
+ ASSERT(allocated_ > 0);
+ if (ABSL_PREDICT_FALSE(allocated_ == 1)) {
+ return false;
+ }
+ allocated_--;
+ // Bitmaps are used to record object availability when there are fewer than
+ // 64 objects in a span.
+ if (ABSL_PREDICT_FALSE(size >= kBitmapMinObjectSize)) {
+ if (ABSL_PREDICT_TRUE(size <= SizeMap::kMultiPageSize)) {
+ return BitmapFreelistPush<Align::SMALL>(ptr, size);
+ } else {
+ return BitmapFreelistPush<Align::LARGE>(ptr, size);
+ }
+ }
+ if (ABSL_PREDICT_TRUE(size <= SizeMap::kMultiPageSize)) {
+ return FreelistPushSized<Align::SMALL>(ptr, size);
+ } else {
+ return FreelistPushSized<Align::LARGE>(ptr, size);
+ }
+ }
// Pops up to N objects from the freelist and returns them in the batch array.
// Returns number of objects actually popped.
@@ -204,9 +204,9 @@ class Span : public SpanList::Elem {
void Init(PageId p, Length n);
// Initialize freelist to contain all objects in the span.
- // Pops up to N objects from the freelist and returns them in the batch array.
- // Returns number of objects actually popped.
- int BuildFreelist(size_t size, size_t count, void** batch, int N);
+ // Pops up to N objects from the freelist and returns them in the batch array.
+ // Returns number of objects actually popped.
+ int BuildFreelist(size_t size, size_t count, void** batch, int N);
// Prefetch cacheline containing most important span information.
void Prefetch();
@@ -227,15 +227,15 @@ class Span : public SpanList::Elem {
// look at b/35680381 and cl/199502226.
uint16_t allocated_; // Number of non-free objects
uint16_t embed_count_;
- // For available objects stored as a compressed linked list, the index of
- // the first object in recorded in freelist_. When a bitmap is used to
- // represent available objects, the reciprocal of the object size is
- // stored to enable conversion from the offset of an object within a
- // span to the index of the object.
- union {
- uint16_t freelist_;
- uint16_t reciprocal_;
- };
+ // For available objects stored as a compressed linked list, the index of
+ // the first object is recorded in freelist_. When a bitmap is used to
+ // represent available objects, the reciprocal of the object size is
+ // stored to enable conversion from the offset of an object within a
+ // span to the index of the object.
+ union {
+ uint16_t freelist_;
+ uint16_t reciprocal_;
+ };
uint8_t cache_size_;
uint8_t location_ : 2; // Is the span on a freelist, and if so, which?
uint8_t sampled_ : 1; // Sampled object?
@@ -245,11 +245,11 @@ class Span : public SpanList::Elem {
// Embed cache of free objects.
ObjIdx cache_[kCacheSize];
- // Used for spans with in CentralFreeList with fewer than 64 objects.
- // Each bit is set to one when the object is available, and zero
- // when the object is used.
- Bitmap<64> bitmap_{};
-
+ // Used for spans in CentralFreeList with fewer than 64 objects.
+ // Each bit is set to one when the object is available, and zero
+ // when the object is used.
+ Bitmap<64> bitmap_{};
+
// Used only for sampled spans (SAMPLED state).
StackTrace* sampled_stack_;
@@ -268,61 +268,61 @@ class Span : public SpanList::Elem {
ObjIdx PtrToIdx(void* ptr, size_t size) const;
ObjIdx* IdxToPtr(ObjIdx idx, size_t size) const;
- // For bitmap'd spans conversion from an offset to an index is performed
- // by multiplying by the scaled reciprocal of the object size.
- static uint16_t CalcReciprocal(size_t size);
-
- // Convert object pointer <-> freelist index for bitmap managed objects.
- template <Align align>
- ObjIdx BitmapPtrToIdx(void* ptr, size_t size) const;
- ObjIdx* BitmapIdxToPtr(ObjIdx idx, size_t size) const;
-
- // Helper function for converting a pointer to an index.
- template <Align align>
- static ObjIdx OffsetToIdx(uintptr_t offset, size_t size, uint16_t reciprocal);
- // Helper function for testing round trips between pointers and indexes.
- static ObjIdx TestOffsetToIdx(uintptr_t ptr, size_t size,
- uint16_t reciprocal) {
- if (size <= SizeMap::kMultiPageSize) {
- return OffsetToIdx<Align::SMALL>(ptr, size, reciprocal);
- } else {
- return OffsetToIdx<Align::LARGE>(ptr, size, reciprocal);
- }
- }
+ // For bitmap'd spans conversion from an offset to an index is performed
+ // by multiplying by the scaled reciprocal of the object size.
+ static uint16_t CalcReciprocal(size_t size);
+ // Convert object pointer <-> freelist index for bitmap managed objects.
template <Align align>
+ ObjIdx BitmapPtrToIdx(void* ptr, size_t size) const;
+ ObjIdx* BitmapIdxToPtr(ObjIdx idx, size_t size) const;
+
+ // Helper function for converting a pointer to an index.
+ template <Align align>
+ static ObjIdx OffsetToIdx(uintptr_t offset, size_t size, uint16_t reciprocal);
+ // Helper function for testing round trips between pointers and indexes.
+ static ObjIdx TestOffsetToIdx(uintptr_t ptr, size_t size,
+ uint16_t reciprocal) {
+ if (size <= SizeMap::kMultiPageSize) {
+ return OffsetToIdx<Align::SMALL>(ptr, size, reciprocal);
+ } else {
+ return OffsetToIdx<Align::LARGE>(ptr, size, reciprocal);
+ }
+ }
+
+ template <Align align>
ObjIdx* IdxToPtrSized(ObjIdx idx, size_t size) const;
template <Align align>
- ObjIdx PtrToIdxSized(void* ptr, size_t size) const;
-
- template <Align align>
+ ObjIdx PtrToIdxSized(void* ptr, size_t size) const;
+
+ template <Align align>
size_t FreelistPopBatchSized(void** __restrict batch, size_t N, size_t size);
-
- template <Align align>
- bool FreelistPushSized(void* ptr, size_t size);
-
- // For spans containing 64 or fewer objects, indicate that the object at the
- // index has been returned. Always returns true.
- template <Align align>
- bool BitmapFreelistPush(void* ptr, size_t size);
-
- // A bitmap is used to indicate object availability for spans containing
- // 64 or fewer objects.
- void BitmapBuildFreelist(size_t size, size_t count);
-
- // For spans with 64 or fewer objects populate batch with up to N objects.
- // Returns number of objects actually popped.
- size_t BitmapFreelistPopBatch(void** batch, size_t N, size_t size);
-
- // Friend class to enable more indepth testing of bitmap code.
- friend class SpanTestPeer;
+
+ template <Align align>
+ bool FreelistPushSized(void* ptr, size_t size);
+
+ // For spans containing 64 or fewer objects, indicate that the object at the
+ // index has been returned. Always returns true.
+ template <Align align>
+ bool BitmapFreelistPush(void* ptr, size_t size);
+
+ // A bitmap is used to indicate object availability for spans containing
+ // 64 or fewer objects.
+ void BitmapBuildFreelist(size_t size, size_t count);
+
+ // For spans with 64 or fewer objects populate batch with up to N objects.
+ // Returns number of objects actually popped.
+ size_t BitmapFreelistPopBatch(void** batch, size_t N, size_t size);
+
+ // Friend class to enable more in-depth testing of bitmap code.
+ friend class SpanTestPeer;
};
template <Span::Align align>
Span::ObjIdx* Span::IdxToPtrSized(ObjIdx idx, size_t size) const {
ASSERT(idx != kListEnd);
- static_assert(align == Align::LARGE || align == Align::SMALL);
+ static_assert(align == Align::LARGE || align == Align::SMALL);
uintptr_t off =
first_page_.start_uintptr() +
(static_cast<uintptr_t>(idx)
@@ -334,38 +334,38 @@ Span::ObjIdx* Span::IdxToPtrSized(ObjIdx idx, size_t size) const {
}
template <Span::Align align>
-Span::ObjIdx Span::PtrToIdxSized(void* ptr, size_t size) const {
- // Object index is an offset from span start divided by a power-of-two.
- // The divisors are choosen so that
- // (1) objects are aligned on the divisor,
- // (2) index fits into 16 bits and
- // (3) the index of the beginning of all objects is strictly less than
- // kListEnd (note that we have 256K pages and multi-page spans).
- // For example with 1M spans we need kMultiPageAlignment >= 16.
- // An ASSERT in BuildFreelist() verifies a condition which implies (3).
- uintptr_t p = reinterpret_cast<uintptr_t>(ptr);
- uintptr_t off;
- if (align == Align::SMALL) {
- // Generally we need to load first_page_ to compute the offset.
- // But first_page_ can be in a different cache line then the fields that
- // we use in FreelistPush otherwise (cache_, cache_size_, freelist_).
- // So we avoid loading first_page_ for smaller sizes that have one page per
- // span, instead we compute the offset by taking low kPageShift bits of the
- // pointer.
- ASSERT(PageIdContaining(ptr) == first_page_);
- ASSERT(num_pages_ == Length(1));
- off = (p & (kPageSize - 1)) / kAlignment;
- } else {
- off = (p - first_page_.start_uintptr()) / SizeMap::kMultiPageAlignment;
- }
- ObjIdx idx = static_cast<ObjIdx>(off);
- ASSERT(idx != kListEnd);
- ASSERT(idx == off);
- ASSERT(IdxToPtr(idx, size) == ptr);
- return idx;
-}
-
-template <Span::Align align>
+Span::ObjIdx Span::PtrToIdxSized(void* ptr, size_t size) const {
+ // Object index is an offset from span start divided by a power-of-two.
+ // The divisors are chosen so that
+ // (1) objects are aligned on the divisor,
+ // (2) index fits into 16 bits and
+ // (3) the index of the beginning of all objects is strictly less than
+ // kListEnd (note that we have 256K pages and multi-page spans).
+ // For example with 1M spans we need kMultiPageAlignment >= 16.
+ // An ASSERT in BuildFreelist() verifies a condition which implies (3).
+ uintptr_t p = reinterpret_cast<uintptr_t>(ptr);
+ uintptr_t off;
+ if (align == Align::SMALL) {
+ // Generally we need to load first_page_ to compute the offset.
+ // But first_page_ can be in a different cache line than the fields that
+ // we use in FreelistPush otherwise (cache_, cache_size_, freelist_).
+ // So we avoid loading first_page_ for smaller sizes that have one page per
+ // span, instead we compute the offset by taking low kPageShift bits of the
+ // pointer.
+ ASSERT(PageIdContaining(ptr) == first_page_);
+ ASSERT(num_pages_ == Length(1));
+ off = (p & (kPageSize - 1)) / kAlignment;
+ } else {
+ off = (p - first_page_.start_uintptr()) / SizeMap::kMultiPageAlignment;
+ }
+ ObjIdx idx = static_cast<ObjIdx>(off);
+ ASSERT(idx != kListEnd);
+ ASSERT(idx == off);
+ ASSERT(IdxToPtr(idx, size) == ptr);
+ return idx;
+}
+
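
PtrToIdxSized above avoids touching first_page_ for single-page spans: because the span occupies exactly one aligned page, the object's offset within the span equals the pointer's low kPageShift bits, so no second cache line has to be loaded. A sketch of the two paths under assumed constants:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageShift = 13;                  // assumed 8 KiB pages
constexpr uintptr_t kPageSize = uintptr_t{1} << kPageShift;
constexpr uintptr_t kAlignment = 8;                   // assumed
constexpr uintptr_t kMultiPageAlignment = 64;         // assumed

// Small (single-page) span: mask the low bits of the pointer itself.
uint32_t SmallPtrToIdx(uintptr_t ptr) {
  return static_cast<uint32_t>((ptr & (kPageSize - 1)) / kAlignment);
}

// Large (multi-page) span: the span's first page is needed to get the offset.
uint32_t LargePtrToIdx(uintptr_t ptr, uintptr_t span_start) {
  return static_cast<uint32_t>((ptr - span_start) / kMultiPageAlignment);
}

int main() {
  const uintptr_t span_start = 40 * kPageSize;        // hypothetical aligned span
  const uintptr_t obj = span_start + 5 * 512;         // sixth 512-byte object
  // For a one-page span both computations agree, but the small path never
  // needs to read span_start.
  assert(SmallPtrToIdx(obj) == (obj - span_start) / kAlignment);
  assert(LargePtrToIdx(obj, span_start) == (obj - span_start) / kMultiPageAlignment);
}
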
+template <Span::Align align>
size_t Span::FreelistPopBatchSized(void** __restrict batch, size_t N,
size_t size) {
size_t result = 0;
@@ -422,84 +422,84 @@ size_t Span::FreelistPopBatchSized(void** __restrict batch, size_t N,
return result;
}
-template <Span::Align align>
-bool Span::FreelistPushSized(void* ptr, size_t size) {
- ObjIdx idx = PtrToIdxSized<align>(ptr, size);
- if (cache_size_ != kCacheSize) {
- // Have empty space in the cache, push there.
- cache_[cache_size_] = idx;
- cache_size_++;
- } else if (ABSL_PREDICT_TRUE(freelist_ != kListEnd) &&
- // -1 because the first slot is used by freelist link.
- ABSL_PREDICT_TRUE(embed_count_ != size / sizeof(ObjIdx) - 1)) {
- // Push onto the first object on freelist.
- ObjIdx* host;
- if (align == Align::SMALL) {
- // Avoid loading first_page_ in this case (see the comment in PtrToIdx).
- ASSERT(num_pages_ == Length(1));
- host = reinterpret_cast<ObjIdx*>(
- (reinterpret_cast<uintptr_t>(ptr) & ~(kPageSize - 1)) +
- static_cast<uintptr_t>(freelist_) * kAlignment);
- ASSERT(PtrToIdx(host, size) == freelist_);
- } else {
- host = IdxToPtrSized<align>(freelist_, size);
- }
- embed_count_++;
- host[embed_count_] = idx;
- } else {
- // Push onto freelist.
- *reinterpret_cast<ObjIdx*>(ptr) = freelist_;
- freelist_ = idx;
- embed_count_ = 0;
- }
- return true;
-}
-
-template <Span::Align align>
-Span::ObjIdx Span::OffsetToIdx(uintptr_t offset, size_t size,
- uint16_t reciprocal) {
- if (align == Align::SMALL) {
- return static_cast<ObjIdx>(
- // Add kBitmapScalingDenominator / 2 to round to nearest integer.
- ((offset >> kAlignmentShift) * reciprocal +
- kBitmapScalingDenominator / 2) /
- kBitmapScalingDenominator);
- } else {
- return static_cast<ObjIdx>(
- ((offset >> SizeMap::kMultiPageAlignmentShift) * reciprocal +
- kBitmapScalingDenominator / 2) /
- kBitmapScalingDenominator);
- }
-}
-
-template <Span::Align align>
-Span::ObjIdx Span::BitmapPtrToIdx(void* ptr, size_t size) const {
- uintptr_t p = reinterpret_cast<uintptr_t>(ptr);
- uintptr_t off = static_cast<uint32_t>(p - first_page_.start_uintptr());
- ObjIdx idx = OffsetToIdx<align>(off, size, reciprocal_);
- ASSERT(BitmapIdxToPtr(idx, size) == ptr);
- return idx;
-}
-
-template <Span::Align align>
-bool Span::BitmapFreelistPush(void* ptr, size_t size) {
-#ifndef NDEBUG
- size_t before = bitmap_.CountBits(0, 64);
-#endif
- // TODO(djgove) Conversions to offsets can be computed outside of lock.
- ObjIdx idx = BitmapPtrToIdx<align>(ptr, size);
- // Check that the object is not already returned.
- ASSERT(bitmap_.GetBit(idx) == 0);
- // Set the bit indicating where the object was returned.
- bitmap_.SetBit(idx);
-#ifndef NDEBUG
- size_t after = bitmap_.CountBits(0, 64);
- ASSERT(before + 1 == after);
- ASSERT(allocated_ == embed_count_ - after);
-#endif
- return true;
-}
-
+template <Span::Align align>
+bool Span::FreelistPushSized(void* ptr, size_t size) {
+ ObjIdx idx = PtrToIdxSized<align>(ptr, size);
+ if (cache_size_ != kCacheSize) {
+ // Have empty space in the cache, push there.
+ cache_[cache_size_] = idx;
+ cache_size_++;
+ } else if (ABSL_PREDICT_TRUE(freelist_ != kListEnd) &&
+ // -1 because the first slot is used by freelist link.
+ ABSL_PREDICT_TRUE(embed_count_ != size / sizeof(ObjIdx) - 1)) {
+ // Push onto the first object on freelist.
+ ObjIdx* host;
+ if (align == Align::SMALL) {
+ // Avoid loading first_page_ in this case (see the comment in PtrToIdx).
+ ASSERT(num_pages_ == Length(1));
+ host = reinterpret_cast<ObjIdx*>(
+ (reinterpret_cast<uintptr_t>(ptr) & ~(kPageSize - 1)) +
+ static_cast<uintptr_t>(freelist_) * kAlignment);
+ ASSERT(PtrToIdx(host, size) == freelist_);
+ } else {
+ host = IdxToPtrSized<align>(freelist_, size);
+ }
+ embed_count_++;
+ host[embed_count_] = idx;
+ } else {
+ // Push onto freelist.
+ *reinterpret_cast<ObjIdx*>(ptr) = freelist_;
+ freelist_ = idx;
+ embed_count_ = 0;
+ }
+ return true;
+}
+
+template <Span::Align align>
+Span::ObjIdx Span::OffsetToIdx(uintptr_t offset, size_t size,
+ uint16_t reciprocal) {
+ if (align == Align::SMALL) {
+ return static_cast<ObjIdx>(
+ // Add kBitmapScalingDenominator / 2 to round to nearest integer.
+ ((offset >> kAlignmentShift) * reciprocal +
+ kBitmapScalingDenominator / 2) /
+ kBitmapScalingDenominator);
+ } else {
+ return static_cast<ObjIdx>(
+ ((offset >> SizeMap::kMultiPageAlignmentShift) * reciprocal +
+ kBitmapScalingDenominator / 2) /
+ kBitmapScalingDenominator);
+ }
+}
+
+template <Span::Align align>
+Span::ObjIdx Span::BitmapPtrToIdx(void* ptr, size_t size) const {
+ uintptr_t p = reinterpret_cast<uintptr_t>(ptr);
+ uintptr_t off = static_cast<uint32_t>(p - first_page_.start_uintptr());
+ ObjIdx idx = OffsetToIdx<align>(off, size, reciprocal_);
+ ASSERT(BitmapIdxToPtr(idx, size) == ptr);
+ return idx;
+}
+
+template <Span::Align align>
+bool Span::BitmapFreelistPush(void* ptr, size_t size) {
+#ifndef NDEBUG
+ size_t before = bitmap_.CountBits(0, 64);
+#endif
+ // TODO(djgove) Conversions to offsets can be computed outside of lock.
+ ObjIdx idx = BitmapPtrToIdx<align>(ptr, size);
+ // Check that the object is not already returned.
+ ASSERT(bitmap_.GetBit(idx) == 0);
+ // Set the bit indicating where the object was returned.
+ bitmap_.SetBit(idx);
+#ifndef NDEBUG
+ size_t after = bitmap_.CountBits(0, 64);
+ ASSERT(before + 1 == after);
+ ASSERT(allocated_ == embed_count_ - after);
+#endif
+ return true;
+}
+
inline Span::Location Span::location() const {
return static_cast<Location>(location_);
}
@@ -539,12 +539,12 @@ inline uint64_t Span::freelist_added_time() const {
return freelist_added_time_;
}
-inline bool Span::FreelistEmpty(size_t size) const {
- if (size < kBitmapMinObjectSize) {
- return (cache_size_ == 0 && freelist_ == kListEnd);
- } else {
- return (bitmap_.IsZero());
- }
+inline bool Span::FreelistEmpty(size_t size) const {
+ if (size < kBitmapMinObjectSize) {
+ return (cache_size_ == 0 && freelist_ == kListEnd);
+ } else {
+ return (bitmap_.IsZero());
+ }
}
inline void Span::RemoveFromList() { SpanList::Elem::remove(); }
@@ -582,8 +582,8 @@ inline void Span::Init(PageId p, Length n) {
sampled_ = 0;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_SPAN_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/span_benchmark.cc b/contrib/libs/tcmalloc/tcmalloc/span_benchmark.cc
index 6e4569dd83..40ebeb88d4 100644
--- a/contrib/libs/tcmalloc/tcmalloc/span_benchmark.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/span_benchmark.cc
@@ -25,9 +25,9 @@
#include "tcmalloc/span.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class RawSpan {
@@ -42,7 +42,7 @@ class RawSpan {
CHECK_CONDITION(res == 0);
span_.set_first_page(PageIdContaining(mem));
span_.set_num_pages(npages);
- span_.BuildFreelist(size, objects_per_span, nullptr, 0);
+ span_.BuildFreelist(size, objects_per_span, nullptr, 0);
}
~RawSpan() { free(span_.start_address()); }
@@ -207,6 +207,6 @@ BENCHMARK(BM_multiple_spans)
->Arg(kNumClasses - 1);
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/span_stats.h b/contrib/libs/tcmalloc/tcmalloc/span_stats.h
index 8c0b40b0fd..c64ce052f7 100644
--- a/contrib/libs/tcmalloc/tcmalloc/span_stats.h
+++ b/contrib/libs/tcmalloc/tcmalloc/span_stats.h
@@ -18,11 +18,11 @@
#include <stddef.h>
#include "absl/base/macros.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
struct SpanStats {
size_t num_spans_requested = 0;
@@ -43,8 +43,8 @@ struct SpanStats {
}
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_SPAN_STATS_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/span_test.cc b/contrib/libs/tcmalloc/tcmalloc/span_test.cc
index 750f3cca26..c7f33b1006 100644
--- a/contrib/libs/tcmalloc/tcmalloc/span_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/span_test.cc
@@ -28,7 +28,7 @@
#include "tcmalloc/static_vars.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class RawSpan {
@@ -43,7 +43,7 @@ class RawSpan {
CHECK_CONDITION(res == 0);
span_.set_first_page(PageIdContaining(mem));
span_.set_num_pages(npages);
- span_.BuildFreelist(size, objects_per_span, nullptr, 0);
+ span_.BuildFreelist(size, objects_per_span, nullptr, 0);
}
~RawSpan() { free(span_.start_address()); }
@@ -84,7 +84,7 @@ class SpanTest : public testing::TestWithParam<size_t> {
TEST_P(SpanTest, FreelistBasic) {
Span &span_ = raw_span_.span();
- EXPECT_FALSE(span_.FreelistEmpty(size_));
+ EXPECT_FALSE(span_.FreelistEmpty(size_));
void *batch[kMaxObjectsToMove];
size_t popped = 0;
size_t want = 1;
@@ -96,7 +96,7 @@ TEST_P(SpanTest, FreelistBasic) {
for (;;) {
size_t n = span_.FreelistPopBatch(batch, want, size_);
popped += n;
- EXPECT_EQ(span_.FreelistEmpty(size_), popped == objects_per_span_);
+ EXPECT_EQ(span_.FreelistEmpty(size_), popped == objects_per_span_);
for (size_t i = 0; i < n; ++i) {
void *p = batch[i];
uintptr_t off = reinterpret_cast<char *>(p) - start;
@@ -114,7 +114,7 @@ TEST_P(SpanTest, FreelistBasic) {
want = 1;
}
}
- EXPECT_TRUE(span_.FreelistEmpty(size_));
+ EXPECT_TRUE(span_.FreelistEmpty(size_));
EXPECT_EQ(span_.FreelistPopBatch(batch, 1, size_), 0);
EXPECT_EQ(popped, objects_per_span_);
@@ -123,7 +123,7 @@ TEST_P(SpanTest, FreelistBasic) {
EXPECT_TRUE(objects[idx]);
bool ok = span_.FreelistPush(start + idx * size_, size_);
EXPECT_TRUE(ok);
- EXPECT_FALSE(span_.FreelistEmpty(size_));
+ EXPECT_FALSE(span_.FreelistEmpty(size_));
objects[idx] = false;
--popped;
}
@@ -153,12 +153,12 @@ TEST_P(SpanTest, FreelistRandomized) {
} else {
EXPECT_EQ(objects.size(), 1);
}
- EXPECT_EQ(span_.FreelistEmpty(size_), objects_per_span_ == 1);
+ EXPECT_EQ(span_.FreelistEmpty(size_), objects_per_span_ == 1);
} else {
size_t want = absl::Uniform<int32_t>(rng, 0, batch_size_) + 1;
size_t n = span_.FreelistPopBatch(batch, want, size_);
if (n < want) {
- EXPECT_TRUE(span_.FreelistEmpty(size_));
+ EXPECT_TRUE(span_.FreelistEmpty(size_));
}
for (size_t i = 0; i < n; ++i) {
EXPECT_TRUE(objects.insert(batch[i]).second);
@@ -187,5 +187,5 @@ TEST_P(SpanTest, FreelistRandomized) {
INSTANTIATE_TEST_SUITE_P(All, SpanTest, testing::Range(size_t(1), kNumClasses));
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc b/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc
index 5b5741b6a8..3933a55fc4 100644
--- a/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc
@@ -25,9 +25,9 @@
#include "tcmalloc/sampler.h"
#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
bool StackTraceTable::Bucket::KeyEqual(uintptr_t h, const StackTrace& t) const {
// Do not merge entries with different sizes so that profiling tools
@@ -103,13 +103,13 @@ void StackTraceTable::AddTrace(double count, const StackTrace& t) {
depth_total_ += t.depth;
bucket_total_++;
b = Static::bucket_allocator().New();
- b->hash = h;
- b->trace = t;
+ b->hash = h;
+ b->trace = t;
b->trace.user_data = Static::CopySampleUserData(t.user_data);
- b->count = count;
- b->total_weight = t.weight * count;
- b->next = table_[idx];
- table_[idx] = b;
+ b->count = count;
+ b->total_weight = t.weight * count;
+ b->next = table_[idx];
+ table_[idx] = b;
}
}
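
AddTrace above groups samples into hash buckets: if no existing bucket matches, a freshly allocated bucket is pushed onto the head of its chain. A minimal sketch of that head-insertion pattern, simplified to key on the hash alone and using plain new instead of the internal bucket allocator:

#include <cstdint>
#include <cstdio>

struct Bucket {
  uintptr_t hash;
  double count;
  Bucket* next;
};

constexpr int kNumBuckets = 1 << 4;            // small table for illustration
Bucket* table[kNumBuckets] = {};

void AddTrace(uintptr_t hash, double count) {
  const int idx = hash & (kNumBuckets - 1);
  for (Bucket* b = table[idx]; b != nullptr; b = b->next) {
    if (b->hash == hash) {                     // merge with an existing bucket
      b->count += count;
      return;
    }
  }
  // No match: push a new bucket onto the head of the chain.
  table[idx] = new Bucket{hash, count, table[idx]};
}

int main() {
  AddTrace(0x1234, 1.0);
  AddTrace(0x1234, 2.0);                       // merged, not re-inserted
  AddTrace(0x5678, 1.0);
  std::printf("bucket 0x1234 count: %g\n",
              table[0x1234 & (kNumBuckets - 1)]->count);
}
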
@@ -150,6 +150,6 @@ void StackTraceTable::Iterate(
}
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.h b/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.h
index a5a4a03636..bb6a1cc938 100644
--- a/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.h
+++ b/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.h
@@ -26,11 +26,11 @@
#include "tcmalloc/internal_malloc_extension.h"
#include "tcmalloc/malloc_extension.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
-class StackTraceTable final : public ProfileBase {
+class StackTraceTable final : public ProfileBase {
public:
// If merge is true, traces with identical size and stack are merged
// together. Else they are kept distinct.
@@ -90,8 +90,8 @@ class StackTraceTable final : public ProfileBase {
int num_buckets() const { return bucket_mask_ + 1; }
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_STACK_TRACE_TABLE_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/stack_trace_table_test.cc b/contrib/libs/tcmalloc/tcmalloc/stack_trace_table_test.cc
index 4579798906..f33b189d06 100644
--- a/contrib/libs/tcmalloc/tcmalloc/stack_trace_table_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/stack_trace_table_test.cc
@@ -30,7 +30,7 @@
#include "tcmalloc/static_vars.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
// Rather than deal with heap allocating stack/tags, AllocationEntry contains
@@ -385,5 +385,5 @@ TEST(StackTraceTableTest, StackTraceTable) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
diff --git a/contrib/libs/tcmalloc/tcmalloc/static_vars.cc b/contrib/libs/tcmalloc/tcmalloc/static_vars.cc
index 08a70de493..379880f200 100644
--- a/contrib/libs/tcmalloc/tcmalloc/static_vars.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/static_vars.cc
@@ -26,16 +26,16 @@
#include "tcmalloc/cpu_cache.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/mincore.h"
-#include "tcmalloc/internal/numa.h"
+#include "tcmalloc/internal/numa.h"
#include "tcmalloc/malloc_extension.h"
#include "tcmalloc/pagemap.h"
#include "tcmalloc/sampler.h"
#include "tcmalloc/thread_cache.h"
#include "tcmalloc/tracking.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// Cacheline-align our SizeMap and CPUCache. They both have very hot arrays as
// their first member variables, and aligning them reduces the number of cache
@@ -47,8 +47,8 @@ ABSL_CONST_INIT absl::base_internal::SpinLock pageheap_lock(
ABSL_CONST_INIT Arena Static::arena_;
ABSL_CONST_INIT SizeMap ABSL_CACHELINE_ALIGNED Static::sizemap_;
ABSL_CONST_INIT TransferCacheManager Static::transfer_cache_;
-ABSL_CONST_INIT ShardedTransferCacheManager Static::sharded_transfer_cache_;
-ABSL_CONST_INIT CPUCache ABSL_CACHELINE_ALIGNED Static::cpu_cache_;
+ABSL_CONST_INIT ShardedTransferCacheManager Static::sharded_transfer_cache_;
+ABSL_CONST_INIT CPUCache ABSL_CACHELINE_ALIGNED Static::cpu_cache_;
ABSL_CONST_INIT PageHeapAllocator<Span> Static::span_allocator_;
ABSL_CONST_INIT PageHeapAllocator<StackTrace> Static::stacktrace_allocator_;
ABSL_CONST_INIT PageHeapAllocator<ThreadCache> Static::threadcache_allocator_;
@@ -58,21 +58,21 @@ ABSL_CONST_INIT PeakHeapTracker Static::peak_heap_tracker_;
ABSL_CONST_INIT PageHeapAllocator<StackTraceTable::Bucket>
Static::bucket_allocator_;
ABSL_CONST_INIT std::atomic<bool> Static::inited_{false};
-ABSL_CONST_INIT bool Static::cpu_cache_active_ = false;
-ABSL_CONST_INIT bool Static::fork_support_enabled_ = false;
+ABSL_CONST_INIT bool Static::cpu_cache_active_ = false;
+ABSL_CONST_INIT bool Static::fork_support_enabled_ = false;
ABSL_CONST_INIT Static::CreateSampleUserDataCallback*
Static::create_sample_user_data_callback_ = nullptr;
ABSL_CONST_INIT Static::CopySampleUserDataCallback*
Static::copy_sample_user_data_callback_ = nullptr;
ABSL_CONST_INIT Static::DestroySampleUserDataCallback*
Static::destroy_sample_user_data_callback_ = nullptr;
-ABSL_CONST_INIT Static::PageAllocatorStorage Static::page_allocator_;
-ABSL_CONST_INIT PageMap Static::pagemap_;
+ABSL_CONST_INIT Static::PageAllocatorStorage Static::page_allocator_;
+ABSL_CONST_INIT PageMap Static::pagemap_;
ABSL_CONST_INIT absl::base_internal::SpinLock guarded_page_lock(
absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
ABSL_CONST_INIT GuardedPageAllocator Static::guardedpage_allocator_;
-ABSL_CONST_INIT NumaTopology<kNumaPartitions, kNumBaseClasses>
- Static::numa_topology_;
+ABSL_CONST_INIT NumaTopology<kNumaPartitions, kNumBaseClasses>
+ Static::numa_topology_;
size_t Static::metadata_bytes() {
// This is ugly and doesn't nicely account for e.g. alignment losses
@@ -80,14 +80,14 @@ size_t Static::metadata_bytes() {
// struct's size. But we can't due to linking issues.
const size_t static_var_size =
sizeof(pageheap_lock) + sizeof(arena_) + sizeof(sizemap_) +
- sizeof(sharded_transfer_cache_) + sizeof(transfer_cache_) +
- sizeof(cpu_cache_) + sizeof(span_allocator_) +
+ sizeof(sharded_transfer_cache_) + sizeof(transfer_cache_) +
+ sizeof(cpu_cache_) + sizeof(span_allocator_) +
sizeof(stacktrace_allocator_) + sizeof(threadcache_allocator_) +
sizeof(sampled_objects_) + sizeof(bucket_allocator_) +
sizeof(inited_) + sizeof(cpu_cache_active_) + sizeof(page_allocator_) +
sizeof(pagemap_) + sizeof(sampled_objects_size_) +
sizeof(peak_heap_tracker_) + sizeof(guarded_page_lock) +
- sizeof(guardedpage_allocator_) + sizeof(numa_topology_);
+ sizeof(guardedpage_allocator_) + sizeof(numa_topology_);
const size_t allocated = arena().bytes_allocated() +
AddressRegionFactory::InternalBytesAllocated();
@@ -107,7 +107,7 @@ ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE void Static::SlowInitIfNecessary() {
if (!inited_.load(std::memory_order_acquire)) {
tracking::Init();
sizemap_.Init();
- numa_topology_.Init();
+ numa_topology_.Init();
span_allocator_.Init(&arena_);
span_allocator_.New(); // Reduce cache conflicts
span_allocator_.New(); // Reduce cache conflicts
@@ -116,23 +116,23 @@ ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE void Static::SlowInitIfNecessary() {
// Do a bit of sanitizing: make sure central_cache is aligned properly
CHECK_CONDITION((sizeof(transfer_cache_) % ABSL_CACHELINE_SIZE) == 0);
transfer_cache_.Init();
- sharded_transfer_cache_.Init();
+ sharded_transfer_cache_.Init();
new (page_allocator_.memory) PageAllocator;
threadcache_allocator_.Init(&arena_);
cpu_cache_active_ = false;
pagemap_.MapRootWithSmallPages();
guardedpage_allocator_.Init(/*max_alloced_pages=*/64, /*total_pages=*/128);
inited_.store(true, std::memory_order_release);
-
- pageheap_lock.Unlock();
- pthread_atfork(
- TCMallocPreFork,
- TCMallocPostFork,
- TCMallocPostFork);
- pageheap_lock.Lock();
+
+ pageheap_lock.Unlock();
+ pthread_atfork(
+ TCMallocPreFork,
+ TCMallocPostFork,
+ TCMallocPostFork);
+ pageheap_lock.Lock();
}
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
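
SlowInitIfNecessary above registers TCMallocPreFork/TCMallocPostFork with pthread_atfork so the allocator's locks are acquired before fork() and released in both parent and child, preventing the child from inheriting a lock held by a thread that did not survive the fork. A generic sketch of the same pattern around a single mutex (not the tcmalloc handlers themselves):

#include <pthread.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

// Acquire before fork so no other thread holds the lock at the fork point.
static void PreFork() { pthread_mutex_lock(&lock); }
// Release in both parent and child so each process continues unlocked.
static void PostFork() { pthread_mutex_unlock(&lock); }

int main() {
  pthread_atfork(PreFork, PostFork, PostFork);
  pid_t pid = fork();
  if (pid == 0) {
    // Child: the lock is usable even though the parent's other threads
    // do not exist in this process.
    pthread_mutex_lock(&lock);
    puts("child took the lock");
    pthread_mutex_unlock(&lock);
    _exit(0);
  }
  waitpid(pid, nullptr, 0);
  pthread_mutex_lock(&lock);
  puts("parent took the lock");
  pthread_mutex_unlock(&lock);
}
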
diff --git a/contrib/libs/tcmalloc/tcmalloc/static_vars.h b/contrib/libs/tcmalloc/tcmalloc/static_vars.h
index be68edc189..da4d56075c 100644
--- a/contrib/libs/tcmalloc/tcmalloc/static_vars.h
+++ b/contrib/libs/tcmalloc/tcmalloc/static_vars.h
@@ -27,12 +27,12 @@
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "tcmalloc/arena.h"
-#include "tcmalloc/central_freelist.h"
+#include "tcmalloc/central_freelist.h"
#include "tcmalloc/common.h"
#include "tcmalloc/guarded_page_allocator.h"
#include "tcmalloc/internal/atomic_stats_counter.h"
#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/internal/numa.h"
+#include "tcmalloc/internal/numa.h"
#include "tcmalloc/internal/percpu.h"
#include "tcmalloc/page_allocator.h"
#include "tcmalloc/page_heap.h"
@@ -42,17 +42,17 @@
#include "tcmalloc/stack_trace_table.h"
#include "tcmalloc/transfer_cache.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
class CPUCache;
class PageMap;
class ThreadCache;
-void TCMallocPreFork();
-void TCMallocPostFork();
-
+void TCMallocPreFork();
+void TCMallocPostFork();
+
class Static {
public:
// True if InitIfNecessary() has run to completion.
@@ -61,29 +61,29 @@ class Static {
// Safe to call multiple times.
static void InitIfNecessary();
- // Central cache.
- static const CentralFreeList& central_freelist(int size_class) {
- return transfer_cache().central_freelist(size_class);
- }
+ // Central cache.
+ static const CentralFreeList& central_freelist(int size_class) {
+ return transfer_cache().central_freelist(size_class);
+ }
// Central cache -- an array of free-lists, one per size-class.
// We have a separate lock per free-list to reduce contention.
static TransferCacheManager& transfer_cache() { return transfer_cache_; }
- // A per-cache domain TransferCache.
- static ShardedTransferCacheManager& sharded_transfer_cache() {
- return sharded_transfer_cache_;
- }
-
+ // A per-cache domain TransferCache.
+ static ShardedTransferCacheManager& sharded_transfer_cache() {
+ return sharded_transfer_cache_;
+ }
+
static SizeMap& sizemap() { return sizemap_; }
static CPUCache& cpu_cache() { return cpu_cache_; }
static PeakHeapTracker& peak_heap_tracker() { return peak_heap_tracker_; }
- static NumaTopology<kNumaPartitions, kNumBaseClasses>& numa_topology() {
- return numa_topology_;
- }
-
+ static NumaTopology<kNumaPartitions, kNumBaseClasses>& numa_topology() {
+ return numa_topology_;
+ }
+
//////////////////////////////////////////////////////////////////////
// In addition to the explicit initialization comment, the variables below
// must be protected by pageheap_lock.
@@ -116,7 +116,7 @@ class Static {
// LossyAdd and reads do not require locking.
static SpanList sampled_objects_ ABSL_GUARDED_BY(pageheap_lock);
ABSL_CONST_INIT static tcmalloc_internal::StatsCounter sampled_objects_size_;
-
+
static PageHeapAllocator<StackTraceTable::Bucket>& bucket_allocator() {
return bucket_allocator_;
}
@@ -127,9 +127,9 @@ class Static {
static void ActivateCPUCache() { cpu_cache_active_ = true; }
static void DeactivateCPUCache() { cpu_cache_active_ = false; }
- static bool ForkSupportEnabled() { return fork_support_enabled_; }
- static void EnableForkSupport() { fork_support_enabled_ = true; }
-
+ static bool ForkSupportEnabled() { return fork_support_enabled_; }
+ static void EnableForkSupport() { fork_support_enabled_ = true; }
+
using CreateSampleUserDataCallback = void*();
using CopySampleUserDataCallback = void*(void*);
using DestroySampleUserDataCallback = void(void*);
@@ -194,7 +194,7 @@ class Static {
ABSL_CONST_INIT static Arena arena_;
static SizeMap sizemap_;
ABSL_CONST_INIT static TransferCacheManager transfer_cache_;
- ABSL_CONST_INIT static ShardedTransferCacheManager sharded_transfer_cache_;
+ ABSL_CONST_INIT static ShardedTransferCacheManager sharded_transfer_cache_;
static CPUCache cpu_cache_;
ABSL_CONST_INIT static GuardedPageAllocator guardedpage_allocator_;
static PageHeapAllocator<Span> span_allocator_;
@@ -203,20 +203,20 @@ class Static {
static PageHeapAllocator<StackTraceTable::Bucket> bucket_allocator_;
ABSL_CONST_INIT static std::atomic<bool> inited_;
static bool cpu_cache_active_;
- static bool fork_support_enabled_;
+ static bool fork_support_enabled_;
static CreateSampleUserDataCallback* create_sample_user_data_callback_;
static CopySampleUserDataCallback* copy_sample_user_data_callback_;
static DestroySampleUserDataCallback* destroy_sample_user_data_callback_;
ABSL_CONST_INIT static PeakHeapTracker peak_heap_tracker_;
- ABSL_CONST_INIT static NumaTopology<kNumaPartitions, kNumBaseClasses>
- numa_topology_;
+ ABSL_CONST_INIT static NumaTopology<kNumaPartitions, kNumBaseClasses>
+ numa_topology_;
// PageHeap uses a constructor for initialization. Like the members above,
// we can't depend on initialization order, so pageheap is new'd
// into this buffer.
union PageAllocatorStorage {
- constexpr PageAllocatorStorage() : extra(0) {}
-
+ constexpr PageAllocatorStorage() : extra(0) {}
+
char memory[sizeof(PageAllocator)];
uintptr_t extra; // To force alignment
};
@@ -255,8 +255,8 @@ inline void Span::Delete(Span* span) {
Static::span_allocator().Delete(span);
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_STATIC_VARS_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/stats.cc b/contrib/libs/tcmalloc/tcmalloc/stats.cc
index bb553ee5cd..c056501c93 100644
--- a/contrib/libs/tcmalloc/tcmalloc/stats.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/stats.cc
@@ -26,7 +26,7 @@
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/macros.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "tcmalloc/common.h"
@@ -35,9 +35,9 @@
#include "tcmalloc/internal/util.h"
#include "tcmalloc/pages.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
static double BytesToMiB(size_t bytes) {
const double MiB = 1048576.0;
@@ -49,8 +49,8 @@ static double PagesToMiB(uint64_t pages) {
}
// For example, PrintRightAdjustedWithPrefix(out, ">=", 42, 6) prints " >=42".
-static void PrintRightAdjustedWithPrefix(Printer *out, const char *prefix,
- Length num, int width) {
+static void PrintRightAdjustedWithPrefix(Printer *out, const char *prefix,
+ Length num, int width) {
width -= strlen(prefix);
int num_tmp = num.raw_num();
for (int i = 0; i < width - 1; i++) {
@@ -62,9 +62,9 @@ static void PrintRightAdjustedWithPrefix(Printer *out, const char *prefix,
out->printf("%s%zu", prefix, num.raw_num());
}
-void PrintStats(const char *label, Printer *out, const BackingStats &backing,
- const SmallSpanStats &small, const LargeSpanStats &large,
- bool everything) {
+void PrintStats(const char *label, Printer *out, const BackingStats &backing,
+ const SmallSpanStats &small, const LargeSpanStats &large,
+ bool everything) {
size_t nonempty_sizes = 0;
for (int i = 0; i < kMaxPages.raw_num(); ++i) {
const size_t norm = small.normal_length[i];
@@ -275,7 +275,7 @@ void PageAgeHistograms::Histogram::Record(Length pages, double age) {
total_age_ += pages.raw_num() * age;
}
-void PageAgeHistograms::Print(const char *label, Printer *out) const {
+void PageAgeHistograms::Print(const char *label, Printer *out) const {
out->printf("------------------------------------------------\n");
out->printf(
"%s cache entry age (count of pages in spans of "
@@ -295,8 +295,8 @@ void PageAgeHistograms::Print(const char *label, Printer *out) const {
returned_.Print("Unmapped span", out);
}
-static void PrintLineHeader(Printer *out, const char *kind, const char *prefix,
- Length num) {
+static void PrintLineHeader(Printer *out, const char *kind, const char *prefix,
+ Length num) {
// Print the beginning of the line, e.g. "Live span, >=128 pages: ". The
// span size ("128" in the example) is padded such that it plus the span
// prefix ("Live") plus the span size prefix (">=") is kHeaderExtraChars wide.
@@ -309,7 +309,7 @@ static void PrintLineHeader(Printer *out, const char *kind, const char *prefix,
}
void PageAgeHistograms::PerSizeHistograms::Print(const char *kind,
- Printer *out) const {
+ Printer *out) const {
out->printf("%-15s TOTAL PAGES: ", kind);
total.Print(out);
@@ -326,7 +326,7 @@ void PageAgeHistograms::PerSizeHistograms::Print(const char *kind,
}
}
-void PageAgeHistograms::Histogram::Print(Printer *out) const {
+void PageAgeHistograms::Histogram::Print(Printer *out) const {
const double mean = avg_age();
out->printf(" %7.1f", mean);
for (int b = 0; b < kNumBuckets; ++b) {
@@ -336,7 +336,7 @@ void PageAgeHistograms::Histogram::Print(Printer *out) const {
out->printf("\n");
}
-void PageAllocInfo::Print(Printer *out) const {
+void PageAllocInfo::Print(Printer *out) const {
int64_t ticks = TimeTicks();
double hz = freq_ / ticks;
out->printf("%s: stats on allocation sizes\n", label_);
@@ -443,7 +443,7 @@ void PageAllocInfo::RecordAlloc(PageId p, Length n) {
} else {
Length slack = RoundUp(n, kPagesPerHugePage) - n;
total_slack_ += slack;
- size_t i = absl::bit_width(n.raw_num() - 1);
+ size_t i = absl::bit_width(n.raw_num() - 1);
large_[i].Alloc(n);
}
}
@@ -460,7 +460,7 @@ void PageAllocInfo::RecordFree(PageId p, Length n) {
} else {
Length slack = RoundUp(n, kPagesPerHugePage) - n;
total_slack_ -= slack;
- size_t i = absl::bit_width(n.raw_num() - 1);
+ size_t i = absl::bit_width(n.raw_num() - 1);
large_[i].Free(n);
}
}
@@ -476,7 +476,7 @@ const PageAllocInfo::Counts &PageAllocInfo::counts_for(Length n) const {
if (n <= kMaxPages) {
return small_[n.raw_num() - 1];
}
- size_t i = absl::bit_width(n.raw_num() - 1);
+ size_t i = absl::bit_width(n.raw_num() - 1);
return large_[i];
}
@@ -548,6 +548,6 @@ int64_t PageAllocInfo::TimeTicks() const {
return absl::base_internal::CycleClock::Now() - baseline_ticks_;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
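Editor's note on the stats.cc hunks above: the repeated `absl::bit_width(n.raw_num() - 1)` lines are how PageAllocInfo buckets large allocations, so every request between 2^(k-1)+1 and 2^k pages lands in bucket k. A minimal sketch of that mapping, using the equivalent std::bit_width from C++20 (the function name here is illustrative, not the allocator's):

#include <bit>
#include <cstddef>
#include <cstdio>

// Maps a large allocation of `pages` (pages > 1) to its power-of-two bucket,
// mirroring `absl::bit_width(n.raw_num() - 1)` in PageAllocInfo above.
size_t LargeSizeBucket(size_t pages) {
  return static_cast<size_t>(std::bit_width(pages - 1));
}

int main() {
  for (size_t pages : {size_t{9}, size_t{16}, size_t{17}, size_t{100}}) {
    // 9..16 pages -> bucket 4, 17..32 -> bucket 5, 65..128 -> bucket 7.
    std::printf("%3zu pages -> bucket %zu\n", pages, LargeSizeBucket(pages));
  }
  return 0;
}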
diff --git a/contrib/libs/tcmalloc/tcmalloc/stats.h b/contrib/libs/tcmalloc/tcmalloc/stats.h
index 19070d867d..348077a063 100644
--- a/contrib/libs/tcmalloc/tcmalloc/stats.h
+++ b/contrib/libs/tcmalloc/tcmalloc/stats.h
@@ -24,9 +24,9 @@
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/pages.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
struct BackingStats {
BackingStats() : system_bytes(0), free_bytes(0), unmapped_bytes(0) {}
@@ -85,9 +85,9 @@ inline LargeSpanStats operator+(LargeSpanStats lhs, LargeSpanStats rhs) {
return lhs += rhs;
}
-void PrintStats(const char* label, Printer* out, const BackingStats& backing,
- const SmallSpanStats& small, const LargeSpanStats& large,
- bool everything);
+void PrintStats(const char* label, Printer* out, const BackingStats& backing,
+ const SmallSpanStats& small, const LargeSpanStats& large,
+ bool everything);
class PageAgeHistograms {
public:
@@ -99,7 +99,7 @@ class PageAgeHistograms {
// changed.
void RecordRange(Length pages, bool released, int64_t when);
- void Print(const char* label, Printer* out) const;
+ void Print(const char* label, Printer* out) const;
static constexpr size_t kNumBuckets = 7;
static constexpr size_t kNumSizes = 64;
@@ -108,7 +108,7 @@ class PageAgeHistograms {
class Histogram {
public:
void Record(Length pages, double age);
- void Print(Printer* out) const;
+ void Print(Printer* out) const;
uint32_t pages_in_bucket(size_t i) const { return buckets_[i]; }
@@ -158,7 +158,7 @@ class PageAgeHistograms {
private:
struct PerSizeHistograms {
void Record(Length pages, double age);
- void Print(const char* kind, Printer* out) const;
+ void Print(const char* kind, Printer* out) const;
Histogram* GetSmall(Length n) {
CHECK_CONDITION(n.raw_num() < kNumSizes);
@@ -204,7 +204,7 @@ class PageAllocInfo {
void RecordFree(PageId p, Length n);
void RecordRelease(Length n, Length got);
// And invoking this in their Print() implementation.
- void Print(Printer* out) const;
+ void Print(Printer* out) const;
void PrintInPbtxt(PbtxtRegion* region, absl::string_view stat_name) const;
// Total size of allocations < 1 MiB
@@ -264,8 +264,8 @@ class PageAllocInfo {
void LogRelease(int64_t when, Length n) { Write(when, 2, PageId{0}, n); }
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_STATS_H_
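Editor's note on the stats.h hunk above: the `operator+` shown for LargeSpanStats follows the usual pass-lhs-by-value, delegate-to-`operator+=` idiom, so the sum reuses the compound-assignment logic and returns a fresh value. A standalone illustration with a hypothetical two-field stats struct (all names invented for the example):

#include <cstdio>

struct TinyStats {  // hypothetical stand-in for LargeSpanStats
  long spans = 0;
  long pages = 0;
  TinyStats& operator+=(const TinyStats& rhs) {
    spans += rhs.spans;
    pages += rhs.pages;
    return *this;
  }
};

// lhs is taken by value, so `lhs += rhs` mutates the copy, which is returned.
inline TinyStats operator+(TinyStats lhs, const TinyStats& rhs) {
  return lhs += rhs;
}

int main() {
  TinyStats a{1, 16}, b{2, 64};
  TinyStats c = a + b;
  std::printf("spans=%ld pages=%ld\n", c.spans, c.pages);  // spans=3 pages=80
  return 0;
}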
diff --git a/contrib/libs/tcmalloc/tcmalloc/stats_test.cc b/contrib/libs/tcmalloc/tcmalloc/stats_test.cc
index 733fcc9534..f79e81246f 100644
--- a/contrib/libs/tcmalloc/tcmalloc/stats_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/stats_test.cc
@@ -24,7 +24,7 @@
#include "tcmalloc/huge_pages.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class PrintTest : public ::testing::Test {
@@ -34,8 +34,8 @@ class PrintTest : public ::testing::Test {
void ExpectStats(const BackingStats &back, const SmallSpanStats &small,
const LargeSpanStats &large, const std::string &expected) {
- Printer out(&buf_[0], kBufferSize);
- PrintStats("PrintTest", &out, back, small, large, true);
+ Printer out(&buf_[0], kBufferSize);
+ PrintStats("PrintTest", &out, back, small, large, true);
EXPECT_EQ(expected, buf_);
}
@@ -93,8 +93,8 @@ class AgeTest : public testing::Test {
return kNow - freq * age;
}
- void ExpectAges(const PageAgeHistograms &ages, const std::string &expected) {
- Printer out(&buf_[0], kBufferSize);
+ void ExpectAges(const PageAgeHistograms &ages, const std::string &expected) {
+ Printer out(&buf_[0], kBufferSize);
ages.Print("AgeTest", &out);
std::string got = buf_;
EXPECT_EQ(expected, got);
@@ -102,7 +102,7 @@ class AgeTest : public testing::Test {
};
TEST_F(AgeTest, Basic) {
- PageAgeHistograms ages(kNow);
+ PageAgeHistograms ages(kNow);
ages.RecordRange(Length(1), false, WhenForAge(0.5));
ages.RecordRange(Length(1), false, WhenForAge(1.2));
ages.RecordRange(Length(1), false, WhenForAge(3.7));
@@ -134,7 +134,7 @@ Unmapped span, >=64 pages: 600.0 0 0 0 200 0
}
TEST_F(AgeTest, Overflow) {
- PageAgeHistograms ages(kNow);
+ PageAgeHistograms ages(kNow);
const Length too_big = Length(4 * (std::numeric_limits<uint32_t>::max() / 5));
ages.RecordRange(too_big, false, WhenForAge(0.5));
ages.RecordRange(too_big, false, WhenForAge(0.5));
@@ -155,8 +155,8 @@ Unmapped span TOTAL PAGES: 0.0 0 0 0 0 0
}
TEST_F(AgeTest, ManySizes) {
- PageAgeHistograms ages(kNow);
- const Length N = PageAgeHistograms::kLargeSize;
+ PageAgeHistograms ages(kNow);
+ const Length N = PageAgeHistograms::kLargeSize;
for (auto i = Length(1); i <= N; ++i) {
ages.RecordRange(i, false, WhenForAge(i.raw_num() * 3));
}
@@ -264,5 +264,5 @@ TEST(ClockTest, ClockTicks) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
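Editor's note on the stats_test.cc hunks above: `WhenForAge()` (`kNow - freq * age`) is simply the inverse of how PageAgeHistograms turns a recorded cycle-clock timestamp back into an age in seconds. A small arithmetic sketch of that conversion, with an assumed 1 GHz tick rate:

#include <cstdint>
#include <cstdio>

// An event recorded at `when` ticks is (now - when) / ticks_per_second old.
double AgeInSeconds(int64_t now, int64_t when, double ticks_per_second) {
  return static_cast<double>(now - when) / ticks_per_second;
}

int main() {
  const double freq = 1e9;  // assumed 1 GHz cycle clock
  const int64_t now = 5'000'000'000;
  const int64_t when = now - static_cast<int64_t>(freq * 3.7);   // "3.7 s ago"
  std::printf("age = %.1f s\n", AgeInSeconds(now, when, freq));  // prints 3.7
  return 0;
}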
diff --git a/contrib/libs/tcmalloc/tcmalloc/system-alloc.cc b/contrib/libs/tcmalloc/tcmalloc/system-alloc.cc
index b079c9c966..61854abdcf 100644
--- a/contrib/libs/tcmalloc/tcmalloc/system-alloc.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/system-alloc.cc
@@ -14,12 +14,12 @@
#include "tcmalloc/system-alloc.h"
-#include <asm/unistd.h>
+#include <asm/unistd.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
-#include <sys/syscall.h>
+#include <sys/syscall.h>
#include <unistd.h>
#include <algorithm>
@@ -34,7 +34,7 @@
#include "absl/base/internal/spinlock.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
-#include "absl/types/optional.h"
+#include "absl/types/optional.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/optimization.h"
@@ -54,13 +54,13 @@
extern "C" int madvise(caddr_t, size_t, int);
#endif
-#ifdef __linux__
-#include <linux/mempolicy.h>
-#endif
-
-GOOGLE_MALLOC_SECTION_BEGIN
+#ifdef __linux__
+#include <linux/mempolicy.h>
+#endif
+
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
@@ -100,7 +100,7 @@ AddressRegionFactory* region_factory = nullptr;
// Rounds size down to a multiple of alignment.
size_t RoundDown(const size_t size, const size_t alignment) {
// Checks that the alignment has only one bit set.
- ASSERT(absl::has_single_bit(alignment));
+ ASSERT(absl::has_single_bit(alignment));
return (size) & ~(alignment - 1);
}
@@ -114,7 +114,7 @@ class MmapRegion final : public AddressRegion {
MmapRegion(uintptr_t start, size_t size, AddressRegionFactory::UsageHint hint)
: start_(start), free_size_(size), hint_(hint) {}
std::pair<void*, size_t> Alloc(size_t size, size_t alignment) override;
- ~MmapRegion() override = default;
+ ~MmapRegion() override = default;
private:
const uintptr_t start_;
@@ -127,7 +127,7 @@ class MmapRegionFactory final : public AddressRegionFactory {
AddressRegion* Create(void* start, size_t size, UsageHint hint) override;
size_t GetStats(absl::Span<char> buffer) override;
size_t GetStatsInPbtxt(absl::Span<char> buffer) override;
- ~MmapRegionFactory() override = default;
+ ~MmapRegionFactory() override = default;
private:
std::atomic<size_t> bytes_reserved_{0};
@@ -140,7 +140,7 @@ class RegionManager {
std::pair<void*, size_t> Alloc(size_t size, size_t alignment, MemoryTag tag);
void DiscardMappedRegions() {
- std::fill(normal_region_.begin(), normal_region_.end(), nullptr);
+ std::fill(normal_region_.begin(), normal_region_.end(), nullptr);
sampled_region_ = nullptr;
}
@@ -151,7 +151,7 @@ class RegionManager {
std::pair<void*, size_t> Allocate(size_t size, size_t alignment,
MemoryTag tag);
- std::array<AddressRegion*, kNumaPartitions> normal_region_{{nullptr}};
+ std::array<AddressRegion*, kNumaPartitions> normal_region_{{nullptr}};
AddressRegion* sampled_region_{nullptr};
};
std::aligned_storage<sizeof(RegionManager), alignof(RegionManager)>::type
@@ -198,7 +198,7 @@ AddressRegion* MmapRegionFactory::Create(void* start, size_t size,
}
size_t MmapRegionFactory::GetStats(absl::Span<char> buffer) {
- Printer printer(buffer.data(), buffer.size());
+ Printer printer(buffer.data(), buffer.size());
size_t allocated = bytes_reserved_.load(std::memory_order_relaxed);
constexpr double MiB = 1048576.0;
printer.printf("MmapSysAllocator: %zu bytes (%.1f MiB) reserved\n", allocated,
@@ -208,7 +208,7 @@ size_t MmapRegionFactory::GetStats(absl::Span<char> buffer) {
}
size_t MmapRegionFactory::GetStatsInPbtxt(absl::Span<char> buffer) {
- Printer printer(buffer.data(), buffer.size());
+ Printer printer(buffer.data(), buffer.size());
size_t allocated = bytes_reserved_.load(std::memory_order_relaxed);
printer.printf("mmap_sys_allocator: %lld\n", allocated);
@@ -219,7 +219,7 @@ static AddressRegionFactory::UsageHint TagToHint(MemoryTag tag) {
using UsageHint = AddressRegionFactory::UsageHint;
switch (tag) {
case MemoryTag::kNormal:
- case MemoryTag::kNormalP1:
+ case MemoryTag::kNormalP1:
return UsageHint::kNormal;
break;
case MemoryTag::kSampled:
@@ -275,9 +275,9 @@ std::pair<void*, size_t> RegionManager::Allocate(size_t size, size_t alignment,
AddressRegion*& region = *[&]() {
switch (tag) {
case MemoryTag::kNormal:
- return &normal_region_[0];
- case MemoryTag::kNormalP1:
- return &normal_region_[1];
+ return &normal_region_[0];
+ case MemoryTag::kNormalP1:
+ return &normal_region_[1];
case MemoryTag::kSampled:
return &sampled_region_;
default:
@@ -318,50 +318,50 @@ void InitSystemAllocatorIfNecessary() {
region_factory = new (&mmap_space) MmapRegionFactory();
}
-// Bind the memory region spanning `size` bytes starting from `base` to NUMA
-// nodes assigned to `partition`. If the underlying mbind() call fails, logs a
-// warning in advisory bind mode or crashes in strict mode.
-void BindMemory(void* const base, const size_t size, const size_t partition) {
- auto& topology = Static::numa_topology();
-
- // If NUMA awareness is unavailable or disabled, or the user requested that
- // we don't bind memory then do nothing.
- const NumaBindMode bind_mode = topology.bind_mode();
- if (!topology.numa_aware() || bind_mode == NumaBindMode::kNone) {
- return;
- }
-
- const uint64_t nodemask = topology.GetPartitionNodes(partition);
- int err =
- syscall(__NR_mbind, base, size, MPOL_BIND | MPOL_F_STATIC_NODES,
- &nodemask, sizeof(nodemask) * 8, MPOL_MF_STRICT | MPOL_MF_MOVE);
- if (err == 0) {
- return;
- }
-
- if (bind_mode == NumaBindMode::kAdvisory) {
- Log(kLogWithStack, __FILE__, __LINE__, "Warning: Unable to mbind memory",
- err, base, nodemask);
- return;
- }
-
- ASSERT(bind_mode == NumaBindMode::kStrict);
- Crash(kCrash, __FILE__, __LINE__, "Unable to mbind memory", err, base,
- nodemask);
-}
-
+// Bind the memory region spanning `size` bytes starting from `base` to NUMA
+// nodes assigned to `partition`. If the underlying mbind() call fails, logs a
+// warning in advisory bind mode or crashes in strict mode.
+void BindMemory(void* const base, const size_t size, const size_t partition) {
+ auto& topology = Static::numa_topology();
+
+ // If NUMA awareness is unavailable or disabled, or the user requested that
+ // we don't bind memory then do nothing.
+ const NumaBindMode bind_mode = topology.bind_mode();
+ if (!topology.numa_aware() || bind_mode == NumaBindMode::kNone) {
+ return;
+ }
+
+ const uint64_t nodemask = topology.GetPartitionNodes(partition);
+ int err =
+ syscall(__NR_mbind, base, size, MPOL_BIND | MPOL_F_STATIC_NODES,
+ &nodemask, sizeof(nodemask) * 8, MPOL_MF_STRICT | MPOL_MF_MOVE);
+ if (err == 0) {
+ return;
+ }
+
+ if (bind_mode == NumaBindMode::kAdvisory) {
+ Log(kLogWithStack, __FILE__, __LINE__, "Warning: Unable to mbind memory",
+ err, base, nodemask);
+ return;
+ }
+
+ ASSERT(bind_mode == NumaBindMode::kStrict);
+ Crash(kCrash, __FILE__, __LINE__, "Unable to mbind memory", err, base,
+ nodemask);
+}
+
ABSL_CONST_INIT std::atomic<int> system_release_errors = ATOMIC_VAR_INIT(0);
} // namespace
-void AcquireSystemAllocLock() {
- spinlock.Lock();
-}
-
-void ReleaseSystemAllocLock() {
- spinlock.Unlock();
-}
-
+void AcquireSystemAllocLock() {
+ spinlock.Lock();
+}
+
+void ReleaseSystemAllocLock() {
+ spinlock.Unlock();
+}
+
void* SystemAlloc(size_t bytes, size_t* actual_bytes, size_t alignment,
const MemoryTag tag) {
// If default alignment is set request the minimum alignment provided by
@@ -386,7 +386,7 @@ void* SystemAlloc(size_t bytes, size_t* actual_bytes, size_t alignment,
if (result != nullptr) {
CheckAddressBits<kAddressBits>(reinterpret_cast<uintptr_t>(result) +
*actual_bytes - 1);
- ASSERT(GetMemoryTag(result) == tag);
+ ASSERT(GetMemoryTag(result) == tag);
}
return result;
}
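Editor's note: the `BindMemory()` routine restored above drives NUMA placement with a raw mbind(2) syscall rather than libnuma. A stripped-down, Linux-only sketch of the same invocation, binding a fresh anonymous mapping to NUMA node 0 (node 0 is assumed to exist; error handling reduced to a printf):

#include <linux/mempolicy.h>  // MPOL_BIND, MPOL_F_STATIC_NODES, MPOL_MF_*
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <cstdint>
#include <cstdio>

int main() {
  const size_t len = 2 << 20;  // 2 MiB
  void* base = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return 1;

  // Same flag combination as BindMemory() above; nodemask bit i selects node i.
  uint64_t nodemask = 1;  // node 0
  long err = syscall(__NR_mbind, base, len, MPOL_BIND | MPOL_F_STATIC_NODES,
                     &nodemask, sizeof(nodemask) * 8,
                     MPOL_MF_STRICT | MPOL_MF_MOVE);
  std::printf("mbind -> %ld\n", err);  // 0 on success, -1 with errno set on failure
  munmap(base, len);
  return 0;
}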
@@ -541,7 +541,7 @@ static uintptr_t RandomMmapHint(size_t size, size_t alignment,
// Ensure alignment >= size so we're guaranteed the full mapping has the same
// tag.
- alignment = absl::bit_ceil(std::max(alignment, size));
+ alignment = absl::bit_ceil(std::max(alignment, size));
rnd = Sampler::NextRandom(rnd);
uintptr_t addr = rnd & kAddrMask & ~(alignment - 1) & ~kTagMask;
@@ -555,19 +555,19 @@ void* MmapAligned(size_t size, size_t alignment, const MemoryTag tag) {
ASSERT(alignment <= kTagMask);
static uintptr_t next_sampled_addr = 0;
- static std::array<uintptr_t, kNumaPartitions> next_normal_addr = {0};
+ static std::array<uintptr_t, kNumaPartitions> next_normal_addr = {0};
- absl::optional<int> numa_partition;
+ absl::optional<int> numa_partition;
uintptr_t& next_addr = *[&]() {
switch (tag) {
case MemoryTag::kSampled:
return &next_sampled_addr;
- case MemoryTag::kNormalP0:
- numa_partition = 0;
- return &next_normal_addr[0];
- case MemoryTag::kNormalP1:
- numa_partition = 1;
- return &next_normal_addr[1];
+ case MemoryTag::kNormalP0:
+ numa_partition = 0;
+ return &next_normal_addr[0];
+ case MemoryTag::kNormalP1:
+ numa_partition = 1;
+ return &next_normal_addr[1];
default:
ASSUME(false);
__builtin_unreachable();
@@ -579,17 +579,17 @@ void* MmapAligned(size_t size, size_t alignment, const MemoryTag tag) {
GetMemoryTag(reinterpret_cast<void*>(next_addr + size - 1)) != tag) {
next_addr = RandomMmapHint(size, alignment, tag);
}
- void* hint;
+ void* hint;
for (int i = 0; i < 1000; ++i) {
- hint = reinterpret_cast<void*>(next_addr);
+ hint = reinterpret_cast<void*>(next_addr);
ASSERT(GetMemoryTag(hint) == tag);
// TODO(b/140190055): Use MAP_FIXED_NOREPLACE once available.
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (result == hint) {
- if (numa_partition.has_value()) {
- BindMemory(result, size, *numa_partition);
- }
+ if (numa_partition.has_value()) {
+ BindMemory(result, size, *numa_partition);
+ }
// Attempt to keep the next mmap contiguous in the common case.
next_addr += size;
CHECK_CONDITION(kAddressBits == std::numeric_limits<uintptr_t>::digits ||
@@ -612,12 +612,12 @@ void* MmapAligned(size_t size, size_t alignment, const MemoryTag tag) {
}
Log(kLogWithStack, __FILE__, __LINE__,
- "MmapAligned() failed - unable to allocate with tag (hint, size, "
- "alignment) - is something limiting address placement?",
- hint, size, alignment);
+ "MmapAligned() failed - unable to allocate with tag (hint, size, "
+ "alignment) - is something limiting address placement?",
+ hint, size, alignment);
return nullptr;
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
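Editor's note: the `absl::bit_ceil(std::max(alignment, size))` line in `RandomMmapHint()` above is what the nearby comment means by "alignment >= size": once the hint is aligned to a power of two at least as large as the mapping, the first and last byte share every address bit above that alignment, so a memory tag encoded in the high bits is uniform across the mapping. A tiny demonstration with std::bit_ceil, assuming a 64-bit address space (the hint value is arbitrary):

#include <algorithm>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t size = 48 << 10;                          // 48 KiB request
  size_t alignment = 4 << 10;                            // page alignment
  alignment = std::bit_ceil(std::max(alignment, size));  // rounds up to 64 KiB
  const uintptr_t hint = uintptr_t{0x7f1234560000} & ~uintptr_t{alignment - 1};
  // hint and hint + size - 1 agree on all bits above log2(alignment).
  std::printf("alignment=%zu first=%#zx last=%#zx\n", alignment,
              static_cast<size_t>(hint), static_cast<size_t>(hint + size - 1));
  return 0;
}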
diff --git a/contrib/libs/tcmalloc/tcmalloc/system-alloc.h b/contrib/libs/tcmalloc/tcmalloc/system-alloc.h
index 3d1e7fd60b..a38192c233 100644
--- a/contrib/libs/tcmalloc/tcmalloc/system-alloc.h
+++ b/contrib/libs/tcmalloc/tcmalloc/system-alloc.h
@@ -23,9 +23,9 @@
#include "tcmalloc/common.h"
#include "tcmalloc/malloc_extension.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// REQUIRES: "alignment" is a power of two or "0" to indicate default alignment
// REQUIRES: "alignment" and "size" <= kTagMask
@@ -50,9 +50,9 @@ void *SystemAlloc(size_t bytes, size_t *actual_bytes, size_t alignment,
// call to SystemRelease.
int SystemReleaseErrors();
-void AcquireSystemAllocLock();
-void ReleaseSystemAllocLock();
-
+void AcquireSystemAllocLock();
+void ReleaseSystemAllocLock();
+
// This call is a hint to the operating system that the pages
// contained in the specified range of memory will not be used for a
// while, and can be released for use by other processes or the OS.
@@ -84,8 +84,8 @@ void SetRegionFactory(AddressRegionFactory *factory);
// REQUIRES: size <= kTagMask
void *MmapAligned(size_t size, size_t alignment, MemoryTag tag);
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_SYSTEM_ALLOC_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/system-alloc_test.cc b/contrib/libs/tcmalloc/tcmalloc/system-alloc_test.cc
index 496bd048ee..c52bd569d9 100644
--- a/contrib/libs/tcmalloc/tcmalloc/system-alloc_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/system-alloc_test.cc
@@ -25,13 +25,13 @@
#include "gtest/gtest.h"
#include "absl/strings/str_format.h"
-#include "benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/malloc_extension.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
class MmapAlignedTest : public testing::TestWithParam<size_t> {
@@ -42,12 +42,12 @@ class MmapAlignedTest : public testing::TestWithParam<size_t> {
for (MemoryTag tag : {MemoryTag::kNormal, MemoryTag::kSampled}) {
SCOPED_TRACE(static_cast<unsigned int>(tag));
- void* p = MmapAligned(size, alignment, tag);
+ void* p = MmapAligned(size, alignment, tag);
EXPECT_NE(p, nullptr);
EXPECT_EQ(reinterpret_cast<uintptr_t>(p) % alignment, 0);
- EXPECT_EQ(IsTaggedMemory(p), tag == MemoryTag::kSampled);
- EXPECT_EQ(GetMemoryTag(p), tag);
- EXPECT_EQ(GetMemoryTag(static_cast<char*>(p) + size - 1), tag);
+ EXPECT_EQ(IsTaggedMemory(p), tag == MemoryTag::kSampled);
+ EXPECT_EQ(GetMemoryTag(p), tag);
+ EXPECT_EQ(GetMemoryTag(static_cast<char*>(p) + size - 1), tag);
EXPECT_EQ(munmap(p, size), 0);
}
}
@@ -107,11 +107,11 @@ TEST(Basic, InvokedTest) {
MallocExtension::SetRegionFactory(&f);
// An allocation size that is likely to trigger the system allocator.
- void* ptr = ::operator new(kMinSystemAlloc);
- // TODO(b/183453911): Remove workaround for GCC 10.x deleting operator new,
- // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94295.
- benchmark::DoNotOptimize(ptr);
- ::operator delete(ptr);
+ void* ptr = ::operator new(kMinSystemAlloc);
+ // TODO(b/183453911): Remove workaround for GCC 10.x deleting operator new,
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94295.
+ benchmark::DoNotOptimize(ptr);
+ ::operator delete(ptr);
// Make sure that our allocator was invoked.
ASSERT_TRUE(simple_region_alloc_invoked);
@@ -143,5 +143,5 @@ TEST(Basic, RetryFailTest) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
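Editor's note on the system-alloc_test.cc hunk above: the `benchmark::DoNotOptimize(ptr)` call added to InvokedTest exists only to defeat GCC 10's allocation elision (see the linked bug); without it the paired operator new/delete can be optimized away and the custom region factory is never exercised. The pattern in isolation (the 1 MiB size is an arbitrary stand-in for kMinSystemAlloc):

#include "benchmark/benchmark.h"

int main() {
  // Large enough that the request plausibly reaches the system allocator.
  void* p = ::operator new(1 << 20);
  benchmark::DoNotOptimize(p);  // keeps the allocation observable to the optimizer
  ::operator delete(p);
  return 0;
}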
diff --git a/contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc b/contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc
index 8e62ba91b9..75ef562f2c 100644
--- a/contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc
@@ -81,7 +81,7 @@
#include "absl/base/thread_annotations.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/strip.h"
@@ -115,13 +115,13 @@
#include "tcmalloc/transfer_cache.h"
#include "tcmalloc/transfer_cache_stats.h"
-#if defined(TCMALLOC_HAVE_STRUCT_MALLINFO)
+#if defined(TCMALLOC_HAVE_STRUCT_MALLINFO)
#include <malloc.h>
#endif
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
// ----------------------- IMPLEMENTATION -------------------------------
@@ -131,7 +131,7 @@ struct TCMallocStats {
uint64_t central_bytes; // Bytes in central cache
uint64_t transfer_bytes; // Bytes in central transfer cache
uint64_t metadata_bytes; // Bytes alloced for metadata
- uint64_t sharded_transfer_bytes; // Bytes in per-CCX cache
+ uint64_t sharded_transfer_bytes; // Bytes in per-CCX cache
uint64_t per_cpu_bytes; // Bytes in per-CPU cache
uint64_t pagemap_root_bytes_res; // Resident bytes of pagemap root node
uint64_t percpu_metadata_bytes_res; // Resident bytes of the per-CPU metadata
@@ -141,10 +141,10 @@ struct TCMallocStats {
AllocatorStats bucket_stats; // StackTraceTable::Bucket objects
size_t pagemap_bytes; // included in metadata bytes
size_t percpu_metadata_bytes; // included in metadata bytes
- BackingStats pageheap; // Stats from page heap
-
- // Explicitly declare the ctor to put it in the google_malloc section.
- TCMallocStats() = default;
+ BackingStats pageheap; // Stats from page heap
+
+ // Explicitly declare the ctor to put it in the google_malloc section.
+ TCMallocStats() = default;
};
// Get stats into "r". Also, if class_count != NULL, class_count[k]
@@ -155,15 +155,15 @@ struct TCMallocStats {
// should be captured or not. Residence info requires a potentially
// costly OS call, and is not necessary in all situations.
static void ExtractStats(TCMallocStats* r, uint64_t* class_count,
- SpanStats* span_stats, SmallSpanStats* small_spans,
- LargeSpanStats* large_spans,
- TransferCacheStats* tc_stats, bool report_residence) {
+ SpanStats* span_stats, SmallSpanStats* small_spans,
+ LargeSpanStats* large_spans,
+ TransferCacheStats* tc_stats, bool report_residence) {
r->central_bytes = 0;
r->transfer_bytes = 0;
for (int cl = 0; cl < kNumClasses; ++cl) {
- const size_t length = Static::central_freelist(cl).length();
+ const size_t length = Static::central_freelist(cl).length();
const size_t tc_length = Static::transfer_cache().tc_length(cl);
- const size_t cache_overhead = Static::central_freelist(cl).OverheadBytes();
+ const size_t cache_overhead = Static::central_freelist(cl).OverheadBytes();
const size_t size = Static::sizemap().class_to_size(cl);
r->central_bytes += (size * length) + cache_overhead;
r->transfer_bytes += (size * tc_length);
@@ -171,12 +171,12 @@ static void ExtractStats(TCMallocStats* r, uint64_t* class_count,
// Sum the lengths of all per-class freelists, except the per-thread
// freelists, which get counted when we call GetThreadStats(), below.
class_count[cl] = length + tc_length;
- if (UsePerCpuCache()) {
+ if (UsePerCpuCache()) {
class_count[cl] += Static::cpu_cache().TotalObjectsOfClass(cl);
}
}
if (span_stats) {
- span_stats[cl] = Static::central_freelist(cl).GetSpanStats();
+ span_stats[cl] = Static::central_freelist(cl).GetSpanStats();
}
if (tc_stats) {
tc_stats[cl] = Static::transfer_cache().GetHitRateStats(cl);
@@ -215,12 +215,12 @@ static void ExtractStats(TCMallocStats* r, uint64_t* class_count,
}
r->per_cpu_bytes = 0;
- r->sharded_transfer_bytes = 0;
+ r->sharded_transfer_bytes = 0;
r->percpu_metadata_bytes_res = 0;
r->percpu_metadata_bytes = 0;
- if (UsePerCpuCache()) {
+ if (UsePerCpuCache()) {
r->per_cpu_bytes = Static::cpu_cache().TotalUsedBytes();
- r->sharded_transfer_bytes = Static::sharded_transfer_cache().TotalBytes();
+ r->sharded_transfer_bytes = Static::sharded_transfer_cache().TotalBytes();
if (report_residence) {
auto percpu_metadata = Static::cpu_cache().MetadataMemoryUsage();
@@ -251,8 +251,8 @@ static uint64_t InUseByApp(const TCMallocStats& stats) {
return StatSub(stats.pageheap.system_bytes,
stats.thread_bytes + stats.central_bytes +
stats.transfer_bytes + stats.per_cpu_bytes +
- stats.sharded_transfer_bytes + stats.pageheap.free_bytes +
- stats.pageheap.unmapped_bytes);
+ stats.sharded_transfer_bytes + stats.pageheap.free_bytes +
+ stats.pageheap.unmapped_bytes);
}
static uint64_t VirtualMemoryUsed(const TCMallocStats& stats) {
@@ -269,21 +269,21 @@ static uint64_t RequiredBytes(const TCMallocStats& stats) {
return StatSub(PhysicalMemoryUsed(stats), stats.pageheap.free_bytes);
}
-static int CountAllowedCpus() {
- cpu_set_t allowed_cpus;
- if (sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus) != 0) {
- return 0;
- }
-
- return CPU_COUNT(&allowed_cpus);
-}
-
+static int CountAllowedCpus() {
+ cpu_set_t allowed_cpus;
+ if (sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus) != 0) {
+ return 0;
+ }
+
+ return CPU_COUNT(&allowed_cpus);
+}
+
// WRITE stats to "out"
-static void DumpStats(Printer* out, int level) {
+static void DumpStats(Printer* out, int level) {
TCMallocStats stats;
uint64_t class_count[kNumClasses];
- SpanStats span_stats[kNumClasses];
- TransferCacheStats tc_stats[kNumClasses];
+ SpanStats span_stats[kNumClasses];
+ TransferCacheStats tc_stats[kNumClasses];
if (level >= 2) {
ExtractStats(&stats, class_count, span_stats, nullptr, nullptr, tc_stats,
true);
@@ -312,7 +312,7 @@ static void DumpStats(Printer* out, int level) {
"MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in page heap freelist\n"
"MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in central cache freelist\n"
"MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in per-CPU cache freelist\n"
- "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in Sharded cache freelist\n"
+ "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in Sharded cache freelist\n"
"MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in transfer cache freelist\n"
"MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in thread cache freelists\n"
"MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in malloc metadata\n"
@@ -335,13 +335,13 @@ static void DumpStats(Printer* out, int level) {
"MALLOC: %12" PRIu64 " (%7.1f MiB) per-CPU slab bytes used\n"
"MALLOC: %12" PRIu64 " (%7.1f MiB) per-CPU slab resident bytes\n"
"MALLOC: %12" PRIu64 " Tcmalloc page size\n"
- "MALLOC: %12" PRIu64 " Tcmalloc hugepage size\n"
- "MALLOC: %12" PRIu64 " CPUs Allowed in Mask\n",
+ "MALLOC: %12" PRIu64 " Tcmalloc hugepage size\n"
+ "MALLOC: %12" PRIu64 " CPUs Allowed in Mask\n",
bytes_in_use_by_app, bytes_in_use_by_app / MiB,
stats.pageheap.free_bytes, stats.pageheap.free_bytes / MiB,
stats.central_bytes, stats.central_bytes / MiB,
stats.per_cpu_bytes, stats.per_cpu_bytes / MiB,
- stats.sharded_transfer_bytes, stats.sharded_transfer_bytes / MiB,
+ stats.sharded_transfer_bytes, stats.sharded_transfer_bytes / MiB,
stats.transfer_bytes, stats.transfer_bytes / MiB,
stats.thread_bytes, stats.thread_bytes / MiB,
stats.metadata_bytes, stats.metadata_bytes / MiB,
@@ -367,18 +367,18 @@ static void DumpStats(Printer* out, int level) {
stats.percpu_metadata_bytes / MiB,
stats.percpu_metadata_bytes_res, stats.percpu_metadata_bytes_res / MiB,
uint64_t(kPageSize),
- uint64_t(kHugePageSize),
- CountAllowedCpus());
+ uint64_t(kHugePageSize),
+ CountAllowedCpus());
// clang-format on
- PrintExperiments(out);
+ PrintExperiments(out);
out->printf(
"MALLOC SAMPLED PROFILES: %zu bytes (current), %zu bytes (peak)\n",
- static_cast<size_t>(Static::sampled_objects_size_.value()),
- Static::peak_heap_tracker().CurrentPeakSize());
+ static_cast<size_t>(Static::sampled_objects_size_.value()),
+ Static::peak_heap_tracker().CurrentPeakSize());
- MemoryStats memstats;
- if (GetMemoryStats(&memstats)) {
+ MemoryStats memstats;
+ if (GetMemoryStats(&memstats)) {
uint64_t rss = memstats.rss;
uint64_t vss = memstats.vss;
// clang-format off
@@ -423,33 +423,33 @@ static void DumpStats(Printer* out, int level) {
}
out->printf("------------------------------------------------\n");
- out->printf("Transfer cache implementation: %s\n",
- TransferCacheImplementationToLabel(
- Static::transfer_cache().implementation()));
-
- out->printf("------------------------------------------------\n");
+ out->printf("Transfer cache implementation: %s\n",
+ TransferCacheImplementationToLabel(
+ Static::transfer_cache().implementation()));
+
+ out->printf("------------------------------------------------\n");
out->printf("Transfer cache insert/remove hits/misses by size class\n");
for (int cl = 1; cl < kNumClasses; ++cl) {
out->printf(
"class %3d [ %8zu bytes ] : %8" PRIu64 " insert hits; %8" PRIu64
- " insert misses (%8lu partial); %8" PRIu64 " remove hits; %8" PRIu64
- " remove misses (%8lu partial);\n",
+ " insert misses (%8lu partial); %8" PRIu64 " remove hits; %8" PRIu64
+ " remove misses (%8lu partial);\n",
cl, Static::sizemap().class_to_size(cl), tc_stats[cl].insert_hits,
- tc_stats[cl].insert_misses, tc_stats[cl].insert_non_batch_misses,
- tc_stats[cl].remove_hits, tc_stats[cl].remove_misses,
- tc_stats[cl].remove_non_batch_misses);
+ tc_stats[cl].insert_misses, tc_stats[cl].insert_non_batch_misses,
+ tc_stats[cl].remove_hits, tc_stats[cl].remove_misses,
+ tc_stats[cl].remove_non_batch_misses);
}
- if (UsePerCpuCache()) {
+ if (UsePerCpuCache()) {
Static::cpu_cache().Print(out);
}
- Static::page_allocator().Print(out, MemoryTag::kNormal);
- if (Static::numa_topology().active_partitions() > 1) {
- Static::page_allocator().Print(out, MemoryTag::kNormalP1);
- }
- Static::page_allocator().Print(out, MemoryTag::kSampled);
- tracking::Print(out);
+ Static::page_allocator().Print(out, MemoryTag::kNormal);
+ if (Static::numa_topology().active_partitions() > 1) {
+ Static::page_allocator().Print(out, MemoryTag::kNormalP1);
+ }
+ Static::page_allocator().Print(out, MemoryTag::kSampled);
+ tracking::Print(out);
Static::guardedpage_allocator().Print(out);
uint64_t limit_bytes;
@@ -461,28 +461,28 @@ static void DumpStats(Printer* out, int level) {
Static::page_allocator().limit_hits());
out->printf("PARAMETER tcmalloc_per_cpu_caches %d\n",
- Parameters::per_cpu_caches() ? 1 : 0);
+ Parameters::per_cpu_caches() ? 1 : 0);
out->printf("PARAMETER tcmalloc_max_per_cpu_cache_size %d\n",
- Parameters::max_per_cpu_cache_size());
+ Parameters::max_per_cpu_cache_size());
out->printf("PARAMETER tcmalloc_max_total_thread_cache_bytes %lld\n",
- Parameters::max_total_thread_cache_bytes());
+ Parameters::max_total_thread_cache_bytes());
out->printf("PARAMETER malloc_release_bytes_per_sec %llu\n",
- Parameters::background_release_rate());
- out->printf(
- "PARAMETER tcmalloc_skip_subrelease_interval %s\n",
- absl::FormatDuration(Parameters::filler_skip_subrelease_interval()));
+ Parameters::background_release_rate());
+ out->printf(
+ "PARAMETER tcmalloc_skip_subrelease_interval %s\n",
+ absl::FormatDuration(Parameters::filler_skip_subrelease_interval()));
out->printf("PARAMETER flat vcpus %d\n",
- subtle::percpu::UsingFlatVirtualCpus() ? 1 : 0);
+ subtle::percpu::UsingFlatVirtualCpus() ? 1 : 0);
}
}
namespace {
-/*static*/ void DumpStatsInPbtxt(Printer* out, int level) {
+/*static*/ void DumpStatsInPbtxt(Printer* out, int level) {
TCMallocStats stats;
uint64_t class_count[kNumClasses];
- SpanStats span_stats[kNumClasses];
- TransferCacheStats tc_stats[kNumClasses];
+ SpanStats span_stats[kNumClasses];
+ TransferCacheStats tc_stats[kNumClasses];
if (level >= 2) {
ExtractStats(&stats, class_count, span_stats, nullptr, nullptr, tc_stats,
true);
@@ -499,8 +499,8 @@ namespace {
region.PrintI64("page_heap_freelist", stats.pageheap.free_bytes);
region.PrintI64("central_cache_freelist", stats.central_bytes);
region.PrintI64("per_cpu_cache_freelist", stats.per_cpu_bytes);
- region.PrintI64("sharded_transfer_cache_freelist",
- stats.sharded_transfer_bytes);
+ region.PrintI64("sharded_transfer_cache_freelist",
+ stats.sharded_transfer_bytes);
region.PrintI64("transfer_cache_freelist", stats.transfer_bytes);
region.PrintI64("thread_cache_freelists", stats.thread_bytes);
region.PrintI64("malloc_metadata", stats.metadata_bytes);
@@ -522,19 +522,19 @@ namespace {
region.PrintI64("percpu_slab_size", stats.percpu_metadata_bytes);
region.PrintI64("percpu_slab_residence", stats.percpu_metadata_bytes_res);
region.PrintI64("tcmalloc_page_size", uint64_t(kPageSize));
- region.PrintI64("tcmalloc_huge_page_size", uint64_t(kHugePageSize));
- region.PrintI64("cpus_allowed", CountAllowedCpus());
+ region.PrintI64("tcmalloc_huge_page_size", uint64_t(kHugePageSize));
+ region.PrintI64("cpus_allowed", CountAllowedCpus());
{
auto sampled_profiles = region.CreateSubRegion("sampled_profiles");
sampled_profiles.PrintI64("current_bytes",
- Static::sampled_objects_size_.value());
- sampled_profiles.PrintI64("peak_bytes",
- Static::peak_heap_tracker().CurrentPeakSize());
+ Static::sampled_objects_size_.value());
+ sampled_profiles.PrintI64("peak_bytes",
+ Static::peak_heap_tracker().CurrentPeakSize());
}
// Print total process stats (inclusive of non-malloc sources).
- MemoryStats memstats;
+ MemoryStats memstats;
if (GetMemoryStats(&memstats)) {
region.PrintI64("total_resident", uint64_t(memstats.rss));
region.PrintI64("total_mapped", uint64_t(memstats.vss));
@@ -561,28 +561,28 @@ namespace {
entry.PrintI64("sizeclass", Static::sizemap().class_to_size(cl));
entry.PrintI64("insert_hits", tc_stats[cl].insert_hits);
entry.PrintI64("insert_misses", tc_stats[cl].insert_misses);
- entry.PrintI64("insert_non_batch_misses",
- tc_stats[cl].insert_non_batch_misses);
+ entry.PrintI64("insert_non_batch_misses",
+ tc_stats[cl].insert_non_batch_misses);
entry.PrintI64("remove_hits", tc_stats[cl].remove_hits);
entry.PrintI64("remove_misses", tc_stats[cl].remove_misses);
- entry.PrintI64("remove_non_batch_misses",
- tc_stats[cl].remove_non_batch_misses);
+ entry.PrintI64("remove_non_batch_misses",
+ tc_stats[cl].remove_non_batch_misses);
}
}
- region.PrintRaw("transfer_cache_implementation",
- TransferCacheImplementationToLabel(
- Static::transfer_cache().implementation()));
-
- if (UsePerCpuCache()) {
+ region.PrintRaw("transfer_cache_implementation",
+ TransferCacheImplementationToLabel(
+ Static::transfer_cache().implementation()));
+
+ if (UsePerCpuCache()) {
Static::cpu_cache().PrintInPbtxt(&region);
}
}
- Static::page_allocator().PrintInPbtxt(&region, MemoryTag::kNormal);
- if (Static::numa_topology().active_partitions() > 1) {
- Static::page_allocator().PrintInPbtxt(&region, MemoryTag::kNormalP1);
- }
- Static::page_allocator().PrintInPbtxt(&region, MemoryTag::kSampled);
+ Static::page_allocator().PrintInPbtxt(&region, MemoryTag::kNormal);
+ if (Static::numa_topology().active_partitions() > 1) {
+ Static::page_allocator().PrintInPbtxt(&region, MemoryTag::kNormalP1);
+ }
+ Static::page_allocator().PrintInPbtxt(&region, MemoryTag::kSampled);
// We do not collect tracking information in pbtxt.
size_t limit_bytes;
@@ -597,20 +597,20 @@ namespace {
Static::guardedpage_allocator().PrintInPbtxt(&gwp_asan);
}
- region.PrintI64("memory_release_failures", SystemReleaseErrors());
+ region.PrintI64("memory_release_failures", SystemReleaseErrors());
- region.PrintBool("tcmalloc_per_cpu_caches", Parameters::per_cpu_caches());
+ region.PrintBool("tcmalloc_per_cpu_caches", Parameters::per_cpu_caches());
region.PrintI64("tcmalloc_max_per_cpu_cache_size",
- Parameters::max_per_cpu_cache_size());
+ Parameters::max_per_cpu_cache_size());
region.PrintI64("tcmalloc_max_total_thread_cache_bytes",
- Parameters::max_total_thread_cache_bytes());
- region.PrintI64("malloc_release_bytes_per_sec",
- static_cast<int64_t>(Parameters::background_release_rate()));
+ Parameters::max_total_thread_cache_bytes());
+ region.PrintI64("malloc_release_bytes_per_sec",
+ static_cast<int64_t>(Parameters::background_release_rate()));
region.PrintI64(
- "tcmalloc_skip_subrelease_interval_ns",
- absl::ToInt64Nanoseconds(Parameters::filler_skip_subrelease_interval()));
- region.PrintRaw("percpu_vcpu_type",
- subtle::percpu::UsingFlatVirtualCpus() ? "FLAT" : "NONE");
+ "tcmalloc_skip_subrelease_interval_ns",
+ absl::ToInt64Nanoseconds(Parameters::filler_skip_subrelease_interval()));
+ region.PrintRaw("percpu_vcpu_type",
+ subtle::percpu::UsingFlatVirtualCpus() ? "FLAT" : "NONE");
}
} // namespace
@@ -629,7 +629,7 @@ namespace {
extern "C" ABSL_ATTRIBUTE_UNUSED int MallocExtension_Internal_GetStatsInPbtxt(
char* buffer, int buffer_length) {
ASSERT(buffer_length > 0);
- Printer printer(buffer, buffer_length);
+ Printer printer(buffer, buffer_length);
// Print level one stats unless lots of space is available
if (buffer_length < 10000) {
@@ -642,7 +642,7 @@ extern "C" ABSL_ATTRIBUTE_UNUSED int MallocExtension_Internal_GetStatsInPbtxt(
if (buffer_length > required) {
absl::base_internal::SpinLockHolder h(&pageheap_lock);
- required += GetRegionFactory()->GetStatsInPbtxt(
+ required += GetRegionFactory()->GetStatsInPbtxt(
absl::Span<char>(buffer + required, buffer_length - required));
}
@@ -652,7 +652,7 @@ extern "C" ABSL_ATTRIBUTE_UNUSED int MallocExtension_Internal_GetStatsInPbtxt(
static void PrintStats(int level) {
const int kBufferSize = (TCMALLOC_HAVE_TRACKING ? 2 << 20 : 64 << 10);
char* buffer = new char[kBufferSize];
- Printer printer(buffer, kBufferSize);
+ Printer printer(buffer, kBufferSize);
DumpStats(&printer, level);
(void)write(STDERR_FILENO, buffer, strlen(buffer));
delete[] buffer;
@@ -661,9 +661,9 @@ static void PrintStats(int level) {
// This function computes a profile that maps a live stack trace to
// the number of bytes of central-cache memory pinned by an allocation
// at that stack trace.
-static std::unique_ptr<const ProfileBase> DumpFragmentationProfile() {
- auto profile = absl::make_unique<StackTraceTable>(ProfileType::kFragmentation,
- 1, true, true);
+static std::unique_ptr<const ProfileBase> DumpFragmentationProfile() {
+ auto profile = absl::make_unique<StackTraceTable>(ProfileType::kFragmentation,
+ 1, true, true);
{
absl::base_internal::SpinLockHolder h(&pageheap_lock);
@@ -700,9 +700,9 @@ static std::unique_ptr<const ProfileBase> DumpFragmentationProfile() {
// compensated for (that is, it reports 8000 16-byte objects iff we believe the
// program has that many live objects.) Otherwise, do not adjust for sampling
// (the caller will do so somehow.)
-static std::unique_ptr<const ProfileBase> DumpHeapProfile(bool unsample) {
+static std::unique_ptr<const ProfileBase> DumpHeapProfile(bool unsample) {
auto profile = absl::make_unique<StackTraceTable>(
- ProfileType::kHeap, Sampler::GetSamplePeriod(), true, unsample);
+ ProfileType::kHeap, Sampler::GetSamplePeriod(), true, unsample);
absl::base_internal::SpinLockHolder h(&pageheap_lock);
for (Span* s : Static::sampled_objects_) {
profile->AddTrace(1.0, *s->sampled_stack());
@@ -712,12 +712,12 @@ static std::unique_ptr<const ProfileBase> DumpHeapProfile(bool unsample) {
class AllocationSampleList;
-class AllocationSample final : public AllocationProfilingTokenBase {
+class AllocationSample final : public AllocationProfilingTokenBase {
public:
AllocationSample();
~AllocationSample() override;
- Profile Stop() && override;
+ Profile Stop() && override;
private:
std::unique_ptr<StackTraceTable> mallocs_;
@@ -760,7 +760,7 @@ class AllocationSampleList {
AllocationSample::AllocationSample() {
mallocs_ = absl::make_unique<StackTraceTable>(
- ProfileType::kAllocations, Sampler::GetSamplePeriod(), true, true);
+ ProfileType::kAllocations, Sampler::GetSamplePeriod(), true, true);
absl::base_internal::SpinLockHolder h(&pageheap_lock);
allocation_samples_.Add(this);
}
@@ -777,14 +777,14 @@ AllocationSample::~AllocationSample() {
}
}
-Profile AllocationSample::Stop() && ABSL_LOCKS_EXCLUDED(pageheap_lock) {
+Profile AllocationSample::Stop() && ABSL_LOCKS_EXCLUDED(pageheap_lock) {
// We need to remove ourselves from the allocation_samples_ list before we
// mutate mallocs_;
if (mallocs_) {
absl::base_internal::SpinLockHolder h(&pageheap_lock);
allocation_samples_.Remove(this);
}
- return ProfileAccessor::MakeProfile(std::move(mallocs_));
+ return ProfileAccessor::MakeProfile(std::move(mallocs_));
}
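Editor's note: AllocationSample's `Stop() &&` above backs the public allocation-profiling token. A hypothetical caller goes through MallocExtension rather than these internals; the sketch below assumes the standard public malloc_extension.h API, which is not part of this hunk:

#include <utility>

#include "tcmalloc/malloc_extension.h"

void ProfileSomeWork() {
  auto token = tcmalloc::MallocExtension::StartAllocationProfiling();
  // ... run the workload whose allocations should be sampled ...
  tcmalloc::Profile profile = std::move(token).Stop();
  profile.Iterate([](const tcmalloc::Profile::Sample& sample) {
    // Inspect sample.allocated_size, sample.count, sample.depth, ...
  });
}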
extern "C" void MallocExtension_Internal_GetStats(std::string* ret) {
@@ -808,7 +808,7 @@ extern "C" void MallocExtension_Internal_GetStats(std::string* ret) {
extern "C" size_t TCMalloc_Internal_GetStats(char* buffer,
size_t buffer_length) {
- Printer printer(buffer, buffer_length);
+ Printer printer(buffer, buffer_length);
if (buffer_length < 10000) {
DumpStats(&printer, 1);
} else {
@@ -816,34 +816,34 @@ extern "C" size_t TCMalloc_Internal_GetStats(char* buffer,
}
printer.printf("\nLow-level allocator stats:\n");
- printer.printf("Memory Release Failures: %d\n", SystemReleaseErrors());
+ printer.printf("Memory Release Failures: %d\n", SystemReleaseErrors());
size_t n = printer.SpaceRequired();
size_t bytes_remaining = buffer_length > n ? buffer_length - n : 0;
if (bytes_remaining > 0) {
- n += GetRegionFactory()->GetStats(
+ n += GetRegionFactory()->GetStats(
absl::Span<char>(buffer + n, bytes_remaining));
}
return n;
}
-extern "C" const ProfileBase* MallocExtension_Internal_SnapshotCurrent(
- ProfileType type) {
+extern "C" const ProfileBase* MallocExtension_Internal_SnapshotCurrent(
+ ProfileType type) {
switch (type) {
- case ProfileType::kHeap:
+ case ProfileType::kHeap:
return DumpHeapProfile(true).release();
- case ProfileType::kFragmentation:
+ case ProfileType::kFragmentation:
return DumpFragmentationProfile().release();
- case ProfileType::kPeakHeap:
+ case ProfileType::kPeakHeap:
return Static::peak_heap_tracker().DumpSample().release();
default:
return nullptr;
}
}
-extern "C" AllocationProfilingTokenBase*
+extern "C" AllocationProfilingTokenBase*
MallocExtension_Internal_StartAllocationProfiling() {
return new AllocationSample();
}
@@ -903,13 +903,13 @@ bool GetNumericProperty(const char* name_data, size_t name_size,
return true;
}
- if (name == "tcmalloc.sharded_transfer_cache_free") {
- TCMallocStats stats;
- ExtractTCMallocStats(&stats, false);
- *value = stats.sharded_transfer_bytes;
- return true;
- }
-
+ if (name == "tcmalloc.sharded_transfer_cache_free") {
+ TCMallocStats stats;
+ ExtractTCMallocStats(&stats, false);
+ *value = stats.sharded_transfer_bytes;
+ return true;
+ }
+
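Editor's note: the new `tcmalloc.sharded_transfer_cache_free` branch added above is reachable from user code through the usual numeric-property interface; something along these lines, assuming the standard `MallocExtension::GetNumericProperty` wrapper (not shown in this hunk):

#include <cstdio>

#include "tcmalloc/malloc_extension.h"

int main() {
  auto v = tcmalloc::MallocExtension::GetNumericProperty(
      "tcmalloc.sharded_transfer_cache_free");
  if (v.has_value()) {
    std::printf("sharded transfer cache free: %zu bytes\n", *v);
  }
  return 0;
}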
if (name == "tcmalloc.slack_bytes") {
// Kept for backwards compatibility. Now defined externally as:
// pageheap_free_bytes + pageheap_unmapped_bytes.
@@ -963,18 +963,18 @@ bool GetNumericProperty(const char* name_data, size_t name_size,
if (name == "tcmalloc.local_bytes") {
TCMallocStats stats;
ExtractTCMallocStats(&stats, false);
- *value =
- stats.thread_bytes + stats.per_cpu_bytes + stats.sharded_transfer_bytes;
- ;
+ *value =
+ stats.thread_bytes + stats.per_cpu_bytes + stats.sharded_transfer_bytes;
+ ;
return true;
}
if (name == "tcmalloc.external_fragmentation_bytes") {
TCMallocStats stats;
ExtractTCMallocStats(&stats, false);
- *value = (stats.pageheap.free_bytes + stats.central_bytes +
- stats.per_cpu_bytes + stats.sharded_transfer_bytes +
- stats.transfer_bytes + stats.thread_bytes + stats.metadata_bytes);
+ *value = (stats.pageheap.free_bytes + stats.central_bytes +
+ stats.per_cpu_bytes + stats.sharded_transfer_bytes +
+ stats.transfer_bytes + stats.thread_bytes + stats.metadata_bytes);
return true;
}
@@ -1024,11 +1024,11 @@ bool GetNumericProperty(const char* name_data, size_t name_size,
return false;
}
-MallocExtension::Ownership GetOwnership(const void* ptr) {
+MallocExtension::Ownership GetOwnership(const void* ptr) {
const PageId p = PageIdContaining(ptr);
return Static::pagemap().GetDescriptor(p)
- ? MallocExtension::Ownership::kOwned
- : MallocExtension::Ownership::kNotOwned;
+ ? MallocExtension::Ownership::kOwned
+ : MallocExtension::Ownership::kNotOwned;
}
extern "C" bool MallocExtension_Internal_GetNumericProperty(
@@ -1037,21 +1037,21 @@ extern "C" bool MallocExtension_Internal_GetNumericProperty(
}
extern "C" void MallocExtension_Internal_GetMemoryLimit(
- MallocExtension::MemoryLimit* limit) {
+ MallocExtension::MemoryLimit* limit) {
ASSERT(limit != nullptr);
std::tie(limit->limit, limit->hard) = Static::page_allocator().limit();
}
extern "C" void MallocExtension_Internal_SetMemoryLimit(
- const MallocExtension::MemoryLimit* limit) {
+ const MallocExtension::MemoryLimit* limit) {
ASSERT(limit != nullptr);
if (!limit->hard) {
- Parameters::set_heap_size_hard_limit(0);
- Static::page_allocator().set_limit(limit->limit, false /* !hard */);
+ Parameters::set_heap_size_hard_limit(0);
+ Static::page_allocator().set_limit(limit->limit, false /* !hard */);
} else {
- Parameters::set_heap_size_hard_limit(limit->limit);
+ Parameters::set_heap_size_hard_limit(limit->limit);
}
}
@@ -1059,15 +1059,15 @@ extern "C" void MallocExtension_Internal_MarkThreadIdle() {
ThreadCache::BecomeIdle();
}
-extern "C" AddressRegionFactory* MallocExtension_Internal_GetRegionFactory() {
+extern "C" AddressRegionFactory* MallocExtension_Internal_GetRegionFactory() {
absl::base_internal::SpinLockHolder h(&pageheap_lock);
- return GetRegionFactory();
+ return GetRegionFactory();
}
extern "C" void MallocExtension_Internal_SetRegionFactory(
- AddressRegionFactory* factory) {
+ AddressRegionFactory* factory) {
absl::base_internal::SpinLockHolder h(&pageheap_lock);
- SetRegionFactory(factory);
+ SetRegionFactory(factory);
}
// ReleaseMemoryToSystem drops the page heap lock while actually calling to
@@ -1101,7 +1101,7 @@ extern "C" void MallocExtension_Internal_ReleaseMemoryToSystem(
if (num_bytes > 0) {
// A sub-page size request may round down to zero. Assume the caller wants
// some memory released.
- num_pages = BytesToLengthCeil(num_bytes);
+ num_pages = BytesToLengthCeil(num_bytes);
ASSERT(num_pages > Length(0));
} else {
num_pages = Length(0);
@@ -1117,40 +1117,40 @@ extern "C" void MallocExtension_Internal_ReleaseMemoryToSystem(
}
}
-extern "C" void MallocExtension_EnableForkSupport() {
- Static::EnableForkSupport();
-}
-
-void TCMallocPreFork() {
- if (!Static::ForkSupportEnabled()) {
- return;
- }
-
- if (Static::CPUCacheActive()) {
- Static::cpu_cache().AcquireInternalLocks();
- }
- Static::transfer_cache().AcquireInternalLocks();
- guarded_page_lock.Lock();
- release_lock.Lock();
- pageheap_lock.Lock();
- AcquireSystemAllocLock();
-}
-
-void TCMallocPostFork() {
- if (!Static::ForkSupportEnabled()) {
- return;
- }
-
- ReleaseSystemAllocLock();
- pageheap_lock.Unlock();
- guarded_page_lock.Unlock();
- release_lock.Unlock();
- Static::transfer_cache().ReleaseInternalLocks();
- if (Static::CPUCacheActive()) {
- Static::cpu_cache().ReleaseInternalLocks();
- }
-}
-
+extern "C" void MallocExtension_EnableForkSupport() {
+ Static::EnableForkSupport();
+}
+
+void TCMallocPreFork() {
+ if (!Static::ForkSupportEnabled()) {
+ return;
+ }
+
+ if (Static::CPUCacheActive()) {
+ Static::cpu_cache().AcquireInternalLocks();
+ }
+ Static::transfer_cache().AcquireInternalLocks();
+ guarded_page_lock.Lock();
+ release_lock.Lock();
+ pageheap_lock.Lock();
+ AcquireSystemAllocLock();
+}
+
+void TCMallocPostFork() {
+ if (!Static::ForkSupportEnabled()) {
+ return;
+ }
+
+ ReleaseSystemAllocLock();
+ pageheap_lock.Unlock();
+ guarded_page_lock.Unlock();
+ release_lock.Unlock();
+ Static::transfer_cache().ReleaseInternalLocks();
+ if (Static::CPUCacheActive()) {
+ Static::cpu_cache().ReleaseInternalLocks();
+ }
+}
+
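Editor's note: `TCMallocPreFork()`/`TCMallocPostFork()` above take and release every allocator lock around fork() so the child never inherits a lock held by a thread that no longer exists. The registration site is outside this patch; a hypothetical wiring via pthread_atfork(3) could look like the following (illustrative only):

#include <pthread.h>

// Declarations match the definitions added above.
void TCMallocPreFork();
void TCMallocPostFork();

void InstallTCMallocForkHooks() {
  pthread_atfork(/*prepare=*/TCMallocPreFork,
                 /*parent=*/TCMallocPostFork,
                 /*child=*/TCMallocPostFork);
}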
extern "C" void MallocExtension_SetSampleUserDataCallbacks(
MallocExtension::CreateSampleUserDataCallback create,
MallocExtension::CopySampleUserDataCallback copy,
@@ -1168,12 +1168,12 @@ static ABSL_ATTRIBUTE_NOINLINE size_t nallocx_slow(size_t size, int flags) {
Static::InitIfNecessary();
size_t align = static_cast<size_t>(1ull << (flags & 0x3f));
uint32_t cl;
- if (ABSL_PREDICT_TRUE(Static::sizemap().GetSizeClass(
- CppPolicy().AlignAs(align), size, &cl))) {
+ if (ABSL_PREDICT_TRUE(Static::sizemap().GetSizeClass(
+ CppPolicy().AlignAs(align), size, &cl))) {
ASSERT(cl != 0);
return Static::sizemap().class_to_size(cl);
} else {
- return BytesToLengthCeil(size).in_bytes();
+ return BytesToLengthCeil(size).in_bytes();
}
}
@@ -1187,22 +1187,22 @@ extern "C" size_t nallocx(size_t size, int flags) noexcept {
return nallocx_slow(size, flags);
}
uint32_t cl;
- if (ABSL_PREDICT_TRUE(
- Static::sizemap().GetSizeClass(CppPolicy(), size, &cl))) {
+ if (ABSL_PREDICT_TRUE(
+ Static::sizemap().GetSizeClass(CppPolicy(), size, &cl))) {
ASSERT(cl != 0);
return Static::sizemap().class_to_size(cl);
} else {
- return BytesToLengthCeil(size).in_bytes();
+ return BytesToLengthCeil(size).in_bytes();
}
}
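Editor's note: `nallocx()` above is the jemalloc-compatible size-inspection entry point; it reports what tcmalloc would actually hand out for a request (the size-class size, or the page-rounded size for large requests) without allocating. Exercised directly, assuming the binary links against this tcmalloc:

#include <cstddef>
#include <cstdio>

// Matches the extern "C" definition above; flags = 0 means default alignment.
extern "C" size_t nallocx(size_t size, int flags) noexcept;

int main() {
  for (size_t req : {size_t{1}, size_t{24}, size_t{100}, size_t{5000}}) {
    std::printf("request %5zu -> would allocate %5zu bytes\n",
                req, nallocx(req, 0));
  }
  return 0;
}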
-extern "C" MallocExtension::Ownership MallocExtension_Internal_GetOwnership(
- const void* ptr) {
- return GetOwnership(ptr);
+extern "C" MallocExtension::Ownership MallocExtension_Internal_GetOwnership(
+ const void* ptr) {
+ return GetOwnership(ptr);
}
extern "C" void MallocExtension_Internal_GetProperties(
- std::map<std::string, MallocExtension::Property>* result) {
+ std::map<std::string, MallocExtension::Property>* result) {
TCMallocStats stats;
ExtractTCMallocStats(&stats, true);
@@ -1229,8 +1229,8 @@ extern "C" void MallocExtension_Internal_GetProperties(
(*result)["tcmalloc.transfer_cache_free"].value = stats.transfer_bytes;
// Per CPU Cache Free List
(*result)["tcmalloc.cpu_free"].value = stats.per_cpu_bytes;
- (*result)["tcmalloc.sharded_transfer_cache_free"].value =
- stats.sharded_transfer_bytes;
+ (*result)["tcmalloc.sharded_transfer_cache_free"].value =
+ stats.sharded_transfer_bytes;
(*result)["tcmalloc.per_cpu_caches_active"].value = Static::CPUCacheActive();
// Thread Cache Free List
(*result)["tcmalloc.thread_cache_free"].value = stats.thread_bytes;
@@ -1243,8 +1243,8 @@ extern "C" void MallocExtension_Internal_GetProperties(
(*result)["tcmalloc.page_algorithm"].value =
Static::page_allocator().algorithm();
- FillExperimentProperties(result);
- tracking::GetProperties(result);
+ FillExperimentProperties(result);
+ tracking::GetProperties(result);
}
extern "C" size_t MallocExtension_Internal_ReleaseCpuMemory(int cpu) {
@@ -1311,7 +1311,7 @@ inline void SetClassCapacity(const void* ptr, uint32_t cl, size_t* psize) {
inline void SetPagesCapacity(const void*, size_t, std::nullptr_t) {}
inline void SetPagesCapacity(const void* ptr, size_t size, size_t* psize) {
if (ABSL_PREDICT_TRUE(ptr != nullptr)) {
- *psize = BytesToLengthCeil(size).in_bytes();
+ *psize = BytesToLengthCeil(size).in_bytes();
} else {
*psize = 0;
}
@@ -1345,14 +1345,14 @@ static inline ABSL_ATTRIBUTE_ALWAYS_INLINE void FreeSmall(void* ptr,
size_t cl) {
if (ABSL_PREDICT_FALSE(!GetThreadSampler()->IsOnFastPath())) {
// Take the slow path.
- invoke_delete_hooks_and_free<FreeSmallSlow, hooks_state>(ptr, cl);
+ invoke_delete_hooks_and_free<FreeSmallSlow, hooks_state>(ptr, cl);
return;
}
#ifndef TCMALLOC_DEPRECATED_PERTHREAD
// The CPU Cache is enabled, so we're able to take the fastpath.
ASSERT(Static::CPUCacheActive());
- ASSERT(subtle::percpu::IsFastNoInit());
+ ASSERT(subtle::percpu::IsFastNoInit());
Static::cpu_cache().Deallocate(ptr, cl);
#else // TCMALLOC_DEPRECATED_PERTHREAD
@@ -1381,7 +1381,7 @@ static inline ABSL_ATTRIBUTE_ALWAYS_INLINE void FreeSmall(void* ptr,
// function prologue/epilogue.
ABSL_ATTRIBUTE_NOINLINE
static void FreeSmallSlow(void* ptr, size_t cl) {
- if (ABSL_PREDICT_TRUE(UsePerCpuCache())) {
+ if (ABSL_PREDICT_TRUE(UsePerCpuCache())) {
Static::cpu_cache().Deallocate(ptr, cl);
} else if (ThreadCache* cache = ThreadCache::GetCacheIfPresent()) {
// TODO(b/134691947): If we reach this path from the ThreadCache fastpath,
@@ -1391,7 +1391,7 @@ static void FreeSmallSlow(void* ptr, size_t cl) {
} else {
// This thread doesn't have thread-cache yet or already. Delete directly
// into central cache.
- Static::transfer_cache().InsertRange(cl, absl::Span<void*>(&ptr, 1));
+ Static::transfer_cache().InsertRange(cl, absl::Span<void*>(&ptr, 1));
}
}
@@ -1459,10 +1459,10 @@ static void* SampleifyAllocation(size_t requested_size, size_t weight,
allocated_size = Static::sizemap().class_to_size(cl);
// If the caller didn't provide a span, allocate one:
- Length num_pages = BytesToLengthCeil(allocated_size);
+ Length num_pages = BytesToLengthCeil(allocated_size);
if ((guarded_alloc = TrySampleGuardedAllocation(
requested_size, requested_alignment, num_pages))) {
- ASSERT(IsSampledMemory(guarded_alloc));
+ ASSERT(IsSampledMemory(guarded_alloc));
const PageId p = PageIdContaining(guarded_alloc);
absl::base_internal::SpinLockHolder h(&pageheap_lock);
span = Span::New(p, num_pages);
@@ -1502,7 +1502,7 @@ static void* SampleifyAllocation(size_t requested_size, size_t weight,
// Grab the stack trace outside the heap lock
StackTrace tmp;
tmp.proxy = proxy;
- tmp.depth = absl::GetStackTrace(tmp.stack, kMaxStackDepth, 1);
+ tmp.depth = absl::GetStackTrace(tmp.stack, kMaxStackDepth, 1);
tmp.requested_size = requested_size;
tmp.requested_alignment = requested_alignment;
tmp.allocated_size = allocated_size;
@@ -1513,12 +1513,12 @@ static void* SampleifyAllocation(size_t requested_size, size_t weight,
absl::base_internal::SpinLockHolder h(&pageheap_lock);
// Allocate stack trace
StackTrace* stack = Static::stacktrace_allocator().New();
- allocation_samples_.ReportMalloc(tmp);
- *stack = tmp;
- span->Sample(stack);
+ allocation_samples_.ReportMalloc(tmp);
+ *stack = tmp;
+ span->Sample(stack);
}
- Static::peak_heap_tracker().MaybeSaveSample();
+ Static::peak_heap_tracker().MaybeSaveSample();
if (obj != nullptr) {
#if TCMALLOC_HAVE_TRACKING
@@ -1530,7 +1530,7 @@ static void* SampleifyAllocation(size_t requested_size, size_t weight,
// TODO(b/158678747): As of cl/315283185, we may occasionally see a hit in
// the TransferCache here. Prior to that CL, we always forced a miss. Both
// of these may artificially skew our tracking data.
- Static::transfer_cache().InsertRange(cl, absl::Span<void*>(&obj, 1));
+ Static::transfer_cache().InsertRange(cl, absl::Span<void*>(&obj, 1));
#else
// We are not maintaining precise statistics on malloc hit/miss rates at our
// cache tiers. We can deallocate into our ordinary cache.
@@ -1551,18 +1551,18 @@ inline size_t ShouldSampleAllocation(size_t size) {
return GetThreadSampler()->RecordAllocation(size);
}
-template <typename Policy>
-inline void* do_malloc_pages(Policy policy, size_t size) {
+template <typename Policy>
+inline void* do_malloc_pages(Policy policy, size_t size) {
// Page allocator does not deal well with num_pages = 0.
- Length num_pages = std::max<Length>(BytesToLengthCeil(size), Length(1));
+ Length num_pages = std::max<Length>(BytesToLengthCeil(size), Length(1));
MemoryTag tag = MemoryTag::kNormal;
- if (Static::numa_topology().numa_aware()) {
- tag = NumaNormalTag(policy.numa_partition());
- }
- const size_t alignment = policy.align();
+ if (Static::numa_topology().numa_aware()) {
+ tag = NumaNormalTag(policy.numa_partition());
+ }
+ const size_t alignment = policy.align();
Span* span = Static::page_allocator().NewAligned(
- num_pages, BytesToLengthCeil(alignment), tag);
+ num_pages, BytesToLengthCeil(alignment), tag);
if (span == nullptr) {
return nullptr;
@@ -1570,7 +1570,7 @@ inline void* do_malloc_pages(Policy policy, size_t size) {
void* result = span->start_address();
ASSERT(
- tag == GetMemoryTag(span->start_address()));
+ tag == GetMemoryTag(span->start_address()));
if (size_t weight = ShouldSampleAllocation(size)) {
CHECK_CONDITION(result == SampleifyAllocation(size, weight, alignment, 0,
@@ -1587,7 +1587,7 @@ inline void* ABSL_ATTRIBUTE_ALWAYS_INLINE AllocSmall(Policy policy, size_t cl,
ASSERT(cl != 0);
void* result;
- if (UsePerCpuCache()) {
+ if (UsePerCpuCache()) {
result = Static::cpu_cache().Allocate<Policy::handle_oom>(cl);
} else {
result = ThreadCache::GetCache()->Allocate<Policy::handle_oom>(cl);
@@ -1622,8 +1622,8 @@ static void do_free_pages(void* ptr, const PageId p) {
Span* span = Static::pagemap().GetExistingDescriptor(p);
ASSERT(span != nullptr);
- // Prefetch now to avoid a stall accessing *span while under the lock.
- span->Prefetch();
+ // Prefetch now to avoid a stall accessing *span while under the lock.
+ span->Prefetch();
{
absl::base_internal::SpinLockHolder h(&pageheap_lock);
ASSERT(span->first_page() == p);
@@ -1631,16 +1631,16 @@ static void do_free_pages(void* ptr, const PageId p) {
proxy = st->proxy;
size = st->allocated_size;
if (proxy == nullptr && size <= kMaxSize) {
- tracking::Report(kFreeMiss,
- Static::sizemap().SizeClass(
- CppPolicy().InSameNumaPartitionAs(ptr), size),
- 1);
+ tracking::Report(kFreeMiss,
+ Static::sizemap().SizeClass(
+ CppPolicy().InSameNumaPartitionAs(ptr), size),
+ 1);
}
notify_sampled_alloc = true;
Static::DestroySampleUserData(st->user_data);
Static::stacktrace_allocator().Delete(st);
}
- if (IsSampledMemory(ptr)) {
+ if (IsSampledMemory(ptr)) {
if (Static::guardedpage_allocator().PointerIsMine(ptr)) {
// Release lock while calling Deallocate() since it does a system call.
pageheap_lock.Unlock();
@@ -1651,9 +1651,9 @@ static void do_free_pages(void* ptr, const PageId p) {
ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
Static::page_allocator().Delete(span, MemoryTag::kSampled);
}
- } else if (kNumaPartitions != 1) {
- ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
- Static::page_allocator().Delete(span, GetMemoryTag(ptr));
+ } else if (kNumaPartitions != 1) {
+ ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
+ Static::page_allocator().Delete(span, GetMemoryTag(ptr));
} else {
ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
Static::page_allocator().Delete(span, MemoryTag::kNormal);
@@ -1664,8 +1664,8 @@ static void do_free_pages(void* ptr, const PageId p) {
}
if (proxy) {
- const auto policy = CppPolicy().InSameNumaPartitionAs(proxy);
- const size_t cl = Static::sizemap().SizeClass(policy, size);
+ const auto policy = CppPolicy().InSameNumaPartitionAs(proxy);
+ const size_t cl = Static::sizemap().SizeClass(policy, size);
FreeSmall<Hooks::NO>(proxy, cl);
}
}
@@ -1718,7 +1718,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void do_free_with_cl(void* ptr, size_t cl) {
ASSERT(!Static::pagemap().GetExistingDescriptor(p)->sampled());
FreeSmall<hooks_state>(ptr, cl);
} else {
- invoke_delete_hooks_and_free<do_free_pages, hooks_state>(ptr, p);
+ invoke_delete_hooks_and_free<do_free_pages, hooks_state>(ptr, p);
}
}
@@ -1737,7 +1737,7 @@ bool CorrectAlignment(void* ptr, std::align_val_t alignment);
inline ABSL_ATTRIBUTE_ALWAYS_INLINE void FreePages(void* ptr) {
const PageId p = PageIdContaining(ptr);
- invoke_delete_hooks_and_free<do_free_pages, Hooks::RUN>(ptr, p);
+ invoke_delete_hooks_and_free<do_free_pages, Hooks::RUN>(ptr, p);
}
template <typename AlignPolicy>
@@ -1754,7 +1754,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void do_free_with_size(void* ptr,
//
// The optimized path doesn't work with sampled objects, whose deletions
// trigger more operations and require visiting metadata.
- if (ABSL_PREDICT_FALSE(IsSampledMemory(ptr))) {
+ if (ABSL_PREDICT_FALSE(IsSampledMemory(ptr))) {
// we don't know the true size class of the ptr
if (ptr == nullptr) return;
return FreePages(ptr);
@@ -1768,9 +1768,9 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void do_free_with_size(void* ptr,
ASSERT(ptr != nullptr);
uint32_t cl;
- if (ABSL_PREDICT_FALSE(!Static::sizemap().GetSizeClass(
- CppPolicy().AlignAs(align.align()).InSameNumaPartitionAs(ptr), size,
- &cl))) {
+ if (ABSL_PREDICT_FALSE(!Static::sizemap().GetSizeClass(
+ CppPolicy().AlignAs(align.align()).InSameNumaPartitionAs(ptr), size,
+ &cl))) {
// We couldn't calculate the size class, which means size > kMaxSize.
ASSERT(size > kMaxSize || align.align() > alignof(std::max_align_t));
static_assert(kMaxSize >= kPageSize, "kMaxSize must be at least kPageSize");
@@ -1810,11 +1810,11 @@ bool CorrectSize(void* ptr, size_t size, AlignPolicy align) {
// Round-up passed in size to how much tcmalloc allocates for that size.
if (Static::guardedpage_allocator().PointerIsMine(ptr)) {
size = Static::guardedpage_allocator().GetRequestedSize(ptr);
- } else if (Static::sizemap().GetSizeClass(CppPolicy().AlignAs(align.align()),
- size, &cl)) {
+ } else if (Static::sizemap().GetSizeClass(CppPolicy().AlignAs(align.align()),
+ size, &cl)) {
size = Static::sizemap().class_to_size(cl);
} else {
- size = BytesToLengthCeil(size).in_bytes();
+ size = BytesToLengthCeil(size).in_bytes();
}
size_t actual = GetSize(ptr);
if (ABSL_PREDICT_TRUE(actual == size)) return true;
@@ -1825,7 +1825,7 @@ bool CorrectSize(void* ptr, size_t size, AlignPolicy align) {
// Checks that an asserted object <ptr> has <align> alignment.
bool CorrectAlignment(void* ptr, std::align_val_t alignment) {
size_t align = static_cast<size_t>(alignment);
- ASSERT(absl::has_single_bit(align));
+ ASSERT(absl::has_single_bit(align));
return ((reinterpret_cast<uintptr_t>(ptr) & (align - 1)) == 0);
}
@@ -1837,7 +1837,7 @@ inline int do_mallopt(int cmd, int value) {
return 1; // Indicates error
}
-#ifdef TCMALLOC_HAVE_STRUCT_MALLINFO
+#ifdef TCMALLOC_HAVE_STRUCT_MALLINFO
inline struct mallinfo do_mallinfo() {
TCMallocStats stats;
ExtractTCMallocStats(&stats, false);
@@ -1857,33 +1857,33 @@ inline struct mallinfo do_mallinfo() {
return info;
}
-#endif // TCMALLOC_HAVE_STRUCT_MALLINFO
+#endif // TCMALLOC_HAVE_STRUCT_MALLINFO
} // namespace
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
-
-using tcmalloc::tcmalloc_internal::AllocSmall;
-using tcmalloc::tcmalloc_internal::CppPolicy;
-using tcmalloc::tcmalloc_internal::do_free_no_hooks;
-#ifdef TCMALLOC_HAVE_STRUCT_MALLINFO
-using tcmalloc::tcmalloc_internal::do_mallinfo;
-#endif
-using tcmalloc::tcmalloc_internal::do_malloc_pages;
-using tcmalloc::tcmalloc_internal::do_malloc_stats;
-using tcmalloc::tcmalloc_internal::do_mallopt;
-using tcmalloc::tcmalloc_internal::GetThreadSampler;
-using tcmalloc::tcmalloc_internal::MallocPolicy;
-using tcmalloc::tcmalloc_internal::SetClassCapacity;
-using tcmalloc::tcmalloc_internal::SetPagesCapacity;
-using tcmalloc::tcmalloc_internal::Static;
-using tcmalloc::tcmalloc_internal::UsePerCpuCache;
-
-#ifdef TCMALLOC_DEPRECATED_PERTHREAD
-using tcmalloc::tcmalloc_internal::ThreadCache;
-#endif // TCMALLOC_DEPRECATED_PERTHREAD
-
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
+
+using tcmalloc::tcmalloc_internal::AllocSmall;
+using tcmalloc::tcmalloc_internal::CppPolicy;
+using tcmalloc::tcmalloc_internal::do_free_no_hooks;
+#ifdef TCMALLOC_HAVE_STRUCT_MALLINFO
+using tcmalloc::tcmalloc_internal::do_mallinfo;
+#endif
+using tcmalloc::tcmalloc_internal::do_malloc_pages;
+using tcmalloc::tcmalloc_internal::do_malloc_stats;
+using tcmalloc::tcmalloc_internal::do_mallopt;
+using tcmalloc::tcmalloc_internal::GetThreadSampler;
+using tcmalloc::tcmalloc_internal::MallocPolicy;
+using tcmalloc::tcmalloc_internal::SetClassCapacity;
+using tcmalloc::tcmalloc_internal::SetPagesCapacity;
+using tcmalloc::tcmalloc_internal::Static;
+using tcmalloc::tcmalloc_internal::UsePerCpuCache;
+
+#ifdef TCMALLOC_DEPRECATED_PERTHREAD
+using tcmalloc::tcmalloc_internal::ThreadCache;
+#endif // TCMALLOC_DEPRECATED_PERTHREAD
+
// Slow path implementation.
// This function is used by `fast_alloc` if the allocation requires page sized
// allocations or some complex logic is required such as initialization,
@@ -1898,11 +1898,11 @@ static void* ABSL_ATTRIBUTE_SECTION(google_malloc)
GetThreadSampler()->UpdateFastPathState();
void* p;
uint32_t cl;
- bool is_small = Static::sizemap().GetSizeClass(policy, size, &cl);
+ bool is_small = Static::sizemap().GetSizeClass(policy, size, &cl);
if (ABSL_PREDICT_TRUE(is_small)) {
p = AllocSmall(policy, cl, size, capacity);
} else {
- p = do_malloc_pages(policy, size);
+ p = do_malloc_pages(policy, size);
// Set capacity to the exact size for a page allocation.
// This needs to be revisited if we introduce gwp-asan
// sampling / guarded allocations to do_malloc_pages().
@@ -1925,7 +1925,7 @@ fast_alloc(Policy policy, size_t size, CapacityPtr capacity = nullptr) {
// (regardless of size), but in this case should also delegate to the slow
// path by the fast path check further down.
uint32_t cl;
- bool is_small = Static::sizemap().GetSizeClass(policy, size, &cl);
+ bool is_small = Static::sizemap().GetSizeClass(policy, size, &cl);
if (ABSL_PREDICT_FALSE(!is_small)) {
return slow_alloc(policy, size, capacity);
}
@@ -1971,12 +1971,12 @@ fast_alloc(Policy policy, size_t size, CapacityPtr capacity = nullptr) {
return ret;
}
-using tcmalloc::tcmalloc_internal::GetOwnership;
-using tcmalloc::tcmalloc_internal::GetSize;
-
+using tcmalloc::tcmalloc_internal::GetOwnership;
+using tcmalloc::tcmalloc_internal::GetSize;
+
extern "C" size_t MallocExtension_Internal_GetAllocatedSize(const void* ptr) {
- ASSERT(!ptr ||
- GetOwnership(ptr) != tcmalloc::MallocExtension::Ownership::kNotOwned);
+ ASSERT(!ptr ||
+ GetOwnership(ptr) != tcmalloc::MallocExtension::Ownership::kNotOwned);
return GetSize(ptr);
}
@@ -1985,7 +1985,7 @@ extern "C" void MallocExtension_Internal_MarkThreadBusy() {
// invoking any hooks.
Static::InitIfNecessary();
- if (UsePerCpuCache()) {
+ if (UsePerCpuCache()) {
return;
}
@@ -1996,18 +1996,18 @@ extern "C" void MallocExtension_Internal_MarkThreadBusy() {
// Exported routines
//-------------------------------------------------------------------
-using tcmalloc::tcmalloc_internal::AlignAsPolicy;
-using tcmalloc::tcmalloc_internal::CorrectAlignment;
-using tcmalloc::tcmalloc_internal::CorrectSize;
-using tcmalloc::tcmalloc_internal::DefaultAlignPolicy;
-using tcmalloc::tcmalloc_internal::do_free;
-using tcmalloc::tcmalloc_internal::do_free_with_size;
-
-// depends on TCMALLOC_HAVE_STRUCT_MALLINFO, so needs to come after that.
+using tcmalloc::tcmalloc_internal::AlignAsPolicy;
+using tcmalloc::tcmalloc_internal::CorrectAlignment;
+using tcmalloc::tcmalloc_internal::CorrectSize;
+using tcmalloc::tcmalloc_internal::DefaultAlignPolicy;
+using tcmalloc::tcmalloc_internal::do_free;
+using tcmalloc::tcmalloc_internal::do_free_with_size;
+
+// depends on TCMALLOC_HAVE_STRUCT_MALLINFO, so needs to come after that.
#include "tcmalloc/libc_override.h"
extern "C" ABSL_CACHELINE_ALIGNED void* TCMallocInternalMalloc(
- size_t size) noexcept {
+ size_t size) noexcept {
// Use TCMallocInternalMemalign to avoid requiring size %
// alignof(std::max_align_t) == 0. TCMallocInternalAlignedAlloc enforces this
// property.
@@ -2026,7 +2026,7 @@ extern "C" ABSL_ATTRIBUTE_SECTION(google_malloc) tcmalloc::sized_ptr_t
}
extern "C" ABSL_CACHELINE_ALIGNED void* TCMallocInternalMalloc_aligned(
- size_t size, std::align_val_t alignment) noexcept {
+ size_t size, std::align_val_t alignment) noexcept {
return fast_alloc(MallocPolicy().AlignAs(alignment), size);
}
@@ -2053,12 +2053,12 @@ extern "C" ABSL_ATTRIBUTE_SECTION(
#endif // TCMALLOC_ALIAS
extern "C" ABSL_CACHELINE_ALIGNED void TCMallocInternalFree(
- void* ptr) noexcept {
+ void* ptr) noexcept {
do_free(ptr);
}
extern "C" void TCMallocInternalSdallocx(void* ptr, size_t size,
- int flags) noexcept {
+ int flags) noexcept {
size_t alignment = alignof(std::max_align_t);
if (ABSL_PREDICT_FALSE(flags != 0)) {
@@ -2066,10 +2066,10 @@ extern "C" void TCMallocInternalSdallocx(void* ptr, size_t size,
alignment = static_cast<size_t>(1ull << (flags & 0x3f));
}
- return do_free_with_size(ptr, size, AlignAsPolicy(alignment));
+ return do_free_with_size(ptr, size, AlignAsPolicy(alignment));
}
-extern "C" void* TCMallocInternalCalloc(size_t n, size_t elem_size) noexcept {
+extern "C" void* TCMallocInternalCalloc(size_t n, size_t elem_size) noexcept {
// Overflow check
const size_t size = n * elem_size;
if (elem_size != 0 && size / elem_size != n) {
@@ -2085,7 +2085,7 @@ extern "C" void* TCMallocInternalCalloc(size_t n, size_t elem_size) noexcept {
// Here and below we use TCMALLOC_ALIAS (if supported) to make
// identical functions aliases. This saves space in L1 instruction
// cache. As of now it saves ~9K.
-extern "C" void TCMallocInternalCfree(void* ptr) noexcept
+extern "C" void TCMallocInternalCfree(void* ptr) noexcept
#ifdef TCMALLOC_ALIAS
TCMALLOC_ALIAS(TCMallocInternalFree);
#else
@@ -2143,7 +2143,7 @@ static inline ABSL_ATTRIBUTE_ALWAYS_INLINE void* do_realloc(void* old_ptr,
}
extern "C" void* TCMallocInternalRealloc(void* old_ptr,
- size_t new_size) noexcept {
+ size_t new_size) noexcept {
if (old_ptr == NULL) {
return fast_alloc(MallocPolicy(), new_size);
}
@@ -2155,7 +2155,7 @@ extern "C" void* TCMallocInternalRealloc(void* old_ptr,
}
extern "C" void* TCMallocInternalNewNothrow(size_t size,
- const std::nothrow_t&) noexcept {
+ const std::nothrow_t&) noexcept {
return fast_alloc(CppPolicy().Nothrow(), size);
}
@@ -2166,7 +2166,7 @@ extern "C" tcmalloc::sized_ptr_t tcmalloc_size_returning_operator_new_nothrow(
return {p, capacity};
}
-extern "C" ABSL_CACHELINE_ALIGNED void TCMallocInternalDelete(void* p) noexcept
+extern "C" ABSL_CACHELINE_ALIGNED void TCMallocInternalDelete(void* p) noexcept
#ifdef TCMALLOC_ALIAS
TCMALLOC_ALIAS(TCMallocInternalFree);
#else
@@ -2176,7 +2176,7 @@ extern "C" ABSL_CACHELINE_ALIGNED void TCMallocInternalDelete(void* p) noexcept
#endif // TCMALLOC_ALIAS
extern "C" void TCMallocInternalDeleteAligned(
- void* p, std::align_val_t alignment) noexcept
+ void* p, std::align_val_t alignment) noexcept
#if defined(TCMALLOC_ALIAS) && defined(NDEBUG)
TCMALLOC_ALIAS(TCMallocInternalDelete);
#else
@@ -2190,27 +2190,27 @@ extern "C" void TCMallocInternalDeleteAligned(
#endif
extern "C" ABSL_CACHELINE_ALIGNED void TCMallocInternalDeleteSized(
- void* p, size_t size) noexcept {
- ASSERT(CorrectSize(p, size, DefaultAlignPolicy()));
- do_free_with_size(p, size, DefaultAlignPolicy());
+ void* p, size_t size) noexcept {
+ ASSERT(CorrectSize(p, size, DefaultAlignPolicy()));
+ do_free_with_size(p, size, DefaultAlignPolicy());
}
extern "C" void TCMallocInternalDeleteSizedAligned(
- void* p, size_t t, std::align_val_t alignment) noexcept {
- return do_free_with_size(p, t, AlignAsPolicy(alignment));
+ void* p, size_t t, std::align_val_t alignment) noexcept {
+ return do_free_with_size(p, t, AlignAsPolicy(alignment));
}
-extern "C" void TCMallocInternalDeleteArraySized(void* p, size_t size) noexcept
+extern "C" void TCMallocInternalDeleteArraySized(void* p, size_t size) noexcept
#ifdef TCMALLOC_ALIAS
TCMALLOC_ALIAS(TCMallocInternalDeleteSized);
#else
{
- do_free_with_size(p, size, DefaultAlignPolicy());
+ do_free_with_size(p, size, DefaultAlignPolicy());
}
#endif
extern "C" void TCMallocInternalDeleteArraySizedAligned(
- void* p, size_t t, std::align_val_t alignment) noexcept
+ void* p, size_t t, std::align_val_t alignment) noexcept
#ifdef TCMALLOC_ALIAS
TCMALLOC_ALIAS(TCMallocInternalDeleteSizedAligned);
#else
@@ -2223,7 +2223,7 @@ extern "C" void TCMallocInternalDeleteArraySizedAligned(
// (via ::operator delete(ptr, nothrow)).
// But it's really the same as normal delete, so we just do the same thing.
extern "C" void TCMallocInternalDeleteNothrow(void* p,
- const std::nothrow_t&) noexcept
+ const std::nothrow_t&) noexcept
#ifdef TCMALLOC_ALIAS
TCMALLOC_ALIAS(TCMallocInternalFree);
#else
@@ -2234,7 +2234,7 @@ extern "C" void TCMallocInternalDeleteNothrow(void* p,
#if defined(TCMALLOC_ALIAS) && defined(NDEBUG)
extern "C" void TCMallocInternalDeleteAligned_nothrow(
- void* p, std::align_val_t alignment, const std::nothrow_t& nt) noexcept
+ void* p, std::align_val_t alignment, const std::nothrow_t& nt) noexcept
TCMALLOC_ALIAS(TCMallocInternalDelete);
#else
extern "C" ABSL_ATTRIBUTE_SECTION(
@@ -2242,7 +2242,7 @@ extern "C" ABSL_ATTRIBUTE_SECTION(
std::align_val_t
alignment,
const std::nothrow_t&
- nt) noexcept {
+ nt) noexcept {
ASSERT(CorrectAlignment(p, alignment));
return TCMallocInternalDelete(p);
}
@@ -2253,7 +2253,7 @@ extern "C" void* TCMallocInternalNewArray(size_t size)
TCMALLOC_ALIAS(TCMallocInternalNew);
#else
{
- return fast_alloc(CppPolicy().WithoutHooks(), size);
+ return fast_alloc(CppPolicy().WithoutHooks(), size);
}
#endif // TCMALLOC_ALIAS
@@ -2268,7 +2268,7 @@ extern "C" void* TCMallocInternalNewArrayAligned(size_t size,
#endif
extern "C" void* TCMallocInternalNewArrayNothrow(size_t size,
- const std::nothrow_t&) noexcept
+ const std::nothrow_t&) noexcept
#ifdef TCMALLOC_ALIAS
TCMALLOC_ALIAS(TCMallocInternalNewNothrow);
#else
@@ -2294,7 +2294,7 @@ extern "C" ABSL_ATTRIBUTE_SECTION(
}
#endif
-extern "C" void TCMallocInternalDeleteArray(void* p) noexcept
+extern "C" void TCMallocInternalDeleteArray(void* p) noexcept
#ifdef TCMALLOC_ALIAS
TCMALLOC_ALIAS(TCMallocInternalFree);
#else
@@ -2304,7 +2304,7 @@ extern "C" void TCMallocInternalDeleteArray(void* p) noexcept
#endif // TCMALLOC_ALIAS
extern "C" void TCMallocInternalDeleteArrayAligned(
- void* p, std::align_val_t alignment) noexcept
+ void* p, std::align_val_t alignment) noexcept
#if defined(TCMALLOC_ALIAS) && defined(NDEBUG)
TCMALLOC_ALIAS(TCMallocInternalDelete);
#else
@@ -2315,7 +2315,7 @@ extern "C" void TCMallocInternalDeleteArrayAligned(
#endif
extern "C" void TCMallocInternalDeleteArrayNothrow(
- void* p, const std::nothrow_t&) noexcept
+ void* p, const std::nothrow_t&) noexcept
#ifdef TCMALLOC_ALIAS
TCMALLOC_ALIAS(TCMallocInternalFree);
#else
@@ -2326,7 +2326,7 @@ extern "C" void TCMallocInternalDeleteArrayNothrow(
#if defined(TCMALLOC_ALIAS) && defined(NDEBUG)
extern "C" void TCMallocInternalDeleteArrayAligned_nothrow(
- void* p, std::align_val_t alignment, const std::nothrow_t&) noexcept
+ void* p, std::align_val_t alignment, const std::nothrow_t&) noexcept
TCMALLOC_ALIAS(TCMallocInternalDelete);
#else
extern "C" ABSL_ATTRIBUTE_SECTION(
@@ -2334,19 +2334,19 @@ extern "C" ABSL_ATTRIBUTE_SECTION(
std::align_val_t
alignment,
const std::
- nothrow_t&) noexcept {
+ nothrow_t&) noexcept {
ASSERT(CorrectAlignment(p, alignment));
return TCMallocInternalDelete(p);
}
#endif
-extern "C" void* TCMallocInternalMemalign(size_t align, size_t size) noexcept {
- ASSERT(absl::has_single_bit(align));
+extern "C" void* TCMallocInternalMemalign(size_t align, size_t size) noexcept {
+ ASSERT(absl::has_single_bit(align));
return fast_alloc(MallocPolicy().AlignAs(align), size);
}
extern "C" void* TCMallocInternalAlignedAlloc(size_t align,
- size_t size) noexcept
+ size_t size) noexcept
#if defined(TCMALLOC_ALIAS) && defined(NDEBUG)
TCMALLOC_ALIAS(TCMallocInternalMemalign);
#else
@@ -2362,8 +2362,8 @@ extern "C" void* TCMallocInternalAlignedAlloc(size_t align,
#endif
extern "C" int TCMallocInternalPosixMemalign(void** result_ptr, size_t align,
- size_t size) noexcept {
- if (((align % sizeof(void*)) != 0) || !absl::has_single_bit(align)) {
+ size_t size) noexcept {
+ if (((align % sizeof(void*)) != 0) || !absl::has_single_bit(align)) {
return EINVAL;
}
void* result = fast_alloc(MallocPolicy().Nothrow().AlignAs(align), size);
@@ -2377,13 +2377,13 @@ extern "C" int TCMallocInternalPosixMemalign(void** result_ptr, size_t align,
static size_t pagesize = 0;
-extern "C" void* TCMallocInternalValloc(size_t size) noexcept {
+extern "C" void* TCMallocInternalValloc(size_t size) noexcept {
// Allocate page-aligned object of length >= size bytes
if (pagesize == 0) pagesize = getpagesize();
return fast_alloc(MallocPolicy().Nothrow().AlignAs(pagesize), size);
}
-extern "C" void* TCMallocInternalPvalloc(size_t size) noexcept {
+extern "C" void* TCMallocInternalPvalloc(size_t size) noexcept {
// Round up size to a multiple of pagesize
if (pagesize == 0) pagesize = getpagesize();
if (size == 0) { // pvalloc(0) should allocate one page, according to
@@ -2393,30 +2393,30 @@ extern "C" void* TCMallocInternalPvalloc(size_t size) noexcept {
return fast_alloc(MallocPolicy().Nothrow().AlignAs(pagesize), size);
}
-extern "C" void TCMallocInternalMallocStats(void) noexcept {
+extern "C" void TCMallocInternalMallocStats(void) noexcept {
do_malloc_stats();
}
-extern "C" int TCMallocInternalMallOpt(int cmd, int value) noexcept {
+extern "C" int TCMallocInternalMallOpt(int cmd, int value) noexcept {
return do_mallopt(cmd, value);
}
-#ifdef TCMALLOC_HAVE_STRUCT_MALLINFO
-extern "C" struct mallinfo TCMallocInternalMallocInfo(void) noexcept {
+#ifdef TCMALLOC_HAVE_STRUCT_MALLINFO
+extern "C" struct mallinfo TCMallocInternalMallocInfo(void) noexcept {
return do_mallinfo();
}
#endif
-extern "C" size_t TCMallocInternalMallocSize(void* ptr) noexcept {
- ASSERT(GetOwnership(ptr) != tcmalloc::MallocExtension::Ownership::kNotOwned);
+extern "C" size_t TCMallocInternalMallocSize(void* ptr) noexcept {
+ ASSERT(GetOwnership(ptr) != tcmalloc::MallocExtension::Ownership::kNotOwned);
return GetSize(ptr);
}
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-namespace {
-
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+namespace {
+
// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization. We also create the TSD key
@@ -2434,8 +2434,8 @@ class TCMallocGuard {
};
static TCMallocGuard module_enter_exit_hook;
-
-} // namespace
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+
+} // namespace
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
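Note on the tcmalloc.cc hunks above: allocation is split between a size-class fast path (AllocSmall through the per-CPU or per-thread cache) and do_malloc_pages for requests that do not map to a size class. The standalone sketch below only models that dispatch; kMaxSmall, LookupSizeClass, AllocSmall and AllocPages are illustrative stand-ins, not tcmalloc's internals.

// Simplified model of the fast_alloc / slow_alloc split shown above.
// All names here are stand-ins for illustration only.
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>

namespace sketch {

constexpr size_t kMaxSmall = 256 * 1024;  // hypothetical small-object limit

// Returns true and fills *cl when the request fits a small size class.
bool LookupSizeClass(size_t size, uint32_t* cl) {
  if (size > kMaxSmall) return false;
  *cl = static_cast<uint32_t>((size + 7) / 8);  // toy mapping, not tcmalloc's SizeMap
  return true;
}

void* AllocSmall(uint32_t /*cl*/, size_t size) { return std::malloc(size); }
void* AllocPages(size_t size) { return std::malloc(size); }  // stands in for do_malloc_pages

// Mirrors the structure of the hunks: try the size-class path, otherwise
// fall back to the page allocator.
void* Alloc(size_t size) {
  uint32_t cl;
  if (LookupSizeClass(size, &cl)) return AllocSmall(cl, size);
  return AllocPages(size);
}

}  // namespace sketch

int main() {
  void* a = sketch::Alloc(64);       // small path
  void* b = sketch::Alloc(1 << 20);  // page path
  std::cout << (a != nullptr) << " " << (b != nullptr) << "\n";
  std::free(a);
  std::free(b);
}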
diff --git a/contrib/libs/tcmalloc/tcmalloc/tcmalloc.h b/contrib/libs/tcmalloc/tcmalloc/tcmalloc.h
index 1a8eeb4157..e17bd43ba0 100644
--- a/contrib/libs/tcmalloc/tcmalloc/tcmalloc.h
+++ b/contrib/libs/tcmalloc/tcmalloc/tcmalloc.h
@@ -26,14 +26,14 @@
#include <stddef.h>
#include "absl/base/attributes.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
#include "tcmalloc/internal/declarations.h"
// __THROW is defined in glibc systems. It means, counter-intuitively,
// "This function will never throw an exception." It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW // I guess we're not on a glibc system
-#define __THROW __attribute__((__nothrow__))
+#define __THROW __attribute__((__nothrow__))
#endif
#ifdef __cplusplus
@@ -68,7 +68,7 @@ void TCMallocInternalMallocStats(void) __THROW
ABSL_ATTRIBUTE_SECTION(google_malloc);
int TCMallocInternalMallOpt(int cmd, int value) __THROW
ABSL_ATTRIBUTE_SECTION(google_malloc);
-#if defined(TCMALLOC_HAVE_STRUCT_MALLINFO)
+#if defined(TCMALLOC_HAVE_STRUCT_MALLINFO)
struct mallinfo TCMallocInternalMallocInfo(void) __THROW
ABSL_ATTRIBUTE_SECTION(google_malloc);
#endif
@@ -120,7 +120,7 @@ void TCMallocInternalDeleteArrayNothrow(void* p, const std::nothrow_t&) __THROW
}
#endif
-void TCMallocInternalAcquireLocks();
-void TCMallocInternalReleaseLocks();
-
+void TCMallocInternalAcquireLocks();
+void TCMallocInternalReleaseLocks();
+
#endif // TCMALLOC_TCMALLOC_H_
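Note on the TCMallocInternalAcquireLocks()/TCMallocInternalReleaseLocks() declarations added above: the header only declares them, so the sketch below is a hypothetical illustration of how lock acquire/release hooks of this shape are commonly registered for fork safety via pthread_atfork. The mutex and handler names are stand-ins and are not part of this change.

// Hypothetical fork-safety wiring, assuming the hooks exist to hold the
// allocator's locks across fork(). Not taken from this patch.
#include <pthread.h>

static pthread_mutex_t g_allocator_lock = PTHREAD_MUTEX_INITIALIZER;  // stand-in lock

static void PrepareFork() { pthread_mutex_lock(&g_allocator_lock); }      // before fork()
static void ParentAfterFork() { pthread_mutex_unlock(&g_allocator_lock); }  // parent resumes
static void ChildAfterFork() { pthread_mutex_unlock(&g_allocator_lock); }   // child resumes

int main() {
  // Register once; fork() then runs the handlers around the clone.
  pthread_atfork(PrepareFork, ParentAfterFork, ChildAfterFork);
  return 0;
}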
diff --git a/contrib/libs/tcmalloc/tcmalloc/tcmalloc_large_test.cc b/contrib/libs/tcmalloc/tcmalloc/tcmalloc_large_test.cc
index f940120f46..fac4c5bb56 100644
--- a/contrib/libs/tcmalloc/tcmalloc/tcmalloc_large_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/tcmalloc_large_test.cc
@@ -25,14 +25,14 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/container/flat_hash_set.h"
+#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_set.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/malloc_extension.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
// Alloc a size that should always fail.
@@ -142,7 +142,7 @@ class LargeAllocationTest : public ::testing::Test {
TEST_F(LargeAllocationTest, UniqueAddresses) {
constexpr int kZeroTimes = 1024;
- absl::flat_hash_set<void*> ptrs;
+ absl::flat_hash_set<void*> ptrs;
for (int i = 0; i < kZeroTimes; ++i) {
void* p = malloc(1);
ASSERT_NE(p, nullptr);
@@ -200,5 +200,5 @@ TEST_F(LargeAllocationTest, NearMaxAddressBits) {
}
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
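Note on tcmalloc_large_test.cc above ("Alloc a size that should always fail"): a minimal standalone analogue of that check, assuming only standard malloc / operator new(nothrow) behavior rather than the test fixture.

// Requests near the full address space should fail cleanly with nullptr.
#include <cstdio>
#include <cstdlib>
#include <new>

int main() {
  const size_t huge = static_cast<size_t>(-1) / 2;  // cannot be satisfied
  void* p = std::malloc(huge);
  std::printf("malloc(huge) == nullptr: %d\n", p == nullptr);
  void* q = ::operator new(huge, std::nothrow);
  std::printf("new(nothrow, huge) == nullptr: %d\n", q == nullptr);
  std::free(p);
  ::operator delete(q, std::nothrow);
  return 0;
}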
diff --git a/contrib/libs/tcmalloc/tcmalloc/tcmalloc_policy.h b/contrib/libs/tcmalloc/tcmalloc/tcmalloc_policy.h
index d81f8f3be0..111ac66829 100644
--- a/contrib/libs/tcmalloc/tcmalloc/tcmalloc_policy.h
+++ b/contrib/libs/tcmalloc/tcmalloc/tcmalloc_policy.h
@@ -14,7 +14,7 @@
//
// This file defines policies used when allocating memory.
//
-// An allocation policy encapsulates four policies:
+// An allocation policy encapsulates four policies:
//
// - Out of memory policy.
// Dictates how to handle OOM conditions.
@@ -43,20 +43,20 @@
// // Returns true if allocation hooks must be invoked.
// static bool invoke_hooks();
// };
-//
-// - NUMA partition policy
-// When NUMA awareness is enabled this dictates which NUMA partition we will
-// allocate memory from. Must be trivially copyable.
-//
-// struct NumaPartitionPolicyTemplate {
-// // Returns the NUMA partition to allocate from.
-// size_t partition() const;
-//
-// // Returns the NUMA partition to allocate from multiplied by
-// // kNumBaseClasses - i.e. the first size class that corresponds to the
-// // NUMA partition to allocate from.
-// size_t scaled_partition() const;
-// };
+//
+// - NUMA partition policy
+// When NUMA awareness is enabled this dictates which NUMA partition we will
+// allocate memory from. Must be trivially copyable.
+//
+// struct NumaPartitionPolicyTemplate {
+// // Returns the NUMA partition to allocate from.
+// size_t partition() const;
+//
+// // Returns the NUMA partition to allocate from multiplied by
+// // kNumBaseClasses - i.e. the first size class that corresponds to the
+// // NUMA partition to allocate from.
+// size_t scaled_partition() const;
+// };
#ifndef TCMALLOC_TCMALLOC_POLICY_H_
#define TCMALLOC_TCMALLOC_POLICY_H_
@@ -68,13 +68,13 @@
#include <cstddef>
#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/internal/numa.h"
-#include "tcmalloc/internal/percpu.h"
-#include "tcmalloc/static_vars.h"
+#include "tcmalloc/internal/numa.h"
+#include "tcmalloc/internal/percpu.h"
+#include "tcmalloc/static_vars.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// NullOomPolicy: returns nullptr
struct NullOomPolicy {
@@ -143,47 +143,47 @@ struct NoHooksPolicy {
static constexpr bool invoke_hooks() { return false; }
};
-// Use a fixed NUMA partition.
-class FixedNumaPartitionPolicy {
- public:
- explicit constexpr FixedNumaPartitionPolicy(size_t partition)
- : partition_(partition) {}
-
- size_t constexpr partition() const { return partition_; }
-
- size_t constexpr scaled_partition() const {
- return partition_ * kNumBaseClasses;
- }
-
- private:
- size_t partition_;
-};
-
-// Use the NUMA partition which the executing CPU is local to.
-struct LocalNumaPartitionPolicy {
- // Note that the partition returned may change between calls if the executing
- // thread migrates between NUMA nodes & partitions. Users of this function
- // should not rely upon multiple invocations returning the same partition.
- size_t partition() const {
- return Static::numa_topology().GetCurrentPartition();
- }
- size_t scaled_partition() const {
- return Static::numa_topology().GetCurrentScaledPartition();
- }
-};
-
+// Use a fixed NUMA partition.
+class FixedNumaPartitionPolicy {
+ public:
+ explicit constexpr FixedNumaPartitionPolicy(size_t partition)
+ : partition_(partition) {}
+
+ size_t constexpr partition() const { return partition_; }
+
+ size_t constexpr scaled_partition() const {
+ return partition_ * kNumBaseClasses;
+ }
+
+ private:
+ size_t partition_;
+};
+
+// Use the NUMA partition which the executing CPU is local to.
+struct LocalNumaPartitionPolicy {
+ // Note that the partition returned may change between calls if the executing
+ // thread migrates between NUMA nodes & partitions. Users of this function
+ // should not rely upon multiple invocations returning the same partition.
+ size_t partition() const {
+ return Static::numa_topology().GetCurrentPartition();
+ }
+ size_t scaled_partition() const {
+ return Static::numa_topology().GetCurrentScaledPartition();
+ }
+};
+
// TCMallocPolicy defines the compound policy object containing
// the OOM, alignment and hooks policies.
// Is trivially constructible, copyable and destructible.
template <typename OomPolicy = CppOomPolicy,
typename AlignPolicy = DefaultAlignPolicy,
- typename HooksPolicy = InvokeHooksPolicy,
- typename NumaPolicy = LocalNumaPartitionPolicy>
+ typename HooksPolicy = InvokeHooksPolicy,
+ typename NumaPolicy = LocalNumaPartitionPolicy>
class TCMallocPolicy {
public:
constexpr TCMallocPolicy() = default;
- explicit constexpr TCMallocPolicy(AlignPolicy align, NumaPolicy numa)
- : align_(align), numa_(numa) {}
+ explicit constexpr TCMallocPolicy(AlignPolicy align, NumaPolicy numa)
+ : align_(align), numa_(numa) {}
// OOM policy
static void* handle_oom(size_t size) { return OomPolicy::handle_oom(size); }
@@ -191,70 +191,70 @@ class TCMallocPolicy {
// Alignment policy
constexpr size_t align() const { return align_.align(); }
- // NUMA partition
- constexpr size_t numa_partition() const { return numa_.partition(); }
-
- // NUMA partition multiplied by kNumBaseClasses
- constexpr size_t scaled_numa_partition() const {
- return numa_.scaled_partition();
- }
-
+ // NUMA partition
+ constexpr size_t numa_partition() const { return numa_.partition(); }
+
+ // NUMA partition multiplied by kNumBaseClasses
+ constexpr size_t scaled_numa_partition() const {
+ return numa_.scaled_partition();
+ }
+
// Hooks policy
static constexpr bool invoke_hooks() { return HooksPolicy::invoke_hooks(); }
// Returns this policy aligned as 'align'
template <typename align_t>
- constexpr TCMallocPolicy<OomPolicy, AlignAsPolicy, HooksPolicy, NumaPolicy>
- AlignAs(
+ constexpr TCMallocPolicy<OomPolicy, AlignAsPolicy, HooksPolicy, NumaPolicy>
+ AlignAs(
align_t align) const {
- return TCMallocPolicy<OomPolicy, AlignAsPolicy, HooksPolicy, NumaPolicy>(
- AlignAsPolicy{align}, numa_);
+ return TCMallocPolicy<OomPolicy, AlignAsPolicy, HooksPolicy, NumaPolicy>(
+ AlignAsPolicy{align}, numa_);
}
// Returns this policy with a nullptr OOM policy.
- constexpr TCMallocPolicy<NullOomPolicy, AlignPolicy, HooksPolicy,
- NumaPolicy> Nothrow()
+ constexpr TCMallocPolicy<NullOomPolicy, AlignPolicy, HooksPolicy,
+ NumaPolicy> Nothrow()
const {
- return TCMallocPolicy<NullOomPolicy, AlignPolicy, HooksPolicy,
- NumaPolicy>(align_, numa_);
+ return TCMallocPolicy<NullOomPolicy, AlignPolicy, HooksPolicy,
+ NumaPolicy>(align_, numa_);
}
// Returns this policy with NewAllocHook invocations disabled.
- constexpr TCMallocPolicy<OomPolicy, AlignPolicy, NoHooksPolicy, NumaPolicy>
+ constexpr TCMallocPolicy<OomPolicy, AlignPolicy, NoHooksPolicy, NumaPolicy>
WithoutHooks()
const {
- return TCMallocPolicy<OomPolicy, AlignPolicy, NoHooksPolicy,
- NumaPolicy>(align_, numa_);
- }
-
- // Returns this policy with a fixed NUMA partition.
- constexpr TCMallocPolicy<OomPolicy, AlignPolicy, NoHooksPolicy,
- FixedNumaPartitionPolicy> InNumaPartition(size_t partition) const {
- return TCMallocPolicy<OomPolicy, AlignPolicy, NoHooksPolicy,
- FixedNumaPartitionPolicy>(
- align_, FixedNumaPartitionPolicy{partition});
- }
-
- // Returns this policy with a fixed NUMA partition matching that of the
- // previously allocated `ptr`.
- constexpr auto InSameNumaPartitionAs(void* ptr) const {
- return InNumaPartition(NumaPartitionFromPointer(ptr));
+ return TCMallocPolicy<OomPolicy, AlignPolicy, NoHooksPolicy,
+ NumaPolicy>(align_, numa_);
}
+ // Returns this policy with a fixed NUMA partition.
+ constexpr TCMallocPolicy<OomPolicy, AlignPolicy, NoHooksPolicy,
+ FixedNumaPartitionPolicy> InNumaPartition(size_t partition) const {
+ return TCMallocPolicy<OomPolicy, AlignPolicy, NoHooksPolicy,
+ FixedNumaPartitionPolicy>(
+ align_, FixedNumaPartitionPolicy{partition});
+ }
+
+ // Returns this policy with a fixed NUMA partition matching that of the
+ // previously allocated `ptr`.
+ constexpr auto InSameNumaPartitionAs(void* ptr) const {
+ return InNumaPartition(NumaPartitionFromPointer(ptr));
+ }
+
static constexpr bool can_return_nullptr() {
return OomPolicy::can_return_nullptr();
}
private:
AlignPolicy align_;
- NumaPolicy numa_;
+ NumaPolicy numa_;
};
using CppPolicy = TCMallocPolicy<CppOomPolicy, DefaultAlignPolicy>;
using MallocPolicy = TCMallocPolicy<MallocOomPolicy, MallocAlignPolicy>;
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_TCMALLOC_POLICY_H_
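Note on tcmalloc_policy.h above: policies are composed by chaining constexpr modifiers, as seen at call sites like MallocPolicy().AlignAs(alignment) and CppPolicy().InSameNumaPartitionAs(ptr). The sketch below is a toy re-implementation of that chaining pattern to show the shape of the API; it is not the real TCMallocPolicy.

// Toy model of chained, value-returning policy modifiers.
#include <cstddef>
#include <iostream>

namespace sketch {

struct Policy {
  size_t align = alignof(std::max_align_t);
  bool nothrow = false;
  size_t numa_partition = 0;

  // Each modifier returns a new value, mirroring the constexpr chaining style
  // of AlignAs / Nothrow / InNumaPartition in the header above.
  constexpr Policy AlignAs(size_t a) const { Policy p = *this; p.align = a; return p; }
  constexpr Policy Nothrow() const { Policy p = *this; p.nothrow = true; return p; }
  constexpr Policy InNumaPartition(size_t part) const {
    Policy p = *this;
    p.numa_partition = part;
    return p;
  }
};

}  // namespace sketch

int main() {
  constexpr auto policy = sketch::Policy{}.AlignAs(64).Nothrow().InNumaPartition(1);
  std::cout << policy.align << " " << policy.nothrow << " " << policy.numa_partition << "\n";
}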
diff --git a/contrib/libs/tcmalloc/tcmalloc/thread_cache.cc b/contrib/libs/tcmalloc/tcmalloc/thread_cache.cc
index 89cc779af1..0469e9a49e 100644
--- a/contrib/libs/tcmalloc/tcmalloc/thread_cache.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/thread_cache.cc
@@ -20,9 +20,9 @@
#include "absl/base/macros.h"
#include "tcmalloc/transfer_cache.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
size_t ThreadCache::per_thread_cache_size_ = kMaxThreadCacheSize;
size_t ThreadCache::overall_thread_cache_size_ = kDefaultOverallThreadCacheSize;
@@ -148,14 +148,14 @@ void ThreadCache::ReleaseToCentralCache(FreeList* src, size_t cl, int N) {
src->PopBatch(batch_size, batch);
static_assert(ABSL_ARRAYSIZE(batch) >= kMaxObjectsToMove,
"not enough space in batch");
- Static::transfer_cache().InsertRange(cl,
- absl::Span<void*>(batch, batch_size));
+ Static::transfer_cache().InsertRange(cl,
+ absl::Span<void*>(batch, batch_size));
N -= batch_size;
}
src->PopBatch(N, batch);
static_assert(ABSL_ARRAYSIZE(batch) >= kMaxObjectsToMove,
"not enough space in batch");
- Static::transfer_cache().InsertRange(cl, absl::Span<void*>(batch, N));
+ Static::transfer_cache().InsertRange(cl, absl::Span<void*>(batch, N));
size_ -= delta_bytes;
}
@@ -412,6 +412,6 @@ void ThreadCache::set_overall_thread_cache_size(size_t new_size) {
RecomputePerThreadCacheSize();
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
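Note on ThreadCache::ReleaseToCentralCache above: freed objects are handed to the transfer cache in chunks of the per-class batch size, then one final (possibly partial) batch. Below is a standalone model of that loop only, with InsertRange reduced to a counter; the batch size is an assumed constant rather than num_objects_to_move(cl).

// Standalone model of the batched release loop.
#include <cstddef>
#include <iostream>
#include <vector>

// Stand-in for Static::transfer_cache().InsertRange(cl, absl::Span<void*>(batch, n)).
static void InsertRange(void** /*batch*/, std::size_t n) {
  std::cout << "insert " << n << " objects\n";
}

int main() {
  constexpr std::size_t kBatchSize = 32;     // plays the role of the per-class batch size
  std::vector<void*> objects(100, nullptr);  // 100 freed objects to return

  std::size_t n = objects.size();
  std::size_t pos = 0;
  while (n > kBatchSize) {  // full batches first, as in the hunk above
    InsertRange(&objects[pos], kBatchSize);
    pos += kBatchSize;
    n -= kBatchSize;
  }
  InsertRange(&objects[pos], n);  // then the remaining partial batch
}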
diff --git a/contrib/libs/tcmalloc/tcmalloc/thread_cache.h b/contrib/libs/tcmalloc/tcmalloc/thread_cache.h
index ae6cef869f..48d89d61b6 100644
--- a/contrib/libs/tcmalloc/tcmalloc/thread_cache.h
+++ b/contrib/libs/tcmalloc/tcmalloc/thread_cache.h
@@ -32,9 +32,9 @@
#include "tcmalloc/static_vars.h"
#include "tcmalloc/tracking.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
//-------------------------------------------------------------------
// Data kept per thread
@@ -338,8 +338,8 @@ inline ThreadCache* ThreadCache::GetCache() {
return (ABSL_PREDICT_TRUE(tc != nullptr)) ? tc : CreateCacheIfNecessary();
}
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_THREAD_CACHE_H_
diff --git a/contrib/libs/tcmalloc/tcmalloc/thread_cache_test.cc b/contrib/libs/tcmalloc/tcmalloc/thread_cache_test.cc
index 5b2d10b2ac..f2231b3183 100644
--- a/contrib/libs/tcmalloc/tcmalloc/thread_cache_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/thread_cache_test.cc
@@ -25,7 +25,7 @@
#include "gtest/gtest.h"
#include "absl/strings/str_cat.h"
-#include "benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/internal/memory_stats.h"
#include "tcmalloc/internal/parameter_accessors.h"
diff --git a/contrib/libs/tcmalloc/tcmalloc/tracking.h b/contrib/libs/tcmalloc/tcmalloc/tracking.h
index 68d4c59b9c..43ec993b86 100644
--- a/contrib/libs/tcmalloc/tcmalloc/tracking.h
+++ b/contrib/libs/tcmalloc/tcmalloc/tracking.h
@@ -47,9 +47,9 @@
#ifndef TCMALLOC_TRACK_ALLOCS
// #define TCMALLOC_TRACK_ALLOCS
#endif
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
#if 1
#define TCMALLOC_HAVE_TRACKING 0
@@ -72,8 +72,8 @@ enum TrackingStat {
kTCInsertMiss = 7, // # of times the object list misses the transfer cache.
kTCRemoveHit = 8, // # of times object list fetching hits transfer cache.
kTCRemoveMiss = 9, // # of times object list fetching misses transfer cache.
- kTCElementsPlunder = 10, // # of elements plundered from the transfer cache.
- kNumTrackingStats = 11,
+ kTCElementsPlunder = 10, // # of elements plundered from the transfer cache.
+ kNumTrackingStats = 11,
};
namespace tracking {
@@ -83,7 +83,7 @@ void Report(TrackingStat stat, size_t cl, ssize_t count);
// Dump all tracking data to <out>. We could support various other
// mechanisms for data delivery without too much trouble...
-void Print(Printer* out);
+void Print(Printer* out);
// Call on startup during tcmalloc initialization.
void Init();
@@ -95,15 +95,15 @@ void GetProperties(std::map<std::string, MallocExtension::Property>* result);
#if !TCMALLOC_HAVE_TRACKING
// no tracking, these are all no-ops
inline void Report(TrackingStat stat, size_t cl, ssize_t count) {}
-inline void Print(Printer* out) {}
+inline void Print(Printer* out) {}
inline void Init() {}
inline void GetProperties(
std::map<std::string, MallocExtension::Property>* result) {}
#endif
} // namespace tracking
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_TRACKING_H_
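Note on tracking.h above: when TCMALLOC_HAVE_TRACKING is 0, Report() and Print() are inline no-ops, so call sites such as tracking::Report(kFreeMiss, cl, 1) compile away. The self-contained sketch below shows that compile-time switch with an illustrative macro and counter array of its own, not the tcmalloc definitions.

// Compile-time on/off stats counter pattern.
#include <atomic>
#include <cstddef>
#include <cstdio>

#define SKETCH_HAVE_TRACKING 1

enum TrackingStat { kFreeMiss = 0, kNumTrackingStats = 1 };

#if SKETCH_HAVE_TRACKING
static std::atomic<long> g_stats[kNumTrackingStats];
inline void Report(TrackingStat stat, std::size_t /*cl*/, long count) {
  g_stats[stat].fetch_add(count, std::memory_order_relaxed);
}
#else
inline void Report(TrackingStat, std::size_t, long) {}  // no-op when tracking is disabled
#endif

int main() {
  Report(kFreeMiss, 7, 1);
  Report(kFreeMiss, 7, 1);
#if SKETCH_HAVE_TRACKING
  std::printf("kFreeMiss = %ld\n", g_stats[kFreeMiss].load());
#endif
}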
diff --git a/contrib/libs/tcmalloc/tcmalloc/transfer_cache.cc b/contrib/libs/tcmalloc/tcmalloc/transfer_cache.cc
index efde485288..9138af43f8 100644
--- a/contrib/libs/tcmalloc/tcmalloc/transfer_cache.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/transfer_cache.cc
@@ -14,7 +14,7 @@
#include "tcmalloc/transfer_cache.h"
-#include <fcntl.h>
+#include <fcntl.h>
#include <string.h>
#include <algorithm>
@@ -24,128 +24,128 @@
#include "tcmalloc/common.h"
#include "tcmalloc/experiment.h"
#include "tcmalloc/guarded_page_allocator.h"
-#include "tcmalloc/internal/cache_topology.h"
-#include "tcmalloc/internal/environment.h"
+#include "tcmalloc/internal/cache_topology.h"
+#include "tcmalloc/internal/environment.h"
#include "tcmalloc/internal/linked_list.h"
#include "tcmalloc/internal/logging.h"
-#include "tcmalloc/internal/optimization.h"
-#include "tcmalloc/internal/util.h"
+#include "tcmalloc/internal/optimization.h"
+#include "tcmalloc/internal/util.h"
#include "tcmalloc/static_vars.h"
#include "tcmalloc/tracking.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
-
-absl::string_view TransferCacheImplementationToLabel(
- TransferCacheImplementation type) {
- switch (type) {
- case TransferCacheImplementation::Legacy:
- return "LEGACY";
- case TransferCacheImplementation::None:
- return "NO_TRANSFERCACHE";
- case TransferCacheImplementation::Ring:
- return "RING";
- default:
- ASSUME(false);
- }
-}
-
+namespace tcmalloc_internal {
+
+absl::string_view TransferCacheImplementationToLabel(
+ TransferCacheImplementation type) {
+ switch (type) {
+ case TransferCacheImplementation::Legacy:
+ return "LEGACY";
+ case TransferCacheImplementation::None:
+ return "NO_TRANSFERCACHE";
+ case TransferCacheImplementation::Ring:
+ return "RING";
+ default:
+ ASSUME(false);
+ }
+}
+
#ifndef TCMALLOC_SMALL_BUT_SLOW
-size_t StaticForwarder::class_to_size(int size_class) {
+size_t StaticForwarder::class_to_size(int size_class) {
return Static::sizemap().class_to_size(size_class);
}
-size_t StaticForwarder::num_objects_to_move(int size_class) {
+size_t StaticForwarder::num_objects_to_move(int size_class) {
return Static::sizemap().num_objects_to_move(size_class);
}
-void *StaticForwarder::Alloc(size_t size, int alignment) {
- return Static::arena().Alloc(size, alignment);
-}
-
-void ShardedTransferCacheManager::Init() {
- if (!IsExperimentActive(
- Experiment::TEST_ONLY_TCMALLOC_SHARDED_TRANSFER_CACHE)) {
- return;
- }
- num_shards_ = BuildCpuToL3CacheMap(l3_cache_index_);
- cache_ = reinterpret_cast<Cache *>(Static::arena().Alloc(
- sizeof(Cache) * kNumClasses * num_shards_, ABSL_CACHELINE_SIZE));
- ASSERT(cache_ != nullptr);
- for (int shard = 0; shard < num_shards_; ++shard) {
- for (int cl = 0; cl < kNumClasses; ++cl) {
- const int index = shard * kNumClasses + cl;
- const int size_per_object = Static::sizemap().class_to_size(cl);
- static constexpr int k12MB = 12 << 20;
- static constexpr int min_size = 4096;
- const int use_this_size_class = size_per_object >= min_size;
- const int capacity = use_this_size_class ? k12MB / size_per_object : 0;
- active_for_class_[cl] = use_this_size_class;
- new (&cache_[index].tc)
- TransferCache(nullptr, capacity > 0 ? cl : 0, {capacity, capacity});
- cache_[index].tc.freelist().Init(cl);
- }
- }
-}
-
-size_t ShardedTransferCacheManager::TotalBytes() {
- if (cache_ == nullptr) return 0;
- size_t out = 0;
- for (int shard = 0; shard < num_shards_; ++shard) {
- for (int cl = 0; cl < kNumClasses; ++cl) {
- const int bytes_per_entry = Static::sizemap().class_to_size(cl);
- if (bytes_per_entry <= 0) continue;
- const int index = shard * kNumClasses + cl;
- out += cache_[index].tc.tc_length() * bytes_per_entry;
- }
- }
- return out;
-}
-
-void ShardedTransferCacheManager::BackingTransferCache::InsertRange(
- absl::Span<void *> batch) const {
- Static::transfer_cache().InsertRange(size_class_, batch);
-}
-
-ABSL_MUST_USE_RESULT int
-ShardedTransferCacheManager::BackingTransferCache::RemoveRange(void **batch,
- int n) const {
- return Static::transfer_cache().RemoveRange(size_class_, batch, n);
-}
-
-TransferCacheImplementation TransferCacheManager::ChooseImplementation() {
- // Prefer ring, if we're forcing it on.
- if (IsExperimentActive(
- Experiment::TEST_ONLY_TCMALLOC_RING_BUFFER_TRANSFER_CACHE)) {
- return TransferCacheImplementation::Ring;
- }
-
- // Consider opt-outs
- const char *e = thread_safe_getenv("TCMALLOC_INTERNAL_TRANSFERCACHE_CONTROL");
- if (e) {
- if (e[0] == '0') {
- return TransferCacheImplementation::Legacy;
- }
- if (e[0] == '1') {
- return TransferCacheImplementation::Ring;
- }
- Crash(kCrash, __FILE__, __LINE__, "bad env var", e);
- }
-
- // Otherwise, default to ring.
- return TransferCacheImplementation::Ring;
+void *StaticForwarder::Alloc(size_t size, int alignment) {
+ return Static::arena().Alloc(size, alignment);
}
+void ShardedTransferCacheManager::Init() {
+ if (!IsExperimentActive(
+ Experiment::TEST_ONLY_TCMALLOC_SHARDED_TRANSFER_CACHE)) {
+ return;
+ }
+ num_shards_ = BuildCpuToL3CacheMap(l3_cache_index_);
+ cache_ = reinterpret_cast<Cache *>(Static::arena().Alloc(
+ sizeof(Cache) * kNumClasses * num_shards_, ABSL_CACHELINE_SIZE));
+ ASSERT(cache_ != nullptr);
+ for (int shard = 0; shard < num_shards_; ++shard) {
+ for (int cl = 0; cl < kNumClasses; ++cl) {
+ const int index = shard * kNumClasses + cl;
+ const int size_per_object = Static::sizemap().class_to_size(cl);
+ static constexpr int k12MB = 12 << 20;
+ static constexpr int min_size = 4096;
+ const int use_this_size_class = size_per_object >= min_size;
+ const int capacity = use_this_size_class ? k12MB / size_per_object : 0;
+ active_for_class_[cl] = use_this_size_class;
+ new (&cache_[index].tc)
+ TransferCache(nullptr, capacity > 0 ? cl : 0, {capacity, capacity});
+ cache_[index].tc.freelist().Init(cl);
+ }
+ }
+}
+
+size_t ShardedTransferCacheManager::TotalBytes() {
+ if (cache_ == nullptr) return 0;
+ size_t out = 0;
+ for (int shard = 0; shard < num_shards_; ++shard) {
+ for (int cl = 0; cl < kNumClasses; ++cl) {
+ const int bytes_per_entry = Static::sizemap().class_to_size(cl);
+ if (bytes_per_entry <= 0) continue;
+ const int index = shard * kNumClasses + cl;
+ out += cache_[index].tc.tc_length() * bytes_per_entry;
+ }
+ }
+ return out;
+}
+
+void ShardedTransferCacheManager::BackingTransferCache::InsertRange(
+ absl::Span<void *> batch) const {
+ Static::transfer_cache().InsertRange(size_class_, batch);
+}
+
+ABSL_MUST_USE_RESULT int
+ShardedTransferCacheManager::BackingTransferCache::RemoveRange(void **batch,
+ int n) const {
+ return Static::transfer_cache().RemoveRange(size_class_, batch, n);
+}
+
+TransferCacheImplementation TransferCacheManager::ChooseImplementation() {
+ // Prefer ring, if we're forcing it on.
+ if (IsExperimentActive(
+ Experiment::TEST_ONLY_TCMALLOC_RING_BUFFER_TRANSFER_CACHE)) {
+ return TransferCacheImplementation::Ring;
+ }
+
+ // Consider opt-outs
+ const char *e = thread_safe_getenv("TCMALLOC_INTERNAL_TRANSFERCACHE_CONTROL");
+ if (e) {
+ if (e[0] == '0') {
+ return TransferCacheImplementation::Legacy;
+ }
+ if (e[0] == '1') {
+ return TransferCacheImplementation::Ring;
+ }
+ Crash(kCrash, __FILE__, __LINE__, "bad env var", e);
+ }
+
+ // Otherwise, default to ring.
+ return TransferCacheImplementation::Ring;
+}
+
int TransferCacheManager::DetermineSizeClassToEvict() {
int t = next_to_evict_.load(std::memory_order_relaxed);
if (t >= kNumClasses) t = 1;
next_to_evict_.store(t + 1, std::memory_order_relaxed);
// Ask nicely first.
- if (implementation_ == TransferCacheImplementation::Ring) {
- if (cache_[t].rbtc.HasSpareCapacity(t)) return t;
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ if (cache_[t].rbtc.HasSpareCapacity(t)) return t;
} else {
- if (cache_[t].tc.HasSpareCapacity(t)) return t;
+ if (cache_[t].tc.HasSpareCapacity(t)) return t;
}
// But insist on the second try.
@@ -156,7 +156,7 @@ int TransferCacheManager::DetermineSizeClassToEvict() {
}
#endif
-
-} // namespace tcmalloc_internal
+
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
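Note on transfer_cache.cc above: ShardedTransferCacheManager::Init() gives each (shard, class) cache a capacity of a 12 MiB budget divided by the object size, and only for classes of at least 4096 bytes; TransferCacheManager::ChooseImplementation() honors TCMALLOC_INTERNAL_TRANSFERCACHE_CONTROL ('0' legacy, '1' ring, default ring). The standalone sketch below reproduces just that arithmetic and a simplified version of the env-var parsing; the real code additionally crashes on unrecognized values.

// Capacity arithmetic and implementation selection, simplified.
#include <cstdio>
#include <cstdlib>

enum class Impl { Legacy, Ring };

static Impl ChooseImpl() {
  // "0" forces the legacy cache, anything else (or unset) defaults to ring here.
  const char* e = std::getenv("TCMALLOC_INTERNAL_TRANSFERCACHE_CONTROL");
  if (e != nullptr && e[0] == '0') return Impl::Legacy;
  return Impl::Ring;
}

int main() {
  constexpr int k12MB = 12 << 20;  // per-shard, per-class budget from Init()
  constexpr int kMinSize = 4096;   // classes below this get capacity 0
  const int sizes[] = {1024, 4096, 8192, 65536};
  for (int size : sizes) {
    const int capacity = size >= kMinSize ? k12MB / size : 0;
    std::printf("object size %6d -> capacity %5d\n", size, capacity);
  }
  std::printf("implementation: %s\n", ChooseImpl() == Impl::Ring ? "RING" : "LEGACY");
}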
diff --git a/contrib/libs/tcmalloc/tcmalloc/transfer_cache.h b/contrib/libs/tcmalloc/tcmalloc/transfer_cache.h
index 8b47eefafb..a63fb23beb 100644
--- a/contrib/libs/tcmalloc/tcmalloc/transfer_cache.h
+++ b/contrib/libs/tcmalloc/tcmalloc/transfer_cache.h
@@ -19,7 +19,7 @@
#include <stdint.h>
#include <atomic>
-#include <limits>
+#include <limits>
#include <utility>
#include "absl/base/attributes.h"
@@ -30,249 +30,249 @@
#include "absl/types/span.h"
#include "tcmalloc/central_freelist.h"
#include "tcmalloc/common.h"
-#include "tcmalloc/internal/logging.h"
+#include "tcmalloc/internal/logging.h"
#include "tcmalloc/transfer_cache_stats.h"
#ifndef TCMALLOC_SMALL_BUT_SLOW
#include "tcmalloc/transfer_cache_internals.h"
#endif
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
-
-enum class TransferCacheImplementation {
- Legacy,
- None,
- Ring,
-};
-
-absl::string_view TransferCacheImplementationToLabel(
- TransferCacheImplementation type);
-
+namespace tcmalloc_internal {
+
+enum class TransferCacheImplementation {
+ Legacy,
+ None,
+ Ring,
+};
+
+absl::string_view TransferCacheImplementationToLabel(
+ TransferCacheImplementation type);
+
#ifndef TCMALLOC_SMALL_BUT_SLOW
-class StaticForwarder {
- public:
- static size_t class_to_size(int size_class);
- static size_t num_objects_to_move(int size_class);
- static void *Alloc(size_t size, int alignment = kAlignment)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
-};
-
-// This transfer-cache is set up to be sharded per L3 cache. It is backed by
-// the non-sharded "normal" TransferCacheManager.
-class ShardedTransferCacheManager {
- public:
- constexpr ShardedTransferCacheManager() {}
-
- void Init() ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
-
- bool should_use(int cl) const { return active_for_class_[cl]; }
-
- size_t TotalBytes();
-
- void *Pop(int cl) {
- void *batch[1];
- const int got = cache_[get_index(cl)].tc.RemoveRange(cl, batch, 1);
- return got == 1 ? batch[0] : nullptr;
- }
-
- void Push(int cl, void *ptr) {
- cache_[get_index(cl)].tc.InsertRange(cl, {&ptr, 1});
- }
-
- // All caches not touched since last attempt will return all objects
- // to the non-sharded TransferCache.
- void Plunder() {
- if (cache_ == nullptr || num_shards_ == 0) return;
- for (int i = 0; i < num_shards_ * kNumClasses; ++i) {
- cache_[i].tc.TryPlunder(cache_[i].tc.freelist().size_class());
- }
- }
-
- private:
- // The Manager is set up so that stealing is disabled for this TransferCache.
- class Manager : public StaticForwarder {
- public:
- static constexpr int DetermineSizeClassToEvict() { return -1; }
- static constexpr bool MakeCacheSpace(int) { return false; }
- static constexpr bool ShrinkCache(int) { return false; }
- };
-
- // Forwards calls to the unsharded TransferCache.
- class BackingTransferCache {
- public:
- void Init(int cl) { size_class_ = cl; }
- void InsertRange(absl::Span<void *> batch) const;
- ABSL_MUST_USE_RESULT int RemoveRange(void **batch, int n) const;
- int size_class() const { return size_class_; }
-
- private:
- int size_class_ = -1;
- };
-
- using TransferCache =
- internal_transfer_cache::RingBufferTransferCache<BackingTransferCache,
- Manager>;
-
- union Cache {
- constexpr Cache() : dummy(false) {}
- ~Cache() {}
- TransferCache tc;
- bool dummy;
- };
-
- int get_index(int cl) {
- const int cpu = tcmalloc::tcmalloc_internal::subtle::percpu::RseqCpuId();
- ASSERT(cpu < 256);
- ASSERT(cpu >= 0);
- return get_index(cpu, cl);
- }
-
- int get_index(int cpu, int cl) {
- const int shard = l3_cache_index_[cpu];
- ASSERT(shard < num_shards_);
- const int index = shard * kNumClasses + cl;
- ASSERT(index < num_shards_ * kNumClasses);
- return index;
- }
-
- // Mapping from cpu to the L3 cache used.
- uint8_t l3_cache_index_[CPU_SETSIZE] = {0};
-
- Cache *cache_ = nullptr;
- int num_shards_ = 0;
- bool active_for_class_[kNumClasses] = {false};
-};
-
-class TransferCacheManager : public StaticForwarder {
+class StaticForwarder {
+ public:
+ static size_t class_to_size(int size_class);
+ static size_t num_objects_to_move(int size_class);
+ static void *Alloc(size_t size, int alignment = kAlignment)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
+};
+
+// This transfer-cache is set up to be sharded per L3 cache. It is backed by
+// the non-sharded "normal" TransferCacheManager.
+class ShardedTransferCacheManager {
+ public:
+ constexpr ShardedTransferCacheManager() {}
+
+ void Init() ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
+
+ bool should_use(int cl) const { return active_for_class_[cl]; }
+
+ size_t TotalBytes();
+
+ void *Pop(int cl) {
+ void *batch[1];
+ const int got = cache_[get_index(cl)].tc.RemoveRange(cl, batch, 1);
+ return got == 1 ? batch[0] : nullptr;
+ }
+
+ void Push(int cl, void *ptr) {
+ cache_[get_index(cl)].tc.InsertRange(cl, {&ptr, 1});
+ }
+
+ // All caches not touched since last attempt will return all objects
+ // to the non-sharded TransferCache.
+ void Plunder() {
+ if (cache_ == nullptr || num_shards_ == 0) return;
+ for (int i = 0; i < num_shards_ * kNumClasses; ++i) {
+ cache_[i].tc.TryPlunder(cache_[i].tc.freelist().size_class());
+ }
+ }
+
+ private:
+ // The Manager is set up so that stealing is disabled for this TransferCache.
+ class Manager : public StaticForwarder {
+ public:
+ static constexpr int DetermineSizeClassToEvict() { return -1; }
+ static constexpr bool MakeCacheSpace(int) { return false; }
+ static constexpr bool ShrinkCache(int) { return false; }
+ };
+
+ // Forwards calls to the unsharded TransferCache.
+ class BackingTransferCache {
+ public:
+ void Init(int cl) { size_class_ = cl; }
+ void InsertRange(absl::Span<void *> batch) const;
+ ABSL_MUST_USE_RESULT int RemoveRange(void **batch, int n) const;
+ int size_class() const { return size_class_; }
+
+ private:
+ int size_class_ = -1;
+ };
+
+ using TransferCache =
+ internal_transfer_cache::RingBufferTransferCache<BackingTransferCache,
+ Manager>;
+
+ union Cache {
+ constexpr Cache() : dummy(false) {}
+ ~Cache() {}
+ TransferCache tc;
+ bool dummy;
+ };
+
+ int get_index(int cl) {
+ const int cpu = tcmalloc::tcmalloc_internal::subtle::percpu::RseqCpuId();
+ ASSERT(cpu < 256);
+ ASSERT(cpu >= 0);
+ return get_index(cpu, cl);
+ }
+
+ int get_index(int cpu, int cl) {
+ const int shard = l3_cache_index_[cpu];
+ ASSERT(shard < num_shards_);
+ const int index = shard * kNumClasses + cl;
+ ASSERT(index < num_shards_ * kNumClasses);
+ return index;
+ }
+
+ // Mapping from cpu to the L3 cache used.
+ uint8_t l3_cache_index_[CPU_SETSIZE] = {0};
+
+ Cache *cache_ = nullptr;
+ int num_shards_ = 0;
+ bool active_for_class_[kNumClasses] = {false};
+};
+
+class TransferCacheManager : public StaticForwarder {
template <typename CentralFreeList, typename Manager>
friend class internal_transfer_cache::TransferCache;
using TransferCache =
- internal_transfer_cache::TransferCache<tcmalloc_internal::CentralFreeList,
+ internal_transfer_cache::TransferCache<tcmalloc_internal::CentralFreeList,
TransferCacheManager>;
template <typename CentralFreeList, typename Manager>
- friend class internal_transfer_cache::RingBufferTransferCache;
- using RingBufferTransferCache =
- internal_transfer_cache::RingBufferTransferCache<
- tcmalloc_internal::CentralFreeList, TransferCacheManager>;
+ friend class internal_transfer_cache::RingBufferTransferCache;
+ using RingBufferTransferCache =
+ internal_transfer_cache::RingBufferTransferCache<
+ tcmalloc_internal::CentralFreeList, TransferCacheManager>;
public:
- constexpr TransferCacheManager() : next_to_evict_(1) {}
+ constexpr TransferCacheManager() : next_to_evict_(1) {}
TransferCacheManager(const TransferCacheManager &) = delete;
TransferCacheManager &operator=(const TransferCacheManager &) = delete;
void Init() ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) {
- implementation_ = ChooseImplementation();
- for (int i = 0; i < kNumClasses; ++i) {
- if (implementation_ == TransferCacheImplementation::Ring) {
- new (&cache_[i].rbtc) RingBufferTransferCache(this, i);
- } else {
- new (&cache_[i].tc) TransferCache(this, i);
- }
- }
- }
-
- void AcquireInternalLocks() {
+ implementation_ = ChooseImplementation();
for (int i = 0; i < kNumClasses; ++i) {
- if (implementation_ == TransferCacheImplementation::Ring) {
- cache_[i].rbtc.AcquireInternalLocks();
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ new (&cache_[i].rbtc) RingBufferTransferCache(this, i);
} else {
- cache_[i].tc.AcquireInternalLocks();
+ new (&cache_[i].tc) TransferCache(this, i);
}
}
}
- void ReleaseInternalLocks() {
- for (int i = 0; i < kNumClasses; ++i) {
- if (implementation_ == TransferCacheImplementation::Ring) {
- cache_[i].rbtc.ReleaseInternalLocks();
- } else {
- cache_[i].tc.ReleaseInternalLocks();
- }
- }
- }
-
- void InsertRange(int size_class, absl::Span<void *> batch) {
- if (implementation_ == TransferCacheImplementation::Ring) {
- cache_[size_class].rbtc.InsertRange(size_class, batch);
- } else {
- cache_[size_class].tc.InsertRange(size_class, batch);
- }
+ void AcquireInternalLocks() {
+ for (int i = 0; i < kNumClasses; ++i) {
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ cache_[i].rbtc.AcquireInternalLocks();
+ } else {
+ cache_[i].tc.AcquireInternalLocks();
+ }
+ }
+ }
+
+ void ReleaseInternalLocks() {
+ for (int i = 0; i < kNumClasses; ++i) {
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ cache_[i].rbtc.ReleaseInternalLocks();
+ } else {
+ cache_[i].tc.ReleaseInternalLocks();
+ }
+ }
+ }
+
+ void InsertRange(int size_class, absl::Span<void *> batch) {
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ cache_[size_class].rbtc.InsertRange(size_class, batch);
+ } else {
+ cache_[size_class].tc.InsertRange(size_class, batch);
+ }
}
ABSL_MUST_USE_RESULT int RemoveRange(int size_class, void **batch, int n) {
- if (implementation_ == TransferCacheImplementation::Ring) {
- return cache_[size_class].rbtc.RemoveRange(size_class, batch, n);
- } else {
- return cache_[size_class].tc.RemoveRange(size_class, batch, n);
- }
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ return cache_[size_class].rbtc.RemoveRange(size_class, batch, n);
+ } else {
+ return cache_[size_class].tc.RemoveRange(size_class, batch, n);
+ }
}
- // All caches which have not been modified since the last time this method has
- // been called will return all objects to the freelist.
- void Plunder() {
- for (int i = 0; i < kNumClasses; ++i) {
- if (implementation_ == TransferCacheImplementation::Ring) {
- cache_[i].rbtc.TryPlunder(i);
- } else {
- cache_[i].tc.TryPlunder(i);
- }
- }
+ // All caches which have not been modified since the last time this method has
+ // been called will return all objects to the freelist.
+ void Plunder() {
+ for (int i = 0; i < kNumClasses; ++i) {
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ cache_[i].rbtc.TryPlunder(i);
+ } else {
+ cache_[i].tc.TryPlunder(i);
+ }
+ }
}
- // This is not const because the underlying ring-buffer transfer cache
- // function requires acquiring a lock.
+ // This is not const because the underlying ring-buffer transfer cache
+ // function requires acquiring a lock.
size_t tc_length(int size_class) {
- if (implementation_ == TransferCacheImplementation::Ring) {
- return cache_[size_class].rbtc.tc_length();
- } else {
- return cache_[size_class].tc.tc_length();
- }
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ return cache_[size_class].rbtc.tc_length();
+ } else {
+ return cache_[size_class].tc.tc_length();
+ }
}
- TransferCacheStats GetHitRateStats(int size_class) const {
- if (implementation_ == TransferCacheImplementation::Ring) {
- return cache_[size_class].rbtc.GetHitRateStats();
- } else {
- return cache_[size_class].tc.GetHitRateStats();
- }
+ TransferCacheStats GetHitRateStats(int size_class) const {
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ return cache_[size_class].rbtc.GetHitRateStats();
+ } else {
+ return cache_[size_class].tc.GetHitRateStats();
+ }
}
- const CentralFreeList &central_freelist(int size_class) const {
- if (implementation_ == TransferCacheImplementation::Ring) {
- return cache_[size_class].rbtc.freelist();
- } else {
- return cache_[size_class].tc.freelist();
- }
+ const CentralFreeList &central_freelist(int size_class) const {
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ return cache_[size_class].rbtc.freelist();
+ } else {
+ return cache_[size_class].tc.freelist();
+ }
}
- TransferCacheImplementation implementation() const { return implementation_; }
-
+ TransferCacheImplementation implementation() const { return implementation_; }
+
private:
- static TransferCacheImplementation ChooseImplementation();
-
+ static TransferCacheImplementation ChooseImplementation();
+
int DetermineSizeClassToEvict();
bool ShrinkCache(int size_class) {
- if (implementation_ == TransferCacheImplementation::Ring) {
- return cache_[size_class].rbtc.ShrinkCache(size_class);
- } else {
- return cache_[size_class].tc.ShrinkCache(size_class);
- }
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ return cache_[size_class].rbtc.ShrinkCache(size_class);
+ } else {
+ return cache_[size_class].tc.ShrinkCache(size_class);
+ }
}
- TransferCacheImplementation implementation_ =
- TransferCacheImplementation::Legacy;
+ TransferCacheImplementation implementation_ =
+ TransferCacheImplementation::Legacy;
std::atomic<int32_t> next_to_evict_;
union Cache {
constexpr Cache() : dummy(false) {}
~Cache() {}
- TransferCache tc;
- RingBufferTransferCache rbtc;
+ TransferCache tc;
+ RingBufferTransferCache rbtc;
bool dummy;
};
Cache cache_[kNumClasses];
@@ -293,49 +293,49 @@ class TransferCacheManager {
}
}
- void InsertRange(int size_class, absl::Span<void *> batch) {
- freelist_[size_class].InsertRange(batch);
+ void InsertRange(int size_class, absl::Span<void *> batch) {
+ freelist_[size_class].InsertRange(batch);
}
ABSL_MUST_USE_RESULT int RemoveRange(int size_class, void **batch, int n) {
return freelist_[size_class].RemoveRange(batch, n);
}
- static constexpr size_t tc_length(int size_class) { return 0; }
-
- static constexpr TransferCacheStats GetHitRateStats(int size_class) {
- return {0, 0, 0, 0};
- }
+ static constexpr size_t tc_length(int size_class) { return 0; }
- const CentralFreeList &central_freelist(int size_class) const {
- return freelist_[size_class];
+ static constexpr TransferCacheStats GetHitRateStats(int size_class) {
+ return {0, 0, 0, 0};
}
- TransferCacheImplementation implementation() const {
- return TransferCacheImplementation::None;
+ const CentralFreeList &central_freelist(int size_class) const {
+ return freelist_[size_class];
}
- void AcquireInternalLocks() {}
- void ReleaseInternalLocks() {}
-
+ TransferCacheImplementation implementation() const {
+ return TransferCacheImplementation::None;
+ }
+
+ void AcquireInternalLocks() {}
+ void ReleaseInternalLocks() {}
+
private:
CentralFreeList freelist_[kNumClasses];
} ABSL_CACHELINE_ALIGNED;
-// A trivial no-op implementation.
-struct ShardedTransferCacheManager {
- static constexpr void Init() {}
- static constexpr bool should_use(int cl) { return false; }
- static constexpr void *Pop(int cl) { return nullptr; }
- static constexpr void Push(int cl, void *ptr) {}
- static constexpr size_t TotalBytes() { return 0; }
- static constexpr void Plunder() {}
-};
-
+// A trivial no-op implementation.
+struct ShardedTransferCacheManager {
+ static constexpr void Init() {}
+ static constexpr bool should_use(int cl) { return false; }
+ static constexpr void *Pop(int cl) { return nullptr; }
+ static constexpr void Push(int cl, void *ptr) {}
+ static constexpr size_t TotalBytes() { return 0; }
+ static constexpr void Plunder() {}
+};
+
#endif
-
-} // namespace tcmalloc_internal
+
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_TRANSFER_CACHE_H_
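The TransferCacheManager hunk above stores one union per size class and picks between the legacy and ring-buffer members by branching on `implementation_` at every call site. A minimal sketch of that tagged-union dispatch, using invented stand-in types rather than the tcmalloc ones:

#include <cstdio>
#include <new>

enum class Impl { Legacy, Ring };

struct LegacyCache { int tc_length() const { return 1; } };
struct RingCache   { int tc_length() const { return 2; } };

class ToyManager {
 public:
  explicit ToyManager(Impl impl) : impl_(impl) {
    // Exactly one union member is ever constructed, chosen once at startup.
    if (impl_ == Impl::Ring) {
      new (&cache_.ring) RingCache();
    } else {
      new (&cache_.legacy) LegacyCache();
    }
  }
  int tc_length() const {
    // Every accessor branches on the tag and forwards to the live member,
    // the same shape as the manager methods in the diff above.
    return impl_ == Impl::Ring ? cache_.ring.tc_length()
                               : cache_.legacy.tc_length();
  }

 private:
  Impl impl_;
  union Cache {
    Cache() : dummy(false) {}
    ~Cache() {}
    LegacyCache legacy;
    RingCache ring;
    bool dummy;
  } cache_;
};

int main() {
  ToyManager m(Impl::Ring);
  std::printf("tc_length = %d\n", m.tc_length());  // prints 2
  return 0;
}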
diff --git a/contrib/libs/tcmalloc/tcmalloc/transfer_cache_benchmark.cc b/contrib/libs/tcmalloc/tcmalloc/transfer_cache_benchmark.cc
index 70b1dcffc1..365366cca8 100644
--- a/contrib/libs/tcmalloc/tcmalloc/transfer_cache_benchmark.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/transfer_cache_benchmark.cc
@@ -23,18 +23,18 @@
#include "tcmalloc/transfer_cache_internals.h"
#include "tcmalloc/transfer_cache_stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
using TransferCacheEnv =
FakeTransferCacheEnvironment<internal_transfer_cache::TransferCache<
MinimalFakeCentralFreeList, FakeTransferCacheManager>>;
-using RingBufferTransferCacheEnv = FakeTransferCacheEnvironment<
- internal_transfer_cache::RingBufferTransferCache<MinimalFakeCentralFreeList,
- FakeTransferCacheManager>>;
-static constexpr int kSizeClass = 0;
+using RingBufferTransferCacheEnv = FakeTransferCacheEnvironment<
+ internal_transfer_cache::RingBufferTransferCache<MinimalFakeCentralFreeList,
+ FakeTransferCacheManager>>;
+static constexpr int kSizeClass = 0;
template <typename Env>
void BM_CrossThread(benchmark::State& state) {
@@ -44,21 +44,21 @@ void BM_CrossThread(benchmark::State& state) {
void* batch[kMaxObjectsToMove];
struct CrossThreadState {
- CrossThreadState() : m{}, c{Cache(&m, 1), Cache(&m, 1)} {}
- FakeTransferCacheManager m;
+ CrossThreadState() : m{}, c{Cache(&m, 1), Cache(&m, 1)} {}
+ FakeTransferCacheManager m;
Cache c[2];
};
static CrossThreadState* s = nullptr;
if (state.thread_index == 0) {
s = new CrossThreadState();
- for (int i = 0; i < ::tcmalloc::tcmalloc_internal::internal_transfer_cache::
- kInitialCapacityInBatches /
- 2;
- ++i) {
+ for (int i = 0; i < ::tcmalloc::tcmalloc_internal::internal_transfer_cache::
+ kInitialCapacityInBatches /
+ 2;
+ ++i) {
for (Cache& c : s->c) {
c.freelist().AllocateBatch(batch, kBatchSize);
- c.InsertRange(kSizeClass, {batch, kBatchSize});
+ c.InsertRange(kSizeClass, {batch, kBatchSize});
}
}
}
@@ -67,9 +67,9 @@ void BM_CrossThread(benchmark::State& state) {
int dst = (src + 1) % 2;
for (auto iter : state) {
benchmark::DoNotOptimize(batch);
- (void)s->c[src].RemoveRange(kSizeClass, batch, kBatchSize);
+ (void)s->c[src].RemoveRange(kSizeClass, batch, kBatchSize);
benchmark::DoNotOptimize(batch);
- s->c[dst].InsertRange(kSizeClass, {batch, kBatchSize});
+ s->c[dst].InsertRange(kSizeClass, {batch, kBatchSize});
benchmark::DoNotOptimize(batch);
}
if (state.thread_index == 0) {
@@ -110,7 +110,7 @@ void BM_InsertRange(benchmark::State& state) {
benchmark::DoNotOptimize(batch);
state.ResumeTiming();
- e->transfer_cache().InsertRange(kSizeClass, {batch, kBatchSize});
+ e->transfer_cache().InsertRange(kSizeClass, {batch, kBatchSize});
}
}
@@ -130,20 +130,20 @@ void BM_RemoveRange(benchmark::State& state) {
benchmark::DoNotOptimize(e);
state.ResumeTiming();
- (void)e->transfer_cache().RemoveRange(kSizeClass, batch, kBatchSize);
+ (void)e->transfer_cache().RemoveRange(kSizeClass, batch, kBatchSize);
benchmark::DoNotOptimize(batch);
}
}
BENCHMARK_TEMPLATE(BM_CrossThread, TransferCacheEnv)->ThreadRange(2, 64);
-BENCHMARK_TEMPLATE(BM_CrossThread, RingBufferTransferCacheEnv)
- ->ThreadRange(2, 64);
+BENCHMARK_TEMPLATE(BM_CrossThread, RingBufferTransferCacheEnv)
+ ->ThreadRange(2, 64);
BENCHMARK_TEMPLATE(BM_InsertRange, TransferCacheEnv);
-BENCHMARK_TEMPLATE(BM_InsertRange, RingBufferTransferCacheEnv);
+BENCHMARK_TEMPLATE(BM_InsertRange, RingBufferTransferCacheEnv);
BENCHMARK_TEMPLATE(BM_RemoveRange, TransferCacheEnv);
-BENCHMARK_TEMPLATE(BM_RemoveRange, RingBufferTransferCacheEnv);
+BENCHMARK_TEMPLATE(BM_RemoveRange, RingBufferTransferCacheEnv);
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
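BM_CrossThread above seeds one cache and then has every thread pop a batch from one cache and push it into the other. The toy below reproduces only that ping-pong loop, with a mutex-guarded vector standing in for the transfer cache; names, sizes, and iteration counts are made up.

#include <algorithm>
#include <mutex>
#include <thread>
#include <vector>

constexpr int kBatch = 32;

struct ToyCache {
  std::mutex mu;
  std::vector<void*> slots;

  void InsertRange(void* const* batch, int n) {
    std::lock_guard<std::mutex> lock(mu);
    slots.insert(slots.end(), batch, batch + n);
  }
  int RemoveRange(void** batch, int n) {
    std::lock_guard<std::mutex> lock(mu);
    const int take = std::min<int>(n, slots.size());
    for (int i = 0; i < take; ++i) {
      batch[i] = slots.back();
      slots.pop_back();
    }
    return take;
  }
};

int main() {
  int objects[kBatch];
  ToyCache c[2];
  void* batch[kBatch];
  for (int i = 0; i < kBatch; ++i) batch[i] = &objects[i];
  c[0].InsertRange(batch, kBatch);  // seed one cache, as thread 0 does above

  auto worker = [&c](int src) {
    void* local[kBatch];
    const int dst = (src + 1) % 2;
    for (int iter = 0; iter < 100000; ++iter) {
      const int n = c[src].RemoveRange(local, kBatch);
      if (n > 0) c[dst].InsertRange(local, n);
    }
  };
  std::thread t0(worker, 0), t1(worker, 1);
  t0.join();
  t1.join();
  return 0;
}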
diff --git a/contrib/libs/tcmalloc/tcmalloc/transfer_cache_fuzz.cc b/contrib/libs/tcmalloc/tcmalloc/transfer_cache_fuzz.cc
index a31b06135e..5b5364ccb1 100644
--- a/contrib/libs/tcmalloc/tcmalloc/transfer_cache_fuzz.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/transfer_cache_fuzz.cc
@@ -19,26 +19,26 @@
#include "tcmalloc/mock_transfer_cache.h"
#include "tcmalloc/transfer_cache_internals.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace {
-using TransferCache = tcmalloc_internal::internal_transfer_cache::TransferCache<
- tcmalloc_internal::MockCentralFreeList,
- tcmalloc_internal::MockTransferCacheManager>;
-using TransferCacheEnv =
- tcmalloc_internal::FakeTransferCacheEnvironment<TransferCache>;
+using TransferCache = tcmalloc_internal::internal_transfer_cache::TransferCache<
+ tcmalloc_internal::MockCentralFreeList,
+ tcmalloc_internal::MockTransferCacheManager>;
+using TransferCacheEnv =
+ tcmalloc_internal::FakeTransferCacheEnvironment<TransferCache>;
-using RingBufferTransferCache =
- tcmalloc_internal::internal_transfer_cache::RingBufferTransferCache<
- tcmalloc_internal::MockCentralFreeList,
- tcmalloc_internal::MockTransferCacheManager>;
-using RingBufferTransferCacheEnv =
- tcmalloc_internal::FakeTransferCacheEnvironment<RingBufferTransferCache>;
+using RingBufferTransferCache =
+ tcmalloc_internal::internal_transfer_cache::RingBufferTransferCache<
+ tcmalloc_internal::MockCentralFreeList,
+ tcmalloc_internal::MockTransferCacheManager>;
+using RingBufferTransferCacheEnv =
+ tcmalloc_internal::FakeTransferCacheEnvironment<RingBufferTransferCache>;
-template <typename Env>
-int RunFuzzer(const uint8_t *data, size_t size) {
- Env env;
+template <typename Env>
+int RunFuzzer(const uint8_t *data, size_t size) {
+ Env env;
for (int i = 0; i < size; ++i) {
switch (data[i] % 10) {
case 0:
@@ -61,13 +61,13 @@ int RunFuzzer(const uint8_t *data, size_t size) {
}
return 0;
}
-
-} // namespace
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
- tcmalloc::RunFuzzer<tcmalloc::TransferCacheEnv>(data, size);
- tcmalloc::RunFuzzer<tcmalloc::RingBufferTransferCacheEnv>(data, size);
- return 0;
-}
+
+} // namespace
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ tcmalloc::RunFuzzer<tcmalloc::TransferCacheEnv>(data, size);
+ tcmalloc::RunFuzzer<tcmalloc::RingBufferTransferCacheEnv>(data, size);
+ return 0;
+}
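The fuzz target above maps each input byte to one operation on a fake transfer-cache environment and runs every input through both implementations. A minimal sketch of that byte-to-operation driver shape; ToyEnv and its three operations are invented stand-ins, while the real fuzzers drive FakeTransferCacheEnvironment.

#include <cstddef>
#include <cstdint>
#include <vector>

namespace {

// Stand-in object under test.
struct ToyEnv {
  std::vector<int> items;
  void Insert() { items.push_back(0); }
  void Remove() { if (!items.empty()) items.pop_back(); }
  void Shrink() { items.shrink_to_fit(); }
};

}  // namespace

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  ToyEnv env;
  for (size_t i = 0; i < size; ++i) {
    // One operation per input byte; the real driver switches over ~10 cases.
    switch (data[i] % 3) {
      case 0: env.Insert(); break;
      case 1: env.Remove(); break;
      case 2: env.Shrink(); break;
    }
  }
  return 0;
}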
diff --git a/contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h b/contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h
index 26d18fd99d..41b017e4ed 100644
--- a/contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h
+++ b/contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h
@@ -19,12 +19,12 @@
#include <stddef.h>
#include <stdint.h>
-#include <cmath>
+#include <cmath>
#include <limits>
-#include "absl/numeric/bits.h"
-#include "tcmalloc/internal/config.h"
-
+#include "absl/numeric/bits.h"
+#include "tcmalloc/internal/config.h"
+
#ifdef __x86_64__
#include <emmintrin.h>
#include <xmmintrin.h>
@@ -48,20 +48,20 @@
#include "tcmalloc/central_freelist.h"
#include "tcmalloc/common.h"
#include "tcmalloc/experiment.h"
-#include "tcmalloc/internal/atomic_stats_counter.h"
+#include "tcmalloc/internal/atomic_stats_counter.h"
#include "tcmalloc/internal/logging.h"
#include "tcmalloc/tracking.h"
#include "tcmalloc/transfer_cache_stats.h"
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc::tcmalloc_internal::internal_transfer_cache {
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc::tcmalloc_internal::internal_transfer_cache {
struct alignas(8) SizeInfo {
int32_t used;
int32_t capacity;
};
-static constexpr int kMaxCapacityInBatches = 64;
-static constexpr int kInitialCapacityInBatches = 16;
+static constexpr int kMaxCapacityInBatches = 64;
+static constexpr int kInitialCapacityInBatches = 16;
// TransferCache is used to cache transfers of
// sizemap.num_objects_to_move(size_class) back and forth between
@@ -72,198 +72,198 @@ class TransferCache {
using Manager = TransferCacheManager;
using FreeList = CentralFreeList;
- TransferCache(Manager *owner, int cl)
- : TransferCache(owner, cl, CapacityNeeded(cl)) {}
-
- struct Capacity {
- int capacity;
- int max_capacity;
- };
+ TransferCache(Manager *owner, int cl)
+ : TransferCache(owner, cl, CapacityNeeded(cl)) {}
- TransferCache(Manager *owner, int cl, Capacity capacity)
+ struct Capacity {
+ int capacity;
+ int max_capacity;
+ };
+
+ TransferCache(Manager *owner, int cl, Capacity capacity)
: owner_(owner),
lock_(absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY),
- max_capacity_(capacity.max_capacity),
- slot_info_(SizeInfo({0, capacity.capacity})),
- low_water_mark_(std::numeric_limits<int>::max()),
+ max_capacity_(capacity.max_capacity),
+ slot_info_(SizeInfo({0, capacity.capacity})),
+ low_water_mark_(std::numeric_limits<int>::max()),
slots_(nullptr),
- freelist_do_not_access_directly_() {
- freelist().Init(cl);
- slots_ = max_capacity_ != 0 ? reinterpret_cast<void **>(owner_->Alloc(
- max_capacity_ * sizeof(void *)))
- : nullptr;
- }
+ freelist_do_not_access_directly_() {
+ freelist().Init(cl);
+ slots_ = max_capacity_ != 0 ? reinterpret_cast<void **>(owner_->Alloc(
+ max_capacity_ * sizeof(void *)))
+ : nullptr;
+ }
TransferCache(const TransferCache &) = delete;
TransferCache &operator=(const TransferCache &) = delete;
- // Compute initial and max capacity that we should configure this cache for.
- static Capacity CapacityNeeded(size_t cl) {
+ // Compute initial and max capacity that we should configure this cache for.
+ static Capacity CapacityNeeded(size_t cl) {
// We need at least 2 slots to store list head and tail.
- static_assert(kMinObjectsToMove >= 2);
-
- const size_t bytes = Manager::class_to_size(cl);
- if (cl <= 0 || bytes <= 0) return {0, 0};
-
- // Limit the maximum size of the cache based on the size class. If this
- // is not done, large size class objects will consume a lot of memory if
- // they just sit in the transfer cache.
- const size_t objs_to_move = Manager::num_objects_to_move(cl);
- ASSERT(objs_to_move > 0);
-
- // Starting point for the maximum number of entries in the transfer cache.
-    // The actual maximum for a given size class may be lower than this
-    // starting value.
- int max_capacity = kMaxCapacityInBatches * objs_to_move;
- // A transfer cache freelist can have anywhere from 0 to
- // max_capacity_ slots to put link list chains into.
- int capacity = kInitialCapacityInBatches * objs_to_move;
-
- // Limit each size class cache to at most 1MB of objects or one entry,
- // whichever is greater. Total transfer cache memory used across all
- // size classes then can't be greater than approximately
- // 1MB * kMaxNumTransferEntries.
- max_capacity = std::min<int>(
- max_capacity,
- std::max<int>(objs_to_move,
- (1024 * 1024) / (bytes * objs_to_move) * objs_to_move));
- capacity = std::min(capacity, max_capacity);
-
- return {capacity, max_capacity};
- }
-
- // This transfercache implementation does not deal well with non-batch sized
- // inserts and removes.
- static constexpr bool IsFlexible() { return false; }
-
+ static_assert(kMinObjectsToMove >= 2);
+
+ const size_t bytes = Manager::class_to_size(cl);
+ if (cl <= 0 || bytes <= 0) return {0, 0};
+
+ // Limit the maximum size of the cache based on the size class. If this
+ // is not done, large size class objects will consume a lot of memory if
+ // they just sit in the transfer cache.
+ const size_t objs_to_move = Manager::num_objects_to_move(cl);
+ ASSERT(objs_to_move > 0);
+
+ // Starting point for the maximum number of entries in the transfer cache.
+    // The actual maximum for a given size class may be lower than this
+    // starting value.
+ int max_capacity = kMaxCapacityInBatches * objs_to_move;
+ // A transfer cache freelist can have anywhere from 0 to
+ // max_capacity_ slots to put link list chains into.
+ int capacity = kInitialCapacityInBatches * objs_to_move;
+
+ // Limit each size class cache to at most 1MB of objects or one entry,
+ // whichever is greater. Total transfer cache memory used across all
+ // size classes then can't be greater than approximately
+ // 1MB * kMaxNumTransferEntries.
+ max_capacity = std::min<int>(
+ max_capacity,
+ std::max<int>(objs_to_move,
+ (1024 * 1024) / (bytes * objs_to_move) * objs_to_move));
+ capacity = std::min(capacity, max_capacity);
+
+ return {capacity, max_capacity};
+ }
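CapacityNeeded above caps each size class at roughly 1MB of cached objects, but never below one batch. The standalone restatement below makes the arithmetic easy to check by hand; the example size class (4096-byte objects, 32 objects per batch) is invented for illustration.

#include <algorithm>
#include <cstdio>

constexpr int kMaxCapacityInBatches = 64;
constexpr int kInitialCapacityInBatches = 16;

struct Capacity { int capacity; int max_capacity; };

Capacity CapacityNeeded(int bytes, int objs_to_move) {
  int max_capacity = kMaxCapacityInBatches * objs_to_move;
  int capacity = kInitialCapacityInBatches * objs_to_move;
  // At most ~1MB of cached objects per size class, but never below one batch.
  max_capacity = std::min<int>(
      max_capacity,
      std::max<int>(objs_to_move,
                    (1024 * 1024) / (bytes * objs_to_move) * objs_to_move));
  capacity = std::min(capacity, max_capacity);
  return {capacity, max_capacity};
}

int main() {
  const Capacity c = CapacityNeeded(4096, 32);
  // 1MB / (4096 * 32) == 8 batches, so both limits land on 8 * 32 = 256.
  std::printf("capacity=%d max=%d\n", c.capacity, c.max_capacity);
  return 0;
}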
+
+ // This transfercache implementation does not deal well with non-batch sized
+ // inserts and removes.
+ static constexpr bool IsFlexible() { return false; }
+
// These methods all do internal locking.
// Insert the specified batch into the transfer cache. N is the number of
// elements in the range. RemoveRange() is the opposite operation.
- void InsertRange(int size_class, absl::Span<void *> batch)
- ABSL_LOCKS_EXCLUDED(lock_) {
- const int N = batch.size();
- const int B = Manager::num_objects_to_move(size_class);
+ void InsertRange(int size_class, absl::Span<void *> batch)
+ ABSL_LOCKS_EXCLUDED(lock_) {
+ const int N = batch.size();
+ const int B = Manager::num_objects_to_move(size_class);
ASSERT(0 < N && N <= B);
auto info = slot_info_.load(std::memory_order_relaxed);
- if (N == B) {
- if (info.used + N <= max_capacity_) {
- absl::base_internal::SpinLockHolder h(&lock_);
- if (MakeCacheSpace(size_class, N)) {
- // MakeCacheSpace can drop the lock, so refetch
- info = slot_info_.load(std::memory_order_relaxed);
- info.used += N;
- SetSlotInfo(info);
-
- void **entry = GetSlot(info.used - N);
- memcpy(entry, batch.data(), sizeof(void *) * N);
- tracking::Report(kTCInsertHit, size_class, 1);
- insert_hits_.LossyAdd(1);
- return;
- }
+ if (N == B) {
+ if (info.used + N <= max_capacity_) {
+ absl::base_internal::SpinLockHolder h(&lock_);
+ if (MakeCacheSpace(size_class, N)) {
+ // MakeCacheSpace can drop the lock, so refetch
+ info = slot_info_.load(std::memory_order_relaxed);
+ info.used += N;
+ SetSlotInfo(info);
+
+ void **entry = GetSlot(info.used - N);
+ memcpy(entry, batch.data(), sizeof(void *) * N);
+ tracking::Report(kTCInsertHit, size_class, 1);
+ insert_hits_.LossyAdd(1);
+ return;
+ }
}
- insert_misses_.Add(1);
- } else {
- insert_non_batch_misses_.Add(1);
+ insert_misses_.Add(1);
+ } else {
+ insert_non_batch_misses_.Add(1);
}
-
- tracking::Report(kTCInsertMiss, size_class, 1);
- freelist().InsertRange(batch);
+
+ tracking::Report(kTCInsertMiss, size_class, 1);
+ freelist().InsertRange(batch);
}
// Returns the actual number of fetched elements and stores elements in the
// batch.
- ABSL_MUST_USE_RESULT int RemoveRange(int size_class, void **batch, int N)
+ ABSL_MUST_USE_RESULT int RemoveRange(int size_class, void **batch, int N)
ABSL_LOCKS_EXCLUDED(lock_) {
ASSERT(N > 0);
- const int B = Manager::num_objects_to_move(size_class);
+ const int B = Manager::num_objects_to_move(size_class);
auto info = slot_info_.load(std::memory_order_relaxed);
- if (N == B) {
+ if (N == B) {
if (info.used >= N) {
- absl::base_internal::SpinLockHolder h(&lock_);
- // Refetch with the lock
- info = slot_info_.load(std::memory_order_relaxed);
- if (info.used >= N) {
- info.used -= N;
- SetSlotInfo(info);
- void **entry = GetSlot(info.used);
- memcpy(batch, entry, sizeof(void *) * N);
- tracking::Report(kTCRemoveHit, size_class, 1);
- remove_hits_.LossyAdd(1);
- low_water_mark_.store(
- std::min(low_water_mark_.load(std::memory_order_acquire),
- info.used),
- std::memory_order_release);
- return N;
- }
+ absl::base_internal::SpinLockHolder h(&lock_);
+ // Refetch with the lock
+ info = slot_info_.load(std::memory_order_relaxed);
+ if (info.used >= N) {
+ info.used -= N;
+ SetSlotInfo(info);
+ void **entry = GetSlot(info.used);
+ memcpy(batch, entry, sizeof(void *) * N);
+ tracking::Report(kTCRemoveHit, size_class, 1);
+ remove_hits_.LossyAdd(1);
+ low_water_mark_.store(
+ std::min(low_water_mark_.load(std::memory_order_acquire),
+ info.used),
+ std::memory_order_release);
+ return N;
+ }
}
- remove_misses_.Add(1);
- } else {
- remove_non_batch_misses_.Add(1);
- }
- low_water_mark_.store(0, std::memory_order_release);
-
- tracking::Report(kTCRemoveMiss, size_class, 1);
- return freelist().RemoveRange(batch, N);
- }
-
- // If this object has not been touched since the last attempt, then
- // return all objects to 'freelist()'.
- void TryPlunder(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
- if (max_capacity_ == 0) return;
- int low_water_mark = low_water_mark_.load(std::memory_order_acquire);
- low_water_mark_.store(std::numeric_limits<int>::max(),
- std::memory_order_release);
- while (low_water_mark > 0) {
- if (!lock_.TryLock()) return;
- if (low_water_mark_.load(std::memory_order_acquire) !=
- std::numeric_limits<int>::max()) {
- lock_.Unlock();
- return;
- }
- const int B = Manager::num_objects_to_move(size_class);
- SizeInfo info = GetSlotInfo();
- if (info.used == 0) {
- lock_.Unlock();
- return;
- }
- const size_t num_to_move = std::min(B, info.used);
- void *buf[kMaxObjectsToMove];
- void **const entry = GetSlot(info.used - B);
- memcpy(buf, entry, sizeof(void *) * B);
- info.used -= num_to_move;
- low_water_mark -= num_to_move;
+ remove_misses_.Add(1);
+ } else {
+ remove_non_batch_misses_.Add(1);
+ }
+ low_water_mark_.store(0, std::memory_order_release);
+
+ tracking::Report(kTCRemoveMiss, size_class, 1);
+ return freelist().RemoveRange(batch, N);
+ }
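RemoveRange above checks `info.used >= N` before taking the spin lock and then re-reads `slot_info_` under the lock, since another thread can drain the cache in between (InsertRange does the mirror-image check on capacity). A stripped-down sketch of that peek-then-recheck pattern; ToyStack, its fixed 64-slot array, and the std::mutex in place of the spin lock are all illustrative.

#include <atomic>
#include <mutex>

struct ToyStack {
  std::atomic<int> used{0};
  std::mutex mu;
  void* slots[64];

  // Pops exactly n pointers, or fails without touching `out`.
  bool TryPopBatch(void** out, int n) {
    // Cheap unlocked peek; it may be stale, so it is only a hint.
    if (used.load(std::memory_order_relaxed) < n) return false;
    std::lock_guard<std::mutex> lock(mu);
    int u = used.load(std::memory_order_relaxed);
    if (u < n) return false;  // recheck: someone drained it before we locked
    u -= n;
    for (int i = 0; i < n; ++i) out[i] = slots[u + i];
    used.store(u, std::memory_order_relaxed);
    return true;
  }

  bool TryPushBatch(void* const* in, int n) {
    if (used.load(std::memory_order_relaxed) + n > 64) return false;
    std::lock_guard<std::mutex> lock(mu);
    const int u = used.load(std::memory_order_relaxed);
    if (u + n > 64) return false;  // recheck: capacity may have filled up
    for (int i = 0; i < n; ++i) slots[u + i] = in[i];
    used.store(u + n, std::memory_order_relaxed);
    return true;
  }
};

int main() {
  ToyStack s;
  int dummy[4];
  void* batch[4] = {&dummy[0], &dummy[1], &dummy[2], &dummy[3]};
  if (s.TryPushBatch(batch, 4)) {
    void* out[4];
    (void)s.TryPopBatch(out, 4);
  }
  return 0;
}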
+
+ // If this object has not been touched since the last attempt, then
+ // return all objects to 'freelist()'.
+ void TryPlunder(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
+ if (max_capacity_ == 0) return;
+ int low_water_mark = low_water_mark_.load(std::memory_order_acquire);
+ low_water_mark_.store(std::numeric_limits<int>::max(),
+ std::memory_order_release);
+ while (low_water_mark > 0) {
+ if (!lock_.TryLock()) return;
+ if (low_water_mark_.load(std::memory_order_acquire) !=
+ std::numeric_limits<int>::max()) {
+ lock_.Unlock();
+ return;
+ }
+ const int B = Manager::num_objects_to_move(size_class);
+ SizeInfo info = GetSlotInfo();
+ if (info.used == 0) {
+ lock_.Unlock();
+ return;
+ }
+ const size_t num_to_move = std::min(B, info.used);
+ void *buf[kMaxObjectsToMove];
+ void **const entry = GetSlot(info.used - B);
+ memcpy(buf, entry, sizeof(void *) * B);
+ info.used -= num_to_move;
+ low_water_mark -= num_to_move;
SetSlotInfo(info);
- lock_.Unlock();
- tracking::Report(kTCElementsPlunder, size_class, num_to_move);
- freelist().InsertRange({buf, num_to_move});
+ lock_.Unlock();
+ tracking::Report(kTCElementsPlunder, size_class, num_to_move);
+ freelist().InsertRange({buf, num_to_move});
}
}
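TryPlunder above relies on `low_water_mark_`: the smallest `used` value observed since the previous pass bounds how many objects went untouched for the whole interval, and those can be handed back to the central freelist. A compact model of just that bookkeeping, with a plain vector instead of the slot array; the specific numbers are arbitrary.

#include <algorithm>
#include <cstdio>
#include <limits>
#include <vector>

struct ToyCache {
  std::vector<int> slots;
  int low_water_mark = std::numeric_limits<int>::max();

  void Insert(int v) { slots.push_back(v); }
  bool Remove(int* v) {
    if (slots.empty()) { low_water_mark = 0; return false; }
    *v = slots.back();
    slots.pop_back();
    low_water_mark = std::min<int>(low_water_mark, slots.size());
    return true;
  }
  // Called periodically (tcmalloc does this from a background thread).
  int Plunder() {
    const int reclaim = std::min<int>(low_water_mark, slots.size());
    low_water_mark = std::numeric_limits<int>::max();
    slots.resize(slots.size() - reclaim);  // give `reclaim` objects back
    return reclaim;
  }
};

int main() {
  ToyCache c;
  for (int i = 0; i < 8; ++i) c.Insert(i);
  int v;
  c.Remove(&v);   // depth dips to 7, so 7 objects sat idle all interval
  c.Insert(v);
  std::printf("plundered %d objects\n", c.Plunder());  // prints 7
  return 0;
}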
// Returns the number of free objects in the transfer cache.
- size_t tc_length() const {
+ size_t tc_length() const {
return static_cast<size_t>(slot_info_.load(std::memory_order_relaxed).used);
}
// Returns the number of transfer cache insert/remove hits/misses.
- TransferCacheStats GetHitRateStats() const ABSL_LOCKS_EXCLUDED(lock_) {
+ TransferCacheStats GetHitRateStats() const ABSL_LOCKS_EXCLUDED(lock_) {
TransferCacheStats stats;
-
- stats.insert_hits = insert_hits_.value();
- stats.remove_hits = remove_hits_.value();
- stats.insert_misses = insert_misses_.value();
- stats.insert_non_batch_misses = insert_non_batch_misses_.value();
- stats.remove_misses = remove_misses_.value();
- stats.remove_non_batch_misses = remove_non_batch_misses_.value();
-
- // For performance reasons, we only update a single atomic as part of the
- // actual allocation operation. For reporting, we keep reporting all
-    // misses together and separately break out how many of those misses were
- // non-batch sized.
- stats.insert_misses += stats.insert_non_batch_misses;
- stats.remove_misses += stats.remove_non_batch_misses;
-
+
+ stats.insert_hits = insert_hits_.value();
+ stats.remove_hits = remove_hits_.value();
+ stats.insert_misses = insert_misses_.value();
+ stats.insert_non_batch_misses = insert_non_batch_misses_.value();
+ stats.remove_misses = remove_misses_.value();
+ stats.remove_non_batch_misses = remove_non_batch_misses_.value();
+
+ // For performance reasons, we only update a single atomic as part of the
+ // actual allocation operation. For reporting, we keep reporting all
+    // misses together and separately break out how many of those misses were
+ // non-batch sized.
+ stats.insert_misses += stats.insert_non_batch_misses;
+ stats.remove_misses += stats.remove_non_batch_misses;
+
return stats;
}
@@ -272,11 +272,11 @@ class TransferCache {
}
// REQUIRES: lock is held.
- // Tries to make room for N elements. If the cache is full it will try to
- // expand it at the cost of some other cache size. Return false if there is
- // no space.
- bool MakeCacheSpace(int size_class, int N)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+ // Tries to make room for N elements. If the cache is full it will try to
+ // expand it at the cost of some other cache size. Return false if there is
+ // no space.
+ bool MakeCacheSpace(int size_class, int N)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
auto info = slot_info_.load(std::memory_order_relaxed);
// Is there room in the cache?
if (info.used + N <= info.capacity) return true;
@@ -284,7 +284,7 @@ class TransferCache {
if (info.capacity + N > max_capacity_) return false;
int to_evict = owner_->DetermineSizeClassToEvict();
- if (to_evict == size_class) return false;
+ if (to_evict == size_class) return false;
// Release the held lock before the other instance tries to grab its lock.
lock_.Unlock();
@@ -304,24 +304,24 @@ class TransferCache {
return true;
}
- bool HasSpareCapacity(int size_class) const {
- int n = Manager::num_objects_to_move(size_class);
+ bool HasSpareCapacity(int size_class) const {
+ int n = Manager::num_objects_to_move(size_class);
auto info = GetSlotInfo();
return info.capacity - info.used >= n;
}
// Takes lock_ and invokes MakeCacheSpace() on this cache. Returns true if it
- // succeeded at growing the cache by a batch size.
- bool GrowCache(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
+ // succeeded at growing the cache by a batch size.
+ bool GrowCache(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
absl::base_internal::SpinLockHolder h(&lock_);
- return MakeCacheSpace(size_class, Manager::num_objects_to_move(size_class));
+ return MakeCacheSpace(size_class, Manager::num_objects_to_move(size_class));
}
// REQUIRES: lock_ is *not* held.
// Tries to shrink the Cache. Return false if it failed to shrink the cache.
// Decreases cache_slots_ on success.
- bool ShrinkCache(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
- int N = Manager::num_objects_to_move(size_class);
+ bool ShrinkCache(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
+ int N = Manager::num_objects_to_move(size_class);
void *to_free[kMaxObjectsToMove];
int num_to_free;
@@ -329,7 +329,7 @@ class TransferCache {
absl::base_internal::SpinLockHolder h(&lock_);
auto info = slot_info_.load(std::memory_order_relaxed);
if (info.capacity == 0) return false;
- if (info.capacity < N) return false;
+ if (info.capacity < N) return false;
N = std::min(N, info.capacity);
int unused = info.capacity - info.used;
@@ -350,7 +350,7 @@ class TransferCache {
}
// Access the freelist without holding the lock.
- freelist().InsertRange({to_free, static_cast<uint64_t>(num_to_free)});
+ freelist().InsertRange({to_free, static_cast<uint64_t>(num_to_free)});
return true;
}
@@ -366,18 +366,18 @@ class TransferCache {
return freelist_do_not_access_directly_;
}
- void AcquireInternalLocks()
- {
- freelist().AcquireInternalLocks();
- lock_.Lock();
- }
-
- void ReleaseInternalLocks()
- {
- lock_.Unlock();
- freelist().ReleaseInternalLocks();
- }
-
+ void AcquireInternalLocks()
+ {
+ freelist().AcquireInternalLocks();
+ lock_.Lock();
+ }
+
+ void ReleaseInternalLocks()
+ {
+ lock_.Unlock();
+ freelist().ReleaseInternalLocks();
+ }
+
private:
// Returns first object of the i-th slot.
void **GetSlot(size_t i) ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
@@ -397,30 +397,30 @@ class TransferCache {
// may be looked at without holding the lock.
absl::base_internal::SpinLock lock_;
- // Maximum size of the cache.
- const int32_t max_capacity_;
-
- // insert_hits_ and remove_hits_ are logically guarded by lock_ for mutations
- // and use LossyAdd, but the thread annotations cannot indicate that we do not
- // need a lock for reads.
- StatsCounter insert_hits_;
- StatsCounter remove_hits_;
- // Miss counters do not hold lock_, so they use Add.
- StatsCounter insert_misses_;
- StatsCounter insert_non_batch_misses_;
- StatsCounter remove_misses_;
- StatsCounter remove_non_batch_misses_;
-
- // Number of currently used and available cached entries in slots_. This
+ // Maximum size of the cache.
+ const int32_t max_capacity_;
+
+ // insert_hits_ and remove_hits_ are logically guarded by lock_ for mutations
+ // and use LossyAdd, but the thread annotations cannot indicate that we do not
+ // need a lock for reads.
+ StatsCounter insert_hits_;
+ StatsCounter remove_hits_;
+ // Miss counters do not hold lock_, so they use Add.
+ StatsCounter insert_misses_;
+ StatsCounter insert_non_batch_misses_;
+ StatsCounter remove_misses_;
+ StatsCounter remove_non_batch_misses_;
+
+ // Number of currently used and available cached entries in slots_. This
// variable is updated under a lock but can be read without one.
// INVARIANT: [0 <= slot_info_.used <= slot_info.capacity <= max_cache_slots_]
std::atomic<SizeInfo> slot_info_;
- // Lowest value of "slot_info_.used" since last call to TryPlunder. All
- // elements not used for a full cycle (2 seconds) are unlikely to get used
- // again.
- std::atomic<int> low_water_mark_;
-
+ // Lowest value of "slot_info_.used" since last call to TryPlunder. All
+ // elements not used for a full cycle (2 seconds) are unlikely to get used
+ // again.
+ std::atomic<int> low_water_mark_;
+
// Pointer to array of free objects. Use GetSlot() to get pointers to
// entries.
void **slots_ ABSL_GUARDED_BY(lock_);
@@ -428,469 +428,469 @@ class TransferCache {
FreeList freelist_do_not_access_directly_;
} ABSL_CACHELINE_ALIGNED;
-struct RingBufferSizeInfo {
- // The starting index of data stored in the ring buffer.
- int32_t start;
- // How many elements are stored.
- int32_t used;
- // How many elements are allowed to be stored at most.
- int32_t capacity;
-};
-
-// RingBufferTransferCache is a transfer cache which stores cache entries in a
-// ring buffer instead of a stack.
+struct RingBufferSizeInfo {
+ // The starting index of data stored in the ring buffer.
+ int32_t start;
+ // How many elements are stored.
+ int32_t used;
+ // How many elements are allowed to be stored at most.
+ int32_t capacity;
+};
+
+// RingBufferTransferCache is a transfer cache which stores cache entries in a
+// ring buffer instead of a stack.
template <typename CentralFreeList, typename TransferCacheManager>
-class RingBufferTransferCache {
+class RingBufferTransferCache {
public:
using Manager = TransferCacheManager;
using FreeList = CentralFreeList;
-
- RingBufferTransferCache(Manager *owner, int cl)
- : RingBufferTransferCache(owner, cl, CapacityNeeded(cl)) {}
-
- RingBufferTransferCache(
- Manager *owner, int cl,
- typename TransferCache<CentralFreeList, TransferCacheManager>::Capacity
- capacity)
- : lock_(absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY),
- slot_info_(RingBufferSizeInfo({0, 0, capacity.capacity})),
- max_capacity_(capacity.max_capacity),
- freelist_do_not_access_directly_(),
- owner_(owner) {
- freelist().Init(cl);
- if (max_capacity_ == 0) {
- // We don't allocate a buffer. Set slots_bitmask_ to 0 to prevent UB.
- slots_bitmask_ = 0;
- } else {
- const size_t slots_size = absl::bit_ceil<size_t>(max_capacity_);
- ASSERT(slots_size >= max_capacity_);
- ASSERT(slots_size < max_capacity_ * 2);
- slots_ =
- reinterpret_cast<void **>(owner_->Alloc(slots_size * sizeof(void *)));
- slots_bitmask_ = slots_size - 1;
- }
- }
-
- RingBufferTransferCache(const RingBufferTransferCache &) = delete;
- RingBufferTransferCache &operator=(const RingBufferTransferCache &) = delete;
-
- // This transfercache implementation handles non-batch sized
- // inserts and removes efficiently.
- static constexpr bool IsFlexible() { return true; }
-
- // These methods all do internal locking.
-
- void AcquireInternalLocks()
- {
- freelist().AcquireInternalLocks();
- lock_.Lock();
- }
-
- void ReleaseInternalLocks()
- {
- lock_.Unlock();
- freelist().ReleaseInternalLocks();
- }
-
- // Insert the specified batch into the transfer cache. N is the number of
- // elements in the range. RemoveRange() is the opposite operation.
- void InsertRange(int size_class, absl::Span<void *> batch)
- ABSL_LOCKS_EXCLUDED(lock_) {
- const int N = batch.size();
- const int B = Manager::num_objects_to_move(size_class);
- ASSERT(0 < N && N <= B);
- void *to_free_buf[kMaxObjectsToMove];
- int to_free_num = 0;
-
- {
- absl::base_internal::SpinLockHolder h(&lock_);
- RingBufferSizeInfo info = GetSlotInfo();
- if (info.used + N <= max_capacity_) {
- const bool cache_grown = MakeCacheSpace(size_class, N);
- // MakeCacheSpace can drop the lock, so refetch
- info = GetSlotInfo();
- if (cache_grown) {
- CopyIntoEnd(batch.data(), N, info);
- SetSlotInfo(info);
- tracking::Report(kTCInsertHit, size_class, 1);
- insert_hits_.LossyAdd(1);
- return;
- }
- }
-
- // If we arrive here, this means that there is not enough capacity in the
- // current cache to include the new items, and we cannot grow it.
-
- // We want to return up to `B` items from the transfer cache and currently
- // inserted items.
- const int returned_from_cache = std::min<int>(B, info.used);
- if (returned_from_cache > 0) {
- CopyOutOfStart(to_free_buf, returned_from_cache, info);
- }
- to_free_num = returned_from_cache;
- if (info.used > 0) {
- // We didn't have to return the whole cache. This means we can copy
- // in all of the inserted items.
- ASSERT(info.used + N <= info.capacity);
- CopyIntoEnd(batch.data(), N, info);
+
+ RingBufferTransferCache(Manager *owner, int cl)
+ : RingBufferTransferCache(owner, cl, CapacityNeeded(cl)) {}
+
+ RingBufferTransferCache(
+ Manager *owner, int cl,
+ typename TransferCache<CentralFreeList, TransferCacheManager>::Capacity
+ capacity)
+ : lock_(absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY),
+ slot_info_(RingBufferSizeInfo({0, 0, capacity.capacity})),
+ max_capacity_(capacity.max_capacity),
+ freelist_do_not_access_directly_(),
+ owner_(owner) {
+ freelist().Init(cl);
+ if (max_capacity_ == 0) {
+ // We don't allocate a buffer. Set slots_bitmask_ to 0 to prevent UB.
+ slots_bitmask_ = 0;
+ } else {
+ const size_t slots_size = absl::bit_ceil<size_t>(max_capacity_);
+ ASSERT(slots_size >= max_capacity_);
+ ASSERT(slots_size < max_capacity_ * 2);
+ slots_ =
+ reinterpret_cast<void **>(owner_->Alloc(slots_size * sizeof(void *)));
+ slots_bitmask_ = slots_size - 1;
+ }
+ }
+
+ RingBufferTransferCache(const RingBufferTransferCache &) = delete;
+ RingBufferTransferCache &operator=(const RingBufferTransferCache &) = delete;
+
+ // This transfercache implementation handles non-batch sized
+ // inserts and removes efficiently.
+ static constexpr bool IsFlexible() { return true; }
+
+ // These methods all do internal locking.
+
+ void AcquireInternalLocks()
+ {
+ freelist().AcquireInternalLocks();
+ lock_.Lock();
+ }
+
+ void ReleaseInternalLocks()
+ {
+ lock_.Unlock();
+ freelist().ReleaseInternalLocks();
+ }
+
+ // Insert the specified batch into the transfer cache. N is the number of
+ // elements in the range. RemoveRange() is the opposite operation.
+ void InsertRange(int size_class, absl::Span<void *> batch)
+ ABSL_LOCKS_EXCLUDED(lock_) {
+ const int N = batch.size();
+ const int B = Manager::num_objects_to_move(size_class);
+ ASSERT(0 < N && N <= B);
+ void *to_free_buf[kMaxObjectsToMove];
+ int to_free_num = 0;
+
+ {
+ absl::base_internal::SpinLockHolder h(&lock_);
+ RingBufferSizeInfo info = GetSlotInfo();
+ if (info.used + N <= max_capacity_) {
+ const bool cache_grown = MakeCacheSpace(size_class, N);
+ // MakeCacheSpace can drop the lock, so refetch
+ info = GetSlotInfo();
+ if (cache_grown) {
+ CopyIntoEnd(batch.data(), N, info);
+ SetSlotInfo(info);
+ tracking::Report(kTCInsertHit, size_class, 1);
+ insert_hits_.LossyAdd(1);
+ return;
+ }
+ }
+
+ // If we arrive here, this means that there is not enough capacity in the
+ // current cache to include the new items, and we cannot grow it.
+
+ // We want to return up to `B` items from the transfer cache and currently
+ // inserted items.
+ const int returned_from_cache = std::min<int>(B, info.used);
+ if (returned_from_cache > 0) {
+ CopyOutOfStart(to_free_buf, returned_from_cache, info);
+ }
+ to_free_num = returned_from_cache;
+ if (info.used > 0) {
+ // We didn't have to return the whole cache. This means we can copy
+ // in all of the inserted items.
+ ASSERT(info.used + N <= info.capacity);
+ CopyIntoEnd(batch.data(), N, info);
} else {
- // The transfercache is empty. We might still not have enough capacity
- // to store all of the inserted items though.
- const int to_insert_start = std::max(0, N - info.capacity);
- ASSERT(returned_from_cache + to_insert_start <= B);
- if (to_insert_start > 0) {
- // We also want to return some of the inserted items in this case.
- memcpy(to_free_buf + to_free_num, batch.data(),
- to_insert_start * sizeof(void *));
- to_free_num += to_insert_start;
- }
- // This is only false if info.capacity is 0.
- if (ABSL_PREDICT_TRUE(N > to_insert_start)) {
- CopyIntoEnd(batch.data() + to_insert_start, N - to_insert_start,
- info);
- }
+ // The transfercache is empty. We might still not have enough capacity
+ // to store all of the inserted items though.
+ const int to_insert_start = std::max(0, N - info.capacity);
+ ASSERT(returned_from_cache + to_insert_start <= B);
+ if (to_insert_start > 0) {
+ // We also want to return some of the inserted items in this case.
+ memcpy(to_free_buf + to_free_num, batch.data(),
+ to_insert_start * sizeof(void *));
+ to_free_num += to_insert_start;
+ }
+ // This is only false if info.capacity is 0.
+ if (ABSL_PREDICT_TRUE(N > to_insert_start)) {
+ CopyIntoEnd(batch.data() + to_insert_start, N - to_insert_start,
+ info);
+ }
}
- SetSlotInfo(info);
- }
- // It can work out that we manage to insert all items into the cache after
- // all.
- if (to_free_num > 0) {
- ASSERT(to_free_num <= kMaxObjectsToMove);
- ASSERT(to_free_num <= B);
- insert_misses_.Add(1);
- tracking::Report(kTCInsertMiss, size_class, 1);
- freelist().InsertRange(absl::Span<void *>(to_free_buf, to_free_num));
+ SetSlotInfo(info);
}
- }
-
- // Returns the actual number of fetched elements and stores elements in the
- // batch. This might return less than N if the transfercache is non-empty but
- // contains fewer elements than N. It is guaranteed to return at least 1 as
- // long as either the transfercache or the free list are not empty.
- ABSL_MUST_USE_RESULT int RemoveRange(int size_class, void **batch, int N)
- ABSL_LOCKS_EXCLUDED(lock_) {
- ASSERT(N > 0);
-
- {
- absl::base_internal::SpinLockHolder h(&lock_);
- RingBufferSizeInfo info = GetSlotInfo();
- if (info.used > 0) {
- // Return up to however much we have in our local cache.
- const int copied = std::min<int>(N, info.used);
- CopyOutOfEnd(batch, copied, info);
- SetSlotInfo(info);
- tracking::Report(kTCRemoveHit, size_class, 1);
- remove_hits_.LossyAdd(1);
- low_water_mark_ = std::min(low_water_mark_, info.used);
- return copied;
- }
- low_water_mark_ = 0;
+ // It can work out that we manage to insert all items into the cache after
+ // all.
+ if (to_free_num > 0) {
+ ASSERT(to_free_num <= kMaxObjectsToMove);
+ ASSERT(to_free_num <= B);
+ insert_misses_.Add(1);
+ tracking::Report(kTCInsertMiss, size_class, 1);
+ freelist().InsertRange(absl::Span<void *>(to_free_buf, to_free_num));
+ }
+ }
+
+ // Returns the actual number of fetched elements and stores elements in the
+ // batch. This might return less than N if the transfercache is non-empty but
+ // contains fewer elements than N. It is guaranteed to return at least 1 as
+ // long as either the transfercache or the free list are not empty.
+ ABSL_MUST_USE_RESULT int RemoveRange(int size_class, void **batch, int N)
+ ABSL_LOCKS_EXCLUDED(lock_) {
+ ASSERT(N > 0);
+
+ {
+ absl::base_internal::SpinLockHolder h(&lock_);
+ RingBufferSizeInfo info = GetSlotInfo();
+ if (info.used > 0) {
+ // Return up to however much we have in our local cache.
+ const int copied = std::min<int>(N, info.used);
+ CopyOutOfEnd(batch, copied, info);
+ SetSlotInfo(info);
+ tracking::Report(kTCRemoveHit, size_class, 1);
+ remove_hits_.LossyAdd(1);
+ low_water_mark_ = std::min(low_water_mark_, info.used);
+ return copied;
+ }
+ low_water_mark_ = 0;
}
- remove_misses_.Add(1);
- tracking::Report(kTCRemoveMiss, size_class, 1);
- return freelist().RemoveRange(batch, N);
- }
-
- // Return all objects not touched since last call to this function.
- void TryPlunder(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
- if (max_capacity_ == 0) return;
- // If the lock is being held, someone is modifying the cache.
- if (!lock_.TryLock()) return;
- int low_water_mark = low_water_mark_;
- low_water_mark_ = std::numeric_limits<int>::max();
- const int B = Manager::num_objects_to_move(size_class);
- while (slot_info_.used > 0 && low_water_mark >= B &&
- (low_water_mark_ == std::numeric_limits<int>::max())) {
- const size_t num_to_move(std::min(B, slot_info_.used));
- void *buf[kMaxObjectsToMove];
- CopyOutOfEnd(buf, num_to_move, slot_info_);
- low_water_mark -= num_to_move;
- lock_.Unlock();
- freelist().InsertRange({buf, num_to_move});
- tracking::Report(kTCElementsPlunder, size_class, num_to_move);
- // If someone is starting to use the cache, stop doing this.
- if (!lock_.TryLock()) {
- return;
- }
+ remove_misses_.Add(1);
+ tracking::Report(kTCRemoveMiss, size_class, 1);
+ return freelist().RemoveRange(batch, N);
+ }
+
+ // Return all objects not touched since last call to this function.
+ void TryPlunder(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
+ if (max_capacity_ == 0) return;
+ // If the lock is being held, someone is modifying the cache.
+ if (!lock_.TryLock()) return;
+ int low_water_mark = low_water_mark_;
+ low_water_mark_ = std::numeric_limits<int>::max();
+ const int B = Manager::num_objects_to_move(size_class);
+ while (slot_info_.used > 0 && low_water_mark >= B &&
+ (low_water_mark_ == std::numeric_limits<int>::max())) {
+ const size_t num_to_move(std::min(B, slot_info_.used));
+ void *buf[kMaxObjectsToMove];
+ CopyOutOfEnd(buf, num_to_move, slot_info_);
+ low_water_mark -= num_to_move;
+ lock_.Unlock();
+ freelist().InsertRange({buf, num_to_move});
+ tracking::Report(kTCElementsPlunder, size_class, num_to_move);
+ // If someone is starting to use the cache, stop doing this.
+ if (!lock_.TryLock()) {
+ return;
+ }
}
- lock_.Unlock();
+ lock_.Unlock();
}
// Returns the number of free objects in the transfer cache.
- size_t tc_length() ABSL_LOCKS_EXCLUDED(lock_) {
- absl::base_internal::SpinLockHolder h(&lock_);
- return static_cast<size_t>(GetSlotInfo().used);
+ size_t tc_length() ABSL_LOCKS_EXCLUDED(lock_) {
+ absl::base_internal::SpinLockHolder h(&lock_);
+ return static_cast<size_t>(GetSlotInfo().used);
}
// Returns the number of transfer cache insert/remove hits/misses.
- TransferCacheStats GetHitRateStats() const ABSL_LOCKS_EXCLUDED(lock_) {
+ TransferCacheStats GetHitRateStats() const ABSL_LOCKS_EXCLUDED(lock_) {
TransferCacheStats stats;
-
- stats.insert_hits = insert_hits_.value();
- stats.remove_hits = remove_hits_.value();
- stats.insert_misses = insert_misses_.value();
- stats.insert_non_batch_misses = 0;
- stats.remove_misses = remove_misses_.value();
- stats.remove_non_batch_misses = 0;
-
+
+ stats.insert_hits = insert_hits_.value();
+ stats.remove_hits = remove_hits_.value();
+ stats.insert_misses = insert_misses_.value();
+ stats.insert_non_batch_misses = 0;
+ stats.remove_misses = remove_misses_.value();
+ stats.remove_non_batch_misses = 0;
+
return stats;
}
- RingBufferSizeInfo GetSlotInfo() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
- return slot_info_;
- }
-
- // REQUIRES: lock is held.
- // Tries to make room for N elements. If the cache is full it will try to
- // expand it at the cost of some other cache size. Return false if there is
- // no space.
- bool MakeCacheSpace(int size_class, int N)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
- // Increase capacity in number of batches, as we do when reducing capacity.
- const int B = Manager::num_objects_to_move(size_class);
- ASSERT(B >= N);
-
- auto info = GetSlotInfo();
- // Is there room in the cache?
- if (info.used + N <= info.capacity) return true;
- // Check if we can expand this cache?
- if (info.capacity + B > max_capacity_) return false;
-
- // Release the held lock before the other instance tries to grab its lock.
- lock_.Unlock();
- int to_evict = owner_->DetermineSizeClassToEvict();
- if (to_evict == size_class) {
- lock_.Lock();
- return false;
+ RingBufferSizeInfo GetSlotInfo() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+ return slot_info_;
+ }
+
+ // REQUIRES: lock is held.
+ // Tries to make room for N elements. If the cache is full it will try to
+ // expand it at the cost of some other cache size. Return false if there is
+ // no space.
+ bool MakeCacheSpace(int size_class, int N)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+ // Increase capacity in number of batches, as we do when reducing capacity.
+ const int B = Manager::num_objects_to_move(size_class);
+ ASSERT(B >= N);
+
+ auto info = GetSlotInfo();
+ // Is there room in the cache?
+ if (info.used + N <= info.capacity) return true;
+ // Check if we can expand this cache?
+ if (info.capacity + B > max_capacity_) return false;
+
+ // Release the held lock before the other instance tries to grab its lock.
+ lock_.Unlock();
+ int to_evict = owner_->DetermineSizeClassToEvict();
+ if (to_evict == size_class) {
+ lock_.Lock();
+ return false;
}
- bool made_space = owner_->ShrinkCache(to_evict);
- lock_.Lock();
-
- if (!made_space) return false;
-
- // Succeeded in evicting, we're going to make our cache larger. However, we
- // have dropped and re-acquired the lock, so slot_info_ may have
- // changed. Therefore, check and verify that it is still OK to increase the
- // cache size.
- info = GetSlotInfo();
- if (info.capacity + B > max_capacity_) return false;
- info.capacity += B;
- SetSlotInfo(info);
+ bool made_space = owner_->ShrinkCache(to_evict);
+ lock_.Lock();
+
+ if (!made_space) return false;
+
+ // Succeeded in evicting, we're going to make our cache larger. However, we
+ // have dropped and re-acquired the lock, so slot_info_ may have
+ // changed. Therefore, check and verify that it is still OK to increase the
+ // cache size.
+ info = GetSlotInfo();
+ if (info.capacity + B > max_capacity_) return false;
+ info.capacity += B;
+ SetSlotInfo(info);
return true;
}
- bool HasSpareCapacity(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
- const int n = Manager::num_objects_to_move(size_class);
- absl::base_internal::SpinLockHolder h(&lock_);
- const auto info = GetSlotInfo();
- return info.capacity - info.used >= n;
- }
-
- // Takes lock_ and invokes MakeCacheSpace() on this cache. Returns true if it
- // succeeded at growing the cache by a batch size.
- bool GrowCache(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
- absl::base_internal::SpinLockHolder h(&lock_);
- return MakeCacheSpace(size_class, Manager::num_objects_to_move(size_class));
- }
-
- // REQUIRES: lock_ is *not* held.
- // Tries to shrink the Cache. Return false if it failed to shrink the cache.
- // Decreases cache_slots_ on success.
- bool ShrinkCache(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
- const int N = Manager::num_objects_to_move(size_class);
-
- void *to_free[kMaxObjectsToMove];
- int num_to_free;
- {
- absl::base_internal::SpinLockHolder h(&lock_);
- auto info = GetSlotInfo();
- if (info.capacity == 0) return false;
- if (info.capacity < N) return false;
-
- const int unused = info.capacity - info.used;
- if (N <= unused) {
- info.capacity -= N;
- SetSlotInfo(info);
- return true;
- }
-
- num_to_free = N - unused;
-
- // Remove from the beginning of the buffer which holds the oldest entries.
- // Our internal slot array may get overwritten as soon as we drop the
- // lock, so copy the items to free to an on stack buffer.
- CopyOutOfStart(to_free, num_to_free, info);
- low_water_mark_ = info.used;
- info.capacity -= N;
- SetSlotInfo(info);
+ bool HasSpareCapacity(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
+ const int n = Manager::num_objects_to_move(size_class);
+ absl::base_internal::SpinLockHolder h(&lock_);
+ const auto info = GetSlotInfo();
+ return info.capacity - info.used >= n;
+ }
+
+ // Takes lock_ and invokes MakeCacheSpace() on this cache. Returns true if it
+ // succeeded at growing the cache by a batch size.
+ bool GrowCache(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
+ absl::base_internal::SpinLockHolder h(&lock_);
+ return MakeCacheSpace(size_class, Manager::num_objects_to_move(size_class));
+ }
+
+ // REQUIRES: lock_ is *not* held.
+ // Tries to shrink the Cache. Return false if it failed to shrink the cache.
+ // Decreases cache_slots_ on success.
+ bool ShrinkCache(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
+ const int N = Manager::num_objects_to_move(size_class);
+
+ void *to_free[kMaxObjectsToMove];
+ int num_to_free;
+ {
+ absl::base_internal::SpinLockHolder h(&lock_);
+ auto info = GetSlotInfo();
+ if (info.capacity == 0) return false;
+ if (info.capacity < N) return false;
+
+ const int unused = info.capacity - info.used;
+ if (N <= unused) {
+ info.capacity -= N;
+ SetSlotInfo(info);
+ return true;
+ }
+
+ num_to_free = N - unused;
+
+ // Remove from the beginning of the buffer which holds the oldest entries.
+ // Our internal slot array may get overwritten as soon as we drop the
+ // lock, so copy the items to free to an on stack buffer.
+ CopyOutOfStart(to_free, num_to_free, info);
+ low_water_mark_ = info.used;
+ info.capacity -= N;
+ SetSlotInfo(info);
}
- // Access the freelist without holding the lock.
- freelist().InsertRange({to_free, static_cast<uint64_t>(num_to_free)});
- return true;
- }
-
- // This is a thin wrapper for the CentralFreeList. It is intended to ensure
- // that we are not holding lock_ when we access it.
- ABSL_ATTRIBUTE_ALWAYS_INLINE FreeList &freelist() ABSL_LOCKS_EXCLUDED(lock_) {
- return freelist_do_not_access_directly_;
- }
-
- // The const version of the wrapper, needed to call stats on
- ABSL_ATTRIBUTE_ALWAYS_INLINE const FreeList &freelist() const
- ABSL_LOCKS_EXCLUDED(lock_) {
- return freelist_do_not_access_directly_;
- }
-
- private:
- // Due to decreased downward pressure, the ring buffer based transfer cache
- // contains on average more bytes than the legacy implementation.
- // To counteract this, decrease the capacity (but not max capacity).
- // TODO(b/161927252): Revisit TransferCache rebalancing strategy
- static typename TransferCache<CentralFreeList, TransferCacheManager>::Capacity
- CapacityNeeded(int cl) {
- auto capacity =
- TransferCache<CentralFreeList, TransferCacheManager>::CapacityNeeded(
- cl);
- const int N = Manager::num_objects_to_move(cl);
- if (N == 0) return {0, 0};
- ASSERT(capacity.capacity % N == 0);
- // We still want capacity to be in multiples of batches.
- const int capacity_in_batches = capacity.capacity / N;
- // This factor was found by trial and error.
- const int new_batches =
- static_cast<int>(std::ceil(capacity_in_batches / 1.5));
- capacity.capacity = new_batches * N;
- return capacity;
- }
-
- // Converts a logical index (i.e. i-th element stored in the ring buffer) into
- // a physical index into slots_.
- size_t GetSlotIndex(size_t start, size_t i) const {
- return (start + i) & slots_bitmask_;
- }
-
- // Copies N elements from source to the end of the ring buffer. It updates
- // `info`, be sure to call SetSlotInfo() to save the modifications.
- // N has to be > 0.
- void CopyIntoEnd(void *const *source, size_t N, RingBufferSizeInfo &info)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
- ASSERT(N > 0);
- ASSERT(info.used + N <= info.capacity);
- const size_t begin = GetSlotIndex(info.start, info.used);
- const size_t end = GetSlotIndex(info.start, info.used + N);
- if (ABSL_PREDICT_FALSE(end < begin && end != 0)) {
- // We wrap around the buffer.
- memcpy(slots_ + begin, source, sizeof(void *) * (N - end));
- memcpy(slots_, source + (N - end), sizeof(void *) * end);
+ // Access the freelist without holding the lock.
+ freelist().InsertRange({to_free, static_cast<uint64_t>(num_to_free)});
+ return true;
+ }
+
+ // This is a thin wrapper for the CentralFreeList. It is intended to ensure
+ // that we are not holding lock_ when we access it.
+ ABSL_ATTRIBUTE_ALWAYS_INLINE FreeList &freelist() ABSL_LOCKS_EXCLUDED(lock_) {
+ return freelist_do_not_access_directly_;
+ }
+
+ // The const version of the wrapper, needed to call stats on
+ ABSL_ATTRIBUTE_ALWAYS_INLINE const FreeList &freelist() const
+ ABSL_LOCKS_EXCLUDED(lock_) {
+ return freelist_do_not_access_directly_;
+ }
+
+ private:
+ // Due to decreased downward pressure, the ring buffer based transfer cache
+ // contains on average more bytes than the legacy implementation.
+ // To counteract this, decrease the capacity (but not max capacity).
+ // TODO(b/161927252): Revisit TransferCache rebalancing strategy
+ static typename TransferCache<CentralFreeList, TransferCacheManager>::Capacity
+ CapacityNeeded(int cl) {
+ auto capacity =
+ TransferCache<CentralFreeList, TransferCacheManager>::CapacityNeeded(
+ cl);
+ const int N = Manager::num_objects_to_move(cl);
+ if (N == 0) return {0, 0};
+ ASSERT(capacity.capacity % N == 0);
+ // We still want capacity to be in multiples of batches.
+ const int capacity_in_batches = capacity.capacity / N;
+ // This factor was found by trial and error.
+ const int new_batches =
+ static_cast<int>(std::ceil(capacity_in_batches / 1.5));
+ capacity.capacity = new_batches * N;
+ return capacity;
+ }
+
+ // Converts a logical index (i.e. i-th element stored in the ring buffer) into
+ // a physical index into slots_.
+ size_t GetSlotIndex(size_t start, size_t i) const {
+ return (start + i) & slots_bitmask_;
+ }
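The constructor above sizes slots_ to absl::bit_ceil(max_capacity_) and stores slots_bitmask_ = size - 1, so GetSlotIndex can wrap with a mask instead of a modulus. A small self-check of that equivalence; std::bit_ceil from C++20 stands in for absl::bit_ceil, and the capacity value is arbitrary.

#include <bit>      // std::bit_ceil (C++20)
#include <cassert>
#include <cstddef>

int main() {
  const size_t max_capacity = 48;                         // arbitrary example
  const size_t slots_size = std::bit_ceil(max_capacity);  // 64
  const size_t slots_bitmask = slots_size - 1;            // 0x3f

  // Because slots_size is a power of two, masking is equivalent to the
  // modulus a ring-buffer index would otherwise need, without a division.
  for (size_t start = 0; start < slots_size; ++start) {
    for (size_t i = 0; i < 2 * slots_size; ++i) {
      assert(((start + i) & slots_bitmask) == (start + i) % slots_size);
    }
  }
  return 0;
}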
+
+ // Copies N elements from source to the end of the ring buffer. It updates
+ // `info`, be sure to call SetSlotInfo() to save the modifications.
+ // N has to be > 0.
+ void CopyIntoEnd(void *const *source, size_t N, RingBufferSizeInfo &info)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+ ASSERT(N > 0);
+ ASSERT(info.used + N <= info.capacity);
+ const size_t begin = GetSlotIndex(info.start, info.used);
+ const size_t end = GetSlotIndex(info.start, info.used + N);
+ if (ABSL_PREDICT_FALSE(end < begin && end != 0)) {
+ // We wrap around the buffer.
+ memcpy(slots_ + begin, source, sizeof(void *) * (N - end));
+ memcpy(slots_, source + (N - end), sizeof(void *) * end);
} else {
- memcpy(slots_ + begin, source, sizeof(void *) * N);
+ memcpy(slots_ + begin, source, sizeof(void *) * N);
}
- info.used += N;
- }
-
-  // Copies N elements stored in slots_ starting at the given logical index into
- // target. Does not do any updates to slot_info_.
- // N has to be > 0.
- // You should use CopyOutOfEnd or CopyOutOfStart instead in most cases.
- void CopyOutOfSlots(void **target, size_t N, size_t start, size_t index) const
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
- ASSERT(N > 0);
- const size_t begin = GetSlotIndex(start, index);
- const size_t end = GetSlotIndex(start, index + N);
- if (ABSL_PREDICT_FALSE(end < begin && end != 0)) {
- // We wrap around the buffer.
- memcpy(target, slots_ + begin, sizeof(void *) * (N - end));
- memcpy(target + (N - end), slots_, sizeof(void *) * end);
+ info.used += N;
+ }
+
+  // Copies N elements stored in slots_ starting at the given logical index into
+ // target. Does not do any updates to slot_info_.
+ // N has to be > 0.
+ // You should use CopyOutOfEnd or CopyOutOfStart instead in most cases.
+ void CopyOutOfSlots(void **target, size_t N, size_t start, size_t index) const
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+ ASSERT(N > 0);
+ const size_t begin = GetSlotIndex(start, index);
+ const size_t end = GetSlotIndex(start, index + N);
+ if (ABSL_PREDICT_FALSE(end < begin && end != 0)) {
+ // We wrap around the buffer.
+ memcpy(target, slots_ + begin, sizeof(void *) * (N - end));
+ memcpy(target + (N - end), slots_, sizeof(void *) * end);
} else {
- memcpy(target, slots_ + begin, sizeof(void *) * N);
+ memcpy(target, slots_ + begin, sizeof(void *) * N);
}
}
- // Copies N elements from the start of the ring buffer into target. Updates
- // `info`, be sure to call SetSlotInfo() to save the modifications.
- // N has to be > 0.
- void CopyOutOfStart(void **target, size_t N, RingBufferSizeInfo &info)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
- ASSERT(N > 0);
- ASSERT(N <= info.used);
- CopyOutOfSlots(target, N, info.start, 0);
- info.used -= N;
- if (info.used == 0) {
- // This makes it less likely that we will have to do copies that wrap
- // around in the immediate future.
- info.start = 0;
- } else {
- info.start = (info.start + N) & slots_bitmask_;
+ // Copies N elements from the start of the ring buffer into target. Updates
+  // `info`; be sure to call SetSlotInfo() to save the modifications.
+ // N has to be > 0.
+ void CopyOutOfStart(void **target, size_t N, RingBufferSizeInfo &info)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+ ASSERT(N > 0);
+ ASSERT(N <= info.used);
+ CopyOutOfSlots(target, N, info.start, 0);
+ info.used -= N;
+ if (info.used == 0) {
+ // This makes it less likely that we will have to do copies that wrap
+ // around in the immediate future.
+ info.start = 0;
+ } else {
+ info.start = (info.start + N) & slots_bitmask_;
}
}
- // Copies N elements from the end of the ring buffer into target. Updates
- // `info`, be sure to call SetSlotInfo() to save the modifications.
- // N has to be > 0.
- void CopyOutOfEnd(void **target, size_t N, RingBufferSizeInfo &info)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
- ASSERT(N > 0);
- ASSERT(N <= info.used);
- info.used -= N;
- CopyOutOfSlots(target, N, info.start, info.used);
- if (info.used == 0) {
- // This makes it less likely that we will have to do copies that wrap
- // around in the immediate future.
- info.start = 0;
+ // Copies N elements from the end of the ring buffer into target. Updates
+  // `info`; be sure to call SetSlotInfo() to save the modifications.
+ // N has to be > 0.
+ void CopyOutOfEnd(void **target, size_t N, RingBufferSizeInfo &info)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+ ASSERT(N > 0);
+ ASSERT(N <= info.used);
+ info.used -= N;
+ CopyOutOfSlots(target, N, info.start, info.used);
+ if (info.used == 0) {
+ // This makes it less likely that we will have to do copies that wrap
+ // around in the immediate future.
+ info.start = 0;
}
}
- void SetSlotInfo(RingBufferSizeInfo info)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
- ASSERT(0 <= info.start);
- ASSERT((info.start & slots_bitmask_) == info.start);
- ASSERT(0 <= info.used);
- ASSERT(info.used <= info.capacity);
- ASSERT(info.capacity <= max_capacity_);
- slot_info_ = info;
- }
-
- // Pointer to array of free objects.
- void **slots_ ABSL_GUARDED_BY(lock_);
-
- // This lock protects all the data members. used_slots_ and cache_slots_
- // may be looked at without holding the lock.
- absl::base_internal::SpinLock lock_;
-
- // Number of currently used and available cached entries in slots_. Use
- // GetSlotInfo() to read this.
- // INVARIANT: [0 <= slot_info_.used <= slot_info.capacity <= max_cache_slots_]
- RingBufferSizeInfo slot_info_ ABSL_GUARDED_BY(lock_);
-
- // Lowest value of "slot_info_.used" since last call to TryPlunder. All
- // elements not used for a full cycle (2 seconds) are unlikely to get used
- // again.
- int low_water_mark_ ABSL_GUARDED_BY(lock_) = std::numeric_limits<int>::max();
-
- // Maximum size of the cache.
- const int32_t max_capacity_;
- // This is a bitmask used instead of a modulus in the ringbuffer index
- // calculations. This is 1 smaller than the size of slots_ which itself has
- // the size of `absl::bit_ceil(max_capacity_)`, i.e. the smallest power of two
- // >= max_capacity_.
- size_t slots_bitmask_;
-
- // insert_hits_ and remove_hits_ are logically guarded by lock_ for mutations
- // and use LossyAdd, but the thread annotations cannot indicate that we do not
- // need a lock for reads.
- StatsCounter insert_hits_;
- StatsCounter remove_hits_;
- // Miss counters do not hold lock_, so they use Add.
- StatsCounter insert_misses_;
- StatsCounter remove_misses_;
-
- FreeList freelist_do_not_access_directly_;
- Manager *const owner_;
+ void SetSlotInfo(RingBufferSizeInfo info)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+ ASSERT(0 <= info.start);
+ ASSERT((info.start & slots_bitmask_) == info.start);
+ ASSERT(0 <= info.used);
+ ASSERT(info.used <= info.capacity);
+ ASSERT(info.capacity <= max_capacity_);
+ slot_info_ = info;
+ }
+
+ // Pointer to array of free objects.
+ void **slots_ ABSL_GUARDED_BY(lock_);
+
+  // This lock protects the guarded data members of this class. The hit and
+  // miss counters below may be read without holding the lock.
+ absl::base_internal::SpinLock lock_;
+
+ // Number of currently used and available cached entries in slots_. Use
+ // GetSlotInfo() to read this.
+  // INVARIANT: [0 <= slot_info_.used <= slot_info_.capacity <= max_capacity_]
+ RingBufferSizeInfo slot_info_ ABSL_GUARDED_BY(lock_);
+
+ // Lowest value of "slot_info_.used" since last call to TryPlunder. All
+ // elements not used for a full cycle (2 seconds) are unlikely to get used
+ // again.
+ int low_water_mark_ ABSL_GUARDED_BY(lock_) = std::numeric_limits<int>::max();
+
+ // Maximum size of the cache.
+ const int32_t max_capacity_;
+  // This is a bitmask used instead of a modulus in the ring buffer index
+  // calculations. It is one less than the size of slots_, which is
+  // `absl::bit_ceil(max_capacity_)`, i.e. the smallest power of two
+  // >= max_capacity_.
+ size_t slots_bitmask_;
+
+ // insert_hits_ and remove_hits_ are logically guarded by lock_ for mutations
+ // and use LossyAdd, but the thread annotations cannot indicate that we do not
+ // need a lock for reads.
+ StatsCounter insert_hits_;
+ StatsCounter remove_hits_;
+ // Miss counters do not hold lock_, so they use Add.
+ StatsCounter insert_misses_;
+ StatsCounter remove_misses_;
+
+ FreeList freelist_do_not_access_directly_;
+ Manager *const owner_;
} ABSL_CACHELINE_ALIGNED;
-} // namespace tcmalloc::tcmalloc_internal::internal_transfer_cache
-GOOGLE_MALLOC_SECTION_END
+} // namespace tcmalloc::tcmalloc_internal::internal_transfer_cache
+GOOGLE_MALLOC_SECTION_END
#endif // TCMALLOC_TRANSFER_CACHE_INTERNAL_H_
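The RingBufferTransferCache code above avoids a modulus on every index calculation by sizing slots_ to a power of two and masking with slots_bitmask_; a copy that crosses the physical end of the array is split into two memcpy calls. The following is a minimal standalone sketch of that indexing scheme, assuming only the standard library; RingSketch and its members are illustrative names, not part of tcmalloc.

// Illustrative sketch of the power-of-two ring buffer indexing used by
// RingBufferTransferCache above; RingSketch is not a tcmalloc class.
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <vector>

class RingSketch {
 public:
  explicit RingSketch(std::size_t max_capacity)
      : mask_(BitCeil(max_capacity) - 1), slots_(mask_ + 1) {}

  // Logical index i (the i-th stored element) -> physical slot, using a mask
  // instead of a modulus, which works exactly because the size is a power of
  // two.
  std::size_t SlotIndex(std::size_t start, std::size_t i) const {
    return (start + i) & mask_;
  }

  // Appends n pointers at logical position `used`. When the destination range
  // crosses the physical end of slots_, the copy is split in two, mirroring
  // the end < begin case in CopyIntoEnd above.
  void CopyIn(void* const* src, std::size_t n, std::size_t start,
              std::size_t used) {
    const std::size_t begin = SlotIndex(start, used);
    const std::size_t end = SlotIndex(start, used + n);
    if (end < begin && end != 0) {
      std::memcpy(&slots_[begin], src, sizeof(void*) * (n - end));
      std::memcpy(&slots_[0], src + (n - end), sizeof(void*) * end);
    } else {
      std::memcpy(&slots_[begin], src, sizeof(void*) * n);
    }
  }

 private:
  // Smallest power of two >= v (standing in for absl::bit_ceil).
  static std::size_t BitCeil(std::size_t v) {
    std::size_t p = 1;
    while (p < v) p <<= 1;
    return p;
  }

  std::size_t mask_;
  std::vector<void*> slots_;
};

int main() {
  RingSketch ring(48);  // capacity 48 -> 64 physical slots, mask 63
  int dummy[8];
  void* batch[8];
  for (int i = 0; i < 8; ++i) batch[i] = &dummy[i];
  // Start near the physical end so the copy wraps: slots 60..63, then 0..3.
  ring.CopyIn(batch, 8, /*start=*/60, /*used=*/0);
  std::printf("logical 0 -> slot %zu, logical 7 -> slot %zu\n",
              ring.SlotIndex(60, 0), ring.SlotIndex(60, 7));
  return 0;
}

Run as-is, the sketch places the eight-pointer batch in slots 60..63 and 0..3, the same split that CopyIntoEnd performs when end < begin.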
diff --git a/contrib/libs/tcmalloc/tcmalloc/transfer_cache_stats.h b/contrib/libs/tcmalloc/tcmalloc/transfer_cache_stats.h
index fdc8fba53c..ffa551c26b 100644
--- a/contrib/libs/tcmalloc/tcmalloc/transfer_cache_stats.h
+++ b/contrib/libs/tcmalloc/tcmalloc/transfer_cache_stats.h
@@ -18,18 +18,18 @@
#include <stddef.h>
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
struct TransferCacheStats {
size_t insert_hits;
size_t insert_misses;
- size_t insert_non_batch_misses;
+ size_t insert_non_batch_misses;
size_t remove_hits;
size_t remove_misses;
- size_t remove_non_batch_misses;
+ size_t remove_non_batch_misses;
};
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
#endif // TCMALLOC_TRANSFER_CACHE_STATS_H_
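The low_water_mark_ member documented in transfer_cache_internal.h above, together with TryPlunder, implements a simple reuse heuristic: whatever has sat in the cache untouched since the last plunder cycle is returned to the central free list. The Plunder test further down in this diff exercises that behavior. A standalone sketch of the bookkeeping follows; PlunderSketch and its method names are illustrative, not tcmalloc's API.

// Illustrative sketch of the low_water_mark_ / TryPlunder bookkeeping from
// transfer_cache_internal.h above; PlunderSketch is not a tcmalloc class.
#include <algorithm>
#include <cstdio>
#include <limits>

class PlunderSketch {
 public:
  // Record the number of cached objects left after a RemoveRange-style call;
  // the minimum of these values since the last plunder is the low water mark.
  void RecordUsedAfterRemove(int used) {
    low_water_mark_ = std::min(low_water_mark_, used);
  }

  // Returns how many objects a TryPlunder-style pass would push back to the
  // central free list: everything that was never touched during the cycle.
  // The mark is reset afterwards, so an immediate second pass takes the rest.
  int TryPlunder(int used) {
    const int plundered = std::min(low_water_mark_, used);
    low_water_mark_ = std::numeric_limits<int>::max();
    return plundered;
  }

 private:
  int low_water_mark_ = std::numeric_limits<int>::max();
};

int main() {
  constexpr int kBatch = 32;
  PlunderSketch cache;
  int used = 2 * kBatch;                       // two batches sit in the cache
  cache.RecordUsedAfterRemove(used - kBatch);  // one batch removed...
  used = 2 * kBatch;                           // ...and re-inserted
  // Only the untouched batch (the low water mark) gets plundered.
  used -= cache.TryPlunder(used);
  std::printf("after first plunder: %d cached\n", used);   // 32
  // With a freshly reset mark, a second immediate plunder takes everything.
  used -= cache.TryPlunder(used);
  std::printf("after second plunder: %d cached\n", used);  // 0
  return 0;
}

The two printed values (32, then 0) mirror the assertions in the Plunder test: the first pass leaves one batch, the second pass, issued immediately, drains the cache.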
diff --git a/contrib/libs/tcmalloc/tcmalloc/transfer_cache_test.cc b/contrib/libs/tcmalloc/tcmalloc/transfer_cache_test.cc
index 4531f7a921..9a9ed5bed5 100644
--- a/contrib/libs/tcmalloc/tcmalloc/transfer_cache_test.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/transfer_cache_test.cc
@@ -14,7 +14,7 @@
#include "tcmalloc/transfer_cache.h"
-#include <algorithm>
+#include <algorithm>
#include <atomic>
#include <cmath>
#include <cstring>
@@ -38,11 +38,11 @@
#include "tcmalloc/transfer_cache_internals.h"
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
namespace {
-static constexpr int kSizeClass = 0;
-
+static constexpr int kSizeClass = 0;
+
template <typename Env>
using TransferCacheTest = ::testing::Test;
TYPED_TEST_SUITE_P(TransferCacheTest);
@@ -50,112 +50,112 @@ TYPED_TEST_SUITE_P(TransferCacheTest);
TYPED_TEST_P(TransferCacheTest, IsolatedSmoke) {
const int batch_size = TypeParam::kBatchSize;
TypeParam e;
- EXPECT_CALL(e.central_freelist(), InsertRange)
- .Times(e.transfer_cache().IsFlexible() ? 0 : 1);
- EXPECT_CALL(e.central_freelist(), RemoveRange)
- .Times(e.transfer_cache().IsFlexible() ? 0 : 1);
+ EXPECT_CALL(e.central_freelist(), InsertRange)
+ .Times(e.transfer_cache().IsFlexible() ? 0 : 1);
+ EXPECT_CALL(e.central_freelist(), RemoveRange)
+ .Times(e.transfer_cache().IsFlexible() ? 0 : 1);
EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 0);
EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 0);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_non_batch_misses, 0);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_non_batch_misses, 0);
EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, 0);
EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_misses, 0);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_non_batch_misses, 0);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_non_batch_misses, 0);
e.Insert(batch_size);
EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 1);
e.Insert(batch_size);
EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 2);
- e.Insert(batch_size - 1);
- if (e.transfer_cache().IsFlexible()) {
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 3);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 0);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_non_batch_misses, 0);
- } else {
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 2);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 1);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_non_batch_misses, 1);
- }
+ e.Insert(batch_size - 1);
+ if (e.transfer_cache().IsFlexible()) {
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 3);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 0);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_non_batch_misses, 0);
+ } else {
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 2);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 1);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_non_batch_misses, 1);
+ }
e.Remove(batch_size);
EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, 1);
e.Remove(batch_size);
EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, 2);
- e.Remove(batch_size - 1);
- if (e.transfer_cache().IsFlexible()) {
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, 3);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_misses, 0);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_non_batch_misses, 0);
- } else {
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, 2);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_misses, 1);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_non_batch_misses, 1);
- }
-}
-
-TYPED_TEST_P(TransferCacheTest, ReadStats) {
- const int batch_size = TypeParam::kBatchSize;
- TypeParam e;
- EXPECT_CALL(e.central_freelist(), InsertRange).Times(0);
- EXPECT_CALL(e.central_freelist(), RemoveRange).Times(0);
-
- // Ensure there is at least one insert hit/remove hit, so we can assert a
- // non-tautology in t2.
- e.Insert(batch_size);
- e.Remove(batch_size);
-
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 1);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 0);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_non_batch_misses, 0);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, 1);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_misses, 0);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_non_batch_misses, 0);
-
- std::atomic<bool> stop{false};
-
- std::thread t1([&]() {
- while (!stop.load(std::memory_order_acquire)) {
- e.Insert(batch_size);
- e.Remove(batch_size);
- }
- });
-
- std::thread t2([&]() {
- while (!stop.load(std::memory_order_acquire)) {
- auto stats = e.transfer_cache().GetHitRateStats();
- CHECK_CONDITION(stats.insert_hits >= 1);
- CHECK_CONDITION(stats.insert_misses == 0);
- CHECK_CONDITION(stats.insert_non_batch_misses == 0);
- CHECK_CONDITION(stats.remove_hits >= 1);
- CHECK_CONDITION(stats.remove_misses == 0);
- CHECK_CONDITION(stats.remove_non_batch_misses == 0);
- }
- });
-
- absl::SleepFor(absl::Seconds(1));
- stop.store(true, std::memory_order_release);
-
- t1.join();
- t2.join();
-}
-
-TYPED_TEST_P(TransferCacheTest, SingleItemSmoke) {
- const int batch_size = TypeParam::kBatchSize;
- if (batch_size == 1) {
- GTEST_SKIP() << "skipping trivial batch size";
- }
- TypeParam e;
- const int actions = e.transfer_cache().IsFlexible() ? 2 : 0;
- EXPECT_CALL(e.central_freelist(), InsertRange).Times(2 - actions);
- EXPECT_CALL(e.central_freelist(), RemoveRange).Times(2 - actions);
-
- e.Insert(1);
- e.Insert(1);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, actions);
- e.Remove(1);
- e.Remove(1);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, actions);
+ e.Remove(batch_size - 1);
+ if (e.transfer_cache().IsFlexible()) {
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, 3);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_misses, 0);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_non_batch_misses, 0);
+ } else {
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, 2);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_misses, 1);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_non_batch_misses, 1);
+ }
}
+TYPED_TEST_P(TransferCacheTest, ReadStats) {
+ const int batch_size = TypeParam::kBatchSize;
+ TypeParam e;
+ EXPECT_CALL(e.central_freelist(), InsertRange).Times(0);
+ EXPECT_CALL(e.central_freelist(), RemoveRange).Times(0);
+
+ // Ensure there is at least one insert hit/remove hit, so we can assert a
+ // non-tautology in t2.
+ e.Insert(batch_size);
+ e.Remove(batch_size);
+
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 1);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 0);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_non_batch_misses, 0);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, 1);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_misses, 0);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_non_batch_misses, 0);
+
+ std::atomic<bool> stop{false};
+
+ std::thread t1([&]() {
+ while (!stop.load(std::memory_order_acquire)) {
+ e.Insert(batch_size);
+ e.Remove(batch_size);
+ }
+ });
+
+ std::thread t2([&]() {
+ while (!stop.load(std::memory_order_acquire)) {
+ auto stats = e.transfer_cache().GetHitRateStats();
+ CHECK_CONDITION(stats.insert_hits >= 1);
+ CHECK_CONDITION(stats.insert_misses == 0);
+ CHECK_CONDITION(stats.insert_non_batch_misses == 0);
+ CHECK_CONDITION(stats.remove_hits >= 1);
+ CHECK_CONDITION(stats.remove_misses == 0);
+ CHECK_CONDITION(stats.remove_non_batch_misses == 0);
+ }
+ });
+
+ absl::SleepFor(absl::Seconds(1));
+ stop.store(true, std::memory_order_release);
+
+ t1.join();
+ t2.join();
+}
+
+TYPED_TEST_P(TransferCacheTest, SingleItemSmoke) {
+ const int batch_size = TypeParam::kBatchSize;
+ if (batch_size == 1) {
+ GTEST_SKIP() << "skipping trivial batch size";
+ }
+ TypeParam e;
+ const int actions = e.transfer_cache().IsFlexible() ? 2 : 0;
+ EXPECT_CALL(e.central_freelist(), InsertRange).Times(2 - actions);
+ EXPECT_CALL(e.central_freelist(), RemoveRange).Times(2 - actions);
+
+ e.Insert(1);
+ e.Insert(1);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, actions);
+ e.Remove(1);
+ e.Remove(1);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().remove_hits, actions);
+}
+
TYPED_TEST_P(TransferCacheTest, FetchesFromFreelist) {
const int batch_size = TypeParam::kBatchSize;
TypeParam e;
@@ -192,7 +192,7 @@ TYPED_TEST_P(TransferCacheTest, EvictsOtherCaches) {
});
EXPECT_CALL(e.central_freelist(), InsertRange).Times(0);
- while (e.transfer_cache().HasSpareCapacity(kSizeClass)) {
+ while (e.transfer_cache().HasSpareCapacity(kSizeClass)) {
e.Insert(batch_size);
}
size_t old_hits = e.transfer_cache().GetHitRateStats().insert_hits;
@@ -201,62 +201,62 @@ TYPED_TEST_P(TransferCacheTest, EvictsOtherCaches) {
EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 0);
}
-TYPED_TEST_P(TransferCacheTest, EvictsOtherCachesFlex) {
- const int batch_size = TypeParam::kBatchSize;
- TypeParam e;
-
- EXPECT_CALL(e.transfer_cache_manager(), ShrinkCache).WillRepeatedly([]() {
- return true;
- });
- if (e.transfer_cache().IsFlexible()) {
- EXPECT_CALL(e.central_freelist(), InsertRange).Times(0);
- } else {
- EXPECT_CALL(e.central_freelist(), InsertRange).Times(batch_size - 1);
- }
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 0);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 0);
-
- int total = 0;
- for (int i = 1; i <= batch_size; i++) {
- e.Insert(i);
- total += i;
- }
-
- if (e.transfer_cache().IsFlexible()) {
- EXPECT_EQ(e.transfer_cache().tc_length(), total);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, batch_size);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 0);
- } else {
- EXPECT_EQ(e.transfer_cache().tc_length(), 1 * batch_size);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 1);
- EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses,
- batch_size - 1);
- }
-}
-
-// Similar to EvictsOtherCachesFlex, but with full cache.
-TYPED_TEST_P(TransferCacheTest, FullCacheFlex) {
- const int batch_size = TypeParam::kBatchSize;
- TypeParam e;
-
- EXPECT_CALL(e.transfer_cache_manager(), ShrinkCache).WillRepeatedly([]() {
- return true;
- });
- if (e.transfer_cache().IsFlexible()) {
- EXPECT_CALL(e.central_freelist(), InsertRange).Times(0);
- } else {
- EXPECT_CALL(e.central_freelist(), InsertRange)
- .Times(testing::AtLeast(batch_size));
- }
-
- while (e.transfer_cache().HasSpareCapacity(kSizeClass)) {
- e.Insert(batch_size);
- }
- for (int i = 1; i < batch_size + 2; i++) {
- e.Insert(i);
- }
-}
-
+TYPED_TEST_P(TransferCacheTest, EvictsOtherCachesFlex) {
+ const int batch_size = TypeParam::kBatchSize;
+ TypeParam e;
+
+ EXPECT_CALL(e.transfer_cache_manager(), ShrinkCache).WillRepeatedly([]() {
+ return true;
+ });
+ if (e.transfer_cache().IsFlexible()) {
+ EXPECT_CALL(e.central_freelist(), InsertRange).Times(0);
+ } else {
+ EXPECT_CALL(e.central_freelist(), InsertRange).Times(batch_size - 1);
+ }
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 0);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 0);
+
+ int total = 0;
+ for (int i = 1; i <= batch_size; i++) {
+ e.Insert(i);
+ total += i;
+ }
+
+ if (e.transfer_cache().IsFlexible()) {
+ EXPECT_EQ(e.transfer_cache().tc_length(), total);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, batch_size);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses, 0);
+ } else {
+ EXPECT_EQ(e.transfer_cache().tc_length(), 1 * batch_size);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_hits, 1);
+ EXPECT_EQ(e.transfer_cache().GetHitRateStats().insert_misses,
+ batch_size - 1);
+ }
+}
+
+// Similar to EvictsOtherCachesFlex, but with full cache.
+TYPED_TEST_P(TransferCacheTest, FullCacheFlex) {
+ const int batch_size = TypeParam::kBatchSize;
+ TypeParam e;
+
+ EXPECT_CALL(e.transfer_cache_manager(), ShrinkCache).WillRepeatedly([]() {
+ return true;
+ });
+ if (e.transfer_cache().IsFlexible()) {
+ EXPECT_CALL(e.central_freelist(), InsertRange).Times(0);
+ } else {
+ EXPECT_CALL(e.central_freelist(), InsertRange)
+ .Times(testing::AtLeast(batch_size));
+ }
+
+ while (e.transfer_cache().HasSpareCapacity(kSizeClass)) {
+ e.Insert(batch_size);
+ }
+ for (int i = 1; i < batch_size + 2; i++) {
+ e.Insert(i);
+ }
+}
+
TYPED_TEST_P(TransferCacheTest, PushesToFreelist) {
const int batch_size = TypeParam::kBatchSize;
TypeParam e;
@@ -266,7 +266,7 @@ TYPED_TEST_P(TransferCacheTest, PushesToFreelist) {
});
EXPECT_CALL(e.central_freelist(), InsertRange).Times(1);
- while (e.transfer_cache().HasSpareCapacity(kSizeClass)) {
+ while (e.transfer_cache().HasSpareCapacity(kSizeClass)) {
e.Insert(batch_size);
}
size_t old_hits = e.transfer_cache().GetHitRateStats().insert_hits;
@@ -281,7 +281,7 @@ TYPED_TEST_P(TransferCacheTest, WrappingWorks) {
TypeParam env;
EXPECT_CALL(env.transfer_cache_manager(), ShrinkCache).Times(0);
- while (env.transfer_cache().HasSpareCapacity(kSizeClass)) {
+ while (env.transfer_cache().HasSpareCapacity(kSizeClass)) {
env.Insert(batch_size);
}
for (int i = 0; i < 100; ++i) {
@@ -290,60 +290,60 @@ TYPED_TEST_P(TransferCacheTest, WrappingWorks) {
}
}
-TYPED_TEST_P(TransferCacheTest, WrappingFlex) {
- const int batch_size = TypeParam::kBatchSize;
-
- TypeParam env;
- EXPECT_CALL(env.transfer_cache_manager(), ShrinkCache).Times(0);
- if (env.transfer_cache().IsFlexible()) {
- EXPECT_CALL(env.central_freelist(), InsertRange).Times(0);
- EXPECT_CALL(env.central_freelist(), RemoveRange).Times(0);
- }
-
- while (env.transfer_cache().HasSpareCapacity(kSizeClass)) {
- env.Insert(batch_size);
- }
- for (int i = 0; i < 100; ++i) {
- for (size_t size = 1; size < batch_size + 2; size++) {
- env.Remove(size);
- env.Insert(size);
- }
- }
-}
-
-TYPED_TEST_P(TransferCacheTest, Plunder) {
- TypeParam env;
- // EXPECT_CALL(env.central_freelist(), RemoveRange).Times(0);
- // EXPECT_CALL(env.central_freelist(), InsertRange).Times(1);
- // Fill in some elements.
- env.Insert(TypeParam::kBatchSize);
- env.Insert(TypeParam::kBatchSize);
- ASSERT_EQ(env.transfer_cache().tc_length(), 2 * TypeParam::kBatchSize);
- // All these elements will be plundered.
- env.transfer_cache().TryPlunder(kSizeClass);
- ASSERT_EQ(env.transfer_cache().tc_length(), 0);
-
- env.Insert(TypeParam::kBatchSize);
- env.Insert(TypeParam::kBatchSize);
- ASSERT_EQ(env.transfer_cache().tc_length(), 2 * TypeParam::kBatchSize);
-
- void* buf[TypeParam::kBatchSize];
- // -1 +1, this sets the low_water_mark (the lowest end-state after a
- // call to RemoveRange to 1 batch.
- (void)env.transfer_cache().RemoveRange(kSizeClass, buf,
- TypeParam::kBatchSize);
- env.transfer_cache().InsertRange(kSizeClass, {buf, TypeParam::kBatchSize});
- ASSERT_EQ(env.transfer_cache().tc_length(), 2 * TypeParam::kBatchSize);
- // We have one batch, and this is the same as the low water mark, so nothing
- // gets plundered.
- env.transfer_cache().TryPlunder(kSizeClass);
- ASSERT_EQ(env.transfer_cache().tc_length(), TypeParam::kBatchSize);
- // If we plunder immediately the low_water_mark is at maxint, and eveything
- // gets plundered.
- env.transfer_cache().TryPlunder(kSizeClass);
- ASSERT_EQ(env.transfer_cache().tc_length(), 0);
-}
-
+TYPED_TEST_P(TransferCacheTest, WrappingFlex) {
+ const int batch_size = TypeParam::kBatchSize;
+
+ TypeParam env;
+ EXPECT_CALL(env.transfer_cache_manager(), ShrinkCache).Times(0);
+ if (env.transfer_cache().IsFlexible()) {
+ EXPECT_CALL(env.central_freelist(), InsertRange).Times(0);
+ EXPECT_CALL(env.central_freelist(), RemoveRange).Times(0);
+ }
+
+ while (env.transfer_cache().HasSpareCapacity(kSizeClass)) {
+ env.Insert(batch_size);
+ }
+ for (int i = 0; i < 100; ++i) {
+ for (size_t size = 1; size < batch_size + 2; size++) {
+ env.Remove(size);
+ env.Insert(size);
+ }
+ }
+}
+
+TYPED_TEST_P(TransferCacheTest, Plunder) {
+ TypeParam env;
+ // EXPECT_CALL(env.central_freelist(), RemoveRange).Times(0);
+ // EXPECT_CALL(env.central_freelist(), InsertRange).Times(1);
+ // Fill in some elements.
+ env.Insert(TypeParam::kBatchSize);
+ env.Insert(TypeParam::kBatchSize);
+ ASSERT_EQ(env.transfer_cache().tc_length(), 2 * TypeParam::kBatchSize);
+ // All these elements will be plundered.
+ env.transfer_cache().TryPlunder(kSizeClass);
+ ASSERT_EQ(env.transfer_cache().tc_length(), 0);
+
+ env.Insert(TypeParam::kBatchSize);
+ env.Insert(TypeParam::kBatchSize);
+ ASSERT_EQ(env.transfer_cache().tc_length(), 2 * TypeParam::kBatchSize);
+
+ void* buf[TypeParam::kBatchSize];
+  // -1 +1: this sets the low_water_mark (the lowest end-state after a
+  // call to RemoveRange) to 1 batch.
+ (void)env.transfer_cache().RemoveRange(kSizeClass, buf,
+ TypeParam::kBatchSize);
+ env.transfer_cache().InsertRange(kSizeClass, {buf, TypeParam::kBatchSize});
+ ASSERT_EQ(env.transfer_cache().tc_length(), 2 * TypeParam::kBatchSize);
+ // We have one batch, and this is the same as the low water mark, so nothing
+ // gets plundered.
+ env.transfer_cache().TryPlunder(kSizeClass);
+ ASSERT_EQ(env.transfer_cache().tc_length(), TypeParam::kBatchSize);
+  // If we plunder again immediately, the low_water_mark is at maxint, and
+  // everything gets plundered.
+ env.transfer_cache().TryPlunder(kSizeClass);
+ ASSERT_EQ(env.transfer_cache().tc_length(), 0);
+}
+
// PickCoprimeBatchSize picks a batch size in [2, max_batch_size) that is
// coprime with 2^32. We choose the largest possible batch size within that
// constraint to minimize the number of iterations of insert/remove required.
@@ -358,12 +358,12 @@ static size_t PickCoprimeBatchSize(size_t max_batch_size) {
return max_batch_size;
}
-TEST(RingBufferTest, b172283201) {
+TEST(RingBufferTest, b172283201) {
// This test is designed to exercise the wraparound behavior for the
- // RingBufferTransferCache, which manages its indices in uint32_t's. Because
- // it uses a non-standard batch size (kBatchSize) as part of
- // PickCoprimeBatchSize, it triggers a TransferCache miss to the
- // CentralFreeList, which is uninteresting for exercising b/172283201.
+ // RingBufferTransferCache, which manages its indices in uint32_t's. Because
+ // it uses a non-standard batch size (kBatchSize) as part of
+ // PickCoprimeBatchSize, it triggers a TransferCache miss to the
+ // CentralFreeList, which is uninteresting for exercising b/172283201.
// For performance reasons, limit to optimized builds.
#if !defined(NDEBUG)
@@ -374,8 +374,8 @@ TEST(RingBufferTest, b172283201) {
#endif
using EnvType = FakeTransferCacheEnvironment<
- internal_transfer_cache::RingBufferTransferCache<
- MockCentralFreeList, MockTransferCacheManager>>;
+ internal_transfer_cache::RingBufferTransferCache<
+ MockCentralFreeList, MockTransferCacheManager>>;
EnvType env;
// We pick the largest value <= EnvType::kBatchSize to use as a batch size,
@@ -396,63 +396,63 @@ TEST(RingBufferTest, b172283201) {
pointers.push_back(&buffer[i]);
}
- // To produce wraparound in the RingBufferTransferCache, we fill up the cache
- // completely and then keep inserting new elements. This makes the cache
- // return old elements to the freelist and eventually wrap around.
+ // To produce wraparound in the RingBufferTransferCache, we fill up the cache
+ // completely and then keep inserting new elements. This makes the cache
+ // return old elements to the freelist and eventually wrap around.
EXPECT_CALL(env.central_freelist(), RemoveRange).Times(0);
- // We do return items to the freelist, don't try to actually free them.
- ON_CALL(env.central_freelist(), InsertRange).WillByDefault(testing::Return());
- ON_CALL(env.transfer_cache_manager(), DetermineSizeClassToEvict)
- .WillByDefault(testing::Return(kSizeClass));
-
- // First fill up the cache to its capacity.
-
- while (env.transfer_cache().HasSpareCapacity(kSizeClass) ||
- env.transfer_cache().GrowCache(kSizeClass)) {
- env.transfer_cache().InsertRange(kSizeClass, absl::MakeSpan(pointers));
+ // We do return items to the freelist, don't try to actually free them.
+ ON_CALL(env.central_freelist(), InsertRange).WillByDefault(testing::Return());
+ ON_CALL(env.transfer_cache_manager(), DetermineSizeClassToEvict)
+ .WillByDefault(testing::Return(kSizeClass));
+
+ // First fill up the cache to its capacity.
+
+ while (env.transfer_cache().HasSpareCapacity(kSizeClass) ||
+ env.transfer_cache().GrowCache(kSizeClass)) {
+ env.transfer_cache().InsertRange(kSizeClass, absl::MakeSpan(pointers));
+ }
+
+ // The current size of the transfer cache is close to its capacity. Insert
+  // enough batches to make sure we wrap around twice (one batch should wrap
+  // around since we are currently full; then we insert the same number of
+  // items again, producing one more wraparound).
+ const size_t kObjects = env.transfer_cache().tc_length() + 2 * batch_size;
+
+ // From now on, calls to InsertRange() should result in a corresponding call
+ // to the freelist whenever the cache is full. This doesn't happen on every
+ // call, as we return up to num_to_move (i.e. kBatchSize) items to the free
+ // list in one batch.
+ EXPECT_CALL(env.central_freelist(),
+ InsertRange(testing::SizeIs(EnvType::kBatchSize)))
+ .Times(testing::AnyNumber());
+ for (size_t i = 0; i < kObjects; i += batch_size) {
+ env.transfer_cache().InsertRange(kSizeClass, absl::MakeSpan(pointers));
}
-
- // The current size of the transfer cache is close to its capacity. Insert
- // enough batches to make sure we wrap around twice (1 batch size should wrap
- // around as we are full currently, then insert the same amount of items
- // again, then one more wrap around).
- const size_t kObjects = env.transfer_cache().tc_length() + 2 * batch_size;
-
- // From now on, calls to InsertRange() should result in a corresponding call
- // to the freelist whenever the cache is full. This doesn't happen on every
- // call, as we return up to num_to_move (i.e. kBatchSize) items to the free
- // list in one batch.
- EXPECT_CALL(env.central_freelist(),
- InsertRange(testing::SizeIs(EnvType::kBatchSize)))
- .Times(testing::AnyNumber());
- for (size_t i = 0; i < kObjects; i += batch_size) {
- env.transfer_cache().InsertRange(kSizeClass, absl::MakeSpan(pointers));
- }
- // Manually drain the items in the transfercache, otherwise the destructor
- // will try to free them.
- std::vector<void*> to_free(batch_size);
- size_t N = env.transfer_cache().tc_length();
- while (N > 0) {
- const size_t to_remove = std::min(N, batch_size);
- const size_t removed =
- env.transfer_cache().RemoveRange(kSizeClass, to_free.data(), to_remove);
- ASSERT_THAT(removed, testing::Le(to_remove));
- ASSERT_THAT(removed, testing::Gt(0));
- N -= removed;
- }
- ASSERT_EQ(env.transfer_cache().tc_length(), 0);
+  // Manually drain the items in the transfer cache; otherwise the destructor
+  // will try to free them.
+ std::vector<void*> to_free(batch_size);
+ size_t N = env.transfer_cache().tc_length();
+ while (N > 0) {
+ const size_t to_remove = std::min(N, batch_size);
+ const size_t removed =
+ env.transfer_cache().RemoveRange(kSizeClass, to_free.data(), to_remove);
+ ASSERT_THAT(removed, testing::Le(to_remove));
+ ASSERT_THAT(removed, testing::Gt(0));
+ N -= removed;
+ }
+ ASSERT_EQ(env.transfer_cache().tc_length(), 0);
}
-REGISTER_TYPED_TEST_SUITE_P(TransferCacheTest, IsolatedSmoke, ReadStats,
+REGISTER_TYPED_TEST_SUITE_P(TransferCacheTest, IsolatedSmoke, ReadStats,
FetchesFromFreelist, PartialFetchFromFreelist,
- EvictsOtherCaches, PushesToFreelist, WrappingWorks,
- SingleItemSmoke, EvictsOtherCachesFlex,
- FullCacheFlex, WrappingFlex, Plunder);
+ EvictsOtherCaches, PushesToFreelist, WrappingWorks,
+ SingleItemSmoke, EvictsOtherCachesFlex,
+ FullCacheFlex, WrappingFlex, Plunder);
template <typename Env>
-using FuzzTest = ::testing::Test;
-TYPED_TEST_SUITE_P(FuzzTest);
+using FuzzTest = ::testing::Test;
+TYPED_TEST_SUITE_P(FuzzTest);
-TYPED_TEST_P(FuzzTest, MultiThreadedUnbiased) {
+TYPED_TEST_P(FuzzTest, MultiThreadedUnbiased) {
TypeParam env;
ThreadManager threads;
threads.Start(10, [&](int) { env.RandomlyPoke(); });
@@ -462,7 +462,7 @@ TYPED_TEST_P(FuzzTest, MultiThreadedUnbiased) {
threads.Stop();
}
-TYPED_TEST_P(FuzzTest, MultiThreadedBiasedInsert) {
+TYPED_TEST_P(FuzzTest, MultiThreadedBiasedInsert) {
const int batch_size = TypeParam::kBatchSize;
TypeParam env;
@@ -474,7 +474,7 @@ TYPED_TEST_P(FuzzTest, MultiThreadedBiasedInsert) {
threads.Stop();
}
-TYPED_TEST_P(FuzzTest, MultiThreadedBiasedRemove) {
+TYPED_TEST_P(FuzzTest, MultiThreadedBiasedRemove) {
const int batch_size = TypeParam::kBatchSize;
TypeParam env;
@@ -486,7 +486,7 @@ TYPED_TEST_P(FuzzTest, MultiThreadedBiasedRemove) {
threads.Stop();
}
-TYPED_TEST_P(FuzzTest, MultiThreadedBiasedShrink) {
+TYPED_TEST_P(FuzzTest, MultiThreadedBiasedShrink) {
TypeParam env;
ThreadManager threads;
threads.Start(10, [&](int) { env.RandomlyPoke(); });
@@ -496,7 +496,7 @@ TYPED_TEST_P(FuzzTest, MultiThreadedBiasedShrink) {
threads.Stop();
}
-TYPED_TEST_P(FuzzTest, MultiThreadedBiasedGrow) {
+TYPED_TEST_P(FuzzTest, MultiThreadedBiasedGrow) {
TypeParam env;
ThreadManager threads;
threads.Start(10, [&](int) { env.RandomlyPoke(); });
@@ -506,120 +506,120 @@ TYPED_TEST_P(FuzzTest, MultiThreadedBiasedGrow) {
threads.Stop();
}
-REGISTER_TYPED_TEST_SUITE_P(FuzzTest, MultiThreadedUnbiased,
+REGISTER_TYPED_TEST_SUITE_P(FuzzTest, MultiThreadedUnbiased,
MultiThreadedBiasedInsert,
MultiThreadedBiasedRemove, MultiThreadedBiasedGrow,
MultiThreadedBiasedShrink);
namespace unit_tests {
-using Env = FakeTransferCacheEnvironment<internal_transfer_cache::TransferCache<
- MockCentralFreeList, MockTransferCacheManager>>;
-INSTANTIATE_TYPED_TEST_SUITE_P(TransferCache, TransferCacheTest,
- ::testing::Types<Env>);
-
-using RingBufferEnv = FakeTransferCacheEnvironment<
- internal_transfer_cache::RingBufferTransferCache<MockCentralFreeList,
- MockTransferCacheManager>>;
-INSTANTIATE_TYPED_TEST_SUITE_P(RingBuffer, TransferCacheTest,
- ::testing::Types<RingBufferEnv>);
+using Env = FakeTransferCacheEnvironment<internal_transfer_cache::TransferCache<
+ MockCentralFreeList, MockTransferCacheManager>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(TransferCache, TransferCacheTest,
+ ::testing::Types<Env>);
+
+using RingBufferEnv = FakeTransferCacheEnvironment<
+ internal_transfer_cache::RingBufferTransferCache<MockCentralFreeList,
+ MockTransferCacheManager>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(RingBuffer, TransferCacheTest,
+ ::testing::Types<RingBufferEnv>);
} // namespace unit_tests
namespace fuzz_tests {
// Use the FakeCentralFreeList instead of the MockCentralFreeList for fuzz tests
// as it avoids the overheads of mocks and allows more iterations of the fuzzing
// itself.
-using Env = FakeTransferCacheEnvironment<internal_transfer_cache::TransferCache<
- MockCentralFreeList, MockTransferCacheManager>>;
-INSTANTIATE_TYPED_TEST_SUITE_P(TransferCache, FuzzTest, ::testing::Types<Env>);
-
-using RingBufferEnv = FakeTransferCacheEnvironment<
- internal_transfer_cache::RingBufferTransferCache<MockCentralFreeList,
- MockTransferCacheManager>>;
-INSTANTIATE_TYPED_TEST_SUITE_P(RingBuffer, FuzzTest,
- ::testing::Types<RingBufferEnv>);
+using Env = FakeTransferCacheEnvironment<internal_transfer_cache::TransferCache<
+ MockCentralFreeList, MockTransferCacheManager>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(TransferCache, FuzzTest, ::testing::Types<Env>);
+
+using RingBufferEnv = FakeTransferCacheEnvironment<
+ internal_transfer_cache::RingBufferTransferCache<MockCentralFreeList,
+ MockTransferCacheManager>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(RingBuffer, FuzzTest,
+ ::testing::Types<RingBufferEnv>);
} // namespace fuzz_tests
-namespace leak_tests {
-
-template <typename Env>
-using TwoSizeClassTest = ::testing::Test;
-TYPED_TEST_SUITE_P(TwoSizeClassTest);
-
-TYPED_TEST_P(TwoSizeClassTest, NoLeaks) {
- TypeParam env;
-
- // The point of this test is to see that adding "random" amounts of
- // allocations to the transfer caches behaves correctly, even in the case that
- // there are multiple size classes interacting by stealing from each other.
-
- // Fill all caches to their maximum without starting to steal from each other.
- for (int cl = 1; cl < TypeParam::Manager::kSizeClasses; ++cl) {
- const size_t batch_size = TypeParam::Manager::num_objects_to_move(cl);
- while (env.transfer_cache_manager().HasSpareCapacity(cl)) {
- env.Insert(cl, batch_size);
- }
- }
-
- // Count the number of batches currently in the cache.
- auto count_batches = [&env]() {
- int batch_count = 0;
- for (int cl = 1; cl < TypeParam::Manager::kSizeClasses; ++cl) {
- const size_t batch_size = TypeParam::Manager::num_objects_to_move(cl);
- batch_count += env.transfer_cache_manager().tc_length(cl) / batch_size;
- }
- return batch_count;
- };
-
- absl::BitGen bitgen;
- const int max_batches = count_batches();
- int expected_batches = max_batches;
- for (int i = 0; i < 100; ++i) {
- {
- // First remove.
- const int cl =
- absl::Uniform<int>(bitgen, 1, TypeParam::Manager::kSizeClasses);
- const size_t batch_size = TypeParam::Manager::num_objects_to_move(cl);
- if (env.transfer_cache_manager().tc_length(cl) >= batch_size) {
- env.Remove(cl, batch_size);
- --expected_batches;
- }
- const int current_batches = count_batches();
- EXPECT_EQ(current_batches, expected_batches) << "iteration " << i;
- }
- {
- // Then add in another size class.
- const int cl =
- absl::Uniform<int>(bitgen, 1, TypeParam::Manager::kSizeClasses);
- // Evict from the "next" size class, skipping 0.
- // This makes sure we are always evicting from somewhere if at all
- // possible.
- env.transfer_cache_manager().evicting_from_ =
- 1 + cl % (TypeParam::Manager::kSizeClasses - 1);
- if (expected_batches < max_batches) {
- const size_t batch_size = TypeParam::Manager::num_objects_to_move(cl);
- env.Insert(cl, batch_size);
- ++expected_batches;
- }
- const int current_batches = count_batches();
- EXPECT_EQ(current_batches, expected_batches) << "iteration " << i;
- }
- }
-}
-
-REGISTER_TYPED_TEST_SUITE_P(TwoSizeClassTest, NoLeaks);
-
-using TwoTransferCacheEnv =
- TwoSizeClassEnv<internal_transfer_cache::TransferCache>;
-INSTANTIATE_TYPED_TEST_SUITE_P(TransferCache, TwoSizeClassTest,
- ::testing::Types<TwoTransferCacheEnv>);
-
-using TwoRingBufferEnv =
- TwoSizeClassEnv<internal_transfer_cache::RingBufferTransferCache>;
-INSTANTIATE_TYPED_TEST_SUITE_P(RingBuffer, TwoSizeClassTest,
- ::testing::Types<TwoRingBufferEnv>);
-
-} // namespace leak_tests
-
+namespace leak_tests {
+
+template <typename Env>
+using TwoSizeClassTest = ::testing::Test;
+TYPED_TEST_SUITE_P(TwoSizeClassTest);
+
+TYPED_TEST_P(TwoSizeClassTest, NoLeaks) {
+ TypeParam env;
+
+  // The point of this test is to check that adding "random" amounts of
+  // allocations to the transfer caches behaves correctly, even when
+  // multiple size classes interact by stealing from each other.
+
+ // Fill all caches to their maximum without starting to steal from each other.
+ for (int cl = 1; cl < TypeParam::Manager::kSizeClasses; ++cl) {
+ const size_t batch_size = TypeParam::Manager::num_objects_to_move(cl);
+ while (env.transfer_cache_manager().HasSpareCapacity(cl)) {
+ env.Insert(cl, batch_size);
+ }
+ }
+
+ // Count the number of batches currently in the cache.
+ auto count_batches = [&env]() {
+ int batch_count = 0;
+ for (int cl = 1; cl < TypeParam::Manager::kSizeClasses; ++cl) {
+ const size_t batch_size = TypeParam::Manager::num_objects_to_move(cl);
+ batch_count += env.transfer_cache_manager().tc_length(cl) / batch_size;
+ }
+ return batch_count;
+ };
+
+ absl::BitGen bitgen;
+ const int max_batches = count_batches();
+ int expected_batches = max_batches;
+ for (int i = 0; i < 100; ++i) {
+ {
+ // First remove.
+ const int cl =
+ absl::Uniform<int>(bitgen, 1, TypeParam::Manager::kSizeClasses);
+ const size_t batch_size = TypeParam::Manager::num_objects_to_move(cl);
+ if (env.transfer_cache_manager().tc_length(cl) >= batch_size) {
+ env.Remove(cl, batch_size);
+ --expected_batches;
+ }
+ const int current_batches = count_batches();
+ EXPECT_EQ(current_batches, expected_batches) << "iteration " << i;
+ }
+ {
+ // Then add in another size class.
+ const int cl =
+ absl::Uniform<int>(bitgen, 1, TypeParam::Manager::kSizeClasses);
+ // Evict from the "next" size class, skipping 0.
+ // This makes sure we are always evicting from somewhere if at all
+ // possible.
+ env.transfer_cache_manager().evicting_from_ =
+ 1 + cl % (TypeParam::Manager::kSizeClasses - 1);
+ if (expected_batches < max_batches) {
+ const size_t batch_size = TypeParam::Manager::num_objects_to_move(cl);
+ env.Insert(cl, batch_size);
+ ++expected_batches;
+ }
+ const int current_batches = count_batches();
+ EXPECT_EQ(current_batches, expected_batches) << "iteration " << i;
+ }
+ }
+}
+
+REGISTER_TYPED_TEST_SUITE_P(TwoSizeClassTest, NoLeaks);
+
+using TwoTransferCacheEnv =
+ TwoSizeClassEnv<internal_transfer_cache::TransferCache>;
+INSTANTIATE_TYPED_TEST_SUITE_P(TransferCache, TwoSizeClassTest,
+ ::testing::Types<TwoTransferCacheEnv>);
+
+using TwoRingBufferEnv =
+ TwoSizeClassEnv<internal_transfer_cache::RingBufferTransferCache>;
+INSTANTIATE_TYPED_TEST_SUITE_P(RingBuffer, TwoSizeClassTest,
+ ::testing::Types<TwoRingBufferEnv>);
+
+} // namespace leak_tests
+
} // namespace
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
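The PickCoprimeBatchSize comment in the test above relies on a small number-theoretic fact: a batch size coprime with 2^32 (equivalently, odd) keeps shifting the uint32_t indices under repeated insertion instead of settling into a short cycle, which is presumably what lets the test reach the wraparound paths. A standalone sketch of that selection rule follows, assuming C++17 for std::gcd; the function name is illustrative and this is not the test's actual helper.

// Illustrative sketch of the selection rule described by the
// PickCoprimeBatchSize comment above; not the test's actual helper.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <numeric>  // std::gcd (C++17)

// Returns the largest value in [2, max_batch_size) that is coprime with
// 2^32 (i.e. odd), falling back to max_batch_size when the range is empty.
static std::size_t PickCoprimeBatchSizeSketch(std::size_t max_batch_size) {
  if (max_batch_size < 3) return max_batch_size;
  for (std::size_t s = max_batch_size - 1; s >= 2; --s) {
    if (std::gcd(s, std::uint64_t{1} << 32) == 1) return s;
  }
  return max_batch_size;
}

int main() {
  // With a typical kBatchSize of 32 the largest coprime candidate is 31.
  std::printf("picked batch size: %zu\n", PickCoprimeBatchSizeSketch(32));
  return 0;
}

For a kBatchSize of 32 the sketch picks 31, the largest odd value below the limit, matching the "largest possible batch size" preference stated in the comment.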
diff --git a/contrib/libs/tcmalloc/tcmalloc/want_hpaa.cc b/contrib/libs/tcmalloc/tcmalloc/want_hpaa.cc
index b488ceb54f..6047944bea 100644
--- a/contrib/libs/tcmalloc/tcmalloc/want_hpaa.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/want_hpaa.cc
@@ -13,11 +13,11 @@
// limitations under the License.
#include "absl/base/attributes.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// This -if linked into a binary - overrides page_allocator.cc and forces HPAA
// on/subrelease off.
@@ -25,6 +25,6 @@ ABSL_ATTRIBUTE_UNUSED int default_want_hpaa() { return 1; }
ABSL_ATTRIBUTE_UNUSED int default_subrelease() { return -1; }
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/want_hpaa_subrelease.cc b/contrib/libs/tcmalloc/tcmalloc/want_hpaa_subrelease.cc
index 323cce40ed..acad45c51b 100644
--- a/contrib/libs/tcmalloc/tcmalloc/want_hpaa_subrelease.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/want_hpaa_subrelease.cc
@@ -13,11 +13,11 @@
// limitations under the License.
#include "absl/base/attributes.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// This -if linked into a binary - overrides page_allocator.cc and forces HPAA
// on/subrelease on.
@@ -25,6 +25,6 @@ ABSL_ATTRIBUTE_UNUSED int default_want_hpaa() { return 1; }
ABSL_ATTRIBUTE_UNUSED int default_subrelease() { return 1; }
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/want_legacy_spans.cc b/contrib/libs/tcmalloc/tcmalloc/want_legacy_spans.cc
index 28580e13ed..5a46481c05 100644
--- a/contrib/libs/tcmalloc/tcmalloc/want_legacy_spans.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/want_legacy_spans.cc
@@ -13,16 +13,16 @@
// limitations under the License.
#include "absl/base/attributes.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// This -if linked into a binary - overrides common.cc and
// forces old span sizes.
ABSL_ATTRIBUTE_UNUSED int default_want_legacy_spans() { return 1; }
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/want_no_hpaa.cc b/contrib/libs/tcmalloc/tcmalloc/want_no_hpaa.cc
index e23d93d9ce..700efa2064 100644
--- a/contrib/libs/tcmalloc/tcmalloc/want_no_hpaa.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/want_no_hpaa.cc
@@ -13,11 +13,11 @@
// limitations under the License.
#include "absl/base/attributes.h"
-#include "tcmalloc/internal/config.h"
+#include "tcmalloc/internal/config.h"
-GOOGLE_MALLOC_SECTION_BEGIN
+GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
-namespace tcmalloc_internal {
+namespace tcmalloc_internal {
// This -if linked into a binary - overrides page_allocator.cc and
// forces HPAA off/subrelease off.
@@ -25,6 +25,6 @@ ABSL_ATTRIBUTE_UNUSED int default_want_hpaa() { return -1; }
ABSL_ATTRIBUTE_UNUSED int default_subrelease() { return -1; }
-} // namespace tcmalloc_internal
+} // namespace tcmalloc_internal
} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/tcmalloc/want_numa_aware.cc b/contrib/libs/tcmalloc/tcmalloc/want_numa_aware.cc
index 3f0519dd50..1fc1cf0a17 100644
--- a/contrib/libs/tcmalloc/tcmalloc/want_numa_aware.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/want_numa_aware.cc
@@ -1,28 +1,28 @@
-// Copyright 2021 The TCMalloc Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/attributes.h"
-#include "tcmalloc/internal/config.h"
-
-GOOGLE_MALLOC_SECTION_BEGIN
-namespace tcmalloc {
-namespace tcmalloc_internal {
-
-// When linked into a binary this overrides the weak implementation in numa.cc
-// and causes TCMalloc to enable NUMA awareness by default.
-ABSL_ATTRIBUTE_UNUSED bool default_want_numa_aware() { return true; }
-
-} // namespace tcmalloc_internal
-} // namespace tcmalloc
-GOOGLE_MALLOC_SECTION_END
+// Copyright 2021 The TCMalloc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/attributes.h"
+#include "tcmalloc/internal/config.h"
+
+GOOGLE_MALLOC_SECTION_BEGIN
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
+// When linked into a binary this overrides the weak implementation in numa.cc
+// and causes TCMalloc to enable NUMA awareness by default.
+ABSL_ATTRIBUTE_UNUSED bool default_want_numa_aware() { return true; }
+
+} // namespace tcmalloc_internal
+} // namespace tcmalloc
+GOOGLE_MALLOC_SECTION_END
diff --git a/contrib/libs/tcmalloc/ya.make b/contrib/libs/tcmalloc/ya.make
index 54701b1b77..dfd1706aad 100644
--- a/contrib/libs/tcmalloc/ya.make
+++ b/contrib/libs/tcmalloc/ya.make
@@ -6,33 +6,33 @@ LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
OWNER(
ayles
- prime
+ prime
g:cpp-contrib
)
# https://github.com/google/tcmalloc
-VERSION(2021-10-04-45c59ccbc062ac96d83710205033c656e490d376)
+VERSION(2021-10-04-45c59ccbc062ac96d83710205033c656e490d376)
SRCS(
- # Options
- tcmalloc/want_hpaa.cc
+ # Options
+ tcmalloc/want_hpaa.cc
)
-INCLUDE(common.inc)
+INCLUDE(common.inc)
CFLAGS(
-DTCMALLOC_256K_PAGES
)
END()
-
+
IF (NOT DLL_FOR)
- RECURSE(
- default
- dynamic
- malloc_extension
- numa_256k
- numa_large_pages
- slow_but_small
- )
-ENDIF()
+ RECURSE(
+ default
+ dynamic
+ malloc_extension
+ numa_256k
+ numa_large_pages
+ slow_but_small
+ )
+ENDIF()
diff --git a/contrib/libs/ya.make b/contrib/libs/ya.make
index 9c4640fdcf..956e372391 100644
--- a/contrib/libs/ya.make
+++ b/contrib/libs/ya.make
@@ -189,7 +189,7 @@ RECURSE(
libtiff
libunistring
libunwind
- liburing
+ liburing
libuv
libvorbis
libvorbis/libvorbisenc
diff --git a/contrib/restricted/abseil-cpp/.yandex_meta/devtools.copyrights.report b/contrib/restricted/abseil-cpp/.yandex_meta/devtools.copyrights.report
index 52e2892efe..13685fed91 100644
--- a/contrib/restricted/abseil-cpp/.yandex_meta/devtools.copyrights.report
+++ b/contrib/restricted/abseil-cpp/.yandex_meta/devtools.copyrights.report
@@ -1,321 +1,321 @@
-# File format ($ symbol means the beginning of a line):
-#
-# $ # this message
-# $ # =======================
-# $ # comments (all commentaries should starts with some number of spaces and # symbol)
-# ${action} {license id} {license text hash}
-# $BELONGS ./ya/make/file/relative/path/1/ya.make ./ya/make/2/ya.make
-# ${all_file_action} filename
-# $ # user commentaries (many lines)
-# $ generated description - files with this license, license text... (some number of lines that starts with some number of spaces, do not modify)
-# ${action} {license spdx} {license text hash}
-# $BELONGS ./ya/make/file/relative/path/3/ya.make
-# ${all_file_action} filename
-# $ # user commentaries
-# $ generated description
-# $ ...
-#
-# You can modify action, all_file_action and add commentaries
-# Available actions:
-# keep - keep license in contrib and use in credits
-# skip - skip license
-# remove - remove all files with this license
-# rename - save license text/links into licenses texts file, but not store SPDX into LINCENSE macro. You should store correct license id into devtools.license.spdx.txt file
-#
-# {all file action} records will be generated when license text contains filename that exists on filesystem (in contrib directory)
-# We suppose that that files can contain some license info
-# Available all file actions:
-# FILE_IGNORE - ignore file (do nothing)
-# FILE_INCLUDE - include all file data into licenses text file
-# =======================
-
-KEEP COPYRIGHT_SERVICE_LABEL 02e3ff10f74acdb217118846c5465fc1
+# File format ($ symbol means the beginning of a line):
+#
+# $ # this message
+# $ # =======================
+# $ # comments (all commentaries should start with some number of spaces and a # symbol)
+# ${action} {license id} {license text hash}
+# $BELONGS ./ya/make/file/relative/path/1/ya.make ./ya/make/2/ya.make
+# ${all_file_action} filename
+# $ # user commentaries (many lines)
+# $ generated description - files with this license, license text... (some number of lines that starts with some number of spaces, do not modify)
+# ${action} {license spdx} {license text hash}
+# $BELONGS ./ya/make/file/relative/path/3/ya.make
+# ${all_file_action} filename
+# $ # user commentaries
+# $ generated description
+# $ ...
+#
+# You can modify action, all_file_action and add commentaries
+# Available actions:
+# keep - keep license in contrib and use in credits
+# skip - skip license
+# remove - remove all files with this license
+# rename - save license text/links into licenses texts file, but not store SPDX into LICENSE macro. You should store correct license id into devtools.license.spdx.txt file
+#
+# {all file action} records will be generated when license text contains filename that exists on filesystem (in contrib directory)
+# We suppose that these files can contain some license info
+# Available all file actions:
+# FILE_IGNORE - ignore file (do nothing)
+# FILE_INCLUDE - include all file data into licenses text file
+# =======================
+
+KEEP COPYRIGHT_SERVICE_LABEL 02e3ff10f74acdb217118846c5465fc1
BELONGS absl/algorithm/ya.make absl/base/ya.make absl/container/ya.make absl/debugging/internal/ya.make absl/debugging/ya.make absl/memory/ya.make absl/meta/ya.make absl/numeric/ya.make absl/strings/internal/str_format/ya.make absl/strings/ya.make absl/synchronization/internal/ya.make absl/synchronization/ya.make absl/time/ya.make absl/types/internal/ya.make absl/types/ya.make absl/utility/ya.make ya.make
- License text:
- // Copyright 2017 The Abseil Authors.
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- absl/algorithm/algorithm.h [1:1]
- absl/algorithm/container.h [1:1]
- absl/base/attributes.h [1:1]
- absl/base/call_once.h [1:1]
- absl/base/casts.h [2:2]
- absl/base/config.h [2:2]
- absl/base/const_init.h [1:1]
- absl/base/dynamic_annotations.h [1:1]
- absl/base/internal/atomic_hook.h [1:1]
- absl/base/internal/atomic_hook_test_helper.h [1:1]
- absl/base/internal/cycleclock.cc [1:1]
- absl/base/internal/cycleclock.h [2:2]
- absl/base/internal/direct_mmap.h [1:1]
- absl/base/internal/dynamic_annotations.h [1:1]
- absl/base/internal/endian.h [1:1]
- absl/base/internal/errno_saver.h [1:1]
- absl/base/internal/exception_safety_testing.h [1:1]
- absl/base/internal/exception_testing.h [1:1]
- absl/base/internal/identity.h [1:1]
- absl/base/internal/inline_variable.h [1:1]
- absl/base/internal/inline_variable_testing.h [1:1]
- absl/base/internal/invoke.h [1:1]
- absl/base/internal/low_level_alloc.cc [1:1]
- absl/base/internal/low_level_alloc.h [1:1]
- absl/base/internal/low_level_scheduling.h [1:1]
- absl/base/internal/per_thread_tls.h [1:1]
- absl/base/internal/pretty_function.h [1:1]
- absl/base/internal/raw_logging.cc [1:1]
- absl/base/internal/raw_logging.h [1:1]
- absl/base/internal/scheduling_mode.h [1:1]
- absl/base/internal/spinlock.cc [1:1]
- absl/base/internal/spinlock.h [2:2]
- absl/base/internal/spinlock_akaros.inc [1:1]
- absl/base/internal/spinlock_posix.inc [1:1]
- absl/base/internal/spinlock_wait.cc [1:1]
- absl/base/internal/spinlock_wait.h [1:1]
- absl/base/internal/spinlock_win32.inc [1:1]
- absl/base/internal/sysinfo.cc [1:1]
- absl/base/internal/sysinfo.h [1:1]
- absl/base/internal/thread_identity.cc [1:1]
- absl/base/internal/thread_identity.h [1:1]
- absl/base/internal/throw_delegate.cc [1:1]
- absl/base/internal/throw_delegate.h [2:2]
- absl/base/internal/tsan_mutex_interface.h [1:1]
- absl/base/internal/unaligned_access.h [2:2]
- absl/base/internal/unscaledcycleclock.cc [1:1]
- absl/base/internal/unscaledcycleclock.h [1:1]
- absl/base/log_severity.cc [1:1]
- absl/base/log_severity.h [1:1]
- absl/base/macros.h [2:2]
- absl/base/optimization.h [2:2]
- absl/base/policy_checks.h [1:1]
- absl/base/port.h [1:1]
- absl/base/thread_annotations.h [1:1]
- absl/container/internal/test_instance_tracker.h [1:1]
- absl/debugging/internal/address_is_readable.cc [1:1]
- absl/debugging/internal/address_is_readable.h [1:1]
- absl/debugging/internal/elf_mem_image.cc [1:1]
- absl/debugging/internal/elf_mem_image.h [2:2]
- absl/debugging/internal/stacktrace_arm-inl.inc [1:1]
- absl/debugging/internal/stacktrace_config.h [2:2]
+ License text:
+ // Copyright 2017 The Abseil Authors.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ absl/algorithm/algorithm.h [1:1]
+ absl/algorithm/container.h [1:1]
+ absl/base/attributes.h [1:1]
+ absl/base/call_once.h [1:1]
+ absl/base/casts.h [2:2]
+ absl/base/config.h [2:2]
+ absl/base/const_init.h [1:1]
+ absl/base/dynamic_annotations.h [1:1]
+ absl/base/internal/atomic_hook.h [1:1]
+ absl/base/internal/atomic_hook_test_helper.h [1:1]
+ absl/base/internal/cycleclock.cc [1:1]
+ absl/base/internal/cycleclock.h [2:2]
+ absl/base/internal/direct_mmap.h [1:1]
+ absl/base/internal/dynamic_annotations.h [1:1]
+ absl/base/internal/endian.h [1:1]
+ absl/base/internal/errno_saver.h [1:1]
+ absl/base/internal/exception_safety_testing.h [1:1]
+ absl/base/internal/exception_testing.h [1:1]
+ absl/base/internal/identity.h [1:1]
+ absl/base/internal/inline_variable.h [1:1]
+ absl/base/internal/inline_variable_testing.h [1:1]
+ absl/base/internal/invoke.h [1:1]
+ absl/base/internal/low_level_alloc.cc [1:1]
+ absl/base/internal/low_level_alloc.h [1:1]
+ absl/base/internal/low_level_scheduling.h [1:1]
+ absl/base/internal/per_thread_tls.h [1:1]
+ absl/base/internal/pretty_function.h [1:1]
+ absl/base/internal/raw_logging.cc [1:1]
+ absl/base/internal/raw_logging.h [1:1]
+ absl/base/internal/scheduling_mode.h [1:1]
+ absl/base/internal/spinlock.cc [1:1]
+ absl/base/internal/spinlock.h [2:2]
+ absl/base/internal/spinlock_akaros.inc [1:1]
+ absl/base/internal/spinlock_posix.inc [1:1]
+ absl/base/internal/spinlock_wait.cc [1:1]
+ absl/base/internal/spinlock_wait.h [1:1]
+ absl/base/internal/spinlock_win32.inc [1:1]
+ absl/base/internal/sysinfo.cc [1:1]
+ absl/base/internal/sysinfo.h [1:1]
+ absl/base/internal/thread_identity.cc [1:1]
+ absl/base/internal/thread_identity.h [1:1]
+ absl/base/internal/throw_delegate.cc [1:1]
+ absl/base/internal/throw_delegate.h [2:2]
+ absl/base/internal/tsan_mutex_interface.h [1:1]
+ absl/base/internal/unaligned_access.h [2:2]
+ absl/base/internal/unscaledcycleclock.cc [1:1]
+ absl/base/internal/unscaledcycleclock.h [1:1]
+ absl/base/log_severity.cc [1:1]
+ absl/base/log_severity.h [1:1]
+ absl/base/macros.h [2:2]
+ absl/base/optimization.h [2:2]
+ absl/base/policy_checks.h [1:1]
+ absl/base/port.h [1:1]
+ absl/base/thread_annotations.h [1:1]
+ absl/container/internal/test_instance_tracker.h [1:1]
+ absl/debugging/internal/address_is_readable.cc [1:1]
+ absl/debugging/internal/address_is_readable.h [1:1]
+ absl/debugging/internal/elf_mem_image.cc [1:1]
+ absl/debugging/internal/elf_mem_image.h [2:2]
+ absl/debugging/internal/stacktrace_arm-inl.inc [1:1]
+ absl/debugging/internal/stacktrace_config.h [2:2]
absl/debugging/internal/stacktrace_emscripten-inl.inc [1:1]
- absl/debugging/internal/stacktrace_generic-inl.inc [1:1]
- absl/debugging/internal/stacktrace_powerpc-inl.inc [1:1]
- absl/debugging/internal/stacktrace_win32-inl.inc [1:1]
- absl/debugging/internal/stacktrace_x86-inl.inc [1:1]
- absl/debugging/internal/vdso_support.cc [1:1]
- absl/debugging/internal/vdso_support.h [2:2]
- absl/debugging/leak_check.cc [1:1]
- absl/debugging/leak_check_disable.cc [1:1]
- absl/debugging/stacktrace.cc [1:1]
- absl/memory/memory.h [1:1]
- absl/meta/type_traits.h [2:2]
- absl/numeric/int128.cc [1:1]
- absl/numeric/int128.h [2:2]
- absl/numeric/int128_have_intrinsic.inc [2:2]
- absl/numeric/int128_no_intrinsic.inc [2:2]
- absl/random/bernoulli_distribution.h [1:1]
- absl/random/beta_distribution.h [1:1]
- absl/random/discrete_distribution.cc [1:1]
- absl/random/discrete_distribution.h [1:1]
- absl/random/distributions.h [1:1]
- absl/random/exponential_distribution.h [1:1]
- absl/random/gaussian_distribution.h [1:1]
- absl/random/internal/chi_square.cc [1:1]
- absl/random/internal/chi_square.h [1:1]
- absl/random/internal/distribution_test_util.cc [1:1]
- absl/random/internal/distribution_test_util.h [1:1]
- absl/random/internal/explicit_seed_seq.h [1:1]
- absl/random/internal/fast_uniform_bits.h [1:1]
- absl/random/internal/fastmath.h [1:1]
- absl/random/internal/generate_real.h [1:1]
- absl/random/internal/iostream_state_saver.h [1:1]
- absl/random/internal/nonsecure_base.h [1:1]
- absl/random/internal/platform.h [1:1]
- absl/random/internal/pool_urbg.cc [1:1]
- absl/random/internal/pool_urbg.h [1:1]
- absl/random/internal/randen.cc [1:1]
- absl/random/internal/randen.h [1:1]
- absl/random/internal/randen_detect.cc [1:1]
- absl/random/internal/randen_detect.h [1:1]
- absl/random/internal/randen_engine.h [1:1]
- absl/random/internal/randen_hwaes.cc [1:1]
- absl/random/internal/randen_hwaes.h [1:1]
- absl/random/internal/randen_round_keys.cc [1:1]
- absl/random/internal/randen_slow.cc [1:1]
- absl/random/internal/randen_slow.h [1:1]
- absl/random/internal/randen_traits.h [1:1]
- absl/random/internal/salted_seed_seq.h [1:1]
- absl/random/internal/seed_material.cc [1:1]
- absl/random/internal/seed_material.h [1:1]
- absl/random/internal/sequence_urbg.h [1:1]
- absl/random/internal/traits.h [1:1]
- absl/random/internal/wide_multiply.h [1:1]
- absl/random/log_uniform_int_distribution.h [1:1]
- absl/random/poisson_distribution.h [1:1]
- absl/random/random.h [1:1]
- absl/random/seed_gen_exception.cc [1:1]
- absl/random/seed_gen_exception.h [1:1]
- absl/random/seed_sequences.cc [1:1]
- absl/random/seed_sequences.h [1:1]
- absl/random/uniform_int_distribution.h [1:1]
- absl/random/uniform_real_distribution.h [1:1]
- absl/random/zipf_distribution.h [1:1]
- absl/strings/ascii.cc [1:1]
- absl/strings/ascii.h [2:2]
- absl/strings/escaping.cc [1:1]
- absl/strings/escaping.h [2:2]
- absl/strings/internal/char_map.h [1:1]
- absl/strings/internal/escaping_test_common.h [1:1]
- absl/strings/internal/memutil.cc [1:1]
- absl/strings/internal/memutil.h [2:2]
- absl/strings/internal/numbers_test_common.h [1:1]
- absl/strings/internal/ostringstream.cc [1:1]
- absl/strings/internal/ostringstream.h [1:1]
- absl/strings/internal/resize_uninitialized.h [2:2]
- absl/strings/internal/stl_type_traits.h [1:1]
- absl/strings/internal/str_format/extension.cc [2:2]
- absl/strings/internal/str_format/extension.h [2:2]
- absl/strings/internal/str_format/output.cc [1:1]
- absl/strings/internal/str_format/output.h [1:1]
- absl/strings/internal/str_join_internal.h [2:2]
- absl/strings/internal/str_split_internal.h [1:1]
- absl/strings/internal/utf8.cc [1:1]
- absl/strings/internal/utf8.h [1:1]
- absl/strings/match.cc [1:1]
- absl/strings/match.h [2:2]
- absl/strings/numbers.cc [1:1]
- absl/strings/numbers.h [1:1]
- absl/strings/str_cat.cc [1:1]
- absl/strings/str_cat.h [2:2]
- absl/strings/str_join.h [2:2]
- absl/strings/str_replace.cc [1:1]
- absl/strings/str_replace.h [2:2]
- absl/strings/str_split.cc [1:1]
- absl/strings/str_split.h [2:2]
- absl/strings/string_view.cc [1:1]
- absl/strings/string_view.h [2:2]
- absl/strings/strip.h [2:2]
- absl/strings/substitute.cc [1:1]
- absl/strings/substitute.h [2:2]
- absl/synchronization/barrier.cc [1:1]
- absl/synchronization/barrier.h [1:1]
- absl/synchronization/blocking_counter.cc [1:1]
- absl/synchronization/blocking_counter.h [2:2]
- absl/synchronization/internal/create_thread_identity.cc [1:1]
- absl/synchronization/internal/create_thread_identity.h [2:2]
- absl/synchronization/internal/graphcycles.cc [1:1]
- absl/synchronization/internal/graphcycles.h [1:1]
- absl/synchronization/internal/kernel_timeout.h [1:1]
- absl/synchronization/internal/per_thread_sem.cc [1:1]
- absl/synchronization/internal/per_thread_sem.h [1:1]
- absl/synchronization/internal/thread_pool.h [1:1]
- absl/synchronization/internal/waiter.cc [1:1]
- absl/synchronization/internal/waiter.h [1:1]
- absl/synchronization/mutex.cc [1:1]
- absl/synchronization/mutex.h [1:1]
- absl/synchronization/notification.cc [1:1]
- absl/synchronization/notification.h [1:1]
- absl/time/clock.cc [1:1]
- absl/time/clock.h [1:1]
- absl/time/duration.cc [1:1]
- absl/time/format.cc [1:1]
- absl/time/internal/test_util.h [1:1]
- absl/time/time.cc [1:1]
- absl/time/time.h [1:1]
- absl/types/any.h [2:2]
- absl/types/bad_any_cast.cc [1:1]
- absl/types/bad_optional_access.cc [1:1]
- absl/types/bad_variant_access.cc [1:1]
- absl/types/internal/optional.h [1:1]
- absl/types/optional.h [1:1]
- absl/types/span.h [2:2]
- absl/utility/utility.h [1:1]
-
-KEEP COPYRIGHT_SERVICE_LABEL 05bdd09fb9fdb384a61f2eb54df462d6
-BELONGS absl/time/ya.make
- License text:
- // Copyright 2016 Google Inc. All Rights Reserved.
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- absl/time/internal/cctz/include/cctz/civil_time.h [1:1]
- absl/time/internal/cctz/include/cctz/civil_time_detail.h [1:1]
- absl/time/internal/cctz/include/cctz/time_zone.h [1:1]
- absl/time/internal/cctz/include/cctz/zone_info_source.h [1:1]
- absl/time/internal/cctz/src/civil_time_detail.cc [1:1]
- absl/time/internal/cctz/src/time_zone_fixed.cc [1:1]
- absl/time/internal/cctz/src/time_zone_fixed.h [1:1]
- absl/time/internal/cctz/src/time_zone_format.cc [1:1]
- absl/time/internal/cctz/src/time_zone_if.cc [1:1]
- absl/time/internal/cctz/src/time_zone_if.h [1:1]
- absl/time/internal/cctz/src/time_zone_impl.cc [1:1]
- absl/time/internal/cctz/src/time_zone_impl.h [1:1]
- absl/time/internal/cctz/src/time_zone_info.cc [1:1]
- absl/time/internal/cctz/src/time_zone_info.h [1:1]
- absl/time/internal/cctz/src/time_zone_libc.cc [1:1]
- absl/time/internal/cctz/src/time_zone_libc.h [1:1]
- absl/time/internal/cctz/src/time_zone_lookup.cc [1:1]
- absl/time/internal/cctz/src/time_zone_posix.cc [1:1]
- absl/time/internal/cctz/src/time_zone_posix.h [1:1]
- absl/time/internal/cctz/src/zone_info_source.cc [1:1]
-
-KEEP COPYRIGHT_SERVICE_LABEL 2277624a2da390a98ec17138cb6dc2a5
+ absl/debugging/internal/stacktrace_generic-inl.inc [1:1]
+ absl/debugging/internal/stacktrace_powerpc-inl.inc [1:1]
+ absl/debugging/internal/stacktrace_win32-inl.inc [1:1]
+ absl/debugging/internal/stacktrace_x86-inl.inc [1:1]
+ absl/debugging/internal/vdso_support.cc [1:1]
+ absl/debugging/internal/vdso_support.h [2:2]
+ absl/debugging/leak_check.cc [1:1]
+ absl/debugging/leak_check_disable.cc [1:1]
+ absl/debugging/stacktrace.cc [1:1]
+ absl/memory/memory.h [1:1]
+ absl/meta/type_traits.h [2:2]
+ absl/numeric/int128.cc [1:1]
+ absl/numeric/int128.h [2:2]
+ absl/numeric/int128_have_intrinsic.inc [2:2]
+ absl/numeric/int128_no_intrinsic.inc [2:2]
+ absl/random/bernoulli_distribution.h [1:1]
+ absl/random/beta_distribution.h [1:1]
+ absl/random/discrete_distribution.cc [1:1]
+ absl/random/discrete_distribution.h [1:1]
+ absl/random/distributions.h [1:1]
+ absl/random/exponential_distribution.h [1:1]
+ absl/random/gaussian_distribution.h [1:1]
+ absl/random/internal/chi_square.cc [1:1]
+ absl/random/internal/chi_square.h [1:1]
+ absl/random/internal/distribution_test_util.cc [1:1]
+ absl/random/internal/distribution_test_util.h [1:1]
+ absl/random/internal/explicit_seed_seq.h [1:1]
+ absl/random/internal/fast_uniform_bits.h [1:1]
+ absl/random/internal/fastmath.h [1:1]
+ absl/random/internal/generate_real.h [1:1]
+ absl/random/internal/iostream_state_saver.h [1:1]
+ absl/random/internal/nonsecure_base.h [1:1]
+ absl/random/internal/platform.h [1:1]
+ absl/random/internal/pool_urbg.cc [1:1]
+ absl/random/internal/pool_urbg.h [1:1]
+ absl/random/internal/randen.cc [1:1]
+ absl/random/internal/randen.h [1:1]
+ absl/random/internal/randen_detect.cc [1:1]
+ absl/random/internal/randen_detect.h [1:1]
+ absl/random/internal/randen_engine.h [1:1]
+ absl/random/internal/randen_hwaes.cc [1:1]
+ absl/random/internal/randen_hwaes.h [1:1]
+ absl/random/internal/randen_round_keys.cc [1:1]
+ absl/random/internal/randen_slow.cc [1:1]
+ absl/random/internal/randen_slow.h [1:1]
+ absl/random/internal/randen_traits.h [1:1]
+ absl/random/internal/salted_seed_seq.h [1:1]
+ absl/random/internal/seed_material.cc [1:1]
+ absl/random/internal/seed_material.h [1:1]
+ absl/random/internal/sequence_urbg.h [1:1]
+ absl/random/internal/traits.h [1:1]
+ absl/random/internal/wide_multiply.h [1:1]
+ absl/random/log_uniform_int_distribution.h [1:1]
+ absl/random/poisson_distribution.h [1:1]
+ absl/random/random.h [1:1]
+ absl/random/seed_gen_exception.cc [1:1]
+ absl/random/seed_gen_exception.h [1:1]
+ absl/random/seed_sequences.cc [1:1]
+ absl/random/seed_sequences.h [1:1]
+ absl/random/uniform_int_distribution.h [1:1]
+ absl/random/uniform_real_distribution.h [1:1]
+ absl/random/zipf_distribution.h [1:1]
+ absl/strings/ascii.cc [1:1]
+ absl/strings/ascii.h [2:2]
+ absl/strings/escaping.cc [1:1]
+ absl/strings/escaping.h [2:2]
+ absl/strings/internal/char_map.h [1:1]
+ absl/strings/internal/escaping_test_common.h [1:1]
+ absl/strings/internal/memutil.cc [1:1]
+ absl/strings/internal/memutil.h [2:2]
+ absl/strings/internal/numbers_test_common.h [1:1]
+ absl/strings/internal/ostringstream.cc [1:1]
+ absl/strings/internal/ostringstream.h [1:1]
+ absl/strings/internal/resize_uninitialized.h [2:2]
+ absl/strings/internal/stl_type_traits.h [1:1]
+ absl/strings/internal/str_format/extension.cc [2:2]
+ absl/strings/internal/str_format/extension.h [2:2]
+ absl/strings/internal/str_format/output.cc [1:1]
+ absl/strings/internal/str_format/output.h [1:1]
+ absl/strings/internal/str_join_internal.h [2:2]
+ absl/strings/internal/str_split_internal.h [1:1]
+ absl/strings/internal/utf8.cc [1:1]
+ absl/strings/internal/utf8.h [1:1]
+ absl/strings/match.cc [1:1]
+ absl/strings/match.h [2:2]
+ absl/strings/numbers.cc [1:1]
+ absl/strings/numbers.h [1:1]
+ absl/strings/str_cat.cc [1:1]
+ absl/strings/str_cat.h [2:2]
+ absl/strings/str_join.h [2:2]
+ absl/strings/str_replace.cc [1:1]
+ absl/strings/str_replace.h [2:2]
+ absl/strings/str_split.cc [1:1]
+ absl/strings/str_split.h [2:2]
+ absl/strings/string_view.cc [1:1]
+ absl/strings/string_view.h [2:2]
+ absl/strings/strip.h [2:2]
+ absl/strings/substitute.cc [1:1]
+ absl/strings/substitute.h [2:2]
+ absl/synchronization/barrier.cc [1:1]
+ absl/synchronization/barrier.h [1:1]
+ absl/synchronization/blocking_counter.cc [1:1]
+ absl/synchronization/blocking_counter.h [2:2]
+ absl/synchronization/internal/create_thread_identity.cc [1:1]
+ absl/synchronization/internal/create_thread_identity.h [2:2]
+ absl/synchronization/internal/graphcycles.cc [1:1]
+ absl/synchronization/internal/graphcycles.h [1:1]
+ absl/synchronization/internal/kernel_timeout.h [1:1]
+ absl/synchronization/internal/per_thread_sem.cc [1:1]
+ absl/synchronization/internal/per_thread_sem.h [1:1]
+ absl/synchronization/internal/thread_pool.h [1:1]
+ absl/synchronization/internal/waiter.cc [1:1]
+ absl/synchronization/internal/waiter.h [1:1]
+ absl/synchronization/mutex.cc [1:1]
+ absl/synchronization/mutex.h [1:1]
+ absl/synchronization/notification.cc [1:1]
+ absl/synchronization/notification.h [1:1]
+ absl/time/clock.cc [1:1]
+ absl/time/clock.h [1:1]
+ absl/time/duration.cc [1:1]
+ absl/time/format.cc [1:1]
+ absl/time/internal/test_util.h [1:1]
+ absl/time/time.cc [1:1]
+ absl/time/time.h [1:1]
+ absl/types/any.h [2:2]
+ absl/types/bad_any_cast.cc [1:1]
+ absl/types/bad_optional_access.cc [1:1]
+ absl/types/bad_variant_access.cc [1:1]
+ absl/types/internal/optional.h [1:1]
+ absl/types/optional.h [1:1]
+ absl/types/span.h [2:2]
+ absl/utility/utility.h [1:1]
+
+KEEP COPYRIGHT_SERVICE_LABEL 05bdd09fb9fdb384a61f2eb54df462d6
+BELONGS absl/time/ya.make
+ License text:
+ // Copyright 2016 Google Inc. All Rights Reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ absl/time/internal/cctz/include/cctz/civil_time.h [1:1]
+ absl/time/internal/cctz/include/cctz/civil_time_detail.h [1:1]
+ absl/time/internal/cctz/include/cctz/time_zone.h [1:1]
+ absl/time/internal/cctz/include/cctz/zone_info_source.h [1:1]
+ absl/time/internal/cctz/src/civil_time_detail.cc [1:1]
+ absl/time/internal/cctz/src/time_zone_fixed.cc [1:1]
+ absl/time/internal/cctz/src/time_zone_fixed.h [1:1]
+ absl/time/internal/cctz/src/time_zone_format.cc [1:1]
+ absl/time/internal/cctz/src/time_zone_if.cc [1:1]
+ absl/time/internal/cctz/src/time_zone_if.h [1:1]
+ absl/time/internal/cctz/src/time_zone_impl.cc [1:1]
+ absl/time/internal/cctz/src/time_zone_impl.h [1:1]
+ absl/time/internal/cctz/src/time_zone_info.cc [1:1]
+ absl/time/internal/cctz/src/time_zone_info.h [1:1]
+ absl/time/internal/cctz/src/time_zone_libc.cc [1:1]
+ absl/time/internal/cctz/src/time_zone_libc.h [1:1]
+ absl/time/internal/cctz/src/time_zone_lookup.cc [1:1]
+ absl/time/internal/cctz/src/time_zone_posix.cc [1:1]
+ absl/time/internal/cctz/src/time_zone_posix.h [1:1]
+ absl/time/internal/cctz/src/zone_info_source.cc [1:1]
+
+KEEP COPYRIGHT_SERVICE_LABEL 2277624a2da390a98ec17138cb6dc2a5
BELONGS absl/base/ya.make absl/container/ya.make absl/flags/ya.make absl/functional/ya.make absl/status/ya.make absl/strings/ya.make absl/types/internal/ya.make ya.make
- License text:
- // Copyright 2019 The Abseil Authors.
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- absl/base/internal/scoped_set_env.cc [1:1]
- absl/base/internal/scoped_set_env.h [2:2]
- absl/base/internal/thread_annotations.h [1:1]
- absl/base/options.h [1:1]
- absl/container/inlined_vector.h [1:1]
- absl/container/internal/inlined_vector.h [1:1]
- absl/container/internal/unordered_map_members_test.h [1:1]
- absl/container/internal/unordered_set_members_test.h [1:1]
- absl/flags/config.h [2:2]
- absl/flags/declare.h [2:2]
- absl/flags/flag.cc [2:2]
- absl/flags/flag.h [2:2]
- absl/flags/internal/commandlineflag.h [2:2]
- absl/flags/internal/flag.cc [2:2]
- absl/flags/internal/flag.h [2:2]
- absl/flags/internal/parse.h [2:2]
- absl/flags/internal/path_util.h [2:2]
- absl/flags/internal/program_name.cc [2:2]
- absl/flags/internal/program_name.h [2:2]
- absl/flags/internal/registry.h [2:2]
- absl/flags/internal/usage.cc [2:2]
- absl/flags/internal/usage.h [2:2]
- absl/flags/marshalling.cc [2:2]
- absl/flags/marshalling.h [2:2]
- absl/flags/parse.cc [2:2]
- absl/flags/parse.h [2:2]
- absl/flags/usage.cc [2:2]
- absl/flags/usage.h [2:2]
- absl/flags/usage_config.cc [2:2]
- absl/flags/usage_config.h [2:2]
- absl/functional/function_ref.h [1:1]
- absl/functional/internal/function_ref.h [1:1]
+ License text:
+ // Copyright 2019 The Abseil Authors.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ absl/base/internal/scoped_set_env.cc [1:1]
+ absl/base/internal/scoped_set_env.h [2:2]
+ absl/base/internal/thread_annotations.h [1:1]
+ absl/base/options.h [1:1]
+ absl/container/inlined_vector.h [1:1]
+ absl/container/internal/inlined_vector.h [1:1]
+ absl/container/internal/unordered_map_members_test.h [1:1]
+ absl/container/internal/unordered_set_members_test.h [1:1]
+ absl/flags/config.h [2:2]
+ absl/flags/declare.h [2:2]
+ absl/flags/flag.cc [2:2]
+ absl/flags/flag.h [2:2]
+ absl/flags/internal/commandlineflag.h [2:2]
+ absl/flags/internal/flag.cc [2:2]
+ absl/flags/internal/flag.h [2:2]
+ absl/flags/internal/parse.h [2:2]
+ absl/flags/internal/path_util.h [2:2]
+ absl/flags/internal/program_name.cc [2:2]
+ absl/flags/internal/program_name.h [2:2]
+ absl/flags/internal/registry.h [2:2]
+ absl/flags/internal/usage.cc [2:2]
+ absl/flags/internal/usage.h [2:2]
+ absl/flags/marshalling.cc [2:2]
+ absl/flags/marshalling.h [2:2]
+ absl/flags/parse.cc [2:2]
+ absl/flags/parse.h [2:2]
+ absl/flags/usage.cc [2:2]
+ absl/flags/usage.h [2:2]
+ absl/flags/usage_config.cc [2:2]
+ absl/flags/usage_config.h [2:2]
+ absl/functional/function_ref.h [1:1]
+ absl/functional/internal/function_ref.h [1:1]
absl/profiling/internal/exponential_biased.cc [1:1]
absl/profiling/internal/exponential_biased.h [1:1]
absl/profiling/internal/periodic_sampler.cc [1:1]
absl/profiling/internal/periodic_sampler.h [1:1]
- absl/random/internal/mock_helpers.h [2:2]
- absl/random/internal/mock_overload_set.h [2:2]
- absl/random/internal/uniform_helper.h [1:1]
- absl/status/internal/status_internal.h [1:1]
- absl/status/status.cc [1:1]
- absl/status/status.h [1:1]
- absl/status/status_payload_printer.cc [1:1]
- absl/status/status_payload_printer.h [1:1]
+ absl/random/internal/mock_helpers.h [2:2]
+ absl/random/internal/mock_overload_set.h [2:2]
+ absl/random/internal/uniform_helper.h [1:1]
+ absl/status/internal/status_internal.h [1:1]
+ absl/status/status.cc [1:1]
+ absl/status/status.h [1:1]
+ absl/status/status_payload_printer.cc [1:1]
+ absl/status/status_payload_printer.h [1:1]
absl/strings/internal/cordz_functions.cc [1:1]
absl/strings/internal/cordz_functions.h [1:1]
absl/strings/internal/cordz_handle.cc [1:1]
@@ -325,127 +325,127 @@ BELONGS absl/base/ya.make absl/container/ya.make absl/flags/ya.make absl/functio
absl/strings/internal/cordz_sample_token.cc [1:1]
absl/strings/internal/cordz_sample_token.h [1:1]
absl/strings/internal/cordz_statistics.h [1:1]
- absl/types/internal/conformance_archetype.h [1:1]
- absl/types/internal/conformance_profile.h [1:1]
- absl/types/internal/conformance_testing.h [1:1]
- absl/types/internal/conformance_testing_helpers.h [1:1]
- absl/types/internal/parentheses.h [1:1]
- absl/types/internal/span.h [2:2]
- absl/types/internal/transform_args.h [1:1]
-
-KEEP COPYRIGHT_SERVICE_LABEL 3fb410b721d46624abdaeb2473ffa5d6
+ absl/types/internal/conformance_archetype.h [1:1]
+ absl/types/internal/conformance_profile.h [1:1]
+ absl/types/internal/conformance_testing.h [1:1]
+ absl/types/internal/conformance_testing_helpers.h [1:1]
+ absl/types/internal/parentheses.h [1:1]
+ absl/types/internal/span.h [2:2]
+ absl/types/internal/transform_args.h [1:1]
+
+KEEP COPYRIGHT_SERVICE_LABEL 3fb410b721d46624abdaeb2473ffa5d6
BELONGS absl/base/ya.make absl/container/ya.make absl/debugging/internal/ya.make absl/debugging/ya.make absl/functional/ya.make absl/hash/internal/ya.make absl/hash/ya.make absl/strings/ya.make absl/time/ya.make absl/types/internal/ya.make absl/types/ya.make ya.make
- License text:
- // Copyright 2018 The Abseil Authors.
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- absl/base/internal/hide_ptr.h [1:1]
- absl/base/internal/spinlock_linux.inc [1:1]
- absl/container/btree_map.h [1:1]
- absl/container/btree_set.h [1:1]
- absl/container/btree_test.h [1:1]
- absl/container/fixed_array.h [1:1]
- absl/container/flat_hash_map.h [1:1]
- absl/container/flat_hash_set.h [1:1]
- absl/container/internal/btree.h [1:1]
- absl/container/internal/btree_container.h [1:1]
- absl/container/internal/common.h [1:1]
- absl/container/internal/compressed_tuple.h [1:1]
- absl/container/internal/container_memory.h [1:1]
- absl/container/internal/counting_allocator.h [1:1]
- absl/container/internal/hash_function_defaults.h [1:1]
- absl/container/internal/hash_generator_testing.h [1:1]
- absl/container/internal/hash_policy_testing.h [1:1]
- absl/container/internal/hash_policy_traits.h [1:1]
- absl/container/internal/hashtable_debug.h [1:1]
- absl/container/internal/hashtable_debug_hooks.h [1:1]
- absl/container/internal/hashtablez_sampler.cc [1:1]
- absl/container/internal/hashtablez_sampler.h [1:1]
- absl/container/internal/hashtablez_sampler_force_weak_definition.cc [1:1]
- absl/container/internal/have_sse.h [1:1]
- absl/container/internal/layout.h [1:1]
- absl/container/internal/node_hash_policy.h [1:1]
- absl/container/internal/raw_hash_map.h [1:1]
- absl/container/internal/raw_hash_set.cc [1:1]
- absl/container/internal/raw_hash_set.h [1:1]
- absl/container/internal/tracked.h [1:1]
- absl/container/internal/unordered_map_constructor_test.h [1:1]
- absl/container/internal/unordered_map_lookup_test.h [1:1]
- absl/container/internal/unordered_map_modifiers_test.h [1:1]
- absl/container/internal/unordered_set_constructor_test.h [1:1]
- absl/container/internal/unordered_set_lookup_test.h [1:1]
- absl/container/internal/unordered_set_modifiers_test.h [1:1]
- absl/container/node_hash_map.h [1:1]
- absl/container/node_hash_set.h [1:1]
- absl/debugging/failure_signal_handler.cc [2:2]
- absl/debugging/failure_signal_handler.h [1:1]
- absl/debugging/internal/demangle.cc [1:1]
- absl/debugging/internal/demangle.h [1:1]
- absl/debugging/internal/examine_stack.cc [2:2]
- absl/debugging/internal/examine_stack.h [2:2]
- absl/debugging/internal/stack_consumption.h [2:2]
- absl/debugging/internal/symbolize.h [1:1]
- absl/debugging/leak_check.h [1:1]
- absl/debugging/stacktrace.h [1:1]
- absl/debugging/symbolize.cc [1:1]
- absl/debugging/symbolize.h [1:1]
- absl/debugging/symbolize_elf.inc [1:1]
- absl/debugging/symbolize_unimplemented.inc [1:1]
- absl/debugging/symbolize_win32.inc [1:1]
- absl/functional/bind_front.h [1:1]
- absl/functional/internal/front_binder.h [1:1]
- absl/hash/hash.h [1:1]
- absl/hash/hash_testing.h [1:1]
- absl/hash/internal/city.cc [1:1]
- absl/hash/internal/city.h [1:1]
- absl/hash/internal/hash.cc [1:1]
- absl/hash/internal/hash.h [1:1]
- absl/hash/internal/spy_hash_state.h [1:1]
+ License text:
+ // Copyright 2018 The Abseil Authors.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ absl/base/internal/hide_ptr.h [1:1]
+ absl/base/internal/spinlock_linux.inc [1:1]
+ absl/container/btree_map.h [1:1]
+ absl/container/btree_set.h [1:1]
+ absl/container/btree_test.h [1:1]
+ absl/container/fixed_array.h [1:1]
+ absl/container/flat_hash_map.h [1:1]
+ absl/container/flat_hash_set.h [1:1]
+ absl/container/internal/btree.h [1:1]
+ absl/container/internal/btree_container.h [1:1]
+ absl/container/internal/common.h [1:1]
+ absl/container/internal/compressed_tuple.h [1:1]
+ absl/container/internal/container_memory.h [1:1]
+ absl/container/internal/counting_allocator.h [1:1]
+ absl/container/internal/hash_function_defaults.h [1:1]
+ absl/container/internal/hash_generator_testing.h [1:1]
+ absl/container/internal/hash_policy_testing.h [1:1]
+ absl/container/internal/hash_policy_traits.h [1:1]
+ absl/container/internal/hashtable_debug.h [1:1]
+ absl/container/internal/hashtable_debug_hooks.h [1:1]
+ absl/container/internal/hashtablez_sampler.cc [1:1]
+ absl/container/internal/hashtablez_sampler.h [1:1]
+ absl/container/internal/hashtablez_sampler_force_weak_definition.cc [1:1]
+ absl/container/internal/have_sse.h [1:1]
+ absl/container/internal/layout.h [1:1]
+ absl/container/internal/node_hash_policy.h [1:1]
+ absl/container/internal/raw_hash_map.h [1:1]
+ absl/container/internal/raw_hash_set.cc [1:1]
+ absl/container/internal/raw_hash_set.h [1:1]
+ absl/container/internal/tracked.h [1:1]
+ absl/container/internal/unordered_map_constructor_test.h [1:1]
+ absl/container/internal/unordered_map_lookup_test.h [1:1]
+ absl/container/internal/unordered_map_modifiers_test.h [1:1]
+ absl/container/internal/unordered_set_constructor_test.h [1:1]
+ absl/container/internal/unordered_set_lookup_test.h [1:1]
+ absl/container/internal/unordered_set_modifiers_test.h [1:1]
+ absl/container/node_hash_map.h [1:1]
+ absl/container/node_hash_set.h [1:1]
+ absl/debugging/failure_signal_handler.cc [2:2]
+ absl/debugging/failure_signal_handler.h [1:1]
+ absl/debugging/internal/demangle.cc [1:1]
+ absl/debugging/internal/demangle.h [1:1]
+ absl/debugging/internal/examine_stack.cc [2:2]
+ absl/debugging/internal/examine_stack.h [2:2]
+ absl/debugging/internal/stack_consumption.h [2:2]
+ absl/debugging/internal/symbolize.h [1:1]
+ absl/debugging/leak_check.h [1:1]
+ absl/debugging/stacktrace.h [1:1]
+ absl/debugging/symbolize.cc [1:1]
+ absl/debugging/symbolize.h [1:1]
+ absl/debugging/symbolize_elf.inc [1:1]
+ absl/debugging/symbolize_unimplemented.inc [1:1]
+ absl/debugging/symbolize_win32.inc [1:1]
+ absl/functional/bind_front.h [1:1]
+ absl/functional/internal/front_binder.h [1:1]
+ absl/hash/hash.h [1:1]
+ absl/hash/hash_testing.h [1:1]
+ absl/hash/internal/city.cc [1:1]
+ absl/hash/internal/city.h [1:1]
+ absl/hash/internal/hash.cc [1:1]
+ absl/hash/internal/hash.h [1:1]
+ absl/hash/internal/spy_hash_state.h [1:1]
absl/profiling/internal/sample_recorder.h [1:1]
- absl/random/bit_gen_ref.h [2:2]
- absl/random/internal/distribution_caller.h [2:2]
- absl/random/internal/pcg_engine.h [1:1]
- absl/random/mock_distributions.h [1:1]
- absl/random/mocking_bit_gen.h [1:1]
- absl/strings/charconv.cc [1:1]
- absl/strings/charconv.h [1:1]
- absl/strings/cord_test_helpers.h [2:2]
- absl/strings/internal/charconv_bigint.cc [1:1]
- absl/strings/internal/charconv_bigint.h [1:1]
- absl/strings/internal/charconv_parse.cc [1:1]
- absl/strings/internal/charconv_parse.h [1:1]
- absl/strings/internal/pow10_helper.h [2:2]
- absl/strings/str_format.h [2:2]
- absl/time/civil_time.cc [1:1]
- absl/time/civil_time.h [1:1]
- absl/time/internal/get_current_time_chrono.inc [1:1]
- absl/types/bad_any_cast.h [1:1]
- absl/types/bad_optional_access.h [1:1]
- absl/types/bad_variant_access.h [1:1]
- absl/types/compare.h [1:1]
- absl/types/internal/conformance_aliases.h [1:1]
- absl/types/internal/variant.h [1:1]
- absl/types/variant.h [1:1]
-
-KEEP COPYRIGHT_SERVICE_LABEL 58e60221a225d38384f3c66b2400cc91
+ absl/random/bit_gen_ref.h [2:2]
+ absl/random/internal/distribution_caller.h [2:2]
+ absl/random/internal/pcg_engine.h [1:1]
+ absl/random/mock_distributions.h [1:1]
+ absl/random/mocking_bit_gen.h [1:1]
+ absl/strings/charconv.cc [1:1]
+ absl/strings/charconv.h [1:1]
+ absl/strings/cord_test_helpers.h [2:2]
+ absl/strings/internal/charconv_bigint.cc [1:1]
+ absl/strings/internal/charconv_bigint.h [1:1]
+ absl/strings/internal/charconv_parse.cc [1:1]
+ absl/strings/internal/charconv_parse.h [1:1]
+ absl/strings/internal/pow10_helper.h [2:2]
+ absl/strings/str_format.h [2:2]
+ absl/time/civil_time.cc [1:1]
+ absl/time/civil_time.h [1:1]
+ absl/time/internal/get_current_time_chrono.inc [1:1]
+ absl/types/bad_any_cast.h [1:1]
+ absl/types/bad_optional_access.h [1:1]
+ absl/types/bad_variant_access.h [1:1]
+ absl/types/compare.h [1:1]
+ absl/types/internal/conformance_aliases.h [1:1]
+ absl/types/internal/variant.h [1:1]
+ absl/types/variant.h [1:1]
+
+KEEP COPYRIGHT_SERVICE_LABEL 58e60221a225d38384f3c66b2400cc91
BELONGS absl/debugging/internal/ya.make absl/flags/ya.make absl/numeric/ya.make absl/strings/ya.make ya.make
- License text:
- // Copyright 2021 The Abseil Authors.
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- absl/cleanup/cleanup.h [1:1]
- absl/cleanup/internal/cleanup.h [1:1]
+ License text:
+ // Copyright 2021 The Abseil Authors.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ absl/cleanup/cleanup.h [1:1]
+ absl/cleanup/internal/cleanup.h [1:1]
absl/debugging/internal/stacktrace_riscv-inl.inc [1:1]
absl/flags/internal/flag_msvc.inc [2:2]
- absl/numeric/internal/representation.h [1:1]
+ absl/numeric/internal/representation.h [1:1]
absl/strings/cordz_test_helpers.h [1:1]
- absl/strings/internal/cord_internal.h [1:1]
+ absl/strings/internal/cord_internal.h [1:1]
absl/strings/internal/cord_rep_btree.cc [1:1]
absl/strings/internal/cord_rep_btree.h [1:1]
absl/strings/internal/cord_rep_btree_navigator.cc [1:1]
@@ -454,67 +454,67 @@ BELONGS absl/debugging/internal/ya.make absl/flags/ya.make absl/numeric/ya.make
absl/strings/internal/cord_rep_btree_reader.h [1:1]
absl/strings/internal/cord_rep_consume.cc [1:1]
absl/strings/internal/cord_rep_consume.h [1:1]
- absl/strings/internal/cord_rep_ring_reader.h [1:1]
+ absl/strings/internal/cord_rep_ring_reader.h [1:1]
absl/strings/internal/cord_rep_test_util.h [1:1]
absl/strings/internal/cordz_update_scope.h [1:1]
absl/strings/internal/cordz_update_tracker.h [1:1]
-
-KEEP COPYRIGHT_SERVICE_LABEL 6499e2ad737f62db5558c81fbd2749a7
-BELONGS ya.make
- License text:
- // Copyright 2017 Google Inc. All Rights Reserved.
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- absl/random/internal/nanobenchmark.h [1:1]
-
-KEEP COPYRIGHT_SERVICE_LABEL d34864d3c7c7a5ffae3d414344aa54a8
+
+KEEP COPYRIGHT_SERVICE_LABEL 6499e2ad737f62db5558c81fbd2749a7
+BELONGS ya.make
+ License text:
+ // Copyright 2017 Google Inc. All Rights Reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ absl/random/internal/nanobenchmark.h [1:1]
+
+KEEP COPYRIGHT_SERVICE_LABEL d34864d3c7c7a5ffae3d414344aa54a8
BELONGS absl/base/ya.make absl/debugging/ya.make absl/flags/ya.make absl/hash/internal/ya.make absl/numeric/ya.make absl/status/ya.make absl/strings/internal/str_format/ya.make absl/strings/ya.make absl/synchronization/internal/ya.make
- License text:
- // Copyright 2020 The Abseil Authors.
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- absl/base/internal/fast_type_id.h [2:2]
- absl/base/internal/strerror.cc [1:1]
- absl/base/internal/strerror.h [1:1]
- absl/debugging/symbolize_darwin.inc [1:1]
+ License text:
+ // Copyright 2020 The Abseil Authors.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ absl/base/internal/fast_type_id.h [2:2]
+ absl/base/internal/strerror.cc [1:1]
+ absl/base/internal/strerror.h [1:1]
+ absl/debugging/symbolize_darwin.inc [1:1]
absl/debugging/symbolize_emscripten.inc [1:1]
- absl/flags/commandlineflag.cc [2:2]
- absl/flags/commandlineflag.h [2:2]
- absl/flags/internal/commandlineflag.cc [2:2]
- absl/flags/internal/private_handle_accessor.cc [2:2]
- absl/flags/internal/private_handle_accessor.h [2:2]
- absl/flags/internal/sequence_lock.h [2:2]
- absl/flags/reflection.cc [2:2]
- absl/flags/reflection.h [2:2]
+ absl/flags/commandlineflag.cc [2:2]
+ absl/flags/commandlineflag.h [2:2]
+ absl/flags/internal/commandlineflag.cc [2:2]
+ absl/flags/internal/private_handle_accessor.cc [2:2]
+ absl/flags/internal/private_handle_accessor.h [2:2]
+ absl/flags/internal/sequence_lock.h [2:2]
+ absl/flags/reflection.cc [2:2]
+ absl/flags/reflection.h [2:2]
absl/hash/internal/low_level_hash.cc [1:1]
absl/hash/internal/low_level_hash.h [1:1]
- absl/numeric/bits.h [1:1]
- absl/numeric/internal/bits.h [1:1]
- absl/status/internal/statusor_internal.h [1:1]
- absl/status/statusor.cc [1:1]
- absl/status/statusor.h [1:1]
- absl/strings/cord.cc [1:1]
- absl/strings/cord.h [1:1]
- absl/strings/internal/cord_internal.cc [1:1]
- absl/strings/internal/cord_rep_flat.h [1:1]
- absl/strings/internal/cord_rep_ring.cc [1:1]
- absl/strings/internal/cord_rep_ring.h [1:1]
- absl/strings/internal/escaping.cc [1:1]
- absl/strings/internal/escaping.h [1:1]
- absl/strings/internal/str_format/arg.cc [1:1]
- absl/strings/internal/str_format/arg.h [1:1]
- absl/strings/internal/str_format/bind.cc [1:1]
- absl/strings/internal/str_format/bind.h [1:1]
- absl/strings/internal/str_format/checker.h [1:1]
- absl/strings/internal/str_format/float_conversion.cc [1:1]
- absl/strings/internal/str_format/float_conversion.h [1:1]
- absl/strings/internal/str_format/parser.cc [1:1]
- absl/strings/internal/str_format/parser.h [1:1]
- absl/strings/internal/string_constant.h [1:1]
- absl/synchronization/internal/futex.h [1:1]
+ absl/numeric/bits.h [1:1]
+ absl/numeric/internal/bits.h [1:1]
+ absl/status/internal/statusor_internal.h [1:1]
+ absl/status/statusor.cc [1:1]
+ absl/status/statusor.h [1:1]
+ absl/strings/cord.cc [1:1]
+ absl/strings/cord.h [1:1]
+ absl/strings/internal/cord_internal.cc [1:1]
+ absl/strings/internal/cord_rep_flat.h [1:1]
+ absl/strings/internal/cord_rep_ring.cc [1:1]
+ absl/strings/internal/cord_rep_ring.h [1:1]
+ absl/strings/internal/escaping.cc [1:1]
+ absl/strings/internal/escaping.h [1:1]
+ absl/strings/internal/str_format/arg.cc [1:1]
+ absl/strings/internal/str_format/arg.h [1:1]
+ absl/strings/internal/str_format/bind.cc [1:1]
+ absl/strings/internal/str_format/bind.h [1:1]
+ absl/strings/internal/str_format/checker.h [1:1]
+ absl/strings/internal/str_format/float_conversion.cc [1:1]
+ absl/strings/internal/str_format/float_conversion.h [1:1]
+ absl/strings/internal/str_format/parser.cc [1:1]
+ absl/strings/internal/str_format/parser.h [1:1]
+ absl/strings/internal/string_constant.h [1:1]
+ absl/synchronization/internal/futex.h [1:1]
diff --git a/contrib/restricted/abseil-cpp/.yandex_meta/devtools.licenses.report b/contrib/restricted/abseil-cpp/.yandex_meta/devtools.licenses.report
index e81554edb2..1151a553df 100644
--- a/contrib/restricted/abseil-cpp/.yandex_meta/devtools.licenses.report
+++ b/contrib/restricted/abseil-cpp/.yandex_meta/devtools.licenses.report
@@ -1,52 +1,52 @@
-# File format ($ symbol means the beginning of a line):
-#
-# $ # this message
-# $ # =======================
-# $ # comments (all commentaries should starts with some number of spaces and # symbol)
-# ${action} {license id} {license text hash}
-# $BELONGS ./ya/make/file/relative/path/1/ya.make ./ya/make/2/ya.make
-# ${all_file_action} filename
-# $ # user commentaries (many lines)
-# $ generated description - files with this license, license text... (some number of lines that starts with some number of spaces, do not modify)
-# ${action} {license spdx} {license text hash}
-# $BELONGS ./ya/make/file/relative/path/3/ya.make
-# ${all_file_action} filename
-# $ # user commentaries
-# $ generated description
-# $ ...
-#
-# You can modify action, all_file_action and add commentaries
-# Available actions:
-# keep - keep license in contrib and use in credits
-# skip - skip license
-# remove - remove all files with this license
-# rename - save license text/links into licenses texts file, but not store SPDX into LINCENSE macro. You should store correct license id into devtools.license.spdx.txt file
-#
-# {all file action} records will be generated when license text contains filename that exists on filesystem (in contrib directory)
-# We suppose that that files can contain some license info
-# Available all file actions:
-# FILE_IGNORE - ignore file (do nothing)
-# FILE_INCLUDE - include all file data into licenses text file
-# =======================
-
-KEEP Apache-2.0 0e8699c5f5ea602534a6558430df2b8d
+# File format ($ symbol means the beginning of a line):
+#
+# $ # this message
+# $ # =======================
+# $ # comments (all commentaries should start with some number of spaces and a # symbol)
+# ${action} {license id} {license text hash}
+# $BELONGS ./ya/make/file/relative/path/1/ya.make ./ya/make/2/ya.make
+# ${all_file_action} filename
+# $ # user commentaries (many lines)
+# $ generated description - files with this license, license text... (some number of lines that start with some number of spaces, do not modify)
+# ${action} {license spdx} {license text hash}
+# $BELONGS ./ya/make/file/relative/path/3/ya.make
+# ${all_file_action} filename
+# $ # user commentaries
+# $ generated description
+# $ ...
+#
+# You can modify action, all_file_action and add commentaries
+# Available actions:
+# keep - keep license in contrib and use in credits
+# skip - skip license
+# remove - remove all files with this license
+# rename - save license text/links into the licenses texts file, but do not store SPDX into the LICENSE macro. You should store the correct license id into the devtools.license.spdx.txt file
+#
+# {all file action} records will be generated when the license text contains a filename that exists on the filesystem (in the contrib directory)
+# We suppose that these files can contain some license info
+# Available all file actions:
+# FILE_IGNORE - ignore file (do nothing)
+# FILE_INCLUDE - include all file data into licenses text file
+# =======================
+
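The header above describes the record shape of this report: an {action} line with a license id and text hash, a BELONGS line listing ya.make paths, and then indented generated description lines. As a rough illustration only, the sketch below parses records of that shape; the exact grammar used by the devtools tooling is an assumption here, and the names (Record, parse_report) are hypothetical, not part of the actual tool.

    # Minimal parsing sketch, assuming the record layout documented in the
    # header above; not the real devtools implementation.
    import re
    from dataclasses import dataclass, field
    from typing import List, Optional

    @dataclass
    class Record:
        action: str                                            # KEEP / SKIP / REMOVE / RENAME
        license_id: str                                        # e.g. Apache-2.0 or COPYRIGHT_SERVICE_LABEL
        text_hash: str                                         # license text hash
        belongs: List[str] = field(default_factory=list)       # ya.make paths from the BELONGS line
        description: List[str] = field(default_factory=list)   # generated description lines

    def parse_report(text: str) -> List[Record]:
        records: List[Record] = []
        current: Optional[Record] = None
        for line in text.splitlines():
            stripped = line.strip()
            if not stripped or stripped.startswith("#"):
                continue                                       # skip blanks and commentary
            if re.match(r"^(KEEP|SKIP|REMOVE|RENAME)\b", line):
                # Record lines are assumed to carry exactly three tokens.
                action, license_id, text_hash = line.split(maxsplit=2)
                current = Record(action, license_id, text_hash)
                records.append(current)
            elif line.startswith("BELONGS") and current is not None:
                current.belongs.extend(line.split()[1:])
            elif current is not None:
                current.description.append(stripped)           # license text, scancode info, file list
        return records

    # Example usage: parse_report(open("devtools.licenses.report").read())
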
+KEEP Apache-2.0 0e8699c5f5ea602534a6558430df2b8d
BELONGS absl/debugging/internal/ya.make absl/hash/internal/ya.make absl/numeric/ya.make absl/strings/ya.make ya.make
- Note: matched license text is too long. Read it in the source files.
- Scancode info:
- Original SPDX id: Apache-2.0
- Score : 100.00
- Match type : NOTICE
- Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
- Files with this license:
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
absl/debugging/internal/stacktrace_riscv-inl.inc [3:13]
absl/hash/internal/low_level_hash.cc [3:13]
absl/hash/internal/low_level_hash.h [3:13]
- absl/numeric/bits.h [3:13]
- absl/numeric/internal/bits.h [3:13]
- absl/numeric/internal/representation.h [3:13]
+ absl/numeric/bits.h [3:13]
+ absl/numeric/internal/bits.h [3:13]
+ absl/numeric/internal/representation.h [3:13]
absl/profiling/internal/exponential_biased.cc [3:13]
absl/profiling/internal/exponential_biased.h [3:13]
- absl/random/internal/nanobenchmark.h [3:13]
+ absl/random/internal/nanobenchmark.h [3:13]
absl/strings/cordz_test_helpers.h [3:13]
absl/strings/internal/cord_rep_btree.cc [3:13]
absl/strings/internal/cord_rep_btree.h [3:13]
@@ -56,346 +56,346 @@ BELONGS absl/debugging/internal/ya.make absl/hash/internal/ya.make absl/numeric/
absl/strings/internal/cord_rep_btree_reader.h [3:13]
absl/strings/internal/cord_rep_consume.cc [3:13]
absl/strings/internal/cord_rep_consume.h [3:13]
- absl/strings/internal/cord_rep_flat.h [3:13]
- absl/strings/internal/cord_rep_ring.cc [3:13]
- absl/strings/internal/cord_rep_ring.h [3:13]
- absl/strings/internal/cord_rep_ring_reader.h [3:13]
+ absl/strings/internal/cord_rep_flat.h [3:13]
+ absl/strings/internal/cord_rep_ring.cc [3:13]
+ absl/strings/internal/cord_rep_ring.h [3:13]
+ absl/strings/internal/cord_rep_ring_reader.h [3:13]
absl/strings/internal/cord_rep_test_util.h [3:13]
absl/strings/internal/cordz_update_scope.h [3:13]
absl/strings/internal/cordz_update_tracker.h [3:13]
-
-KEEP Apache-2.0 0f66a26c8211d9f8c21369fcb6702370
-BELONGS absl/time/ya.make
- Note: matched license text is too long. Read it in the source files.
- Scancode info:
- Original SPDX id: Apache-2.0
- Score : 100.00
- Match type : NOTICE
- Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
- Files with this license:
- absl/time/internal/cctz/include/cctz/civil_time.h [3:13]
- absl/time/internal/cctz/include/cctz/civil_time_detail.h [3:13]
- absl/time/internal/cctz/include/cctz/time_zone.h [3:13]
- absl/time/internal/cctz/include/cctz/zone_info_source.h [3:13]
- absl/time/internal/cctz/src/civil_time_detail.cc [3:13]
- absl/time/internal/cctz/src/time_zone_fixed.cc [3:13]
- absl/time/internal/cctz/src/time_zone_fixed.h [3:13]
- absl/time/internal/cctz/src/time_zone_format.cc [3:13]
- absl/time/internal/cctz/src/time_zone_if.cc [3:13]
- absl/time/internal/cctz/src/time_zone_if.h [3:13]
- absl/time/internal/cctz/src/time_zone_impl.cc [3:13]
- absl/time/internal/cctz/src/time_zone_impl.h [3:13]
- absl/time/internal/cctz/src/time_zone_info.cc [3:13]
- absl/time/internal/cctz/src/time_zone_info.h [3:13]
- absl/time/internal/cctz/src/time_zone_libc.cc [3:13]
- absl/time/internal/cctz/src/time_zone_libc.h [3:13]
- absl/time/internal/cctz/src/time_zone_lookup.cc [3:13]
- absl/time/internal/cctz/src/time_zone_posix.cc [3:13]
- absl/time/internal/cctz/src/time_zone_posix.h [3:13]
- absl/time/internal/cctz/src/zone_info_source.cc [3:13]
-
-KEEP Apache-2.0 3493ceb30c6c8a1d5127bc1f0b030380
-BELONGS ya.make
- License text:
- \## License
- The Abseil C++ library is licensed under the terms of the Apache
- license. See [LICENSE](LICENSE) for more information.
- Scancode info:
- Original SPDX id: Apache-2.0
- Score : 62.50
- Match type : NOTICE
- Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
- Files with this license:
+
+KEEP Apache-2.0 0f66a26c8211d9f8c21369fcb6702370
+BELONGS absl/time/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ absl/time/internal/cctz/include/cctz/civil_time.h [3:13]
+ absl/time/internal/cctz/include/cctz/civil_time_detail.h [3:13]
+ absl/time/internal/cctz/include/cctz/time_zone.h [3:13]
+ absl/time/internal/cctz/include/cctz/zone_info_source.h [3:13]
+ absl/time/internal/cctz/src/civil_time_detail.cc [3:13]
+ absl/time/internal/cctz/src/time_zone_fixed.cc [3:13]
+ absl/time/internal/cctz/src/time_zone_fixed.h [3:13]
+ absl/time/internal/cctz/src/time_zone_format.cc [3:13]
+ absl/time/internal/cctz/src/time_zone_if.cc [3:13]
+ absl/time/internal/cctz/src/time_zone_if.h [3:13]
+ absl/time/internal/cctz/src/time_zone_impl.cc [3:13]
+ absl/time/internal/cctz/src/time_zone_impl.h [3:13]
+ absl/time/internal/cctz/src/time_zone_info.cc [3:13]
+ absl/time/internal/cctz/src/time_zone_info.h [3:13]
+ absl/time/internal/cctz/src/time_zone_libc.cc [3:13]
+ absl/time/internal/cctz/src/time_zone_libc.h [3:13]
+ absl/time/internal/cctz/src/time_zone_lookup.cc [3:13]
+ absl/time/internal/cctz/src/time_zone_posix.cc [3:13]
+ absl/time/internal/cctz/src/time_zone_posix.h [3:13]
+ absl/time/internal/cctz/src/zone_info_source.cc [3:13]
+
+KEEP Apache-2.0 3493ceb30c6c8a1d5127bc1f0b030380
+BELONGS ya.make
+ License text:
+ \## License
+ The Abseil C++ library is licensed under the terms of the Apache
+ license. See [LICENSE](LICENSE) for more information.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 62.50
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
README.md [128:131]
-
-KEEP Public-Domain 3a682fe6def1cddc889298ee2a043f6f
-BELONGS absl/time/ya.make
- License text:
- ** This file is in the public domain, so clarified as of
- Scancode info:
- Original SPDX id: LicenseRef-scancode-public-domain
- Score : 100.00
- Match type : TEXT
- Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
- Files with this license:
- absl/time/internal/cctz/src/tzfile.h [8:8]
-
-KEEP Apache-2.0 3ea5060c4f08f5769674fbf0c0fb3992
+
+KEEP Public-Domain 3a682fe6def1cddc889298ee2a043f6f
+BELONGS absl/time/ya.make
+ License text:
+ ** This file is in the public domain, so clarified as of
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-public-domain
+ Score : 100.00
+ Match type : TEXT
+ Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
+ Files with this license:
+ absl/time/internal/cctz/src/tzfile.h [8:8]
+
+KEEP Apache-2.0 3ea5060c4f08f5769674fbf0c0fb3992
BELONGS absl/algorithm/ya.make absl/base/ya.make absl/container/ya.make absl/debugging/internal/ya.make absl/debugging/ya.make absl/flags/ya.make absl/functional/ya.make absl/hash/internal/ya.make absl/hash/ya.make absl/memory/ya.make absl/meta/ya.make absl/numeric/ya.make absl/status/ya.make absl/strings/internal/str_format/ya.make absl/strings/ya.make absl/synchronization/internal/ya.make absl/synchronization/ya.make absl/time/ya.make absl/types/internal/ya.make absl/types/ya.make absl/utility/ya.make ya.make
- Note: matched license text is too long. Read it in the source files.
- Scancode info:
- Original SPDX id: Apache-2.0
- Score : 100.00
- Match type : NOTICE
- Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
- Files with this license:
- absl/algorithm/algorithm.h [3:13]
- absl/algorithm/container.h [3:13]
- absl/base/attributes.h [3:13]
- absl/base/call_once.h [3:13]
- absl/base/casts.h [4:14]
- absl/base/config.h [4:14]
- absl/base/const_init.h [3:13]
- absl/base/dynamic_annotations.h [3:13]
- absl/base/internal/atomic_hook.h [3:13]
- absl/base/internal/atomic_hook_test_helper.h [3:13]
- absl/base/internal/cycleclock.cc [3:13]
- absl/base/internal/cycleclock.h [4:14]
- absl/base/internal/direct_mmap.h [3:13]
- absl/base/internal/dynamic_annotations.h [3:13]
- absl/base/internal/endian.h [3:13]
- absl/base/internal/errno_saver.h [3:13]
- absl/base/internal/exception_safety_testing.h [3:13]
- absl/base/internal/exception_testing.h [3:13]
- absl/base/internal/fast_type_id.h [4:14]
- absl/base/internal/hide_ptr.h [3:13]
- absl/base/internal/identity.h [3:13]
- absl/base/internal/inline_variable.h [3:13]
- absl/base/internal/inline_variable_testing.h [3:13]
- absl/base/internal/invoke.h [3:13]
- absl/base/internal/low_level_alloc.cc [3:13]
- absl/base/internal/low_level_alloc.h [3:13]
- absl/base/internal/low_level_scheduling.h [3:13]
- absl/base/internal/per_thread_tls.h [3:13]
- absl/base/internal/pretty_function.h [3:13]
- absl/base/internal/raw_logging.cc [3:13]
- absl/base/internal/raw_logging.h [3:13]
- absl/base/internal/scheduling_mode.h [3:13]
- absl/base/internal/scoped_set_env.cc [3:13]
- absl/base/internal/scoped_set_env.h [4:14]
- absl/base/internal/spinlock.cc [3:13]
- absl/base/internal/spinlock.h [4:14]
- absl/base/internal/spinlock_akaros.inc [3:13]
- absl/base/internal/spinlock_linux.inc [3:13]
- absl/base/internal/spinlock_posix.inc [3:13]
- absl/base/internal/spinlock_wait.cc [3:13]
- absl/base/internal/spinlock_wait.h [3:13]
- absl/base/internal/spinlock_win32.inc [3:13]
- absl/base/internal/strerror.cc [3:13]
- absl/base/internal/strerror.h [3:13]
- absl/base/internal/sysinfo.cc [3:13]
- absl/base/internal/sysinfo.h [3:13]
- absl/base/internal/thread_annotations.h [3:13]
- absl/base/internal/thread_identity.cc [3:13]
- absl/base/internal/thread_identity.h [3:13]
- absl/base/internal/throw_delegate.cc [3:13]
- absl/base/internal/throw_delegate.h [4:14]
- absl/base/internal/tsan_mutex_interface.h [3:13]
- absl/base/internal/unaligned_access.h [4:14]
- absl/base/internal/unscaledcycleclock.cc [3:13]
- absl/base/internal/unscaledcycleclock.h [3:13]
- absl/base/log_severity.cc [3:13]
- absl/base/log_severity.h [3:13]
- absl/base/macros.h [4:14]
- absl/base/optimization.h [4:14]
- absl/base/options.h [3:13]
- absl/base/policy_checks.h [3:13]
- absl/base/port.h [3:13]
- absl/base/thread_annotations.h [3:13]
- absl/cleanup/cleanup.h [3:13]
- absl/cleanup/internal/cleanup.h [3:13]
- absl/container/btree_map.h [3:13]
- absl/container/btree_set.h [3:13]
- absl/container/btree_test.h [3:13]
- absl/container/fixed_array.h [3:13]
- absl/container/flat_hash_map.h [3:13]
- absl/container/flat_hash_set.h [3:13]
- absl/container/inlined_vector.h [3:13]
- absl/container/internal/btree.h [3:13]
- absl/container/internal/btree_container.h [3:13]
- absl/container/internal/common.h [3:13]
- absl/container/internal/compressed_tuple.h [3:13]
- absl/container/internal/container_memory.h [3:13]
- absl/container/internal/counting_allocator.h [3:13]
- absl/container/internal/hash_function_defaults.h [3:13]
- absl/container/internal/hash_generator_testing.h [3:13]
- absl/container/internal/hash_policy_testing.h [3:13]
- absl/container/internal/hash_policy_traits.h [3:13]
- absl/container/internal/hashtable_debug.h [3:13]
- absl/container/internal/hashtable_debug_hooks.h [3:13]
- absl/container/internal/hashtablez_sampler.cc [3:13]
- absl/container/internal/hashtablez_sampler.h [3:13]
- absl/container/internal/hashtablez_sampler_force_weak_definition.cc [3:13]
- absl/container/internal/have_sse.h [3:13]
- absl/container/internal/inlined_vector.h [3:13]
- absl/container/internal/layout.h [3:13]
- absl/container/internal/node_hash_policy.h [3:13]
- absl/container/internal/raw_hash_map.h [3:13]
- absl/container/internal/raw_hash_set.cc [3:13]
- absl/container/internal/raw_hash_set.h [3:13]
- absl/container/internal/test_instance_tracker.h [3:13]
- absl/container/internal/tracked.h [3:13]
- absl/container/internal/unordered_map_constructor_test.h [3:13]
- absl/container/internal/unordered_map_lookup_test.h [3:13]
- absl/container/internal/unordered_map_members_test.h [3:13]
- absl/container/internal/unordered_map_modifiers_test.h [3:13]
- absl/container/internal/unordered_set_constructor_test.h [3:13]
- absl/container/internal/unordered_set_lookup_test.h [3:13]
- absl/container/internal/unordered_set_members_test.h [3:13]
- absl/container/internal/unordered_set_modifiers_test.h [3:13]
- absl/container/node_hash_map.h [3:13]
- absl/container/node_hash_set.h [3:13]
- absl/debugging/failure_signal_handler.cc [4:14]
- absl/debugging/failure_signal_handler.h [3:13]
- absl/debugging/internal/address_is_readable.cc [3:13]
- absl/debugging/internal/address_is_readable.h [3:13]
- absl/debugging/internal/demangle.cc [3:13]
- absl/debugging/internal/demangle.h [3:13]
- absl/debugging/internal/elf_mem_image.cc [3:13]
- absl/debugging/internal/examine_stack.cc [4:14]
- absl/debugging/internal/examine_stack.h [4:14]
- absl/debugging/internal/stack_consumption.h [4:14]
- absl/debugging/internal/stacktrace_arm-inl.inc [3:13]
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ absl/algorithm/algorithm.h [3:13]
+ absl/algorithm/container.h [3:13]
+ absl/base/attributes.h [3:13]
+ absl/base/call_once.h [3:13]
+ absl/base/casts.h [4:14]
+ absl/base/config.h [4:14]
+ absl/base/const_init.h [3:13]
+ absl/base/dynamic_annotations.h [3:13]
+ absl/base/internal/atomic_hook.h [3:13]
+ absl/base/internal/atomic_hook_test_helper.h [3:13]
+ absl/base/internal/cycleclock.cc [3:13]
+ absl/base/internal/cycleclock.h [4:14]
+ absl/base/internal/direct_mmap.h [3:13]
+ absl/base/internal/dynamic_annotations.h [3:13]
+ absl/base/internal/endian.h [3:13]
+ absl/base/internal/errno_saver.h [3:13]
+ absl/base/internal/exception_safety_testing.h [3:13]
+ absl/base/internal/exception_testing.h [3:13]
+ absl/base/internal/fast_type_id.h [4:14]
+ absl/base/internal/hide_ptr.h [3:13]
+ absl/base/internal/identity.h [3:13]
+ absl/base/internal/inline_variable.h [3:13]
+ absl/base/internal/inline_variable_testing.h [3:13]
+ absl/base/internal/invoke.h [3:13]
+ absl/base/internal/low_level_alloc.cc [3:13]
+ absl/base/internal/low_level_alloc.h [3:13]
+ absl/base/internal/low_level_scheduling.h [3:13]
+ absl/base/internal/per_thread_tls.h [3:13]
+ absl/base/internal/pretty_function.h [3:13]
+ absl/base/internal/raw_logging.cc [3:13]
+ absl/base/internal/raw_logging.h [3:13]
+ absl/base/internal/scheduling_mode.h [3:13]
+ absl/base/internal/scoped_set_env.cc [3:13]
+ absl/base/internal/scoped_set_env.h [4:14]
+ absl/base/internal/spinlock.cc [3:13]
+ absl/base/internal/spinlock.h [4:14]
+ absl/base/internal/spinlock_akaros.inc [3:13]
+ absl/base/internal/spinlock_linux.inc [3:13]
+ absl/base/internal/spinlock_posix.inc [3:13]
+ absl/base/internal/spinlock_wait.cc [3:13]
+ absl/base/internal/spinlock_wait.h [3:13]
+ absl/base/internal/spinlock_win32.inc [3:13]
+ absl/base/internal/strerror.cc [3:13]
+ absl/base/internal/strerror.h [3:13]
+ absl/base/internal/sysinfo.cc [3:13]
+ absl/base/internal/sysinfo.h [3:13]
+ absl/base/internal/thread_annotations.h [3:13]
+ absl/base/internal/thread_identity.cc [3:13]
+ absl/base/internal/thread_identity.h [3:13]
+ absl/base/internal/throw_delegate.cc [3:13]
+ absl/base/internal/throw_delegate.h [4:14]
+ absl/base/internal/tsan_mutex_interface.h [3:13]
+ absl/base/internal/unaligned_access.h [4:14]
+ absl/base/internal/unscaledcycleclock.cc [3:13]
+ absl/base/internal/unscaledcycleclock.h [3:13]
+ absl/base/log_severity.cc [3:13]
+ absl/base/log_severity.h [3:13]
+ absl/base/macros.h [4:14]
+ absl/base/optimization.h [4:14]
+ absl/base/options.h [3:13]
+ absl/base/policy_checks.h [3:13]
+ absl/base/port.h [3:13]
+ absl/base/thread_annotations.h [3:13]
+ absl/cleanup/cleanup.h [3:13]
+ absl/cleanup/internal/cleanup.h [3:13]
+ absl/container/btree_map.h [3:13]
+ absl/container/btree_set.h [3:13]
+ absl/container/btree_test.h [3:13]
+ absl/container/fixed_array.h [3:13]
+ absl/container/flat_hash_map.h [3:13]
+ absl/container/flat_hash_set.h [3:13]
+ absl/container/inlined_vector.h [3:13]
+ absl/container/internal/btree.h [3:13]
+ absl/container/internal/btree_container.h [3:13]
+ absl/container/internal/common.h [3:13]
+ absl/container/internal/compressed_tuple.h [3:13]
+ absl/container/internal/container_memory.h [3:13]
+ absl/container/internal/counting_allocator.h [3:13]
+ absl/container/internal/hash_function_defaults.h [3:13]
+ absl/container/internal/hash_generator_testing.h [3:13]
+ absl/container/internal/hash_policy_testing.h [3:13]
+ absl/container/internal/hash_policy_traits.h [3:13]
+ absl/container/internal/hashtable_debug.h [3:13]
+ absl/container/internal/hashtable_debug_hooks.h [3:13]
+ absl/container/internal/hashtablez_sampler.cc [3:13]
+ absl/container/internal/hashtablez_sampler.h [3:13]
+ absl/container/internal/hashtablez_sampler_force_weak_definition.cc [3:13]
+ absl/container/internal/have_sse.h [3:13]
+ absl/container/internal/inlined_vector.h [3:13]
+ absl/container/internal/layout.h [3:13]
+ absl/container/internal/node_hash_policy.h [3:13]
+ absl/container/internal/raw_hash_map.h [3:13]
+ absl/container/internal/raw_hash_set.cc [3:13]
+ absl/container/internal/raw_hash_set.h [3:13]
+ absl/container/internal/test_instance_tracker.h [3:13]
+ absl/container/internal/tracked.h [3:13]
+ absl/container/internal/unordered_map_constructor_test.h [3:13]
+ absl/container/internal/unordered_map_lookup_test.h [3:13]
+ absl/container/internal/unordered_map_members_test.h [3:13]
+ absl/container/internal/unordered_map_modifiers_test.h [3:13]
+ absl/container/internal/unordered_set_constructor_test.h [3:13]
+ absl/container/internal/unordered_set_lookup_test.h [3:13]
+ absl/container/internal/unordered_set_members_test.h [3:13]
+ absl/container/internal/unordered_set_modifiers_test.h [3:13]
+ absl/container/node_hash_map.h [3:13]
+ absl/container/node_hash_set.h [3:13]
+ absl/debugging/failure_signal_handler.cc [4:14]
+ absl/debugging/failure_signal_handler.h [3:13]
+ absl/debugging/internal/address_is_readable.cc [3:13]
+ absl/debugging/internal/address_is_readable.h [3:13]
+ absl/debugging/internal/demangle.cc [3:13]
+ absl/debugging/internal/demangle.h [3:13]
+ absl/debugging/internal/elf_mem_image.cc [3:13]
+ absl/debugging/internal/examine_stack.cc [4:14]
+ absl/debugging/internal/examine_stack.h [4:14]
+ absl/debugging/internal/stack_consumption.h [4:14]
+ absl/debugging/internal/stacktrace_arm-inl.inc [3:13]
absl/debugging/internal/stacktrace_emscripten-inl.inc [3:13]
- absl/debugging/internal/stacktrace_generic-inl.inc [3:13]
- absl/debugging/internal/stacktrace_powerpc-inl.inc [3:13]
- absl/debugging/internal/stacktrace_win32-inl.inc [3:13]
- absl/debugging/internal/stacktrace_x86-inl.inc [3:13]
- absl/debugging/internal/symbolize.h [3:13]
- absl/debugging/internal/vdso_support.cc [3:13]
- absl/debugging/internal/vdso_support.h [4:14]
- absl/debugging/leak_check.cc [3:13]
- absl/debugging/leak_check.h [3:13]
- absl/debugging/leak_check_disable.cc [3:13]
- absl/debugging/stacktrace.cc [3:13]
- absl/debugging/stacktrace.h [3:13]
- absl/debugging/symbolize.cc [3:13]
- absl/debugging/symbolize.h [3:13]
- absl/debugging/symbolize_darwin.inc [3:13]
- absl/debugging/symbolize_elf.inc [3:13]
+ absl/debugging/internal/stacktrace_generic-inl.inc [3:13]
+ absl/debugging/internal/stacktrace_powerpc-inl.inc [3:13]
+ absl/debugging/internal/stacktrace_win32-inl.inc [3:13]
+ absl/debugging/internal/stacktrace_x86-inl.inc [3:13]
+ absl/debugging/internal/symbolize.h [3:13]
+ absl/debugging/internal/vdso_support.cc [3:13]
+ absl/debugging/internal/vdso_support.h [4:14]
+ absl/debugging/leak_check.cc [3:13]
+ absl/debugging/leak_check.h [3:13]
+ absl/debugging/leak_check_disable.cc [3:13]
+ absl/debugging/stacktrace.cc [3:13]
+ absl/debugging/stacktrace.h [3:13]
+ absl/debugging/symbolize.cc [3:13]
+ absl/debugging/symbolize.h [3:13]
+ absl/debugging/symbolize_darwin.inc [3:13]
+ absl/debugging/symbolize_elf.inc [3:13]
absl/debugging/symbolize_emscripten.inc [3:13]
- absl/debugging/symbolize_unimplemented.inc [3:13]
- absl/debugging/symbolize_win32.inc [3:13]
- absl/flags/commandlineflag.cc [4:14]
- absl/flags/commandlineflag.h [4:14]
- absl/flags/config.h [4:14]
- absl/flags/declare.h [4:14]
- absl/flags/flag.cc [4:14]
- absl/flags/flag.h [4:14]
- absl/flags/internal/commandlineflag.cc [4:14]
- absl/flags/internal/commandlineflag.h [4:14]
- absl/flags/internal/flag.cc [4:14]
- absl/flags/internal/flag.h [4:14]
+ absl/debugging/symbolize_unimplemented.inc [3:13]
+ absl/debugging/symbolize_win32.inc [3:13]
+ absl/flags/commandlineflag.cc [4:14]
+ absl/flags/commandlineflag.h [4:14]
+ absl/flags/config.h [4:14]
+ absl/flags/declare.h [4:14]
+ absl/flags/flag.cc [4:14]
+ absl/flags/flag.h [4:14]
+ absl/flags/internal/commandlineflag.cc [4:14]
+ absl/flags/internal/commandlineflag.h [4:14]
+ absl/flags/internal/flag.cc [4:14]
+ absl/flags/internal/flag.h [4:14]
absl/flags/internal/flag_msvc.inc [4:14]
- absl/flags/internal/parse.h [4:14]
- absl/flags/internal/path_util.h [4:14]
- absl/flags/internal/private_handle_accessor.cc [4:14]
- absl/flags/internal/private_handle_accessor.h [4:14]
- absl/flags/internal/program_name.cc [4:14]
- absl/flags/internal/program_name.h [4:14]
- absl/flags/internal/registry.h [4:14]
- absl/flags/internal/sequence_lock.h [4:14]
- absl/flags/internal/usage.cc [4:14]
- absl/flags/internal/usage.h [4:14]
- absl/flags/marshalling.cc [4:14]
- absl/flags/marshalling.h [4:14]
- absl/flags/parse.cc [4:14]
- absl/flags/parse.h [4:14]
- absl/flags/reflection.cc [4:14]
- absl/flags/reflection.h [4:14]
- absl/flags/usage.cc [4:14]
- absl/flags/usage.h [4:14]
- absl/flags/usage_config.cc [4:14]
- absl/flags/usage_config.h [4:14]
- absl/functional/bind_front.h [3:13]
- absl/functional/function_ref.h [3:13]
- absl/functional/internal/front_binder.h [3:13]
- absl/functional/internal/function_ref.h [3:13]
- absl/hash/hash.h [3:13]
- absl/hash/hash_testing.h [3:13]
- absl/hash/internal/city.cc [3:13]
- absl/hash/internal/city.h [3:13]
- absl/hash/internal/hash.cc [3:13]
- absl/hash/internal/hash.h [3:13]
- absl/hash/internal/spy_hash_state.h [3:13]
- absl/memory/memory.h [3:13]
- absl/meta/type_traits.h [4:14]
- absl/numeric/int128.cc [3:13]
- absl/numeric/int128.h [4:14]
- absl/numeric/int128_have_intrinsic.inc [4:14]
- absl/numeric/int128_no_intrinsic.inc [4:14]
+ absl/flags/internal/parse.h [4:14]
+ absl/flags/internal/path_util.h [4:14]
+ absl/flags/internal/private_handle_accessor.cc [4:14]
+ absl/flags/internal/private_handle_accessor.h [4:14]
+ absl/flags/internal/program_name.cc [4:14]
+ absl/flags/internal/program_name.h [4:14]
+ absl/flags/internal/registry.h [4:14]
+ absl/flags/internal/sequence_lock.h [4:14]
+ absl/flags/internal/usage.cc [4:14]
+ absl/flags/internal/usage.h [4:14]
+ absl/flags/marshalling.cc [4:14]
+ absl/flags/marshalling.h [4:14]
+ absl/flags/parse.cc [4:14]
+ absl/flags/parse.h [4:14]
+ absl/flags/reflection.cc [4:14]
+ absl/flags/reflection.h [4:14]
+ absl/flags/usage.cc [4:14]
+ absl/flags/usage.h [4:14]
+ absl/flags/usage_config.cc [4:14]
+ absl/flags/usage_config.h [4:14]
+ absl/functional/bind_front.h [3:13]
+ absl/functional/function_ref.h [3:13]
+ absl/functional/internal/front_binder.h [3:13]
+ absl/functional/internal/function_ref.h [3:13]
+ absl/hash/hash.h [3:13]
+ absl/hash/hash_testing.h [3:13]
+ absl/hash/internal/city.cc [3:13]
+ absl/hash/internal/city.h [3:13]
+ absl/hash/internal/hash.cc [3:13]
+ absl/hash/internal/hash.h [3:13]
+ absl/hash/internal/spy_hash_state.h [3:13]
+ absl/memory/memory.h [3:13]
+ absl/meta/type_traits.h [4:14]
+ absl/numeric/int128.cc [3:13]
+ absl/numeric/int128.h [4:14]
+ absl/numeric/int128_have_intrinsic.inc [4:14]
+ absl/numeric/int128_no_intrinsic.inc [4:14]
absl/profiling/internal/periodic_sampler.cc [3:13]
absl/profiling/internal/periodic_sampler.h [3:13]
absl/profiling/internal/sample_recorder.h [3:13]
- absl/random/bernoulli_distribution.h [3:13]
- absl/random/beta_distribution.h [3:13]
- absl/random/bit_gen_ref.h [4:14]
- absl/random/discrete_distribution.cc [3:13]
- absl/random/discrete_distribution.h [3:13]
- absl/random/distributions.h [3:13]
- absl/random/exponential_distribution.h [3:13]
- absl/random/gaussian_distribution.h [3:13]
- absl/random/internal/chi_square.cc [3:13]
- absl/random/internal/chi_square.h [3:13]
- absl/random/internal/distribution_caller.h [4:14]
- absl/random/internal/distribution_test_util.cc [3:13]
- absl/random/internal/distribution_test_util.h [3:13]
- absl/random/internal/explicit_seed_seq.h [3:13]
- absl/random/internal/fast_uniform_bits.h [3:13]
- absl/random/internal/fastmath.h [3:13]
- absl/random/internal/generate_real.h [3:13]
- absl/random/internal/iostream_state_saver.h [3:13]
- absl/random/internal/mock_helpers.h [4:14]
- absl/random/internal/mock_overload_set.h [4:14]
- absl/random/internal/nonsecure_base.h [3:13]
- absl/random/internal/pcg_engine.h [3:13]
- absl/random/internal/platform.h [3:13]
- absl/random/internal/pool_urbg.cc [3:13]
- absl/random/internal/pool_urbg.h [3:13]
- absl/random/internal/randen.cc [3:13]
- absl/random/internal/randen.h [3:13]
- absl/random/internal/randen_detect.cc [3:13]
- absl/random/internal/randen_detect.h [3:13]
- absl/random/internal/randen_engine.h [3:13]
- absl/random/internal/randen_hwaes.cc [3:13]
- absl/random/internal/randen_hwaes.h [3:13]
- absl/random/internal/randen_round_keys.cc [3:13]
- absl/random/internal/randen_slow.cc [3:13]
- absl/random/internal/randen_slow.h [3:13]
- absl/random/internal/randen_traits.h [3:13]
- absl/random/internal/salted_seed_seq.h [3:13]
- absl/random/internal/seed_material.cc [3:13]
- absl/random/internal/seed_material.h [3:13]
- absl/random/internal/sequence_urbg.h [3:13]
- absl/random/internal/traits.h [3:13]
- absl/random/internal/uniform_helper.h [3:13]
- absl/random/internal/wide_multiply.h [3:13]
- absl/random/log_uniform_int_distribution.h [3:13]
- absl/random/mock_distributions.h [3:13]
- absl/random/mocking_bit_gen.h [3:13]
- absl/random/poisson_distribution.h [3:13]
- absl/random/random.h [3:13]
- absl/random/seed_gen_exception.cc [3:13]
- absl/random/seed_gen_exception.h [3:13]
- absl/random/seed_sequences.cc [3:13]
- absl/random/seed_sequences.h [3:13]
- absl/random/uniform_int_distribution.h [3:13]
- absl/random/uniform_real_distribution.h [3:13]
- absl/random/zipf_distribution.h [3:13]
- absl/status/internal/status_internal.h [3:13]
- absl/status/internal/statusor_internal.h [3:13]
- absl/status/status.cc [3:13]
- absl/status/status.h [3:13]
- absl/status/status_payload_printer.cc [3:13]
- absl/status/status_payload_printer.h [3:13]
- absl/status/statusor.cc [3:13]
- absl/status/statusor.h [3:13]
- absl/strings/ascii.cc [3:13]
- absl/strings/ascii.h [4:14]
- absl/strings/charconv.cc [3:13]
- absl/strings/charconv.h [3:13]
- absl/strings/cord.cc [3:13]
- absl/strings/cord.h [3:13]
- absl/strings/cord_test_helpers.h [4:14]
- absl/strings/escaping.cc [3:13]
- absl/strings/escaping.h [4:14]
- absl/strings/internal/char_map.h [3:13]
- absl/strings/internal/charconv_bigint.cc [3:13]
- absl/strings/internal/charconv_bigint.h [3:13]
- absl/strings/internal/charconv_parse.cc [3:13]
- absl/strings/internal/charconv_parse.h [3:13]
- absl/strings/internal/cord_internal.cc [3:13]
- absl/strings/internal/cord_internal.h [3:13]
+ absl/random/bernoulli_distribution.h [3:13]
+ absl/random/beta_distribution.h [3:13]
+ absl/random/bit_gen_ref.h [4:14]
+ absl/random/discrete_distribution.cc [3:13]
+ absl/random/discrete_distribution.h [3:13]
+ absl/random/distributions.h [3:13]
+ absl/random/exponential_distribution.h [3:13]
+ absl/random/gaussian_distribution.h [3:13]
+ absl/random/internal/chi_square.cc [3:13]
+ absl/random/internal/chi_square.h [3:13]
+ absl/random/internal/distribution_caller.h [4:14]
+ absl/random/internal/distribution_test_util.cc [3:13]
+ absl/random/internal/distribution_test_util.h [3:13]
+ absl/random/internal/explicit_seed_seq.h [3:13]
+ absl/random/internal/fast_uniform_bits.h [3:13]
+ absl/random/internal/fastmath.h [3:13]
+ absl/random/internal/generate_real.h [3:13]
+ absl/random/internal/iostream_state_saver.h [3:13]
+ absl/random/internal/mock_helpers.h [4:14]
+ absl/random/internal/mock_overload_set.h [4:14]
+ absl/random/internal/nonsecure_base.h [3:13]
+ absl/random/internal/pcg_engine.h [3:13]
+ absl/random/internal/platform.h [3:13]
+ absl/random/internal/pool_urbg.cc [3:13]
+ absl/random/internal/pool_urbg.h [3:13]
+ absl/random/internal/randen.cc [3:13]
+ absl/random/internal/randen.h [3:13]
+ absl/random/internal/randen_detect.cc [3:13]
+ absl/random/internal/randen_detect.h [3:13]
+ absl/random/internal/randen_engine.h [3:13]
+ absl/random/internal/randen_hwaes.cc [3:13]
+ absl/random/internal/randen_hwaes.h [3:13]
+ absl/random/internal/randen_round_keys.cc [3:13]
+ absl/random/internal/randen_slow.cc [3:13]
+ absl/random/internal/randen_slow.h [3:13]
+ absl/random/internal/randen_traits.h [3:13]
+ absl/random/internal/salted_seed_seq.h [3:13]
+ absl/random/internal/seed_material.cc [3:13]
+ absl/random/internal/seed_material.h [3:13]
+ absl/random/internal/sequence_urbg.h [3:13]
+ absl/random/internal/traits.h [3:13]
+ absl/random/internal/uniform_helper.h [3:13]
+ absl/random/internal/wide_multiply.h [3:13]
+ absl/random/log_uniform_int_distribution.h [3:13]
+ absl/random/mock_distributions.h [3:13]
+ absl/random/mocking_bit_gen.h [3:13]
+ absl/random/poisson_distribution.h [3:13]
+ absl/random/random.h [3:13]
+ absl/random/seed_gen_exception.cc [3:13]
+ absl/random/seed_gen_exception.h [3:13]
+ absl/random/seed_sequences.cc [3:13]
+ absl/random/seed_sequences.h [3:13]
+ absl/random/uniform_int_distribution.h [3:13]
+ absl/random/uniform_real_distribution.h [3:13]
+ absl/random/zipf_distribution.h [3:13]
+ absl/status/internal/status_internal.h [3:13]
+ absl/status/internal/statusor_internal.h [3:13]
+ absl/status/status.cc [3:13]
+ absl/status/status.h [3:13]
+ absl/status/status_payload_printer.cc [3:13]
+ absl/status/status_payload_printer.h [3:13]
+ absl/status/statusor.cc [3:13]
+ absl/status/statusor.h [3:13]
+ absl/strings/ascii.cc [3:13]
+ absl/strings/ascii.h [4:14]
+ absl/strings/charconv.cc [3:13]
+ absl/strings/charconv.h [3:13]
+ absl/strings/cord.cc [3:13]
+ absl/strings/cord.h [3:13]
+ absl/strings/cord_test_helpers.h [4:14]
+ absl/strings/escaping.cc [3:13]
+ absl/strings/escaping.h [4:14]
+ absl/strings/internal/char_map.h [3:13]
+ absl/strings/internal/charconv_bigint.cc [3:13]
+ absl/strings/internal/charconv_bigint.h [3:13]
+ absl/strings/internal/charconv_parse.cc [3:13]
+ absl/strings/internal/charconv_parse.h [3:13]
+ absl/strings/internal/cord_internal.cc [3:13]
+ absl/strings/internal/cord_internal.h [3:13]
absl/strings/internal/cordz_functions.cc [3:13]
absl/strings/internal/cordz_functions.h [3:13]
absl/strings/internal/cordz_handle.cc [3:13]
@@ -405,188 +405,188 @@ BELONGS absl/algorithm/ya.make absl/base/ya.make absl/container/ya.make absl/deb
absl/strings/internal/cordz_sample_token.cc [3:13]
absl/strings/internal/cordz_sample_token.h [3:13]
absl/strings/internal/cordz_statistics.h [3:13]
- absl/strings/internal/escaping.cc [3:13]
- absl/strings/internal/escaping.h [3:13]
- absl/strings/internal/escaping_test_common.h [3:13]
- absl/strings/internal/memutil.cc [3:13]
- absl/strings/internal/memutil.h [4:14]
- absl/strings/internal/numbers_test_common.h [3:13]
- absl/strings/internal/ostringstream.cc [3:13]
- absl/strings/internal/ostringstream.h [3:13]
- absl/strings/internal/pow10_helper.h [4:14]
- absl/strings/internal/resize_uninitialized.h [4:14]
- absl/strings/internal/stl_type_traits.h [3:13]
- absl/strings/internal/str_format/arg.cc [3:13]
- absl/strings/internal/str_format/arg.h [3:13]
- absl/strings/internal/str_format/bind.cc [3:13]
- absl/strings/internal/str_format/bind.h [3:13]
- absl/strings/internal/str_format/checker.h [3:13]
- absl/strings/internal/str_format/extension.cc [4:14]
- absl/strings/internal/str_format/extension.h [4:14]
- absl/strings/internal/str_format/float_conversion.cc [3:13]
- absl/strings/internal/str_format/float_conversion.h [3:13]
- absl/strings/internal/str_format/output.cc [3:13]
- absl/strings/internal/str_format/output.h [3:13]
- absl/strings/internal/str_format/parser.cc [3:13]
- absl/strings/internal/str_format/parser.h [3:13]
- absl/strings/internal/str_join_internal.h [4:14]
- absl/strings/internal/str_split_internal.h [3:13]
- absl/strings/internal/string_constant.h [3:13]
- absl/strings/internal/utf8.cc [3:13]
- absl/strings/internal/utf8.h [3:13]
- absl/strings/match.cc [3:13]
- absl/strings/match.h [4:14]
- absl/strings/numbers.cc [3:13]
- absl/strings/numbers.h [3:13]
- absl/strings/str_cat.cc [3:13]
- absl/strings/str_cat.h [4:14]
- absl/strings/str_format.h [4:14]
- absl/strings/str_join.h [4:14]
- absl/strings/str_replace.cc [3:13]
- absl/strings/str_replace.h [4:14]
- absl/strings/str_split.cc [3:13]
- absl/strings/str_split.h [4:14]
- absl/strings/string_view.cc [3:13]
- absl/strings/string_view.h [4:14]
- absl/strings/strip.h [4:14]
- absl/strings/substitute.cc [3:13]
- absl/strings/substitute.h [4:14]
- absl/synchronization/barrier.cc [3:13]
- absl/synchronization/barrier.h [3:13]
- absl/synchronization/blocking_counter.cc [3:13]
- absl/synchronization/blocking_counter.h [4:14]
- absl/synchronization/internal/create_thread_identity.cc [3:13]
- absl/synchronization/internal/futex.h [3:13]
- absl/synchronization/internal/graphcycles.cc [3:13]
- absl/synchronization/internal/graphcycles.h [3:13]
- absl/synchronization/internal/kernel_timeout.h [3:13]
- absl/synchronization/internal/per_thread_sem.cc [3:13]
- absl/synchronization/internal/per_thread_sem.h [3:13]
- absl/synchronization/internal/thread_pool.h [3:13]
- absl/synchronization/internal/waiter.cc [3:13]
- absl/synchronization/internal/waiter.h [3:13]
- absl/synchronization/mutex.cc [3:13]
- absl/synchronization/mutex.h [3:13]
- absl/synchronization/notification.cc [3:13]
- absl/synchronization/notification.h [3:13]
- absl/time/civil_time.cc [3:13]
- absl/time/civil_time.h [3:13]
- absl/time/clock.cc [3:13]
- absl/time/clock.h [3:13]
- absl/time/duration.cc [3:13]
- absl/time/format.cc [3:13]
- absl/time/internal/get_current_time_chrono.inc [3:13]
- absl/time/internal/test_util.h [3:13]
- absl/time/time.cc [3:13]
- absl/time/time.h [3:13]
- absl/types/any.h [4:14]
- absl/types/bad_any_cast.cc [3:13]
- absl/types/bad_any_cast.h [3:13]
- absl/types/bad_optional_access.cc [3:13]
- absl/types/bad_optional_access.h [3:13]
- absl/types/bad_variant_access.cc [3:13]
- absl/types/bad_variant_access.h [3:13]
- absl/types/compare.h [3:13]
- absl/types/internal/conformance_aliases.h [3:13]
- absl/types/internal/conformance_archetype.h [3:13]
- absl/types/internal/conformance_profile.h [3:13]
- absl/types/internal/conformance_testing.h [3:13]
- absl/types/internal/conformance_testing_helpers.h [3:13]
- absl/types/internal/optional.h [3:13]
- absl/types/internal/parentheses.h [3:13]
- absl/types/internal/span.h [4:14]
- absl/types/internal/transform_args.h [3:13]
- absl/types/internal/variant.h [3:13]
- absl/types/optional.h [3:13]
- absl/types/span.h [4:14]
- absl/types/variant.h [3:13]
- absl/utility/utility.h [3:13]
-
+ absl/strings/internal/escaping.cc [3:13]
+ absl/strings/internal/escaping.h [3:13]
+ absl/strings/internal/escaping_test_common.h [3:13]
+ absl/strings/internal/memutil.cc [3:13]
+ absl/strings/internal/memutil.h [4:14]
+ absl/strings/internal/numbers_test_common.h [3:13]
+ absl/strings/internal/ostringstream.cc [3:13]
+ absl/strings/internal/ostringstream.h [3:13]
+ absl/strings/internal/pow10_helper.h [4:14]
+ absl/strings/internal/resize_uninitialized.h [4:14]
+ absl/strings/internal/stl_type_traits.h [3:13]
+ absl/strings/internal/str_format/arg.cc [3:13]
+ absl/strings/internal/str_format/arg.h [3:13]
+ absl/strings/internal/str_format/bind.cc [3:13]
+ absl/strings/internal/str_format/bind.h [3:13]
+ absl/strings/internal/str_format/checker.h [3:13]
+ absl/strings/internal/str_format/extension.cc [4:14]
+ absl/strings/internal/str_format/extension.h [4:14]
+ absl/strings/internal/str_format/float_conversion.cc [3:13]
+ absl/strings/internal/str_format/float_conversion.h [3:13]
+ absl/strings/internal/str_format/output.cc [3:13]
+ absl/strings/internal/str_format/output.h [3:13]
+ absl/strings/internal/str_format/parser.cc [3:13]
+ absl/strings/internal/str_format/parser.h [3:13]
+ absl/strings/internal/str_join_internal.h [4:14]
+ absl/strings/internal/str_split_internal.h [3:13]
+ absl/strings/internal/string_constant.h [3:13]
+ absl/strings/internal/utf8.cc [3:13]
+ absl/strings/internal/utf8.h [3:13]
+ absl/strings/match.cc [3:13]
+ absl/strings/match.h [4:14]
+ absl/strings/numbers.cc [3:13]
+ absl/strings/numbers.h [3:13]
+ absl/strings/str_cat.cc [3:13]
+ absl/strings/str_cat.h [4:14]
+ absl/strings/str_format.h [4:14]
+ absl/strings/str_join.h [4:14]
+ absl/strings/str_replace.cc [3:13]
+ absl/strings/str_replace.h [4:14]
+ absl/strings/str_split.cc [3:13]
+ absl/strings/str_split.h [4:14]
+ absl/strings/string_view.cc [3:13]
+ absl/strings/string_view.h [4:14]
+ absl/strings/strip.h [4:14]
+ absl/strings/substitute.cc [3:13]
+ absl/strings/substitute.h [4:14]
+ absl/synchronization/barrier.cc [3:13]
+ absl/synchronization/barrier.h [3:13]
+ absl/synchronization/blocking_counter.cc [3:13]
+ absl/synchronization/blocking_counter.h [4:14]
+ absl/synchronization/internal/create_thread_identity.cc [3:13]
+ absl/synchronization/internal/futex.h [3:13]
+ absl/synchronization/internal/graphcycles.cc [3:13]
+ absl/synchronization/internal/graphcycles.h [3:13]
+ absl/synchronization/internal/kernel_timeout.h [3:13]
+ absl/synchronization/internal/per_thread_sem.cc [3:13]
+ absl/synchronization/internal/per_thread_sem.h [3:13]
+ absl/synchronization/internal/thread_pool.h [3:13]
+ absl/synchronization/internal/waiter.cc [3:13]
+ absl/synchronization/internal/waiter.h [3:13]
+ absl/synchronization/mutex.cc [3:13]
+ absl/synchronization/mutex.h [3:13]
+ absl/synchronization/notification.cc [3:13]
+ absl/synchronization/notification.h [3:13]
+ absl/time/civil_time.cc [3:13]
+ absl/time/civil_time.h [3:13]
+ absl/time/clock.cc [3:13]
+ absl/time/clock.h [3:13]
+ absl/time/duration.cc [3:13]
+ absl/time/format.cc [3:13]
+ absl/time/internal/get_current_time_chrono.inc [3:13]
+ absl/time/internal/test_util.h [3:13]
+ absl/time/time.cc [3:13]
+ absl/time/time.h [3:13]
+ absl/types/any.h [4:14]
+ absl/types/bad_any_cast.cc [3:13]
+ absl/types/bad_any_cast.h [3:13]
+ absl/types/bad_optional_access.cc [3:13]
+ absl/types/bad_optional_access.h [3:13]
+ absl/types/bad_variant_access.cc [3:13]
+ absl/types/bad_variant_access.h [3:13]
+ absl/types/compare.h [3:13]
+ absl/types/internal/conformance_aliases.h [3:13]
+ absl/types/internal/conformance_archetype.h [3:13]
+ absl/types/internal/conformance_profile.h [3:13]
+ absl/types/internal/conformance_testing.h [3:13]
+ absl/types/internal/conformance_testing_helpers.h [3:13]
+ absl/types/internal/optional.h [3:13]
+ absl/types/internal/parentheses.h [3:13]
+ absl/types/internal/span.h [4:14]
+ absl/types/internal/transform_args.h [3:13]
+ absl/types/internal/variant.h [3:13]
+ absl/types/optional.h [3:13]
+ absl/types/span.h [4:14]
+ absl/types/variant.h [3:13]
+ absl/utility/utility.h [3:13]
+
SKIP LicenseRef-scancode-warranty-disclaimer 5ba761db85e57267704f71a6bcf20c2a
BELONGS absl/container/ya.make ya.make
- License text:
- // This utility is internal-only. Use at your own risk.
- Scancode info:
- Original SPDX id: LicenseRef-scancode-warranty-disclaimer
- Score : 100.00
- Match type : TEXT
- Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/warranty-disclaimer.LICENSE
- Files with this license:
- absl/container/internal/hashtablez_sampler.h [37:37]
+ License text:
+ // This utility is internal-only. Use at your own risk.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-warranty-disclaimer
+ Score : 100.00
+ Match type : TEXT
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/warranty-disclaimer.LICENSE
+ Files with this license:
+ absl/container/internal/hashtablez_sampler.h [37:37]
absl/profiling/internal/sample_recorder.h [22:22]
-
+
SKIP LicenseRef-scancode-generic-cla 5d780ffa423067f23c6a123ae33e7c18
-BELONGS ya.make
- License text:
- \## Contributor License Agreement
- Scancode info:
- Original SPDX id: LicenseRef-scancode-generic-cla
- Score : 16.00
- Match type : NOTICE
- Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/generic-cla.LICENSE
- Files with this license:
- CONTRIBUTING.md [9:9]
-
+BELONGS ya.make
+ License text:
+ \## Contributor License Agreement
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-generic-cla
+ Score : 16.00
+ Match type : NOTICE
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/generic-cla.LICENSE
+ Files with this license:
+ CONTRIBUTING.md [9:9]
+
SKIP LicenseRef-scancode-unknown-license-reference 8e1ade755f3bfad0a6736f291073f1ac
-BELONGS ya.make
- License text:
- license. See [LICENSE](LICENSE) for more information.
- Scancode info:
- Original SPDX id: LicenseRef-scancode-unknown-license-reference
- Score : 100.00
- Match type : REFERENCE
- Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/unknown-license-reference.LICENSE
- Files with this license:
+BELONGS ya.make
+ License text:
+ license. See [LICENSE](LICENSE) for more information.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-unknown-license-reference
+ Score : 100.00
+ Match type : REFERENCE
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/unknown-license-reference.LICENSE
+ Files with this license:
README.md [131:131]
-
+
SKIP LicenseRef-scancode-generic-exception 99cf00730bf3973359b67cfa5b7ac051
-BELONGS absl/synchronization/ya.make
- License text:
- // logging; as a special exception, the function may acquire other mutexes
- Scancode info:
- Original SPDX id: LicenseRef-scancode-generic-exception
- Score : 16.00
- Match type : INTRO
- Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/generic-exception.LICENSE
- Files with this license:
- absl/synchronization/mutex.h [289:289]
-
-KEEP Apache-2.0 cac6cbe8ed5a3da569f7c01e4e486688
-BELONGS ya.make
- Note: matched license text is too long. Read it in the source files.
- Scancode info:
- Original SPDX id: Apache-2.0
- Score : 100.00
- Match type : TEXT
- Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
- Files with this license:
- LICENSE [2:202]
-
-KEEP Apache-2.0 d4afbfe97ca1f27103271d24e8af5b32
-BELONGS absl/debugging/internal/ya.make absl/synchronization/internal/ya.make
- Note: matched license text is too long. Read it in the source files.
- Scancode info:
- Original SPDX id: Apache-2.0
- Score : 100.00
- Match type : NOTICE
- Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
- Files with this license:
- absl/debugging/internal/elf_mem_image.h [4:14]
- absl/debugging/internal/stacktrace_config.h [4:14]
- absl/synchronization/internal/create_thread_identity.h [4:14]
-
+BELONGS absl/synchronization/ya.make
+ License text:
+ // logging; as a special exception, the function may acquire other mutexes
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-generic-exception
+ Score : 16.00
+ Match type : INTRO
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/generic-exception.LICENSE
+ Files with this license:
+ absl/synchronization/mutex.h [289:289]
+
+KEEP Apache-2.0 cac6cbe8ed5a3da569f7c01e4e486688
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TEXT
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ LICENSE [2:202]
+
+KEEP Apache-2.0 d4afbfe97ca1f27103271d24e8af5b32
+BELONGS absl/debugging/internal/ya.make absl/synchronization/internal/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ absl/debugging/internal/elf_mem_image.h [4:14]
+ absl/debugging/internal/stacktrace_config.h [4:14]
+ absl/synchronization/internal/create_thread_identity.h [4:14]
+
SKIP LicenseRef-scancode-generic-cla d72fcd21b18e44b666a94e6225ed43eb
-BELONGS ya.make
- License text:
- Contributions to this project must be accompanied by a Contributor License
- Agreement. You (or your employer) retain the copyright to your contribution,
- Scancode info:
- Original SPDX id: LicenseRef-scancode-generic-cla
- Score : 16.00
- Match type : NOTICE
- Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/generic-cla.LICENSE
- Files with this license:
- CONTRIBUTING.md [11:12]
+BELONGS ya.make
+ License text:
+ Contributions to this project must be accompanied by a Contributor License
+ Agreement. You (or your employer) retain the copyright to your contribution,
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-generic-cla
+ Score : 16.00
+ Match type : NOTICE
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/generic-cla.LICENSE
+ Files with this license:
+ CONTRIBUTING.md [11:12]
SKIP BSD-2-Clause AND GPL-2.0-only e12cf8844c9d92dd647ddf4320b73d06
BELONGS absl/strings/ya.make
diff --git a/contrib/restricted/abseil-cpp/README.md b/contrib/restricted/abseil-cpp/README.md
index db3a7b447a..8e1aadba44 100644
--- a/contrib/restricted/abseil-cpp/README.md
+++ b/contrib/restricted/abseil-cpp/README.md
@@ -9,9 +9,9 @@ standard library.
- [About Abseil](#about)
- [Quickstart](#quickstart)
- [Building Abseil](#build)
-- [Support](#support)
+- [Support](#support)
- [Codemap](#codemap)
-- [Releases](#releases)
+- [Releases](#releases)
- [License](#license)
- [Links](#links)
@@ -44,23 +44,23 @@ the Abseil code, running tests, and getting a simple binary working.
<a name="build"></a>
## Building Abseil
-[Bazel](https://bazel.build) and [CMake](https://cmake.org/) are the official
-build systems for Abseil.
-
-See the [quickstart](https://abseil.io/docs/cpp/quickstart) for more information
-on building Abseil using the Bazel build system.
-
-If you require CMake support, please check the [CMake build
-instructions](CMake/README.md) and [CMake
-Quickstart](https://abseil.io/docs/cpp/quickstart-cmake).
-
-## Support
-
-Abseil is officially supported on many platforms. See the [Abseil
-platform support
-guide](https://abseil.io/docs/cpp/platforms/platforms) for details on
-supported operating systems, compilers, CPUs, etc.
-
+[Bazel](https://bazel.build) and [CMake](https://cmake.org/) are the official
+build systems for Abseil.
+
+See the [quickstart](https://abseil.io/docs/cpp/quickstart) for more information
+on building Abseil using the Bazel build system.
+
+If you require CMake support, please check the [CMake build
+instructions](CMake/README.md) and [CMake
+Quickstart](https://abseil.io/docs/cpp/quickstart-cmake).
+
+## Support
+
+Abseil is officially supported on many platforms. See the [Abseil
+platform support
+guide](https://abseil.io/docs/cpp/platforms/platforms) for details on
+supported operating systems, compilers, CPUs, etc.
+
## Codemap
Abseil contains the following C++ library components:
@@ -72,9 +72,9 @@ Abseil contains the following C++ library components:
* [`algorithm`](absl/algorithm/)
<br /> The `algorithm` library contains additions to the C++ `<algorithm>`
library and container-based versions of such algorithms.
-* [`cleanup`](absl/cleanup/)
- <br /> The `cleanup` library contains the control-flow-construct-like type
- `absl::Cleanup` which is used for executing a callback on scope exit.
+* [`cleanup`](absl/cleanup/)
+ <br /> The `cleanup` library contains the control-flow-construct-like type
+ `absl::Cleanup` which is used for executing a callback on scope exit.
* [`container`](absl/container/)
<br /> The `container` library contains additional STL-style containers,
including Abseil's unordered "Swiss table" containers.
@@ -95,9 +95,9 @@ Abseil contains the following C++ library components:
* [`profiling`](absl/profiling/)
<br /> The `profiling` library contains utility code for profiling C++
entities. It is currently a private dependency of other Abseil libraries.
-* [`status`](absl/status/)
- <br /> The `status` contains abstractions for error handling, specifically
- `absl::Status` and `absl::StatusOr<T>`.
+* [`status`](absl/status/)
+ <br /> The `status` contains abstractions for error handling, specifically
+ `absl::Status` and `absl::StatusOr<T>`.
* [`strings`](absl/strings/)
<br /> The `strings` library contains a variety of strings routines and
utilities, including a C++11-compatible version of the C++17
@@ -116,15 +116,15 @@ Abseil contains the following C++ library components:
* [`utility`](absl/utility/)
<br /> The `utility` library contains utility and helper code.
-## Releases
-
-Abseil recommends users "live-at-head" (update to the latest commit from the
-master branch as often as possible). However, we realize this philosophy doesn't
-work for every project, so we also provide [Long Term Support
-Releases](https://github.com/abseil/abseil-cpp/releases) to which we backport
-fixes for severe bugs. See our [release
-management](https://abseil.io/about/releases) document for more details.
-
+## Releases
+
+Abseil recommends users "live-at-head" (update to the latest commit from the
+master branch as often as possible). However, we realize this philosophy doesn't
+work for every project, so we also provide [Long Term Support
+Releases](https://github.com/abseil/abseil-cpp/releases) to which we backport
+fixes for severe bugs. See our [release
+management](https://abseil.io/about/releases) document for more details.
+
## License
The Abseil C++ library is licensed under the terms of the Apache
diff --git a/contrib/restricted/abseil-cpp/absl/algorithm/container.h b/contrib/restricted/abseil-cpp/absl/algorithm/container.h
index c38a4a63db..ccd257fa53 100644
--- a/contrib/restricted/abseil-cpp/absl/algorithm/container.h
+++ b/contrib/restricted/abseil-cpp/absl/algorithm/container.h
@@ -188,7 +188,7 @@ bool c_any_of(const C& c, Pred&& pred) {
// c_none_of()
//
// Container-based version of the <algorithm> `std::none_of()` function to
-// test if no elements in a container fulfill a condition.
+// test if no elements in a container fulfill a condition.
template <typename C, typename Pred>
bool c_none_of(const C& c, Pred&& pred) {
return std::none_of(container_algorithm_internal::c_begin(c),
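The container.h hunk above only touches trailing whitespace in the c_none_of() comment; the function itself is the container-based wrapper around std::none_of() described there. A minimal usage sketch (the container and predicate are illustrative, not taken from the patch):

    #include <vector>

    #include "absl/algorithm/container.h"

    // True when no element satisfies the predicate, i.e. no element is negative.
    bool AllNonNegative(const std::vector<int>& v) {
      return absl::c_none_of(v, [](int x) { return x < 0; });
    }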
diff --git a/contrib/restricted/abseil-cpp/absl/base/attributes.h b/contrib/restricted/abseil-cpp/absl/base/attributes.h
index e3907827d6..03cf88c270 100644
--- a/contrib/restricted/abseil-cpp/absl/base/attributes.h
+++ b/contrib/restricted/abseil-cpp/absl/base/attributes.h
@@ -119,7 +119,7 @@
#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls)
#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls))
-#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__)
+#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__)
#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
#define ABSL_ATTRIBUTE_NO_TAIL_CALL \
__attribute__((optimize("no-optimize-sibling-calls")))
@@ -621,7 +621,7 @@
// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro has
// no effect on diagnostics. In any case this macro has no effect on runtime
// behavior and performance of code.
-
+
#ifdef ABSL_FALLTHROUGH_INTENDED
#error "ABSL_FALLTHROUGH_INTENDED should not be defined."
#elif ABSL_HAVE_CPP_ATTRIBUTE(fallthrough)
@@ -655,7 +655,7 @@
// Every usage of a deprecated entity will trigger a warning when compiled with
// clang's `-Wdeprecated-declarations` option. This option is turned off by
// default, but the warnings will be reported by clang-tidy.
-#if defined(__clang__) && defined(__cplusplus) && __cplusplus >= 201103L
+#if defined(__clang__) && defined(__cplusplus) && __cplusplus >= 201103L
#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
#endif
@@ -689,27 +689,27 @@
#define ABSL_CONST_INIT
#endif // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
-// ABSL_ATTRIBUTE_PURE_FUNCTION
-//
-// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure"
-// functions. A function is pure if its return value is only a function of its
-// arguments. The pure attribute prohibits a function from modifying the state
-// of the program that is observable by means other than inspecting the
-// function's return value. Declaring such functions with the pure attribute
-// allows the compiler to avoid emitting some calls in repeated invocations of
-// the function with the same argument values.
-//
-// Example:
-//
-// ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d);
-#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure)
-#define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]]
-#elif ABSL_HAVE_ATTRIBUTE(pure)
-#define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure))
-#else
-#define ABSL_ATTRIBUTE_PURE_FUNCTION
-#endif
-
+// ABSL_ATTRIBUTE_PURE_FUNCTION
+//
+// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure"
+// functions. A function is pure if its return value is only a function of its
+// arguments. The pure attribute prohibits a function from modifying the state
+// of the program that is observable by means other than inspecting the
+// function's return value. Declaring such functions with the pure attribute
+// allows the compiler to avoid emitting some calls in repeated invocations of
+// the function with the same argument values.
+//
+// Example:
+//
+// ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d);
+#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure)
+#define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]]
+#elif ABSL_HAVE_ATTRIBUTE(pure)
+#define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure))
+#else
+#define ABSL_ATTRIBUTE_PURE_FUNCTION
+#endif
+
// ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function
// parameter or implicit object parameter is retained by the return value of the
// annotated function (or, for a parameter of a constructor, in the value of the
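The attributes.h hunk restores the ABSL_ATTRIBUTE_PURE_FUNCTION documentation block with only whitespace changes. As the restored comment explains, the annotation marks functions whose result depends solely on their arguments; a minimal sketch of its use (the declared function is hypothetical, patterned on the ToInt64Milliseconds example in the comment):

    #include <cstdint>

    #include "absl/base/attributes.h"

    // Pure: the result depends only on the argument and there are no observable
    // side effects, so the compiler may fold repeated calls with the same input.
    ABSL_ATTRIBUTE_PURE_FUNCTION int64_t SquareMilliseconds(int64_t ms);

    int64_t SquareMilliseconds(int64_t ms) { return ms * ms; }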
diff --git a/contrib/restricted/abseil-cpp/absl/base/call_once.h b/contrib/restricted/abseil-cpp/absl/base/call_once.h
index 96109f537c..86635bbee0 100644
--- a/contrib/restricted/abseil-cpp/absl/base/call_once.h
+++ b/contrib/restricted/abseil-cpp/absl/base/call_once.h
@@ -177,8 +177,8 @@ void CallOnceImpl(std::atomic<uint32_t>* control,
scheduling_mode) == kOnceInit) {
base_internal::invoke(std::forward<Callable>(fn),
std::forward<Args>(args)...);
- old_control =
- control->exchange(base_internal::kOnceDone, std::memory_order_release);
+ old_control =
+ control->exchange(base_internal::kOnceDone, std::memory_order_release);
if (old_control == base_internal::kOnceWaiter) {
base_internal::SpinLockWake(control, true);
}
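The call_once.h hunk only re-indents the release-store of kOnceDone inside CallOnceImpl; the user-facing pattern built on top of it is unchanged. A minimal sketch of that pattern (the initializer is hypothetical):

    #include "absl/base/call_once.h"

    absl::once_flag init_flag;

    void InitSubsystem();  // hypothetical one-time initializer

    void EnsureInitialized() {
      // CallOnceImpl publishes kOnceDone with memory_order_release, so any
      // thread that sees the flag as done also sees the initializer's writes.
      absl::call_once(init_flag, InitSubsystem);
    }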
diff --git a/contrib/restricted/abseil-cpp/absl/base/config.h b/contrib/restricted/abseil-cpp/absl/base/config.h
index 450d00bc4c..21244de01e 100644
--- a/contrib/restricted/abseil-cpp/absl/base/config.h
+++ b/contrib/restricted/abseil-cpp/absl/base/config.h
@@ -150,16 +150,16 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
#define ABSL_NAMESPACE_BEGIN
#define ABSL_NAMESPACE_END
-#define ABSL_INTERNAL_C_SYMBOL(x) x
+#define ABSL_INTERNAL_C_SYMBOL(x) x
#elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1
#define ABSL_NAMESPACE_BEGIN \
inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME {
#define ABSL_NAMESPACE_END }
-#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v
-#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \
- ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v)
-#define ABSL_INTERNAL_C_SYMBOL(x) \
- ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME)
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \
+ ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v)
+#define ABSL_INTERNAL_C_SYMBOL(x) \
+ ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME)
#else
#error options.h is misconfigured.
#endif
@@ -268,7 +268,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE)
#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
#elif ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0)
-#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
+#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
#endif
#endif
@@ -429,15 +429,15 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
#endif
-// ABSL_HAVE_SCHED_GETCPU
-//
-// Checks whether sched_getcpu is available.
-#ifdef ABSL_HAVE_SCHED_GETCPU
-#error ABSL_HAVE_SCHED_GETCPU cannot be directly set
-#elif defined(__linux__)
-#define ABSL_HAVE_SCHED_GETCPU 1
-#endif
-
+// ABSL_HAVE_SCHED_GETCPU
+//
+// Checks whether sched_getcpu is available.
+#ifdef ABSL_HAVE_SCHED_GETCPU
+#error ABSL_HAVE_SCHED_GETCPU cannot be directly set
+#elif defined(__linux__)
+#define ABSL_HAVE_SCHED_GETCPU 1
+#endif
+
// ABSL_HAVE_SCHED_YIELD
//
// Checks whether the platform implements sched_yield(2) as defined in
@@ -549,7 +549,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#endif
#ifdef __has_include
-#if __has_include(<any>) && defined(__cplusplus) && __cplusplus >= 201703L && \
+#if __has_include(<any>) && defined(__cplusplus) && __cplusplus >= 201703L && \
!ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_ANY 1
#endif
@@ -563,8 +563,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#endif
#ifdef __has_include
-#if __has_include(<optional>) && defined(__cplusplus) && \
- __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#if __has_include(<optional>) && defined(__cplusplus) && \
+ __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_OPTIONAL 1
#endif
#endif
@@ -577,8 +577,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#endif
#ifdef __has_include
-#if __has_include(<variant>) && defined(__cplusplus) && \
- __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#if __has_include(<variant>) && defined(__cplusplus) && \
+ __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_VARIANT 1
#endif
#endif
@@ -599,9 +599,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
// version.
// TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
-#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
- ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \
- (defined(__cplusplus) && __cplusplus > 201402))
+#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
+ ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \
+ (defined(__cplusplus) && __cplusplus > 201402))
// #define ABSL_HAVE_STD_ANY 1
#define ABSL_HAVE_STD_OPTIONAL 1
#define ABSL_HAVE_STD_VARIANT 1
@@ -755,13 +755,13 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_ADDRESS_SANITIZER 1
#endif
-// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
-//
-// Class template argument deduction is a language feature added in C++17.
-#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
-#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set."
-#elif defined(__cpp_deduction_guides)
-#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
-#endif
-
+// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+//
+// Class template argument deduction is a language feature added in C++17.
+#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set."
+#elif defined(__cpp_deduction_guides)
+#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
+#endif
+
#endif // ABSL_BASE_CONFIG_H_
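The config.h hunk restores the ABSL_HAVE_SCHED_GETCPU and ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION feature checks unchanged. A hedged sketch of how such a feature macro is typically consumed (the helper name and fallback value are illustrative; on glibc, sched_getcpu may additionally require _GNU_SOURCE):

    #include "absl/base/config.h"

    #ifdef ABSL_HAVE_SCHED_GETCPU
    #include <sched.h>
    #endif

    // Returns the current CPU index, or 0 where sched_getcpu is unavailable.
    int CurrentCpuOrZero() {
    #ifdef ABSL_HAVE_SCHED_GETCPU
      int cpu = sched_getcpu();
      return cpu >= 0 ? cpu : 0;
    #else
      return 0;
    #endif
    }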
diff --git a/contrib/restricted/abseil-cpp/absl/base/dynamic_annotations.h b/contrib/restricted/abseil-cpp/absl/base/dynamic_annotations.h
index 3ea7c1568c..bc823c0a7d 100644
--- a/contrib/restricted/abseil-cpp/absl/base/dynamic_annotations.h
+++ b/contrib/restricted/abseil-cpp/absl/base/dynamic_annotations.h
@@ -110,9 +110,9 @@
// Define race annotations.
#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
-// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
-// defined by the compiler-based santizer implementation, not by the Abseil
-// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
+// defined by the compiler-based santizer implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
// -------------------------------------------------------------
// Annotations that suppress errors. It is usually better to express the
@@ -289,22 +289,22 @@ ABSL_INTERNAL_END_EXTERN_C
// Define IGNORE_READS_BEGIN/_END annotations.
#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
-// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are
-// defined by the compiler-based implementation, not by the Abseil
-// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are
+// defined by the compiler-based implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
// Request the analysis tool to ignore all reads in the current thread until
// ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
// reads, while still checking other reads and all writes.
// See also ABSL_ANNOTATE_UNPROTECTED_READ.
-#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
- (__FILE__, __LINE__)
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
+ (__FILE__, __LINE__)
// Stop ignoring reads.
-#define ABSL_ANNOTATE_IGNORE_READS_END() \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
- (__FILE__, __LINE__)
+#define ABSL_ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
+ (__FILE__, __LINE__)
// Function prototypes of annotations provided by the compiler-based sanitizer
// implementation.
@@ -324,22 +324,22 @@ ABSL_INTERNAL_END_EXTERN_C
// TODO(delesley) -- The exclusive lock here ignores writes as well, but
// allows IGNORE_READS_AND_WRITES to work properly.
-#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
- ABSL_INTERNAL_GLOBAL_SCOPED( \
- ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \
- ()
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED( \
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \
+ ()
-#define ABSL_ANNOTATE_IGNORE_READS_END() \
- ABSL_INTERNAL_GLOBAL_SCOPED( \
- ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \
- ()
+#define ABSL_ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED( \
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \
+ ()
-ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
- AbslInternalAnnotateIgnoreReadsBegin)()
+ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
+ AbslInternalAnnotateIgnoreReadsBegin)()
ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {}
-ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
- AbslInternalAnnotateIgnoreReadsEnd)()
+ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
+ AbslInternalAnnotateIgnoreReadsEnd)()
ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {}
#else
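The dynamic_annotations.h hunk re-wraps the ABSL_ANNOTATE_IGNORE_READS_BEGIN/END macro definitions without changing them. As the surrounding comments describe, the pair brackets reads that are intentionally racy so the race detector skips them; a minimal sketch (the counter is a hypothetical, deliberately unsynchronized statistic):

    #include "absl/base/dynamic_annotations.h"

    extern int approximate_counter;  // hypothetical, intentionally racy statistic

    int ReadApproximateCounter() {
      ABSL_ANNOTATE_IGNORE_READS_BEGIN();  // suppress race reports for the read below
      int value = approximate_counter;
      ABSL_ANNOTATE_IGNORE_READS_END();
      return value;
    }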
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/direct_mmap.h b/contrib/restricted/abseil-cpp/absl/base/internal/direct_mmap.h
index 274054cd5a..6a83966333 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/direct_mmap.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/direct_mmap.h
@@ -74,13 +74,13 @@ namespace base_internal {
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
off64_t offset) noexcept {
#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
- defined(__m68k__) || defined(__sh__) || \
- (defined(__hppa__) && !defined(__LP64__)) || \
+ defined(__m68k__) || defined(__sh__) || \
+ (defined(__hppa__) && !defined(__LP64__)) || \
(defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
(defined(__PPC__) && !defined(__PPC64__)) || \
(defined(__riscv) && __riscv_xlen == 32) || \
- (defined(__s390__) && !defined(__s390x__)) || \
- (defined(__sparc__) && !defined(__arch64__))
+ (defined(__s390__) && !defined(__s390x__)) || \
+ (defined(__sparc__) && !defined(__arch64__))
// On these architectures, implement mmap with mmap2.
static int pagesize = 0;
if (pagesize == 0) {
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/endian.h b/contrib/restricted/abseil-cpp/absl/base/internal/endian.h
index dad0e9aeb0..acd60cd340 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/endian.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/endian.h
@@ -26,7 +26,7 @@
#endif
#include <cstdint>
-#include "absl/base/casts.h"
+#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/port.h"
@@ -174,36 +174,36 @@ inline constexpr bool IsLittleEndian() { return false; }
#endif /* ENDIAN */
-inline uint8_t FromHost(uint8_t x) { return x; }
-inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
-inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
-inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
-inline uint8_t ToHost(uint8_t x) { return x; }
-inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
-inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
-inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
-
-inline int8_t FromHost(int8_t x) { return x; }
-inline int16_t FromHost(int16_t x) {
- return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
-}
-inline int32_t FromHost(int32_t x) {
- return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
-}
-inline int64_t FromHost(int64_t x) {
- return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
-}
-inline int8_t ToHost(int8_t x) { return x; }
-inline int16_t ToHost(int16_t x) {
- return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
-}
-inline int32_t ToHost(int32_t x) {
- return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
-}
-inline int64_t ToHost(int64_t x) {
- return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
-}
-
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+ return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+ return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+ return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+ return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+ return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+ return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
// Functions to do unaligned loads and stores in little-endian order.
inline uint16_t Load16(const void *p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
@@ -264,36 +264,36 @@ inline constexpr bool IsLittleEndian() { return false; }
#endif /* ENDIAN */
-inline uint8_t FromHost(uint8_t x) { return x; }
-inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
-inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
-inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
-inline uint8_t ToHost(uint8_t x) { return x; }
-inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
-inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
-inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
-
-inline int8_t FromHost(int8_t x) { return x; }
-inline int16_t FromHost(int16_t x) {
- return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
-}
-inline int32_t FromHost(int32_t x) {
- return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
-}
-inline int64_t FromHost(int64_t x) {
- return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
-}
-inline int8_t ToHost(int8_t x) { return x; }
-inline int16_t ToHost(int16_t x) {
- return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
-}
-inline int32_t ToHost(int32_t x) {
- return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
-}
-inline int64_t ToHost(int64_t x) {
- return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
-}
-
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+ return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+ return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+ return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+ return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+ return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+ return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
// Functions to do unaligned loads and stores in big-endian order.
inline uint16_t Load16(const void *p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
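The endian.h hunk restores the signed FromHost/ToHost overloads, which round-trip through bit_cast to the matching unsigned type, while the unaligned Load16 shown above converts wire bytes to host order. A sketch of the intended use, assuming Load16 is exposed through the usual absl::little_endian wrapper namespace in this header:

    #include <cstdint>

    #include "absl/base/internal/endian.h"

    // Reads a 16-bit length field stored little-endian at an arbitrary,
    // possibly unaligned offset in a wire-format buffer.
    uint16_t ReadLengthField(const char* wire_bytes) {
      return absl::little_endian::Load16(wire_bytes);
    }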
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_scheduling.h b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_scheduling.h
index 9baccc0659..e306ead646 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_scheduling.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_scheduling.h
@@ -61,8 +61,8 @@ class SchedulingGuard {
public:
// Returns true iff the calling thread may be cooperatively rescheduled.
static bool ReschedulingIsAllowed();
- SchedulingGuard(const SchedulingGuard&) = delete;
- SchedulingGuard& operator=(const SchedulingGuard&) = delete;
+ SchedulingGuard(const SchedulingGuard&) = delete;
+ SchedulingGuard& operator=(const SchedulingGuard&) = delete;
private:
// Disable cooperative rescheduling of the calling thread. It may still
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.cc b/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.cc
index 074e026adb..26560af1f8 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.cc
@@ -67,32 +67,32 @@
#undef ABSL_HAVE_RAW_IO
#endif
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace raw_logging_internal {
-namespace {
-
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_logging_internal {
+namespace {
+
// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
-// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
-// a selected set of platforms for which we expect not to be able to raw log.
+// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
+// a selected set of platforms for which we expect not to be able to raw log.
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- absl::base_internal::AtomicHook<LogPrefixHook>
- log_prefix_hook;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- absl::base_internal::AtomicHook<AbortHook>
- abort_hook;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ absl::base_internal::AtomicHook<LogPrefixHook>
+ log_prefix_hook;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ absl::base_internal::AtomicHook<AbortHook>
+ abort_hook;
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
-constexpr char kTruncated[] = " ... (message truncated)\n";
+constexpr char kTruncated[] = " ... (message truncated)\n";
// sprintf the format to the buffer, adjusting *buf and *size to reflect the
// consumed bytes, and return whether the message fit without truncation. If
// truncation occurred, if possible leave room in the buffer for the message
// kTruncated[].
-bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
- ABSL_PRINTF_ATTRIBUTE(3, 0);
-bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
+ ABSL_PRINTF_ATTRIBUTE(3, 0);
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
int n = vsnprintf(*buf, *size, format, ap);
bool result = true;
if (n < 0 || n > *size) {
@@ -100,7 +100,7 @@ bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
n = *size - sizeof(kTruncated); // room for truncation message
} else {
- n = 0; // no room for truncation message
+ n = 0; // no room for truncation message
}
}
*size -= n;
@@ -109,7 +109,7 @@ bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
}
#endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED
-constexpr int kLogBufSize = 3000;
+constexpr int kLogBufSize = 3000;
// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
// that invoke malloc() and getenv() that might acquire some locks.
@@ -168,7 +168,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line,
} else {
DoRawLog(&buf, &size, "%s", kTruncated);
}
- SafeWriteToStderr(buffer, strlen(buffer));
+ SafeWriteToStderr(buffer, strlen(buffer));
}
#else
static_cast<void>(format);
@@ -183,16 +183,16 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line,
}
}
-// Non-formatting version of RawLog().
-//
-// TODO(gfalcon): When string_view no longer depends on base, change this
-// interface to take its message as a string_view instead.
-void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
- const std::string& message) {
- RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
- message.data());
-}
-
+// Non-formatting version of RawLog().
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
+ const std::string& message) {
+ RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
+ message.data());
+}
+
} // namespace
void SafeWriteToStderr(const char *s, size_t len) {
@@ -229,10 +229,10 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog);
-void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); }
-
-void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
-
+void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); }
+
+void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
+
void RegisterInternalLogFunction(InternalLogFunction func) {
internal_log_function.Store(func);
}
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.h b/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.h
index 2bf7aabac1..4a5ba01071 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.h
@@ -72,14 +72,14 @@
//
// The API is a subset of the above: each macro only takes two arguments. Use
// StrCat if you need to build a richer message.
-#define ABSL_INTERNAL_LOG(severity, message) \
- do { \
- constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
- ::absl::raw_logging_internal::internal_log_function( \
- ABSL_RAW_LOGGING_INTERNAL_##severity, \
- absl_raw_logging_internal_filename, __LINE__, message); \
- if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \
- ABSL_INTERNAL_UNREACHABLE; \
+#define ABSL_INTERNAL_LOG(severity, message) \
+ do { \
+ constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
+ ::absl::raw_logging_internal::internal_log_function( \
+ ABSL_RAW_LOGGING_INTERNAL_##severity, \
+ absl_raw_logging_internal_filename, __LINE__, message); \
+ if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \
+ ABSL_INTERNAL_UNREACHABLE; \
} while (0)
#define ABSL_INTERNAL_CHECK(condition, message) \
@@ -178,14 +178,14 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
InternalLogFunction>
internal_log_function;
-// Registers hooks of the above types. Only a single hook of each type may be
-// registered. It is an error to call these functions multiple times with
-// different input arguments.
-//
-// These functions are safe to call at any point during initialization; they do
-// not block or malloc, and are async-signal safe.
-void RegisterLogPrefixHook(LogPrefixHook func);
-void RegisterAbortHook(AbortHook func);
+// Registers hooks of the above types. Only a single hook of each type may be
+// registered. It is an error to call these functions multiple times with
+// different input arguments.
+//
+// These functions are safe to call at any point during initialization; they do
+// not block or malloc, and are async-signal safe.
+void RegisterLogPrefixHook(LogPrefixHook func);
+void RegisterAbortHook(AbortHook func);
void RegisterInternalLogFunction(InternalLogFunction func);
} // namespace raw_logging_internal
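[Editor's note: a minimal usage sketch for the raw-logging interface touched above. It uses only the public-facing ABSL_RAW_LOG/ABSL_RAW_CHECK macros from this same header; raw logging is an async-signal-safe, malloc-free path intended for absl-internal use, so this is illustrative rather than a recommendation for application code.]

    #include "absl/base/internal/raw_logging.h"

    // Minimal sketch: raw logging formats into a bounded stack buffer and
    // writes straight to stderr, so it stays usable where malloc or locks
    // are off limits (e.g. inside allocators or signal handlers).
    void CheckFd(int fd) {
      ABSL_RAW_CHECK(fd >= 0, "negative file descriptor");  // fatal if false
      ABSL_RAW_LOG(INFO, "operating on fd %d", fd);          // printf-style
    }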
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc
index 35c0696a34..62dc4c7869 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc
@@ -125,9 +125,9 @@ void SpinLock::SlowLock() {
// it as having a sleeper.
if ((lock_value & kWaitTimeMask) == 0) {
// Here, just "mark" that the thread is going to sleep. Don't store the
- // lock wait time in the lock -- the lock word stores the amount of time
- // that the current holder waited before acquiring the lock, not the wait
- // time of any thread currently waiting to acquire it.
+ // lock wait time in the lock -- the lock word stores the amount of time
+ // that the current holder waited before acquiring the lock, not the wait
+ // time of any thread currently waiting to acquire it.
if (lockword_.compare_exchange_strong(
lock_value, lock_value | kSpinLockSleeper,
std::memory_order_relaxed, std::memory_order_relaxed)) {
@@ -141,14 +141,14 @@ void SpinLock::SlowLock() {
// this thread obtains the lock.
lock_value = TryLockInternal(lock_value, wait_cycles);
continue; // Skip the delay at the end of the loop.
- } else if ((lock_value & kWaitTimeMask) == 0) {
- // The lock is still held, without a waiter being marked, but something
- // else about the lock word changed, causing our CAS to fail. For
- // example, a new lock holder may have acquired the lock with
- // kSpinLockDisabledScheduling set, whereas the previous holder had not
- // set that flag. In this case, attempt again to mark ourselves as a
- // waiter.
- continue;
+ } else if ((lock_value & kWaitTimeMask) == 0) {
+ // The lock is still held, without a waiter being marked, but something
+ // else about the lock word changed, causing our CAS to fail. For
+ // example, a new lock holder may have acquired the lock with
+ // kSpinLockDisabledScheduling set, whereas the previous holder had not
+ // set that flag. In this case, attempt again to mark ourselves as a
+ // waiter.
+ continue;
}
}
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
index ac40daff12..f00df6da26 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
@@ -15,7 +15,7 @@
//
// Most users requiring mutual exclusion should use Mutex.
-// SpinLock is provided for use in two situations:
+// SpinLock is provided for use in two situations:
// - for use by Abseil internal code that Mutex itself depends on
// - for async signal safety (see below)
@@ -139,20 +139,20 @@ class ABSL_LOCKABLE SpinLock {
//
// bit[0] encodes whether a lock is being held.
// bit[1] encodes whether a lock uses cooperative scheduling.
- // bit[2] encodes whether the current lock holder disabled scheduling when
- // acquiring the lock. Only set when kSpinLockHeld is also set.
+ // bit[2] encodes whether the current lock holder disabled scheduling when
+ // acquiring the lock. Only set when kSpinLockHeld is also set.
// bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
- // This is set by the lock holder to indicate how long it waited on
- // the lock before eventually acquiring it. The number of cycles is
- // encoded as a 29-bit unsigned int, or in the case that the current
- // holder did not wait but another waiter is queued, the LSB
- // (kSpinLockSleeper) is set. The implementation does not explicitly
- // track the number of queued waiters beyond this. It must always be
- // assumed that waiters may exist if the current holder was required to
- // queue.
- //
- // Invariant: if the lock is not held, the value is either 0 or
- // kSpinLockCooperative.
+ // This is set by the lock holder to indicate how long it waited on
+ // the lock before eventually acquiring it. The number of cycles is
+ // encoded as a 29-bit unsigned int, or in the case that the current
+ // holder did not wait but another waiter is queued, the LSB
+ // (kSpinLockSleeper) is set. The implementation does not explicitly
+ // track the number of queued waiters beyond this. It must always be
+ // assumed that waiters may exist if the current holder was required to
+ // queue.
+ //
+ // Invariant: if the lock is not held, the value is either 0 or
+ // kSpinLockCooperative.
static constexpr uint32_t kSpinLockHeld = 1;
static constexpr uint32_t kSpinLockCooperative = 2;
static constexpr uint32_t kSpinLockDisabledScheduling = 4;
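[Editor's note: a reading aid for the lock-word layout comment restored above. The three flag constants mirror the values in the hunk; kWaitTimeMask and DescribeLockWord are hypothetical names for illustration only, not part of the absl API.]

    #include <cstdint>
    #include <cstdio>

    // Hypothetical decoder for the layout documented above:
    // bit 0 = held, bit 1 = cooperative, bit 2 = scheduling disabled,
    // bits 3..31 = encoded wait time (or kSpinLockSleeper as its LSB).
    constexpr uint32_t kHeld = 1, kCooperative = 2, kDisabledScheduling = 4;
    constexpr uint32_t kWaitTimeMask = ~uint32_t{7};

    void DescribeLockWord(uint32_t w) {
      std::printf("held=%u coop=%u nosched=%u wait_bits=0x%x\n",
                  w & kHeld, (w & kCooperative) >> 1,
                  (w & kDisabledScheduling) >> 2, (w & kWaitTimeMask) >> 3);
    }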
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_akaros.inc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_akaros.inc
index 7b0cada4f1..12d3f713d1 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_akaros.inc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_akaros.inc
@@ -20,7 +20,7 @@
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
// In Akaros, one must take care not to call anything that could cause a
@@ -29,7 +29,7 @@ ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
// arbitrary code.
}
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_linux.inc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_linux.inc
index 202f7cdfc8..cb9806de5f 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_linux.inc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_linux.inc
@@ -56,7 +56,7 @@ static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode) {
absl::base_internal::ErrnoSaver errno_saver;
@@ -66,8 +66,8 @@ ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
}
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
- std::atomic<uint32_t> *w, bool all) {
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ std::atomic<uint32_t> *w, bool all) {
syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
}
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_posix.inc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_posix.inc
index 4f6f887d99..5ab5cbf816 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_posix.inc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_posix.inc
@@ -25,7 +25,7 @@
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
absl::base_internal::SchedulingMode /* mode */) {
absl::base_internal::ErrnoSaver errno_saver;
@@ -40,7 +40,7 @@ ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
}
}
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.h b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.h
index 9a1adcda5e..5c4a783bb5 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.h
@@ -45,16 +45,16 @@ uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
const SpinLockWaitTransition trans[],
SchedulingMode scheduling_mode);
-// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
-// is true, wake all such threads. On some systems, this may be a no-op; on
-// those systems, threads calling SpinLockDelay() will always wake eventually
-// even if SpinLockWake() is never called.
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
void SpinLockWake(std::atomic<uint32_t> *w, bool all);
// Wait for an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
-// or may wait for a call to SpinLockWake(w).
+// or may wait for a call to SpinLockWake(w).
void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
base_internal::SchedulingMode scheduling_mode);
@@ -73,23 +73,23 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
- bool all);
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
+ bool all);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode scheduling_mode);
}
inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
bool all) {
- ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
}
inline void absl::base_internal::SpinLockDelay(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode scheduling_mode) {
- ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
- (w, value, loop, scheduling_mode);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
+ (w, value, loop, scheduling_mode);
}
#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
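[Editor's note: the SpinLockWake/SpinLockDelay contract above is easiest to see in a plain-std sketch. WaitWhileEquals below is hypothetical and substitutes yield/sleep for the platform-specific delay implementations that the following *.inc hunks patch.]

    #include <atomic>
    #include <chrono>
    #include <cstdint>
    #include <thread>

    // Hypothetical stand-in for the delay half of the contract: spin briefly,
    // then back off, until the watched word stops holding `value`. A real
    // implementation may instead block on a futex and rely on a wake call.
    void WaitWhileEquals(std::atomic<uint32_t>& w, uint32_t value) {
      for (int loop = 0; w.load(std::memory_order_relaxed) == value; ++loop) {
        if (loop < 32) {
          std::this_thread::yield();                                    // cheap
        } else {
          std::this_thread::sleep_for(std::chrono::microseconds(100));  // back off
        }
      }
    }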
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_win32.inc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_win32.inc
index 9d224813a5..5ce0dbad8c 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_win32.inc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_win32.inc
@@ -20,9 +20,9 @@
extern "C" {
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
- std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
- absl::base_internal::SchedulingMode /* mode */) {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+ absl::base_internal::SchedulingMode /* mode */) {
if (loop == 0) {
} else if (loop == 1) {
Sleep(0);
@@ -31,7 +31,7 @@ void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
}
}
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
- std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/strerror.cc b/contrib/restricted/abseil-cpp/absl/base/internal/strerror.cc
index 0d6226fd0a..ba1af09bee 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/strerror.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/strerror.cc
@@ -75,7 +75,7 @@ std::array<std::string, kSysNerr>* NewStrErrorTable() {
} // namespace
std::string StrError(int errnum) {
- absl::base_internal::ErrnoSaver errno_saver;
+ absl::base_internal::ErrnoSaver errno_saver;
static const auto* table = NewStrErrorTable();
if (errnum >= 0 && errnum < static_cast<int>(table->size())) {
return (*table)[errnum];
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.cc b/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.cc
index 8c2e6c87fa..823608d3d7 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.cc
@@ -495,7 +495,7 @@ pid_t GetTID() {
// userspace construct) to avoid unnecessary system calls. Without this caching,
// it can take roughly 98ns, while it takes roughly 1ns with this caching.
pid_t GetCachedTID() {
-#ifdef ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local pid_t thread_id = GetTID();
return thread_id;
#else
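[Editor's note: the caching idea in the GetCachedTID() hunk above generalizes; the sketch below uses the same pattern with hypothetical names (ExpensiveTid, CurrentTid) and assumes a Linux gettid syscall.]

    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    // Hypothetical illustration of the caching pattern above: the syscall
    // runs once per thread; later calls read a thread_local (~1ns vs ~98ns).
    pid_t ExpensiveTid() { return static_cast<pid_t>(syscall(SYS_gettid)); }

    pid_t CurrentTid() {
      static thread_local pid_t tid = ExpensiveTid();
      return tid;
    }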
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.cc b/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.cc
index 9950e63a79..f1e9a35994 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.cc
@@ -23,7 +23,7 @@
#include <cassert>
#include <memory>
-#include "absl/base/attributes.h"
+#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
@@ -54,11 +54,11 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
// exist within a process (via dlopen() or similar), references to
// thread_identity_ptr from each instance of the code will refer to
// *different* instances of this ptr.
-// Apple platforms have the visibility attribute, but issue a compile warning
-// that protected visibility is unsupported.
-#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+// Apple platforms have the visibility attribute, but issue a compile warning
+// that protected visibility is unsupported.
+#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
__attribute__((visibility("protected")))
-#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
#if ABSL_PER_THREAD_TLS
// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
@@ -120,10 +120,10 @@ void SetCurrentThreadIdentity(
ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
// Please see the comment on `CurrentThreadIdentityIfPresent` in
-// thread_identity.h. When we cannot expose thread_local variables in
-// headers, we opt for the correct-but-slower option of not inlining this
-// function.
-#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
+// thread_identity.h. When we cannot expose thread_local variables in
+// headers, we opt for the correct-but-slower option of not inlining this
+// function.
+#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; }
#endif
#endif
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.h b/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.h
index 659694b326..2920a5bfff 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.h
@@ -32,7 +32,7 @@
#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
-#include "absl/base/optimization.h"
+#include "absl/base/optimization.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -70,28 +70,28 @@ struct PerThreadSynch {
// is using this PerThreadSynch as a terminator. Its
// skip field must not be filled in because the loop
// might then skip over the terminator.
- bool wake; // This thread is to be woken from a Mutex.
- // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
- // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
- //
- // The value of "x->cond_waiter" is meaningless if "x" is not on a
- // Mutex waiter list.
- bool cond_waiter;
- bool maybe_unlocking; // Valid at head of Mutex waiter queue;
- // true if UnlockSlow could be searching
- // for a waiter to wake. Used for an optimization
- // in Enqueue(). true is always a valid value.
- // Can be reset to false when the unlocker or any
- // writer releases the lock, or a reader fully
- // releases the lock. It may not be set to false
- // by a reader that decrements the count to
- // non-zero. protected by mutex spinlock
- bool suppress_fatal_errors; // If true, try to proceed even in the face
- // of broken invariants. This is used within
- // fatal signal handlers to improve the
- // chances of debug logging information being
- // output successfully.
- int priority; // Priority of thread (updated every so often).
+ bool wake; // This thread is to be woken from a Mutex.
+ // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+ // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+ //
+ // The value of "x->cond_waiter" is meaningless if "x" is not on a
+ // Mutex waiter list.
+ bool cond_waiter;
+ bool maybe_unlocking; // Valid at head of Mutex waiter queue;
+ // true if UnlockSlow could be searching
+ // for a waiter to wake. Used for an optimization
+ // in Enqueue(). true is always a valid value.
+ // Can be reset to false when the unlocker or any
+ // writer releases the lock, or a reader fully
+ // releases the lock. It may not be set to false
+ // by a reader that decrements the count to
+ // non-zero. protected by mutex spinlock
+ bool suppress_fatal_errors; // If true, try to proceed even in the face
+ // of broken invariants. This is used within
+ // fatal signal handlers to improve the
+ // chances of debug logging information being
+ // output successfully.
+ int priority; // Priority of thread (updated every so often).
// State values:
// kAvailable: This PerThreadSynch is available.
@@ -110,30 +110,30 @@ struct PerThreadSynch {
};
std::atomic<State> state;
- // The wait parameters of the current wait. waitp is null if the
- // thread is not waiting. Transitions from null to non-null must
- // occur before the enqueue commit point (state = kQueued in
- // Enqueue() and CondVarEnqueue()). Transitions from non-null to
- // null must occur after the wait is finished (state = kAvailable in
- // Mutex::Block() and CondVar::WaitCommon()). This field may be
- // changed only by the thread that describes this PerThreadSynch. A
- // special case is Fer(), which calls Enqueue() on another thread,
- // but with an identical SynchWaitParams pointer, thus leaving the
- // pointer unchanged.
- SynchWaitParams* waitp;
+ // The wait parameters of the current wait. waitp is null if the
+ // thread is not waiting. Transitions from null to non-null must
+ // occur before the enqueue commit point (state = kQueued in
+ // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+ // null must occur after the wait is finished (state = kAvailable in
+ // Mutex::Block() and CondVar::WaitCommon()). This field may be
+ // changed only by the thread that describes this PerThreadSynch. A
+ // special case is Fer(), which calls Enqueue() on another thread,
+ // but with an identical SynchWaitParams pointer, thus leaving the
+ // pointer unchanged.
+ SynchWaitParams* waitp;
- intptr_t readers; // Number of readers in mutex.
+ intptr_t readers; // Number of readers in mutex.
- // When priority will next be read (cycles).
- int64_t next_priority_read_cycles;
+ // When priority will next be read (cycles).
+ int64_t next_priority_read_cycles;
// Locks held; used during deadlock detection.
// Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
SynchLocksHeld *all_locks;
};
-// The instances of this class are allocated in NewThreadIdentity() with an
-// alignment of PerThreadSynch::kAlignment.
+// The instances of this class are allocated in NewThreadIdentity() with an
+// alignment of PerThreadSynch::kAlignment.
struct ThreadIdentity {
// Must be the first member. The Mutex implementation requires that
// the PerThreadSynch object associated with each thread is
@@ -143,7 +143,7 @@ struct ThreadIdentity {
// Private: Reserved for absl::synchronization_internal::Waiter.
struct WaiterState {
- alignas(void*) char data[128];
+ alignas(void*) char data[128];
} waiter_state;
// Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
@@ -211,9 +211,9 @@ void ClearCurrentThreadIdentity();
#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
#elif defined(_WIN32) && !defined(__MINGW32__)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
-#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
(__GOOGLE_GRTE_VERSION__ >= 20140228L)
// Support for async-safe TLS was specifically added in GRTEv4. It's not
// present in the upstream eglibc.
@@ -236,18 +236,18 @@ ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
#error Thread-local storage not detected on this platform
#endif
-// thread_local variables cannot be in headers exposed by DLLs or in certain
-// build configurations on Apple platforms. However, it is important for
-// performance reasons in general that `CurrentThreadIdentityIfPresent` be
-// inlined. In the other cases we opt to have the function not be inlined. Note
+// thread_local variables cannot be in headers exposed by DLLs or in certain
+// build configurations on Apple platforms. However, it is important for
+// performance reasons in general that `CurrentThreadIdentityIfPresent` be
+// inlined. In the other cases we opt to have the function not be inlined. Note
// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
-// this entire inline definition.
-#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
- !defined(ABSL_CONSUME_DLL)
-#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
-#endif
-
-#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
+// this entire inline definition.
+#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
+ !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
+#endif
+
+#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
return thread_identity_ptr;
}
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.cc b/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.cc
index c260ff1eed..37b4c38b5a 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.cc
@@ -18,7 +18,7 @@
#include <functional>
#include <new>
#include <stdexcept>
-
+
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
@@ -26,186 +26,186 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
-// NOTE: The various STL exception throwing functions are placed within the
-// #ifdef blocks so the symbols aren't exposed on platforms that don't support
-// them, such as the Android NDK. For example, ANGLE fails to link when building
-// within AOSP without them, since the STL functions don't exist.
+// NOTE: The various STL exception throwing functions are placed within the
+// #ifdef blocks so the symbols aren't exposed on platforms that don't support
+// them, such as the Android NDK. For example, ANGLE fails to link when building
+// within AOSP without them, since the STL functions don't exist.
namespace {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
template <typename T>
[[noreturn]] void Throw(const T& error) {
throw error;
-}
+}
#endif
} // namespace
void ThrowStdLogicError(const std::string& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::logic_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdLogicError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::logic_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdInvalidArgument(const std::string& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::invalid_argument(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdInvalidArgument(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::invalid_argument(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdDomainError(const std::string& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::domain_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdDomainError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::domain_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdLengthError(const std::string& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::length_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdLengthError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::length_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdOutOfRange(const std::string& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::out_of_range(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdOutOfRange(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::out_of_range(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdRuntimeError(const std::string& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::runtime_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdRuntimeError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::runtime_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdRangeError(const std::string& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::range_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdRangeError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::range_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdOverflowError(const std::string& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::overflow_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdOverflowError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::overflow_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdUnderflowError(const std::string& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::underflow_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdUnderflowError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::underflow_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
-}
-
-void ThrowStdBadFunctionCall() {
-#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::bad_function_call());
-#else
- std::abort();
-#endif
-}
-
-void ThrowStdBadAlloc() {
-#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::bad_alloc());
-#else
- std::abort();
-#endif
-}
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+
+void ThrowStdBadFunctionCall() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::bad_function_call());
+#else
+ std::abort();
+#endif
+}
+
+void ThrowStdBadAlloc() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::bad_alloc());
+#else
+ std::abort();
+#endif
+}
} // namespace base_internal
ABSL_NAMESPACE_END
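[Editor's note: these delegates let header-only containers raise the right exception, or log and abort when exceptions are disabled, without pulling <stdexcept> into every header. The FixedArray::at() hunk later in this diff uses exactly this shape; the sketch below mirrors it with a hypothetical free function At().]

    #include <cstddef>

    #include "absl/base/internal/throw_delegate.h"
    #include "absl/base/optimization.h"

    // Hypothetical bounds-checked accessor in the FixedArray::at() style:
    // the cold failure path is delegated, keeping the caller small.
    int At(const int* data, std::size_t size, std::size_t i) {
      if (ABSL_PREDICT_FALSE(i >= size)) {
        absl::base_internal::ThrowStdOutOfRange("At() failed bounds check");
      }
      return data[i];
    }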
diff --git a/contrib/restricted/abseil-cpp/absl/base/log_severity.h b/contrib/restricted/abseil-cpp/absl/base/log_severity.h
index 2236422462..760c91babd 100644
--- a/contrib/restricted/abseil-cpp/absl/base/log_severity.h
+++ b/contrib/restricted/abseil-cpp/absl/base/log_severity.h
@@ -36,7 +36,7 @@ ABSL_NAMESPACE_BEGIN
// such values to a defined severity level, however in some cases values other
// than the defined levels are useful for comparison.
//
-// Example:
+// Example:
//
// // Effectively disables all logging:
// SetMinLogLevel(static_cast<absl::LogSeverity>(100));
diff --git a/contrib/restricted/abseil-cpp/absl/base/macros.h b/contrib/restricted/abseil-cpp/absl/base/macros.h
index 3e085a916b..0d26bd5700 100644
--- a/contrib/restricted/abseil-cpp/absl/base/macros.h
+++ b/contrib/restricted/abseil-cpp/absl/base/macros.h
@@ -144,15 +144,15 @@ ABSL_NAMESPACE_END
#define ABSL_INTERNAL_RETHROW do {} while (false)
#endif // ABSL_HAVE_EXCEPTIONS
-// `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which
-// reaches one has undefined behavior, and the compiler may optimize
-// accordingly.
-#if defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
-#define ABSL_INTERNAL_UNREACHABLE __builtin_unreachable()
-#elif defined(_MSC_VER)
-#define ABSL_INTERNAL_UNREACHABLE __assume(0)
-#else
-#define ABSL_INTERNAL_UNREACHABLE
-#endif
-
+// `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which
+// reaches one has undefined behavior, and the compiler may optimize
+// accordingly.
+#if defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
+#define ABSL_INTERNAL_UNREACHABLE __builtin_unreachable()
+#elif defined(_MSC_VER)
+#define ABSL_INTERNAL_UNREACHABLE __assume(0)
+#else
+#define ABSL_INTERNAL_UNREACHABLE
+#endif
+
#endif // ABSL_BASE_MACROS_H_
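[Editor's note: the ABSL_INTERNAL_UNREACHABLE block above reduces to the compiler builtins used directly in the hypothetical sketch below; Mode and FileFlags are illustrative names.]

    #include <cstdlib>

    enum class Mode { kRead, kWrite };

    // Hypothetical use of an unreachable marker: every enumerator returns,
    // so falling off the switch is impossible and the compiler may assume so.
    int FileFlags(Mode m) {
      switch (m) {
        case Mode::kRead:  return 0;
        case Mode::kWrite: return 1;
      }
    #if defined(__GNUC__) || defined(__clang__)
      __builtin_unreachable();
    #else
      std::abort();
    #endif
    }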
diff --git a/contrib/restricted/abseil-cpp/absl/base/optimization.h b/contrib/restricted/abseil-cpp/absl/base/optimization.h
index d090be1286..d3eed93ab7 100644
--- a/contrib/restricted/abseil-cpp/absl/base/optimization.h
+++ b/contrib/restricted/abseil-cpp/absl/base/optimization.h
@@ -22,15 +22,15 @@
#ifndef ABSL_BASE_OPTIMIZATION_H_
#define ABSL_BASE_OPTIMIZATION_H_
-#include <assert.h>
-
+#include <assert.h>
+
#include "absl/base/config.h"
// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
//
-// Instructs the compiler to avoid optimizing tail-call recursion. This macro is
-// useful when you wish to preserve the existing function order within a stack
-// trace for logging, debugging, or profiling purposes.
+// Instructs the compiler to avoid optimizing tail-call recursion. This macro is
+// useful when you wish to preserve the existing function order within a stack
+// trace for logging, debugging, or profiling purposes.
//
// Example:
//
@@ -106,10 +106,10 @@
// Cacheline aligning objects properly allows constructive memory sharing and
// prevents destructive (or "false") memory sharing.
//
-// NOTE: callers should replace uses of this macro with `alignas()` using
+// NOTE: callers should replace uses of this macro with `alignas()` using
// `std::hardware_constructive_interference_size` and/or
-// `std::hardware_destructive_interference_size` when C++17 becomes available to
-// them.
+// `std::hardware_destructive_interference_size` when C++17 becomes available to
+// them.
//
// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
// for more information.
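[Editor's note: the constructive/destructive-sharing comment above is usually applied as in the hypothetical sketch below, which assumes a 64-byte cacheline; under C++17 the literal would be replaced by std::hardware_destructive_interference_size as the comment suggests.]

    #include <atomic>

    // Hypothetical example of avoiding false sharing: alignas(64) places
    // each counter on its own cacheline (64 bytes is a hardware assumption).
    struct alignas(64) PaddedCounter {
      std::atomic<long> value{0};
    };

    PaddedCounter per_thread_counters[8];  // no two counters share a line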
@@ -182,7 +182,7 @@
#endif
// ABSL_INTERNAL_ASSUME(cond)
-// Informs the compiler that a condition is always true and that it can assume
+// Informs the compiler that a condition is always true and that it can assume
// it to be true for optimization purposes. The call has undefined behavior if
// the condition is false.
// In !NDEBUG mode, the condition is checked with an assert().
@@ -219,7 +219,7 @@
// This macro forces small unique name on a static file level symbols like
// static local variables or static functions. This is intended to be used in
// macro definitions to optimize the cost of generated code. Do NOT use it on
-// symbols exported from translation unit since it may cause a link time
+// symbols exported from translation unit since it may cause a link time
// conflict.
//
// Example:
diff --git a/contrib/restricted/abseil-cpp/absl/base/options.h b/contrib/restricted/abseil-cpp/absl/base/options.h
index 56b4e36ee0..40429044c3 100644
--- a/contrib/restricted/abseil-cpp/absl/base/options.h
+++ b/contrib/restricted/abseil-cpp/absl/base/options.h
@@ -205,7 +205,7 @@
// be changed to a new, unique identifier name. In particular "head" is not
// allowed.
-#define ABSL_OPTION_USE_INLINE_NAMESPACE 1
+#define ABSL_OPTION_USE_INLINE_NAMESPACE 1
#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20211102
// ABSL_OPTION_HARDENED
diff --git a/contrib/restricted/abseil-cpp/absl/base/thread_annotations.h b/contrib/restricted/abseil-cpp/absl/base/thread_annotations.h
index 9695f6de67..acc61bf898 100644
--- a/contrib/restricted/abseil-cpp/absl/base/thread_annotations.h
+++ b/contrib/restricted/abseil-cpp/absl/base/thread_annotations.h
@@ -317,7 +317,7 @@ namespace base_internal {
// Takes a reference to a guarded data member, and returns an unguarded
// reference.
-// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead.
+// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead.
template <typename T>
inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
return v;
diff --git a/contrib/restricted/abseil-cpp/absl/city/ya.make b/contrib/restricted/abseil-cpp/absl/city/ya.make
index dffd2d3a70..4acc7109f0 100644
--- a/contrib/restricted/abseil-cpp/absl/city/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/city/ya.make
@@ -8,13 +8,13 @@ OWNER(g:cpp-contrib)
LICENSE(Apache-2.0)
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/log_severity
-)
-
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/log_severity
+)
+
ADDINCL(
GLOBAL contrib/restricted/abseil-cpp
)
diff --git a/contrib/restricted/abseil-cpp/absl/cleanup/cleanup.h b/contrib/restricted/abseil-cpp/absl/cleanup/cleanup.h
index 960ccd080e..8842d3b874 100644
--- a/contrib/restricted/abseil-cpp/absl/cleanup/cleanup.h
+++ b/contrib/restricted/abseil-cpp/absl/cleanup/cleanup.h
@@ -1,140 +1,140 @@
-// Copyright 2021 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// File: cleanup.h
-// -----------------------------------------------------------------------------
-//
-// `absl::Cleanup` implements the scope guard idiom, invoking the contained
-// callback's `operator()() &&` on scope exit.
-//
-// Example:
-//
-// ```
-// absl::Status CopyGoodData(const char* source_path, const char* sink_path) {
-// FILE* source_file = fopen(source_path, "r");
-// if (source_file == nullptr) {
-// return absl::NotFoundError("No source file"); // No cleanups execute
-// }
-//
-// // C++17 style cleanup using class template argument deduction
-// absl::Cleanup source_closer = [source_file] { fclose(source_file); };
-//
-// FILE* sink_file = fopen(sink_path, "w");
-// if (sink_file == nullptr) {
-// return absl::NotFoundError("No sink file"); // First cleanup executes
-// }
-//
-// // C++11 style cleanup using the factory function
-// auto sink_closer = absl::MakeCleanup([sink_file] { fclose(sink_file); });
-//
-// Data data;
-// while (ReadData(source_file, &data)) {
-// if (!data.IsGood()) {
-// absl::Status result = absl::FailedPreconditionError("Read bad data");
-// return result; // Both cleanups execute
-// }
-// SaveData(sink_file, &data);
-// }
-//
-// return absl::OkStatus(); // Both cleanups execute
-// }
-// ```
-//
-// Methods:
-//
-// `std::move(cleanup).Cancel()` will prevent the callback from executing.
-//
-// `std::move(cleanup).Invoke()` will execute the callback early, before
-// destruction, and prevent the callback from executing in the destructor.
-//
-// Usage:
-//
-// `absl::Cleanup` is not an interface type. It is only intended to be used
-// within the body of a function. It is not a value type and instead models a
-// control flow construct. Check out `defer` in Golang for something similar.
-
-#ifndef ABSL_CLEANUP_CLEANUP_H_
-#define ABSL_CLEANUP_CLEANUP_H_
-
-#include <utility>
-
-#include "absl/base/config.h"
-#include "absl/base/macros.h"
-#include "absl/cleanup/internal/cleanup.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-template <typename Arg, typename Callback = void()>
-class ABSL_MUST_USE_RESULT Cleanup final {
- static_assert(cleanup_internal::WasDeduced<Arg>(),
- "Explicit template parameters are not supported.");
-
- static_assert(cleanup_internal::ReturnsVoid<Callback>(),
- "Callbacks that return values are not supported.");
-
- public:
+// Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: cleanup.h
+// -----------------------------------------------------------------------------
+//
+// `absl::Cleanup` implements the scope guard idiom, invoking the contained
+// callback's `operator()() &&` on scope exit.
+//
+// Example:
+//
+// ```
+// absl::Status CopyGoodData(const char* source_path, const char* sink_path) {
+// FILE* source_file = fopen(source_path, "r");
+// if (source_file == nullptr) {
+// return absl::NotFoundError("No source file"); // No cleanups execute
+// }
+//
+// // C++17 style cleanup using class template argument deduction
+// absl::Cleanup source_closer = [source_file] { fclose(source_file); };
+//
+// FILE* sink_file = fopen(sink_path, "w");
+// if (sink_file == nullptr) {
+// return absl::NotFoundError("No sink file"); // First cleanup executes
+// }
+//
+// // C++11 style cleanup using the factory function
+// auto sink_closer = absl::MakeCleanup([sink_file] { fclose(sink_file); });
+//
+// Data data;
+// while (ReadData(source_file, &data)) {
+// if (!data.IsGood()) {
+// absl::Status result = absl::FailedPreconditionError("Read bad data");
+// return result; // Both cleanups execute
+// }
+// SaveData(sink_file, &data);
+// }
+//
+// return absl::OkStatus(); // Both cleanups execute
+// }
+// ```
+//
+// Methods:
+//
+// `std::move(cleanup).Cancel()` will prevent the callback from executing.
+//
+// `std::move(cleanup).Invoke()` will execute the callback early, before
+// destruction, and prevent the callback from executing in the destructor.
+//
+// Usage:
+//
+// `absl::Cleanup` is not an interface type. It is only intended to be used
+// within the body of a function. It is not a value type and instead models a
+// control flow construct. Check out `defer` in Golang for something similar.
+
+#ifndef ABSL_CLEANUP_CLEANUP_H_
+#define ABSL_CLEANUP_CLEANUP_H_
+
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/cleanup/internal/cleanup.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+template <typename Arg, typename Callback = void()>
+class ABSL_MUST_USE_RESULT Cleanup final {
+ static_assert(cleanup_internal::WasDeduced<Arg>(),
+ "Explicit template parameters are not supported.");
+
+ static_assert(cleanup_internal::ReturnsVoid<Callback>(),
+ "Callbacks that return values are not supported.");
+
+ public:
Cleanup(Callback callback) : storage_(std::move(callback)) {} // NOLINT
-
- Cleanup(Cleanup&& other) = default;
-
- void Cancel() && {
- ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
+
+ Cleanup(Cleanup&& other) = default;
+
+ void Cancel() && {
+ ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
storage_.DestroyCallback();
- }
-
- void Invoke() && {
- ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
- storage_.InvokeCallback();
+ }
+
+ void Invoke() && {
+ ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
+ storage_.InvokeCallback();
storage_.DestroyCallback();
- }
-
- ~Cleanup() {
- if (storage_.IsCallbackEngaged()) {
- storage_.InvokeCallback();
+ }
+
+ ~Cleanup() {
+ if (storage_.IsCallbackEngaged()) {
+ storage_.InvokeCallback();
storage_.DestroyCallback();
- }
- }
-
- private:
- cleanup_internal::Storage<Callback> storage_;
-};
-
-// `absl::Cleanup c = /* callback */;`
-//
-// C++17 type deduction API for creating an instance of `absl::Cleanup`
-#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
-template <typename Callback>
-Cleanup(Callback callback) -> Cleanup<cleanup_internal::Tag, Callback>;
-#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
-
-// `auto c = absl::MakeCleanup(/* callback */);`
-//
-// C++11 type deduction API for creating an instance of `absl::Cleanup`
-template <typename... Args, typename Callback>
-absl::Cleanup<cleanup_internal::Tag, Callback> MakeCleanup(Callback callback) {
- static_assert(cleanup_internal::WasDeduced<cleanup_internal::Tag, Args...>(),
- "Explicit template parameters are not supported.");
-
- static_assert(cleanup_internal::ReturnsVoid<Callback>(),
- "Callbacks that return values are not supported.");
-
- return {std::move(callback)};
-}
-
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CLEANUP_CLEANUP_H_
+ }
+ }
+
+ private:
+ cleanup_internal::Storage<Callback> storage_;
+};
+
+// `absl::Cleanup c = /* callback */;`
+//
+// C++17 type deduction API for creating an instance of `absl::Cleanup`
+#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+template <typename Callback>
+Cleanup(Callback callback) -> Cleanup<cleanup_internal::Tag, Callback>;
+#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+
+// `auto c = absl::MakeCleanup(/* callback */);`
+//
+// C++11 type deduction API for creating an instance of `absl::Cleanup`
+template <typename... Args, typename Callback>
+absl::Cleanup<cleanup_internal::Tag, Callback> MakeCleanup(Callback callback) {
+ static_assert(cleanup_internal::WasDeduced<cleanup_internal::Tag, Args...>(),
+ "Explicit template parameters are not supported.");
+
+ static_assert(cleanup_internal::ReturnsVoid<Callback>(),
+ "Callbacks that return values are not supported.");
+
+ return {std::move(callback)};
+}
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CLEANUP_CLEANUP_H_
diff --git a/contrib/restricted/abseil-cpp/absl/cleanup/internal/cleanup.h b/contrib/restricted/abseil-cpp/absl/cleanup/internal/cleanup.h
index 2783fcb7c1..fc90b7f6a2 100644
--- a/contrib/restricted/abseil-cpp/absl/cleanup/internal/cleanup.h
+++ b/contrib/restricted/abseil-cpp/absl/cleanup/internal/cleanup.h
@@ -1,52 +1,52 @@
-// Copyright 2021 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CLEANUP_INTERNAL_CLEANUP_H_
-#define ABSL_CLEANUP_INTERNAL_CLEANUP_H_
-
+// Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CLEANUP_INTERNAL_CLEANUP_H_
+#define ABSL_CLEANUP_INTERNAL_CLEANUP_H_
+
#include <new>
-#include <type_traits>
-#include <utility>
-
-#include "absl/base/internal/invoke.h"
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/invoke.h"
#include "absl/base/macros.h"
-#include "absl/base/thread_annotations.h"
-#include "absl/utility/utility.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-namespace cleanup_internal {
-
-struct Tag {};
-
-template <typename Arg, typename... Args>
-constexpr bool WasDeduced() {
- return (std::is_same<cleanup_internal::Tag, Arg>::value) &&
- (sizeof...(Args) == 0);
-}
-
-template <typename Callback>
-constexpr bool ReturnsVoid() {
- return (std::is_same<base_internal::invoke_result_t<Callback>, void>::value);
-}
-
-template <typename Callback>
-class Storage {
- public:
- Storage() = delete;
-
+#include "absl/base/thread_annotations.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace cleanup_internal {
+
+struct Tag {};
+
+template <typename Arg, typename... Args>
+constexpr bool WasDeduced() {
+ return (std::is_same<cleanup_internal::Tag, Arg>::value) &&
+ (sizeof...(Args) == 0);
+}
+
+template <typename Callback>
+constexpr bool ReturnsVoid() {
+ return (std::is_same<base_internal::invoke_result_t<Callback>, void>::value);
+}
+
+template <typename Callback>
+class Storage {
+ public:
+ Storage() = delete;
+
explicit Storage(Callback callback) {
// Placement-new into a character buffer is used for eager destruction when
// the cleanup is invoked or cancelled. To ensure this optimizes well, the
@@ -54,47 +54,47 @@ class Storage {
::new (GetCallbackBuffer()) Callback(std::move(callback));
is_callback_engaged_ = true;
}
-
+
Storage(Storage&& other) {
ABSL_HARDENING_ASSERT(other.IsCallbackEngaged());
-
+
::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback()));
is_callback_engaged_ = true;
other.DestroyCallback();
}
- Storage(const Storage& other) = delete;
-
- Storage& operator=(Storage&& other) = delete;
-
- Storage& operator=(const Storage& other) = delete;
-
+ Storage(const Storage& other) = delete;
+
+ Storage& operator=(Storage&& other) = delete;
+
+ Storage& operator=(const Storage& other) = delete;
+
void* GetCallbackBuffer() { return static_cast<void*>(+callback_buffer_); }
Callback& GetCallback() {
return *reinterpret_cast<Callback*>(GetCallbackBuffer());
}
- bool IsCallbackEngaged() const { return is_callback_engaged_; }
-
+ bool IsCallbackEngaged() const { return is_callback_engaged_; }
+
void DestroyCallback() {
is_callback_engaged_ = false;
GetCallback().~Callback();
}
-
- void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS {
+
+ void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS {
std::move(GetCallback())();
- }
-
- private:
- bool is_callback_engaged_;
+ }
+
+ private:
+ bool is_callback_engaged_;
alignas(Callback) char callback_buffer_[sizeof(Callback)];
-};
-
-} // namespace cleanup_internal
-
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_
+};
+
+} // namespace cleanup_internal
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_
diff --git a/contrib/restricted/abseil-cpp/absl/container/btree_map.h b/contrib/restricted/abseil-cpp/absl/container/btree_map.h
index f0a8d4a6a4..ba3bbc145b 100644
--- a/contrib/restricted/abseil-cpp/absl/container/btree_map.h
+++ b/contrib/restricted/abseil-cpp/absl/container/btree_map.h
@@ -384,8 +384,8 @@ class btree_map
// btree_map::equal_range()
//
- // Returns a half-open range [first, last), defined by a `std::pair` of two
- // iterators, containing all elements with the passed key in the `btree_map`.
+ // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the `btree_map`.
using Base::equal_range;
// btree_map::find()
@@ -731,7 +731,7 @@ class btree_multimap
// btree_multimap::equal_range()
//
- // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // Returns a half-open range [first, last), defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the
// `btree_multimap`.
using Base::equal_range;
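[Editor's note: a small, hypothetical usage sketch for the equal_range() wording touched above; PrintOnes is an illustrative name.]

    #include <cstdio>

    #include "absl/container/btree_map.h"

    // Hypothetical usage: equal_range() returns the half-open [first, last)
    // run of entries whose key compares equal to the argument.
    void PrintOnes() {
      absl::btree_multimap<int, const char*> m = {{1, "a"}, {1, "b"}, {2, "c"}};
      auto range = m.equal_range(1);
      for (auto it = range.first; it != range.second; ++it) {
        std::printf("%d -> %s\n", it->first, it->second);
      }
    }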
diff --git a/contrib/restricted/abseil-cpp/absl/container/fixed_array.h b/contrib/restricted/abseil-cpp/absl/container/fixed_array.h
index 839ba0bc16..5b23df9643 100644
--- a/contrib/restricted/abseil-cpp/absl/container/fixed_array.h
+++ b/contrib/restricted/abseil-cpp/absl/container/fixed_array.h
@@ -227,8 +227,8 @@ class FixedArray {
// FixedArray::at
//
- // Bounds-checked access. Returns a reference to the ith element of the fixed
- // array, or throws std::out_of_range
+ // Bounds-checked access. Returns a reference to the ith element of the fixed
+ // array, or throws std::out_of_range
reference at(size_type i) {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
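[Editor's note: a hypothetical use of the bounds-checked at() documented above; SumFirstTwo is an illustrative name.]

    #include "absl/container/fixed_array.h"

    // Hypothetical use of the bounds-checked accessor: at() throws
    // std::out_of_range (via the ThrowStdOutOfRange delegate seen earlier
    // in this diff) when the index is past size().
    int SumFirstTwo(int n) {
      absl::FixedArray<int> a(n, 0);  // n elements, all initialized to 0
      a.at(0) = 1;
      a.at(1) = 2;                    // throws if n < 2
      return a.at(0) + a.at(1);
    }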
diff --git a/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h
index 6b89da6571..63b014d4ce 100644
--- a/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h
@@ -324,7 +324,7 @@ class flat_hash_set
// flat_hash_set::merge()
//
- // Extracts elements from a given `source` flat hash set into this
+ // Extracts elements from a given `source` flat hash set into this
// `flat_hash_set`. If the destination `flat_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;
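[Editor's note: the merge() description above corresponds to node extraction; the hypothetical sketch below shows that elements with duplicate keys stay behind in the source set.]

    #include <string>

    #include "absl/container/flat_hash_set.h"

    // Hypothetical usage of merge(): nodes move from src into dst unless dst
    // already holds an equivalent key, in which case they remain in src.
    void MergeExample() {
      absl::flat_hash_set<std::string> dst = {"a", "b"};
      absl::flat_hash_set<std::string> src = {"b", "c"};
      dst.merge(src);
      // dst now holds {"a", "b", "c"}; src still holds "b".
    }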
diff --git a/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h b/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h
index df9e09917d..6a5b58e55a 100644
--- a/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h
@@ -174,13 +174,13 @@ class InlinedVector {
// provided `allocator`.
InlinedVector(const InlinedVector& other, const allocator_type& allocator)
: storage_(allocator) {
- if (other.empty()) {
- // Empty; nothing to do.
+ if (other.empty()) {
+ // Empty; nothing to do.
} else if (IsMemcpyOk<A>::value && !other.storage_.GetIsAllocated()) {
- // Memcpy-able and do not need allocation.
+ // Memcpy-able and do not need allocation.
storage_.MemcpyFrom(other.storage_);
} else {
- storage_.InitFrom(other.storage_);
+ storage_.InitFrom(other.storage_);
}
}
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make b/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make
index 1933289a6d..5a37c14c4a 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make
@@ -1,52 +1,52 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
contrib/restricted/abseil-cpp/absl/demangle
- contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/numeric
contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/synchronization
contrib/restricted/abseil-cpp/absl/synchronization/internal
contrib/restricted/abseil-cpp/absl/time
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/container/internal)
-
-SRCS(
+
+SRCS(
hashtablez_sampler.cc
hashtablez_sampler_force_weak_definition.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/btree.h b/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
index f636c5fc73..60d6979c0f 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
@@ -192,38 +192,38 @@ struct key_compare_to_adapter<std::greater<absl::Cord>> {
using type = StringBtreeDefaultGreater;
};
-// Detects an 'absl_btree_prefer_linear_node_search' member. This is
-// a protocol used as an opt-in or opt-out of linear search.
-//
-// For example, this would be useful for key types that wrap an integer
-// and define their own cheap operator<(). For example:
-//
-// class K {
-// public:
-// using absl_btree_prefer_linear_node_search = std::true_type;
-// ...
-// private:
-// friend bool operator<(K a, K b) { return a.k_ < b.k_; }
-// int k_;
-// };
-//
-// btree_map<K, V> m; // Uses linear search
-//
-// If T has the preference tag, then it has a preference.
-// Btree will use the tag's truth value.
-template <typename T, typename = void>
-struct has_linear_node_search_preference : std::false_type {};
-template <typename T, typename = void>
-struct prefers_linear_node_search : std::false_type {};
-template <typename T>
-struct has_linear_node_search_preference<
- T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
- : std::true_type {};
-template <typename T>
-struct prefers_linear_node_search<
- T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
- : T::absl_btree_prefer_linear_node_search {};
-
+// Detects an 'absl_btree_prefer_linear_node_search' member. This is
+// a protocol used as an opt-in or opt-out of linear search.
+//
+// For example, this would be useful for key types that wrap an integer
+// and define their own cheap operator<(). For example:
+//
+// class K {
+// public:
+// using absl_btree_prefer_linear_node_search = std::true_type;
+// ...
+// private:
+// friend bool operator<(K a, K b) { return a.k_ < b.k_; }
+// int k_;
+// };
+//
+// btree_map<K, V> m; // Uses linear search
+//
+// If T has the preference tag, then it has a preference.
+// Btree will use the tag's truth value.
+template <typename T, typename = void>
+struct has_linear_node_search_preference : std::false_type {};
+template <typename T, typename = void>
+struct prefers_linear_node_search : std::false_type {};
+template <typename T>
+struct has_linear_node_search_preference<
+ T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+ : std::true_type {};
+template <typename T>
+struct prefers_linear_node_search<
+ T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+ : T::absl_btree_prefer_linear_node_search {};
+
template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
bool Multi, typename SlotPolicy>
struct common_params {
@@ -250,23 +250,23 @@ struct common_params {
using reference = value_type &;
using const_reference = const value_type &;
- // For the given lookup key type, returns whether we can have multiple
- // equivalent keys in the btree. If this is a multi-container, then we can.
- // Otherwise, we can have multiple equivalent keys only if all of the
- // following conditions are met:
- // - The comparator is transparent.
- // - The lookup key type is not the same as key_type.
- // - The comparator is not a StringBtreeDefault{Less,Greater} comparator
- // that we know has the same equivalence classes for all lookup types.
- template <typename LookupKey>
- constexpr static bool can_have_multiple_equivalent_keys() {
- return Multi ||
- (IsTransparent<key_compare>::value &&
- !std::is_same<LookupKey, Key>::value &&
- !std::is_same<key_compare, StringBtreeDefaultLess>::value &&
- !std::is_same<key_compare, StringBtreeDefaultGreater>::value);
- }
-
+ // For the given lookup key type, returns whether we can have multiple
+ // equivalent keys in the btree. If this is a multi-container, then we can.
+ // Otherwise, we can have multiple equivalent keys only if all of the
+ // following conditions are met:
+ // - The comparator is transparent.
+ // - The lookup key type is not the same as key_type.
+ // - The comparator is not a StringBtreeDefault{Less,Greater} comparator
+ // that we know has the same equivalence classes for all lookup types.
+ template <typename LookupKey>
+ constexpr static bool can_have_multiple_equivalent_keys() {
+ return Multi ||
+ (IsTransparent<key_compare>::value &&
+ !std::is_same<LookupKey, Key>::value &&
+ !std::is_same<key_compare, StringBtreeDefaultLess>::value &&
+ !std::is_same<key_compare, StringBtreeDefaultGreater>::value);
+ }
+
enum {
kTargetNodeSize = TargetNodeSize,
@@ -452,7 +452,7 @@ struct SearchResult {
// useful information.
template <typename V>
struct SearchResult<V, false> {
- SearchResult() {}
+ SearchResult() {}
explicit SearchResult(V value) : value(value) {}
SearchResult(V value, MatchKind /*match*/) : value(value) {}
@@ -485,22 +485,22 @@ class btree_node {
using difference_type = typename Params::difference_type;
// Btree decides whether to use linear node search as follows:
- // - If the comparator expresses a preference, use that.
- // - If the key expresses a preference, use that.
+ // - If the comparator expresses a preference, use that.
+ // - If the key expresses a preference, use that.
// - If the key is arithmetic and the comparator is std::less or
// std::greater, choose linear.
// - Otherwise, choose binary.
// TODO(ezb): Might make sense to add condition(s) based on node-size.
using use_linear_search = std::integral_constant<
bool,
- has_linear_node_search_preference<key_compare>::value
- ? prefers_linear_node_search<key_compare>::value
- : has_linear_node_search_preference<key_type>::value
- ? prefers_linear_node_search<key_type>::value
- : std::is_arithmetic<key_type>::value &&
- (std::is_same<std::less<key_type>, key_compare>::value ||
- std::is_same<std::greater<key_type>,
- key_compare>::value)>;
+ has_linear_node_search_preference<key_compare>::value
+ ? prefers_linear_node_search<key_compare>::value
+ : has_linear_node_search_preference<key_type>::value
+ ? prefers_linear_node_search<key_type>::value
+ : std::is_arithmetic<key_type>::value &&
+ (std::is_same<std::less<key_type>, key_compare>::value ||
+ std::is_same<std::greater<key_type>,
+ key_compare>::value)>;
// This class is organized by absl::container_internal::Layout as if it had
// the following structure:
@@ -517,23 +517,23 @@ class btree_node {
// // is the same as the count of values.
// field_type finish;
// // The maximum number of values the node can hold. This is an integer in
- // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
+ // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
// // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
- // // nodes (even though there are still kNodeSlots values in the node).
+ // // nodes (even though there are still kNodeSlots values in the node).
// // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
// // to free extra bits for is_root, etc.
// field_type max_count;
//
// // The array of values. The capacity is `max_count` for leaf nodes and
- // // kNodeSlots for internal nodes. Only the values in
+ // // kNodeSlots for internal nodes. Only the values in
// // [start, finish) have been initialized and are valid.
// slot_type values[max_count];
//
// // The array of child pointers. The keys in children[i] are all less
// // than key(i). The keys in children[i + 1] are all greater than key(i).
- // // There are 0 children for leaf nodes and kNodeSlots + 1 children for
+ // // There are 0 children for leaf nodes and kNodeSlots + 1 children for
// // internal nodes.
- // btree_node *children[kNodeSlots + 1];
+ // btree_node *children[kNodeSlots + 1];
//
// This class is only constructed by EmptyNodeType. Normally, pointers to the
// layout above are allocated, cast to btree_node*, and de-allocated within
@@ -555,62 +555,62 @@ class btree_node {
private:
using layout_type = absl::container_internal::Layout<btree_node *, field_type,
slot_type, btree_node *>;
- constexpr static size_type SizeWithNSlots(size_type n) {
+ constexpr static size_type SizeWithNSlots(size_type n) {
return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
- /*slots*/ n,
+ /*slots*/ n,
/*children*/ 0)
.AllocSize();
}
// A lower bound for the overhead of fields other than values in a leaf node.
constexpr static size_type MinimumOverhead() {
- return SizeWithNSlots(1) - sizeof(value_type);
+ return SizeWithNSlots(1) - sizeof(value_type);
}
// Compute how many values we can fit onto a leaf node taking into account
// padding.
- constexpr static size_type NodeTargetSlots(const int begin, const int end) {
+ constexpr static size_type NodeTargetSlots(const int begin, const int end) {
return begin == end ? begin
- : SizeWithNSlots((begin + end) / 2 + 1) >
+ : SizeWithNSlots((begin + end) / 2 + 1) >
params_type::kTargetNodeSize
- ? NodeTargetSlots(begin, (begin + end) / 2)
- : NodeTargetSlots((begin + end) / 2 + 1, end);
+ ? NodeTargetSlots(begin, (begin + end) / 2)
+ : NodeTargetSlots((begin + end) / 2 + 1, end);
}
enum {
kTargetNodeSize = params_type::kTargetNodeSize,
- kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
+ kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
- // We need a minimum of 3 slots per internal node in order to perform
+ // We need a minimum of 3 slots per internal node in order to perform
// splitting (1 value for the two nodes involved in the split and 1 value
- // propagated to the parent as the delimiter for the split). For performance
- // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
- // of 1/3 (for a node, not a b-tree).
- kMinNodeSlots = 4,
-
- kNodeSlots =
- kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
-
+ // propagated to the parent as the delimiter for the split). For performance
+ // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
+ // of 1/3 (for a node, not a b-tree).
+ kMinNodeSlots = 4,
+
+ kNodeSlots =
+ kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
+
// The node is internal (i.e. is not a leaf node) if and only if `max_count`
// has this value.
kInternalNodeMaxCount = 0,
};
- // Leaves can have less than kNodeSlots values.
- constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
+ // Leaves can have less than kNodeSlots values.
+ constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
- /*slots*/ slot_count,
+ /*slots*/ slot_count,
/*children*/ 0);
}
constexpr static layout_type InternalLayout() {
return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
- /*slots*/ kNodeSlots,
- /*children*/ kNodeSlots + 1);
+ /*slots*/ kNodeSlots,
+ /*children*/ kNodeSlots + 1);
}
- constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
- return LeafLayout(slot_count).AllocSize();
+ constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
+ return LeafLayout(slot_count).AllocSize();
}
constexpr static size_type InternalSize() {
return InternalLayout().AllocSize();
@@ -667,10 +667,10 @@ class btree_node {
}
field_type max_count() const {
// Internal nodes have max_count==kInternalNodeMaxCount.
- // Leaf nodes have max_count in [1, kNodeSlots].
+ // Leaf nodes have max_count in [1, kNodeSlots].
const field_type max_count = GetField<1>()[3];
return max_count == field_type{kInternalNodeMaxCount}
- ? field_type{kNodeSlots}
+ ? field_type{kNodeSlots}
: max_count;
}
@@ -792,7 +792,7 @@ class btree_node {
SearchResult<int, true> binary_search_impl(
const K &k, int s, int e, const CompareTo &comp,
std::true_type /* IsCompareTo */) const {
- if (params_type::template can_have_multiple_equivalent_keys<K>()) {
+ if (params_type::template can_have_multiple_equivalent_keys<K>()) {
MatchKind exact_match = MatchKind::kNe;
while (s != e) {
const int mid = (s + e) >> 1;
@@ -803,14 +803,14 @@ class btree_node {
e = mid;
if (c == 0) {
// Need to return the first value whose key is not less than k,
- // which requires continuing the binary search if there could be
- // multiple equivalent keys.
+ // which requires continuing the binary search if there could be
+ // multiple equivalent keys.
exact_match = MatchKind::kEq;
}
}
}
return {s, exact_match};
- } else { // Can't have multiple equivalent keys.
+ } else { // Can't have multiple equivalent keys.
while (s != e) {
const int mid = (s + e) >> 1;
const absl::weak_ordering c = comp(key(mid), k);
@@ -860,12 +860,12 @@ class btree_node {
start_slot(), max_count * sizeof(slot_type));
}
void init_internal(btree_node *parent) {
- init_leaf(parent, kNodeSlots);
+ init_leaf(parent, kNodeSlots);
// Set `max_count` to a sentinel value to indicate that this node is
// internal.
set_max_count(kInternalNodeMaxCount);
absl::container_internal::SanitizerPoisonMemoryRegion(
- &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
+ &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
}
static void deallocate(const size_type size, btree_node *node,
@@ -943,7 +943,7 @@ struct btree_iterator {
using key_type = typename Node::key_type;
using size_type = typename Node::size_type;
using params_type = typename Node::params_type;
- using is_map_container = typename params_type::is_map_container;
+ using is_map_container = typename params_type::is_map_container;
using node_type = Node;
using normal_node = typename std::remove_const<Node>::type;
@@ -955,7 +955,7 @@ struct btree_iterator {
using slot_type = typename params_type::slot_type;
using iterator =
- btree_iterator<normal_node, normal_reference, normal_pointer>;
+ btree_iterator<normal_node, normal_reference, normal_pointer>;
using const_iterator =
btree_iterator<const_node, const_reference, const_pointer>;
@@ -972,19 +972,19 @@ struct btree_iterator {
btree_iterator(Node *n, int p) : node(n), position(p) {}
// NOTE: this SFINAE allows for implicit conversions from iterator to
- // const_iterator, but it specifically avoids hiding the copy constructor so
- // that the trivial one will be used when possible.
+ // const_iterator, but it specifically avoids hiding the copy constructor so
+ // that the trivial one will be used when possible.
template <typename N, typename R, typename P,
absl::enable_if_t<
std::is_same<btree_iterator<N, R, P>, iterator>::value &&
std::is_same<btree_iterator, const_iterator>::value,
int> = 0>
- btree_iterator(const btree_iterator<N, R, P> other) // NOLINT
+ btree_iterator(const btree_iterator<N, R, P> other) // NOLINT
: node(other.node), position(other.position) {}
private:
// This SFINAE allows explicit conversions from const_iterator to
- // iterator, but also avoids hiding the copy constructor.
+ // iterator, but also avoids hiding the copy constructor.
// NOTE: the const_cast is safe because this constructor is only called by
// non-const methods and the container owns the nodes.
template <typename N, typename R, typename P,
@@ -992,7 +992,7 @@ struct btree_iterator {
std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
std::is_same<btree_iterator, iterator>::value,
int> = 0>
- explicit btree_iterator(const btree_iterator<N, R, P> other)
+ explicit btree_iterator(const btree_iterator<N, R, P> other)
: node(const_cast<node_type *>(other.node)), position(other.position) {}
// Increment/decrement the iterator.
@@ -1055,8 +1055,8 @@ struct btree_iterator {
}
private:
- friend iterator;
- friend const_iterator;
+ friend iterator;
+ friend const_iterator;
template <typename Params>
friend class btree;
template <typename Tree>
@@ -1122,8 +1122,8 @@ class btree {
}
enum : uint32_t {
- kNodeSlots = node_type::kNodeSlots,
- kMinNodeValues = kNodeSlots / 2,
+ kNodeSlots = node_type::kNodeSlots,
+ kMinNodeValues = kNodeSlots / 2,
};
struct node_stats {
@@ -1154,8 +1154,8 @@ class btree {
using const_reference = typename Params::const_reference;
using pointer = typename Params::pointer;
using const_pointer = typename Params::const_pointer;
- using iterator =
- typename btree_iterator<node_type, reference, pointer>::iterator;
+ using iterator =
+ typename btree_iterator<node_type, reference, pointer>::iterator;
using const_iterator = typename iterator::const_iterator;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
@@ -1168,46 +1168,46 @@ class btree {
private:
// For use in copy_or_move_values_in_order.
const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
- value_type &&maybe_move_from_iterator(iterator it) {
- // This is a destructive operation on the other container so it's safe for
- // us to const_cast and move from the keys here even if it's a set.
- return std::move(const_cast<value_type &>(*it));
- }
+ value_type &&maybe_move_from_iterator(iterator it) {
+ // This is a destructive operation on the other container so it's safe for
+ // us to const_cast and move from the keys here even if it's a set.
+ return std::move(const_cast<value_type &>(*it));
+ }
// Copies or moves (depending on the template parameter) the values in
// other into this btree in their order in other. This btree must be empty
// before this method is called. This method is used in copy construction,
// copy assignment, and move assignment.
template <typename Btree>
- void copy_or_move_values_in_order(Btree &other);
+ void copy_or_move_values_in_order(Btree &other);
// Validates that various assumptions/requirements are true at compile time.
constexpr static bool static_assert_validation();
public:
- btree(const key_compare &comp, const allocator_type &alloc)
- : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
-
- btree(const btree &other) : btree(other, other.allocator()) {}
- btree(const btree &other, const allocator_type &alloc)
- : btree(other.key_comp(), alloc) {
- copy_or_move_values_in_order(other);
- }
+ btree(const key_compare &comp, const allocator_type &alloc)
+ : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
+
+ btree(const btree &other) : btree(other, other.allocator()) {}
+ btree(const btree &other, const allocator_type &alloc)
+ : btree(other.key_comp(), alloc) {
+ copy_or_move_values_in_order(other);
+ }
btree(btree &&other) noexcept
: root_(std::move(other.root_)),
rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
size_(absl::exchange(other.size_, 0)) {
other.mutable_root() = EmptyNode();
}
- btree(btree &&other, const allocator_type &alloc)
- : btree(other.key_comp(), alloc) {
- if (alloc == other.allocator()) {
- swap(other);
- } else {
- // Move values from `other` one at a time when allocators are different.
- copy_or_move_values_in_order(other);
- }
- }
+ btree(btree &&other, const allocator_type &alloc)
+ : btree(other.key_comp(), alloc) {
+ if (alloc == other.allocator()) {
+ swap(other);
+ } else {
+ // Move values from `other` one at a time when allocators are different.
+ copy_or_move_values_in_order(other);
+ }
+ }
~btree() {
// Put static_asserts in destructor to avoid triggering them before the type
@@ -1235,23 +1235,23 @@ class btree {
return const_reverse_iterator(begin());
}
- // Finds the first element whose key is not less than `key`.
+ // Finds the first element whose key is not less than `key`.
template <typename K>
iterator lower_bound(const K &key) {
- return internal_end(internal_lower_bound(key).value);
+ return internal_end(internal_lower_bound(key).value);
}
template <typename K>
const_iterator lower_bound(const K &key) const {
- return internal_end(internal_lower_bound(key).value);
+ return internal_end(internal_lower_bound(key).value);
}
- // Finds the first element whose key is not less than `key` and also returns
- // whether that element is equal to `key`.
- template <typename K>
- std::pair<iterator, bool> lower_bound_equal(const K &key) const;
-
- // Finds the first element whose key is greater than `key`.
+ // Finds the first element whose key is not less than `key` and also returns
+ // whether that element is equal to `key`.
template <typename K>
+ std::pair<iterator, bool> lower_bound_equal(const K &key) const;
+
+ // Finds the first element whose key is greater than `key`.
+ template <typename K>
iterator upper_bound(const K &key) {
return internal_end(internal_upper_bound(key));
}
@@ -1332,8 +1332,8 @@ class btree {
// to the element after the last erased element.
std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
- // Finds an element with key equivalent to `key` or returns `end()` if `key`
- // is not present.
+ // Finds an element with key equivalent to `key` or returns `end()` if `key`
+ // is not present.
template <typename K>
iterator find(const K &key) {
return internal_end(internal_find(key));
@@ -1407,14 +1407,14 @@ class btree {
}
}
- // The average number of bytes used per value stored in the btree assuming
- // random insertion order.
+ // The average number of bytes used per value stored in the btree assuming
+ // random insertion order.
static double average_bytes_per_value() {
- // The expected number of values per node with random insertion order is the
- // average of the maximum and minimum numbers of values per node.
- const double expected_values_per_node =
- (kNodeSlots + kMinNodeValues) / 2.0;
- return node_type::LeafSize() / expected_values_per_node;
+ // The expected number of values per node with random insertion order is the
+ // average of the maximum and minimum numbers of values per node.
+ const double expected_values_per_node =
+ (kNodeSlots + kMinNodeValues) / 2.0;
+ return node_type::LeafSize() / expected_values_per_node;
}
// The fullness of the btree. Computed as the number of elements in the btree
@@ -1424,7 +1424,7 @@ class btree {
// Returns 0 for empty trees.
double fullness() const {
if (empty()) return 0.0;
- return static_cast<double>(size()) / (nodes() * kNodeSlots);
+ return static_cast<double>(size()) / (nodes() * kNodeSlots);
}
// The overhead of the btree structure in bytes per node. Computed as the
// total number of bytes used by the btree minus the number of bytes used for
@@ -1474,7 +1474,7 @@ class btree {
}
node_type *new_leaf_node(node_type *parent) {
node_type *n = allocate(node_type::LeafSize());
- n->init_leaf(parent, kNodeSlots);
+ n->init_leaf(parent, kNodeSlots);
return n;
}
node_type *new_leaf_root_node(const int max_count) {
@@ -1534,8 +1534,8 @@ class btree {
// Internal routine which implements lower_bound().
template <typename K>
- SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
- const K &key) const;
+ SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
+ const K &key) const;
// Internal routine which implements upper_bound().
template <typename K>
@@ -1719,7 +1719,7 @@ template <typename P>
void btree_node<P>::split(const int insert_position, btree_node *dest,
allocator_type *alloc) {
assert(dest->count() == 0);
- assert(max_count() == kNodeSlots);
+ assert(max_count() == kNodeSlots);
// We bias the split based on the position being inserted. If we're
// inserting at the beginning of the left node then bias the split to put
@@ -1727,7 +1727,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
// right node then bias the split to put more values on the left node.
if (insert_position == start()) {
dest->set_finish(dest->start() + finish() - 1);
- } else if (insert_position == kNodeSlots) {
+ } else if (insert_position == kNodeSlots) {
dest->set_finish(dest->start());
} else {
dest->set_finish(dest->start() + count() / 2);
@@ -1798,7 +1798,7 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
// Navigate to the leftmost leaf under node, and then delete upwards.
while (!node->leaf()) node = node->start_child();
- // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
+ // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
// isn't guaranteed to be a valid `field_type`.
int pos = node->position();
btree_node *parent = node->parent();
@@ -1886,7 +1886,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
// btree methods
template <typename P>
template <typename Btree>
-void btree<P>::copy_or_move_values_in_order(Btree &other) {
+void btree<P>::copy_or_move_values_in_order(Btree &other) {
static_assert(std::is_same<btree, Btree>::value ||
std::is_same<const btree, Btree>::value,
"Btree type must be same or const.");
@@ -1894,11 +1894,11 @@ void btree<P>::copy_or_move_values_in_order(Btree &other) {
// We can avoid key comparisons because we know the order of the
// values is the same order we'll store them in.
- auto iter = other.begin();
- if (iter == other.end()) return;
+ auto iter = other.begin();
+ if (iter == other.end()) return;
insert_multi(maybe_move_from_iterator(iter));
++iter;
- for (; iter != other.end(); ++iter) {
+ for (; iter != other.end(); ++iter) {
// If the btree is not empty, we can just insert the new value at the end
// of the tree.
internal_emplace(end(), maybe_move_from_iterator(iter));
@@ -1917,7 +1917,7 @@ constexpr bool btree<P>::static_assert_validation() {
// Note: We assert that kTargetValues, which is computed from
// Params::kTargetNodeSize, must fit the node_type::field_type.
static_assert(
- kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
+ kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
"target node size too large");
// Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
@@ -1937,29 +1937,29 @@ constexpr bool btree<P>::static_assert_validation() {
}
template <typename P>
-template <typename K>
-auto btree<P>::lower_bound_equal(const K &key) const
- -> std::pair<iterator, bool> {
- const SearchResult<iterator, is_key_compare_to::value> res =
- internal_lower_bound(key);
- const iterator lower = iterator(internal_end(res.value));
- const bool equal = res.HasMatch()
- ? res.IsEq()
- : lower != end() && !compare_keys(key, lower.key());
- return {lower, equal};
+template <typename K>
+auto btree<P>::lower_bound_equal(const K &key) const
+ -> std::pair<iterator, bool> {
+ const SearchResult<iterator, is_key_compare_to::value> res =
+ internal_lower_bound(key);
+ const iterator lower = iterator(internal_end(res.value));
+ const bool equal = res.HasMatch()
+ ? res.IsEq()
+ : lower != end() && !compare_keys(key, lower.key());
+ return {lower, equal};
}
template <typename P>
template <typename K>
auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
- const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
- const iterator lower = lower_and_equal.first;
- if (!lower_and_equal.second) {
- return {lower, lower};
- }
+ const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
+ const iterator lower = lower_and_equal.first;
+ if (!lower_and_equal.second) {
+ return {lower, lower};
+ }
const iterator next = std::next(lower);
- if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+ if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
// The next iterator after lower must point to a key greater than `key`.
// Note: if this assert fails, then it may indicate that the comparator does
// not meet the equivalence requirements for Compare
@@ -1970,7 +1970,7 @@ auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
// Try once more to avoid the call to upper_bound() if there's only one
// equivalent key. This should prevent all calls to upper_bound() in cases of
// unique-containers with heterogeneous comparators in which all comparison
- // operators have the same equivalence classes.
+ // operators have the same equivalence classes.
if (next == end() || compare_keys(key, next.key())) return {lower, next};
// In this case, we need to call upper_bound() to avoid worst case O(N)
@@ -2101,7 +2101,7 @@ auto btree<P>::operator=(const btree &other) -> btree & {
*mutable_allocator() = other.allocator();
}
- copy_or_move_values_in_order(other);
+ copy_or_move_values_in_order(other);
}
return *this;
}
@@ -2131,7 +2131,7 @@ auto btree<P>::operator=(btree &&other) noexcept -> btree & {
// comparator while moving the values so we can't swap the key
// comparators.
*mutable_key_comp() = other.key_comp();
- copy_or_move_values_in_order(other);
+ copy_or_move_values_in_order(other);
}
}
}
@@ -2298,7 +2298,7 @@ void btree<P>::rebalance_or_split(iterator *iter) {
node_type *&node = iter->node;
int &insert_position = iter->position;
assert(node->count() == node->max_count());
- assert(kNodeSlots == node->max_count());
+ assert(kNodeSlots == node->max_count());
// First try to make room on the node by rebalancing.
node_type *parent = node->parent();
@@ -2306,17 +2306,17 @@ void btree<P>::rebalance_or_split(iterator *iter) {
if (node->position() > parent->start()) {
// Try rebalancing with our left sibling.
node_type *left = parent->child(node->position() - 1);
- assert(left->max_count() == kNodeSlots);
- if (left->count() < kNodeSlots) {
+ assert(left->max_count() == kNodeSlots);
+ if (left->count() < kNodeSlots) {
// We bias rebalancing based on the position being inserted. If we're
// inserting at the end of the right node then we bias rebalancing to
// fill up the left node.
- int to_move = (kNodeSlots - left->count()) /
- (1 + (insert_position < static_cast<int>(kNodeSlots)));
+ int to_move = (kNodeSlots - left->count()) /
+ (1 + (insert_position < static_cast<int>(kNodeSlots)));
to_move = (std::max)(1, to_move);
if (insert_position - to_move >= node->start() ||
- left->count() + to_move < static_cast<int>(kNodeSlots)) {
+ left->count() + to_move < static_cast<int>(kNodeSlots)) {
left->rebalance_right_to_left(to_move, node, mutable_allocator());
assert(node->max_count() - node->count() == to_move);
@@ -2335,17 +2335,17 @@ void btree<P>::rebalance_or_split(iterator *iter) {
if (node->position() < parent->finish()) {
// Try rebalancing with our right sibling.
node_type *right = parent->child(node->position() + 1);
- assert(right->max_count() == kNodeSlots);
- if (right->count() < kNodeSlots) {
+ assert(right->max_count() == kNodeSlots);
+ if (right->count() < kNodeSlots) {
// We bias rebalancing based on the position being inserted. If we're
// inserting at the beginning of the left node then we bias rebalancing
// to fill up the right node.
- int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
+ int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
(1 + (insert_position > node->start()));
to_move = (std::max)(1, to_move);
if (insert_position <= node->finish() - to_move ||
- right->count() + to_move < static_cast<int>(kNodeSlots)) {
+ right->count() + to_move < static_cast<int>(kNodeSlots)) {
node->rebalance_left_to_right(to_move, right, mutable_allocator());
if (insert_position > node->finish()) {
@@ -2361,8 +2361,8 @@ void btree<P>::rebalance_or_split(iterator *iter) {
// Rebalancing failed, make sure there is room on the parent node for a new
// value.
- assert(parent->max_count() == kNodeSlots);
- if (parent->count() == kNodeSlots) {
+ assert(parent->max_count() == kNodeSlots);
+ if (parent->count() == kNodeSlots) {
iterator parent_iter(node->parent(), node->position());
rebalance_or_split(&parent_iter);
}
@@ -2407,8 +2407,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
if (iter->node->position() > parent->start()) {
// Try merging with our left sibling.
node_type *left = parent->child(iter->node->position() - 1);
- assert(left->max_count() == kNodeSlots);
- if (1U + left->count() + iter->node->count() <= kNodeSlots) {
+ assert(left->max_count() == kNodeSlots);
+ if (1U + left->count() + iter->node->count() <= kNodeSlots) {
iter->position += 1 + left->count();
merge_nodes(left, iter->node);
iter->node = left;
@@ -2418,8 +2418,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
if (iter->node->position() < parent->finish()) {
// Try merging with our right sibling.
node_type *right = parent->child(iter->node->position() + 1);
- assert(right->max_count() == kNodeSlots);
- if (1U + iter->node->count() + right->count() <= kNodeSlots) {
+ assert(right->max_count() == kNodeSlots);
+ if (1U + iter->node->count() + right->count() <= kNodeSlots) {
merge_nodes(iter->node, right);
return true;
}
@@ -2500,12 +2500,12 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
allocator_type *alloc = mutable_allocator();
if (iter.node->count() == max_count) {
// Make room in the leaf for the new item.
- if (max_count < kNodeSlots) {
+ if (max_count < kNodeSlots) {
// Insertion into the root where the root is smaller than the full node
// size. Simply grow the size of the root node.
assert(iter.node == root());
iter.node =
- new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
+ new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
// Transfer the values from the old root to the new root.
node_type *old_root = root();
node_type *new_root = iter.node;
@@ -2552,27 +2552,27 @@ inline auto btree<P>::internal_locate(const K &key) const
template <typename P>
template <typename K>
-auto btree<P>::internal_lower_bound(const K &key) const
- -> SearchResult<iterator, is_key_compare_to::value> {
- if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
- SearchResult<iterator, is_key_compare_to::value> ret = internal_locate(key);
- ret.value = internal_last(ret.value);
- return ret;
- }
+auto btree<P>::internal_lower_bound(const K &key) const
+ -> SearchResult<iterator, is_key_compare_to::value> {
+ if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+ SearchResult<iterator, is_key_compare_to::value> ret = internal_locate(key);
+ ret.value = internal_last(ret.value);
+ return ret;
+ }
iterator iter(const_cast<node_type *>(root()));
- SearchResult<int, is_key_compare_to::value> res;
- bool seen_eq = false;
+ SearchResult<int, is_key_compare_to::value> res;
+ bool seen_eq = false;
for (;;) {
- res = iter.node->lower_bound(key, key_comp());
- iter.position = res.value;
+ res = iter.node->lower_bound(key, key_comp());
+ iter.position = res.value;
if (iter.node->leaf()) {
break;
}
- seen_eq = seen_eq || res.IsEq();
+ seen_eq = seen_eq || res.IsEq();
iter.node = iter.node->child(iter.position);
}
- if (res.IsEq()) return {iter, MatchKind::kEq};
- return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
+ if (res.IsEq()) return {iter, MatchKind::kEq};
+ return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
}
template <typename P>
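The 'absl_btree_prefer_linear_node_search' protocol described in the comments above can be exercised with a small wrapper key type; a minimal sketch, where the type name WrappedKey and the stored values are illustrative only:

    #include <cstdio>
    #include <type_traits>
    #include "absl/container/btree_map.h"

    // Wraps an int with a cheap operator< and opts in to linear node search,
    // following the protocol documented in btree.h above.
    class WrappedKey {
     public:
      using absl_btree_prefer_linear_node_search = std::true_type;
      explicit WrappedKey(int k) : k_(k) {}

     private:
      friend bool operator<(WrappedKey a, WrappedKey b) { return a.k_ < b.k_; }
      int k_;
    };

    int main() {
      absl::btree_map<WrappedKey, const char*> m;  // nodes use linear search
      m.emplace(WrappedKey(1), "one");
      m.emplace(WrappedKey(2), "two");
      std::printf("size=%zu\n", m.size());
      return 0;
    }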
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h b/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
index a99668c713..83c411a6e2 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
@@ -24,7 +24,7 @@
#include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/btree.h" // IWYU pragma: export
#include "absl/container/internal/common.h"
-#include "absl/memory/memory.h"
+#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
namespace absl {
@@ -70,21 +70,21 @@ class btree_container {
explicit btree_container(const key_compare &comp,
const allocator_type &alloc = allocator_type())
: tree_(comp, alloc) {}
- explicit btree_container(const allocator_type &alloc)
- : tree_(key_compare(), alloc) {}
-
- btree_container(const btree_container &other)
- : btree_container(other, absl::allocator_traits<allocator_type>::
- select_on_container_copy_construction(
- other.get_allocator())) {}
- btree_container(const btree_container &other, const allocator_type &alloc)
- : tree_(other.tree_, alloc) {}
-
- btree_container(btree_container &&other) noexcept(
- std::is_nothrow_move_constructible<Tree>::value) = default;
- btree_container(btree_container &&other, const allocator_type &alloc)
- : tree_(std::move(other.tree_), alloc) {}
-
+ explicit btree_container(const allocator_type &alloc)
+ : tree_(key_compare(), alloc) {}
+
+ btree_container(const btree_container &other)
+ : btree_container(other, absl::allocator_traits<allocator_type>::
+ select_on_container_copy_construction(
+ other.get_allocator())) {}
+ btree_container(const btree_container &other, const allocator_type &alloc)
+ : tree_(other.tree_, alloc) {}
+
+ btree_container(btree_container &&other) noexcept(
+ std::is_nothrow_move_constructible<Tree>::value) = default;
+ btree_container(btree_container &&other, const allocator_type &alloc)
+ : tree_(std::move(other.tree_), alloc) {}
+
btree_container &operator=(const btree_container &other) = default;
btree_container &operator=(btree_container &&other) noexcept(
std::is_nothrow_move_assignable<Tree>::value) = default;
@@ -105,11 +105,11 @@ class btree_container {
// Lookup routines.
template <typename K = key_type>
- size_type count(const key_arg<K> &key) const {
- auto equal_range = this->equal_range(key);
- return std::distance(equal_range.first, equal_range.second);
- }
- template <typename K = key_type>
+ size_type count(const key_arg<K> &key) const {
+ auto equal_range = this->equal_range(key);
+ return std::distance(equal_range.first, equal_range.second);
+ }
+ template <typename K = key_type>
iterator find(const key_arg<K> &key) {
return tree_.find(key);
}
@@ -158,11 +158,11 @@ class btree_container {
iterator erase(const_iterator first, const_iterator last) {
return tree_.erase_range(iterator(first), iterator(last)).second;
}
- template <typename K = key_type>
- size_type erase(const key_arg<K> &key) {
- auto equal_range = this->equal_range(key);
- return tree_.erase_range(equal_range.first, equal_range.second).first;
- }
+ template <typename K = key_type>
+ size_type erase(const key_arg<K> &key) {
+ auto equal_range = this->equal_range(key);
+ return tree_.erase_range(equal_range.first, equal_range.second).first;
+ }
// Extract routines.
node_type extract(iterator position) {
@@ -259,7 +259,7 @@ class btree_set_container : public btree_container<Tree> {
using super_type::super_type;
btree_set_container() {}
- // Range constructors.
+ // Range constructors.
template <class InputIterator>
btree_set_container(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(),
@@ -267,19 +267,19 @@ class btree_set_container : public btree_container<Tree> {
: super_type(comp, alloc) {
insert(b, e);
}
- template <class InputIterator>
- btree_set_container(InputIterator b, InputIterator e,
- const allocator_type &alloc)
- : btree_set_container(b, e, key_compare(), alloc) {}
+ template <class InputIterator>
+ btree_set_container(InputIterator b, InputIterator e,
+ const allocator_type &alloc)
+ : btree_set_container(b, e, key_compare(), alloc) {}
- // Initializer list constructors.
+ // Initializer list constructors.
btree_set_container(std::initializer_list<init_type> init,
const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: btree_set_container(init.begin(), init.end(), comp, alloc) {}
- btree_set_container(std::initializer_list<init_type> init,
- const allocator_type &alloc)
- : btree_set_container(init.begin(), init.end(), alloc) {}
+ btree_set_container(std::initializer_list<init_type> init,
+ const allocator_type &alloc)
+ : btree_set_container(init.begin(), init.end(), alloc) {}
// Insertion routines.
std::pair<iterator, bool> insert(const value_type &v) {
@@ -341,10 +341,10 @@ class btree_set_container : public btree_container<Tree> {
// Node extraction routines.
template <typename K = key_type>
node_type extract(const key_arg<K> &key) {
- const std::pair<iterator, bool> lower_and_equal =
- this->tree_.lower_bound_equal(key);
- return lower_and_equal.second ? extract(lower_and_equal.first)
- : node_type();
+ const std::pair<iterator, bool> lower_and_equal =
+ this->tree_.lower_bound_equal(key);
+ return lower_and_equal.second ? extract(lower_and_equal.first)
+ : node_type();
}
using super_type::extract;
@@ -389,7 +389,7 @@ template <typename Tree>
class btree_map_container : public btree_set_container<Tree> {
using super_type = btree_set_container<Tree>;
using params_type = typename Tree::params_type;
- friend class BtreeNodePeer;
+ friend class BtreeNodePeer;
private:
template <class K>
@@ -554,7 +554,7 @@ class btree_multiset_container : public btree_container<Tree> {
using super_type::super_type;
btree_multiset_container() {}
- // Range constructors.
+ // Range constructors.
template <class InputIterator>
btree_multiset_container(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(),
@@ -562,19 +562,19 @@ class btree_multiset_container : public btree_container<Tree> {
: super_type(comp, alloc) {
insert(b, e);
}
- template <class InputIterator>
- btree_multiset_container(InputIterator b, InputIterator e,
- const allocator_type &alloc)
- : btree_multiset_container(b, e, key_compare(), alloc) {}
+ template <class InputIterator>
+ btree_multiset_container(InputIterator b, InputIterator e,
+ const allocator_type &alloc)
+ : btree_multiset_container(b, e, key_compare(), alloc) {}
- // Initializer list constructors.
+ // Initializer list constructors.
btree_multiset_container(std::initializer_list<init_type> init,
const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
- btree_multiset_container(std::initializer_list<init_type> init,
- const allocator_type &alloc)
- : btree_multiset_container(init.begin(), init.end(), alloc) {}
+ btree_multiset_container(std::initializer_list<init_type> init,
+ const allocator_type &alloc)
+ : btree_multiset_container(init.begin(), init.end(), alloc) {}
// Insertion routines.
iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
@@ -623,10 +623,10 @@ class btree_multiset_container : public btree_container<Tree> {
// Node extraction routines.
template <typename K = key_type>
node_type extract(const key_arg<K> &key) {
- const std::pair<iterator, bool> lower_and_equal =
- this->tree_.lower_bound_equal(key);
- return lower_and_equal.second ? extract(lower_and_equal.first)
- : node_type();
+ const std::pair<iterator, bool> lower_and_equal =
+ this->tree_.lower_bound_equal(key);
+ return lower_and_equal.second ? extract(lower_and_equal.first)
+ : node_type();
}
using super_type::extract;
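As the container code above shows, count() and erase(key) are both expressed in terms of equal_range(); a minimal sketch of the observable behaviour, assuming the public absl::btree_multiset API:

    #include <cstdio>
    #include "absl/container/btree_set.h"

    int main() {
      absl::btree_multiset<int> s = {1, 2, 2, 2, 3};
      // count(): the distance between the two ends of equal_range().
      std::printf("count(2) = %zu\n", s.count(2));  // 3
      // erase(key): erases the whole equivalent range, returns how many.
      std::printf("erased   = %zu\n", s.erase(2));  // 3
      std::printf("size     = %zu\n", s.size());    // 2
      return 0;
    }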
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
index 40cce0479e..f1c72287ef 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
@@ -70,7 +70,7 @@ void HashtablezInfo::PrepareForSampling() {
total_probe_length.store(0, std::memory_order_relaxed);
hashes_bitwise_or.store(0, std::memory_order_relaxed);
hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
- hashes_bitwise_xor.store(0, std::memory_order_relaxed);
+ hashes_bitwise_xor.store(0, std::memory_order_relaxed);
max_reserve.store(0, std::memory_order_relaxed);
create_time = absl::Now();
@@ -93,9 +93,9 @@ static bool ShouldForceSampling() {
if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
if (state == kUninitialized) {
- state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
- ? kForce
- : kDontForce;
+ state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
+ ? kForce
+ : kDontForce;
global_state.store(state, std::memory_order_relaxed);
}
return state == kForce;
@@ -154,7 +154,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
- info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
+ info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
info->max_probe_length.store(
std::max(info->max_probe_length.load(std::memory_order_relaxed),
probe_length),
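The sampler above folds every inserted hash into running bitwise summaries using relaxed atomics; a standalone sketch of that accumulation pattern (the HashSummary type and Record name are illustrative, not the HashtablezInfo fields themselves):

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    struct HashSummary {
      std::atomic<std::size_t> bitwise_and{~std::size_t{}};
      std::atomic<std::size_t> bitwise_or{0};
      std::atomic<std::size_t> bitwise_xor{0};

      void Record(std::size_t hash) {
        // Same ordering as RecordInsertSlow above: relaxed is enough because
        // these are statistics, not synchronization.
        bitwise_and.fetch_and(hash, std::memory_order_relaxed);
        bitwise_or.fetch_or(hash, std::memory_order_relaxed);
        bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
      }
    };

    int main() {
      HashSummary s;
      for (std::size_t h : {std::size_t{0x12}, std::size_t{0x34}, std::size_t{0x56}}) {
        s.Record(h);
      }
      std::printf("and=%zx or=%zx xor=%zx\n", s.bitwise_and.load(),
                  s.bitwise_or.load(), s.bitwise_xor.load());
      return 0;
    }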
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
index 91fcdb34a3..0064307c9a 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
@@ -79,7 +79,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
std::atomic<size_t> total_probe_length;
std::atomic<size_t> hashes_bitwise_or;
std::atomic<size_t> hashes_bitwise_and;
- std::atomic<size_t> hashes_bitwise_xor;
+ std::atomic<size_t> hashes_bitwise_xor;
std::atomic<size_t> max_reserve;
// All of the fields below are set by `PrepareForSampling`, they must not be
@@ -272,7 +272,7 @@ void SetHashtablezMaxSamples(int32_t max);
// initialization of static storage duration objects.
// The definition of this constant is weak, which allows us to inject a
// different value for it at link time.
-extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
+extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
index ed35a7eec3..2166c3f189 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -21,8 +21,8 @@ ABSL_NAMESPACE_BEGIN
namespace container_internal {
// See hashtablez_sampler.h for details.
-extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(
- AbslContainerInternalSampleEverything)() {
+extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(
+ AbslContainerInternalSampleEverything)() {
return false;
}
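Because the definition above is weak, a program can force sampling of every hashtable by linking in a strong definition; a minimal sketch of such an override translation unit (this file is hypothetical, not part of the vendored sources):

    #include "absl/container/internal/hashtablez_sampler.h"

    // Strong definition: at link time it takes precedence over the
    // ABSL_ATTRIBUTE_WEAK default shown above, so every hashtable is sampled.
    extern "C" bool ABSL_INTERNAL_C_SYMBOL(
        AbslContainerInternalSampleEverything)() {
      return true;
    }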
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
index 1d7d6cda72..34a78e0498 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
@@ -36,13 +36,13 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace inlined_vector_internal {
-// GCC does not deal very well with the below code
-#if !defined(__clang__) && defined(__GNUC__)
-#pragma GCC diagnostic push
+// GCC does not deal very well with the below code
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
-
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
template <typename A>
using AllocatorTraits = std::allocator_traits<A>;
template <typename A>
@@ -110,7 +110,7 @@ struct Allocation {
Pointer<A> data;
SizeType<A> capacity;
};
-
+
template <typename A,
bool IsOverAligned =
(alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
@@ -119,13 +119,13 @@ struct MallocAdapter {
return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
requested_capacity};
}
-
+
static void Deallocate(A& allocator, Pointer<A> pointer,
SizeType<A> capacity) {
AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
}
};
-
+
template <typename A, typename ValueAdapter>
void ConstructElements(NoTypeDeduction<A>& allocator,
Pointer<A> construct_first, ValueAdapter& values,
@@ -303,14 +303,14 @@ class Storage {
: metadata_(allocator, /* size and is_allocated */ 0) {}
~Storage() {
- if (GetSizeAndIsAllocated() == 0) {
- // Empty and not allocated; nothing to do.
+ if (GetSizeAndIsAllocated() == 0) {
+ // Empty and not allocated; nothing to do.
} else if (IsMemcpyOk<A>::value) {
- // No destructors need to be run; just deallocate if necessary.
- DeallocateIfAllocated();
- } else {
- DestroyContents();
- }
+ // No destructors need to be run; just deallocate if necessary.
+ DeallocateIfAllocated();
+ } else {
+ DestroyContents();
+ }
}
// ---------------------------------------------------------------------------
@@ -364,8 +364,8 @@ class Storage {
// Storage Member Mutators
// ---------------------------------------------------------------------------
- ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
-
+ ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
+
template <typename ValueAdapter>
void Initialize(ValueAdapter values, SizeType<A> new_size);
@@ -441,8 +441,8 @@ class Storage {
}
private:
- ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
-
+ ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
+
using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;
struct Allocated {
@@ -459,51 +459,51 @@ class Storage {
Inlined inlined;
};
- template <typename... Args>
+ template <typename... Args>
ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
-
+
Metadata metadata_;
Data data_;
};
template <typename T, size_t N, typename A>
-void Storage<T, N, A>::DestroyContents() {
+void Storage<T, N, A>::DestroyContents() {
Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
DestroyElements<A>(GetAllocator(), data, GetSize());
- DeallocateIfAllocated();
-}
-
-template <typename T, size_t N, typename A>
-void Storage<T, N, A>::InitFrom(const Storage& other) {
+ DeallocateIfAllocated();
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::InitFrom(const Storage& other) {
const SizeType<A> n = other.GetSize();
- assert(n > 0); // Empty sources handled handled in caller.
+ assert(n > 0); // Empty sources handled handled in caller.
ConstPointer<A> src;
Pointer<A> dst;
- if (!other.GetIsAllocated()) {
- dst = GetInlinedData();
- src = other.GetInlinedData();
- } else {
- // Because this is only called from the `InlinedVector` constructors, it's
- // safe to take on the allocation with size `0`. If `ConstructElements(...)`
- // throws, deallocation will be automatically handled by `~Storage()`.
+ if (!other.GetIsAllocated()) {
+ dst = GetInlinedData();
+ src = other.GetInlinedData();
+ } else {
+ // Because this is only called from the `InlinedVector` constructors, it's
+ // safe to take on the allocation with size `0`. If `ConstructElements(...)`
+ // throws, deallocation will be automatically handled by `~Storage()`.
SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
Allocation<A> allocation =
MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
SetAllocation(allocation);
dst = allocation.data;
- src = other.GetAllocatedData();
- }
+ src = other.GetAllocatedData();
+ }
if (IsMemcpyOk<A>::value) {
std::memcpy(reinterpret_cast<char*>(dst),
reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
- } else {
+ } else {
auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
ConstructElements<A>(GetAllocator(), dst, values, n);
- }
- GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
-}
-
-template <typename T, size_t N, typename A>
+ }
+ GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
-> void {
@@ -585,20 +585,20 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
Pointer<A> const base = storage_view.data;
const SizeType<A> size = storage_view.size;
A& alloc = GetAllocator();
- if (new_size <= size) {
- // Destroy extra old elements.
+ if (new_size <= size) {
+ // Destroy extra old elements.
DestroyElements<A>(alloc, base + new_size, size - new_size);
- } else if (new_size <= storage_view.capacity) {
- // Construct new elements in place.
+ } else if (new_size <= storage_view.capacity) {
+ // Construct new elements in place.
ConstructElements<A>(alloc, base + size, values, new_size - size);
- } else {
- // Steps:
- // a. Allocate new backing store.
- // b. Construct new elements in new backing store.
-    //  c. Move existing elements from old backing store to new backing store.
- // d. Destroy all elements in old backing store.
- // Use transactional wrappers for the first two steps so we can roll
- // back if necessary due to exceptions.
+ } else {
+ // Steps:
+ // a. Allocate new backing store.
+ // b. Construct new elements in new backing store.
+    //  c. Move existing elements from old backing store to new backing store.
+ // d. Destroy all elements in old backing store.
+ // Use transactional wrappers for the first two steps so we can roll
+ // back if necessary due to exceptions.
AllocationTransaction<A> allocation_tx(alloc);
SizeType<A> requested_capacity =
ComputeCapacity(storage_view.capacity, new_size);
@@ -717,20 +717,20 @@ template <typename... Args>
auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
StorageView<A> storage_view = MakeStorageView();
const SizeType<A> n = storage_view.size;
- if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
- // Fast path; new element fits.
+ if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
+ // Fast path; new element fits.
Pointer<A> last_ptr = storage_view.data + n;
AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
std::forward<Args>(args)...);
- AddSize(1);
- return *last_ptr;
- }
- // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
- return EmplaceBackSlow(std::forward<Args>(args)...);
-}
-
-template <typename T, size_t N, typename A>
-template <typename... Args>
+ AddSize(1);
+ return *last_ptr;
+ }
+ // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
+ return EmplaceBackSlow(std::forward<Args>(args)...);
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
StorageView<A> storage_view = MakeStorageView();
AllocationTransaction<A> allocation_tx(GetAllocator());
@@ -740,24 +740,24 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
Pointer<A> last_ptr = construct_data + storage_view.size;
- // Construct new element.
+ // Construct new element.
AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
std::forward<Args>(args)...);
- // Move elements from old backing store to new backing store.
- ABSL_INTERNAL_TRY {
+ // Move elements from old backing store to new backing store.
+ ABSL_INTERNAL_TRY {
ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
storage_view.size);
}
- ABSL_INTERNAL_CATCH_ANY {
+ ABSL_INTERNAL_CATCH_ANY {
AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
- ABSL_INTERNAL_RETHROW;
- }
- // Destroy elements in old backing store.
+ ABSL_INTERNAL_RETHROW;
+ }
+ // Destroy elements in old backing store.
DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
- DeallocateIfAllocated();
+ DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
- SetIsAllocated();
+ SetIsAllocated();
AddSize(1);
return *last_ptr;
}
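
The EmplaceBackSlow() hunk above constructs the new element in the fresh allocation before moving anything, so a throwing move constructor leaves the original vector intact. A minimal standalone sketch of the same allocate / construct-new / move-old / destroy-old / publish sequence, using std::allocator and plain try/catch instead of Abseil's AllocationTransaction and internal try macros (all names below are illustrative, not Abseil's):

    #include <cstddef>
    #include <memory>
    #include <new>

    template <typename T>
    void emplace_back_slow(T*& data, std::size_t& size, std::size_t& capacity,
                           const T& value) {
      std::allocator<T> alloc;
      std::size_t new_capacity = capacity ? capacity * 2 : 1;   // stand-in for ComputeCapacity()
      T* new_data = alloc.allocate(new_capacity);               // allocate new backing store
      ::new (static_cast<void*>(new_data + size)) T(value);     // construct the new element first
      try {
        std::uninitialized_move(data, data + size, new_data);   // move old elements across
      } catch (...) {
        (new_data + size)->~T();                                // roll back the new element
        alloc.deallocate(new_data, new_capacity);
        throw;
      }
      std::destroy(data, data + size);                          // destroy old elements
      if (data) alloc.deallocate(data, capacity);
      data = new_data;                                          // publish the new allocation
      capacity = new_capacity;
      ++size;
    }
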
@@ -921,10 +921,10 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
}
// End ignore "array-bounds" and "maybe-uninitialized"
-#if !defined(__clang__) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
-
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
} // namespace inlined_vector_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/layout.h b/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
index a59a243059..23d44d7793 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
@@ -404,7 +404,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t Offset() const {
static_assert(N < NumOffsets, "Index out of bounds");
return adl_barrier::Align(
- Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
ElementAlignment<N>::value);
}
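
The Offset<N>() recursion above, together with AllocSize() in the next hunk, is just alignment arithmetic. A hedged worked example on a typical 64-bit target (alignof(double) == 8), assuming the documented absl::container_internal::Layout interface in which the constructor takes the per-type element counts:

    #include <cstdint>
    #include "absl/container/internal/layout.h"

    using absl::container_internal::Layout;

    // Three int32_t followed by two double:
    //   Offset<0>() == 0
    //   Offset<1>() == Align(0 + sizeof(int32_t) * 3, alignof(double)) == Align(12, 8) == 16
    //   AllocSize() == Offset<1>() + sizeof(double) * 2 == 16 + 16 == 32
    constexpr Layout<std::int32_t, double> layout(3, 2);
    static_assert(layout.Offset<1>() == 16, "4 bytes of padding before the doubles");
    static_assert(layout.AllocSize() == 32, "total bytes needed for one allocation");
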
@@ -597,7 +597,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t AllocSize() const {
static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
return Offset<NumTypes - 1>() +
- SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
+ SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
}
// If built with --config=asan, poisons padding bytes (if any) in the
@@ -621,7 +621,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
size_t start =
- Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
}
#endif
@@ -645,7 +645,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// produce "unsigned*" where another produces "unsigned int *".
std::string DebugString() const {
const auto offsets = Offsets();
- const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
+ const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
const std::string types[] = {
adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
index 687bcb8a4d..eea9f6ee4e 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
@@ -33,7 +33,7 @@ constexpr size_t Group::kWidth;
// Returns "random" seed.
inline size_t RandomSeed() {
-#ifdef ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local size_t counter = 0;
size_t value = ++counter;
#else // ABSL_HAVE_THREAD_LOCAL
@@ -51,17 +51,17 @@ bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
assert(ctrl[capacity] == ctrl_t::kSentinel);
- assert(IsValidCapacity(capacity));
+ assert(IsValidCapacity(capacity));
for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
- Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
- }
- // Copy the cloned ctrl bytes.
+ Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+ }
+ // Copy the cloned ctrl bytes.
std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
ctrl[capacity] = ctrl_t::kSentinel;
-}
+}
// Extern template instantiation for inline function.
template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
-
+
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
index 12682b3532..24cf740296 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -125,7 +125,7 @@
#include "absl/container/internal/have_sse.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"
namespace absl {
@@ -199,9 +199,9 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
}
template <typename T>
-uint32_t TrailingZeros(T x) {
- ABSL_INTERNAL_ASSUME(x != 0);
- return countr_zero(x);
+uint32_t TrailingZeros(T x) {
+ ABSL_INTERNAL_ASSUME(x != 0);
+ return countr_zero(x);
}
// An abstraction over a bitmask. It provides an easy way to iterate through the
@@ -231,24 +231,24 @@ class BitMask {
}
explicit operator bool() const { return mask_ != 0; }
int operator*() const { return LowestBitSet(); }
- uint32_t LowestBitSet() const {
+ uint32_t LowestBitSet() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
- uint32_t HighestBitSet() const {
- return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
+ uint32_t HighestBitSet() const {
+ return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
}
BitMask begin() const { return *this; }
BitMask end() const { return BitMask(0); }
- uint32_t TrailingZeros() const {
+ uint32_t TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
- uint32_t LeadingZeros() const {
+ uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return countl_zero(mask_ << extra_bits) >> Shift;
+ return countl_zero(mask_ << extra_bits) >> Shift;
}
private:
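
The >> Shift in LowestBitSet() / HighestBitSet() / TrailingZeros() above is what lets one BitMask type serve both the SSE2 group (one bit per slot, Shift == 0) and the portable group, where each slot owns a whole byte and Shift == 3 — the latter value is an assumption about the portable specialization, which is not part of this hunk. The arithmetic, checked with the C++20 <bit> equivalents of absl's countr_zero:

    #include <bit>
    #include <cstdint>

    // Shift == 0: bit i corresponds directly to slot i.
    static_assert((std::countr_zero(std::uint32_t{0b0010'0000}) >> 0) == 5);
    // Shift == 3: any set bit inside byte 5 (bits 40..47) maps back to slot 5.
    static_assert((std::countr_zero(std::uint64_t{1} << 47) >> 3) == 5);
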
@@ -384,8 +384,8 @@ struct GroupSse2Impl {
// Returns the number of trailing empty or deleted elements in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
- return TrailingZeros(static_cast<uint32_t>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
+ return TrailingZeros(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
@@ -480,23 +480,23 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// DELETED -> EMPTY
// EMPTY -> EMPTY
// FULL -> DELETED
-void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
inline size_t NormalizeCapacity(size_t n) {
- return n ? ~size_t{} >> countl_zero(n) : 1;
+ return n ? ~size_t{} >> countl_zero(n) : 1;
}
-// General notes on capacity/growth methods below:
-// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
-// average of two empty slots per group.
-// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
-// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
-// never need to probe (the whole table fits in one group) so we don't need a
-// load factor less than 1.
-
-// Given `capacity` of the table, returns the size (i.e. number of full slots)
-// at which we should grow the capacity.
+// General notes on capacity/growth methods below:
+// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
+// average of two empty slots per group.
+// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
+// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
+// never need to probe (the whole table fits in one group) so we don't need a
+// load factor less than 1.
+
+// Given `capacity` of the table, returns the size (i.e. number of full slots)
+// at which we should grow the capacity.
inline size_t CapacityToGrowth(size_t capacity) {
assert(IsValidCapacity(capacity));
// `capacity*7/8`
@@ -507,7 +507,7 @@ inline size_t CapacityToGrowth(size_t capacity) {
return capacity - capacity / 8;
}
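
Worked numbers for CapacityToGrowth() above and NormalizeCapacity() from the previous hunk, assuming a 64-bit size_t and the 16-wide group (the re-declarations below simply mirror the bodies shown in this diff so the arithmetic can be checked in isolation):

    #include <bit>
    #include <cstddef>

    constexpr std::size_t NormalizeCapacity(std::size_t n) {
      return n ? ~std::size_t{} >> std::countl_zero(n) : 1;
    }
    constexpr std::size_t CapacityToGrowth(std::size_t capacity) {
      return capacity - capacity / 8;   // the Group::kWidth == 16 branch
    }

    static_assert(NormalizeCapacity(5) == 7);     // countl_zero(5) == 61, ~0 >> 61 == 0b111
    static_assert(NormalizeCapacity(16) == 31);   // 16 is not of the form 2^k - 1, round up
    static_assert(CapacityToGrowth(15) == 14);    // one slot in 16 is kept empty
    static_assert(CapacityToGrowth(127) == 112);  // 127 - 127/8 == 112
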
// From desired "growth" to a lowerbound of the necessary capacity.
-// Might not be a valid one and requires NormalizeCapacity().
+// Might not be a valid one and requires NormalizeCapacity().
inline size_t GrowthToLowerboundCapacity(size_t growth) {
// `growth*8/7`
if (Group::kWidth == 8 && growth == 7) {
@@ -545,66 +545,66 @@ inline void AssertIsValid(ctrl_t* ctrl) {
"been erased, or the table might have rehashed.");
}
-struct FindInfo {
- size_t offset;
- size_t probe_length;
-};
-
-// The representation of the object has two modes:
-// - small: For capacities < kWidth-1
-// - large: For the rest.
-//
-// Differences:
-// - In small mode we are able to use the whole capacity. The extra control
-// bytes give us at least one "empty" control byte to stop the iteration.
-// This is important to make 1 a valid capacity.
-//
-// - In small mode only the first `capacity()` control bytes after the
+struct FindInfo {
+ size_t offset;
+ size_t probe_length;
+};
+
+// The representation of the object has two modes:
+// - small: For capacities < kWidth-1
+// - large: For the rest.
+//
+// Differences:
+// - In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
+//
+// - In small mode only the first `capacity()` control bytes after the
// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not
-// represent a real slot. This is important to take into account on
-// find_first_non_full(), where we never try ShouldInsertBackwards() for
-// small tables.
-inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
-
+// represent a real slot. This is important to take into account on
+// find_first_non_full(), where we never try ShouldInsertBackwards() for
+// small tables.
+inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
- size_t capacity) {
- return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
-}
-
-// Probes the raw_hash_set with the probe sequence for hash and returns the
-// pointer to the first empty or deleted slot.
+ size_t capacity) {
+ return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+}
+
+// Probes the raw_hash_set with the probe sequence for hash and returns the
+// pointer to the first empty or deleted slot.
// NOTE: this function must work with tables having both ctrl_t::kEmpty and
// ctrl_t::kDeleted in one group. Such tables appear during
// drop_deletes_without_resize.
-//
-// This function is very useful when insertions happen and:
-// - the input is already a set
-// - there are enough slots
-// - the element with the hash is not in the table
+//
+// This function is very useful when insertions happen and:
+// - the input is already a set
+// - there are enough slots
+// - the element with the hash is not in the table
template <typename = void>
inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
- size_t capacity) {
- auto seq = probe(ctrl, hash, capacity);
- while (true) {
- Group g{ctrl + seq.offset()};
- auto mask = g.MatchEmptyOrDeleted();
- if (mask) {
-#if !defined(NDEBUG)
- // We want to add entropy even when ASLR is not enabled.
- // In debug build we will randomly insert in either the front or back of
- // the group.
- // TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
- return {seq.offset(mask.HighestBitSet()), seq.index()};
- }
-#endif
- return {seq.offset(mask.LowestBitSet()), seq.index()};
- }
- seq.next();
+ size_t capacity) {
+ auto seq = probe(ctrl, hash, capacity);
+ while (true) {
+ Group g{ctrl + seq.offset()};
+ auto mask = g.MatchEmptyOrDeleted();
+ if (mask) {
+#if !defined(NDEBUG)
+ // We want to add entropy even when ASLR is not enabled.
+ // In debug build we will randomly insert in either the front or back of
+ // the group.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
+ return {seq.offset(mask.HighestBitSet()), seq.index()};
+ }
+#endif
+ return {seq.offset(mask.LowestBitSet()), seq.index()};
+ }
+ seq.next();
assert(seq.index() <= capacity && "full table!");
- }
-}
-
+ }
+}
+
// Extern template for an inline function, to keep the possibility of inlining.
// When the compiler decides not to inline, no symbols will be added to the
// corresponding translation unit.
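
One concrete consequence of the small/large split described above, assuming the 16-wide group: valid capacities are of the form 2^k - 1, so only 1, 3 and 7 qualify as small, and that is exactly why the debug-only ShouldInsertBackwards() branch inside find_first_non_full() is guarded with !is_small(capacity). A self-contained check (kWidth is hard-coded here as an assumption):

    #include <cstddef>

    constexpr std::size_t kWidth = 16;   // Group::kWidth for the SSE2 group
    constexpr bool IsValidCapacity(std::size_t n) { return ((n + 1) & n) == 0 && n > 0; }
    constexpr bool is_small(std::size_t capacity) { return capacity < kWidth - 1; }

    static_assert(IsValidCapacity(7) && is_small(7));     // whole table fits in one group
    static_assert(IsValidCapacity(15) && !is_small(15));  // first capacity that really probes
    static_assert(!IsValidCapacity(16));                  // capacities must be 2^k - 1
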
@@ -872,8 +872,8 @@ class raw_hash_set {
explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
- : ctrl_(EmptyGroup()),
- settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
+ : ctrl_(EmptyGroup()),
+ settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count);
initialize_slots();
@@ -982,11 +982,11 @@ class raw_hash_set {
// than a full `insert`.
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
- auto target = find_first_non_full(ctrl_, hash, capacity_);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
sizeof(slot_type));
emplace_at(target.offset, v);
- infoz().RecordInsert(hash, target.probe_length);
+ infoz().RecordInsert(hash, target.probe_length);
}
size_ = that.size();
growth_left() -= that.size();
@@ -1003,24 +1003,24 @@ class raw_hash_set {
// Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
- settings_(absl::exchange(that.growth_left(), 0),
- absl::exchange(that.infoz(), HashtablezInfoHandle()),
- that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
+ settings_(absl::exchange(that.growth_left(), 0),
+ absl::exchange(that.infoz(), HashtablezInfoHandle()),
+ that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: ctrl_(EmptyGroup()),
slots_(nullptr),
size_(0),
capacity_(0),
- settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
- a) {
+ settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
+ a) {
if (a == that.alloc_ref()) {
std::swap(ctrl_, that.ctrl_);
std::swap(slots_, that.slots_);
std::swap(size_, that.size_);
std::swap(capacity_, that.capacity_);
std::swap(growth_left(), that.growth_left());
- std::swap(infoz(), that.infoz());
+ std::swap(infoz(), that.infoz());
} else {
reserve(that.size());
// Note: this will copy elements of dense_set and unordered_set instead of
@@ -1093,7 +1093,7 @@ class raw_hash_set {
reset_growth_left();
}
assert(empty());
- infoz().RecordStorageChanged(0, capacity_);
+ infoz().RecordStorageChanged(0, capacity_);
}
// This overload kicks in when the argument is an rvalue of insertable and
@@ -1166,7 +1166,7 @@ class raw_hash_set {
template <class InputIt>
void insert(InputIt first, InputIt last) {
- for (; first != last; ++first) emplace(*first);
+ for (; first != last; ++first) emplace(*first);
}
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
@@ -1193,9 +1193,9 @@ class raw_hash_set {
}
iterator insert(const_iterator, node_type&& node) {
- auto res = insert(std::move(node));
- node = std::move(res.node);
- return res.position;
+ auto res = insert(std::move(node));
+ node = std::move(res.node);
+ return res.position;
}
// This overload kicks in if we can deduce the key from args. This enables us
@@ -1385,7 +1385,7 @@ class raw_hash_set {
swap(growth_left(), that.growth_left());
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
- swap(infoz(), that.infoz());
+ swap(infoz(), that.infoz());
SwapAlloc(alloc_ref(), that.alloc_ref(),
typename AllocTraits::propagate_on_container_swap{});
}
@@ -1394,7 +1394,7 @@ class raw_hash_set {
if (n == 0 && capacity_ == 0) return;
if (n == 0 && size_ == 0) {
destroy_slots();
- infoz().RecordStorageChanged(0, 0);
+ infoz().RecordStorageChanged(0, 0);
infoz().RecordClearedReservation();
return;
}
@@ -1412,16 +1412,16 @@ class raw_hash_set {
}
}
- void reserve(size_t n) {
+ void reserve(size_t n) {
if (n > size() + growth_left()) {
size_t m = GrowthToLowerboundCapacity(n);
- resize(NormalizeCapacity(m));
+ resize(NormalizeCapacity(m));
// This is after resize, to ensure that we have completed the allocation
// and have potentially sampled the hashtable.
infoz().RecordReservation(n);
- }
- }
+ }
+ }
// Extension API: support for heterogeneous keys.
//
@@ -1447,7 +1447,7 @@ class raw_hash_set {
(void)key;
#if defined(__GNUC__)
prefetch_heap_block();
- auto seq = probe(ctrl_, hash_ref()(key), capacity_);
+ auto seq = probe(ctrl_, hash_ref()(key), capacity_);
__builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
__builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
#endif // __GNUC__
@@ -1462,7 +1462,7 @@ class raw_hash_set {
// called heterogeneous key support.
template <class K = key_type>
iterator find(const key_arg<K>& key, size_t hash) {
- auto seq = probe(ctrl_, hash, capacity_);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1626,7 +1626,7 @@ class raw_hash_set {
SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
capacity_, ctrl_, slots_, sizeof(slot_type));
growth_left() += was_never_full;
- infoz().RecordErase();
+ infoz().RecordErase();
}
void initialize_slots() {
@@ -1654,7 +1654,7 @@ class raw_hash_set {
mem + SlotOffset(capacity_, alignof(slot_type)));
ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
reset_growth_left();
- infoz().RecordStorageChanged(size_, capacity_);
+ infoz().RecordStorageChanged(size_, capacity_);
}
void destroy_slots() {
@@ -1690,7 +1690,7 @@ class raw_hash_set {
if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i));
- auto target = find_first_non_full(ctrl_, hash, capacity_);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
size_t new_i = target.offset;
total_probe_length += target.probe_length;
SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
@@ -1704,12 +1704,12 @@ class raw_hash_set {
&alloc_ref(), old_ctrl,
AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
}
- infoz().RecordRehash(total_probe_length);
+ infoz().RecordRehash(total_probe_length);
}
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
- assert(!is_small(capacity_));
+ assert(!is_small(capacity_));
// Algorithm:
// - mark all DELETED slots as EMPTY
// - mark all FULL slots as DELETED
@@ -1770,7 +1770,7 @@ class raw_hash_set {
}
}
reset_growth_left();
- infoz().RecordRehash(total_probe_length);
+ infoz().RecordRehash(total_probe_length);
}
void rehash_and_grow_if_necessary() {
@@ -1829,7 +1829,7 @@ class raw_hash_set {
bool has_element(const value_type& elem) const {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
- auto seq = probe(ctrl_, hash, capacity_);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1861,7 +1861,7 @@ class raw_hash_set {
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
prefetch_heap_block();
auto hash = hash_ref()(key);
- auto seq = probe(ctrl_, hash, capacity_);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1878,17 +1878,17 @@ class raw_hash_set {
}
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
- auto target = find_first_non_full(ctrl_, hash, capacity_);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
!IsDeleted(ctrl_[target.offset]))) {
rehash_and_grow_if_necessary();
- target = find_first_non_full(ctrl_, hash, capacity_);
+ target = find_first_non_full(ctrl_, hash, capacity_);
}
++size_;
growth_left() -= IsEmpty(ctrl_[target.offset]);
SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
sizeof(slot_type));
- infoz().RecordInsert(hash, target.probe_length);
+ infoz().RecordInsert(hash, target.probe_length);
return target.offset;
}
@@ -1931,15 +1931,15 @@ class raw_hash_set {
#endif // __GNUC__
}
- HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
+ HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
- hasher& hash_ref() { return settings_.template get<2>(); }
- const hasher& hash_ref() const { return settings_.template get<2>(); }
- key_equal& eq_ref() { return settings_.template get<3>(); }
- const key_equal& eq_ref() const { return settings_.template get<3>(); }
- allocator_type& alloc_ref() { return settings_.template get<4>(); }
+ hasher& hash_ref() { return settings_.template get<2>(); }
+ const hasher& hash_ref() const { return settings_.template get<2>(); }
+ key_equal& eq_ref() { return settings_.template get<3>(); }
+ const key_equal& eq_ref() const { return settings_.template get<3>(); }
+ allocator_type& alloc_ref() { return settings_.template get<4>(); }
const allocator_type& alloc_ref() const {
- return settings_.template get<4>();
+ return settings_.template get<4>();
}
// TODO(alkis): Investigate removing some of these fields:
@@ -1949,11 +1949,11 @@ class raw_hash_set {
slot_type* slots_ = nullptr; // [capacity * slot_type]
size_t size_ = 0; // number of full slots
size_t capacity_ = 0; // total number of slots
- absl::container_internal::CompressedTuple<size_t /* growth_left */,
- HashtablezInfoHandle, hasher,
+ absl::container_internal::CompressedTuple<size_t /* growth_left */,
+ HashtablezInfoHandle, hasher,
key_equal, allocator_type>
- settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
- allocator_type{}};
+ settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
+ allocator_type{}};
};
// Erases all elements that satisfy the predicate `pred` from the container `c`.
@@ -1978,7 +1978,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
const typename Set::key_type& key) {
size_t num_probes = 0;
size_t hash = set.hash_ref()(key);
- auto seq = probe(set.ctrl_, hash, set.capacity_);
+ auto seq = probe(set.ctrl_, hash, set.capacity_);
while (true) {
container_internal::Group g{set.ctrl_ + seq.offset()};
for (int i : g.Match(container_internal::H2(hash))) {
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make
index 3fe7e7b5c0..28951c5549 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make
@@ -1,29 +1,29 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
contrib/restricted/abseil-cpp/absl/demangle
contrib/restricted/abseil-cpp/absl/hash
- contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/numeric
contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/synchronization
contrib/restricted/abseil-cpp/absl/synchronization/internal
@@ -31,25 +31,25 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
contrib/restricted/abseil-cpp/absl/types
- contrib/restricted/abseil-cpp/absl/types/bad_optional_access
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+ contrib/restricted/abseil-cpp/absl/types/bad_optional_access
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/container/internal)
-
-SRCS(
+
+SRCS(
raw_hash_set.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
index c1d20f3c52..f1f7369ff3 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
@@ -16,7 +16,7 @@
#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
#include <algorithm>
-#include <unordered_map>
+#include <unordered_map>
#include <vector>
#include "gmock/gmock.h"
diff --git a/contrib/restricted/abseil-cpp/absl/container/node_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/node_hash_set.h
index 93b15f4681..707a583f2d 100644
--- a/contrib/restricted/abseil-cpp/absl/container/node_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/node_hash_set.h
@@ -18,7 +18,7 @@
//
// An `absl::node_hash_set<T>` is an unordered associative container designed to
// be a more efficient replacement for `std::unordered_set`. Like
-// `unordered_set`, search, insertion, and deletion of set elements can be done
+// `unordered_set`, search, insertion, and deletion of set elements can be done
// as an `O(1)` operation. However, `node_hash_set` (and other unordered
// associative containers known as the collection of Abseil "Swiss tables")
// contain other optimizations that result in both memory and computation
@@ -60,7 +60,7 @@ struct NodeHashSetPolicy;
// following notable differences:
//
// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
-// `insert()`, provided that the set is given a compatible heterogeneous
+// `insert()`, provided that the set is given a compatible heterogeneous
// hashing function and equality operator.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
@@ -76,13 +76,13 @@ struct NodeHashSetPolicy;
// Example:
//
// // Create a node hash set of three strings
-// absl::node_hash_set<std::string> ducks =
+// absl::node_hash_set<std::string> ducks =
// {"huey", "dewey", "louie"};
//
-// // Insert a new element into the node hash set
-// ducks.insert("donald");
+// // Insert a new element into the node hash set
+// ducks.insert("donald");
//
-// // Force a rehash of the node hash set
+// // Force a rehash of the node hash set
// ducks.rehash(0);
//
// // See if "dewey" is present
@@ -100,7 +100,7 @@ class node_hash_set
public:
// Constructors and Assignment Operators
//
- // A node_hash_set supports the same overload set as `std::unordered_set`
+ // A node_hash_set supports the same overload set as `std::unordered_set`
// for construction and assignment:
//
// * Default constructor
@@ -167,7 +167,7 @@ class node_hash_set
// available within the `node_hash_set`.
//
// NOTE: this member function is particular to `absl::node_hash_set` and is
- // not provided in the `std::unordered_set` API.
+ // not provided in the `std::unordered_set` API.
using Base::capacity;
// node_hash_set::empty()
@@ -208,7 +208,7 @@ class node_hash_set
// `void`.
//
// NOTE: this return behavior is different than that of STL containers in
- // general and `std::unordered_set` in particular.
+ // general and `std::unordered_set` in particular.
//
// iterator erase(const_iterator first, const_iterator last):
//
@@ -314,7 +314,7 @@ class node_hash_set
// node_hash_set::merge()
//
- // Extracts elements from a given `source` node hash set into this
+ // Extracts elements from a given `source` node hash set into this
// `node_hash_set`. If the destination `node_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;
@@ -322,15 +322,15 @@ class node_hash_set
// node_hash_set::swap(node_hash_set& other)
//
// Exchanges the contents of this `node_hash_set` with those of the `other`
- // node hash set, avoiding invocation of any move, copy, or swap operations on
+ // node hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `node_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
- // `swap()` requires that the node hash set's hashing and key equivalence
+ // `swap()` requires that the node hash set's hashing and key equivalence
// functions be Swappable, and are exchanged using unqualified calls to
- // non-member `swap()`. If the set's allocator has
+ // non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
@@ -385,14 +385,14 @@ class node_hash_set
// node_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `node_hash_set`. Note that
- // because a node hash set contains all elements within its internal storage,
+ // because a node hash set contains all elements within its internal storage,
// this value simply equals the current capacity of the `node_hash_set`.
using Base::bucket_count;
// node_hash_set::load_factor()
//
// Returns the current load factor of the `node_hash_set` (the average number
- // of slots occupied with a value within the hash set).
+ // of slots occupied with a value within the hash set).
using Base::load_factor;
// node_hash_set::max_load_factor()
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler.cc b/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler.cc
index 689e5979e7..9eece4ded2 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler.cc
@@ -21,7 +21,7 @@
#ifdef _WIN32
#include <windows.h>
#else
-#include <sched.h>
+#include <sched.h>
#include <unistd.h>
#endif
@@ -136,8 +136,8 @@ static bool SetupAlternateStackOnce() {
#else
const size_t page_mask = sysconf(_SC_PAGESIZE) - 1;
#endif
- size_t stack_size =
- (std::max<size_t>(SIGSTKSZ, 65536) + page_mask) & ~page_mask;
+ size_t stack_size =
+ (std::max<size_t>(SIGSTKSZ, 65536) + page_mask) & ~page_mask;
#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER)
// Account for sanitizer instrumentation requiring additional stack space.
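
The stack_size expression a few lines up is the standard round-up-to-a-page-boundary idiom; a small illustration assuming 4 KiB pages (at run time the mask comes from sysconf, and sanitizer builds add more on top, as the comment above notes):

    #include <cstddef>

    constexpr std::size_t page_mask = 4096 - 1;   // assumed page size
    constexpr std::size_t kFloor = 65536;         // the std::max<size_t>(SIGSTKSZ, 65536) floor

    // (x + page_mask) & ~page_mask rounds x up to the next multiple of the page size.
    static_assert(((kFloor + page_mask) & ~page_mask) == 65536);      // already aligned
    static_assert(((kFloor + 1 + page_mask) & ~page_mask) == 69632);  // 65537 -> 17 pages
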
@@ -221,24 +221,24 @@ static void WriteToStderr(const char* data) {
absl::raw_logging_internal::SafeWriteToStderr(data, strlen(data));
}
-static void WriteSignalMessage(int signo, int cpu,
- void (*writerfn)(const char*)) {
- char buf[96];
- char on_cpu[32] = {0};
- if (cpu != -1) {
- snprintf(on_cpu, sizeof(on_cpu), " on cpu %d", cpu);
- }
+static void WriteSignalMessage(int signo, int cpu,
+ void (*writerfn)(const char*)) {
+ char buf[96];
+ char on_cpu[32] = {0};
+ if (cpu != -1) {
+ snprintf(on_cpu, sizeof(on_cpu), " on cpu %d", cpu);
+ }
const char* const signal_string =
debugging_internal::FailureSignalToString(signo);
if (signal_string != nullptr && signal_string[0] != '\0') {
- snprintf(buf, sizeof(buf), "*** %s received at time=%ld%s ***\n",
+ snprintf(buf, sizeof(buf), "*** %s received at time=%ld%s ***\n",
signal_string,
- static_cast<long>(time(nullptr)), // NOLINT(runtime/int)
- on_cpu);
+ static_cast<long>(time(nullptr)), // NOLINT(runtime/int)
+ on_cpu);
} else {
- snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld%s ***\n",
- signo, static_cast<long>(time(nullptr)), // NOLINT(runtime/int)
- on_cpu);
+ snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld%s ***\n",
+ signo, static_cast<long>(time(nullptr)), // NOLINT(runtime/int)
+ on_cpu);
}
writerfn(buf);
}
@@ -278,10 +278,10 @@ ABSL_ATTRIBUTE_NOINLINE static void WriteStackTrace(
// Called by AbslFailureSignalHandler() to write the failure info. It is
// called once with writerfn set to WriteToStderr() and then possibly
// with writerfn set to the user provided function.
-static void WriteFailureInfo(int signo, void* ucontext, int cpu,
+static void WriteFailureInfo(int signo, void* ucontext, int cpu,
void (*writerfn)(const char*)) {
WriterFnStruct writerfn_struct{writerfn};
- WriteSignalMessage(signo, cpu, writerfn);
+ WriteSignalMessage(signo, cpu, writerfn);
WriteStackTrace(ucontext, fsh_options.symbolize_stacktrace, WriterFnWrapper,
&writerfn_struct);
}
@@ -343,14 +343,14 @@ static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) {
}
}
- // Increase the chance that the CPU we report was the same CPU on which the
- // signal was received by doing this as early as possible, i.e. after
- // verifying that this is not a recursive signal handler invocation.
- int my_cpu = -1;
-#ifdef ABSL_HAVE_SCHED_GETCPU
- my_cpu = sched_getcpu();
-#endif
-
+ // Increase the chance that the CPU we report was the same CPU on which the
+ // signal was received by doing this as early as possible, i.e. after
+ // verifying that this is not a recursive signal handler invocation.
+ int my_cpu = -1;
+#ifdef ABSL_HAVE_SCHED_GETCPU
+ my_cpu = sched_getcpu();
+#endif
+
#ifdef ABSL_HAVE_ALARM
// Set an alarm to abort the program in case this code hangs or deadlocks.
if (fsh_options.alarm_on_failure_secs > 0) {
@@ -361,12 +361,12 @@ static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) {
#endif
// First write to stderr.
- WriteFailureInfo(signo, ucontext, my_cpu, WriteToStderr);
+ WriteFailureInfo(signo, ucontext, my_cpu, WriteToStderr);
// Riskier code (because it is less likely to be async-signal-safe)
// goes after this point.
if (fsh_options.writerfn != nullptr) {
- WriteFailureInfo(signo, ucontext, my_cpu, fsh_options.writerfn);
+ WriteFailureInfo(signo, ucontext, my_cpu, fsh_options.writerfn);
fsh_options.writerfn(nullptr);
}
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler/ya.make b/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler/ya.make
index 62217bae8f..6f214ff9a7 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler/ya.make
@@ -1,46 +1,46 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/internal
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
contrib/restricted/abseil-cpp/absl/demangle
- contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/debugging)
-
-SRCS(
+
+SRCS(
failure_signal_handler.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/address_is_readable.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/address_is_readable.cc
index 329c285f3b..e4289ee15f 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/address_is_readable.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/address_is_readable.cc
@@ -68,7 +68,7 @@ static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) {
// unimplemented.
// This is a namespace-scoped variable for correct zero-initialization.
static std::atomic<uint64_t> pid_and_fds; // initially 0, an invalid pid.
-
+
bool AddressIsReadable(const void *addr) {
absl::base_internal::ErrnoSaver errno_saver;
// We test whether a byte is readable by using write(). Normally, this would
@@ -87,7 +87,7 @@ bool AddressIsReadable(const void *addr) {
int pid;
int read_fd;
int write_fd;
- uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
+ uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
while (current_pid != pid) {
int p[2];
@@ -99,13 +99,13 @@ bool AddressIsReadable(const void *addr) {
fcntl(p[1], F_SETFD, FD_CLOEXEC);
uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]);
if (pid_and_fds.compare_exchange_strong(
- local_pid_and_fds, new_pid_and_fds, std::memory_order_release,
+ local_pid_and_fds, new_pid_and_fds, std::memory_order_release,
std::memory_order_relaxed)) {
local_pid_and_fds = new_pid_and_fds; // fds exposed to other threads
} else { // fds not exposed to other threads; we can close them.
close(p[0]);
close(p[1]);
- local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
+ local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
}
Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
}
@@ -125,7 +125,7 @@ bool AddressIsReadable(const void *addr) {
// If pid_and_fds contains the problematic file descriptors we just used,
// this call will forget them, and the loop will try again.
pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0,
- std::memory_order_release,
+ std::memory_order_release,
std::memory_order_relaxed);
}
} while (errno == EBADF);
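
The compare_exchange calls above implement a publish-or-adopt idiom: whichever thread wins the CAS publishes its freshly created pipe fds, and losers close their own pair and adopt the published one; the packed value also carries the pid so that a fork() is noticed. A stripped-down standalone sketch of the same pattern — the bit layout and the validity marker below are purely illustrative, not the actual Pack()/Unpack() encoding:

    #include <atomic>
    #include <cstdint>
    #include <unistd.h>

    static std::atomic<std::uint64_t> g_packed{0};   // 0 means "not initialized yet"

    // Illustrative packing only: read_fd in bits [16, 48), write_fd in [0, 16).
    static std::uint64_t PackFds(int read_fd, int write_fd) {
      return (std::uint64_t{1} << 63) |                               // "initialized" marker
             (static_cast<std::uint64_t>(static_cast<std::uint32_t>(read_fd)) << 16) |
             static_cast<std::uint16_t>(write_fd);
    }

    static std::uint64_t PublishOrAdopt() {
      std::uint64_t observed = g_packed.load(std::memory_order_acquire);
      if (observed != 0) return observed;            // already published by someone
      int p[2];
      if (pipe(p) != 0) return 0;                    // caller must cope with failure
      std::uint64_t mine = PackFds(p[0], p[1]);
      if (g_packed.compare_exchange_strong(observed, mine,
                                           std::memory_order_release,
                                           std::memory_order_relaxed)) {
        return mine;                                 // we won the race; fds are shared now
      }
      close(p[0]);                                   // we lost: discard our pair and
      close(p[1]);                                   // re-read the winner's published value
      return g_packed.load(std::memory_order_acquire);
    }
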
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc
index 93ae32796c..a5b37f3199 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc
@@ -386,27 +386,27 @@ static bool IsDigit(char c) { return c >= '0' && c <= '9'; }
// by GCC 4.5.x and later versions (and our locally-modified version of GCC
// 4.4.x) to indicate functions which have been cloned during optimization.
// We treat any sequence (.<alpha>+.<digit>+)+ as a function clone suffix.
-// Additionally, '_' is allowed along with the alphanumeric sequence.
+// Additionally, '_' is allowed along with the alphanumeric sequence.
static bool IsFunctionCloneSuffix(const char *str) {
size_t i = 0;
while (str[i] != '\0') {
- bool parsed = false;
- // Consume a single [.<alpha> | _]*[.<digit>]* sequence.
- if (str[i] == '.' && (IsAlpha(str[i + 1]) || str[i + 1] == '_')) {
- parsed = true;
- i += 2;
- while (IsAlpha(str[i]) || str[i] == '_') {
- ++i;
- }
+ bool parsed = false;
+ // Consume a single [.<alpha> | _]*[.<digit>]* sequence.
+ if (str[i] == '.' && (IsAlpha(str[i + 1]) || str[i + 1] == '_')) {
+ parsed = true;
+ i += 2;
+ while (IsAlpha(str[i]) || str[i] == '_') {
+ ++i;
+ }
}
- if (str[i] == '.' && IsDigit(str[i + 1])) {
- parsed = true;
- i += 2;
- while (IsDigit(str[i])) {
- ++i;
- }
+ if (str[i] == '.' && IsDigit(str[i + 1])) {
+ parsed = true;
+ i += 2;
+ while (IsDigit(str[i])) {
+ ++i;
+ }
}
- if (!parsed)
+ if (!parsed)
return false;
}
return true; // Consumed everything in "str".
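
Given the grammar the loop above accepts — one or more groups of '.' followed by letters/underscores and/or '.' followed by digits — these are the kinds of suffixes it classifies. The expected results are worked out by hand from the code shown here; the function is file-static, so this is illustration rather than something callable from outside demangle.cc:

    // IsFunctionCloneSuffix(".clone.3")        -> true   (".clone" group, then ".3")
    // IsFunctionCloneSuffix(".constprop.0")    -> true
    // IsFunctionCloneSuffix(".isra.1.part.9")  -> true   (several groups chained)
    // IsFunctionCloneSuffix("._omp_fn.2")      -> true   ('_' allowed alongside the letters)
    // IsFunctionCloneSuffix(".cold")           -> true   (the digit part of a group is optional)
    // IsFunctionCloneSuffix("suffix")          -> false  (must start with '.')
    // IsFunctionCloneSuffix(".3x")             -> false  (trailing "x" is never consumed)
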
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/examine_stack.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/examine_stack.cc
index 589a3ef367..a6bbf1405a 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/examine_stack.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/examine_stack.cc
@@ -46,42 +46,42 @@ void* GetProgramCounter(void* vuc) {
ucontext_t* context = reinterpret_cast<ucontext_t*>(vuc);
#if defined(__aarch64__)
return reinterpret_cast<void*>(context->uc_mcontext.pc);
-#elif defined(__alpha__)
- return reinterpret_cast<void*>(context->uc_mcontext.sc_pc);
+#elif defined(__alpha__)
+ return reinterpret_cast<void*>(context->uc_mcontext.sc_pc);
#elif defined(__arm__)
return reinterpret_cast<void*>(context->uc_mcontext.arm_pc);
-#elif defined(__hppa__)
- return reinterpret_cast<void*>(context->uc_mcontext.sc_iaoq[0]);
+#elif defined(__hppa__)
+ return reinterpret_cast<void*>(context->uc_mcontext.sc_iaoq[0]);
#elif defined(__i386__)
if (14 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
return reinterpret_cast<void*>(context->uc_mcontext.gregs[14]);
-#elif defined(__ia64__)
- return reinterpret_cast<void*>(context->uc_mcontext.sc_ip);
-#elif defined(__m68k__)
- return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
+#elif defined(__ia64__)
+ return reinterpret_cast<void*>(context->uc_mcontext.sc_ip);
+#elif defined(__m68k__)
+ return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
#elif defined(__mips__)
return reinterpret_cast<void*>(context->uc_mcontext.pc);
#elif defined(__powerpc64__)
return reinterpret_cast<void*>(context->uc_mcontext.gp_regs[32]);
#elif defined(__powerpc__)
- return reinterpret_cast<void*>(context->uc_mcontext.uc_regs->gregs[32]);
+ return reinterpret_cast<void*>(context->uc_mcontext.uc_regs->gregs[32]);
#elif defined(__riscv)
return reinterpret_cast<void*>(context->uc_mcontext.__gregs[REG_PC]);
#elif defined(__s390__) && !defined(__s390x__)
return reinterpret_cast<void*>(context->uc_mcontext.psw.addr & 0x7fffffff);
#elif defined(__s390__) && defined(__s390x__)
return reinterpret_cast<void*>(context->uc_mcontext.psw.addr);
-#elif defined(__sh__)
- return reinterpret_cast<void*>(context->uc_mcontext.pc);
-#elif defined(__sparc__) && !defined(__arch64__)
- return reinterpret_cast<void*>(context->uc_mcontext.gregs[19]);
-#elif defined(__sparc__) && defined(__arch64__)
- return reinterpret_cast<void*>(context->uc_mcontext.mc_gregs[19]);
+#elif defined(__sh__)
+ return reinterpret_cast<void*>(context->uc_mcontext.pc);
+#elif defined(__sparc__) && !defined(__arch64__)
+ return reinterpret_cast<void*>(context->uc_mcontext.gregs[19]);
+#elif defined(__sparc__) && defined(__arch64__)
+ return reinterpret_cast<void*>(context->uc_mcontext.mc_gregs[19]);
#elif defined(__x86_64__)
if (16 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
-#elif defined(__e2k__)
- return reinterpret_cast<void*>(context->uc_mcontext.cr0_hi);
+#elif defined(__e2k__)
+ return reinterpret_cast<void*>(context->uc_mcontext.cr0_hi);
#else
#error "Undefined Architecture."
#endif
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
index f4859d7c21..48689e2949 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -37,11 +37,11 @@ static const unsigned char* GetKernelRtSigreturnAddress() {
absl::debugging_internal::VDSOSupport vdso;
if (vdso.IsPresent()) {
absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
- auto lookup = [&](int type) {
- return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type,
- &symbol_info);
- };
- if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
+ auto lookup = [&](int type) {
+ return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type,
+ &symbol_info);
+ };
+ if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
symbol_info.address == nullptr) {
// Unexpected: VDSO is present, yet the expected symbol is missing
// or null.
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_config.h b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_config.h
index ff21b719a0..5679ae462e 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_config.h
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_config.h
@@ -21,8 +21,8 @@
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
-#include "absl/base/config.h"
-
+#include "absl/base/config.h"
+
#if defined(ABSL_STACKTRACE_INL_HEADER)
#error ABSL_STACKTRACE_INL_HEADER cannot be directly set
@@ -31,7 +31,7 @@
"absl/debugging/internal/stacktrace_win32-inl.inc"
#elif defined(__APPLE__)
-#ifdef ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
// Thread local support required for UnwindImpl.
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_generic-inl.inc"
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc
index cf8c05160c..bf82026c7f 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -131,12 +131,12 @@ static void **NextStackFrame(void **old_sp, const void *uc) {
const ucontext_t* signal_context =
reinterpret_cast<const ucontext_t*>(uc);
void **const sp_before_signal =
-#if defined(__PPC64__)
- reinterpret_cast<void **>(signal_context->uc_mcontext.gp_regs[PT_R1]);
-#else
- reinterpret_cast<void **>(
- signal_context->uc_mcontext.uc_regs->gregs[PT_R1]);
-#endif
+#if defined(__PPC64__)
+ reinterpret_cast<void **>(signal_context->uc_mcontext.gp_regs[PT_R1]);
+#else
+ reinterpret_cast<void **>(
+ signal_context->uc_mcontext.uc_regs->gregs[PT_R1]);
+#endif
// Check that alleged sp before signal is nonnull and is reasonably
// aligned.
if (sp_before_signal != nullptr &&
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/leak_check.cc b/contrib/restricted/abseil-cpp/absl/debugging/leak_check.cc
index 764ca0ad00..771b79aec6 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/leak_check.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/leak_check.cc
@@ -16,7 +16,7 @@
// When lsan is not linked in, these functions are not available,
// therefore Abseil code which depends on these functions is conditioned on the
// definition of LEAK_SANITIZER.
-#include "absl/base/attributes.h"
+#include "absl/base/attributes.h"
#include "absl/debugging/leak_check.h"
#ifndef LEAK_SANITIZER
@@ -24,7 +24,7 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
bool HaveLeakSanitizer() { return false; }
-bool LeakCheckerIsActive() { return false; }
+bool LeakCheckerIsActive() { return false; }
void DoIgnoreLeak(const void*) { }
void RegisterLivePointers(const void*, size_t) { }
void UnRegisterLivePointers(const void*, size_t) { }
@@ -37,23 +37,23 @@ ABSL_NAMESPACE_END
#include <sanitizer/lsan_interface.h>
-#if ABSL_HAVE_ATTRIBUTE_WEAK
-extern "C" ABSL_ATTRIBUTE_WEAK int __lsan_is_turned_off();
-#endif
-
+#if ABSL_HAVE_ATTRIBUTE_WEAK
+extern "C" ABSL_ATTRIBUTE_WEAK int __lsan_is_turned_off();
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
bool HaveLeakSanitizer() { return true; }
-
-#if ABSL_HAVE_ATTRIBUTE_WEAK
-bool LeakCheckerIsActive() {
- return !(&__lsan_is_turned_off && __lsan_is_turned_off());
-}
-#else
-bool LeakCheckerIsActive() { return true; }
-#endif
-
-bool FindAndReportLeaks() { return __lsan_do_recoverable_leak_check(); }
+
+#if ABSL_HAVE_ATTRIBUTE_WEAK
+bool LeakCheckerIsActive() {
+ return !(&__lsan_is_turned_off && __lsan_is_turned_off());
+}
+#else
+bool LeakCheckerIsActive() { return true; }
+#endif
+
+bool FindAndReportLeaks() { return __lsan_do_recoverable_leak_check(); }
void DoIgnoreLeak(const void* ptr) { __lsan_ignore_object(ptr); }
void RegisterLivePointers(const void* ptr, size_t size) {
__lsan_register_root_region(ptr, size);
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/leak_check.h b/contrib/restricted/abseil-cpp/absl/debugging/leak_check.h
index 5fc2b052e4..a177a37aec 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/leak_check.h
+++ b/contrib/restricted/abseil-cpp/absl/debugging/leak_check.h
@@ -43,12 +43,12 @@ ABSL_NAMESPACE_BEGIN
// currently built into this target.
bool HaveLeakSanitizer();
-// LeakCheckerIsActive()
-//
-// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
-// currently built into this target and is turned on.
-bool LeakCheckerIsActive();
-
+// LeakCheckerIsActive()
+//
+// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
+// currently built into this target and is turned on.
+bool LeakCheckerIsActive();
+
// DoIgnoreLeak()
//
// Implements `IgnoreLeak()` below. This function should usually
@@ -68,8 +68,8 @@ void DoIgnoreLeak(const void* ptr);
//
// If the passed `ptr` does not point to an actively allocated object at the
// time `IgnoreLeak()` is called, the call is a no-op; if it is actively
-// allocated, leak sanitizer will assume this object is referenced even if
-// there is no actual reference in user memory.
+// allocated, leak sanitizer will assume this object is referenced even if
+// there is no actual reference in user memory.
//
template <typename T>
T* IgnoreLeak(T* ptr) {
@@ -77,19 +77,19 @@ T* IgnoreLeak(T* ptr) {
return ptr;
}
-// FindAndReportLeaks()
-//
-// If any leaks are detected, prints a leak report and returns true. This
-// function may be called repeatedly, and does not affect end-of-process leak
-// checking.
-//
-// Example:
-// if (FindAndReportLeaks()) {
-// ... diagnostic already printed. Exit with failure code.
-// exit(1)
-// }
-bool FindAndReportLeaks();
-
+// FindAndReportLeaks()
+//
+// If any leaks are detected, prints a leak report and returns true. This
+// function may be called repeatedly, and does not affect end-of-process leak
+// checking.
+//
+// Example:
+// if (FindAndReportLeaks()) {
+// ... diagnostic already printed. Exit with failure code.
+// exit(1)
+// }
+bool FindAndReportLeaks();
+
// LeakCheckDisabler
//
// This helper class indicates that any heap allocations done in the code block
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/symbolize/ya.make b/contrib/restricted/abseil-cpp/absl/debugging/symbolize/ya.make
index 5c25223100..4ca1eae5c9 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/symbolize/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/debugging/symbolize/ya.make
@@ -1,43 +1,43 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/demangle
- contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/debugging)
-
-SRCS(
+
+SRCS(
symbolize.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/symbolize_elf.inc b/contrib/restricted/abseil-cpp/absl/debugging/symbolize_elf.inc
index 3ff343d64f..7c88a58e4d 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/symbolize_elf.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/symbolize_elf.inc
@@ -1295,7 +1295,7 @@ static bool MaybeInitializeObjFile(ObjFile *obj) {
const int phnum = obj->elf_header.e_phnum;
const int phentsize = obj->elf_header.e_phentsize;
size_t phoff = obj->elf_header.e_phoff;
- size_t num_executable_load_segments = 0;
+ size_t num_executable_load_segments = 0;
for (int j = 0; j < phnum; j++) {
ElfW(Phdr) phdr;
if (!ReadFromOffsetExact(obj->fd, &phdr, sizeof(phdr), phoff)) {
@@ -1356,7 +1356,7 @@ const char *Symbolizer::GetSymbol(const void *const pc) {
// Note: some binaries have multiple "rx" LOAD segments. We must
// find the right one.
ElfW(Phdr) *phdr = nullptr;
- for (size_t j = 0; j < obj->phdr.size(); j++) {
+ for (size_t j = 0; j < obj->phdr.size(); j++) {
ElfW(Phdr) &p = obj->phdr[j];
if (p.p_type != PT_LOAD) {
// We only expect PT_LOADs. This must be PT_NULL that we didn't
diff --git a/contrib/restricted/abseil-cpp/absl/flags/commandlineflag/ya.make b/contrib/restricted/abseil-cpp/absl/flags/commandlineflag/ya.make
index 4eef7c0237..71191a2d72 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/commandlineflag/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/flags/commandlineflag/ya.make
@@ -1,42 +1,42 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag
- contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
- contrib/restricted/abseil-cpp/absl/types/bad_optional_access
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+ contrib/restricted/abseil-cpp/absl/types/bad_optional_access
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/flags)
-
-SRCS(
+
+SRCS(
commandlineflag.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/flags/flag.h b/contrib/restricted/abseil-cpp/absl/flags/flag.h
index a724ccc97d..64f6013d20 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/flag.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/flag.h
@@ -208,15 +208,15 @@ ABSL_NAMESPACE_END
#if ABSL_FLAGS_STRIP_NAMES
#define ABSL_FLAG_IMPL_FLAGNAME(txt) ""
#define ABSL_FLAG_IMPL_FILENAME() ""
-#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \
- absl::flags_internal::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(flag), \
- nullptr)
+#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \
+ absl::flags_internal::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(flag), \
+ nullptr)
#else
#define ABSL_FLAG_IMPL_FLAGNAME(txt) txt
#define ABSL_FLAG_IMPL_FILENAME() __FILE__
-#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \
- absl::flags_internal::FlagRegistrar<T, true>(ABSL_FLAG_IMPL_FLAG_PTR(flag), \
- __FILE__)
+#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \
+ absl::flags_internal::FlagRegistrar<T, true>(ABSL_FLAG_IMPL_FLAG_PTR(flag), \
+ __FILE__)
#endif
// ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_HELP
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag.h b/contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag.h
index ebfe81ba1e..ddb0e9cd00 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag.h
@@ -24,7 +24,7 @@ ABSL_NAMESPACE_BEGIN
namespace flags_internal {
// An alias for flag fast type id. This value identifies the flag value type
-// similarly to typeid(T), without relying on RTTI being available. In most
+// similarly to typeid(T), without relying on RTTI being available. In most
// cases this id is enough to uniquely identify the flag's value type. In a few
// cases we'll have to resort to using actual RTTI implementation if it is
// available.
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc
index 1515022d11..20e1e8cd71 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc
@@ -96,8 +96,8 @@ class FlagState : public flags_internal::FlagStateInterface {
counter_(counter) {}
~FlagState() override {
- if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kAlignedBuffer &&
- flag_impl_.ValueStorageKind() != FlagValueStorageKind::kSequenceLocked)
+ if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kAlignedBuffer &&
+ flag_impl_.ValueStorageKind() != FlagValueStorageKind::kSequenceLocked)
return;
flags_internal::Delete(flag_impl_.op_, value_.heap_allocated);
}
@@ -164,11 +164,11 @@ void FlagImpl::Init() {
std::memory_order_release);
break;
}
- case FlagValueStorageKind::kSequenceLocked: {
+ case FlagValueStorageKind::kSequenceLocked: {
// For this storage kind the default_value_ always points to gen_func
// during initialization.
assert(def_kind == FlagDefaultKind::kGenFunc);
- (*default_value_.gen_func)(AtomicBufferValue());
+ (*default_value_.gen_func)(AtomicBufferValue());
break;
}
case FlagValueStorageKind::kAlignedBuffer:
@@ -178,7 +178,7 @@ void FlagImpl::Init() {
(*default_value_.gen_func)(AlignedBufferValue());
break;
}
- seq_lock_.MarkInitialized();
+ seq_lock_.MarkInitialized();
}
absl::Mutex* FlagImpl::DataGuard() const {
@@ -239,11 +239,11 @@ void FlagImpl::StoreValue(const void* src) {
int64_t one_word_val = OneWordValue().load(std::memory_order_acquire);
std::memcpy(&one_word_val, src, Sizeof(op_));
OneWordValue().store(one_word_val, std::memory_order_release);
- seq_lock_.IncrementModificationCount();
+ seq_lock_.IncrementModificationCount();
break;
}
- case FlagValueStorageKind::kSequenceLocked: {
- seq_lock_.Write(AtomicBufferValue(), src, Sizeof(op_));
+ case FlagValueStorageKind::kSequenceLocked: {
+ seq_lock_.Write(AtomicBufferValue(), src, Sizeof(op_));
break;
}
case FlagValueStorageKind::kAlignedBuffer:
@@ -270,10 +270,10 @@ FlagFastTypeId FlagImpl::TypeId() const {
return flags_internal::FastTypeId(op_);
}
-int64_t FlagImpl::ModificationCount() const {
- return seq_lock_.ModificationCount();
-}
-
+int64_t FlagImpl::ModificationCount() const {
+ return seq_lock_.ModificationCount();
+}
+
bool FlagImpl::IsSpecifiedOnCommandLine() const {
absl::MutexLock l(DataGuard());
return on_command_line_;
@@ -296,11 +296,11 @@ std::string FlagImpl::CurrentValue() const {
OneWordValue().load(std::memory_order_acquire));
return flags_internal::Unparse(op_, one_word_val.data());
}
- case FlagValueStorageKind::kSequenceLocked: {
- std::unique_ptr<void, DynValueDeleter> cloned(flags_internal::Alloc(op_),
- DynValueDeleter{op_});
- ReadSequenceLockedData(cloned.get());
- return flags_internal::Unparse(op_, cloned.get());
+ case FlagValueStorageKind::kSequenceLocked: {
+ std::unique_ptr<void, DynValueDeleter> cloned(flags_internal::Alloc(op_),
+ DynValueDeleter{op_});
+ ReadSequenceLockedData(cloned.get());
+ return flags_internal::Unparse(op_, cloned.get());
}
case FlagValueStorageKind::kAlignedBuffer: {
absl::MutexLock l(guard);
@@ -355,17 +355,17 @@ std::unique_ptr<FlagStateInterface> FlagImpl::SaveState() {
case FlagValueStorageKind::kOneWordAtomic: {
return absl::make_unique<FlagState>(
*this, OneWordValue().load(std::memory_order_acquire), modified,
- on_command_line, ModificationCount());
+ on_command_line, ModificationCount());
}
- case FlagValueStorageKind::kSequenceLocked: {
- void* cloned = flags_internal::Alloc(op_);
- // Read is guaranteed to be successful because we hold the lock.
- bool success =
- seq_lock_.TryRead(cloned, AtomicBufferValue(), Sizeof(op_));
- assert(success);
- static_cast<void>(success);
- return absl::make_unique<FlagState>(*this, cloned, modified,
- on_command_line, ModificationCount());
+ case FlagValueStorageKind::kSequenceLocked: {
+ void* cloned = flags_internal::Alloc(op_);
+ // Read is guaranteed to be successful because we hold the lock.
+ bool success =
+ seq_lock_.TryRead(cloned, AtomicBufferValue(), Sizeof(op_));
+ assert(success);
+ static_cast<void>(success);
+ return absl::make_unique<FlagState>(*this, cloned, modified,
+ on_command_line, ModificationCount());
}
case FlagValueStorageKind::kAlignedBuffer: {
return absl::make_unique<FlagState>(
@@ -378,7 +378,7 @@ std::unique_ptr<FlagStateInterface> FlagImpl::SaveState() {
bool FlagImpl::RestoreState(const FlagState& flag_state) {
absl::MutexLock l(DataGuard());
- if (flag_state.counter_ == ModificationCount()) {
+ if (flag_state.counter_ == ModificationCount()) {
return false;
}
@@ -413,11 +413,11 @@ void* FlagImpl::AlignedBufferValue() const {
return OffsetValue<void>();
}
-std::atomic<uint64_t>* FlagImpl::AtomicBufferValue() const {
- assert(ValueStorageKind() == FlagValueStorageKind::kSequenceLocked);
- return OffsetValue<std::atomic<uint64_t>>();
-}
-
+std::atomic<uint64_t>* FlagImpl::AtomicBufferValue() const {
+ assert(ValueStorageKind() == FlagValueStorageKind::kSequenceLocked);
+ return OffsetValue<std::atomic<uint64_t>>();
+}
+
std::atomic<int64_t>& FlagImpl::OneWordValue() const {
assert(ValueStorageKind() == FlagValueStorageKind::kOneWordAtomic ||
ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit);
@@ -453,8 +453,8 @@ void FlagImpl::Read(void* dst) const {
std::memcpy(dst, &one_word_val, Sizeof(op_));
break;
}
- case FlagValueStorageKind::kSequenceLocked: {
- ReadSequenceLockedData(dst);
+ case FlagValueStorageKind::kSequenceLocked: {
+ ReadSequenceLockedData(dst);
break;
}
case FlagValueStorageKind::kAlignedBuffer: {
@@ -482,20 +482,20 @@ bool FlagImpl::ReadOneBool() const {
.value;
}
-void FlagImpl::ReadSequenceLockedData(void* dst) const {
- int size = Sizeof(op_);
- // Attempt to read using the sequence lock.
- if (ABSL_PREDICT_TRUE(seq_lock_.TryRead(dst, AtomicBufferValue(), size))) {
- return;
- }
- // We failed due to contention. Acquire the lock to prevent contention
- // and try again.
- absl::ReaderMutexLock l(DataGuard());
- bool success = seq_lock_.TryRead(dst, AtomicBufferValue(), size);
- assert(success);
- static_cast<void>(success);
-}
-
+void FlagImpl::ReadSequenceLockedData(void* dst) const {
+ int size = Sizeof(op_);
+ // Attempt to read using the sequence lock.
+ if (ABSL_PREDICT_TRUE(seq_lock_.TryRead(dst, AtomicBufferValue(), size))) {
+ return;
+ }
+ // We failed due to contention. Acquire the lock to prevent contention
+ // and try again.
+ absl::ReaderMutexLock l(DataGuard());
+ bool success = seq_lock_.TryRead(dst, AtomicBufferValue(), size);
+ assert(success);
+ static_cast<void>(success);
+}
+
void FlagImpl::Write(const void* src) {
absl::MutexLock l(DataGuard());
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h
index 124a2f1c03..9e3a204d39 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h
@@ -37,7 +37,7 @@
#include "absl/flags/config.h"
#include "absl/flags/internal/commandlineflag.h"
#include "absl/flags/internal/registry.h"
-#include "absl/flags/internal/sequence_lock.h"
+#include "absl/flags/internal/sequence_lock.h"
#include "absl/flags/marshalling.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
@@ -315,15 +315,15 @@ using FlagUseOneWordStorage = std::integral_constant<
bool, absl::type_traits_internal::is_trivially_copyable<T>::value &&
(sizeof(T) <= 8)>;
-template <class T>
+template <class T>
using FlagUseSequenceLockStorage = std::integral_constant<
bool, absl::type_traits_internal::is_trivially_copyable<T>::value &&
- (sizeof(T) > 8)>;
+ (sizeof(T) > 8)>;
enum class FlagValueStorageKind : uint8_t {
kValueAndInitBit = 0,
kOneWordAtomic = 1,
- kSequenceLocked = 2,
+ kSequenceLocked = 2,
kAlignedBuffer = 3,
};
@@ -334,8 +334,8 @@ static constexpr FlagValueStorageKind StorageKind() {
: FlagUseOneWordStorage<T>::value
? FlagValueStorageKind::kOneWordAtomic
: FlagUseSequenceLockStorage<T>::value
- ? FlagValueStorageKind::kSequenceLocked
- : FlagValueStorageKind::kAlignedBuffer;
+ ? FlagValueStorageKind::kSequenceLocked
+ : FlagValueStorageKind::kAlignedBuffer;
}
struct FlagOneWordValue {
@@ -371,7 +371,7 @@ struct FlagValue<T, FlagValueStorageKind::kValueAndInitBit> : FlagOneWordValue {
template <typename T>
struct FlagValue<T, FlagValueStorageKind::kOneWordAtomic> : FlagOneWordValue {
constexpr FlagValue() : FlagOneWordValue(UninitializedFlagValue()) {}
- bool Get(const SequenceLock&, T& dst) const {
+ bool Get(const SequenceLock&, T& dst) const {
int64_t one_word_val = value.load(std::memory_order_acquire);
if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) {
return false;
@@ -382,16 +382,16 @@ struct FlagValue<T, FlagValueStorageKind::kOneWordAtomic> : FlagOneWordValue {
};
template <typename T>
-struct FlagValue<T, FlagValueStorageKind::kSequenceLocked> {
- bool Get(const SequenceLock& lock, T& dst) const {
- return lock.TryRead(&dst, value_words, sizeof(T));
+struct FlagValue<T, FlagValueStorageKind::kSequenceLocked> {
+ bool Get(const SequenceLock& lock, T& dst) const {
+ return lock.TryRead(&dst, value_words, sizeof(T));
}
-
- static constexpr int kNumWords =
- flags_internal::AlignUp(sizeof(T), sizeof(uint64_t)) / sizeof(uint64_t);
-
- alignas(T) alignas(
- std::atomic<uint64_t>) std::atomic<uint64_t> value_words[kNumWords];
+
+ static constexpr int kNumWords =
+ flags_internal::AlignUp(sizeof(T), sizeof(uint64_t)) / sizeof(uint64_t);
+
+ alignas(T) alignas(
+ std::atomic<uint64_t>) std::atomic<uint64_t> value_words[kNumWords];
};
template <typename T>
@@ -507,14 +507,14 @@ class FlagImpl final : public CommandLineFlag {
// flag.cc, we can define it in that file as well.
template <typename StorageT>
StorageT* OffsetValue() const;
- // This is an accessor for a value stored in an aligned buffer storage
- // used for non-trivially-copyable data types.
+ // This is an accessor for a value stored in an aligned buffer storage
+ // used for non-trivially-copyable data types.
// Returns a mutable pointer to the start of a buffer.
void* AlignedBufferValue() const;
-
- // The same as above, but used for sequencelock-protected storage.
- std::atomic<uint64_t>* AtomicBufferValue() const;
-
+
+ // The same as above, but used for sequencelock-protected storage.
+ std::atomic<uint64_t>* AtomicBufferValue() const;
+
// This is an accessor for a value stored as one word atomic. Returns a
// mutable reference to an atomic value.
std::atomic<int64_t>& OneWordValue() const;
@@ -527,12 +527,12 @@ class FlagImpl final : public CommandLineFlag {
// Stores the flag value based on the pointer to the source.
void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
- // Copy the flag data, protected by `seq_lock_` into `dst`.
- //
- // REQUIRES: ValueStorageKind() == kSequenceLocked.
- void ReadSequenceLockedData(void* dst) const
- ABSL_LOCKS_EXCLUDED(*DataGuard());
-
+ // Copy the flag data, protected by `seq_lock_` into `dst`.
+ //
+ // REQUIRES: ValueStorageKind() == kSequenceLocked.
+ void ReadSequenceLockedData(void* dst) const
+ ABSL_LOCKS_EXCLUDED(*DataGuard());
+
FlagHelpKind HelpSourceKind() const {
return static_cast<FlagHelpKind>(help_source_kind_);
}
@@ -558,8 +558,8 @@ class FlagImpl final : public CommandLineFlag {
void CheckDefaultValueParsingRoundtrip() const override
ABSL_LOCKS_EXCLUDED(*DataGuard());
- int64_t ModificationCount() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
-
+ int64_t ModificationCount() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
+
// Interfaces to save and restore flags to/from persistent state.
// Returns current flag state or nullptr if flag does not support
// saving and restoring a state.
@@ -606,9 +606,9 @@ class FlagImpl final : public CommandLineFlag {
// Unique tag for absl::call_once call to initialize this flag.
absl::once_flag init_control_;
- // Sequence lock / mutation counter.
- flags_internal::SequenceLock seq_lock_;
-
+ // Sequence lock / mutation counter.
+ flags_internal::SequenceLock seq_lock_;
+
// Optional flag's callback and absl::Mutex to guard the invocations.
FlagCallback* callback_ ABSL_GUARDED_BY(*DataGuard());
// Either a pointer to the function generating the default value based on the
@@ -669,9 +669,9 @@ class Flag {
impl_.AssertValidType(base_internal::FastTypeId<T>(), &GenRuntimeTypeId<T>);
#endif
- if (ABSL_PREDICT_FALSE(!value_.Get(impl_.seq_lock_, u.value))) {
- impl_.Read(&u.value);
- }
+ if (ABSL_PREDICT_FALSE(!value_.Get(impl_.seq_lock_, u.value))) {
+ impl_.Read(&u.value);
+ }
return std::move(u.value);
}
void Set(const T& v) {
@@ -772,9 +772,9 @@ struct FlagRegistrarEmpty {};
template <typename T, bool do_register>
class FlagRegistrar {
public:
- explicit FlagRegistrar(Flag<T>& flag, const char* filename) : flag_(flag) {
- if (do_register)
- flags_internal::RegisterCommandLineFlag(flag_.impl_, filename);
+ explicit FlagRegistrar(Flag<T>& flag, const char* filename) : flag_(flag) {
+ if (do_register)
+ flags_internal::RegisterCommandLineFlag(flag_.impl_, filename);
}
FlagRegistrar OnUpdate(FlagCallbackFunc cb) && {
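Illustrative note (not part of the patch): the flag.h hunks above route trivially copyable values wider than one 64-bit word to the new kSequenceLocked storage. A minimal sketch of how the FlagUseSequenceLockStorage predicate quoted in this diff classifies a few representative types; the example types are arbitrary stand-ins.

#include <array>
#include <cstdint>
#include <string>

#include "absl/flags/internal/flag.h"

namespace fi = absl::flags_internal;

// Trivially copyable and wider than 8 bytes: eligible for sequence-locked storage.
static_assert(fi::FlagUseSequenceLockStorage<std::array<int64_t, 2>>::value,
              "16-byte trivially copyable payloads take the new path");
// At most one word: stays on the one-word atomic fast path.
static_assert(!fi::FlagUseSequenceLockStorage<int64_t>::value,
              "8-byte values keep one-word atomic storage");
// Not trivially copyable: still falls through to the mutex-guarded aligned buffer.
static_assert(!fi::FlagUseSequenceLockStorage<std::string>::value,
              "std::string keeps kAlignedBuffer");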
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/flag/ya.make b/contrib/restricted/abseil-cpp/absl/flags/internal/flag/ya.make
index 76f47ce655..12b51ff58c 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/flag/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/flag/ya.make
@@ -1,20 +1,20 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
@@ -24,8 +24,8 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/flags/internal/program_name
contrib/restricted/abseil-cpp/absl/flags/marshalling
contrib/restricted/abseil-cpp/absl/flags/usage_config
- contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/strings/internal/str_format
contrib/restricted/abseil-cpp/absl/synchronization
@@ -33,25 +33,25 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/time
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
- contrib/restricted/abseil-cpp/absl/types/bad_optional_access
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+ contrib/restricted/abseil-cpp/absl/types/bad_optional_access
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/flags/internal)
-
-SRCS(
+
+SRCS(
flag.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/registry.h b/contrib/restricted/abseil-cpp/absl/flags/internal/registry.h
index 4b68c85f5c..4848bda8a8 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/registry.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/registry.h
@@ -36,7 +36,7 @@ void ForEachFlag(std::function<void(CommandLineFlag&)> visitor);
//-----------------------------------------------------------------------------
-bool RegisterCommandLineFlag(CommandLineFlag&, const char* filename);
+bool RegisterCommandLineFlag(CommandLineFlag&, const char* filename);
void FinalizeRegistry();
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/sequence_lock.h b/contrib/restricted/abseil-cpp/absl/flags/internal/sequence_lock.h
index 36318ab9d3..caa745b42a 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/sequence_lock.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/sequence_lock.h
@@ -1,187 +1,187 @@
-//
-// Copyright 2020 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
-#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <atomic>
-#include <cassert>
-#include <cstring>
-
-#include "absl/base/optimization.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace flags_internal {
-
-// Align 'x' up to the nearest 'align' bytes.
-inline constexpr size_t AlignUp(size_t x, size_t align) {
- return align * ((x + align - 1) / align);
-}
-
-// A SequenceLock implements lock-free reads. A sequence counter is incremented
-// before and after each write, and readers access the counter before and after
-// accessing the protected data. If the counter is verified to not change during
-// the access, and the sequence counter value was even, then the reader knows
-// that the read was race-free and valid. Otherwise, the reader must fall back
-// to a Mutex-based code path.
-//
-// This particular SequenceLock starts in an "uninitialized" state in which
-// TryRead() returns false. It must be enabled by calling MarkInitialized().
-// This serves as a marker that the associated flag value has not yet been
-// initialized and a slow path needs to be taken.
-//
-// The memory reads and writes protected by this lock must use the provided
-// `TryRead()` and `Write()` functions. These functions behave similarly to
-// `memcpy()`, with one oddity: the protected data must be an array of
+//
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
+#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
+#include <cassert>
+#include <cstring>
+
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace flags_internal {
+
+// Align 'x' up to the nearest 'align' bytes.
+inline constexpr size_t AlignUp(size_t x, size_t align) {
+ return align * ((x + align - 1) / align);
+}
+
+// A SequenceLock implements lock-free reads. A sequence counter is incremented
+// before and after each write, and readers access the counter before and after
+// accessing the protected data. If the counter is verified to not change during
+// the access, and the sequence counter value was even, then the reader knows
+// that the read was race-free and valid. Otherwise, the reader must fall back
+// to a Mutex-based code path.
+//
+// This particular SequenceLock starts in an "uninitialized" state in which
+// TryRead() returns false. It must be enabled by calling MarkInitialized().
+// This serves as a marker that the associated flag value has not yet been
+// initialized and a slow path needs to be taken.
+//
+// The memory reads and writes protected by this lock must use the provided
+// `TryRead()` and `Write()` functions. These functions behave similarly to
+// `memcpy()`, with one oddity: the protected data must be an array of
// `std::atomic<uint64>`. This is to comply with the C++ standard, which
-// considers data races on non-atomic objects to be undefined behavior. See "Can
-// Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J.
-// Boehm for more details.
-//
-// [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
-class SequenceLock {
- public:
- constexpr SequenceLock() : lock_(kUninitialized) {}
-
- // Mark that this lock is ready for use.
- void MarkInitialized() {
- assert(lock_.load(std::memory_order_relaxed) == kUninitialized);
- lock_.store(0, std::memory_order_release);
- }
-
- // Copy "size" bytes of data from "src" to "dst", protected as a read-side
- // critical section of the sequence lock.
- //
- // Unlike traditional sequence lock implementations which loop until getting a
- // clean read, this implementation returns false in the case of concurrent
- // calls to `Write`. In such a case, the caller should fall back to a
- // locking-based slow path.
- //
- // Returns false if the sequence lock was not yet marked as initialized.
- //
- // NOTE: If this returns false, "dst" may be overwritten with undefined
- // (potentially uninitialized) data.
- bool TryRead(void* dst, const std::atomic<uint64_t>* src, size_t size) const {
- // Acquire barrier ensures that no loads done by f() are reordered
- // above the first load of the sequence counter.
- int64_t seq_before = lock_.load(std::memory_order_acquire);
- if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) return false;
- RelaxedCopyFromAtomic(dst, src, size);
- // Another acquire fence ensures that the load of 'lock_' below is
- // strictly ordered after the RelaxedCopyToAtomic call above.
- std::atomic_thread_fence(std::memory_order_acquire);
- int64_t seq_after = lock_.load(std::memory_order_relaxed);
- return ABSL_PREDICT_TRUE(seq_before == seq_after);
- }
-
- // Copy "size" bytes from "src" to "dst" as a write-side critical section
- // of the sequence lock. Any concurrent readers will be forced to retry
- // until they get a read that does not conflict with this write.
- //
- // This call must be externally synchronized against other calls to Write,
- // but may proceed concurrently with reads.
- void Write(std::atomic<uint64_t>* dst, const void* src, size_t size) {
- // We can use relaxed instructions to increment the counter since we
-    // are externally synchronized. The std::atomic_thread_fence below
- // ensures that the counter updates don't get interleaved with the
- // copy to the data.
- int64_t orig_seq = lock_.load(std::memory_order_relaxed);
- assert((orig_seq & 1) == 0); // Must be initially unlocked.
- lock_.store(orig_seq + 1, std::memory_order_relaxed);
-
- // We put a release fence between update to lock_ and writes to shared data.
- // Thus all stores to shared data are effectively release operations and
- // update to lock_ above cannot be re-ordered past any of them. Note that
- // this barrier is not for the fetch_add above. A release barrier for the
- // fetch_add would be before it, not after.
- std::atomic_thread_fence(std::memory_order_release);
- RelaxedCopyToAtomic(dst, src, size);
- // "Release" semantics ensure that none of the writes done by
- // RelaxedCopyToAtomic() can be reordered after the following modification.
- lock_.store(orig_seq + 2, std::memory_order_release);
- }
-
- // Return the number of times that Write() has been called.
- //
- // REQUIRES: This must be externally synchronized against concurrent calls to
- // `Write()` or `IncrementModificationCount()`.
- // REQUIRES: `MarkInitialized()` must have been previously called.
- int64_t ModificationCount() const {
- int64_t val = lock_.load(std::memory_order_relaxed);
- assert(val != kUninitialized && (val & 1) == 0);
- return val / 2;
- }
-
- // REQUIRES: This must be externally synchronized against concurrent calls to
- // `Write()` or `ModificationCount()`.
- // REQUIRES: `MarkInitialized()` must have been previously called.
- void IncrementModificationCount() {
- int64_t val = lock_.load(std::memory_order_relaxed);
- assert(val != kUninitialized);
- lock_.store(val + 2, std::memory_order_relaxed);
- }
-
- private:
- // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
- // atomics.
- static void RelaxedCopyFromAtomic(void* dst, const std::atomic<uint64_t>* src,
- size_t size) {
- char* dst_byte = static_cast<char*>(dst);
- while (size >= sizeof(uint64_t)) {
- uint64_t word = src->load(std::memory_order_relaxed);
- std::memcpy(dst_byte, &word, sizeof(word));
- dst_byte += sizeof(word);
- src++;
- size -= sizeof(word);
- }
- if (size > 0) {
- uint64_t word = src->load(std::memory_order_relaxed);
- std::memcpy(dst_byte, &word, size);
- }
- }
-
- // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
- // atomics.
- static void RelaxedCopyToAtomic(std::atomic<uint64_t>* dst, const void* src,
- size_t size) {
- const char* src_byte = static_cast<const char*>(src);
- while (size >= sizeof(uint64_t)) {
- uint64_t word;
- std::memcpy(&word, src_byte, sizeof(word));
- dst->store(word, std::memory_order_relaxed);
- src_byte += sizeof(word);
- dst++;
- size -= sizeof(word);
- }
- if (size > 0) {
- uint64_t word = 0;
- std::memcpy(&word, src_byte, size);
- dst->store(word, std::memory_order_relaxed);
- }
- }
-
- static constexpr int64_t kUninitialized = -1;
- std::atomic<int64_t> lock_;
-};
-
-} // namespace flags_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
+// considers data races on non-atomic objects to be undefined behavior. See "Can
+// Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J.
+// Boehm for more details.
+//
+// [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
+class SequenceLock {
+ public:
+ constexpr SequenceLock() : lock_(kUninitialized) {}
+
+ // Mark that this lock is ready for use.
+ void MarkInitialized() {
+ assert(lock_.load(std::memory_order_relaxed) == kUninitialized);
+ lock_.store(0, std::memory_order_release);
+ }
+
+ // Copy "size" bytes of data from "src" to "dst", protected as a read-side
+ // critical section of the sequence lock.
+ //
+ // Unlike traditional sequence lock implementations which loop until getting a
+ // clean read, this implementation returns false in the case of concurrent
+ // calls to `Write`. In such a case, the caller should fall back to a
+ // locking-based slow path.
+ //
+ // Returns false if the sequence lock was not yet marked as initialized.
+ //
+ // NOTE: If this returns false, "dst" may be overwritten with undefined
+ // (potentially uninitialized) data.
+ bool TryRead(void* dst, const std::atomic<uint64_t>* src, size_t size) const {
+ // Acquire barrier ensures that no loads done by f() are reordered
+ // above the first load of the sequence counter.
+ int64_t seq_before = lock_.load(std::memory_order_acquire);
+ if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) return false;
+ RelaxedCopyFromAtomic(dst, src, size);
+ // Another acquire fence ensures that the load of 'lock_' below is
+ // strictly ordered after the RelaxedCopyToAtomic call above.
+ std::atomic_thread_fence(std::memory_order_acquire);
+ int64_t seq_after = lock_.load(std::memory_order_relaxed);
+ return ABSL_PREDICT_TRUE(seq_before == seq_after);
+ }
+
+ // Copy "size" bytes from "src" to "dst" as a write-side critical section
+ // of the sequence lock. Any concurrent readers will be forced to retry
+ // until they get a read that does not conflict with this write.
+ //
+ // This call must be externally synchronized against other calls to Write,
+ // but may proceed concurrently with reads.
+ void Write(std::atomic<uint64_t>* dst, const void* src, size_t size) {
+ // We can use relaxed instructions to increment the counter since we
+    // are externally synchronized. The std::atomic_thread_fence below
+ // ensures that the counter updates don't get interleaved with the
+ // copy to the data.
+ int64_t orig_seq = lock_.load(std::memory_order_relaxed);
+ assert((orig_seq & 1) == 0); // Must be initially unlocked.
+ lock_.store(orig_seq + 1, std::memory_order_relaxed);
+
+ // We put a release fence between update to lock_ and writes to shared data.
+ // Thus all stores to shared data are effectively release operations and
+ // update to lock_ above cannot be re-ordered past any of them. Note that
+ // this barrier is not for the fetch_add above. A release barrier for the
+ // fetch_add would be before it, not after.
+ std::atomic_thread_fence(std::memory_order_release);
+ RelaxedCopyToAtomic(dst, src, size);
+ // "Release" semantics ensure that none of the writes done by
+ // RelaxedCopyToAtomic() can be reordered after the following modification.
+ lock_.store(orig_seq + 2, std::memory_order_release);
+ }
+
+ // Return the number of times that Write() has been called.
+ //
+ // REQUIRES: This must be externally synchronized against concurrent calls to
+ // `Write()` or `IncrementModificationCount()`.
+ // REQUIRES: `MarkInitialized()` must have been previously called.
+ int64_t ModificationCount() const {
+ int64_t val = lock_.load(std::memory_order_relaxed);
+ assert(val != kUninitialized && (val & 1) == 0);
+ return val / 2;
+ }
+
+ // REQUIRES: This must be externally synchronized against concurrent calls to
+ // `Write()` or `ModificationCount()`.
+ // REQUIRES: `MarkInitialized()` must have been previously called.
+ void IncrementModificationCount() {
+ int64_t val = lock_.load(std::memory_order_relaxed);
+ assert(val != kUninitialized);
+ lock_.store(val + 2, std::memory_order_relaxed);
+ }
+
+ private:
+ // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
+ // atomics.
+ static void RelaxedCopyFromAtomic(void* dst, const std::atomic<uint64_t>* src,
+ size_t size) {
+ char* dst_byte = static_cast<char*>(dst);
+ while (size >= sizeof(uint64_t)) {
+ uint64_t word = src->load(std::memory_order_relaxed);
+ std::memcpy(dst_byte, &word, sizeof(word));
+ dst_byte += sizeof(word);
+ src++;
+ size -= sizeof(word);
+ }
+ if (size > 0) {
+ uint64_t word = src->load(std::memory_order_relaxed);
+ std::memcpy(dst_byte, &word, size);
+ }
+ }
+
+ // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
+ // atomics.
+ static void RelaxedCopyToAtomic(std::atomic<uint64_t>* dst, const void* src,
+ size_t size) {
+ const char* src_byte = static_cast<const char*>(src);
+ while (size >= sizeof(uint64_t)) {
+ uint64_t word;
+ std::memcpy(&word, src_byte, sizeof(word));
+ dst->store(word, std::memory_order_relaxed);
+ src_byte += sizeof(word);
+ dst++;
+ size -= sizeof(word);
+ }
+ if (size > 0) {
+ uint64_t word = 0;
+ std::memcpy(&word, src_byte, size);
+ dst->store(word, std::memory_order_relaxed);
+ }
+ }
+
+ static constexpr int64_t kUninitialized = -1;
+ std::atomic<int64_t> lock_;
+};
+
+} // namespace flags_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
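Illustrative note (not part of the patch): the new header above is the primitive behind the kSequenceLocked flag storage. A minimal usage sketch against the API quoted here; BigValue, the storage array, and the helper functions are invented for the example, and the call order mirrors FlagImpl::Init()/StoreValue()/ReadSequenceLockedData() from flag.cc earlier in this diff.

#include <atomic>
#include <cstddef>
#include <cstdint>

#include "absl/flags/internal/sequence_lock.h"

namespace fi = absl::flags_internal;

struct BigValue {  // trivially copyable, wider than one 64-bit word
  int64_t lo;
  int64_t hi;
};

// The protected data must be whole 64-bit atomic words; AlignUp() pads the
// size exactly as FlagValue<T, kSequenceLocked> does in flag.h.
constexpr size_t kWords =
    fi::AlignUp(sizeof(BigValue), sizeof(uint64_t)) / sizeof(uint64_t);
std::atomic<uint64_t> storage[kWords];
fi::SequenceLock seq_lock;

void InitOnce(const BigValue& initial) {
  // Until MarkInitialized() is called, TryRead() always returns false and
  // readers take the slow path, as described in the class comment above.
  seq_lock.MarkInitialized();
  seq_lock.Write(storage, &initial, sizeof(initial));
}

void Update(const BigValue& v) {
  // Writers must be serialized externally; FlagImpl holds DataGuard() for this.
  seq_lock.Write(storage, &v, sizeof(v));
}

bool TryReadValue(BigValue& out) {
  // Lock-free fast path; on a concurrent write this returns false and the
  // caller falls back to a mutex, as FlagImpl::ReadSequenceLockedData() does.
  return seq_lock.TryRead(&out, storage, sizeof(out));
}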
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/usage.cc b/contrib/restricted/abseil-cpp/absl/flags/internal/usage.cc
index 949709e883..a3a7d5c933 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/usage.cc
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/usage.cc
@@ -37,26 +37,26 @@
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
-// Dummy global variables to prevent anyone else defining these.
-bool FLAGS_help = false;
-bool FLAGS_helpfull = false;
-bool FLAGS_helpshort = false;
-bool FLAGS_helppackage = false;
-bool FLAGS_version = false;
-bool FLAGS_only_check_args = false;
-bool FLAGS_helpon = false;
-bool FLAGS_helpmatch = false;
+// Dummy global variables to prevent anyone else defining these.
+bool FLAGS_help = false;
+bool FLAGS_helpfull = false;
+bool FLAGS_helpshort = false;
+bool FLAGS_helppackage = false;
+bool FLAGS_version = false;
+bool FLAGS_only_check_args = false;
+bool FLAGS_helpon = false;
+bool FLAGS_helpmatch = false;
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
namespace {
-using PerFlagFilter = std::function<bool(const absl::CommandLineFlag&)>;
-
-// Maximum length size in a human readable format.
-constexpr size_t kHrfMaxLineLength = 80;
-
+using PerFlagFilter = std::function<bool(const absl::CommandLineFlag&)>;
+
+// Maximum length size in a human readable format.
+constexpr size_t kHrfMaxLineLength = 80;
+
// This class is used to emit an XML element with `tag` and `text`.
// It adds opening and closing tags and escapes special characters in the text.
// For example:
@@ -109,12 +109,12 @@ class FlagHelpPrettyPrinter {
public:
// Pretty printer holds on to the std::ostream& reference to direct an output
// to that stream.
- FlagHelpPrettyPrinter(size_t max_line_len, size_t min_line_len,
- size_t wrapped_line_indent, std::ostream& out)
+ FlagHelpPrettyPrinter(size_t max_line_len, size_t min_line_len,
+ size_t wrapped_line_indent, std::ostream& out)
: out_(out),
max_line_len_(max_line_len),
- min_line_len_(min_line_len),
- wrapped_line_indent_(wrapped_line_indent),
+ min_line_len_(min_line_len),
+ wrapped_line_indent_(wrapped_line_indent),
line_len_(0),
first_line_(true) {}
@@ -168,12 +168,12 @@ class FlagHelpPrettyPrinter {
void StartLine() {
if (first_line_) {
- line_len_ = min_line_len_;
+ line_len_ = min_line_len_;
first_line_ = false;
} else {
- line_len_ = min_line_len_ + wrapped_line_indent_;
+ line_len_ = min_line_len_ + wrapped_line_indent_;
}
- out_ << std::string(line_len_, ' ');
+ out_ << std::string(line_len_, ' ');
}
void EndLine() {
out_ << '\n';
@@ -182,15 +182,15 @@ class FlagHelpPrettyPrinter {
private:
std::ostream& out_;
- const size_t max_line_len_;
- const size_t min_line_len_;
- const size_t wrapped_line_indent_;
- size_t line_len_;
+ const size_t max_line_len_;
+ const size_t min_line_len_;
+ const size_t wrapped_line_indent_;
+ size_t line_len_;
bool first_line_;
};
void FlagHelpHumanReadable(const CommandLineFlag& flag, std::ostream& out) {
- FlagHelpPrettyPrinter printer(kHrfMaxLineLength, 4, 2, out);
+ FlagHelpPrettyPrinter printer(kHrfMaxLineLength, 4, 2, out);
// Flag name.
printer.Write(absl::StrCat("--", flag.Name()));
@@ -226,7 +226,7 @@ void FlagHelpHumanReadable(const CommandLineFlag& flag, std::ostream& out) {
// If a flag's help message has been stripped (e.g. by adding '#define
// STRIP_FLAG_HELP 1' then this flag will not be displayed by '--help'
// and its variants.
-void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb,
+void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb,
HelpFormat format, absl::string_view program_usage_message) {
if (format == HelpFormat::kHumanReadable) {
out << flags_internal::ShortProgramInvocationName() << ": "
@@ -261,9 +261,9 @@ void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb,
// If the flag has been stripped, pretend that it doesn't exist.
if (flag.Help() == flags_internal::kStrippedFlagHelp) return;
- // Make sure flag satisfies the filter
- if (!filter_cb(flag)) return;
-
+ // Make sure flag satisfies the filter
+ if (!filter_cb(flag)) return;
+
std::string flag_filename = flag.Filename();
matching_flags[std::string(flags_internal::Package(flag_filename))]
@@ -300,34 +300,34 @@ void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb,
}
if (format == HelpFormat::kHumanReadable) {
- FlagHelpPrettyPrinter printer(kHrfMaxLineLength, 0, 0, out);
-
+ FlagHelpPrettyPrinter printer(kHrfMaxLineLength, 0, 0, out);
+
if (filter_cb && matching_flags.empty()) {
- printer.Write("No flags matched.\n", true);
+ printer.Write("No flags matched.\n", true);
}
- printer.EndLine();
- printer.Write(
- "Try --helpfull to get a list of all flags or --help=substring "
- "shows help for flags which include specified substring in either "
- "in the name, or description or path.\n",
- true);
+ printer.EndLine();
+ printer.Write(
+ "Try --helpfull to get a list of all flags or --help=substring "
+ "shows help for flags which include specified substring in either "
+ "in the name, or description or path.\n",
+ true);
} else {
// The end of the document.
out << "</AllFlags>\n";
}
}
-void FlagsHelpImpl(std::ostream& out,
- flags_internal::FlagKindFilter filename_filter_cb,
- HelpFormat format, absl::string_view program_usage_message) {
- FlagsHelpImpl(
- out,
- [&](const absl::CommandLineFlag& flag) {
- return filename_filter_cb && filename_filter_cb(flag.Filename());
- },
- format, program_usage_message);
-}
-
+void FlagsHelpImpl(std::ostream& out,
+ flags_internal::FlagKindFilter filename_filter_cb,
+ HelpFormat format, absl::string_view program_usage_message) {
+ FlagsHelpImpl(
+ out,
+ [&](const absl::CommandLineFlag& flag) {
+ return filename_filter_cb && filename_filter_cb(flag.Filename());
+ },
+ format, program_usage_message);
+}
+
} // namespace
// --------------------------------------------------------------------
@@ -339,7 +339,7 @@ void FlagHelp(std::ostream& out, const CommandLineFlag& flag,
}
// --------------------------------------------------------------------
-// Produces the help messages for all flags matching the filename filter.
+// Produces the help messages for all flags matching the filename filter.
// If filter is empty produces help messages for all flags.
void FlagsHelp(std::ostream& out, absl::string_view filter, HelpFormat format,
absl::string_view program_usage_message) {
@@ -354,169 +354,169 @@ void FlagsHelp(std::ostream& out, absl::string_view filter, HelpFormat format,
// If so, handles them appropriately.
int HandleUsageFlags(std::ostream& out,
absl::string_view program_usage_message) {
- switch (GetFlagsHelpMode()) {
- case HelpMode::kNone:
- break;
- case HelpMode::kImportant:
- flags_internal::FlagsHelpImpl(
- out, flags_internal::GetUsageConfig().contains_help_flags,
- GetFlagsHelpFormat(), program_usage_message);
- return 1;
-
- case HelpMode::kShort:
- flags_internal::FlagsHelpImpl(
- out, flags_internal::GetUsageConfig().contains_helpshort_flags,
- GetFlagsHelpFormat(), program_usage_message);
- return 1;
-
- case HelpMode::kFull:
- flags_internal::FlagsHelp(out, "", GetFlagsHelpFormat(),
- program_usage_message);
- return 1;
-
- case HelpMode::kPackage:
- flags_internal::FlagsHelpImpl(
- out, flags_internal::GetUsageConfig().contains_helppackage_flags,
- GetFlagsHelpFormat(), program_usage_message);
-
- return 1;
-
- case HelpMode::kMatch: {
- std::string substr = GetFlagsHelpMatchSubstr();
- if (substr.empty()) {
- // show all options
- flags_internal::FlagsHelp(out, substr, GetFlagsHelpFormat(),
- program_usage_message);
- } else {
- auto filter_cb = [&substr](const absl::CommandLineFlag& flag) {
- if (absl::StrContains(flag.Name(), substr)) return true;
- if (absl::StrContains(flag.Filename(), substr)) return true;
- if (absl::StrContains(flag.Help(), substr)) return true;
-
- return false;
- };
- flags_internal::FlagsHelpImpl(
- out, filter_cb, HelpFormat::kHumanReadable, program_usage_message);
- }
-
- return 1;
- }
- case HelpMode::kVersion:
- if (flags_internal::GetUsageConfig().version_string)
- out << flags_internal::GetUsageConfig().version_string();
- // Unlike help, we may be asking for version in a script, so return 0
- return 0;
-
- case HelpMode::kOnlyCheckArgs:
- return 0;
+ switch (GetFlagsHelpMode()) {
+ case HelpMode::kNone:
+ break;
+ case HelpMode::kImportant:
+ flags_internal::FlagsHelpImpl(
+ out, flags_internal::GetUsageConfig().contains_help_flags,
+ GetFlagsHelpFormat(), program_usage_message);
+ return 1;
+
+ case HelpMode::kShort:
+ flags_internal::FlagsHelpImpl(
+ out, flags_internal::GetUsageConfig().contains_helpshort_flags,
+ GetFlagsHelpFormat(), program_usage_message);
+ return 1;
+
+ case HelpMode::kFull:
+ flags_internal::FlagsHelp(out, "", GetFlagsHelpFormat(),
+ program_usage_message);
+ return 1;
+
+ case HelpMode::kPackage:
+ flags_internal::FlagsHelpImpl(
+ out, flags_internal::GetUsageConfig().contains_helppackage_flags,
+ GetFlagsHelpFormat(), program_usage_message);
+
+ return 1;
+
+ case HelpMode::kMatch: {
+ std::string substr = GetFlagsHelpMatchSubstr();
+ if (substr.empty()) {
+ // show all options
+ flags_internal::FlagsHelp(out, substr, GetFlagsHelpFormat(),
+ program_usage_message);
+ } else {
+ auto filter_cb = [&substr](const absl::CommandLineFlag& flag) {
+ if (absl::StrContains(flag.Name(), substr)) return true;
+ if (absl::StrContains(flag.Filename(), substr)) return true;
+ if (absl::StrContains(flag.Help(), substr)) return true;
+
+ return false;
+ };
+ flags_internal::FlagsHelpImpl(
+ out, filter_cb, HelpFormat::kHumanReadable, program_usage_message);
+ }
+
+ return 1;
+ }
+ case HelpMode::kVersion:
+ if (flags_internal::GetUsageConfig().version_string)
+ out << flags_internal::GetUsageConfig().version_string();
+ // Unlike help, we may be asking for version in a script, so return 0
+ return 0;
+
+ case HelpMode::kOnlyCheckArgs:
+ return 0;
}
- return -1;
-}
-
-// --------------------------------------------------------------------
-// Globals representing usage reporting flags
-
-namespace {
-
-ABSL_CONST_INIT absl::Mutex help_attributes_guard(absl::kConstInit);
-ABSL_CONST_INIT std::string* match_substr
- ABSL_GUARDED_BY(help_attributes_guard) = nullptr;
-ABSL_CONST_INIT HelpMode help_mode ABSL_GUARDED_BY(help_attributes_guard) =
- HelpMode::kNone;
-ABSL_CONST_INIT HelpFormat help_format ABSL_GUARDED_BY(help_attributes_guard) =
- HelpFormat::kHumanReadable;
-
-} // namespace
-
-std::string GetFlagsHelpMatchSubstr() {
- absl::MutexLock l(&help_attributes_guard);
- if (match_substr == nullptr) return "";
- return *match_substr;
-}
-
-void SetFlagsHelpMatchSubstr(absl::string_view substr) {
- absl::MutexLock l(&help_attributes_guard);
- if (match_substr == nullptr) match_substr = new std::string;
- match_substr->assign(substr.data(), substr.size());
-}
-
-HelpMode GetFlagsHelpMode() {
- absl::MutexLock l(&help_attributes_guard);
- return help_mode;
-}
-
-void SetFlagsHelpMode(HelpMode mode) {
- absl::MutexLock l(&help_attributes_guard);
- help_mode = mode;
-}
-
-HelpFormat GetFlagsHelpFormat() {
- absl::MutexLock l(&help_attributes_guard);
- return help_format;
-}
-
-void SetFlagsHelpFormat(HelpFormat format) {
- absl::MutexLock l(&help_attributes_guard);
- help_format = format;
-}
-
-// Deduces usage flags from the input argument in a form --name=value or
-// --name. argument is already split into name and value before we call this
-// function.
-bool DeduceUsageFlags(absl::string_view name, absl::string_view value) {
- if (absl::ConsumePrefix(&name, "help")) {
- if (name == "") {
- if (value.empty()) {
- SetFlagsHelpMode(HelpMode::kImportant);
- } else {
- SetFlagsHelpMode(HelpMode::kMatch);
- SetFlagsHelpMatchSubstr(value);
- }
- return true;
- }
-
- if (name == "match") {
- SetFlagsHelpMode(HelpMode::kMatch);
- SetFlagsHelpMatchSubstr(value);
- return true;
- }
-
- if (name == "on") {
- SetFlagsHelpMode(HelpMode::kMatch);
- SetFlagsHelpMatchSubstr(absl::StrCat("/", value, "."));
- return true;
- }
-
- if (name == "full") {
- SetFlagsHelpMode(HelpMode::kFull);
- return true;
- }
-
- if (name == "short") {
- SetFlagsHelpMode(HelpMode::kShort);
- return true;
- }
-
- if (name == "package") {
- SetFlagsHelpMode(HelpMode::kPackage);
- return true;
- }
-
- return false;
+ return -1;
+}
+
+// --------------------------------------------------------------------
+// Globals representing usage reporting flags
+
+namespace {
+
+ABSL_CONST_INIT absl::Mutex help_attributes_guard(absl::kConstInit);
+ABSL_CONST_INIT std::string* match_substr
+ ABSL_GUARDED_BY(help_attributes_guard) = nullptr;
+ABSL_CONST_INIT HelpMode help_mode ABSL_GUARDED_BY(help_attributes_guard) =
+ HelpMode::kNone;
+ABSL_CONST_INIT HelpFormat help_format ABSL_GUARDED_BY(help_attributes_guard) =
+ HelpFormat::kHumanReadable;
+
+} // namespace
+
+std::string GetFlagsHelpMatchSubstr() {
+ absl::MutexLock l(&help_attributes_guard);
+ if (match_substr == nullptr) return "";
+ return *match_substr;
+}
+
+void SetFlagsHelpMatchSubstr(absl::string_view substr) {
+ absl::MutexLock l(&help_attributes_guard);
+ if (match_substr == nullptr) match_substr = new std::string;
+ match_substr->assign(substr.data(), substr.size());
+}
+
+HelpMode GetFlagsHelpMode() {
+ absl::MutexLock l(&help_attributes_guard);
+ return help_mode;
+}
+
+void SetFlagsHelpMode(HelpMode mode) {
+ absl::MutexLock l(&help_attributes_guard);
+ help_mode = mode;
+}
+
+HelpFormat GetFlagsHelpFormat() {
+ absl::MutexLock l(&help_attributes_guard);
+ return help_format;
+}
+
+void SetFlagsHelpFormat(HelpFormat format) {
+ absl::MutexLock l(&help_attributes_guard);
+ help_format = format;
+}
+
+// Deduces usage flags from the input argument in a form --name=value or
+// --name. argument is already split into name and value before we call this
+// function.
+bool DeduceUsageFlags(absl::string_view name, absl::string_view value) {
+ if (absl::ConsumePrefix(&name, "help")) {
+ if (name == "") {
+ if (value.empty()) {
+ SetFlagsHelpMode(HelpMode::kImportant);
+ } else {
+ SetFlagsHelpMode(HelpMode::kMatch);
+ SetFlagsHelpMatchSubstr(value);
+ }
+ return true;
+ }
+
+ if (name == "match") {
+ SetFlagsHelpMode(HelpMode::kMatch);
+ SetFlagsHelpMatchSubstr(value);
+ return true;
+ }
+
+ if (name == "on") {
+ SetFlagsHelpMode(HelpMode::kMatch);
+ SetFlagsHelpMatchSubstr(absl::StrCat("/", value, "."));
+ return true;
+ }
+
+ if (name == "full") {
+ SetFlagsHelpMode(HelpMode::kFull);
+ return true;
+ }
+
+ if (name == "short") {
+ SetFlagsHelpMode(HelpMode::kShort);
+ return true;
+ }
+
+ if (name == "package") {
+ SetFlagsHelpMode(HelpMode::kPackage);
+ return true;
+ }
+
+ return false;
}
- if (name == "version") {
- SetFlagsHelpMode(HelpMode::kVersion);
- return true;
+ if (name == "version") {
+ SetFlagsHelpMode(HelpMode::kVersion);
+ return true;
}
- if (name == "only_check_args") {
- SetFlagsHelpMode(HelpMode::kOnlyCheckArgs);
- return true;
+ if (name == "only_check_args") {
+ SetFlagsHelpMode(HelpMode::kOnlyCheckArgs);
+ return true;
}
- return false;
+ return false;
}
} // namespace flags_internal
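Illustrative note (not part of the patch): the usage.cc changes above put the help-reporting state behind accessors and add DeduceUsageFlags(), which maps an already-split "--name=value" argument to a HelpMode. A sketch of that mapping, following the branches quoted in this diff:

#include "absl/flags/internal/usage.h"

namespace fi = absl::flags_internal;

void DeduceUsageFlagExamples() {
  fi::DeduceUsageFlags("help", "");          // sets HelpMode::kImportant
  fi::DeduceUsageFlags("help", "substr");    // sets kMatch with substring "substr"
  fi::DeduceUsageFlags("helpon", "module");  // sets kMatch with substring "/module."
  fi::DeduceUsageFlags("helpfull", "");      // sets kFull
  fi::DeduceUsageFlags("helppackage", "");   // sets kPackage
  fi::DeduceUsageFlags("version", "");       // sets kVersion
  // Anything else is not a usage flag: the function returns false and the
  // parser treats the argument as an ordinary flag (see the parse.cc hunk
  // further down in this diff).
  bool handled = fi::DeduceUsageFlags("verbosity", "2");
  (void)handled;  // false
}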
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/usage.h b/contrib/restricted/abseil-cpp/absl/flags/internal/usage.h
index c0bcac5762..5198d75225 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/usage.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/usage.h
@@ -66,37 +66,37 @@ void FlagsHelp(std::ostream& out, absl::string_view filter,
int HandleUsageFlags(std::ostream& out,
absl::string_view program_usage_message);
-// --------------------------------------------------------------------
-// Globals representing usage reporting flags
-
-enum class HelpMode {
- kNone,
- kImportant,
- kShort,
- kFull,
- kPackage,
- kMatch,
- kVersion,
- kOnlyCheckArgs
-};
-
-// Returns substring to filter help output (--help=substr argument)
-std::string GetFlagsHelpMatchSubstr();
-// Returns the requested help mode.
-HelpMode GetFlagsHelpMode();
-// Returns the requested help format.
-HelpFormat GetFlagsHelpFormat();
-
-// These are corresponding setters to the attributes above.
-void SetFlagsHelpMatchSubstr(absl::string_view);
-void SetFlagsHelpMode(HelpMode);
-void SetFlagsHelpFormat(HelpFormat);
-
-// Deduces usage flags from the input argument in a form --name=value or
-// --name. argument is already split into name and value before we call this
-// function.
-bool DeduceUsageFlags(absl::string_view name, absl::string_view value);
-
+// --------------------------------------------------------------------
+// Globals representing usage reporting flags
+
+enum class HelpMode {
+ kNone,
+ kImportant,
+ kShort,
+ kFull,
+ kPackage,
+ kMatch,
+ kVersion,
+ kOnlyCheckArgs
+};
+
+// Returns substring to filter help output (--help=substr argument)
+std::string GetFlagsHelpMatchSubstr();
+// Returns the requested help mode.
+HelpMode GetFlagsHelpMode();
+// Returns the requested help format.
+HelpFormat GetFlagsHelpFormat();
+
+// These are corresponding setters to the attributes above.
+void SetFlagsHelpMatchSubstr(absl::string_view);
+void SetFlagsHelpMode(HelpMode);
+void SetFlagsHelpFormat(HelpFormat);
+
+// Deduces usage flags from the input argument in a form --name=value or
+// --name. argument is already split into name and value before we call this
+// function.
+bool DeduceUsageFlags(absl::string_view name, absl::string_view value);
+
} // namespace flags_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/usage/ya.make b/contrib/restricted/abseil-cpp/absl/flags/internal/usage/ya.make
index 358744df59..3973999aa3 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/usage/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/usage/ya.make
@@ -10,7 +10,7 @@ LICENSE(Apache-2.0)
PEERDIR(
contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
@@ -18,10 +18,10 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/city
contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler
contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set
- contrib/restricted/abseil-cpp/absl/debugging
- contrib/restricted/abseil-cpp/absl/debugging/stacktrace
- contrib/restricted/abseil-cpp/absl/debugging/symbolize
- contrib/restricted/abseil-cpp/absl/demangle
+ contrib/restricted/abseil-cpp/absl/debugging
+ contrib/restricted/abseil-cpp/absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp/absl/debugging/symbolize
+ contrib/restricted/abseil-cpp/absl/demangle
contrib/restricted/abseil-cpp/absl/flags
contrib/restricted/abseil-cpp/absl/flags/commandlineflag
contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag
@@ -42,13 +42,13 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions
contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle
contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info
- contrib/restricted/abseil-cpp/absl/strings/internal/str_format
+ contrib/restricted/abseil-cpp/absl/strings/internal/str_format
contrib/restricted/abseil-cpp/absl/synchronization
contrib/restricted/abseil-cpp/absl/synchronization/internal
contrib/restricted/abseil-cpp/absl/time
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
- contrib/restricted/abseil-cpp/absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp/absl/types/bad_optional_access
contrib/restricted/abseil-cpp/absl/types/bad_variant_access
)
diff --git a/contrib/restricted/abseil-cpp/absl/flags/marshalling.h b/contrib/restricted/abseil-cpp/absl/flags/marshalling.h
index 7cbc136d57..00704d1ea3 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/marshalling.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/marshalling.h
@@ -83,7 +83,7 @@
// // AbslParseFlag converts from a string to OutputMode.
// // Must be in same namespace as OutputMode.
//
-// // Parses an OutputMode from the command line flag value `text`. Returns
+// // Parses an OutputMode from the command line flag value `text`. Returns
// // `true` and sets `*mode` on success; returns `false` and sets `*error`
// // on failure.
// bool AbslParseFlag(absl::string_view text,
@@ -139,7 +139,7 @@
//
// // Within the implementation, `AbslParseFlag()` will, in turn invoke
// // `absl::ParseFlag()` on its constituent `int` and `std::string` types
-// // (which have built-in Abseil flag support).
+// // (which have built-in Abseil flag support).
//
// bool AbslParseFlag(absl::string_view text, MyFlagType* flag,
// std::string* err) {
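Illustrative note (not part of the patch): the marshalling.h comments above describe the ADL extension point for custom flag types. A compact sketch for a hypothetical MyFlagType that delegates to absl::ParseFlag()/absl::UnparseFlag() for its built-in pieces; the type and its "count/label" text form are invented for the example.

#include <string>

#include "absl/flags/marshalling.h"
#include "absl/strings/string_view.h"

struct MyFlagType {
  int count = 0;
  std::string label;
};

// Found via ADL next to MyFlagType, as the comments above require.
bool AbslParseFlag(absl::string_view text, MyFlagType* flag, std::string* err) {
  const auto slash = text.find('/');
  if (slash == absl::string_view::npos) {
    *err = "expected <count>/<label>";
    return false;
  }
  // Delegate each constituent to the built-in parsers.
  if (!absl::ParseFlag(text.substr(0, slash), &flag->count, err)) return false;
  return absl::ParseFlag(text.substr(slash + 1), &flag->label, err);
}

std::string AbslUnparseFlag(const MyFlagType& flag) {
  return absl::UnparseFlag(flag.count) + "/" + absl::UnparseFlag(flag.label);
}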
diff --git a/contrib/restricted/abseil-cpp/absl/flags/marshalling/ya.make b/contrib/restricted/abseil-cpp/absl/flags/marshalling/ya.make
index 1b02b87173..102f239809 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/marshalling/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/flags/marshalling/ya.make
@@ -1,41 +1,41 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
- contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/strings
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/strings/internal/str_format
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/flags)
-
-SRCS(
+
+SRCS(
marshalling.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/flags/parse.cc b/contrib/restricted/abseil-cpp/absl/flags/parse.cc
index dd1a6796ca..5c8738309d 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/parse.cc
+++ b/contrib/restricted/abseil-cpp/absl/flags/parse.cc
@@ -713,11 +713,11 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
std::tie(flag, is_negative) = LocateFlag(flag_name);
if (flag == nullptr) {
- // Usage flags are not modeled as Abseil flags. Locate them separately.
- if (flags_internal::DeduceUsageFlags(flag_name, value)) {
- continue;
- }
-
+ // Usage flags are not modeled as Abseil flags. Locate them separately.
+ if (flags_internal::DeduceUsageFlags(flag_name, value)) {
+ continue;
+ }
+
if (on_undef_flag != OnUndefinedFlag::kIgnoreUndefined) {
undefined_flag_names.emplace_back(arg_from_argv,
std::string(flag_name));
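
For orientation, ParseCommandLineImpl above backs the public absl::ParseCommandLine entry point; a hedged sketch of a typical call site (the surrounding program is illustrative, not taken from this diff) follows.

  #include <vector>
  #include "absl/flags/parse.h"

  int main(int argc, char* argv[]) {
    // Parses all registered Abseil flags; returns the positional arguments,
    // with argv[0] (the program name) kept at index 0.
    std::vector<char*> positional = absl::ParseCommandLine(argc, argv);
    (void)positional;  // a real program would dispatch on these
    return 0;
  }
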
diff --git a/contrib/restricted/abseil-cpp/absl/flags/reflection.cc b/contrib/restricted/abseil-cpp/absl/flags/reflection.cc
index dbce4032ab..5dde972b1b 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/reflection.cc
+++ b/contrib/restricted/abseil-cpp/absl/flags/reflection.cc
@@ -50,7 +50,7 @@ class FlagRegistry {
~FlagRegistry() = default;
// Store a flag in this registry. Takes ownership of *flag.
- void RegisterFlag(CommandLineFlag& flag, const char* filename);
+ void RegisterFlag(CommandLineFlag& flag, const char* filename);
void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(lock_) { lock_.Lock(); }
void Unlock() ABSL_UNLOCK_FUNCTION(lock_) { lock_.Unlock(); }
@@ -110,20 +110,20 @@ CommandLineFlag* FlagRegistry::FindFlag(absl::string_view name) {
return it != flags_.end() ? it->second : nullptr;
}
-void FlagRegistry::RegisterFlag(CommandLineFlag& flag, const char* filename) {
- if (filename != nullptr &&
- flag.Filename() != GetUsageConfig().normalize_filename(filename)) {
- flags_internal::ReportUsageError(
- absl::StrCat(
- "Inconsistency between flag object and registration for flag '",
- flag.Name(),
- "', likely due to duplicate flags or an ODR violation. Relevant "
- "files: ",
- flag.Filename(), " and ", filename),
- true);
- std::exit(1);
- }
-
+void FlagRegistry::RegisterFlag(CommandLineFlag& flag, const char* filename) {
+ if (filename != nullptr &&
+ flag.Filename() != GetUsageConfig().normalize_filename(filename)) {
+ flags_internal::ReportUsageError(
+ absl::StrCat(
+ "Inconsistency between flag object and registration for flag '",
+ flag.Name(),
+ "', likely due to duplicate flags or an ODR violation. Relevant "
+ "files: ",
+ flag.Filename(), " and ", filename),
+ true);
+ std::exit(1);
+ }
+
FlagRegistryLock registry_lock(*this);
std::pair<FlagIterator, bool> ins =
@@ -188,8 +188,8 @@ void ForEachFlag(std::function<void(CommandLineFlag&)> visitor) {
// --------------------------------------------------------------------
-bool RegisterCommandLineFlag(CommandLineFlag& flag, const char* filename) {
- FlagRegistry::GlobalRegistry().RegisterFlag(flag, filename);
+bool RegisterCommandLineFlag(CommandLineFlag& flag, const char* filename) {
+ FlagRegistry::GlobalRegistry().RegisterFlag(flag, filename);
return true;
}
@@ -283,7 +283,7 @@ void Retire(const char* name, FlagFastTypeId type_id, char* buf) {
static_assert(alignof(RetiredFlagObj) == kRetiredFlagObjAlignment, "");
auto* flag = ::new (static_cast<void*>(buf))
flags_internal::RetiredFlagObj(name, type_id);
- FlagRegistry::GlobalRegistry().RegisterFlag(*flag, nullptr);
+ FlagRegistry::GlobalRegistry().RegisterFlag(*flag, nullptr);
}
// --------------------------------------------------------------------
@@ -345,7 +345,7 @@ CommandLineFlag* FindCommandLineFlag(absl::string_view name) {
absl::flat_hash_map<absl::string_view, absl::CommandLineFlag*> GetAllFlags() {
absl::flat_hash_map<absl::string_view, absl::CommandLineFlag*> res;
flags_internal::ForEachFlag([&](CommandLineFlag& flag) {
- if (!flag.IsRetired()) res.insert({flag.Name(), &flag});
+ if (!flag.IsRetired()) res.insert({flag.Name(), &flag});
});
return res;
}
diff --git a/contrib/restricted/abseil-cpp/absl/flags/reflection.h b/contrib/restricted/abseil-cpp/absl/flags/reflection.h
index e6baf5de4b..f38292b8db 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/reflection.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/reflection.h
@@ -64,7 +64,7 @@ absl::flat_hash_map<absl::string_view, absl::CommandLineFlag*> GetAllFlags();
// void MyFunc() {
// absl::FlagSaver fs;
// ...
-// absl::SetFlag(&FLAGS_myFlag, otherValue);
+// absl::SetFlag(&FLAGS_myFlag, otherValue);
// ...
// } // scope of FlagSaver left, flags return to previous state
//
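
The FlagSaver comment above reads more easily next to a complete sketch; the flag name and values below are hypothetical.

  #include "absl/flags/flag.h"
  #include "absl/flags/reflection.h"

  ABSL_FLAG(int, retry_limit, 3, "Maximum number of retries");  // illustrative flag

  void RunWithTemporaryOverride() {
    absl::FlagSaver saver;                  // snapshots every registered flag
    absl::SetFlag(&FLAGS_retry_limit, 10);  // override visible only in this scope
    // ... code that reads absl::GetFlag(FLAGS_retry_limit) and sees 10 ...
  }  // saver's destructor restores retry_limit (and all other flags) to 3
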
diff --git a/contrib/restricted/abseil-cpp/absl/flags/reflection/ya.make b/contrib/restricted/abseil-cpp/absl/flags/reflection/ya.make
index f75bc1c67d..0936f25e48 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/reflection/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/flags/reflection/ya.make
@@ -10,7 +10,7 @@ LICENSE(Apache-2.0)
PEERDIR(
contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
@@ -18,10 +18,10 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/city
contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler
contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set
- contrib/restricted/abseil-cpp/absl/debugging
- contrib/restricted/abseil-cpp/absl/debugging/stacktrace
- contrib/restricted/abseil-cpp/absl/debugging/symbolize
- contrib/restricted/abseil-cpp/absl/demangle
+ contrib/restricted/abseil-cpp/absl/debugging
+ contrib/restricted/abseil-cpp/absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp/absl/debugging/symbolize
+ contrib/restricted/abseil-cpp/absl/demangle
contrib/restricted/abseil-cpp/absl/flags/commandlineflag
contrib/restricted/abseil-cpp/absl/flags/internal/commandlineflag
contrib/restricted/abseil-cpp/absl/flags/internal/private_handle_accessor
@@ -43,7 +43,7 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/time
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
- contrib/restricted/abseil-cpp/absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp/absl/types/bad_optional_access
contrib/restricted/abseil-cpp/absl/types/bad_variant_access
)
diff --git a/contrib/restricted/abseil-cpp/absl/flags/usage/ya.make b/contrib/restricted/abseil-cpp/absl/flags/usage/ya.make
index f6b243c84c..b5879008eb 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/usage/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/flags/usage/ya.make
@@ -33,7 +33,7 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/flags/reflection
contrib/restricted/abseil-cpp/absl/flags/usage_config
contrib/restricted/abseil-cpp/absl/hash
- contrib/restricted/abseil-cpp/absl/hash/internal
+ contrib/restricted/abseil-cpp/absl/hash/internal
contrib/restricted/abseil-cpp/absl/numeric
contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
contrib/restricted/abseil-cpp/absl/strings
diff --git a/contrib/restricted/abseil-cpp/absl/flags/usage_config.cc b/contrib/restricted/abseil-cpp/absl/flags/usage_config.cc
index 5d7426db31..acf437a90c 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/usage_config.cc
+++ b/contrib/restricted/abseil-cpp/absl/flags/usage_config.cc
@@ -34,8 +34,8 @@ extern "C" {
// Additional report of fatal usage error message before we std::exit. Error is
// fatal if is_fatal argument to ReportUsageError is true.
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(
- AbslInternalReportFatalUsageError)(absl::string_view) {}
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(
+ AbslInternalReportFatalUsageError)(absl::string_view) {}
} // extern "C"
@@ -129,7 +129,7 @@ void ReportUsageError(absl::string_view msg, bool is_fatal) {
std::cerr << "ERROR: " << msg << std::endl;
if (is_fatal) {
- ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)(msg);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)(msg);
}
}
diff --git a/contrib/restricted/abseil-cpp/absl/flags/usage_config.h b/contrib/restricted/abseil-cpp/absl/flags/usage_config.h
index ded70300f0..5a1acaf9be 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/usage_config.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/usage_config.h
@@ -127,8 +127,8 @@ extern "C" {
// Additional report of fatal usage error message before we std::exit. Error is
// fatal if is_fatal argument to ReportUsageError is true.
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)(
- absl::string_view);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)(
+ absl::string_view);
} // extern "C"
diff --git a/contrib/restricted/abseil-cpp/absl/flags/ya.make b/contrib/restricted/abseil-cpp/absl/flags/ya.make
index 983809b80c..ec1b324e17 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/flags/ya.make
@@ -1,20 +1,20 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/city
contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler
contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set
@@ -32,9 +32,9 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/flags/usage_config
contrib/restricted/abseil-cpp/absl/hash
contrib/restricted/abseil-cpp/absl/hash/internal
- contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/numeric
contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/cord
contrib/restricted/abseil-cpp/absl/strings/internal/absl_cord_internal
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
@@ -47,24 +47,24 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/time
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
- contrib/restricted/abseil-cpp/absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp/absl/types/bad_optional_access
contrib/restricted/abseil-cpp/absl/types/bad_variant_access
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
-SRCS(
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
+SRCS(
flag.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/city.cc b/contrib/restricted/abseil-cpp/absl/hash/internal/city.cc
index 5460134e57..01bd850482 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/city.cc
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/city.cc
@@ -210,11 +210,11 @@ static uint64_t HashLen16(uint64_t u, uint64_t v, uint64_t mul) {
return b;
}
-static uint64_t HashLen16(uint64_t u, uint64_t v) {
- const uint64_t kMul = 0x9ddfea08eb382d69ULL;
- return HashLen16(u, v, kMul);
-}
-
+static uint64_t HashLen16(uint64_t u, uint64_t v) {
+ const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+ return HashLen16(u, v, kMul);
+}
+
static uint64_t HashLen0to16(const char *s, size_t len) {
if (len >= 8) {
uint64_t mul = k2 + len * 2;
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc
index 11451e575c..f8cb56df18 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc
@@ -35,7 +35,7 @@ uint64_t MixingHashState::CombineLargeContiguousImpl32(
uint64_t MixingHashState::CombineLargeContiguousImpl64(
uint64_t state, const unsigned char* first, size_t len) {
while (len >= PiecewiseChunkSize()) {
- state = Mix(state, Hash64(first, PiecewiseChunkSize()));
+ state = Mix(state, Hash64(first, PiecewiseChunkSize()));
len -= PiecewiseChunkSize();
first += PiecewiseChunkSize();
}
@@ -49,21 +49,21 @@ ABSL_CONST_INIT const void* const MixingHashState::kSeed = &kSeed;
// The salt array used by LowLevelHash. This array is NOT the mechanism used to
// make absl::Hash non-deterministic between program invocations. See `Seed()`
// for that mechanism.
-//
-// Any random values are fine. These values are just digits from the decimal
-// part of pi.
-// https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number
+//
+// Any random values are fine. These values are just digits from the decimal
+// part of pi.
+// https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number
constexpr uint64_t kHashSalt[5] = {
- uint64_t{0x243F6A8885A308D3}, uint64_t{0x13198A2E03707344},
- uint64_t{0xA4093822299F31D0}, uint64_t{0x082EFA98EC4E6C89},
- uint64_t{0x452821E638D01377},
-};
-
+ uint64_t{0x243F6A8885A308D3}, uint64_t{0x13198A2E03707344},
+ uint64_t{0xA4093822299F31D0}, uint64_t{0x082EFA98EC4E6C89},
+ uint64_t{0x452821E638D01377},
+};
+
uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data,
size_t len) {
return LowLevelHash(data, len, Seed(), kHashSalt);
-}
-
+}
+
} // namespace hash_internal
ABSL_NAMESPACE_END
} // namespace absl
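
For context, MixingHashState and LowLevelHashImpl above are the machinery behind the public absl::Hash interface; a minimal sketch of how that surface is normally consumed (the function name is illustrative):

  #include <cstddef>
  #include <string>
  #include "absl/hash/hash.h"

  size_t HashOfName(const std::string& name) {
    // absl::Hash routes through the seed, salt and mixing routines defined above.
    return absl::Hash<std::string>{}(name);
  }
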
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h
index b1e33caf4c..f6c4a148b8 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h
@@ -39,8 +39,8 @@
#include <utility>
#include <vector>
-#include "absl/base/config.h"
-#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
#include "absl/base/port.h"
#include "absl/container/fixed_array.h"
#include "absl/hash/internal/city.h"
@@ -834,7 +834,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
static uint64_t CombineContiguousImpl(uint64_t state,
const unsigned char* first, size_t len,
std::integral_constant<int, 8>
- /* sizeof_size_t */);
+ /* sizeof_size_t */);
// Slow dispatch path for calls to CombineContiguousImpl with a size argument
// larger than PiecewiseChunkSize(). Has the same effect as calling
@@ -847,54 +847,54 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
size_t len);
// Reads 9 to 16 bytes from p.
- // The least significant 8 bytes are in .first, the rest (zero padded) bytes
- // are in .second.
+ // The least significant 8 bytes are in .first, the rest (zero padded) bytes
+ // are in .second.
static std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
size_t len) {
- uint64_t low_mem = absl::base_internal::UnalignedLoad64(p);
- uint64_t high_mem = absl::base_internal::UnalignedLoad64(p + len - 8);
-#ifdef ABSL_IS_LITTLE_ENDIAN
- uint64_t most_significant = high_mem;
- uint64_t least_significant = low_mem;
-#else
- uint64_t most_significant = low_mem;
- uint64_t least_significant = high_mem;
-#endif
- return {least_significant, most_significant >> (128 - len * 8)};
+ uint64_t low_mem = absl::base_internal::UnalignedLoad64(p);
+ uint64_t high_mem = absl::base_internal::UnalignedLoad64(p + len - 8);
+#ifdef ABSL_IS_LITTLE_ENDIAN
+ uint64_t most_significant = high_mem;
+ uint64_t least_significant = low_mem;
+#else
+ uint64_t most_significant = low_mem;
+ uint64_t least_significant = high_mem;
+#endif
+ return {least_significant, most_significant >> (128 - len * 8)};
}
// Reads 4 to 8 bytes from p. Zero pads to fill uint64_t.
static uint64_t Read4To8(const unsigned char* p, size_t len) {
- uint32_t low_mem = absl::base_internal::UnalignedLoad32(p);
- uint32_t high_mem = absl::base_internal::UnalignedLoad32(p + len - 4);
-#ifdef ABSL_IS_LITTLE_ENDIAN
- uint32_t most_significant = high_mem;
- uint32_t least_significant = low_mem;
-#else
- uint32_t most_significant = low_mem;
- uint32_t least_significant = high_mem;
-#endif
- return (static_cast<uint64_t>(most_significant) << (len - 4) * 8) |
- least_significant;
+ uint32_t low_mem = absl::base_internal::UnalignedLoad32(p);
+ uint32_t high_mem = absl::base_internal::UnalignedLoad32(p + len - 4);
+#ifdef ABSL_IS_LITTLE_ENDIAN
+ uint32_t most_significant = high_mem;
+ uint32_t least_significant = low_mem;
+#else
+ uint32_t most_significant = low_mem;
+ uint32_t least_significant = high_mem;
+#endif
+ return (static_cast<uint64_t>(most_significant) << (len - 4) * 8) |
+ least_significant;
}
// Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
static uint32_t Read1To3(const unsigned char* p, size_t len) {
- unsigned char mem0 = p[0];
- unsigned char mem1 = p[len / 2];
- unsigned char mem2 = p[len - 1];
-#ifdef ABSL_IS_LITTLE_ENDIAN
- unsigned char significant2 = mem2;
- unsigned char significant1 = mem1;
- unsigned char significant0 = mem0;
-#else
- unsigned char significant2 = mem0;
- unsigned char significant1 = mem1;
- unsigned char significant0 = mem2;
-#endif
- return static_cast<uint32_t>(significant0 | //
- (significant1 << (len / 2 * 8)) | //
- (significant2 << ((len - 1) * 8)));
+ unsigned char mem0 = p[0];
+ unsigned char mem1 = p[len / 2];
+ unsigned char mem2 = p[len - 1];
+#ifdef ABSL_IS_LITTLE_ENDIAN
+ unsigned char significant2 = mem2;
+ unsigned char significant1 = mem1;
+ unsigned char significant0 = mem0;
+#else
+ unsigned char significant2 = mem0;
+ unsigned char significant1 = mem1;
+ unsigned char significant0 = mem2;
+#endif
+ return static_cast<uint32_t>(significant0 | //
+ (significant1 << (len / 2 * 8)) | //
+ (significant2 << ((len - 1) * 8)));
}
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) {
@@ -919,16 +919,16 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// An extern to avoid bloat on a direct call to LowLevelHash() with fixed
// values for both the seed and salt parameters.
static uint64_t LowLevelHashImpl(const unsigned char* data, size_t len);
-
- ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data,
- size_t len) {
-#ifdef ABSL_HAVE_INTRINSIC_INT128
+
+ ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data,
+ size_t len) {
+#ifdef ABSL_HAVE_INTRINSIC_INT128
return LowLevelHashImpl(data, len);
-#else
+#else
return hash_internal::CityHash64(reinterpret_cast<const char*>(data), len);
-#endif
- }
-
+#endif
+ }
+
// Seed()
//
// A non-deterministic seed.
@@ -946,14 +946,14 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// On other platforms this is still going to be non-deterministic but most
// probably per-build and not per-process.
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() {
-#if (!defined(__clang__) || __clang_major__ > 11) && \
- !defined(__apple_build_version__)
- return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&kSeed));
-#else
- // Workaround the absence of
- // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021.
+#if (!defined(__clang__) || __clang_major__ > 11) && \
+ !defined(__apple_build_version__)
+ return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&kSeed));
+#else
+ // Workaround the absence of
+ // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021.
return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(kSeed));
-#endif
+#endif
}
static const void* const kSeed;
@@ -994,7 +994,7 @@ inline uint64_t MixingHashState::CombineContiguousImpl(
if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
return CombineLargeContiguousImpl64(state, first, len);
}
- v = Hash64(first, len);
+ v = Hash64(first, len);
} else if (len > 8) {
auto p = Read9To16(first, len);
state = Mix(state, p.first);
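
The overlapping-load trick in Read4To8 above is subtle, so here is a hedged standalone illustration of the little-endian branch only; std::memcpy stands in for UnalignedLoad32 and the function name is made up.

  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  // For 4 <= len <= 8: two 32-bit loads that overlap in the middle when len < 8,
  // then shift the high half so the result equals the len input bytes, zero padded.
  uint64_t Read4To8LittleEndianDemo(const unsigned char* p, size_t len) {
    uint32_t low, high;
    std::memcpy(&low, p, sizeof(low));              // first four bytes
    std::memcpy(&high, p + len - 4, sizeof(high));  // last four bytes
    return (static_cast<uint64_t>(high) << (len - 4) * 8) | low;
  }
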
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc
index 6f9cb9c7bf..865f5901c8 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc
@@ -1,33 +1,33 @@
-// Copyright 2020 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
#include "absl/hash/internal/low_level_hash.h"
-
-#include "absl/base/internal/unaligned_access.h"
+
+#include "absl/base/internal/unaligned_access.h"
#include "absl/numeric/bits.h"
-#include "absl/numeric/int128.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace hash_internal {
-
+#include "absl/numeric/int128.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
static uint64_t Mix(uint64_t v0, uint64_t v1) {
#if !defined(__aarch64__)
// The default bit-mixer uses 64x64->128-bit multiplication.
- absl::uint128 p = v0;
- p *= v1;
- return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
+ absl::uint128 p = v0;
+ p *= v1;
+ return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
#else
// The default bit-mixer above would perform poorly on some ARM microarchs,
// where calculating a 128-bit product requires a sequence of two
@@ -37,87 +37,87 @@ static uint64_t Mix(uint64_t v0, uint64_t v1) {
p *= v1 ^ absl::rotl(v0, 39);
return p ^ (p >> 11);
#endif
-}
-
+}
+
uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
const uint64_t salt[]) {
- const uint8_t* ptr = static_cast<const uint8_t*>(data);
- uint64_t starting_length = static_cast<uint64_t>(len);
- uint64_t current_state = seed ^ salt[0];
-
- if (len > 64) {
- // If we have more than 64 bytes, we're going to handle chunks of 64
- // bytes at a time. We're going to build up two separate hash states
- // which we will then hash together.
- uint64_t duplicated_state = current_state;
-
- do {
- uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
- uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
- uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
- uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
- uint64_t e = absl::base_internal::UnalignedLoad64(ptr + 32);
- uint64_t f = absl::base_internal::UnalignedLoad64(ptr + 40);
- uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
- uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);
-
+ const uint8_t* ptr = static_cast<const uint8_t*>(data);
+ uint64_t starting_length = static_cast<uint64_t>(len);
+ uint64_t current_state = seed ^ salt[0];
+
+ if (len > 64) {
+ // If we have more than 64 bytes, we're going to handle chunks of 64
+ // bytes at a time. We're going to build up two separate hash states
+ // which we will then hash together.
+ uint64_t duplicated_state = current_state;
+
+ do {
+ uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
+ uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+ uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
+ uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
+ uint64_t e = absl::base_internal::UnalignedLoad64(ptr + 32);
+ uint64_t f = absl::base_internal::UnalignedLoad64(ptr + 40);
+ uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
+ uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);
+
uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
- current_state = (cs0 ^ cs1);
-
+ current_state = (cs0 ^ cs1);
+
uint64_t ds0 = Mix(e ^ salt[3], f ^ duplicated_state);
uint64_t ds1 = Mix(g ^ salt[4], h ^ duplicated_state);
- duplicated_state = (ds0 ^ ds1);
-
- ptr += 64;
- len -= 64;
- } while (len > 64);
-
- current_state = current_state ^ duplicated_state;
- }
-
- // We now have a data `ptr` with at most 64 bytes and the current state
- // of the hashing state machine stored in current_state.
- while (len > 16) {
- uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
- uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
-
+ duplicated_state = (ds0 ^ ds1);
+
+ ptr += 64;
+ len -= 64;
+ } while (len > 64);
+
+ current_state = current_state ^ duplicated_state;
+ }
+
+ // We now have a data `ptr` with at most 64 bytes and the current state
+ // of the hashing state machine stored in current_state.
+ while (len > 16) {
+ uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
+ uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+
current_state = Mix(a ^ salt[1], b ^ current_state);
-
- ptr += 16;
- len -= 16;
- }
-
- // We now have a data `ptr` with at most 16 bytes.
- uint64_t a = 0;
- uint64_t b = 0;
- if (len > 8) {
- // When we have at least 9 and at most 16 bytes, set A to the first 64
- // bits of the input and B to the last 64 bits of the input. Yes, they will
- // overlap in the middle if we are working with less than the full 16
- // bytes.
- a = absl::base_internal::UnalignedLoad64(ptr);
- b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
- } else if (len > 3) {
- // If we have at least 4 and at most 8 bytes, set A to the first 32
- // bits and B to the last 32 bits.
- a = absl::base_internal::UnalignedLoad32(ptr);
- b = absl::base_internal::UnalignedLoad32(ptr + len - 4);
- } else if (len > 0) {
- // If we have at least 1 and at most 3 bytes, read all of the provided
- // bits into A, with some adjustments.
- a = ((ptr[0] << 16) | (ptr[len >> 1] << 8) | ptr[len - 1]);
- b = 0;
- } else {
- a = 0;
- b = 0;
- }
-
+
+ ptr += 16;
+ len -= 16;
+ }
+
+ // We now have a data `ptr` with at most 16 bytes.
+ uint64_t a = 0;
+ uint64_t b = 0;
+ if (len > 8) {
+ // When we have at least 9 and at most 16 bytes, set A to the first 64
+ // bits of the input and B to the last 64 bits of the input. Yes, they will
+ // overlap in the middle if we are working with less than the full 16
+ // bytes.
+ a = absl::base_internal::UnalignedLoad64(ptr);
+ b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
+ } else if (len > 3) {
+ // If we have at least 4 and at most 8 bytes, set A to the first 32
+ // bits and B to the last 32 bits.
+ a = absl::base_internal::UnalignedLoad32(ptr);
+ b = absl::base_internal::UnalignedLoad32(ptr + len - 4);
+ } else if (len > 0) {
+ // If we have at least 1 and at most 3 bytes, read all of the provided
+ // bits into A, with some adjustments.
+ a = ((ptr[0] << 16) | (ptr[len >> 1] << 8) | ptr[len - 1]);
+ b = 0;
+ } else {
+ a = 0;
+ b = 0;
+ }
+
uint64_t w = Mix(a ^ salt[1], b ^ current_state);
- uint64_t z = salt[1] ^ starting_length;
+ uint64_t z = salt[1] ^ starting_length;
return Mix(w, z);
-}
-
-} // namespace hash_internal
-ABSL_NAMESPACE_END
-} // namespace absl
+}
+
+} // namespace hash_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h
index 439968aa98..3409f41b61 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h
@@ -1,19 +1,19 @@
-// Copyright 2020 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
// This file provides the Google-internal implementation of LowLevelHash.
-//
+//
// LowLevelHash is a fast hash function for hash tables, the fastest we've
// currently (late 2020) found that passes the SMHasher tests. The algorithm
// relies on intrinsic 128-bit multiplication for speed. This is not meant to be
@@ -21,30 +21,30 @@
//
// It is closely based on a version of wyhash, but does not maintain or
// guarantee future compatibility with it.
-
+
#ifndef ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_
#define ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_
-
-#include <stdint.h>
-#include <stdlib.h>
-
-#include "absl/base/config.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace hash_internal {
-
-// Hash function for a byte array. A 64-bit seed and a set of five 64-bit
-// integers are hashed into the result.
-//
-// To allow all hashable types (including string_view and Span) to depend on
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_internal {
+
+// Hash function for a byte array. A 64-bit seed and a set of five 64-bit
+// integers are hashed into the result.
+//
+// To allow all hashable types (including string_view and Span) to depend on
// this algorithm, we keep the API low-level, with as few dependencies as
-// possible.
+// possible.
uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
const uint64_t salt[5]);
-
-} // namespace hash_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
+
+} // namespace hash_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
#endif // ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_
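
The header comment above spells out the LowLevelHash contract; a hedged usage sketch follows (the salt values are arbitrary placeholders, not the kHashSalt constants from hash.cc).

  #include <cstdint>
  #include <cstring>
  #include "absl/hash/internal/low_level_hash.h"

  uint64_t HashCString(const char* s) {
    constexpr uint64_t kDemoSalt[5] = {1, 2, 3, 4, 5};
    // Any 64-bit seed works; absl::Hash derives its own per-process seed.
    return absl::hash_internal::LowLevelHash(s, std::strlen(s), /*seed=*/0,
                                             kDemoSalt);
  }
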
diff --git a/contrib/restricted/abseil-cpp/absl/hash/ya.make b/contrib/restricted/abseil-cpp/absl/hash/ya.make
index f71202900a..12ce2b67fe 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/hash/ya.make
@@ -1,44 +1,44 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/city
contrib/restricted/abseil-cpp/absl/hash/internal
- contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/types
- contrib/restricted/abseil-cpp/absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp/absl/types/bad_optional_access
contrib/restricted/abseil-cpp/absl/types/bad_variant_access
contrib/restricted/abseil-cpp/absl/types/internal
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
-SRCS(
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
+SRCS(
internal/hash.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/meta/type_traits.h b/contrib/restricted/abseil-cpp/absl/meta/type_traits.h
index d886cb30a8..6368cfff3f 100644
--- a/contrib/restricted/abseil-cpp/absl/meta/type_traits.h
+++ b/contrib/restricted/abseil-cpp/absl/meta/type_traits.h
@@ -641,23 +641,23 @@ using underlying_type_t = typename std::underlying_type<T>::type;
namespace type_traits_internal {
-
+
#if (defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L) || \
(defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
-// std::result_of is deprecated (C++17) or removed (C++20)
-template<typename> struct result_of;
-template<typename F, typename... Args>
-struct result_of<F(Args...)> : std::invoke_result<F, Args...> {};
-#else
-template<typename F> using result_of = std::result_of<F>;
-#endif
-
-} // namespace type_traits_internal
-
-template<typename F>
-using result_of_t = typename type_traits_internal::result_of<F>::type;
-
-namespace type_traits_internal {
+// std::result_of is deprecated (C++17) or removed (C++20)
+template<typename> struct result_of;
+template<typename F, typename... Args>
+struct result_of<F(Args...)> : std::invoke_result<F, Args...> {};
+#else
+template<typename F> using result_of = std::result_of<F>;
+#endif
+
+} // namespace type_traits_internal
+
+template<typename F>
+using result_of_t = typename type_traits_internal::result_of<F>::type;
+
+namespace type_traits_internal {
// In MSVC we can't probe std::hash or stdext::hash because it triggers a
// static_assert instead of failing substitution. Libc++ prior to 4.0
// also used a static_assert.
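
The shim above keeps absl::result_of_t usable whether or not std::result_of still exists; a small sketch of the deduction it performs (the function and alias names are illustrative):

  #include <string>
  #include <type_traits>
  #include "absl/meta/type_traits.h"

  std::string FormatCount(int n) { return std::to_string(n); }

  // With the shim, this resolves via std::invoke_result in C++17/20 modes and
  // via std::result_of otherwise, yielding std::string either way.
  using FormatResult = absl::result_of_t<decltype(&FormatCount)(int)>;
  static_assert(std::is_same<FormatResult, std::string>::value,
                "result_of_t deduces the callable's return type");
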
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/bits.h b/contrib/restricted/abseil-cpp/absl/numeric/bits.h
index 52013ad49b..f5381d8f94 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/bits.h
+++ b/contrib/restricted/abseil-cpp/absl/numeric/bits.h
@@ -1,177 +1,177 @@
-// Copyright 2020 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// File: bits.h
-// -----------------------------------------------------------------------------
-//
-// This file contains implementations of C++20's bitwise math functions, as
-// defined by:
-//
-// P0553R4:
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p0553r4.html
-// P0556R3:
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0556r3.html
-// P1355R2:
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1355r2.html
-// P1956R1:
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1956r1.pdf
-//
-// When using a standard library that implements these functions, we use the
-// standard library's implementation.
-
-#ifndef ABSL_NUMERIC_BITS_H_
-#define ABSL_NUMERIC_BITS_H_
-
-#include <cstdint>
-#include <limits>
-#include <type_traits>
-
-#if (defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L) || \
- (defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
-#include <bit>
-#endif
-
-#include "absl/base/attributes.h"
-#include "absl/base/config.h"
-#include "absl/numeric/internal/bits.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-#if !(defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
-// rotating
-template <class T>
-ABSL_MUST_USE_RESULT constexpr
- typename std::enable_if<std::is_unsigned<T>::value, T>::type
- rotl(T x, int s) noexcept {
- return numeric_internal::RotateLeft(x, s);
-}
-
-template <class T>
-ABSL_MUST_USE_RESULT constexpr
- typename std::enable_if<std::is_unsigned<T>::value, T>::type
- rotr(T x, int s) noexcept {
- return numeric_internal::RotateRight(x, s);
-}
-
-// Counting functions
-//
-// While these functions are typically constexpr, on some platforms, they may
-// not be marked as constexpr due to constraints of the compiler/available
-// intrinsics.
-template <class T>
-ABSL_INTERNAL_CONSTEXPR_CLZ inline
- typename std::enable_if<std::is_unsigned<T>::value, int>::type
- countl_zero(T x) noexcept {
- return numeric_internal::CountLeadingZeroes(x);
-}
-
-template <class T>
-ABSL_INTERNAL_CONSTEXPR_CLZ inline
- typename std::enable_if<std::is_unsigned<T>::value, int>::type
- countl_one(T x) noexcept {
- // Avoid integer promotion to a wider type
- return countl_zero(static_cast<T>(~x));
-}
-
-template <class T>
-ABSL_INTERNAL_CONSTEXPR_CTZ inline
- typename std::enable_if<std::is_unsigned<T>::value, int>::type
- countr_zero(T x) noexcept {
- return numeric_internal::CountTrailingZeroes(x);
-}
-
-template <class T>
-ABSL_INTERNAL_CONSTEXPR_CTZ inline
- typename std::enable_if<std::is_unsigned<T>::value, int>::type
- countr_one(T x) noexcept {
- // Avoid integer promotion to a wider type
- return countr_zero(static_cast<T>(~x));
-}
-
-template <class T>
-ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline
- typename std::enable_if<std::is_unsigned<T>::value, int>::type
- popcount(T x) noexcept {
- return numeric_internal::Popcount(x);
-}
-#else // defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L
-
-using std::countl_one;
-using std::countl_zero;
-using std::countr_one;
-using std::countr_zero;
-using std::popcount;
-using std::rotl;
-using std::rotr;
-
-#endif
-
-#if !(defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L)
-// Returns: true if x is an integral power of two; false otherwise.
-template <class T>
-constexpr inline typename std::enable_if<std::is_unsigned<T>::value, bool>::type
-has_single_bit(T x) noexcept {
- return x != 0 && (x & (x - 1)) == 0;
-}
-
-// Returns: If x == 0, 0; otherwise one plus the base-2 logarithm of x, with any
-// fractional part discarded.
-template <class T>
-ABSL_INTERNAL_CONSTEXPR_CLZ inline
- typename std::enable_if<std::is_unsigned<T>::value, T>::type
- bit_width(T x) noexcept {
- return std::numeric_limits<T>::digits - countl_zero(x);
-}
-
-// Returns: If x == 0, 0; otherwise the maximal value y such that
-// has_single_bit(y) is true and y <= x.
-template <class T>
-ABSL_INTERNAL_CONSTEXPR_CLZ inline
- typename std::enable_if<std::is_unsigned<T>::value, T>::type
- bit_floor(T x) noexcept {
- return x == 0 ? 0 : T{1} << (bit_width(x) - 1);
-}
-
-// Returns: N, where N is the smallest power of 2 greater than or equal to x.
-//
-// Preconditions: N is representable as a value of type T.
-template <class T>
-ABSL_INTERNAL_CONSTEXPR_CLZ inline
- typename std::enable_if<std::is_unsigned<T>::value, T>::type
- bit_ceil(T x) {
- // If T is narrower than unsigned, T{1} << bit_width will be promoted. We
- // want to force it to wraparound so that bit_ceil of an invalid value are not
- // core constant expressions.
- //
- // BitCeilNonPowerOf2 triggers an overflow in constexpr contexts if we would
- // undergo promotion to unsigned but not fit the result into T without
- // truncation.
- return has_single_bit(x) ? T{1} << (bit_width(x) - 1)
- : numeric_internal::BitCeilNonPowerOf2(x);
-}
-#else // defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L
-
-using std::bit_ceil;
-using std::bit_floor;
-using std::bit_width;
-using std::has_single_bit;
-
-#endif
-
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_NUMERIC_BITS_H_
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: bits.h
+// -----------------------------------------------------------------------------
+//
+// This file contains implementations of C++20's bitwise math functions, as
+// defined by:
+//
+// P0553R4:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p0553r4.html
+// P0556R3:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0556r3.html
+// P1355R2:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1355r2.html
+// P1956R1:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1956r1.pdf
+//
+// When using a standard library that implements these functions, we use the
+// standard library's implementation.
+
+#ifndef ABSL_NUMERIC_BITS_H_
+#define ABSL_NUMERIC_BITS_H_
+
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+#if (defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L) || \
+ (defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
+#include <bit>
+#endif
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/numeric/internal/bits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+#if !(defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
+// rotating
+template <class T>
+ABSL_MUST_USE_RESULT constexpr
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ rotl(T x, int s) noexcept {
+ return numeric_internal::RotateLeft(x, s);
+}
+
+template <class T>
+ABSL_MUST_USE_RESULT constexpr
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ rotr(T x, int s) noexcept {
+ return numeric_internal::RotateRight(x, s);
+}
+
+// Counting functions
+//
+// While these functions are typically constexpr, on some platforms, they may
+// not be marked as constexpr due to constraints of the compiler/available
+// intrinsics.
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, int>::type
+ countl_zero(T x) noexcept {
+ return numeric_internal::CountLeadingZeroes(x);
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, int>::type
+ countl_one(T x) noexcept {
+ // Avoid integer promotion to a wider type
+ return countl_zero(static_cast<T>(~x));
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CTZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, int>::type
+ countr_zero(T x) noexcept {
+ return numeric_internal::CountTrailingZeroes(x);
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CTZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, int>::type
+ countr_one(T x) noexcept {
+ // Avoid integer promotion to a wider type
+ return countr_zero(static_cast<T>(~x));
+}
+
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline
+ typename std::enable_if<std::is_unsigned<T>::value, int>::type
+ popcount(T x) noexcept {
+ return numeric_internal::Popcount(x);
+}
+#else // defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L
+
+using std::countl_one;
+using std::countl_zero;
+using std::countr_one;
+using std::countr_zero;
+using std::popcount;
+using std::rotl;
+using std::rotr;
+
+#endif
+
+#if !(defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L)
+// Returns: true if x is an integral power of two; false otherwise.
+template <class T>
+constexpr inline typename std::enable_if<std::is_unsigned<T>::value, bool>::type
+has_single_bit(T x) noexcept {
+ return x != 0 && (x & (x - 1)) == 0;
+}
+
+// Returns: If x == 0, 0; otherwise one plus the base-2 logarithm of x, with any
+// fractional part discarded.
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ bit_width(T x) noexcept {
+ return std::numeric_limits<T>::digits - countl_zero(x);
+}
+
+// Returns: If x == 0, 0; otherwise the maximal value y such that
+// has_single_bit(y) is true and y <= x.
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ bit_floor(T x) noexcept {
+ return x == 0 ? 0 : T{1} << (bit_width(x) - 1);
+}
+
+// Returns: N, where N is the smallest power of 2 greater than or equal to x.
+//
+// Preconditions: N is representable as a value of type T.
+template <class T>
+ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ bit_ceil(T x) {
+ // If T is narrower than unsigned, T{1} << bit_width will be promoted. We
+ // want to force it to wraparound so that bit_ceil of an invalid value are not
+ // core constant expressions.
+ //
+ // BitCeilNonPowerOf2 triggers an overflow in constexpr contexts if we would
+ // undergo promotion to unsigned but not fit the result into T without
+ // truncation.
+ return has_single_bit(x) ? T{1} << (bit_width(x) - 1)
+ : numeric_internal::BitCeilNonPowerOf2(x);
+}
+#else // defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L
+
+using std::bit_ceil;
+using std::bit_floor;
+using std::bit_width;
+using std::has_single_bit;
+
+#endif
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_NUMERIC_BITS_H_
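
A few spot checks of the surface restored above; note that bit_width/bit_floor/bit_ceil are constexpr only where ABSL_INTERNAL_CONSTEXPR_CLZ expands to constexpr (GCC/Clang builtins or a C++20 standard library), so these static_asserts are a sketch for those toolchains.

  #include <cstdint>
  #include "absl/numeric/bits.h"

  static_assert(absl::has_single_bit(64u), "64 is a power of two");
  static_assert(absl::bit_width(uint32_t{255}) == 8, "255 occupies 8 bits");
  static_assert(absl::bit_floor(uint32_t{20}) == 16, "largest power of two <= 20");
  static_assert(absl::bit_ceil(uint32_t{20}) == 32, "smallest power of two >= 20");
  static_assert(absl::rotl(uint8_t{0x81}, 1) == uint8_t{0x03}, "rotate left wraps");
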
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/int128.cc b/contrib/restricted/abseil-cpp/absl/numeric/int128.cc
index 4f91e48463..07bfbc593f 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/int128.cc
+++ b/contrib/restricted/abseil-cpp/absl/numeric/int128.cc
@@ -24,7 +24,7 @@
#include <type_traits>
#include "absl/base/optimization.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -43,11 +43,11 @@ namespace {
inline ABSL_ATTRIBUTE_ALWAYS_INLINE int Fls128(uint128 n) {
if (uint64_t hi = Uint128High64(n)) {
ABSL_INTERNAL_ASSUME(hi != 0);
- return 127 - countl_zero(hi);
+ return 127 - countl_zero(hi);
}
const uint64_t low = Uint128Low64(n);
ABSL_INTERNAL_ASSUME(low != 0);
- return 63 - countl_zero(low);
+ return 63 - countl_zero(low);
}
// Long division/modulo for uint128 implemented using the shift-subtract
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h b/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h
index bfef06bce1..087ac613cd 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h
+++ b/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h
@@ -1,358 +1,358 @@
-// Copyright 2020 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_NUMERIC_INTERNAL_BITS_H_
-#define ABSL_NUMERIC_INTERNAL_BITS_H_
-
-#include <cstdint>
-#include <limits>
-#include <type_traits>
-
-// Clang on Windows has __builtin_clzll; otherwise we need to use the
-// windows intrinsic functions.
-#if defined(_MSC_VER) && !defined(__clang__)
-#include <intrin.h>
-#endif
-
-#include "absl/base/attributes.h"
-#include "absl/base/config.h"
-
-#if defined(__GNUC__) && !defined(__clang__)
-// GCC
-#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) 1
-#else
-#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) ABSL_HAVE_BUILTIN(x)
-#endif
-
-#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountl) && \
- ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
-#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT constexpr
-#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 1
-#else
-#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT
-#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 0
-#endif
-
-#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz) && \
- ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
-#define ABSL_INTERNAL_CONSTEXPR_CLZ constexpr
-#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 1
-#else
-#define ABSL_INTERNAL_CONSTEXPR_CLZ
-#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 0
-#endif
-
-#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz) && \
- ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
-#define ABSL_INTERNAL_CONSTEXPR_CTZ constexpr
-#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 1
-#else
-#define ABSL_INTERNAL_CONSTEXPR_CTZ
-#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 0
-#endif
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace numeric_internal {
-
-constexpr bool IsPowerOf2(unsigned int x) noexcept {
- return x != 0 && (x & (x - 1)) == 0;
-}
-
-template <class T>
-ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
- T x, int s) noexcept {
- static_assert(std::is_unsigned<T>::value, "T must be unsigned");
- static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
- "T must have a power-of-2 size");
-
- return static_cast<T>(x >> (s & (std::numeric_limits<T>::digits - 1))) |
- static_cast<T>(x << ((-s) & (std::numeric_limits<T>::digits - 1)));
-}
-
-template <class T>
-ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateLeft(
- T x, int s) noexcept {
- static_assert(std::is_unsigned<T>::value, "T must be unsigned");
- static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
- "T must have a power-of-2 size");
-
- return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
- static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
-}
-
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
-Popcount32(uint32_t x) noexcept {
-#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcount)
- static_assert(sizeof(unsigned int) == sizeof(x),
- "__builtin_popcount does not take 32-bit arg");
- return __builtin_popcount(x);
-#else
- x -= ((x >> 1) & 0x55555555);
- x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
- return static_cast<int>((((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24);
-#endif
-}
-
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
-Popcount64(uint64_t x) noexcept {
-#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
- static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
- "__builtin_popcount does not take 64-bit arg");
- return __builtin_popcountll(x);
-#else
- x -= (x >> 1) & 0x5555555555555555ULL;
- x = ((x >> 2) & 0x3333333333333333ULL) + (x & 0x3333333333333333ULL);
- return static_cast<int>(
- (((x + (x >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
-#endif
-}
-
-template <class T>
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
-Popcount(T x) noexcept {
- static_assert(std::is_unsigned<T>::value, "T must be unsigned");
- static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
- "T must have a power-of-2 size");
- static_assert(sizeof(x) <= sizeof(uint64_t), "T is too large");
- return sizeof(x) <= sizeof(uint32_t) ? Popcount32(x) : Popcount64(x);
-}
-
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
-CountLeadingZeroes32(uint32_t x) {
-#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz)
- // Use __builtin_clz, which uses the following instructions:
- // x86: bsr, lzcnt
- // ARM64: clz
- // PPC: cntlzd
-
- static_assert(sizeof(unsigned int) == sizeof(x),
- "__builtin_clz does not take 32-bit arg");
- // Handle 0 as a special case because __builtin_clz(0) is undefined.
- return x == 0 ? 32 : __builtin_clz(x);
-#elif defined(_MSC_VER) && !defined(__clang__)
- unsigned long result = 0; // NOLINT(runtime/int)
- if (_BitScanReverse(&result, x)) {
- return 31 - result;
- }
- return 32;
-#else
- int zeroes = 28;
- if (x >> 16) {
- zeroes -= 16;
- x >>= 16;
- }
- if (x >> 8) {
- zeroes -= 8;
- x >>= 8;
- }
- if (x >> 4) {
- zeroes -= 4;
- x >>= 4;
- }
- return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
-#endif
-}
-
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
-CountLeadingZeroes16(uint16_t x) {
-#if ABSL_HAVE_BUILTIN(__builtin_clzs)
- static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
- "__builtin_clzs does not take 16-bit arg");
- return x == 0 ? 16 : __builtin_clzs(x);
-#else
- return CountLeadingZeroes32(x) - 16;
-#endif
-}
-
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
-CountLeadingZeroes64(uint64_t x) {
-#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
- // Use __builtin_clzll, which uses the following instructions:
- // x86: bsr, lzcnt
- // ARM64: clz
- // PPC: cntlzd
- static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
- "__builtin_clzll does not take 64-bit arg");
-
- // Handle 0 as a special case because __builtin_clzll(0) is undefined.
- return x == 0 ? 64 : __builtin_clzll(x);
-#elif defined(_MSC_VER) && !defined(__clang__) && \
- (defined(_M_X64) || defined(_M_ARM64))
- // MSVC does not have __buitin_clzll. Use _BitScanReverse64.
- unsigned long result = 0; // NOLINT(runtime/int)
- if (_BitScanReverse64(&result, x)) {
- return 63 - result;
- }
- return 64;
-#elif defined(_MSC_VER) && !defined(__clang__)
- // MSVC does not have __buitin_clzll. Compose two calls to _BitScanReverse
- unsigned long result = 0; // NOLINT(runtime/int)
- if ((x >> 32) &&
- _BitScanReverse(&result, static_cast<unsigned long>(x >> 32))) {
- return 31 - result;
- }
- if (_BitScanReverse(&result, static_cast<unsigned long>(x))) {
- return 63 - result;
- }
- return 64;
-#else
- int zeroes = 60;
- if (x >> 32) {
- zeroes -= 32;
- x >>= 32;
- }
- if (x >> 16) {
- zeroes -= 16;
- x >>= 16;
- }
- if (x >> 8) {
- zeroes -= 8;
- x >>= 8;
- }
- if (x >> 4) {
- zeroes -= 4;
- x >>= 4;
- }
- return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
-#endif
-}
-
-template <typename T>
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
-CountLeadingZeroes(T x) {
- static_assert(std::is_unsigned<T>::value, "T must be unsigned");
- static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
- "T must have a power-of-2 size");
- static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
- return sizeof(T) <= sizeof(uint16_t)
- ? CountLeadingZeroes16(static_cast<uint16_t>(x)) -
- (std::numeric_limits<uint16_t>::digits -
- std::numeric_limits<T>::digits)
- : (sizeof(T) <= sizeof(uint32_t)
- ? CountLeadingZeroes32(static_cast<uint32_t>(x)) -
- (std::numeric_limits<uint32_t>::digits -
- std::numeric_limits<T>::digits)
- : CountLeadingZeroes64(x));
-}
-
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
-CountTrailingZeroesNonzero32(uint32_t x) {
-#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz)
- static_assert(sizeof(unsigned int) == sizeof(x),
- "__builtin_ctz does not take 32-bit arg");
- return __builtin_ctz(x);
-#elif defined(_MSC_VER) && !defined(__clang__)
- unsigned long result = 0; // NOLINT(runtime/int)
- _BitScanForward(&result, x);
- return result;
-#else
- int c = 31;
- x &= ~x + 1;
- if (x & 0x0000FFFF) c -= 16;
- if (x & 0x00FF00FF) c -= 8;
- if (x & 0x0F0F0F0F) c -= 4;
- if (x & 0x33333333) c -= 2;
- if (x & 0x55555555) c -= 1;
- return c;
-#endif
-}
-
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
-CountTrailingZeroesNonzero64(uint64_t x) {
-#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
- static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
- "__builtin_ctzll does not take 64-bit arg");
- return __builtin_ctzll(x);
-#elif defined(_MSC_VER) && !defined(__clang__) && \
- (defined(_M_X64) || defined(_M_ARM64))
- unsigned long result = 0; // NOLINT(runtime/int)
- _BitScanForward64(&result, x);
- return result;
-#elif defined(_MSC_VER) && !defined(__clang__)
- unsigned long result = 0; // NOLINT(runtime/int)
- if (static_cast<uint32_t>(x) == 0) {
- _BitScanForward(&result, static_cast<unsigned long>(x >> 32));
- return result + 32;
- }
- _BitScanForward(&result, static_cast<unsigned long>(x));
- return result;
-#else
- int c = 63;
- x &= ~x + 1;
- if (x & 0x00000000FFFFFFFF) c -= 32;
- if (x & 0x0000FFFF0000FFFF) c -= 16;
- if (x & 0x00FF00FF00FF00FF) c -= 8;
- if (x & 0x0F0F0F0F0F0F0F0F) c -= 4;
- if (x & 0x3333333333333333) c -= 2;
- if (x & 0x5555555555555555) c -= 1;
- return c;
-#endif
-}
-
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
-CountTrailingZeroesNonzero16(uint16_t x) {
-#if ABSL_HAVE_BUILTIN(__builtin_ctzs)
- static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
- "__builtin_ctzs does not take 16-bit arg");
- return __builtin_ctzs(x);
-#else
- return CountTrailingZeroesNonzero32(x);
-#endif
-}
-
-template <class T>
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
-CountTrailingZeroes(T x) noexcept {
- static_assert(std::is_unsigned<T>::value, "T must be unsigned");
- static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
- "T must have a power-of-2 size");
- static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
- return x == 0 ? std::numeric_limits<T>::digits
- : (sizeof(T) <= sizeof(uint16_t)
- ? CountTrailingZeroesNonzero16(static_cast<uint16_t>(x))
- : (sizeof(T) <= sizeof(uint32_t)
- ? CountTrailingZeroesNonzero32(
- static_cast<uint32_t>(x))
- : CountTrailingZeroesNonzero64(x)));
-}
-
-// If T is narrower than unsigned, T{1} << bit_width will be promoted. We
-// want to force it to wrap around so that bit_ceil of an invalid value is not
-// a core constant expression.
-template <class T>
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
- typename std::enable_if<std::is_unsigned<T>::value, T>::type
- BitCeilPromotionHelper(T x, T promotion) {
- return (T{1} << (x + promotion)) >> promotion;
-}
-
-template <class T>
-ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
- typename std::enable_if<std::is_unsigned<T>::value, T>::type
- BitCeilNonPowerOf2(T x) {
- // If T is narrower than unsigned, it undergoes promotion to unsigned when we
- // shift. We calculate the number of bits added by the wider type.
- return BitCeilPromotionHelper(
- static_cast<T>(std::numeric_limits<T>::digits - CountLeadingZeroes(x)),
- T{sizeof(T) >= sizeof(unsigned) ? 0
- : std::numeric_limits<unsigned>::digits -
- std::numeric_limits<T>::digits});
-}
-
-} // namespace numeric_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_NUMERIC_INTERNAL_BITS_H_
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_NUMERIC_INTERNAL_BITS_H_
+#define ABSL_NUMERIC_INTERNAL_BITS_H_
+
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+// Clang on Windows has __builtin_clzll; otherwise we need to use the
+// windows intrinsic functions.
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <intrin.h>
+#endif
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+#if defined(__GNUC__) && !defined(__clang__)
+// GCC
+#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) 1
+#else
+#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) ABSL_HAVE_BUILTIN(x)
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountl) && \
+ ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
+#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT
+#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 0
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz) && \
+ ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
+#define ABSL_INTERNAL_CONSTEXPR_CLZ constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_CLZ
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 0
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz) && \
+ ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
+#define ABSL_INTERNAL_CONSTEXPR_CTZ constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_CTZ
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 0
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace numeric_internal {
+
+constexpr bool IsPowerOf2(unsigned int x) noexcept {
+ return x != 0 && (x & (x - 1)) == 0;
+}
+
+template <class T>
+ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
+ T x, int s) noexcept {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+ static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+ "T must have a power-of-2 size");
+
+ return static_cast<T>(x >> (s & (std::numeric_limits<T>::digits - 1))) |
+ static_cast<T>(x << ((-s) & (std::numeric_limits<T>::digits - 1)));
+}
+
+template <class T>
+ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateLeft(
+ T x, int s) noexcept {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+ static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+ "T must have a power-of-2 size");
+
+ return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
+ static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+Popcount32(uint32_t x) noexcept {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcount)
+ static_assert(sizeof(unsigned int) == sizeof(x),
+ "__builtin_popcount does not take 32-bit arg");
+ return __builtin_popcount(x);
+#else
+ x -= ((x >> 1) & 0x55555555);
+ x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
+ return static_cast<int>((((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24);
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+Popcount64(uint64_t x) noexcept {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
+ static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
+ "__builtin_popcount does not take 64-bit arg");
+ return __builtin_popcountll(x);
+#else
+ x -= (x >> 1) & 0x5555555555555555ULL;
+ x = ((x >> 2) & 0x3333333333333333ULL) + (x & 0x3333333333333333ULL);
+ return static_cast<int>(
+ (((x + (x >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
+#endif
+}
+
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+Popcount(T x) noexcept {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+ static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+ "T must have a power-of-2 size");
+ static_assert(sizeof(x) <= sizeof(uint64_t), "T is too large");
+ return sizeof(x) <= sizeof(uint32_t) ? Popcount32(x) : Popcount64(x);
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes32(uint32_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz)
+ // Use __builtin_clz, which uses the following instructions:
+ // x86: bsr, lzcnt
+ // ARM64: clz
+ // PPC: cntlzd
+
+ static_assert(sizeof(unsigned int) == sizeof(x),
+ "__builtin_clz does not take 32-bit arg");
+ // Handle 0 as a special case because __builtin_clz(0) is undefined.
+ return x == 0 ? 32 : __builtin_clz(x);
+#elif defined(_MSC_VER) && !defined(__clang__)
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if (_BitScanReverse(&result, x)) {
+ return 31 - result;
+ }
+ return 32;
+#else
+ int zeroes = 28;
+ if (x >> 16) {
+ zeroes -= 16;
+ x >>= 16;
+ }
+ if (x >> 8) {
+ zeroes -= 8;
+ x >>= 8;
+ }
+ if (x >> 4) {
+ zeroes -= 4;
+ x >>= 4;
+ }
+ return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes16(uint16_t x) {
+#if ABSL_HAVE_BUILTIN(__builtin_clzs)
+ static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
+ "__builtin_clzs does not take 16-bit arg");
+ return x == 0 ? 16 : __builtin_clzs(x);
+#else
+ return CountLeadingZeroes32(x) - 16;
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes64(uint64_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
+ // Use __builtin_clzll, which uses the following instructions:
+ // x86: bsr, lzcnt
+ // ARM64: clz
+ // PPC: cntlzd
+ static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
+ "__builtin_clzll does not take 64-bit arg");
+
+ // Handle 0 as a special case because __builtin_clzll(0) is undefined.
+ return x == 0 ? 64 : __builtin_clzll(x);
+#elif defined(_MSC_VER) && !defined(__clang__) && \
+ (defined(_M_X64) || defined(_M_ARM64))
+  // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if (_BitScanReverse64(&result, x)) {
+ return 63 - result;
+ }
+ return 64;
+#elif defined(_MSC_VER) && !defined(__clang__)
+  // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if ((x >> 32) &&
+ _BitScanReverse(&result, static_cast<unsigned long>(x >> 32))) {
+ return 31 - result;
+ }
+ if (_BitScanReverse(&result, static_cast<unsigned long>(x))) {
+ return 63 - result;
+ }
+ return 64;
+#else
+ int zeroes = 60;
+ if (x >> 32) {
+ zeroes -= 32;
+ x >>= 32;
+ }
+ if (x >> 16) {
+ zeroes -= 16;
+ x >>= 16;
+ }
+ if (x >> 8) {
+ zeroes -= 8;
+ x >>= 8;
+ }
+ if (x >> 4) {
+ zeroes -= 4;
+ x >>= 4;
+ }
+ return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
+#endif
+}
+
+template <typename T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+CountLeadingZeroes(T x) {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+ static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+ "T must have a power-of-2 size");
+ static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
+ return sizeof(T) <= sizeof(uint16_t)
+ ? CountLeadingZeroes16(static_cast<uint16_t>(x)) -
+ (std::numeric_limits<uint16_t>::digits -
+ std::numeric_limits<T>::digits)
+ : (sizeof(T) <= sizeof(uint32_t)
+ ? CountLeadingZeroes32(static_cast<uint32_t>(x)) -
+ (std::numeric_limits<uint32_t>::digits -
+ std::numeric_limits<T>::digits)
+ : CountLeadingZeroes64(x));
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroesNonzero32(uint32_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz)
+ static_assert(sizeof(unsigned int) == sizeof(x),
+ "__builtin_ctz does not take 32-bit arg");
+ return __builtin_ctz(x);
+#elif defined(_MSC_VER) && !defined(__clang__)
+ unsigned long result = 0; // NOLINT(runtime/int)
+ _BitScanForward(&result, x);
+ return result;
+#else
+ int c = 31;
+ x &= ~x + 1;
+ if (x & 0x0000FFFF) c -= 16;
+ if (x & 0x00FF00FF) c -= 8;
+ if (x & 0x0F0F0F0F) c -= 4;
+ if (x & 0x33333333) c -= 2;
+ if (x & 0x55555555) c -= 1;
+ return c;
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroesNonzero64(uint64_t x) {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
+ static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
+ "__builtin_ctzll does not take 64-bit arg");
+ return __builtin_ctzll(x);
+#elif defined(_MSC_VER) && !defined(__clang__) && \
+ (defined(_M_X64) || defined(_M_ARM64))
+ unsigned long result = 0; // NOLINT(runtime/int)
+ _BitScanForward64(&result, x);
+ return result;
+#elif defined(_MSC_VER) && !defined(__clang__)
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if (static_cast<uint32_t>(x) == 0) {
+ _BitScanForward(&result, static_cast<unsigned long>(x >> 32));
+ return result + 32;
+ }
+ _BitScanForward(&result, static_cast<unsigned long>(x));
+ return result;
+#else
+ int c = 63;
+ x &= ~x + 1;
+ if (x & 0x00000000FFFFFFFF) c -= 32;
+ if (x & 0x0000FFFF0000FFFF) c -= 16;
+ if (x & 0x00FF00FF00FF00FF) c -= 8;
+ if (x & 0x0F0F0F0F0F0F0F0F) c -= 4;
+ if (x & 0x3333333333333333) c -= 2;
+ if (x & 0x5555555555555555) c -= 1;
+ return c;
+#endif
+}
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroesNonzero16(uint16_t x) {
+#if ABSL_HAVE_BUILTIN(__builtin_ctzs)
+ static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
+ "__builtin_ctzs does not take 16-bit arg");
+ return __builtin_ctzs(x);
+#else
+ return CountTrailingZeroesNonzero32(x);
+#endif
+}
+
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+CountTrailingZeroes(T x) noexcept {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+ static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
+ "T must have a power-of-2 size");
+ static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
+ return x == 0 ? std::numeric_limits<T>::digits
+ : (sizeof(T) <= sizeof(uint16_t)
+ ? CountTrailingZeroesNonzero16(static_cast<uint16_t>(x))
+ : (sizeof(T) <= sizeof(uint32_t)
+ ? CountTrailingZeroesNonzero32(
+ static_cast<uint32_t>(x))
+ : CountTrailingZeroesNonzero64(x)));
+}
+
+// If T is narrower than unsigned, T{1} << bit_width will be promoted. We
+// want to force it to wrap around so that bit_ceil of an invalid value is not
+// a core constant expression.
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ BitCeilPromotionHelper(T x, T promotion) {
+ return (T{1} << (x + promotion)) >> promotion;
+}
+
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
+ typename std::enable_if<std::is_unsigned<T>::value, T>::type
+ BitCeilNonPowerOf2(T x) {
+ // If T is narrower than unsigned, it undergoes promotion to unsigned when we
+ // shift. We calculate the number of bits added by the wider type.
+ return BitCeilPromotionHelper(
+ static_cast<T>(std::numeric_limits<T>::digits - CountLeadingZeroes(x)),
+ T{sizeof(T) >= sizeof(unsigned) ? 0
+ : std::numeric_limits<unsigned>::digits -
+ std::numeric_limits<T>::digits});
+}
+
+} // namespace numeric_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_NUMERIC_INTERNAL_BITS_H_
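
A minimal standalone sketch (not part of the patch) exercising the portable fallback branches restored above; the *Fallback helper names are hypothetical and simply mirror the non-builtin popcount and count-leading-zeroes logic:

#include <cassert>
#include <cstdint>

// Mirrors the divide-and-conquer popcount fallback above.
int Popcount32Fallback(uint32_t x) {
  x -= ((x >> 1) & 0x55555555);
  x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
  return static_cast<int>((((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24);
}

// Mirrors the shift-and-table count-leading-zeroes fallback above.
int CountLeadingZeroes32Fallback(uint32_t x) {
  int zeroes = 28;
  if (x >> 16) { zeroes -= 16; x >>= 16; }
  if (x >> 8) { zeroes -= 8; x >>= 8; }
  if (x >> 4) { zeroes -= 4; x >>= 4; }
  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
}

int main() {
  assert(Popcount32Fallback(0) == 0);
  assert(Popcount32Fallback(0xFFFFFFFFu) == 32);
  assert(Popcount32Fallback(0x80000001u) == 2);
  assert(CountLeadingZeroes32Fallback(1) == 31);
  assert(CountLeadingZeroes32Fallback(0x80000000u) == 0);
  assert(CountLeadingZeroes32Fallback(0x00010000u) == 15);
  return 0;
}
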
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/internal/representation.h b/contrib/restricted/abseil-cpp/absl/numeric/internal/representation.h
index 82d332fdde..4651292487 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/internal/representation.h
+++ b/contrib/restricted/abseil-cpp/absl/numeric/internal/representation.h
@@ -1,55 +1,55 @@
-// Copyright 2021 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
-#define ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
-
-#include <limits>
-
-#include "absl/base/config.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace numeric_internal {
-
-// Returns true iff long double is represented as a pair of doubles added
-// together.
-inline constexpr bool IsDoubleDouble() {
- // A double-double value always has exactly twice the precision of a double
- // value--one double carries the high digits and one double carries the low
- // digits. This property is not shared with any other common floating-point
- // representation, so this test won't trigger false positives. For reference,
- // this table gives the number of bits of precision of each common
- // floating-point representation:
- //
- // type precision
- // IEEE single 24 b
- // IEEE double 53
- // x86 long double 64
- // double-double 106
- // IEEE quadruple 113
- //
- // Note in particular that a quadruple-precision float has greater precision
- // than a double-double float despite taking up the same amount of memory; the
- // quad has more of its bits allocated to the mantissa than the double-double
- // has.
- return std::numeric_limits<long double>::digits ==
- 2 * std::numeric_limits<double>::digits;
-}
-
-} // namespace numeric_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
+#define ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
+
+#include <limits>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace numeric_internal {
+
+// Returns true iff long double is represented as a pair of doubles added
+// together.
+inline constexpr bool IsDoubleDouble() {
+ // A double-double value always has exactly twice the precision of a double
+ // value--one double carries the high digits and one double carries the low
+ // digits. This property is not shared with any other common floating-point
+ // representation, so this test won't trigger false positives. For reference,
+ // this table gives the number of bits of precision of each common
+ // floating-point representation:
+ //
+ // type precision
+ // IEEE single 24 b
+ // IEEE double 53
+ // x86 long double 64
+ // double-double 106
+ // IEEE quadruple 113
+ //
+ // Note in particular that a quadruple-precision float has greater precision
+ // than a double-double float despite taking up the same amount of memory; the
+ // quad has more of its bits allocated to the mantissa than the double-double
+ // has.
+ return std::numeric_limits<long double>::digits ==
+ 2 * std::numeric_limits<double>::digits;
+}
+
+} // namespace numeric_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
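
A self-contained restatement (illustrative only, not from the patch) of the double-double test above; the reported digit count depends on the target's long double format:

#include <cstdio>
#include <limits>

// True only when long double has exactly twice the precision of double,
// which among common formats is unique to the double-double representation.
constexpr bool IsDoubleDoubleSketch() {
  return std::numeric_limits<long double>::digits ==
         2 * std::numeric_limits<double>::digits;
}

int main() {
  // Typically prints 64 (x86 extended), 113 (IEEE quad), or 106 (double-double).
  std::printf("long double digits: %d, double-double: %s\n",
              std::numeric_limits<long double>::digits,
              IsDoubleDoubleSketch() ? "yes" : "no");
  return 0;
}
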
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/explicit_seed_seq.h b/contrib/restricted/abseil-cpp/absl/random/internal/explicit_seed_seq.h
index 25f791535f..f405c562d4 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/explicit_seed_seq.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/explicit_seed_seq.h
@@ -23,7 +23,7 @@
#include <vector>
#include "absl/base/config.h"
-#include "absl/base/internal/endian.h"
+#include "absl/base/internal/endian.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/fastmath.h b/contrib/restricted/abseil-cpp/absl/random/internal/fastmath.h
index 963b7690f1..615ed41ce5 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/fastmath.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/fastmath.h
@@ -22,7 +22,7 @@
#include <cmath>
#include <cstdint>
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -34,10 +34,10 @@ namespace random_internal {
// for instance--std::log2 rounds up rather than down, which introduces
// definite skew in the results.
inline int IntLog2Floor(uint64_t n) {
- return (n <= 1) ? 0 : (63 - countl_zero(n));
+ return (n <= 1) ? 0 : (63 - countl_zero(n));
}
inline int IntLog2Ceil(uint64_t n) {
- return (n <= 1) ? 0 : (64 - countl_zero(n - 1));
+ return (n <= 1) ? 0 : (64 - countl_zero(n - 1));
}
inline double StirlingLogFactorial(double n) {
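
A short usage sketch of the IntLog2Floor/IntLog2Ceil pattern in the hunk above, assuming the Abseil headers are on the include path; the *Sketch names are hypothetical:

#include <cassert>
#include <cstdint>
#include "absl/numeric/bits.h"

// Floor log2 comes from the position of the highest set bit; ceil subtracts
// one from the argument first so exact powers of two are not bumped upward.
inline int IntLog2FloorSketch(uint64_t n) {
  return (n <= 1) ? 0 : (63 - absl::countl_zero(n));
}
inline int IntLog2CeilSketch(uint64_t n) {
  return (n <= 1) ? 0 : (64 - absl::countl_zero(n - 1));
}

int main() {
  assert(IntLog2FloorSketch(8) == 3 && IntLog2CeilSketch(8) == 3);
  assert(IntLog2FloorSketch(9) == 3 && IntLog2CeilSketch(9) == 4);
  assert(IntLog2FloorSketch(1) == 0 && IntLog2CeilSketch(1) == 0);
  return 0;
}
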
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/generate_real.h b/contrib/restricted/abseil-cpp/absl/random/internal/generate_real.h
index d5fbb44c24..6c08de7834 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/generate_real.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/generate_real.h
@@ -24,7 +24,7 @@
#include <type_traits>
#include "absl/meta/type_traits.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/random/internal/fastmath.h"
#include "absl/random/internal/traits.h"
@@ -120,7 +120,7 @@ inline RealType GenerateRealFromBits(uint64_t bits, int exp_bias = 0) {
// Number of leading zeros is mapped to the exponent: 2^-clz
// bits is 0..01xxxxxx. After shifting, we're left with 1xxx...0..0
- int clz = countl_zero(bits);
+ int clz = countl_zero(bits);
bits <<= (IncludeZero ? clz : (clz & 63)); // remove 0-bits.
exp -= clz; // set the exponent.
bits >>= (63 - kExp);
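
The hunk above maps the count of leading zero bits of a uniform draw onto the result's binary exponent. A rough standalone illustration of that mapping, assuming Abseil's bits header is available; this is a simplification, not the full GenerateRealFromBits logic:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include "absl/numeric/bits.h"

int main() {
  // Each additional leading zero halves the probability of the draw, so the
  // clz count can serve directly as a geometrically distributed exponent.
  const uint64_t draws[] = {0x8000000000000000ull,   // clz = 0
                            0x4000000000000000ull,   // clz = 1
                            0x0008000000000000ull};  // clz = 12
  for (uint64_t bits : draws) {
    int clz = absl::countl_zero(bits);
    double scale = std::ldexp(1.0, -(clz + 1));  // 2^-(clz+1)
    std::printf("clz=%2d -> scale=%.12g\n", clz, scale);
  }
  return 0;
}
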
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/mock_helpers.h b/contrib/restricted/abseil-cpp/absl/random/internal/mock_helpers.h
index 9d6ab21ef5..0b1180d708 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/mock_helpers.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/mock_helpers.h
@@ -80,13 +80,13 @@ class MockHelpers {
}
public:
- // InvokeMock is private; this provides access for some specialized use cases.
- template <typename URBG>
- static inline bool PrivateInvokeMock(URBG* urbg, IdType type,
- void* args_tuple, void* result) {
- return urbg->InvokeMock(type, args_tuple, result);
- }
-
+ // InvokeMock is private; this provides access for some specialized use cases.
+ template <typename URBG>
+ static inline bool PrivateInvokeMock(URBG* urbg, IdType type,
+ void* args_tuple, void* result) {
+ return urbg->InvokeMock(type, args_tuple, result);
+ }
+
// Invoke a mock for the KeyT (may or may not be a signature).
//
// KeyT is used to generate a typeid-based lookup key for the mock.
@@ -116,14 +116,14 @@ class MockHelpers {
// The mocked function signature will be composed from KeyT as:
// result_type(args...)
template <typename KeyT, typename MockURBG>
- static auto MockFor(MockURBG& m)
- -> decltype(m.template RegisterMock<
- typename KeySignature<KeyT>::result_type,
- typename KeySignature<KeyT>::arg_tuple_type>(
- m, std::declval<IdType>())) {
+ static auto MockFor(MockURBG& m)
+ -> decltype(m.template RegisterMock<
+ typename KeySignature<KeyT>::result_type,
+ typename KeySignature<KeyT>::arg_tuple_type>(
+ m, std::declval<IdType>())) {
return m.template RegisterMock<typename KeySignature<KeyT>::result_type,
typename KeySignature<KeyT>::arg_tuple_type>(
- m, ::absl::base_internal::FastTypeId<KeyT>());
+ m, ::absl::base_internal::FastTypeId<KeyT>());
}
};
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/mock_overload_set.h b/contrib/restricted/abseil-cpp/absl/random/internal/mock_overload_set.h
index 0d9c6c120c..6f3d33d917 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/mock_overload_set.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/mock_overload_set.h
@@ -44,12 +44,12 @@ struct MockSingleOverload<DistrT, Ret(MockingBitGen&, Args...)> {
"Overload signature must have return type matching the "
"distribution result_type.");
using KeyT = Ret(DistrT, std::tuple<Args...>);
-
- template <typename MockURBG>
- auto gmock_Call(MockURBG& gen, const ::testing::Matcher<Args>&... matchers)
+
+ template <typename MockURBG>
+ auto gmock_Call(MockURBG& gen, const ::testing::Matcher<Args>&... matchers)
-> decltype(MockHelpers::MockFor<KeyT>(gen).gmock_Call(matchers...)) {
- static_assert(std::is_base_of<MockingBitGen, MockURBG>::value,
- "Mocking requires an absl::MockingBitGen");
+ static_assert(std::is_base_of<MockingBitGen, MockURBG>::value,
+ "Mocking requires an absl::MockingBitGen");
return MockHelpers::MockFor<KeyT>(gen).gmock_Call(matchers...);
}
};
@@ -60,14 +60,14 @@ struct MockSingleOverload<DistrT, Ret(Arg, MockingBitGen&, Args...)> {
"Overload signature must have return type matching the "
"distribution result_type.");
using KeyT = Ret(DistrT, std::tuple<Arg, Args...>);
-
- template <typename MockURBG>
- auto gmock_Call(const ::testing::Matcher<Arg>& matcher, MockURBG& gen,
- const ::testing::Matcher<Args>&... matchers)
+
+ template <typename MockURBG>
+ auto gmock_Call(const ::testing::Matcher<Arg>& matcher, MockURBG& gen,
+ const ::testing::Matcher<Args>&... matchers)
-> decltype(MockHelpers::MockFor<KeyT>(gen).gmock_Call(matcher,
matchers...)) {
- static_assert(std::is_base_of<MockingBitGen, MockURBG>::value,
- "Mocking requires an absl::MockingBitGen");
+ static_assert(std::is_base_of<MockingBitGen, MockURBG>::value,
+ "Mocking requires an absl::MockingBitGen");
return MockHelpers::MockFor<KeyT>(gen).gmock_Call(matcher, matchers...);
}
};
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/pcg_engine.h b/contrib/restricted/abseil-cpp/absl/random/internal/pcg_engine.h
index 8efaf2e09a..0b07417aeb 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/pcg_engine.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/pcg_engine.h
@@ -19,7 +19,7 @@
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/random/internal/fastmath.h"
#include "absl/random/internal/iostream_state_saver.h"
@@ -262,7 +262,7 @@ struct pcg_xsl_rr_128_64 {
uint64_t rotate = h >> 58u;
uint64_t s = Uint128Low64(state) ^ h;
#endif
- return rotr(s, rotate);
+ return rotr(s, rotate);
}
};
@@ -282,8 +282,8 @@ struct pcg_xsh_rr_64_32 {
using state_type = uint64_t;
using result_type = uint32_t;
inline uint32_t operator()(uint64_t state) {
- return rotr(static_cast<uint32_t>(((state >> 18) ^ state) >> 27),
- state >> 59);
+ return rotr(static_cast<uint32_t>(((state >> 18) ^ state) >> 27),
+ state >> 59);
}
};
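
A standalone sketch of the XSH-RR output permutation shown in the hunk above (the function name is hypothetical; assumes Abseil's rotr is available): xorshift the state, keep the high 32 bits, then rotate right by the state's top 5 bits.

#include <cassert>
#include <cstdint>
#include "absl/numeric/bits.h"

inline uint32_t PcgXshRr64To32Sketch(uint64_t state) {
  // ((state >> 18) ^ state) >> 27 keeps the high bits after the xorshift;
  // state >> 59 selects the rotation amount from the top 5 bits.
  return absl::rotr(static_cast<uint32_t>(((state >> 18) ^ state) >> 27),
                    static_cast<int>(state >> 59));
}

int main() {
  // The permutation is a pure function of the state.
  assert(PcgXshRr64To32Sketch(0x123456789abcdef0ull) ==
         PcgXshRr64To32Sketch(0x123456789abcdef0ull));
  return 0;
}
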
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/randen.cc b/contrib/restricted/abseil-cpp/absl/random/internal/randen.cc
index c1bc044435..ad8ac3bb6d 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/randen.cc
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/randen.cc
@@ -17,7 +17,7 @@
#include "absl/base/internal/raw_logging.h"
#include "absl/random/internal/randen_detect.h"
-// RANDen = RANDom generator or beetroots in Swiss High German.
+// RANDen = RANDom generator or beetroots in Swiss High German.
// 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
// generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
//
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/randen.h b/contrib/restricted/abseil-cpp/absl/random/internal/randen.h
index 9a3840b8f1..acb34f3908 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/randen.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/randen.h
@@ -26,7 +26,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
-// RANDen = RANDom generator or beetroots in Swiss High German.
+// RANDen = RANDom generator or beetroots in Swiss High German.
// 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
// generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
//
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/randen_detect.cc b/contrib/restricted/abseil-cpp/absl/random/internal/randen_detect.cc
index bbe7b96532..c8de07ac7f 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/randen_detect.cc
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/randen_detect.cc
@@ -1,13 +1,13 @@
// Copyright 2017 The Abseil Authors.
//
-// Licensed under the Apache License, Version 2.0 (the "License");
+// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
+// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/randen_engine.h b/contrib/restricted/abseil-cpp/absl/random/internal/randen_engine.h
index 372c3ac2bd..870ee2caf4 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/randen_engine.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/randen_engine.h
@@ -23,7 +23,7 @@
#include <limits>
#include <type_traits>
-#include "absl/base/internal/endian.h"
+#include "absl/base/internal/endian.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/iostream_state_saver.h"
#include "absl/random/internal/randen.h"
@@ -77,7 +77,7 @@ class alignas(16) randen_engine {
impl_.Generate(state_);
}
- return little_endian::ToHost(state_[next_++]);
+ return little_endian::ToHost(state_[next_++]);
}
template <class SeedSequence>
@@ -189,8 +189,8 @@ class alignas(16) randen_engine {
// In the case that `elem` is `uint8_t`, it must be cast to something
// larger so that it prints as an integer rather than a character. For
      // simplicity, apply the cast in all circumstances.
- os << static_cast<numeric_type>(little_endian::FromHost(elem))
- << os.fill();
+ os << static_cast<numeric_type>(little_endian::FromHost(elem))
+ << os.fill();
}
os << engine.next_;
return os;
@@ -209,7 +209,7 @@ class alignas(16) randen_engine {
// necessary to read a wider type and then cast it to uint8_t.
numeric_type value;
is >> value;
- elem = little_endian::ToHost(static_cast<result_type>(value));
+ elem = little_endian::ToHost(static_cast<result_type>(value));
}
is >> next;
if (is.fail()) {
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/randen_hwaes.h b/contrib/restricted/abseil-cpp/absl/random/internal/randen_hwaes.h
index 71a7f69f25..4c241dc743 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/randen_hwaes.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/randen_hwaes.h
@@ -26,7 +26,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
-// RANDen = RANDom generator or beetroots in Swiss High German.
+// RANDen = RANDom generator or beetroots in Swiss High German.
// 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
// generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
//
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/randen_slow.h b/contrib/restricted/abseil-cpp/absl/random/internal/randen_slow.h
index 532c3a8991..abc23ce6b0 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/randen_slow.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/randen_slow.h
@@ -23,7 +23,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
-// RANDen = RANDom generator or beetroots in Swiss High German.
+// RANDen = RANDom generator or beetroots in Swiss High German.
// RandenSlow implements the basic state manipulation methods for
// architectures lacking AES hardware acceleration intrinsics.
class RandenSlow {
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/randen_traits.h b/contrib/restricted/abseil-cpp/absl/random/internal/randen_traits.h
index 120022c9fb..18e6d38d14 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/randen_traits.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/randen_traits.h
@@ -28,7 +28,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
-// RANDen = RANDom generator or beetroots in Swiss High German.
+// RANDen = RANDom generator or beetroots in Swiss High German.
// 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
// generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
//
diff --git a/contrib/restricted/abseil-cpp/absl/random/internal/wide_multiply.h b/contrib/restricted/abseil-cpp/absl/random/internal/wide_multiply.h
index b6e6c4b6aa..609d411f73 100644
--- a/contrib/restricted/abseil-cpp/absl/random/internal/wide_multiply.h
+++ b/contrib/restricted/abseil-cpp/absl/random/internal/wide_multiply.h
@@ -26,7 +26,7 @@
#endif
#include "absl/base/config.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/random/internal/traits.h"
diff --git a/contrib/restricted/abseil-cpp/absl/random/log_uniform_int_distribution.h b/contrib/restricted/abseil-cpp/absl/random/log_uniform_int_distribution.h
index 43e101169c..6c52d34d26 100644
--- a/contrib/restricted/abseil-cpp/absl/random/log_uniform_int_distribution.h
+++ b/contrib/restricted/abseil-cpp/absl/random/log_uniform_int_distribution.h
@@ -23,7 +23,7 @@
#include <ostream>
#include <type_traits>
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/random/internal/fastmath.h"
#include "absl/random/internal/generate_real.h"
#include "absl/random/internal/iostream_state_saver.h"
@@ -69,10 +69,10 @@ class log_uniform_int_distribution {
if (base_ == 2) {
// Determine where the first set bit is on range(), giving a log2(range)
// value which can be used to construct bounds.
- log_range_ =
- (std::min)(bit_width(range()),
- static_cast<unsigned_type>(
- std::numeric_limits<unsigned_type>::digits));
+ log_range_ =
+ (std::min)(bit_width(range()),
+ static_cast<unsigned_type>(
+ std::numeric_limits<unsigned_type>::digits));
} else {
// NOTE: Computing the logN(x) introduces error from 2 sources:
// 1. Conversion of int to double loses precision for values >=
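
A small illustration of the base-2 branch above, assuming absl::bit_width is available; unlike the real distribution, this sketch omits the std::min clamp against the type's digit count:

#include <cstdint>
#include <cstdio>
#include "absl/numeric/bits.h"

int main() {
  uint64_t range = 1000;  // hypothetical (max - min) of a distribution
  // The highest set bit of the range determines how many log2 buckets exist.
  unsigned log_range = static_cast<unsigned>(absl::bit_width(range));
  std::printf("range=%llu -> log_range=%u\n",
              static_cast<unsigned long long>(range), log_range);  // prints 10
  return 0;
}
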
diff --git a/contrib/restricted/abseil-cpp/absl/random/mocking_bit_gen.h b/contrib/restricted/abseil-cpp/absl/random/mocking_bit_gen.h
index 7b2b80eb35..89d7a2ec12 100644
--- a/contrib/restricted/abseil-cpp/absl/random/mocking_bit_gen.h
+++ b/contrib/restricted/abseil-cpp/absl/random/mocking_bit_gen.h
@@ -104,7 +104,7 @@ class BitGenRef;
class MockingBitGen {
public:
MockingBitGen() = default;
- ~MockingBitGen() = default;
+ ~MockingBitGen() = default;
// URBG interface
using result_type = absl::BitGen::result_type;
@@ -125,46 +125,46 @@ class MockingBitGen {
// NOTE: MockFnCaller is essentially equivalent to the lambda:
// [fn](auto... args) { return fn->Call(std::move(args)...)}
// however that fails to build on some supported platforms.
- template <typename MockFnType, typename ResultT, typename Tuple>
+ template <typename MockFnType, typename ResultT, typename Tuple>
struct MockFnCaller;
-
+
// specialization for std::tuple.
- template <typename MockFnType, typename ResultT, typename... Args>
- struct MockFnCaller<MockFnType, ResultT, std::tuple<Args...>> {
+ template <typename MockFnType, typename ResultT, typename... Args>
+ struct MockFnCaller<MockFnType, ResultT, std::tuple<Args...>> {
MockFnType* fn;
inline ResultT operator()(Args... args) {
return fn->Call(std::move(args)...);
}
};
- // FunctionHolder owns a particular ::testing::MockFunction associated with
-  // a mocked type signature, and implements the type-erased Apply call, which
- // applies type-erased arguments to the mock.
- class FunctionHolder {
- public:
- virtual ~FunctionHolder() = default;
-
- // Call is a dispatch function which converts the
- // generic type-erased parameters into a specific mock invocation call.
- virtual void Apply(/*ArgTupleT*/ void* args_tuple,
- /*ResultT*/ void* result) = 0;
- };
-
- template <typename MockFnType, typename ResultT, typename ArgTupleT>
- class FunctionHolderImpl final : public FunctionHolder {
- public:
- void Apply(void* args_tuple, void* result) override {
-      // Requires args_tuple to point to an ArgTupleT, which is a
- // std::tuple<Args...> used to invoke the mock function. Requires result
- // to point to a ResultT, which is the result of the call.
- *static_cast<ResultT*>(result) =
- absl::apply(MockFnCaller<MockFnType, ResultT, ArgTupleT>{&mock_fn_},
- *static_cast<ArgTupleT*>(args_tuple));
- }
-
- MockFnType mock_fn_;
- };
-
+ // FunctionHolder owns a particular ::testing::MockFunction associated with
+  // a mocked type signature, and implements the type-erased Apply call, which
+ // applies type-erased arguments to the mock.
+ class FunctionHolder {
+ public:
+ virtual ~FunctionHolder() = default;
+
+ // Call is a dispatch function which converts the
+ // generic type-erased parameters into a specific mock invocation call.
+ virtual void Apply(/*ArgTupleT*/ void* args_tuple,
+ /*ResultT*/ void* result) = 0;
+ };
+
+ template <typename MockFnType, typename ResultT, typename ArgTupleT>
+ class FunctionHolderImpl final : public FunctionHolder {
+ public:
+ void Apply(void* args_tuple, void* result) override {
+      // Requires args_tuple to point to an ArgTupleT, which is a
+ // std::tuple<Args...> used to invoke the mock function. Requires result
+ // to point to a ResultT, which is the result of the call.
+ *static_cast<ResultT*>(result) =
+ absl::apply(MockFnCaller<MockFnType, ResultT, ArgTupleT>{&mock_fn_},
+ *static_cast<ArgTupleT*>(args_tuple));
+ }
+
+ MockFnType mock_fn_;
+ };
+
// MockingBitGen::RegisterMock
//
// RegisterMock<ResultT, ArgTupleT>(FastTypeIdType) is the main extension
@@ -175,31 +175,31 @@ class MockingBitGen {
//
// The returned MockFunction<...> type can be used to setup additional
// distribution parameters of the expectation.
- template <typename ResultT, typename ArgTupleT, typename SelfT>
- auto RegisterMock(SelfT&, base_internal::FastTypeIdType type)
+ template <typename ResultT, typename ArgTupleT, typename SelfT>
+ auto RegisterMock(SelfT&, base_internal::FastTypeIdType type)
-> decltype(GetMockFnType(std::declval<ResultT>(),
std::declval<ArgTupleT>()))& {
- using MockFnType = decltype(GetMockFnType(std::declval<ResultT>(),
- std::declval<ArgTupleT>()));
-
- using WrappedFnType = absl::conditional_t<
- std::is_same<SelfT, ::testing::NiceMock<absl::MockingBitGen>>::value,
- ::testing::NiceMock<MockFnType>,
- absl::conditional_t<
- std::is_same<SelfT,
- ::testing::NaggyMock<absl::MockingBitGen>>::value,
- ::testing::NaggyMock<MockFnType>,
- absl::conditional_t<
- std::is_same<SelfT,
- ::testing::StrictMock<absl::MockingBitGen>>::value,
- ::testing::StrictMock<MockFnType>, MockFnType>>>;
-
- using ImplT = FunctionHolderImpl<WrappedFnType, ResultT, ArgTupleT>;
+ using MockFnType = decltype(GetMockFnType(std::declval<ResultT>(),
+ std::declval<ArgTupleT>()));
+
+ using WrappedFnType = absl::conditional_t<
+ std::is_same<SelfT, ::testing::NiceMock<absl::MockingBitGen>>::value,
+ ::testing::NiceMock<MockFnType>,
+ absl::conditional_t<
+ std::is_same<SelfT,
+ ::testing::NaggyMock<absl::MockingBitGen>>::value,
+ ::testing::NaggyMock<MockFnType>,
+ absl::conditional_t<
+ std::is_same<SelfT,
+ ::testing::StrictMock<absl::MockingBitGen>>::value,
+ ::testing::StrictMock<MockFnType>, MockFnType>>>;
+
+ using ImplT = FunctionHolderImpl<WrappedFnType, ResultT, ArgTupleT>;
auto& mock = mocks_[type];
- if (!mock) {
- mock = absl::make_unique<ImplT>();
+ if (!mock) {
+ mock = absl::make_unique<ImplT>();
}
- return static_cast<ImplT*>(mock.get())->mock_fn_;
+ return static_cast<ImplT*>(mock.get())->mock_fn_;
}
// MockingBitGen::InvokeMock
@@ -218,13 +218,13 @@ class MockingBitGen {
// Trigger a mock, if there exists one that matches `param`.
auto it = mocks_.find(type);
if (it == mocks_.end()) return false;
- it->second->Apply(args_tuple, result);
+ it->second->Apply(args_tuple, result);
return true;
}
- absl::flat_hash_map<base_internal::FastTypeIdType,
- std::unique_ptr<FunctionHolder>>
- mocks_;
+ absl::flat_hash_map<base_internal::FastTypeIdType,
+ std::unique_ptr<FunctionHolder>>
+ mocks_;
absl::BitGen gen_;
template <typename>
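
The FunctionHolder/FunctionHolderImpl code restored above is a type-erasure pattern: a virtual Apply() receives the argument tuple and result slot as void* and casts them back to concrete types. A gMock-free sketch of the same idea (all names here are hypothetical):

#include <cassert>
#include <functional>
#include <tuple>

struct Holder {
  virtual ~Holder() = default;
  // Type-erased dispatch: args_tuple points at a std::tuple of the real
  // arguments, result points at the real result object.
  virtual void Apply(void* args_tuple, void* result) = 0;
};

template <typename ResultT, typename... Args>
struct HolderImpl final : Holder {
  std::function<ResultT(Args...)> fn;
  void Apply(void* args_tuple, void* result) override {
    *static_cast<ResultT*>(result) =
        std::apply(fn, *static_cast<std::tuple<Args...>*>(args_tuple));
  }
};

int main() {
  HolderImpl<int, int, int> h;
  h.fn = [](int a, int b) { return a + b; };
  std::tuple<int, int> args{2, 3};
  int out = 0;
  static_cast<Holder&>(h).Apply(&args, &out);
  assert(out == 5);
  return 0;
}
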
diff --git a/contrib/restricted/abseil-cpp/absl/random/uniform_int_distribution.h b/contrib/restricted/abseil-cpp/absl/random/uniform_int_distribution.h
index c1f54ccebc..e71b2bcbe6 100644
--- a/contrib/restricted/abseil-cpp/absl/random/uniform_int_distribution.h
+++ b/contrib/restricted/abseil-cpp/absl/random/uniform_int_distribution.h
@@ -196,7 +196,7 @@ typename random_internal::make_unsigned_bits<IntType>::type
uniform_int_distribution<IntType>::Generate(
URBG& g, // NOLINT(runtime/references)
typename random_internal::make_unsigned_bits<IntType>::type R) {
- random_internal::FastUniformBits<unsigned_type> fast_bits;
+ random_internal::FastUniformBits<unsigned_type> fast_bits;
unsigned_type bits = fast_bits(g);
const unsigned_type Lim = R + 1;
if ((R & Lim) == 0) {
diff --git a/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.h b/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.h
index ac12940a6d..9a3a8df838 100644
--- a/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/status/internal/status_internal.h
@@ -19,20 +19,20 @@
#include "absl/container/inlined_vector.h"
#include "absl/strings/cord.h"
-#ifndef SWIG
-// Disabled for SWIG as it doesn't parse attributes correctly.
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-// Returned Status objects may not be ignored. Codesearch doesn't handle ifdefs
-// as part of class definitions (b/6995610), so we use a forward declaration.
-class ABSL_MUST_USE_RESULT Status;
-ABSL_NAMESPACE_END
-} // namespace absl
-#endif // !SWIG
-
+#ifndef SWIG
+// Disabled for SWIG as it doesn't parse attributes correctly.
namespace absl {
ABSL_NAMESPACE_BEGIN
+// Returned Status objects may not be ignored. Codesearch doesn't handle ifdefs
+// as part of class definitions (b/6995610), so we use a forward declaration.
+class ABSL_MUST_USE_RESULT Status;
+ABSL_NAMESPACE_END
+} // namespace absl
+#endif // !SWIG
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
enum class StatusCode : int;
namespace status_internal {
@@ -49,11 +49,11 @@ using Payloads = absl::InlinedVector<Payload, 1>;
struct StatusRep {
StatusRep(absl::StatusCode code_arg, absl::string_view message_arg,
std::unique_ptr<status_internal::Payloads> payloads_arg)
- : ref(int32_t{1}),
+ : ref(int32_t{1}),
code(code_arg),
message(message_arg),
payloads(std::move(payloads_arg)) {}
-
+
std::atomic<int32_t> ref;
absl::StatusCode code;
std::string message;
diff --git a/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h b/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h
index eaac2c0b14..668c1b00ed 100644
--- a/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h
@@ -17,7 +17,7 @@
#include <type_traits>
#include <utility>
-#include "absl/base/attributes.h"
+#include "absl/base/attributes.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/utility/utility.h"
@@ -136,14 +136,14 @@ class Helper {
public:
// Move type-agnostic error handling to the .cc.
static void HandleInvalidStatusCtorArg(Status*);
- ABSL_ATTRIBUTE_NORETURN static void Crash(const absl::Status& status);
+ ABSL_ATTRIBUTE_NORETURN static void Crash(const absl::Status& status);
};
// Construct an instance of T in `p` through placement new, passing Args... to
// the constructor.
// This abstraction is here mostly for the gcc performance fix.
template <typename T, typename... Args>
-ABSL_ATTRIBUTE_NONNULL(1) void PlacementNew(void* p, Args&&... args) {
+ABSL_ATTRIBUTE_NONNULL(1) void PlacementNew(void* p, Args&&... args) {
new (p) T(std::forward<Args>(args)...);
}
diff --git a/contrib/restricted/abseil-cpp/absl/status/status.cc b/contrib/restricted/abseil-cpp/absl/status/status.cc
index bcf3413e5f..04aeefb216 100644
--- a/contrib/restricted/abseil-cpp/absl/status/status.cc
+++ b/contrib/restricted/abseil-cpp/absl/status/status.cc
@@ -283,27 +283,27 @@ bool Status::EqualsSlow(const absl::Status& a, const absl::Status& b) {
return true;
}
-std::string Status::ToStringSlow(StatusToStringMode mode) const {
+std::string Status::ToStringSlow(StatusToStringMode mode) const {
std::string text;
absl::StrAppend(&text, absl::StatusCodeToString(code()), ": ", message());
- const bool with_payload = (mode & StatusToStringMode::kWithPayload) ==
- StatusToStringMode::kWithPayload;
-
- if (with_payload) {
- status_internal::StatusPayloadPrinter printer =
- status_internal::GetStatusPayloadPrinter();
- this->ForEachPayload([&](absl::string_view type_url,
- const absl::Cord& payload) {
- absl::optional<std::string> result;
- if (printer) result = printer(type_url, payload);
- absl::StrAppend(
- &text, " [", type_url, "='",
- result.has_value() ? *result : absl::CHexEscape(std::string(payload)),
- "']");
- });
- }
-
+ const bool with_payload = (mode & StatusToStringMode::kWithPayload) ==
+ StatusToStringMode::kWithPayload;
+
+ if (with_payload) {
+ status_internal::StatusPayloadPrinter printer =
+ status_internal::GetStatusPayloadPrinter();
+ this->ForEachPayload([&](absl::string_view type_url,
+ const absl::Cord& payload) {
+ absl::optional<std::string> result;
+ if (printer) result = printer(type_url, payload);
+ absl::StrAppend(
+ &text, " [", type_url, "='",
+ result.has_value() ? *result : absl::CHexEscape(std::string(payload)),
+ "']");
+ });
+ }
+
return text;
}
diff --git a/contrib/restricted/abseil-cpp/absl/status/status.h b/contrib/restricted/abseil-cpp/absl/status/status.h
index 39071e5f4a..d43f526336 100644
--- a/contrib/restricted/abseil-cpp/absl/status/status.h
+++ b/contrib/restricted/abseil-cpp/absl/status/status.h
@@ -58,7 +58,7 @@
#include "absl/functional/function_ref.h"
#include "absl/status/internal/status_internal.h"
#include "absl/strings/cord.h"
-#include "absl/strings/string_view.h"
+#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
namespace absl {
@@ -282,59 +282,59 @@ std::string StatusCodeToString(StatusCode code);
// Streams StatusCodeToString(code) to `os`.
std::ostream& operator<<(std::ostream& os, StatusCode code);
-// absl::StatusToStringMode
-//
-// An `absl::StatusToStringMode` is an enumerated type indicating how
+// absl::StatusToStringMode
+//
+// An `absl::StatusToStringMode` is an enumerated type indicating how
// `absl::Status::ToString()` should construct the output string for a non-ok
-// status.
-enum class StatusToStringMode : int {
- // ToString will not contain any extra data (such as payloads). It will only
- // contain the error code and message, if any.
- kWithNoExtraData = 0,
- // ToString will contain the payloads.
- kWithPayload = 1 << 0,
- // ToString will include all the extra data this Status has.
- kWithEverything = ~kWithNoExtraData,
+// status.
+enum class StatusToStringMode : int {
+ // ToString will not contain any extra data (such as payloads). It will only
+ // contain the error code and message, if any.
+ kWithNoExtraData = 0,
+ // ToString will contain the payloads.
+ kWithPayload = 1 << 0,
+ // ToString will include all the extra data this Status has.
+ kWithEverything = ~kWithNoExtraData,
// Default mode used by ToString. Its exact value might change in the future.
kDefault = kWithPayload,
-};
-
-// absl::StatusToStringMode is specified as a bitmask type, which means the
-// following operations must be provided:
-inline constexpr StatusToStringMode operator&(StatusToStringMode lhs,
- StatusToStringMode rhs) {
- return static_cast<StatusToStringMode>(static_cast<int>(lhs) &
- static_cast<int>(rhs));
-}
-inline constexpr StatusToStringMode operator|(StatusToStringMode lhs,
- StatusToStringMode rhs) {
- return static_cast<StatusToStringMode>(static_cast<int>(lhs) |
- static_cast<int>(rhs));
-}
-inline constexpr StatusToStringMode operator^(StatusToStringMode lhs,
- StatusToStringMode rhs) {
- return static_cast<StatusToStringMode>(static_cast<int>(lhs) ^
- static_cast<int>(rhs));
-}
-inline constexpr StatusToStringMode operator~(StatusToStringMode arg) {
- return static_cast<StatusToStringMode>(~static_cast<int>(arg));
-}
-inline StatusToStringMode& operator&=(StatusToStringMode& lhs,
- StatusToStringMode rhs) {
- lhs = lhs & rhs;
- return lhs;
-}
-inline StatusToStringMode& operator|=(StatusToStringMode& lhs,
- StatusToStringMode rhs) {
- lhs = lhs | rhs;
- return lhs;
-}
-inline StatusToStringMode& operator^=(StatusToStringMode& lhs,
- StatusToStringMode rhs) {
- lhs = lhs ^ rhs;
- return lhs;
-}
-
+};
+
+// absl::StatusToStringMode is specified as a bitmask type, which means the
+// following operations must be provided:
+inline constexpr StatusToStringMode operator&(StatusToStringMode lhs,
+ StatusToStringMode rhs) {
+ return static_cast<StatusToStringMode>(static_cast<int>(lhs) &
+ static_cast<int>(rhs));
+}
+inline constexpr StatusToStringMode operator|(StatusToStringMode lhs,
+ StatusToStringMode rhs) {
+ return static_cast<StatusToStringMode>(static_cast<int>(lhs) |
+ static_cast<int>(rhs));
+}
+inline constexpr StatusToStringMode operator^(StatusToStringMode lhs,
+ StatusToStringMode rhs) {
+ return static_cast<StatusToStringMode>(static_cast<int>(lhs) ^
+ static_cast<int>(rhs));
+}
+inline constexpr StatusToStringMode operator~(StatusToStringMode arg) {
+ return static_cast<StatusToStringMode>(~static_cast<int>(arg));
+}
+inline StatusToStringMode& operator&=(StatusToStringMode& lhs,
+ StatusToStringMode rhs) {
+ lhs = lhs & rhs;
+ return lhs;
+}
+inline StatusToStringMode& operator|=(StatusToStringMode& lhs,
+ StatusToStringMode rhs) {
+ lhs = lhs | rhs;
+ return lhs;
+}
+inline StatusToStringMode& operator^=(StatusToStringMode& lhs,
+ StatusToStringMode rhs) {
+ lhs = lhs ^ rhs;
+ return lhs;
+}
+
// absl::Status
//
// The `absl::Status` class is generally used to gracefully handle errors
@@ -416,12 +416,12 @@ inline StatusToStringMode& operator^=(StatusToStringMode& lhs,
// return result;
// }
//
-// For documentation see https://abseil.io/docs/cpp/guides/status.
-//
-// Returned Status objects may not be ignored. status_internal.h has a forward
-// declaration of the form
-// class ABSL_MUST_USE_RESULT Status;
-class Status final {
+// For documentation see https://abseil.io/docs/cpp/guides/status.
+//
+// Returned Status objects may not be ignored. status_internal.h has a forward
+// declaration of the form
+// class ABSL_MUST_USE_RESULT Status;
+class Status final {
public:
// Constructors
@@ -431,10 +431,10 @@ class Status final {
Status();
// Creates a status in the canonical error space with the specified
- // `absl::StatusCode` and error message. If `code == absl::StatusCode::kOk`, // NOLINT
+ // `absl::StatusCode` and error message. If `code == absl::StatusCode::kOk`, // NOLINT
// `msg` is ignored and an object identical to an OK status is constructed.
//
- // The `msg` string must be in UTF-8. The implementation may complain (e.g., // NOLINT
+ // The `msg` string must be in UTF-8. The implementation may complain (e.g., // NOLINT
// by printing a warning) if it is not.
Status(absl::StatusCode code, absl::string_view msg);
@@ -503,16 +503,16 @@ class Status final {
// Status::ToString()
//
-  // Returns a string based on the `mode`. By default, it returns a combination of
- // the error code name, the message and any associated payload messages. This
- // string is designed simply to be human readable and its exact format should
- // not be load bearing. Do not depend on the exact format of the result of
- // `ToString()` which is subject to change.
+  // Returns a string based on the `mode`. By default, it returns a combination of
+ // the error code name, the message and any associated payload messages. This
+ // string is designed simply to be human readable and its exact format should
+ // not be load bearing. Do not depend on the exact format of the result of
+ // `ToString()` which is subject to change.
//
// The printed code name and the message are generally substrings of the
// result, and the payloads to be printed use the status payload printer
// mechanism (which is internal).
- std::string ToString(
+ std::string ToString(
StatusToStringMode mode = StatusToStringMode::kDefault) const;
// Status::IgnoreError()
@@ -613,9 +613,9 @@ class Status final {
status_internal::Payloads* GetPayloads();
// Takes ownership of payload.
- static uintptr_t NewRep(
- absl::StatusCode code, absl::string_view msg,
- std::unique_ptr<status_internal::Payloads> payload);
+ static uintptr_t NewRep(
+ absl::StatusCode code, absl::string_view msg,
+ std::unique_ptr<status_internal::Payloads> payload);
static bool EqualsSlow(const absl::Status& a, const absl::Status& b);
// MSVC 14.0 limitation requires the const.
@@ -644,7 +644,7 @@ class Status final {
static uintptr_t PointerToRep(status_internal::StatusRep* r);
static status_internal::StatusRep* RepToPointer(uintptr_t r);
- std::string ToStringSlow(StatusToStringMode mode) const;
+ std::string ToStringSlow(StatusToStringMode mode) const;
// Status supports two different representations.
// - When the low bit is off it is an inlined representation.
@@ -767,11 +767,11 @@ inline Status::Status(Status&& x) noexcept : rep_(x.rep_) {
inline Status& Status::operator=(Status&& x) {
uintptr_t old_rep = rep_;
- if (x.rep_ != old_rep) {
- rep_ = x.rep_;
- x.rep_ = MovedFromRep();
- Unref(old_rep);
- }
+ if (x.rep_ != old_rep) {
+ rep_ = x.rep_;
+ x.rep_ = MovedFromRep();
+ Unref(old_rep);
+ }
return *this;
}
@@ -808,8 +808,8 @@ inline bool operator!=(const Status& lhs, const Status& rhs) {
return !(lhs == rhs);
}
-inline std::string Status::ToString(StatusToStringMode mode) const {
- return ok() ? "OK" : ToStringSlow(mode);
+inline std::string Status::ToString(StatusToStringMode mode) const {
+ return ok() ? "OK" : ToStringSlow(mode);
}
inline void Status::IgnoreError() const {
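
A brief usage sketch for the StatusToStringMode bitmask restored above, assuming absl/status is available; the overloaded |, &, and related operators let callers combine modes when asking ToString for more or less detail:

#include <cassert>
#include <string>
#include "absl/status/status.h"

int main() {
  absl::Status s = absl::InvalidArgumentError("bad input");
  // kWithNoExtraData suppresses payloads; kWithEverything includes them.
  std::string terse = s.ToString(absl::StatusToStringMode::kWithNoExtraData);
  std::string full = s.ToString(absl::StatusToStringMode::kWithEverything);
  assert(!terse.empty() && !full.empty());
  return 0;
}
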
diff --git a/contrib/restricted/abseil-cpp/absl/status/statusor.h b/contrib/restricted/abseil-cpp/absl/status/statusor.h
index c051fbb3aa..f6eaf41703 100644
--- a/contrib/restricted/abseil-cpp/absl/status/statusor.h
+++ b/contrib/restricted/abseil-cpp/absl/status/statusor.h
@@ -145,7 +145,7 @@ class ABSL_MUST_USE_RESULT StatusOr;
//
// NOTE: using `absl::StatusOr<T>::value()` when no valid value is present will
// throw an exception if exceptions are enabled or terminate the process when
-// exceptions are not enabled.
+// exceptions are not enabled.
//
// Example:
//
@@ -550,7 +550,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// StatusOr<T>::value_or()
//
- // Returns the current value if `this->ok() == true`. Otherwise constructs a
+ // Returns the current value if `this->ok() == true`. Otherwise constructs a
// value using the provided `default_value`.
//
// Unlike `value`, this function returns by value, copying the current value
diff --git a/contrib/restricted/abseil-cpp/absl/status/statusor/ya.make b/contrib/restricted/abseil-cpp/absl/status/statusor/ya.make
index 9628fcdffb..5c4e185687 100644
--- a/contrib/restricted/abseil-cpp/absl/status/statusor/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/status/statusor/ya.make
@@ -1,28 +1,28 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
contrib/restricted/abseil-cpp/absl/demangle
- contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/numeric
contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
contrib/restricted/abseil-cpp/absl/status
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/cord
contrib/restricted/abseil-cpp/absl/strings/internal/absl_cord_internal
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
@@ -35,26 +35,26 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/time
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
- contrib/restricted/abseil-cpp/absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp/absl/types/bad_optional_access
contrib/restricted/abseil-cpp/absl/types/bad_variant_access
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/status)
-
-SRCS(
+
+SRCS(
statusor.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/strings/charconv.cc b/contrib/restricted/abseil-cpp/absl/strings/charconv.cc
index fefcfc90a5..e9e2446602 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/charconv.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/charconv.cc
@@ -20,7 +20,7 @@
#include <cstring>
#include "absl/base/casts.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/strings/internal/charconv_bigint.h"
#include "absl/strings/internal/charconv_parse.h"
@@ -242,11 +242,11 @@ struct CalculatedFloat {
// Returns the bit width of the given uint128. (Equivalently, returns 128
// minus the number of leading zero bits.)
-unsigned BitWidth(uint128 value) {
+unsigned BitWidth(uint128 value) {
if (Uint128High64(value) == 0) {
- return static_cast<unsigned>(bit_width(Uint128Low64(value)));
+ return static_cast<unsigned>(bit_width(Uint128Low64(value)));
}
- return 128 - countl_zero(Uint128High64(value));
+ return 128 - countl_zero(Uint128High64(value));
}
// Calculates how far to the right a mantissa needs to be shifted to create a
@@ -519,7 +519,7 @@ CalculatedFloat CalculateFromParsedHexadecimal(
const strings_internal::ParsedFloat& parsed_hex) {
uint64_t mantissa = parsed_hex.mantissa;
int exponent = parsed_hex.exponent;
- auto mantissa_width = static_cast<unsigned>(bit_width(mantissa));
+ auto mantissa_width = static_cast<unsigned>(bit_width(mantissa));
const int shift = NormalizedShiftSize<FloatType>(mantissa_width, exponent);
bool result_exact;
exponent += shift;
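A small standalone sketch mirroring the `BitWidth()` helper in the hunk above, assuming `absl::bit_width`/`absl::countl_zero` from the `absl/numeric/bits.h` header this file now includes:

    #include <iostream>
    #include "absl/numeric/bits.h"
    #include "absl/numeric/int128.h"

    // Width of a uint128: width of the low word when the high word is zero,
    // otherwise 128 minus the leading zeros of the high word.
    unsigned BitWidth128(absl::uint128 value) {
      if (absl::Uint128High64(value) == 0) {
        return static_cast<unsigned>(absl::bit_width(absl::Uint128Low64(value)));
      }
      return 128 - static_cast<unsigned>(absl::countl_zero(absl::Uint128High64(value)));
    }

    int main() {
      std::cout << BitWidth128(absl::uint128(1) << 70) << "\n";  // 71
      return 0;
    }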
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cord.cc b/contrib/restricted/abseil-cpp/absl/strings/cord.cc
index 854047ca98..db0659b584 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cord.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/cord.cc
@@ -37,7 +37,7 @@
#include "absl/strings/escaping.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
-#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_scope.h"
#include "absl/strings/internal/cordz_update_tracker.h"
@@ -54,15 +54,15 @@ using ::absl::cord_internal::CordRep;
using ::absl::cord_internal::CordRepBtree;
using ::absl::cord_internal::CordRepConcat;
using ::absl::cord_internal::CordRepExternal;
-using ::absl::cord_internal::CordRepFlat;
+using ::absl::cord_internal::CordRepFlat;
using ::absl::cord_internal::CordRepSubstring;
using ::absl::cord_internal::CordzUpdateTracker;
using ::absl::cord_internal::InlineData;
using ::absl::cord_internal::kMaxFlatLength;
-using ::absl::cord_internal::kMinFlatLength;
+using ::absl::cord_internal::kMinFlatLength;
-using ::absl::cord_internal::kInlinedVectorSize;
-using ::absl::cord_internal::kMaxBytesToCopy;
+using ::absl::cord_internal::kInlinedVectorSize;
+using ::absl::cord_internal::kMaxBytesToCopy;
constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) {
return n == 0 ? a : Fibonacci(n - 1, b, a + b);
@@ -96,8 +96,8 @@ static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length);
static inline bool btree_enabled() {
return cord_internal::cord_btree_enabled.load(
- std::memory_order_relaxed);
-}
+ std::memory_order_relaxed);
+}
static inline bool IsRootBalanced(CordRep* node) {
if (!node->IsConcat()) {
@@ -114,8 +114,8 @@ static inline bool IsRootBalanced(CordRep* node) {
}
static CordRep* Rebalance(CordRep* node);
-static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
- int indent = 0);
+static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
+ int indent = 0);
static bool VerifyNode(CordRep* root, CordRep* start_node,
bool full_validation);
@@ -158,14 +158,14 @@ static void SetConcatChildren(CordRepConcat* concat, CordRep* left,
// The returned node has a refcount of 1.
static CordRep* RawConcat(CordRep* left, CordRep* right) {
// Avoid making degenerate concat nodes (one child is empty)
- if (left == nullptr) return right;
- if (right == nullptr) return left;
- if (left->length == 0) {
- CordRep::Unref(left);
+ if (left == nullptr) return right;
+ if (right == nullptr) return left;
+ if (left->length == 0) {
+ CordRep::Unref(left);
return right;
}
- if (right->length == 0) {
- CordRep::Unref(right);
+ if (right->length == 0) {
+ CordRep::Unref(right);
return left;
}
@@ -204,23 +204,23 @@ static CordRep* MakeBalancedTree(CordRep** reps, size_t n) {
return reps[0];
}
-static CordRepFlat* CreateFlat(const char* data, size_t length,
+static CordRepFlat* CreateFlat(const char* data, size_t length,
size_t alloc_hint) {
- CordRepFlat* flat = CordRepFlat::New(length + alloc_hint);
- flat->length = length;
- memcpy(flat->Data(), data, length);
- return flat;
-}
-
+ CordRepFlat* flat = CordRepFlat::New(length + alloc_hint);
+ flat->length = length;
+ memcpy(flat->Data(), data, length);
+ return flat;
+}
+
// Creates a new flat or Btree out of the specified array.
-// The returned node has a refcount of 1.
+// The returned node has a refcount of 1.
static CordRep* NewBtree(const char* data, size_t length, size_t alloc_hint) {
- if (length <= kMaxFlatLength) {
- return CreateFlat(data, length, alloc_hint);
+ if (length <= kMaxFlatLength) {
+ return CreateFlat(data, length, alloc_hint);
}
- CordRepFlat* flat = CreateFlat(data, kMaxFlatLength, 0);
- data += kMaxFlatLength;
- length -= kMaxFlatLength;
+ CordRepFlat* flat = CreateFlat(data, kMaxFlatLength, 0);
+ data += kMaxFlatLength;
+ length -= kMaxFlatLength;
auto* root = CordRepBtree::Create(flat);
return CordRepBtree::Append(root, {data, length}, alloc_hint);
}
@@ -231,14 +231,14 @@ static CordRep* NewTree(const char* data, size_t length, size_t alloc_hint) {
if (length == 0) return nullptr;
if (btree_enabled()) {
return NewBtree(data, length, alloc_hint);
- }
+ }
absl::FixedArray<CordRep*> reps((length - 1) / kMaxFlatLength + 1);
size_t n = 0;
do {
const size_t len = std::min(length, kMaxFlatLength);
- CordRepFlat* rep = CordRepFlat::New(len + alloc_hint);
+ CordRepFlat* rep = CordRepFlat::New(len + alloc_hint);
rep->length = len;
- memcpy(rep->Data(), data, len);
+ memcpy(rep->Data(), data, len);
reps[n++] = VerifyTree(rep);
data += len;
length -= len;
@@ -261,7 +261,7 @@ void InitializeCordRepExternal(absl::string_view data, CordRepExternal* rep) {
static CordRep* NewSubstring(CordRep* child, size_t offset, size_t length) {
// Never create empty substring nodes
if (length == 0) {
- CordRep::Unref(child);
+ CordRep::Unref(child);
return nullptr;
} else {
CordRepSubstring* rep = new CordRepSubstring();
@@ -312,29 +312,29 @@ inline void Cord::InlineRep::set_data(const char* data, size_t n,
bool nullify_tail) {
static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15");
- cord_internal::SmallMemmove(data_.as_chars(), data, n, nullify_tail);
- set_inline_size(n);
+ cord_internal::SmallMemmove(data_.as_chars(), data, n, nullify_tail);
+ set_inline_size(n);
}
inline char* Cord::InlineRep::set_data(size_t n) {
assert(n <= kMaxInline);
ResetToEmpty();
- set_inline_size(n);
- return data_.as_chars();
+ set_inline_size(n);
+ return data_.as_chars();
}
inline void Cord::InlineRep::reduce_size(size_t n) {
- size_t tag = inline_size();
+ size_t tag = inline_size();
assert(tag <= kMaxInline);
assert(tag >= n);
tag -= n;
- memset(data_.as_chars() + tag, 0, n);
- set_inline_size(static_cast<char>(tag));
+ memset(data_.as_chars() + tag, 0, n);
+ set_inline_size(static_cast<char>(tag));
}
inline void Cord::InlineRep::remove_prefix(size_t n) {
- cord_internal::SmallMemmove(data_.as_chars(), data_.as_chars() + n,
- inline_size() - n);
+ cord_internal::SmallMemmove(data_.as_chars(), data_.as_chars() + n,
+ inline_size() - n);
reduce_size(n);
}
@@ -342,8 +342,8 @@ inline void Cord::InlineRep::remove_prefix(size_t n) {
// Directly returns `rep` if `rep` is already a CordRepBtree.
static CordRepBtree* ForceBtree(CordRep* rep) {
return rep->IsBtree() ? rep->btree() : CordRepBtree::Create(rep);
-}
-
+}
+
void Cord::InlineRep::AppendTreeToInlined(CordRep* tree,
MethodIdentifier method) {
assert(!is_tree());
@@ -421,13 +421,13 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
size_t* size, size_t max_length) {
if (root->IsBtree() && root->refcount.IsMutable()) {
Span<char> span = root->btree()->GetAppendBuffer(max_length);
- if (!span.empty()) {
- *region = span.data();
- *size = span.size();
- return true;
- }
- }
-
+ if (!span.empty()) {
+ *region = span.data();
+ *size = span.size();
+ return true;
+ }
+ }
+
// Search down the right-hand path for a non-full FLAT node.
CordRep* dst = root;
while (dst->IsConcat() && dst->refcount.IsMutable()) {
@@ -441,7 +441,7 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
}
const size_t in_use = dst->length;
- const size_t capacity = dst->flat()->Capacity();
+ const size_t capacity = dst->flat()->Capacity();
if (in_use == capacity) {
*region = nullptr;
*size = 0;
@@ -456,7 +456,7 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
}
dst->length += size_increase;
- *region = dst->flat()->Data() + in_use;
+ *region = dst->flat()->Data() + in_use;
*size = size_increase;
return true;
}
@@ -474,8 +474,8 @@ void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
*region = data_.as_chars() + sz;
*size = has_length ? length : available;
set_inline_size(has_length ? sz + length : kMaxInline);
- return;
- }
+ return;
+ }
}
size_t extra = has_length ? length : (std::max)(sz, kMinFlatLength);
@@ -489,14 +489,14 @@ void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
// Allocate new node.
CordRepFlat* new_node = CordRepFlat::New(extra);
new_node->length = std::min(new_node->Capacity(), length);
- *region = new_node->Data();
+ *region = new_node->Data();
*size = new_node->length;
-
+
if (btree_enabled()) {
rep = CordRepBtree::Append(ForceBtree(rep), new_node);
} else {
rep = Concat(rep, new_node);
- }
+ }
CommitTree(root, rep, scope, method);
}
@@ -522,7 +522,7 @@ static bool RepMemoryUsageDataEdge(const CordRep* rep,
sizeof(cord_internal::CordRepExternalImpl<intptr_t>) +
rep->length;
return true;
- }
+ }
return false;
}
@@ -530,7 +530,7 @@ static bool RepMemoryUsageDataEdge(const CordRep* rep,
// will return true.
static bool RepMemoryUsageLeaf(const CordRep* rep, size_t* total_mem_usage) {
if (rep->IsFlat()) {
- *total_mem_usage += rep->flat()->AllocatedSize();
+ *total_mem_usage += rep->flat()->AllocatedSize();
return true;
}
if (rep->IsExternal()) {
@@ -570,7 +570,7 @@ void Cord::InlineRep::AssignSlow(const Cord::InlineRep& src) {
void Cord::InlineRep::UnrefTree() {
if (is_tree()) {
CordzInfo::MaybeUntrackCord(data_.cordz_info());
- CordRep::Unref(tree());
+ CordRep::Unref(tree());
}
}
@@ -612,9 +612,9 @@ void Cord::DestroyCordSlow() {
// Mutators
void Cord::Clear() {
- if (CordRep* tree = contents_.clear()) {
- CordRep::Unref(tree);
- }
+ if (CordRep* tree = contents_.clear()) {
+ CordRep::Unref(tree);
+ }
}
Cord& Cord::AssignLargeString(std::string&& src) {
@@ -680,15 +680,15 @@ void Cord::InlineRep::AppendArray(absl::string_view src,
memcpy(region, src.data(), appended);
}
} else {
- // Try to fit in the inline buffer if possible.
- size_t inline_length = inline_size();
+ // Try to fit in the inline buffer if possible.
+ size_t inline_length = inline_size();
if (src.size() <= kMaxInline - inline_length) {
- // Append new data to embedded array
+ // Append new data to embedded array
memcpy(data_.as_chars() + inline_length, src.data(), src.size());
set_inline_size(inline_length + src.size());
- return;
- }
-
+ return;
+ }
+
// Allocate flat to be a perfect fit on first append exceeding inlined size.
// Subsequent growth will use amortized growth until we reach maximum flat
// size.
@@ -725,12 +725,12 @@ void Cord::InlineRep::AppendArray(absl::string_view src,
length = std::max<size_t>(rep->length / 10, src.size());
}
rep = Concat(rep, NewTree(src.data(), src.size(), length - src.size()));
- }
+ }
CommitTree(root, rep, scope, method);
}
inline CordRep* Cord::TakeRep() const& {
- return CordRep::Ref(contents_.tree());
+ return CordRep::Ref(contents_.tree());
}
inline CordRep* Cord::TakeRep() && {
@@ -781,7 +781,7 @@ inline void Cord::AppendImpl(C&& src) {
return;
}
- // Guaranteed to be a tree (kMaxBytesToCopy > kInlinedSize)
+ // Guaranteed to be a tree (kMaxBytesToCopy > kInlinedSize)
CordRep* rep = std::forward<C>(src).TakeRep();
contents_.AppendTree(rep, CordzUpdateTracker::kAppendCord);
}
@@ -809,7 +809,7 @@ template void Cord::Append(std::string&& src);
void Cord::Prepend(const Cord& src) {
CordRep* src_tree = src.contents_.tree();
if (src_tree != nullptr) {
- CordRep::Ref(src_tree);
+ CordRep::Ref(src_tree);
contents_.PrependTree(src_tree, CordzUpdateTracker::kPrependCord);
return;
}
@@ -821,17 +821,17 @@ void Cord::Prepend(const Cord& src) {
void Cord::PrependArray(absl::string_view src, MethodIdentifier method) {
if (src.empty()) return; // memcpy(_, nullptr, 0) is undefined.
- if (!contents_.is_tree()) {
- size_t cur_size = contents_.inline_size();
- if (cur_size + src.size() <= InlineRep::kMaxInline) {
- // Use embedded storage.
- char data[InlineRep::kMaxInline + 1] = {0};
- memcpy(data, src.data(), src.size());
- memcpy(data + src.size(), contents_.data(), cur_size);
- memcpy(contents_.data_.as_chars(), data, InlineRep::kMaxInline + 1);
- contents_.set_inline_size(cur_size + src.size());
- return;
- }
+ if (!contents_.is_tree()) {
+ size_t cur_size = contents_.inline_size();
+ if (cur_size + src.size() <= InlineRep::kMaxInline) {
+ // Use embedded storage.
+ char data[InlineRep::kMaxInline + 1] = {0};
+ memcpy(data, src.data(), src.size());
+ memcpy(data + src.size(), contents_.data(), cur_size);
+ memcpy(contents_.data_.as_chars(), data, InlineRep::kMaxInline + 1);
+ contents_.set_inline_size(cur_size + src.size());
+ return;
+ }
}
CordRep* rep = NewTree(src.data(), src.size(), 0);
contents_.PrependTree(rep, method);
@@ -851,7 +851,7 @@ template void Cord::Prepend(std::string&& src);
static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
if (n >= node->length) return nullptr;
- if (n == 0) return CordRep::Ref(node);
+ if (n == 0) return CordRep::Ref(node);
absl::InlinedVector<CordRep*, kInlinedVectorSize> rhs_stack;
while (node->IsConcat()) {
@@ -869,7 +869,7 @@ static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
assert(n <= node->length);
if (n == 0) {
- CordRep::Ref(node);
+ CordRep::Ref(node);
} else {
size_t start = n;
size_t len = node->length - n;
@@ -878,10 +878,10 @@ static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
start += node->substring()->start;
node = node->substring()->child;
}
- node = NewSubstring(CordRep::Ref(node), start, len);
+ node = NewSubstring(CordRep::Ref(node), start, len);
}
while (!rhs_stack.empty()) {
- node = Concat(node, CordRep::Ref(rhs_stack.back()));
+ node = Concat(node, CordRep::Ref(rhs_stack.back()));
rhs_stack.pop_back();
}
return node;
@@ -892,7 +892,7 @@ static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
// edited in place iff that node and all its ancestors have a refcount of 1.
static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) {
if (n >= node->length) return nullptr;
- if (n == 0) return CordRep::Ref(node);
+ if (n == 0) return CordRep::Ref(node);
absl::InlinedVector<CordRep*, kInlinedVectorSize> lhs_stack;
bool inplace_ok = node->refcount.IsMutable();
@@ -912,11 +912,11 @@ static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) {
assert(n <= node->length);
if (n == 0) {
- CordRep::Ref(node);
+ CordRep::Ref(node);
} else if (inplace_ok && !node->IsExternal()) {
// Consider making a new buffer if the current node capacity is much
// larger than the new length.
- CordRep::Ref(node);
+ CordRep::Ref(node);
node->length -= n;
} else {
size_t start = 0;
@@ -925,10 +925,10 @@ static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) {
start = node->substring()->start;
node = node->substring()->child;
}
- node = NewSubstring(CordRep::Ref(node), start, len);
+ node = NewSubstring(CordRep::Ref(node), start, len);
}
while (!lhs_stack.empty()) {
- node = Concat(CordRep::Ref(lhs_stack.back()), node);
+ node = Concat(CordRep::Ref(lhs_stack.back()), node);
lhs_stack.pop_back();
}
return node;
@@ -1006,13 +1006,13 @@ static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) {
results.pop_back();
results.push_back(Concat(left, right));
} else if (pos == 0 && n == node->length) {
- results.push_back(CordRep::Ref(node));
+ results.push_back(CordRep::Ref(node));
} else if (!node->IsConcat()) {
if (node->IsSubstring()) {
pos += node->substring()->start;
node = node->substring()->child;
}
- results.push_back(NewSubstring(CordRep::Ref(node), pos, n));
+ results.push_back(NewSubstring(CordRep::Ref(node), pos, n));
} else if (pos + n <= node->concat()->left->length) {
todo.push_back(SubRange(node->concat()->left, pos, n));
} else if (pos >= node->concat()->left->length) {
@@ -1056,7 +1056,7 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const {
++it;
}
cord_internal::SmallMemmove(dest, it->data(), remaining_size);
- sub_cord.contents_.set_inline_size(new_size);
+ sub_cord.contents_.set_inline_size(new_size);
return sub_cord;
}
@@ -1100,9 +1100,9 @@ class CordForest {
concat_node->left = concat_freelist_;
concat_freelist_ = concat_node;
} else {
- CordRep::Ref(concat_node->right);
- CordRep::Ref(concat_node->left);
- CordRep::Unref(concat_node);
+ CordRep::Ref(concat_node->right);
+ CordRep::Ref(concat_node->left);
+ CordRep::Unref(concat_node);
}
} else {
AddNode(node);
@@ -1252,13 +1252,13 @@ bool ComputeCompareResult<bool>(int memcmp_res) {
// Helper routine. Locates the first flat or external chunk of the Cord without
// initializing the iterator, and returns a string_view referencing the data.
inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
- if (!is_tree()) {
- return absl::string_view(data_.as_chars(), data_.inline_size());
+ if (!is_tree()) {
+ return absl::string_view(data_.as_chars(), data_.inline_size());
}
CordRep* node = tree();
if (node->IsFlat()) {
- return absl::string_view(node->flat()->Data(), node->length);
+ return absl::string_view(node->flat()->Data(), node->length);
}
if (node->IsExternal()) {
@@ -1272,8 +1272,8 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
tree = tree->Edge(CordRepBtree::kFront)->btree();
}
return tree->Data(tree->begin());
- }
-
+ }
+
// Walk down the left branches until we hit a non-CONCAT node.
while (node->IsConcat()) {
node = node->concat()->left;
@@ -1290,7 +1290,7 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
}
if (node->IsFlat()) {
- return absl::string_view(node->flat()->Data() + offset, length);
+ return absl::string_view(node->flat()->Data() + offset, length);
}
assert(node->IsExternal() && "Expect FLAT or EXTERNAL node here");
@@ -1473,22 +1473,22 @@ void Cord::CopyToArraySlowPath(char* dst) const {
}
}
-Cord::ChunkIterator& Cord::ChunkIterator::AdvanceStack() {
- auto& stack_of_right_children = stack_of_right_children_;
- if (stack_of_right_children.empty()) {
+Cord::ChunkIterator& Cord::ChunkIterator::AdvanceStack() {
+ auto& stack_of_right_children = stack_of_right_children_;
+ if (stack_of_right_children.empty()) {
assert(!current_chunk_.empty()); // Called on invalid iterator.
// We have reached the end of the Cord.
return *this;
}
// Process the next node on the stack.
- CordRep* node = stack_of_right_children.back();
- stack_of_right_children.pop_back();
+ CordRep* node = stack_of_right_children.back();
+ stack_of_right_children.pop_back();
// Walk down the left branches until we hit a non-CONCAT node. Save the
// right children to the stack for subsequent traversal.
while (node->IsConcat()) {
- stack_of_right_children.push_back(node->concat()->right);
+ stack_of_right_children.push_back(node->concat()->right);
node = node->concat()->left;
}
@@ -1532,30 +1532,30 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
}
return subcord;
}
-
+
if (btree_reader_) {
- size_t chunk_size = current_chunk_.size();
- if (n <= chunk_size && n <= kMaxBytesToCopy) {
+ size_t chunk_size = current_chunk_.size();
+ if (n <= chunk_size && n <= kMaxBytesToCopy) {
subcord = Cord(current_chunk_.substr(0, n), method);
if (n < chunk_size) {
current_chunk_.remove_prefix(n);
} else {
current_chunk_ = btree_reader_.Next();
}
- } else {
+ } else {
CordRep* rep;
current_chunk_ = btree_reader_.Read(n, chunk_size, rep);
subcord.contents_.EmplaceTree(rep, method);
- }
+ }
bytes_remaining_ -= n;
- return subcord;
- }
-
- auto& stack_of_right_children = stack_of_right_children_;
+ return subcord;
+ }
+
+ auto& stack_of_right_children = stack_of_right_children_;
if (n < current_chunk_.size()) {
// Range to read is a proper subrange of the current chunk.
assert(current_leaf_ != nullptr);
- CordRep* subnode = CordRep::Ref(current_leaf_);
+ CordRep* subnode = CordRep::Ref(current_leaf_);
const char* data = subnode->IsExternal() ? subnode->external()->base
: subnode->flat()->Data();
subnode = NewSubstring(subnode, current_chunk_.data() - data, n);
@@ -1567,7 +1567,7 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
// Range to read begins with a proper subrange of the current chunk.
assert(!current_chunk_.empty());
assert(current_leaf_ != nullptr);
- CordRep* subnode = CordRep::Ref(current_leaf_);
+ CordRep* subnode = CordRep::Ref(current_leaf_);
if (current_chunk_.size() < subnode->length) {
const char* data = subnode->IsExternal() ? subnode->external()->base
: subnode->flat()->Data();
@@ -1580,20 +1580,20 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
// Process the next node(s) on the stack, reading whole subtrees depending on
// their length and how many bytes we are advancing.
CordRep* node = nullptr;
- while (!stack_of_right_children.empty()) {
- node = stack_of_right_children.back();
- stack_of_right_children.pop_back();
+ while (!stack_of_right_children.empty()) {
+ node = stack_of_right_children.back();
+ stack_of_right_children.pop_back();
if (node->length > n) break;
// TODO(qrczak): This might unnecessarily recreate existing concat nodes.
// Avoiding that would need pretty complicated logic (instead of
- // current_leaf, keep current_subtree_ which points to the highest node
+ // current_leaf, keep current_subtree_ which points to the highest node
// such that the current leaf can be found on the path of left children
// starting from current_subtree_; delay creating subnode while node is
// below current_subtree_; find the proper node along the path of left
// children starting from current_subtree_ if this loop exits while staying
// below current_subtree_; etc.; alternatively, push parents instead of
// right children on the stack).
- subnode = Concat(subnode, CordRep::Ref(node));
+ subnode = Concat(subnode, CordRep::Ref(node));
n -= node->length;
bytes_remaining_ -= node->length;
node = nullptr;
@@ -1611,11 +1611,11 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
while (node->IsConcat()) {
if (node->concat()->left->length > n) {
// Push right, descend left.
- stack_of_right_children.push_back(node->concat()->right);
+ stack_of_right_children.push_back(node->concat()->right);
node = node->concat()->left;
} else {
// Read left, descend right.
- subnode = Concat(subnode, CordRep::Ref(node->concat()->left));
+ subnode = Concat(subnode, CordRep::Ref(node->concat()->left));
n -= node->concat()->left->length;
bytes_remaining_ -= node->concat()->left->length;
node = node->concat()->right;
@@ -1634,9 +1634,9 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
// chunk.
assert(node->IsExternal() || node->IsFlat());
assert(length > n);
- if (n > 0) {
- subnode = Concat(subnode, NewSubstring(CordRep::Ref(node), offset, n));
- }
+ if (n > 0) {
+ subnode = Concat(subnode, NewSubstring(CordRep::Ref(node), offset, n));
+ }
const char* data =
node->IsExternal() ? node->external()->base : node->flat()->Data();
current_chunk_ = absl::string_view(data + offset + n, length - n);
@@ -1654,19 +1654,19 @@ void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) {
n -= current_chunk_.size();
bytes_remaining_ -= current_chunk_.size();
- if (stack_of_right_children_.empty()) {
- // We have reached the end of the Cord.
- assert(bytes_remaining_ == 0);
- return;
- }
-
+ if (stack_of_right_children_.empty()) {
+ // We have reached the end of the Cord.
+ assert(bytes_remaining_ == 0);
+ return;
+ }
+
// Process the next node(s) on the stack, skipping whole subtrees depending on
// their length and how many bytes we are advancing.
CordRep* node = nullptr;
- auto& stack_of_right_children = stack_of_right_children_;
- while (!stack_of_right_children.empty()) {
- node = stack_of_right_children.back();
- stack_of_right_children.pop_back();
+ auto& stack_of_right_children = stack_of_right_children_;
+ while (!stack_of_right_children.empty()) {
+ node = stack_of_right_children.back();
+ stack_of_right_children.pop_back();
if (node->length > n) break;
n -= node->length;
bytes_remaining_ -= node->length;
@@ -1684,7 +1684,7 @@ void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) {
while (node->IsConcat()) {
if (node->concat()->left->length > n) {
// Push right, descend left.
- stack_of_right_children.push_back(node->concat()->right);
+ stack_of_right_children.push_back(node->concat()->right);
node = node->concat()->left;
} else {
// Skip left, descend right.
@@ -1723,7 +1723,7 @@ char Cord::operator[](size_t i) const {
assert(offset < rep->length);
if (rep->IsFlat()) {
// Get the "i"th character directly from the flat array.
- return rep->flat()->Data()[offset];
+ return rep->flat()->Data()[offset];
} else if (rep->IsBtree()) {
return rep->btree()->GetCharacter(offset);
} else if (rep->IsExternal()) {
@@ -1757,9 +1757,9 @@ absl::string_view Cord::FlattenSlowPath() {
// Try to put the contents into a new flat rep. If they won't fit in the
// biggest possible flat node, use an external rep instead.
if (total_size <= kMaxFlatLength) {
- new_rep = CordRepFlat::New(total_size);
+ new_rep = CordRepFlat::New(total_size);
new_rep->length = total_size;
- new_buffer = new_rep->flat()->Data();
+ new_buffer = new_rep->flat()->Data();
CopyToArraySlowPath(new_buffer);
} else {
new_buffer = std::allocator<char>().allocate(total_size);
@@ -1779,7 +1779,7 @@ absl::string_view Cord::FlattenSlowPath() {
/* static */ bool Cord::GetFlatAux(CordRep* rep, absl::string_view* fragment) {
assert(rep != nullptr);
if (rep->IsFlat()) {
- *fragment = absl::string_view(rep->flat()->Data(), rep->length);
+ *fragment = absl::string_view(rep->flat()->Data(), rep->length);
return true;
} else if (rep->IsExternal()) {
*fragment = absl::string_view(rep->external()->base, rep->length);
@@ -1789,8 +1789,8 @@ absl::string_view Cord::FlattenSlowPath() {
} else if (rep->IsSubstring()) {
CordRep* child = rep->substring()->child;
if (child->IsFlat()) {
- *fragment = absl::string_view(
- child->flat()->Data() + rep->substring()->start, rep->length);
+ *fragment = absl::string_view(
+ child->flat()->Data() + rep->substring()->start, rep->length);
return true;
} else if (child->IsExternal()) {
*fragment = absl::string_view(
@@ -1808,14 +1808,14 @@ absl::string_view Cord::FlattenSlowPath() {
absl::cord_internal::CordRep* rep,
absl::FunctionRef<void(absl::string_view)> callback) {
if (rep->IsBtree()) {
- ChunkIterator it(rep), end;
- while (it != end) {
- callback(*it);
- ++it;
- }
- return;
- }
-
+ ChunkIterator it(rep), end;
+ while (it != end) {
+ callback(*it);
+ ++it;
+ }
+ return;
+ }
+
assert(rep != nullptr);
int stack_pos = 0;
constexpr int stack_max = 128;
@@ -1857,8 +1857,8 @@ absl::string_view Cord::FlattenSlowPath() {
}
}
-static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
- int indent) {
+static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
+ int indent) {
const int kIndentStep = 1;
absl::InlinedVector<CordRep*, kInlinedVectorSize> stack;
absl::InlinedVector<int, kInlinedVectorSize> indents;
@@ -1880,7 +1880,7 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
*os << "SUBSTRING @ " << rep->substring()->start << "\n";
indent += kIndentStep;
rep = rep->substring()->child;
- } else { // Leaf or ring
+ } else { // Leaf or ring
if (rep->IsExternal()) {
*os << "EXTERNAL [";
if (include_data)
@@ -1889,9 +1889,9 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
} else if (rep->IsFlat()) {
*os << "FLAT cap=" << rep->flat()->Capacity() << " [";
if (include_data)
- *os << absl::CEscape(std::string(rep->flat()->Data(), rep->length));
+ *os << absl::CEscape(std::string(rep->flat()->Data(), rep->length));
*os << "]\n";
- } else {
+ } else {
CordRepBtree::Dump(rep, /*label=*/ "", include_data, *os);
}
if (stack.empty()) break;
@@ -2026,14 +2026,14 @@ std::ostream& operator<<(std::ostream& out, const Cord& cord) {
}
namespace strings_internal {
-size_t CordTestAccess::FlatOverhead() { return cord_internal::kFlatOverhead; }
-size_t CordTestAccess::MaxFlatLength() { return cord_internal::kMaxFlatLength; }
+size_t CordTestAccess::FlatOverhead() { return cord_internal::kFlatOverhead; }
+size_t CordTestAccess::MaxFlatLength() { return cord_internal::kMaxFlatLength; }
size_t CordTestAccess::FlatTagToLength(uint8_t tag) {
- return cord_internal::TagToLength(tag);
+ return cord_internal::TagToLength(tag);
}
uint8_t CordTestAccess::LengthToTag(size_t s) {
ABSL_INTERNAL_CHECK(s <= kMaxFlatLength, absl::StrCat("Invalid length ", s));
- return cord_internal::AllocatedSizeToTag(s + cord_internal::kFlatOverhead);
+ return cord_internal::AllocatedSizeToTag(s + cord_internal::kFlatOverhead);
}
size_t CordTestAccess::SizeofCordRepConcat() { return sizeof(CordRepConcat); }
size_t CordTestAccess::SizeofCordRepExternal() {
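A hedged sketch of the chunked layout the cord.cc routines above maintain, using only the public `absl::Cord` API; the strings and output formatting are illustrative:

    #include <iostream>
    #include <string>
    #include "absl/strings/cord.h"

    int main() {
      // Appends and prepends reuse or add flat chunks instead of copying the
      // whole buffer, which is what the append-region helpers above arrange.
      absl::Cord c("hello ");
      c.Append("world");
      c.Prepend(">> ");
      // Chunk-wise iteration exposes the flat/external pieces walked above.
      for (absl::string_view chunk : c.Chunks()) {
        std::cout << "[" << chunk << "]";
      }
      std::cout << "\n" << std::string(c) << "\n";
      return 0;
    }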
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cord.h b/contrib/restricted/abseil-cpp/absl/strings/cord.h
index f0a1991471..b0589b54ab 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cord.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/cord.h
@@ -25,7 +25,7 @@
//
// Because a Cord consists of these chunks, data can be added to or removed from
// a Cord during its lifetime. Chunks may also be shared between Cords. Unlike a
-// `std::string`, a Cord can therefore accommodate data that changes over its
+// `std::string`, a Cord can therefore accommodate data that changes over its
// lifetime, though it's not quite "mutable"; it can change only in the
// attachment, detachment, or rearrangement of chunks of its constituent data.
//
@@ -81,14 +81,14 @@
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_btree_reader.h"
-#include "absl/strings/internal/cord_rep_ring.h"
+#include "absl/strings/internal/cord_rep_ring.h"
#include "absl/strings/internal/cordz_functions.h"
#include "absl/strings/internal/cordz_info.h"
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_scope.h"
#include "absl/strings/internal/cordz_update_tracker.h"
#include "absl/strings/internal/resize_uninitialized.h"
-#include "absl/strings/internal/string_constant.h"
+#include "absl/strings/internal/string_constant.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
@@ -294,7 +294,7 @@ class Cord {
bool StartsWith(const Cord& rhs) const;
bool StartsWith(absl::string_view rhs) const;
- // Cord::EndsWith()
+ // Cord::EndsWith()
//
// Determines whether the Cord ends with the passed string data `rhs`.
bool EndsWith(absl::string_view rhs) const;
@@ -368,38 +368,38 @@ class Cord {
friend class CharIterator;
private:
- using CordRep = absl::cord_internal::CordRep;
+ using CordRep = absl::cord_internal::CordRep;
using CordRepBtree = absl::cord_internal::CordRepBtree;
using CordRepBtreeReader = absl::cord_internal::CordRepBtreeReader;
-
- // Stack of right children of concat nodes that we have to visit.
- // Keep this at the end of the structure to avoid cache-thrashing.
- // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
- // the inlined vector size (47 exists for backward compatibility).
- using Stack = absl::InlinedVector<absl::cord_internal::CordRep*, 47>;
-
- // Constructs a `begin()` iterator from `tree`. `tree` must not be null.
- explicit ChunkIterator(cord_internal::CordRep* tree);
-
+
+ // Stack of right children of concat nodes that we have to visit.
+ // Keep this at the end of the structure to avoid cache-thrashing.
+ // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
+ // the inlined vector size (47 exists for backward compatibility).
+ using Stack = absl::InlinedVector<absl::cord_internal::CordRep*, 47>;
+
+ // Constructs a `begin()` iterator from `tree`. `tree` must not be null.
+ explicit ChunkIterator(cord_internal::CordRep* tree);
+
// Constructs a `begin()` iterator from `cord`.
explicit ChunkIterator(const Cord* cord);
- // Initializes this instance from a tree. Invoked by constructors.
- void InitTree(cord_internal::CordRep* tree);
-
+ // Initializes this instance from a tree. Invoked by constructors.
+ void InitTree(cord_internal::CordRep* tree);
+
// Removes `n` bytes from `current_chunk_`. Expects `n` to be smaller than
// `current_chunk_.size()`.
void RemoveChunkPrefix(size_t n);
Cord AdvanceAndReadBytes(size_t n);
void AdvanceBytes(size_t n);
-
- // Stack specific operator++
- ChunkIterator& AdvanceStack();
-
+
+ // Stack specific operator++
+ ChunkIterator& AdvanceStack();
+
// Btree specific operator++
ChunkIterator& AdvanceBtree();
void AdvanceBytesBtree(size_t n);
-
+
// Iterates `n` bytes, where `n` is expected to be greater than or equal to
// `current_chunk_.size()`.
void AdvanceBytesSlowPath(size_t n);
@@ -413,12 +413,12 @@ class Cord {
absl::cord_internal::CordRep* current_leaf_ = nullptr;
// The number of bytes left in the `Cord` over which we are iterating.
size_t bytes_remaining_ = 0;
-
+
// Cord reader for cord btrees. Empty if not traversing a btree.
CordRepBtreeReader btree_reader_;
-
- // See 'Stack' alias definition.
- Stack stack_of_right_children_;
+
+ // See 'Stack' alias definition.
+ Stack stack_of_right_children_;
};
// Cord::ChunkIterator::chunk_begin()
@@ -680,14 +680,14 @@ class Cord {
return c.HashFragmented(std::move(hash_state));
}
- // Create a Cord with the contents of StringConstant<T>::value.
- // No allocations will be done and no data will be copied.
- // This is an INTERNAL API and subject to change or removal. This API can only
- // be used by spelling absl::strings_internal::MakeStringConstant, which is
- // also an internal API.
- template <typename T>
- explicit constexpr Cord(strings_internal::StringConstant<T>);
-
+ // Create a Cord with the contents of StringConstant<T>::value.
+ // No allocations will be done and no data will be copied.
+ // This is an INTERNAL API and subject to change or removal. This API can only
+ // be used by spelling absl::strings_internal::MakeStringConstant, which is
+ // also an internal API.
+ template <typename T>
+ explicit constexpr Cord(strings_internal::StringConstant<T>);
+
private:
using CordRep = absl::cord_internal::CordRep;
using CordRepFlat = absl::cord_internal::CordRepFlat;
@@ -732,8 +732,8 @@ class Cord {
InlineRep& operator=(const InlineRep& src);
InlineRep& operator=(InlineRep&& src) noexcept;
- explicit constexpr InlineRep(cord_internal::InlineData data);
-
+ explicit constexpr InlineRep(cord_internal::InlineData data);
+
void Swap(InlineRep* rhs);
bool empty() const;
size_t size() const;
@@ -743,7 +743,7 @@ class Cord {
char* set_data(size_t n); // Write data to the result
// Returns nullptr if holding bytes
absl::cord_internal::CordRep* tree() const;
- absl::cord_internal::CordRep* as_tree() const;
+ absl::cord_internal::CordRep* as_tree() const;
// Returns non-null iff was holding a pointer
absl::cord_internal::CordRep* clear();
// Converts to pointer if necessary.
@@ -820,31 +820,31 @@ class Cord {
memcpy(&(*dst)[0], &data_, sizeof(data_) - 1);
// erase is faster than resize because the logic for memory allocation is
// not needed.
- dst->erase(inline_size());
+ dst->erase(inline_size());
}
// Copies the inline contents into `dst`. Assumes the cord is not empty.
void CopyToArray(char* dst) const;
- bool is_tree() const { return data_.is_tree(); }
-
- // Returns true if the Cord is being profiled by cordz.
- bool is_profiled() const { return data_.is_tree() && data_.is_profiled(); }
-
- // Returns the profiled CordzInfo, or nullptr if not sampled.
- absl::cord_internal::CordzInfo* cordz_info() const {
- return data_.cordz_info();
- }
-
- // Sets the profiled CordzInfo. `cordz_info` must not be null.
- void set_cordz_info(cord_internal::CordzInfo* cordz_info) {
- assert(cordz_info != nullptr);
- data_.set_cordz_info(cordz_info);
- }
-
- // Resets the current cordz_info to null / empty.
- void clear_cordz_info() { data_.clear_cordz_info(); }
-
+ bool is_tree() const { return data_.is_tree(); }
+
+ // Returns true if the Cord is being profiled by cordz.
+ bool is_profiled() const { return data_.is_tree() && data_.is_profiled(); }
+
+ // Returns the profiled CordzInfo, or nullptr if not sampled.
+ absl::cord_internal::CordzInfo* cordz_info() const {
+ return data_.cordz_info();
+ }
+
+ // Sets the profiled CordzInfo. `cordz_info` must not be null.
+ void set_cordz_info(cord_internal::CordzInfo* cordz_info) {
+ assert(cordz_info != nullptr);
+ data_.set_cordz_info(cordz_info);
+ }
+
+ // Resets the current cordz_info to null / empty.
+ void clear_cordz_info() { data_.clear_cordz_info(); }
+
private:
friend class Cord;
@@ -854,8 +854,8 @@ class Cord {
void ResetToEmpty() { data_ = {}; }
- void set_inline_size(size_t size) { data_.set_inline_size(size); }
- size_t inline_size() const { return data_.inline_size(); }
+ void set_inline_size(size_t size) { data_.set_inline_size(size); }
+ size_t inline_size() const { return data_.inline_size(); }
cord_internal::InlineData data_;
};
@@ -1019,17 +1019,17 @@ Cord MakeCordFromExternal(absl::string_view data, Releaser&& releaser) {
return cord;
}
-constexpr Cord::InlineRep::InlineRep(cord_internal::InlineData data)
- : data_(data) {}
-
-inline Cord::InlineRep::InlineRep(const Cord::InlineRep& src)
+constexpr Cord::InlineRep::InlineRep(cord_internal::InlineData data)
+ : data_(data) {}
+
+inline Cord::InlineRep::InlineRep(const Cord::InlineRep& src)
: data_(InlineData::kDefaultInit) {
if (CordRep* tree = src.tree()) {
EmplaceTree(CordRep::Ref(tree), src.data_,
CordzUpdateTracker::kConstructorCord);
} else {
data_ = src.data_;
- }
+ }
}
inline Cord::InlineRep::InlineRep(Cord::InlineRep&& src) : data_(src.data_) {
@@ -1066,26 +1066,26 @@ inline void Cord::InlineRep::Swap(Cord::InlineRep* rhs) {
}
inline const char* Cord::InlineRep::data() const {
- return is_tree() ? nullptr : data_.as_chars();
-}
-
-inline absl::cord_internal::CordRep* Cord::InlineRep::as_tree() const {
- assert(data_.is_tree());
- return data_.as_tree();
+ return is_tree() ? nullptr : data_.as_chars();
}
+inline absl::cord_internal::CordRep* Cord::InlineRep::as_tree() const {
+ assert(data_.is_tree());
+ return data_.as_tree();
+}
+
inline absl::cord_internal::CordRep* Cord::InlineRep::tree() const {
if (is_tree()) {
- return as_tree();
+ return as_tree();
} else {
return nullptr;
}
}
-inline bool Cord::InlineRep::empty() const { return data_.is_empty(); }
+inline bool Cord::InlineRep::empty() const { return data_.is_empty(); }
inline size_t Cord::InlineRep::size() const {
- return is_tree() ? as_tree()->length : inline_size();
+ return is_tree() ? as_tree()->length : inline_size();
}
inline cord_internal::CordRepFlat* Cord::InlineRep::MakeFlatWithExtraCapacity(
@@ -1151,9 +1151,9 @@ inline absl::cord_internal::CordRep* Cord::InlineRep::clear() {
inline void Cord::InlineRep::CopyToArray(char* dst) const {
assert(!is_tree());
- size_t n = inline_size();
+ size_t n = inline_size();
assert(n != 0);
- cord_internal::SmallMemmove(dst, data_.as_chars(), n);
+ cord_internal::SmallMemmove(dst, data_.as_chars(), n);
}
constexpr inline Cord::Cord() noexcept {}
@@ -1161,16 +1161,16 @@ constexpr inline Cord::Cord() noexcept {}
inline Cord::Cord(absl::string_view src)
: Cord(src, CordzUpdateTracker::kConstructorString) {}
-template <typename T>
-constexpr Cord::Cord(strings_internal::StringConstant<T>)
- : contents_(strings_internal::StringConstant<T>::value.size() <=
- cord_internal::kMaxInline
- ? cord_internal::InlineData(
- strings_internal::StringConstant<T>::value)
- : cord_internal::InlineData(
- &cord_internal::ConstInitExternalStorage<
- strings_internal::StringConstant<T>>::value)) {}
-
+template <typename T>
+constexpr Cord::Cord(strings_internal::StringConstant<T>)
+ : contents_(strings_internal::StringConstant<T>::value.size() <=
+ cord_internal::kMaxInline
+ ? cord_internal::InlineData(
+ strings_internal::StringConstant<T>::value)
+ : cord_internal::InlineData(
+ &cord_internal::ConstInitExternalStorage<
+ strings_internal::StringConstant<T>>::value)) {}
+
inline Cord& Cord::operator=(const Cord& x) {
contents_ = x.contents_;
return *this;
@@ -1185,8 +1185,8 @@ Cord& Cord::operator=(T&& src) {
}
}
-inline Cord::Cord(const Cord& src) : contents_(src.contents_) {}
-
+inline Cord::Cord(const Cord& src) : contents_(src.contents_) {}
+
inline Cord::Cord(Cord&& src) noexcept : contents_(std::move(src.contents_)) {}
inline void Cord::swap(Cord& other) noexcept {
@@ -1273,64 +1273,64 @@ inline bool Cord::StartsWith(absl::string_view rhs) const {
return EqualsImpl(rhs, rhs_size);
}
-inline void Cord::ChunkIterator::InitTree(cord_internal::CordRep* tree) {
+inline void Cord::ChunkIterator::InitTree(cord_internal::CordRep* tree) {
if (tree->tag == cord_internal::BTREE) {
current_chunk_ = btree_reader_.Init(tree->btree());
- return;
- }
-
- stack_of_right_children_.push_back(tree);
- operator++();
-}
-
-inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree)
- : bytes_remaining_(tree->length) {
- InitTree(tree);
-}
-
+ return;
+ }
+
+ stack_of_right_children_.push_back(tree);
+ operator++();
+}
+
+inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree)
+ : bytes_remaining_(tree->length) {
+ InitTree(tree);
+}
+
inline Cord::ChunkIterator::ChunkIterator(const Cord* cord)
: bytes_remaining_(cord->size()) {
if (cord->contents_.is_tree()) {
- InitTree(cord->contents_.as_tree());
+ InitTree(cord->contents_.as_tree());
} else {
- current_chunk_ =
- absl::string_view(cord->contents_.data(), bytes_remaining_);
+ current_chunk_ =
+ absl::string_view(cord->contents_.data(), bytes_remaining_);
}
}
inline Cord::ChunkIterator& Cord::ChunkIterator::AdvanceBtree() {
current_chunk_ = btree_reader_.Next();
- return *this;
-}
-
+ return *this;
+}
+
inline void Cord::ChunkIterator::AdvanceBytesBtree(size_t n) {
- assert(n >= current_chunk_.size());
- bytes_remaining_ -= n;
- if (bytes_remaining_) {
- if (n == current_chunk_.size()) {
+ assert(n >= current_chunk_.size());
+ bytes_remaining_ -= n;
+ if (bytes_remaining_) {
+ if (n == current_chunk_.size()) {
current_chunk_ = btree_reader_.Next();
- } else {
+ } else {
size_t offset = btree_reader_.length() - bytes_remaining_;
current_chunk_ = btree_reader_.Seek(offset);
- }
- } else {
- current_chunk_ = {};
- }
-}
-
-inline Cord::ChunkIterator& Cord::ChunkIterator::operator++() {
- ABSL_HARDENING_ASSERT(bytes_remaining_ > 0 &&
- "Attempted to iterate past `end()`");
- assert(bytes_remaining_ >= current_chunk_.size());
- bytes_remaining_ -= current_chunk_.size();
- if (bytes_remaining_ > 0) {
+ }
+ } else {
+ current_chunk_ = {};
+ }
+}
+
+inline Cord::ChunkIterator& Cord::ChunkIterator::operator++() {
+ ABSL_HARDENING_ASSERT(bytes_remaining_ > 0 &&
+ "Attempted to iterate past `end()`");
+ assert(bytes_remaining_ >= current_chunk_.size());
+ bytes_remaining_ -= current_chunk_.size();
+ if (bytes_remaining_ > 0) {
return btree_reader_ ? AdvanceBtree() : AdvanceStack();
- } else {
- current_chunk_ = {};
- }
- return *this;
-}
-
+ } else {
+ current_chunk_ = {};
+ }
+ return *this;
+}
+
inline Cord::ChunkIterator Cord::ChunkIterator::operator++(int) {
ChunkIterator tmp(*this);
operator++();
@@ -1362,7 +1362,7 @@ inline void Cord::ChunkIterator::RemoveChunkPrefix(size_t n) {
}
inline void Cord::ChunkIterator::AdvanceBytes(size_t n) {
- assert(bytes_remaining_ >= n);
+ assert(bytes_remaining_ >= n);
if (ABSL_PREDICT_TRUE(n < current_chunk_.size())) {
RemoveChunkPrefix(n);
} else if (n != 0) {
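A minimal sketch of the `MakeCordFromExternal()` entry point appearing in this header; the buffer and the no-op releaser are illustrative assumptions, not taken from the patch:

    #include <iostream>
    #include "absl/strings/cord.h"
    #include "absl/strings/string_view.h"

    int main() {
      // Wraps caller-owned bytes without copying; the releaser runs once the
      // Cord (and any Cords sharing the chunk) stop referencing the buffer.
      static const char kBlob[] = "externally owned bytes";
      absl::Cord c = absl::MakeCordFromExternal(
          absl::string_view(kBlob, sizeof(kBlob) - 1),
          [](absl::string_view) { /* release the buffer here if heap-owned */ });
      std::cout << c.size() << "\n";
      return 0;
    }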
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cord/ya.make b/contrib/restricted/abseil-cpp/absl/strings/cord/ya.make
index b3654c86d0..df2c3b8c5a 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cord/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/strings/cord/ya.make
@@ -1,30 +1,30 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
contrib/restricted/abseil-cpp/absl/algorithm
- contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/container
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
contrib/restricted/abseil-cpp/absl/demangle
contrib/restricted/abseil-cpp/absl/functional
- contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/numeric
contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_cord_internal
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions
@@ -36,26 +36,26 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
contrib/restricted/abseil-cpp/absl/types
- contrib/restricted/abseil-cpp/absl/types/bad_optional_access
+ contrib/restricted/abseil-cpp/absl/types/bad_optional_access
contrib/restricted/abseil-cpp/absl/utility
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
-SRCDIR(contrib/restricted/abseil-cpp/absl/strings)
-
-SRCS(
- cord.cc
-)
-
-END()
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
+SRCDIR(contrib/restricted/abseil-cpp/absl/strings)
+
+SRCS(
+ cord.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/strings/escaping.cc b/contrib/restricted/abseil-cpp/absl/strings/escaping.cc
index 18b20b83fd..b7a6525a49 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/escaping.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/escaping.cc
@@ -137,7 +137,7 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
// Copy the escape sequence for the null character
const ptrdiff_t octal_size = p + 1 - octal_start;
*d++ = '\\';
- memmove(d, octal_start, octal_size);
+ memmove(d, octal_start, octal_size);
d += octal_size;
break;
}
@@ -170,7 +170,7 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
// Copy the escape sequence for the null character
const ptrdiff_t hex_size = p + 1 - hex_start;
*d++ = '\\';
- memmove(d, hex_start, hex_size);
+ memmove(d, hex_start, hex_size);
d += hex_size;
break;
}
@@ -203,7 +203,7 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
if ((rune == 0) && leave_nulls_escaped) {
// Copy the escape sequence for the null character
*d++ = '\\';
- memmove(d, hex_start, 5); // u0000
+ memmove(d, hex_start, 5); // u0000
d += 5;
break;
}
@@ -251,7 +251,7 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
if ((rune == 0) && leave_nulls_escaped) {
// Copy the escape sequence for the null character
*d++ = '\\';
- memmove(d, hex_start, 9); // U00000000
+ memmove(d, hex_start, 9); // U00000000
d += 9;
break;
}
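A small usage sketch of the public wrapper around the `CUnescapeInternal()` code above; the input string is illustrative:

    #include <iostream>
    #include <string>
    #include "absl/strings/escaping.h"

    int main() {
      // Expands C-style escapes such as \t and \x41, the same sequences the
      // null-preserving branches above copy through verbatim.
      std::string out;
      if (absl::CUnescape("tab\\tand\\x41", &out)) {
        std::cout << out << "\n";  // "tab<TAB>andA"
      }
      return 0;
    }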
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal/ya.make b/contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal/ya.make
index 2c62f6421a..a1c2096292 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal/ya.make
@@ -9,9 +9,9 @@ OWNER(g:cpp-contrib)
LICENSE(Apache-2.0)
PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
contrib/restricted/abseil-cpp/absl/base/log_severity
)
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_parse.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_parse.cc
index d29acaf462..a476cd262c 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_parse.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_parse.cc
@@ -246,8 +246,8 @@ constexpr int DigitMagnitude<16>() {
// ConsumeDigits does not protect against overflow on *out; max_digits must
// be chosen with respect to type T to avoid the possibility of overflow.
template <int base, typename T>
-int ConsumeDigits(const char* begin, const char* end, int max_digits, T* out,
- bool* dropped_nonzero_digit) {
+int ConsumeDigits(const char* begin, const char* end, int max_digits, T* out,
+ bool* dropped_nonzero_digit) {
if (base == 10) {
assert(max_digits <= std::numeric_limits<T>::digits10);
} else if (base == 16) {
@@ -282,7 +282,7 @@ int ConsumeDigits(const char* begin, const char* end, int max_digits, T* out,
*dropped_nonzero_digit = true;
}
*out = accumulator;
- return static_cast<int>(begin - original_begin);
+ return static_cast<int>(begin - original_begin);
}
// Returns true if `v` is one of the chars allowed inside parentheses following
@@ -372,7 +372,7 @@ strings_internal::ParsedFloat ParseFloat(const char* begin, const char* end,
int exponent_adjustment = 0;
bool mantissa_is_inexact = false;
- int pre_decimal_digits = ConsumeDigits<base>(
+ int pre_decimal_digits = ConsumeDigits<base>(
begin, end, MantissaDigitsMax<base>(), &mantissa, &mantissa_is_inexact);
begin += pre_decimal_digits;
int digits_left;
@@ -398,14 +398,14 @@ strings_internal::ParsedFloat ParseFloat(const char* begin, const char* end,
while (begin < end && *begin == '0') {
++begin;
}
- int zeros_skipped = static_cast<int>(begin - begin_zeros);
+ int zeros_skipped = static_cast<int>(begin - begin_zeros);
if (zeros_skipped >= DigitLimit<base>()) {
// refuse to parse pathological inputs
return result;
}
exponent_adjustment -= static_cast<int>(zeros_skipped);
}
- int post_decimal_digits = ConsumeDigits<base>(
+ int post_decimal_digits = ConsumeDigits<base>(
begin, end, digits_left, &mantissa, &mantissa_is_inexact);
begin += post_decimal_digits;
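A hedged sketch of the public entry point these parsing helpers serve, assuming `absl::from_chars` from `absl/strings/charconv.h`; the input text is illustrative:

    #include <iostream>
    #include "absl/strings/charconv.h"

    int main() {
      // ConsumeDigits()/ParseFloat() above back absl::from_chars, which parses
      // floating point without locales or allocation.
      const char text[] = "3.14159tail";
      double value = 0;
      absl::from_chars_result result =
          absl::from_chars(text, text + sizeof(text) - 1, value);
      std::cout << value << " (stopped at offset " << (result.ptr - text) << ")\n";
      return 0;
    }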
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.cc
index 1767e6fcc5..2fdbd88583 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.cc
@@ -1,89 +1,89 @@
-// Copyright 2020 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include "absl/strings/internal/cord_internal.h"
-
-#include <atomic>
-#include <cassert>
-#include <memory>
-
-#include "absl/container/inlined_vector.h"
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "absl/strings/internal/cord_internal.h"
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
+#include "absl/container/inlined_vector.h"
#include "absl/strings/internal/cord_rep_btree.h"
-#include "absl/strings/internal/cord_rep_flat.h"
-#include "absl/strings/internal/cord_rep_ring.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace cord_internal {
-
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
ABSL_CONST_INIT std::atomic<bool> cord_btree_enabled(kCordEnableBtreeDefault);
-ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(
- kCordEnableRingBufferDefault);
-ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled(
- kCordShallowSubcordsDefault);
+ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(
+ kCordEnableRingBufferDefault);
+ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled(
+ kCordShallowSubcordsDefault);
ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
-
-void CordRep::Destroy(CordRep* rep) {
- assert(rep != nullptr);
-
- absl::InlinedVector<CordRep*, Constants::kInlinedVectorSize> pending;
- while (true) {
- assert(!rep->refcount.IsImmortal());
- if (rep->tag == CONCAT) {
- CordRepConcat* rep_concat = rep->concat();
- CordRep* right = rep_concat->right;
- if (!right->refcount.Decrement()) {
- pending.push_back(right);
- }
- CordRep* left = rep_concat->left;
- delete rep_concat;
- rep = nullptr;
- if (!left->refcount.Decrement()) {
- rep = left;
- continue;
- }
+
+void CordRep::Destroy(CordRep* rep) {
+ assert(rep != nullptr);
+
+ absl::InlinedVector<CordRep*, Constants::kInlinedVectorSize> pending;
+ while (true) {
+ assert(!rep->refcount.IsImmortal());
+ if (rep->tag == CONCAT) {
+ CordRepConcat* rep_concat = rep->concat();
+ CordRep* right = rep_concat->right;
+ if (!right->refcount.Decrement()) {
+ pending.push_back(right);
+ }
+ CordRep* left = rep_concat->left;
+ delete rep_concat;
+ rep = nullptr;
+ if (!left->refcount.Decrement()) {
+ rep = left;
+ continue;
+ }
} else if (rep->tag == BTREE) {
CordRepBtree::Destroy(rep->btree());
rep = nullptr;
- } else if (rep->tag == RING) {
- CordRepRing::Destroy(rep->ring());
- rep = nullptr;
- } else if (rep->tag == EXTERNAL) {
- CordRepExternal::Delete(rep);
- rep = nullptr;
- } else if (rep->tag == SUBSTRING) {
- CordRepSubstring* rep_substring = rep->substring();
- CordRep* child = rep_substring->child;
- delete rep_substring;
- rep = nullptr;
- if (!child->refcount.Decrement()) {
- rep = child;
- continue;
- }
- } else {
- CordRepFlat::Delete(rep);
- rep = nullptr;
- }
-
- if (!pending.empty()) {
- rep = pending.back();
- pending.pop_back();
- } else {
- break;
- }
- }
-}
-
-} // namespace cord_internal
-ABSL_NAMESPACE_END
-} // namespace absl
+ } else if (rep->tag == RING) {
+ CordRepRing::Destroy(rep->ring());
+ rep = nullptr;
+ } else if (rep->tag == EXTERNAL) {
+ CordRepExternal::Delete(rep);
+ rep = nullptr;
+ } else if (rep->tag == SUBSTRING) {
+ CordRepSubstring* rep_substring = rep->substring();
+ CordRep* child = rep_substring->child;
+ delete rep_substring;
+ rep = nullptr;
+ if (!child->refcount.Decrement()) {
+ rep = child;
+ continue;
+ }
+ } else {
+ CordRepFlat::Delete(rep);
+ rep = nullptr;
+ }
+
+ if (!pending.empty()) {
+ rep = pending.back();
+ pending.pop_back();
+ } else {
+ break;
+ }
+ }
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
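Note on the CordRep::Destroy() hunk above: it tears down CONCAT and SUBSTRING chains iteratively, keeping an explicit stack of children whose refcount reached zero instead of recursing. Below is a minimal standalone sketch of that pattern; Node, Unref and the plain int refcount are hypothetical stand-ins for the real cord_internal types, not Abseil API.

#include <cassert>
#include <vector>

struct Node {
  int refs = 1;           // plain int for the sketch; the real code uses atomics
  Node* left = nullptr;   // children owned via reference counts
  Node* right = nullptr;
};

// Drops one reference; returns true when the count hits zero.
bool Unref(Node* n) {
  assert(n->refs > 0);
  return --n->refs == 0;
}

// Frees `rep` (whose last reference is already gone) and any children whose
// counts drop to zero, using an explicit stack instead of recursion.
void Destroy(Node* rep) {
  std::vector<Node*> pending;
  while (rep != nullptr || !pending.empty()) {
    if (rep == nullptr) {
      rep = pending.back();
      pending.pop_back();
    }
    Node* left = rep->left;
    Node* right = rep->right;
    delete rep;
    rep = nullptr;
    if (right != nullptr && Unref(right)) pending.push_back(right);
    if (left != nullptr && Unref(left)) rep = left;  // keep walking the left spine
  }
}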
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h
index bfe5564e46..6aaad5021a 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h
@@ -1,4 +1,4 @@
-// Copyright 2021 The Abseil Authors.
+// Copyright 2021 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@
#include <cstdint>
#include <type_traits>
-#include "absl/base/config.h"
-#include "absl/base/internal/endian.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/endian.h"
#include "absl/base/internal/invoke.h"
-#include "absl/base/optimization.h"
+#include "absl/base/optimization.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
@@ -33,19 +33,19 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-class CordzInfo;
-
-// Default feature enable states for cord ring buffers
-enum CordFeatureDefaults {
+class CordzInfo;
+
+// Default feature enable states for cord ring buffers
+enum CordFeatureDefaults {
kCordEnableBtreeDefault = true,
- kCordEnableRingBufferDefault = false,
- kCordShallowSubcordsDefault = false
-};
-
+ kCordEnableRingBufferDefault = false,
+ kCordShallowSubcordsDefault = false
+};
+
extern std::atomic<bool> cord_btree_enabled;
-extern std::atomic<bool> cord_ring_buffer_enabled;
-extern std::atomic<bool> shallow_subcords_enabled;
-
+extern std::atomic<bool> cord_ring_buffer_enabled;
+extern std::atomic<bool> shallow_subcords_enabled;
+
// `cord_btree_exhaustive_validation` can be set to force exhaustive validation
// in debug assertions, and code that calls `IsValid()` explicitly. By default,
// assertions should be relatively cheap and AssertValid() can easily lead to
@@ -56,48 +56,48 @@ inline void enable_cord_btree(bool enable) {
cord_btree_enabled.store(enable, std::memory_order_relaxed);
}
-inline void enable_cord_ring_buffer(bool enable) {
- cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
-}
-
-inline void enable_shallow_subcords(bool enable) {
- shallow_subcords_enabled.store(enable, std::memory_order_relaxed);
-}
-
-enum Constants {
- // The inlined size to use with absl::InlinedVector.
- //
- // Note: The InlinedVectors in this file (and in cord.h) do not need to use
- // the same value for their inlined size. The fact that they do is historical.
- // It may be desirable for each to use a different inlined size optimized for
- // that InlinedVector's usage.
- //
- // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
- // the inlined vector size (47 exists for backward compatibility).
- kInlinedVectorSize = 47,
-
- // Prefer copying blocks of at most this size, otherwise reference count.
- kMaxBytesToCopy = 511
-};
-
+inline void enable_cord_ring_buffer(bool enable) {
+ cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
+}
+
+inline void enable_shallow_subcords(bool enable) {
+ shallow_subcords_enabled.store(enable, std::memory_order_relaxed);
+}
+
+enum Constants {
+ // The inlined size to use with absl::InlinedVector.
+ //
+ // Note: The InlinedVectors in this file (and in cord.h) do not need to use
+ // the same value for their inlined size. The fact that they do is historical.
+ // It may be desirable for each to use a different inlined size optimized for
+ // that InlinedVector's usage.
+ //
+ // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
+ // the inlined vector size (47 exists for backward compatibility).
+ kInlinedVectorSize = 47,
+
+ // Prefer copying blocks of at most this size, otherwise reference count.
+ kMaxBytesToCopy = 511
+};
+
// Compact class for tracking the reference count and state flags for CordRep
// instances. Data is stored in an atomic int32_t for compactness and speed.
class RefcountAndFlags {
public:
constexpr RefcountAndFlags() : count_{kRefIncrement} {}
- struct Immortal {};
+ struct Immortal {};
explicit constexpr RefcountAndFlags(Immortal) : count_(kImmortalFlag) {}
struct WithCrc {};
explicit constexpr RefcountAndFlags(WithCrc)
: count_(kCrcFlag | kRefIncrement) {}
- // Increments the reference count. Imposes no memory ordering.
- inline void Increment() {
- count_.fetch_add(kRefIncrement, std::memory_order_relaxed);
- }
+ // Increments the reference count. Imposes no memory ordering.
+ inline void Increment() {
+ count_.fetch_add(kRefIncrement, std::memory_order_relaxed);
+ }
// Asserts that the current refcount is greater than 0. If the refcount is
- // greater than 1, decrements the reference count.
+ // greater than 1, decrements the reference count.
//
// Returns false if there are no references outstanding; true otherwise.
// Inserts barriers to ensure that state written before this method returns
@@ -106,24 +106,24 @@ class RefcountAndFlags {
inline bool Decrement() {
int32_t refcount = count_.load(std::memory_order_acquire) & kRefcountMask;
assert(refcount > 0 || refcount & kImmortalFlag);
- return refcount != kRefIncrement &&
+ return refcount != kRefIncrement &&
(count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
kRefcountMask) != kRefIncrement;
}
// Same as Decrement but expect that refcount is greater than 1.
inline bool DecrementExpectHighRefcount() {
- int32_t refcount =
+ int32_t refcount =
count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
kRefcountMask;
assert(refcount > 0 || refcount & kImmortalFlag);
- return refcount != kRefIncrement;
+ return refcount != kRefIncrement;
}
// Returns the current reference count using acquire semantics.
- inline int32_t Get() const {
+ inline int32_t Get() const {
return count_.load(std::memory_order_acquire) >> kNumFlags;
- }
+ }
// Returns true if the referenced object carries a CRC value.
bool HasCrc() const {
@@ -151,22 +151,22 @@ class RefcountAndFlags {
//
// When this returns true, there are no other references, and data sinks
// may safely adopt the children of the CordRep.
- inline bool IsOne() {
+ inline bool IsOne() {
return (count_.load(std::memory_order_acquire) & kRefcountMask) ==
kRefIncrement;
- }
+ }
- bool IsImmortal() const {
+ bool IsImmortal() const {
return (count_.load(std::memory_order_relaxed) & kImmortalFlag) != 0;
- }
-
+ }
+
private:
// We reserve the bottom bits for flags.
// kImmortalBit indicates that this entity should never be collected; it is
// used for the StringConstant constructor to avoid collecting immutable
// constant cords.
// kReservedFlag is reserved for future use.
- enum {
+ enum {
kNumFlags = 2,
kImmortalFlag = 0x1,
@@ -178,8 +178,8 @@ class RefcountAndFlags {
// purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
// if the immortal bit is set.)
kRefcountMask = ~kCrcFlag,
- };
-
+ };
+
std::atomic<int32_t> count_;
};
@@ -189,26 +189,26 @@ class RefcountAndFlags {
// functions in the base class.
struct CordRepConcat;
-struct CordRepExternal;
-struct CordRepFlat;
+struct CordRepExternal;
+struct CordRepFlat;
struct CordRepSubstring;
-class CordRepRing;
+class CordRepRing;
class CordRepBtree;
// Various representations that we allow
enum CordRepKind {
- CONCAT = 0,
+ CONCAT = 0,
SUBSTRING = 1,
BTREE = 2,
- RING = 3,
+ RING = 3,
EXTERNAL = 4,
// We have different tags for different sized flat arrays,
// starting with FLAT, and limited to MAX_FLAT_TAG. The 225 value is based on
- // the current 'size to tag' encoding of 8 / 32 bytes. If a new tag is needed
- // in the future, then 'FLAT' and 'MAX_FLAT_TAG' should be adjusted as well
- // as the Tag <---> Size logic so that FLAT stil represents the minimum flat
- // allocation size. (32 bytes as of now).
+ // the current 'size to tag' encoding of 8 / 32 bytes. If a new tag is needed
+ // in the future, then 'FLAT' and 'MAX_FLAT_TAG' should be adjusted as well
+ // as the Tag <---> Size logic so that FLAT still represents the minimum flat
+ // allocation size. (32 bytes as of now).
FLAT = 5,
MAX_FLAT_TAG = 225
};
@@ -225,10 +225,10 @@ static_assert(EXTERNAL == RING + 1, "BTREE and EXTERNAL not consecutive");
static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive");
struct CordRep {
- CordRep() = default;
+ CordRep() = default;
constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l)
- : length(l), refcount(immortal), tag(EXTERNAL), storage{} {}
-
+ : length(l), refcount(immortal), tag(EXTERNAL), storage{} {}
+
// The following three fields have to be less than 32 bytes since
// that is the smallest supported flat node size.
size_t length;
@@ -255,32 +255,32 @@ struct CordRep {
constexpr bool IsFlat() const { return tag >= FLAT; }
constexpr bool IsBtree() const { return tag == BTREE; }
- inline CordRepRing* ring();
- inline const CordRepRing* ring() const;
+ inline CordRepRing* ring();
+ inline const CordRepRing* ring() const;
inline CordRepConcat* concat();
inline const CordRepConcat* concat() const;
inline CordRepSubstring* substring();
inline const CordRepSubstring* substring() const;
inline CordRepExternal* external();
inline const CordRepExternal* external() const;
- inline CordRepFlat* flat();
- inline const CordRepFlat* flat() const;
+ inline CordRepFlat* flat();
+ inline const CordRepFlat* flat() const;
inline CordRepBtree* btree();
inline const CordRepBtree* btree() const;
-
- // --------------------------------------------------------------------
- // Memory management
-
- // Destroys the provided `rep`.
- static void Destroy(CordRep* rep);
-
- // Increments the reference count of `rep`.
- // Requires `rep` to be a non-null pointer value.
- static inline CordRep* Ref(CordRep* rep);
-
- // Decrements the reference count of `rep`. Destroys rep if count reaches
- // zero. Requires `rep` to be a non-null pointer value.
- static inline void Unref(CordRep* rep);
+
+ // --------------------------------------------------------------------
+ // Memory management
+
+ // Destroys the provided `rep`.
+ static void Destroy(CordRep* rep);
+
+ // Increments the reference count of `rep`.
+ // Requires `rep` to be a non-null pointer value.
+ static inline CordRep* Ref(CordRep* rep);
+
+ // Decrements the reference count of `rep`. Destroys rep if count reaches
+ // zero. Requires `rep` to be a non-null pointer value.
+ static inline void Unref(CordRep* rep);
};
struct CordRepConcat : public CordRep {
@@ -304,19 +304,19 @@ using ExternalReleaserInvoker = void (*)(CordRepExternal*);
// External CordReps are allocated together with a type erased releaser. The
// releaser is stored in the memory directly following the CordRepExternal.
struct CordRepExternal : public CordRep {
- CordRepExternal() = default;
- explicit constexpr CordRepExternal(absl::string_view str)
+ CordRepExternal() = default;
+ explicit constexpr CordRepExternal(absl::string_view str)
: CordRep(RefcountAndFlags::Immortal{}, str.size()),
- base(str.data()),
- releaser_invoker(nullptr) {}
-
+ base(str.data()),
+ releaser_invoker(nullptr) {}
+
const char* base;
// Pointer to function that knows how to call and destroy the releaser.
ExternalReleaserInvoker releaser_invoker;
-
- // Deletes (releases) the external rep.
+
+ // Deletes (releases) the external rep.
// Requires rep != nullptr and rep->IsExternal()
- static void Delete(CordRep* rep);
+ static void Delete(CordRep* rep);
};
struct Rank1 {};
@@ -357,90 +357,90 @@ struct CordRepExternalImpl
}
};
-inline void CordRepExternal::Delete(CordRep* rep) {
+inline void CordRepExternal::Delete(CordRep* rep) {
assert(rep != nullptr && rep->IsExternal());
- auto* rep_external = static_cast<CordRepExternal*>(rep);
- assert(rep_external->releaser_invoker != nullptr);
- rep_external->releaser_invoker(rep_external);
-}
-
-template <typename Str>
-struct ConstInitExternalStorage {
- ABSL_CONST_INIT static CordRepExternal value;
-};
-
-template <typename Str>
-CordRepExternal ConstInitExternalStorage<Str>::value(Str::value);
-
+ auto* rep_external = static_cast<CordRepExternal*>(rep);
+ assert(rep_external->releaser_invoker != nullptr);
+ rep_external->releaser_invoker(rep_external);
+}
+
+template <typename Str>
+struct ConstInitExternalStorage {
+ ABSL_CONST_INIT static CordRepExternal value;
+};
+
+template <typename Str>
+CordRepExternal ConstInitExternalStorage<Str>::value(Str::value);
+
enum {
kMaxInline = 15,
};
-constexpr char GetOrNull(absl::string_view data, size_t pos) {
- return pos < data.size() ? data[pos] : '\0';
-}
-
-// We store cordz_info as 64 bit pointer value in big endian format. This
-// guarantees that the least significant byte of cordz_info matches the last
-// byte of the inline data representation in as_chars_, which holds the inlined
-// size or the 'is_tree' bit.
-using cordz_info_t = int64_t;
-
-// Assert that the `cordz_info` pointer value perfectly overlaps the last half
-// of `as_chars_` and can hold a pointer value.
-static_assert(sizeof(cordz_info_t) * 2 == kMaxInline + 1, "");
-static_assert(sizeof(cordz_info_t) >= sizeof(intptr_t), "");
-
-// BigEndianByte() creates a big endian representation of 'value', i.e.: a big
-// endian value where the last byte in the host's representation holds 'value`,
-// with all other bytes being 0.
-static constexpr cordz_info_t BigEndianByte(unsigned char value) {
-#if defined(ABSL_IS_BIG_ENDIAN)
- return value;
-#else
- return static_cast<cordz_info_t>(value) << ((sizeof(cordz_info_t) - 1) * 8);
-#endif
-}
-
-class InlineData {
- public:
+constexpr char GetOrNull(absl::string_view data, size_t pos) {
+ return pos < data.size() ? data[pos] : '\0';
+}
+
+// We store cordz_info as 64 bit pointer value in big endian format. This
+// guarantees that the least significant byte of cordz_info matches the last
+// byte of the inline data representation in as_chars_, which holds the inlined
+// size or the 'is_tree' bit.
+using cordz_info_t = int64_t;
+
+// Assert that the `cordz_info` pointer value perfectly overlaps the last half
+// of `as_chars_` and can hold a pointer value.
+static_assert(sizeof(cordz_info_t) * 2 == kMaxInline + 1, "");
+static_assert(sizeof(cordz_info_t) >= sizeof(intptr_t), "");
+
+// BigEndianByte() creates a big endian representation of 'value', i.e.: a big
+// endian value where the last byte in the host's representation holds `value`,
+// with all other bytes being 0.
+static constexpr cordz_info_t BigEndianByte(unsigned char value) {
+#if defined(ABSL_IS_BIG_ENDIAN)
+ return value;
+#else
+ return static_cast<cordz_info_t>(value) << ((sizeof(cordz_info_t) - 1) * 8);
+#endif
+}
+
+class InlineData {
+ public:
// DefaultInitType forces the use of the default initialization constructor.
enum DefaultInitType { kDefaultInit };
- // kNullCordzInfo holds the big endian representation of intptr_t(1)
- // This is the 'null' / initial value of 'cordz_info'. The null value
- // is specifically big endian 1 as with 64-bit pointers, the last
- // byte of cordz_info overlaps with the last byte holding the tag.
- static constexpr cordz_info_t kNullCordzInfo = BigEndianByte(1);
-
- constexpr InlineData() : as_chars_{0} {}
+ // kNullCordzInfo holds the big endian representation of intptr_t(1)
+ // This is the 'null' / initial value of 'cordz_info'. The null value
+ // is specifically big endian 1 as with 64-bit pointers, the last
+ // byte of cordz_info overlaps with the last byte holding the tag.
+ static constexpr cordz_info_t kNullCordzInfo = BigEndianByte(1);
+
+ constexpr InlineData() : as_chars_{0} {}
explicit InlineData(DefaultInitType) {}
- explicit constexpr InlineData(CordRep* rep) : as_tree_(rep) {}
- explicit constexpr InlineData(absl::string_view chars)
- : as_chars_{
- GetOrNull(chars, 0), GetOrNull(chars, 1),
- GetOrNull(chars, 2), GetOrNull(chars, 3),
- GetOrNull(chars, 4), GetOrNull(chars, 5),
- GetOrNull(chars, 6), GetOrNull(chars, 7),
- GetOrNull(chars, 8), GetOrNull(chars, 9),
- GetOrNull(chars, 10), GetOrNull(chars, 11),
- GetOrNull(chars, 12), GetOrNull(chars, 13),
- GetOrNull(chars, 14), static_cast<char>((chars.size() << 1))} {}
-
- // Returns true if the current instance is empty.
- // The 'empty value' is an inlined data value of zero length.
- bool is_empty() const { return tag() == 0; }
-
- // Returns true if the current instance holds a tree value.
- bool is_tree() const { return (tag() & 1) != 0; }
-
- // Returns true if the current instance holds a cordz_info value.
- // Requires the current instance to hold a tree value.
- bool is_profiled() const {
- assert(is_tree());
- return as_tree_.cordz_info != kNullCordzInfo;
- }
-
+ explicit constexpr InlineData(CordRep* rep) : as_tree_(rep) {}
+ explicit constexpr InlineData(absl::string_view chars)
+ : as_chars_{
+ GetOrNull(chars, 0), GetOrNull(chars, 1),
+ GetOrNull(chars, 2), GetOrNull(chars, 3),
+ GetOrNull(chars, 4), GetOrNull(chars, 5),
+ GetOrNull(chars, 6), GetOrNull(chars, 7),
+ GetOrNull(chars, 8), GetOrNull(chars, 9),
+ GetOrNull(chars, 10), GetOrNull(chars, 11),
+ GetOrNull(chars, 12), GetOrNull(chars, 13),
+ GetOrNull(chars, 14), static_cast<char>((chars.size() << 1))} {}
+
+ // Returns true if the current instance is empty.
+ // The 'empty value' is an inlined data value of zero length.
+ bool is_empty() const { return tag() == 0; }
+
+ // Returns true if the current instance holds a tree value.
+ bool is_tree() const { return (tag() & 1) != 0; }
+
+ // Returns true if the current instance holds a cordz_info value.
+ // Requires the current instance to hold a tree value.
+ bool is_profiled() const {
+ assert(is_tree());
+ return as_tree_.cordz_info != kNullCordzInfo;
+ }
+
// Returns true if either of the provided instances hold a cordz_info value.
// This method is more efficient than the equivalent `data1.is_profiled() ||
// data2.is_profiled()`. Requires both arguments to hold a tree.
@@ -451,170 +451,170 @@ class InlineData {
kNullCordzInfo;
}
- // Returns the cordz_info sampling instance for this instance, or nullptr
- // if the current instance is not sampled and does not have CordzInfo data.
- // Requires the current instance to hold a tree value.
- CordzInfo* cordz_info() const {
- assert(is_tree());
- intptr_t info =
- static_cast<intptr_t>(absl::big_endian::ToHost64(as_tree_.cordz_info));
- assert(info & 1);
- return reinterpret_cast<CordzInfo*>(info - 1);
- }
-
- // Sets the current cordz_info sampling instance for this instance, or nullptr
- // if the current instance is not sampled and does not have CordzInfo data.
- // Requires the current instance to hold a tree value.
- void set_cordz_info(CordzInfo* cordz_info) {
- assert(is_tree());
- intptr_t info = reinterpret_cast<intptr_t>(cordz_info) | 1;
- as_tree_.cordz_info = absl::big_endian::FromHost64(info);
- }
-
- // Resets the current cordz_info to null / empty.
- void clear_cordz_info() {
- assert(is_tree());
- as_tree_.cordz_info = kNullCordzInfo;
- }
-
- // Returns a read only pointer to the character data inside this instance.
- // Requires the current instance to hold inline data.
- const char* as_chars() const {
- assert(!is_tree());
- return as_chars_;
- }
-
- // Returns a mutable pointer to the character data inside this instance.
- // Should be used for 'write only' operations setting an inlined value.
- // Applications can set the value of inlined data either before or after
- // setting the inlined size, i.e., both of the below are valid:
- //
- // // Set inlined data and inline size
- // memcpy(data_.as_chars(), data, size);
- // data_.set_inline_size(size);
- //
- // // Set inlined size and inline data
- // data_.set_inline_size(size);
- // memcpy(data_.as_chars(), data, size);
- //
- // It's an error to read from the returned pointer without a preceding write
- // if the current instance does not hold inline data, i.e.: is_tree() == true.
- char* as_chars() { return as_chars_; }
-
- // Returns the tree value of this value.
- // Requires the current instance to hold a tree value.
- CordRep* as_tree() const {
- assert(is_tree());
- return as_tree_.rep;
- }
-
- // Initialize this instance to holding the tree value `rep`,
- // initializing the cordz_info to null, i.e.: 'not profiled'.
- void make_tree(CordRep* rep) {
- as_tree_.rep = rep;
- as_tree_.cordz_info = kNullCordzInfo;
- }
-
- // Set the tree value of this instance to 'rep`.
- // Requires the current instance to already hold a tree value.
- // Does not affect the value of cordz_info.
- void set_tree(CordRep* rep) {
- assert(is_tree());
- as_tree_.rep = rep;
- }
-
- // Returns the size of the inlined character data inside this instance.
- // Requires the current instance to hold inline data.
- size_t inline_size() const {
- assert(!is_tree());
- return tag() >> 1;
- }
-
- // Sets the size of the inlined character data inside this instance.
- // Requires `size` to be <= kMaxInline.
- // See the documentation on 'as_chars()' for more information and examples.
- void set_inline_size(size_t size) {
- ABSL_ASSERT(size <= kMaxInline);
- tag() = static_cast<char>(size << 1);
- }
-
- private:
- // See cordz_info_t for forced alignment and size of `cordz_info` details.
- struct AsTree {
- explicit constexpr AsTree(absl::cord_internal::CordRep* tree)
- : rep(tree), cordz_info(kNullCordzInfo) {}
- // This union uses up extra space so that whether rep is 32 or 64 bits,
- // cordz_info will still start at the eighth byte, and the last
- // byte of cordz_info will still be the last byte of InlineData.
- union {
- absl::cord_internal::CordRep* rep;
- cordz_info_t unused_aligner;
- };
- cordz_info_t cordz_info;
- };
-
- char& tag() { return reinterpret_cast<char*>(this)[kMaxInline]; }
- char tag() const { return reinterpret_cast<const char*>(this)[kMaxInline]; }
-
- // If the data has length <= kMaxInline, we store it in `as_chars_`, and
- // store the size in the last char of `as_chars_` shifted left + 1.
- // Else we store it in a tree and store a pointer to that tree in
- // `as_tree_.rep` and store a tag in `tagged_size`.
+ // Returns the cordz_info sampling instance for this instance, or nullptr
+ // if the current instance is not sampled and does not have CordzInfo data.
+ // Requires the current instance to hold a tree value.
+ CordzInfo* cordz_info() const {
+ assert(is_tree());
+ intptr_t info =
+ static_cast<intptr_t>(absl::big_endian::ToHost64(as_tree_.cordz_info));
+ assert(info & 1);
+ return reinterpret_cast<CordzInfo*>(info - 1);
+ }
+
+ // Sets the current cordz_info sampling instance for this instance, or nullptr
+ // if the current instance is not sampled and does not have CordzInfo data.
+ // Requires the current instance to hold a tree value.
+ void set_cordz_info(CordzInfo* cordz_info) {
+ assert(is_tree());
+ intptr_t info = reinterpret_cast<intptr_t>(cordz_info) | 1;
+ as_tree_.cordz_info = absl::big_endian::FromHost64(info);
+ }
+
+ // Resets the current cordz_info to null / empty.
+ void clear_cordz_info() {
+ assert(is_tree());
+ as_tree_.cordz_info = kNullCordzInfo;
+ }
+
+ // Returns a read only pointer to the character data inside this instance.
+ // Requires the current instance to hold inline data.
+ const char* as_chars() const {
+ assert(!is_tree());
+ return as_chars_;
+ }
+
+ // Returns a mutable pointer to the character data inside this instance.
+ // Should be used for 'write only' operations setting an inlined value.
+ // Applications can set the value of inlined data either before or after
+ // setting the inlined size, i.e., both of the below are valid:
+ //
+ // // Set inlined data and inline size
+ // memcpy(data_.as_chars(), data, size);
+ // data_.set_inline_size(size);
+ //
+ // // Set inlined size and inline data
+ // data_.set_inline_size(size);
+ // memcpy(data_.as_chars(), data, size);
+ //
+ // It's an error to read from the returned pointer without a preceding write
+ // if the current instance does not hold inline data, i.e.: is_tree() == true.
+ char* as_chars() { return as_chars_; }
+
+ // Returns the tree value of this value.
+ // Requires the current instance to hold a tree value.
+ CordRep* as_tree() const {
+ assert(is_tree());
+ return as_tree_.rep;
+ }
+
+ // Initialize this instance to holding the tree value `rep`,
+ // initializing the cordz_info to null, i.e.: 'not profiled'.
+ void make_tree(CordRep* rep) {
+ as_tree_.rep = rep;
+ as_tree_.cordz_info = kNullCordzInfo;
+ }
+
+ // Set the tree value of this instance to `rep`.
+ // Requires the current instance to already hold a tree value.
+ // Does not affect the value of cordz_info.
+ void set_tree(CordRep* rep) {
+ assert(is_tree());
+ as_tree_.rep = rep;
+ }
+
+ // Returns the size of the inlined character data inside this instance.
+ // Requires the current instance to hold inline data.
+ size_t inline_size() const {
+ assert(!is_tree());
+ return tag() >> 1;
+ }
+
+ // Sets the size of the inlined character data inside this instance.
+ // Requires `size` to be <= kMaxInline.
+ // See the documentation on 'as_chars()' for more information and examples.
+ void set_inline_size(size_t size) {
+ ABSL_ASSERT(size <= kMaxInline);
+ tag() = static_cast<char>(size << 1);
+ }
+
+ private:
+ // See cordz_info_t for forced alignment and size of `cordz_info` details.
+ struct AsTree {
+ explicit constexpr AsTree(absl::cord_internal::CordRep* tree)
+ : rep(tree), cordz_info(kNullCordzInfo) {}
+ // This union uses up extra space so that whether rep is 32 or 64 bits,
+ // cordz_info will still start at the eighth byte, and the last
+ // byte of cordz_info will still be the last byte of InlineData.
+ union {
+ absl::cord_internal::CordRep* rep;
+ cordz_info_t unused_aligner;
+ };
+ cordz_info_t cordz_info;
+ };
+
+ char& tag() { return reinterpret_cast<char*>(this)[kMaxInline]; }
+ char tag() const { return reinterpret_cast<const char*>(this)[kMaxInline]; }
+
+ // If the data has length <= kMaxInline, we store it in `as_chars_`, and
+ // store the size in the last char of `as_chars_` shifted left + 1.
+ // Else we store it in a tree and store a pointer to that tree in
+ // `as_tree_.rep` and store a tag in `tagged_size`.
union {
- char as_chars_[kMaxInline + 1];
- AsTree as_tree_;
- };
+ char as_chars_[kMaxInline + 1];
+ AsTree as_tree_;
+ };
};
static_assert(sizeof(InlineData) == kMaxInline + 1, "");
-inline CordRepConcat* CordRep::concat() {
+inline CordRepConcat* CordRep::concat() {
assert(IsConcat());
- return static_cast<CordRepConcat*>(this);
-}
-
-inline const CordRepConcat* CordRep::concat() const {
+ return static_cast<CordRepConcat*>(this);
+}
+
+inline const CordRepConcat* CordRep::concat() const {
assert(IsConcat());
- return static_cast<const CordRepConcat*>(this);
-}
-
-inline CordRepSubstring* CordRep::substring() {
+ return static_cast<const CordRepConcat*>(this);
+}
+
+inline CordRepSubstring* CordRep::substring() {
assert(IsSubstring());
- return static_cast<CordRepSubstring*>(this);
-}
-
-inline const CordRepSubstring* CordRep::substring() const {
+ return static_cast<CordRepSubstring*>(this);
+}
+
+inline const CordRepSubstring* CordRep::substring() const {
assert(IsSubstring());
- return static_cast<const CordRepSubstring*>(this);
-}
-
-inline CordRepExternal* CordRep::external() {
+ return static_cast<const CordRepSubstring*>(this);
+}
+
+inline CordRepExternal* CordRep::external() {
assert(IsExternal());
- return static_cast<CordRepExternal*>(this);
-}
-
-inline const CordRepExternal* CordRep::external() const {
+ return static_cast<CordRepExternal*>(this);
+}
+
+inline const CordRepExternal* CordRep::external() const {
assert(IsExternal());
- return static_cast<const CordRepExternal*>(this);
-}
-
-inline CordRep* CordRep::Ref(CordRep* rep) {
- assert(rep != nullptr);
- rep->refcount.Increment();
- return rep;
-}
-
-inline void CordRep::Unref(CordRep* rep) {
- assert(rep != nullptr);
- // Expect refcount to be 0. Avoiding the cost of an atomic decrement should
- // typically outweigh the cost of an extra branch checking for ref == 1.
- if (ABSL_PREDICT_FALSE(!rep->refcount.DecrementExpectHighRefcount())) {
- Destroy(rep);
- }
-}
-
+ return static_cast<const CordRepExternal*>(this);
+}
+
+inline CordRep* CordRep::Ref(CordRep* rep) {
+ assert(rep != nullptr);
+ rep->refcount.Increment();
+ return rep;
+}
+
+inline void CordRep::Unref(CordRep* rep) {
+ assert(rep != nullptr);
+ // Expect refcount to be 0. Avoiding the cost of an atomic decrement should
+ // typically outweigh the cost of an extra branch checking for ref == 1.
+ if (ABSL_PREDICT_FALSE(!rep->refcount.DecrementExpectHighRefcount())) {
+ Destroy(rep);
+ }
+}
+
} // namespace cord_internal
-
+
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
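Note on the InlineData hunk above: the last byte of the 16-byte inline buffer stores the inline size shifted left by one, so bit 0 stays free as the is_tree flag; when a tree pointer is stored, kNullCordzInfo (big-endian 1) lands in that same byte and sets the bit. The sketch below round-trips that encoding; only kMaxInline mirrors the header, the helper names are illustrative and not Abseil API.

#include <cassert>
#include <cstddef>
#include <cstdio>

constexpr std::size_t kMaxInline = 15;

// Inline data: the size is kept in the tag byte as (size << 1), bit 0 clear.
constexpr unsigned char EncodeInlineSize(std::size_t size) {
  return static_cast<unsigned char>(size << 1);
}
constexpr std::size_t DecodeInlineSize(unsigned char tag) { return tag >> 1; }
constexpr bool IsTree(unsigned char tag) { return (tag & 1) != 0; }

int main() {
  for (std::size_t size = 0; size <= kMaxInline; ++size) {
    unsigned char tag = EncodeInlineSize(size);
    assert(!IsTree(tag));
    assert(DecodeInlineSize(tag) == size);
  }
  // A tree representation sets bit 0 of the same byte (e.g. tag value 1).
  assert(IsTree(1));
  std::printf("inline sizes 0..%zu round-trip through the tag byte\n", kMaxInline);
}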
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_flat.h b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_flat.h
index 4d0f988697..c9249798a1 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_flat.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_flat.h
@@ -1,146 +1,146 @@
-// Copyright 2020 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
-#define ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
-
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <memory>
-
-#include "absl/strings/internal/cord_internal.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace cord_internal {
-
-// Note: all constants below are never ODR used and internal to cord, we define
-// these as static constexpr to avoid 'in struct' definition and usage clutter.
-
-// Largest and smallest flat node lengths we are willing to allocate
-// Flat allocation size is stored in tag, which currently can encode sizes up
-// to 4K, encoded as multiple of either 8 or 32 bytes.
-// If we allow for larger sizes, we need to change this to 8/64, 16/128, etc.
-// kMinFlatSize is bounded by tag needing to be at least FLAT * 8 bytes, and
-// ideally a 'nice' size aligning with allocation and cacheline sizes like 32.
-// kMaxFlatSize is bounded by the size resulting in a computed tag no greater
-// than MAX_FLAT_TAG. MAX_FLAT_TAG provides for additional 'high' tag values.
-static constexpr size_t kFlatOverhead = offsetof(CordRep, storage);
-static constexpr size_t kMinFlatSize = 32;
-static constexpr size_t kMaxFlatSize = 4096;
-static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead;
-static constexpr size_t kMinFlatLength = kMinFlatSize - kFlatOverhead;
-
-constexpr uint8_t AllocatedSizeToTagUnchecked(size_t size) {
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Note: all constants below are never ODR used and internal to cord, we define
+// these as static constexpr to avoid 'in struct' definition and usage clutter.
+
+// Largest and smallest flat node lengths we are willing to allocate
+// Flat allocation size is stored in tag, which currently can encode sizes up
+// to 4K, encoded as multiple of either 8 or 32 bytes.
+// If we allow for larger sizes, we need to change this to 8/64, 16/128, etc.
+// kMinFlatSize is bounded by tag needing to be at least FLAT * 8 bytes, and
+// ideally a 'nice' size aligning with allocation and cacheline sizes like 32.
+// kMaxFlatSize is bounded by the size resulting in a computed tag no greater
+// than MAX_FLAT_TAG. MAX_FLAT_TAG provides for additional 'high' tag values.
+static constexpr size_t kFlatOverhead = offsetof(CordRep, storage);
+static constexpr size_t kMinFlatSize = 32;
+static constexpr size_t kMaxFlatSize = 4096;
+static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead;
+static constexpr size_t kMinFlatLength = kMinFlatSize - kFlatOverhead;
+
+constexpr uint8_t AllocatedSizeToTagUnchecked(size_t size) {
return static_cast<uint8_t>((size <= 1024) ? size / 8 + 1
: 129 + size / 32 - 1024 / 32);
-}
-
+}
+
static_assert(kMinFlatSize / 8 + 1 >= FLAT, "");
-static_assert(AllocatedSizeToTagUnchecked(kMaxFlatSize) <= MAX_FLAT_TAG, "");
-
-// Helper functions for rounded div, and rounding to exact sizes.
-constexpr size_t DivUp(size_t n, size_t m) { return (n + m - 1) / m; }
-constexpr size_t RoundUp(size_t n, size_t m) { return DivUp(n, m) * m; }
-
-// Returns the size to the nearest equal or larger value that can be
-// expressed exactly as a tag value.
-inline size_t RoundUpForTag(size_t size) {
- return RoundUp(size, (size <= 1024) ? 8 : 32);
-}
-
-// Converts the allocated size to a tag, rounding down if the size
-// does not exactly match a 'tag expressible' size value. The result is
-// undefined if the size exceeds the maximum size that can be encoded in
-// a tag, i.e., if size is larger than TagToAllocatedSize(<max tag>).
-inline uint8_t AllocatedSizeToTag(size_t size) {
- const uint8_t tag = AllocatedSizeToTagUnchecked(size);
- assert(tag <= MAX_FLAT_TAG);
- return tag;
-}
-
-// Converts the provided tag to the corresponding allocated size
-constexpr size_t TagToAllocatedSize(uint8_t tag) {
+static_assert(AllocatedSizeToTagUnchecked(kMaxFlatSize) <= MAX_FLAT_TAG, "");
+
+// Helper functions for rounded div, and rounding to exact sizes.
+constexpr size_t DivUp(size_t n, size_t m) { return (n + m - 1) / m; }
+constexpr size_t RoundUp(size_t n, size_t m) { return DivUp(n, m) * m; }
+
+// Returns the size to the nearest equal or larger value that can be
+// expressed exactly as a tag value.
+inline size_t RoundUpForTag(size_t size) {
+ return RoundUp(size, (size <= 1024) ? 8 : 32);
+}
+
+// Converts the allocated size to a tag, rounding down if the size
+// does not exactly match a 'tag expressible' size value. The result is
+// undefined if the size exceeds the maximum size that can be encoded in
+// a tag, i.e., if size is larger than TagToAllocatedSize(<max tag>).
+inline uint8_t AllocatedSizeToTag(size_t size) {
+ const uint8_t tag = AllocatedSizeToTagUnchecked(size);
+ assert(tag <= MAX_FLAT_TAG);
+ return tag;
+}
+
+// Converts the provided tag to the corresponding allocated size
+constexpr size_t TagToAllocatedSize(uint8_t tag) {
return (tag <= 129) ? ((tag - 1) * 8) : (1024 + (tag - 129) * 32);
-}
-
-// Converts the provided tag to the corresponding available data length
-constexpr size_t TagToLength(uint8_t tag) {
- return TagToAllocatedSize(tag) - kFlatOverhead;
-}
-
-// Enforce that kMaxFlatSize maps to a well-known exact tag value.
+}
+
+// Converts the provided tag to the corresponding available data length
+constexpr size_t TagToLength(uint8_t tag) {
+ return TagToAllocatedSize(tag) - kFlatOverhead;
+}
+
+// Enforce that kMaxFlatSize maps to a well-known exact tag value.
static_assert(TagToAllocatedSize(225) == kMaxFlatSize, "Bad tag logic");
-
-struct CordRepFlat : public CordRep {
- // Creates a new flat node.
- static CordRepFlat* New(size_t len) {
- if (len <= kMinFlatLength) {
- len = kMinFlatLength;
- } else if (len > kMaxFlatLength) {
- len = kMaxFlatLength;
- }
-
- // Round size up so it matches a size we can exactly express in a tag.
- const size_t size = RoundUpForTag(len + kFlatOverhead);
- void* const raw_rep = ::operator new(size);
- CordRepFlat* rep = new (raw_rep) CordRepFlat();
- rep->tag = AllocatedSizeToTag(size);
- return rep;
- }
-
- // Deletes a CordRepFlat instance created previously through a call to New().
- // Flat CordReps are allocated and constructed with raw ::operator new and
- // placement new, and must be destructed and deallocated accordingly.
- static void Delete(CordRep*rep) {
- assert(rep->tag >= FLAT && rep->tag <= MAX_FLAT_TAG);
-
-#if defined(__cpp_sized_deallocation)
- size_t size = TagToAllocatedSize(rep->tag);
- rep->~CordRep();
- ::operator delete(rep, size);
-#else
- rep->~CordRep();
- ::operator delete(rep);
-#endif
- }
-
- // Returns a pointer to the data inside this flat rep.
+
+struct CordRepFlat : public CordRep {
+ // Creates a new flat node.
+ static CordRepFlat* New(size_t len) {
+ if (len <= kMinFlatLength) {
+ len = kMinFlatLength;
+ } else if (len > kMaxFlatLength) {
+ len = kMaxFlatLength;
+ }
+
+ // Round size up so it matches a size we can exactly express in a tag.
+ const size_t size = RoundUpForTag(len + kFlatOverhead);
+ void* const raw_rep = ::operator new(size);
+ CordRepFlat* rep = new (raw_rep) CordRepFlat();
+ rep->tag = AllocatedSizeToTag(size);
+ return rep;
+ }
+
+ // Deletes a CordRepFlat instance created previously through a call to New().
+ // Flat CordReps are allocated and constructed with raw ::operator new and
+ // placement new, and must be destructed and deallocated accordingly.
+ static void Delete(CordRep*rep) {
+ assert(rep->tag >= FLAT && rep->tag <= MAX_FLAT_TAG);
+
+#if defined(__cpp_sized_deallocation)
+ size_t size = TagToAllocatedSize(rep->tag);
+ rep->~CordRep();
+ ::operator delete(rep, size);
+#else
+ rep->~CordRep();
+ ::operator delete(rep);
+#endif
+ }
+
+ // Returns a pointer to the data inside this flat rep.
char* Data() { return reinterpret_cast<char*>(storage); }
const char* Data() const { return reinterpret_cast<const char*>(storage); }
-
- // Returns the maximum capacity (payload size) of this instance.
- size_t Capacity() const { return TagToLength(tag); }
-
- // Returns the allocated size (payload + overhead) of this instance.
- size_t AllocatedSize() const { return TagToAllocatedSize(tag); }
-};
-
-// Now that CordRepFlat is defined, we can define CordRep's helper casts:
-inline CordRepFlat* CordRep::flat() {
- assert(tag >= FLAT && tag <= MAX_FLAT_TAG);
- return reinterpret_cast<CordRepFlat*>(this);
-}
-
-inline const CordRepFlat* CordRep::flat() const {
- assert(tag >= FLAT && tag <= MAX_FLAT_TAG);
- return reinterpret_cast<const CordRepFlat*>(this);
-}
-
-} // namespace cord_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
+
+ // Returns the maximum capacity (payload size) of this instance.
+ size_t Capacity() const { return TagToLength(tag); }
+
+ // Returns the allocated size (payload + overhead) of this instance.
+ size_t AllocatedSize() const { return TagToAllocatedSize(tag); }
+};
+
+// Now that CordRepFlat is defined, we can define CordRep's helper casts:
+inline CordRepFlat* CordRep::flat() {
+ assert(tag >= FLAT && tag <= MAX_FLAT_TAG);
+ return reinterpret_cast<CordRepFlat*>(this);
+}
+
+inline const CordRepFlat* CordRep::flat() const {
+ assert(tag >= FLAT && tag <= MAX_FLAT_TAG);
+ return reinterpret_cast<const CordRepFlat*>(this);
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_
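Note on the tag <-> size encoding in the hunk above: allocated sizes up to 1024 bytes are encoded in 8-byte steps, larger ones in 32-byte steps. The two conversion functions below are copied from the header; main() is only an illustrative self-check of the boundary values quoted in the comments (32 -> FLAT == 5, 4096 -> MAX_FLAT_TAG == 225), not part of Abseil.

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr uint8_t AllocatedSizeToTagUnchecked(size_t size) {
  return static_cast<uint8_t>((size <= 1024) ? size / 8 + 1
                                             : 129 + size / 32 - 1024 / 32);
}
constexpr size_t TagToAllocatedSize(uint8_t tag) {
  return (tag <= 129) ? ((tag - 1) * 8) : (1024 + (tag - 129) * 32);
}

int main() {
  static_assert(AllocatedSizeToTagUnchecked(32) == 5, "kMinFlatSize maps to FLAT");
  static_assert(AllocatedSizeToTagUnchecked(1024) == 129, "8-byte range ends at 1024");
  static_assert(AllocatedSizeToTagUnchecked(4096) == 225, "kMaxFlatSize maps to MAX_FLAT_TAG");
  static_assert(TagToAllocatedSize(225) == 4096, "round trip at the top");
  // Every tag-expressible size round-trips exactly.
  for (size_t size = 32; size <= 1024; size += 8) {
    assert(TagToAllocatedSize(AllocatedSizeToTagUnchecked(size)) == size);
  }
  for (size_t size = 1024 + 32; size <= 4096; size += 32) {
    assert(TagToAllocatedSize(AllocatedSizeToTagUnchecked(size)) == size);
  }
}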
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring.cc
index 07c77eb3e5..2635fe1558 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring.cc
@@ -1,771 +1,771 @@
-// Copyright 2020 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include "absl/strings/internal/cord_rep_ring.h"
-
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <iostream>
-#include <limits>
-#include <memory>
-#include <string>
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/throw_delegate.h"
-#include "absl/base/macros.h"
-#include "absl/container/inlined_vector.h"
-#include "absl/strings/internal/cord_internal.h"
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "absl/strings/internal/cord_rep_ring.h"
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iostream>
+#include <limits>
+#include <memory>
+#include <string>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_consume.h"
-#include "absl/strings/internal/cord_rep_flat.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace cord_internal {
-
-namespace {
-
-using index_type = CordRepRing::index_type;
-
-enum class Direction { kForward, kReversed };
-
-inline bool IsFlatOrExternal(CordRep* rep) {
+#include "absl/strings/internal/cord_rep_flat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+namespace {
+
+using index_type = CordRepRing::index_type;
+
+enum class Direction { kForward, kReversed };
+
+inline bool IsFlatOrExternal(CordRep* rep) {
return rep->IsFlat() || rep->IsExternal();
-}
-
-// Verifies that n + extra <= kMaxCapacity: throws std::length_error otherwise.
-inline void CheckCapacity(size_t n, size_t extra) {
- if (ABSL_PREDICT_FALSE(extra > CordRepRing::kMaxCapacity - n)) {
- base_internal::ThrowStdLengthError("Maximum capacity exceeded");
- }
-}
-
-// Creates a flat from the provided string data, allocating up to `extra`
-// capacity in the returned flat depending on kMaxFlatLength limitations.
-// Requires `len` to be less or equal to `kMaxFlatLength`
-CordRepFlat* CreateFlat(const char* s, size_t n, size_t extra = 0) { // NOLINT
- assert(n <= kMaxFlatLength);
- auto* rep = CordRepFlat::New(n + extra);
- rep->length = n;
- memcpy(rep->Data(), s, n);
- return rep;
-}
-
-// Unrefs the entries in `[head, tail)`.
-// Requires all entries to be a FLAT or EXTERNAL node.
-void UnrefEntries(const CordRepRing* rep, index_type head, index_type tail) {
- rep->ForEach(head, tail, [rep](index_type ix) {
- CordRep* child = rep->entry_child(ix);
- if (!child->refcount.Decrement()) {
- if (child->tag >= FLAT) {
- CordRepFlat::Delete(child->flat());
- } else {
- CordRepExternal::Delete(child->external());
- }
- }
- });
-}
-
-} // namespace
-
-std::ostream& operator<<(std::ostream& s, const CordRepRing& rep) {
- // Note: 'pos' values are defined as size_t (for overflow reasons), but that
- // prints really awkward for small prepended values such as -5. ssize_t is not
- // portable (POSIX), so we use ptrdiff_t instead to cast to signed values.
- s << " CordRepRing(" << &rep << ", length = " << rep.length
- << ", head = " << rep.head_ << ", tail = " << rep.tail_
- << ", cap = " << rep.capacity_ << ", rc = " << rep.refcount.Get()
- << ", begin_pos_ = " << static_cast<ptrdiff_t>(rep.begin_pos_) << ") {\n";
- CordRepRing::index_type head = rep.head();
- do {
- CordRep* child = rep.entry_child(head);
- s << " entry[" << head << "] length = " << rep.entry_length(head)
- << ", child " << child << ", clen = " << child->length
- << ", tag = " << static_cast<int>(child->tag)
- << ", rc = " << child->refcount.Get()
- << ", offset = " << rep.entry_data_offset(head)
- << ", end_pos = " << static_cast<ptrdiff_t>(rep.entry_end_pos(head))
- << "\n";
- head = rep.advance(head);
- } while (head != rep.tail());
- return s << "}\n";
-}
-
-void CordRepRing::AddDataOffset(index_type index, size_t n) {
- entry_data_offset()[index] += static_cast<offset_type>(n);
-}
-
-void CordRepRing::SubLength(index_type index, size_t n) {
- entry_end_pos()[index] -= n;
-}
-
-class CordRepRing::Filler {
- public:
- Filler(CordRepRing* rep, index_type pos) : rep_(rep), head_(pos), pos_(pos) {}
-
- index_type head() const { return head_; }
- index_type pos() const { return pos_; }
-
- void Add(CordRep* child, size_t offset, pos_type end_pos) {
- rep_->entry_end_pos()[pos_] = end_pos;
- rep_->entry_child()[pos_] = child;
- rep_->entry_data_offset()[pos_] = static_cast<offset_type>(offset);
- pos_ = rep_->advance(pos_);
- }
-
- private:
- CordRepRing* rep_;
- index_type head_;
- index_type pos_;
-};
-
-constexpr size_t CordRepRing::kMaxCapacity; // NOLINT: needed for c++11
-
-bool CordRepRing::IsValid(std::ostream& output) const {
- if (capacity_ == 0) {
- output << "capacity == 0";
- return false;
- }
-
- if (head_ >= capacity_ || tail_ >= capacity_) {
- output << "head " << head_ << " and/or tail " << tail_ << "exceed capacity "
- << capacity_;
- return false;
- }
-
- const index_type back = retreat(tail_);
- size_t pos_length = Distance(begin_pos_, entry_end_pos(back));
- if (pos_length != length) {
- output << "length " << length << " does not match positional length "
- << pos_length << " from begin_pos " << begin_pos_ << " and entry["
- << back << "].end_pos " << entry_end_pos(back);
- return false;
- }
-
- index_type head = head_;
- pos_type begin_pos = begin_pos_;
- do {
- pos_type end_pos = entry_end_pos(head);
- size_t entry_length = Distance(begin_pos, end_pos);
- if (entry_length == 0) {
- output << "entry[" << head << "] has an invalid length " << entry_length
- << " from begin_pos " << begin_pos << " and end_pos " << end_pos;
- return false;
- }
-
- CordRep* child = entry_child(head);
- if (child == nullptr) {
- output << "entry[" << head << "].child == nullptr";
- return false;
- }
- if (child->tag < FLAT && child->tag != EXTERNAL) {
- output << "entry[" << head << "].child has an invalid tag "
- << static_cast<int>(child->tag);
- return false;
- }
-
- size_t offset = entry_data_offset(head);
- if (offset >= child->length || entry_length > child->length - offset) {
- output << "entry[" << head << "] has offset " << offset
- << " and entry length " << entry_length
+}
+
+// Verifies that n + extra <= kMaxCapacity: throws std::length_error otherwise.
+inline void CheckCapacity(size_t n, size_t extra) {
+ if (ABSL_PREDICT_FALSE(extra > CordRepRing::kMaxCapacity - n)) {
+ base_internal::ThrowStdLengthError("Maximum capacity exceeded");
+ }
+}
+
+// Creates a flat from the provided string data, allocating up to `extra`
+// capacity in the returned flat depending on kMaxFlatLength limitations.
+// Requires `n` to be less than or equal to `kMaxFlatLength`
+CordRepFlat* CreateFlat(const char* s, size_t n, size_t extra = 0) { // NOLINT
+ assert(n <= kMaxFlatLength);
+ auto* rep = CordRepFlat::New(n + extra);
+ rep->length = n;
+ memcpy(rep->Data(), s, n);
+ return rep;
+}
+
+// Unrefs the entries in `[head, tail)`.
+// Requires all entries to be a FLAT or EXTERNAL node.
+void UnrefEntries(const CordRepRing* rep, index_type head, index_type tail) {
+ rep->ForEach(head, tail, [rep](index_type ix) {
+ CordRep* child = rep->entry_child(ix);
+ if (!child->refcount.Decrement()) {
+ if (child->tag >= FLAT) {
+ CordRepFlat::Delete(child->flat());
+ } else {
+ CordRepExternal::Delete(child->external());
+ }
+ }
+ });
+}
+
+} // namespace
+
+std::ostream& operator<<(std::ostream& s, const CordRepRing& rep) {
+ // Note: 'pos' values are defined as size_t (for overflow reasons), but that
+ // prints really awkward for small prepended values such as -5. ssize_t is not
+ // portable (POSIX), so we use ptrdiff_t instead to cast to signed values.
+ s << " CordRepRing(" << &rep << ", length = " << rep.length
+ << ", head = " << rep.head_ << ", tail = " << rep.tail_
+ << ", cap = " << rep.capacity_ << ", rc = " << rep.refcount.Get()
+ << ", begin_pos_ = " << static_cast<ptrdiff_t>(rep.begin_pos_) << ") {\n";
+ CordRepRing::index_type head = rep.head();
+ do {
+ CordRep* child = rep.entry_child(head);
+ s << " entry[" << head << "] length = " << rep.entry_length(head)
+ << ", child " << child << ", clen = " << child->length
+ << ", tag = " << static_cast<int>(child->tag)
+ << ", rc = " << child->refcount.Get()
+ << ", offset = " << rep.entry_data_offset(head)
+ << ", end_pos = " << static_cast<ptrdiff_t>(rep.entry_end_pos(head))
+ << "\n";
+ head = rep.advance(head);
+ } while (head != rep.tail());
+ return s << "}\n";
+}
+
+void CordRepRing::AddDataOffset(index_type index, size_t n) {
+ entry_data_offset()[index] += static_cast<offset_type>(n);
+}
+
+void CordRepRing::SubLength(index_type index, size_t n) {
+ entry_end_pos()[index] -= n;
+}
+
+class CordRepRing::Filler {
+ public:
+ Filler(CordRepRing* rep, index_type pos) : rep_(rep), head_(pos), pos_(pos) {}
+
+ index_type head() const { return head_; }
+ index_type pos() const { return pos_; }
+
+ void Add(CordRep* child, size_t offset, pos_type end_pos) {
+ rep_->entry_end_pos()[pos_] = end_pos;
+ rep_->entry_child()[pos_] = child;
+ rep_->entry_data_offset()[pos_] = static_cast<offset_type>(offset);
+ pos_ = rep_->advance(pos_);
+ }
+
+ private:
+ CordRepRing* rep_;
+ index_type head_;
+ index_type pos_;
+};
+
+constexpr size_t CordRepRing::kMaxCapacity; // NOLINT: needed for c++11
+
+bool CordRepRing::IsValid(std::ostream& output) const {
+ if (capacity_ == 0) {
+ output << "capacity == 0";
+ return false;
+ }
+
+ if (head_ >= capacity_ || tail_ >= capacity_) {
+ output << "head " << head_ << " and/or tail " << tail_ << " exceed capacity "
+ << capacity_;
+ return false;
+ }
+
+ const index_type back = retreat(tail_);
+ size_t pos_length = Distance(begin_pos_, entry_end_pos(back));
+ if (pos_length != length) {
+ output << "length " << length << " does not match positional length "
+ << pos_length << " from begin_pos " << begin_pos_ << " and entry["
+ << back << "].end_pos " << entry_end_pos(back);
+ return false;
+ }
+
+ index_type head = head_;
+ pos_type begin_pos = begin_pos_;
+ do {
+ pos_type end_pos = entry_end_pos(head);
+ size_t entry_length = Distance(begin_pos, end_pos);
+ if (entry_length == 0) {
+ output << "entry[" << head << "] has an invalid length " << entry_length
+ << " from begin_pos " << begin_pos << " and end_pos " << end_pos;
+ return false;
+ }
+
+ CordRep* child = entry_child(head);
+ if (child == nullptr) {
+ output << "entry[" << head << "].child == nullptr";
+ return false;
+ }
+ if (child->tag < FLAT && child->tag != EXTERNAL) {
+ output << "entry[" << head << "].child has an invalid tag "
+ << static_cast<int>(child->tag);
+ return false;
+ }
+
+ size_t offset = entry_data_offset(head);
+ if (offset >= child->length || entry_length > child->length - offset) {
+ output << "entry[" << head << "] has offset " << offset
+ << " and entry length " << entry_length
<< " which are outside of the child's length of " << child->length;
- return false;
- }
-
- begin_pos = end_pos;
- head = advance(head);
- } while (head != tail_);
-
- return true;
-}
-
-#ifdef EXTRA_CORD_RING_VALIDATION
-CordRepRing* CordRepRing::Validate(CordRepRing* rep, const char* file,
- int line) {
- if (!rep->IsValid(std::cerr)) {
- std::cerr << "\nERROR: CordRepRing corrupted";
- if (line) std::cerr << " at line " << line;
- if (file) std::cerr << " in file " << file;
- std::cerr << "\nContent = " << *rep;
- abort();
- }
- return rep;
-}
-#endif // EXTRA_CORD_RING_VALIDATION
-
-CordRepRing* CordRepRing::New(size_t capacity, size_t extra) {
- CheckCapacity(capacity, extra);
-
- size_t size = AllocSize(capacity += extra);
- void* mem = ::operator new(size);
- auto* rep = new (mem) CordRepRing(static_cast<index_type>(capacity));
- rep->tag = RING;
- rep->capacity_ = static_cast<index_type>(capacity);
- rep->begin_pos_ = 0;
- return rep;
-}
-
-void CordRepRing::SetCapacityForTesting(size_t capacity) {
- // Adjust for the changed layout
- assert(capacity <= capacity_);
- assert(head() == 0 || head() < tail());
- memmove(Layout::Partial(capacity).Pointer<1>(data_) + head(),
- Layout::Partial(capacity_).Pointer<1>(data_) + head(),
- entries() * sizeof(Layout::ElementType<1>));
- memmove(Layout::Partial(capacity, capacity).Pointer<2>(data_) + head(),
- Layout::Partial(capacity_, capacity_).Pointer<2>(data_) + head(),
- entries() * sizeof(Layout::ElementType<2>));
- capacity_ = static_cast<index_type>(capacity);
-}
-
-void CordRepRing::Delete(CordRepRing* rep) {
+ return false;
+ }
+
+ begin_pos = end_pos;
+ head = advance(head);
+ } while (head != tail_);
+
+ return true;
+}
+
+#ifdef EXTRA_CORD_RING_VALIDATION
+CordRepRing* CordRepRing::Validate(CordRepRing* rep, const char* file,
+ int line) {
+ if (!rep->IsValid(std::cerr)) {
+ std::cerr << "\nERROR: CordRepRing corrupted";
+ if (line) std::cerr << " at line " << line;
+ if (file) std::cerr << " in file " << file;
+ std::cerr << "\nContent = " << *rep;
+ abort();
+ }
+ return rep;
+}
+#endif // EXTRA_CORD_RING_VALIDATION
+
+CordRepRing* CordRepRing::New(size_t capacity, size_t extra) {
+ CheckCapacity(capacity, extra);
+
+ size_t size = AllocSize(capacity += extra);
+ void* mem = ::operator new(size);
+ auto* rep = new (mem) CordRepRing(static_cast<index_type>(capacity));
+ rep->tag = RING;
+ rep->capacity_ = static_cast<index_type>(capacity);
+ rep->begin_pos_ = 0;
+ return rep;
+}
+
+void CordRepRing::SetCapacityForTesting(size_t capacity) {
+ // Adjust for the changed layout
+ assert(capacity <= capacity_);
+ assert(head() == 0 || head() < tail());
+ memmove(Layout::Partial(capacity).Pointer<1>(data_) + head(),
+ Layout::Partial(capacity_).Pointer<1>(data_) + head(),
+ entries() * sizeof(Layout::ElementType<1>));
+ memmove(Layout::Partial(capacity, capacity).Pointer<2>(data_) + head(),
+ Layout::Partial(capacity_, capacity_).Pointer<2>(data_) + head(),
+ entries() * sizeof(Layout::ElementType<2>));
+ capacity_ = static_cast<index_type>(capacity);
+}
+
+void CordRepRing::Delete(CordRepRing* rep) {
assert(rep != nullptr && rep->IsRing());
-#if defined(__cpp_sized_deallocation)
- size_t size = AllocSize(rep->capacity_);
- rep->~CordRepRing();
- ::operator delete(rep, size);
-#else
- rep->~CordRepRing();
- ::operator delete(rep);
-#endif
-}
-
-void CordRepRing::Destroy(CordRepRing* rep) {
- UnrefEntries(rep, rep->head(), rep->tail());
- Delete(rep);
-}
-
-template <bool ref>
-void CordRepRing::Fill(const CordRepRing* src, index_type head,
- index_type tail) {
- this->length = src->length;
- head_ = 0;
- tail_ = advance(0, src->entries(head, tail));
- begin_pos_ = src->begin_pos_;
-
- // TODO(mvels): there may be opportunities here for large buffers.
- auto* dst_pos = entry_end_pos();
- auto* dst_child = entry_child();
- auto* dst_offset = entry_data_offset();
- src->ForEach(head, tail, [&](index_type index) {
- *dst_pos++ = src->entry_end_pos(index);
- CordRep* child = src->entry_child(index);
- *dst_child++ = ref ? CordRep::Ref(child) : child;
- *dst_offset++ = src->entry_data_offset(index);
- });
-}
-
-CordRepRing* CordRepRing::Copy(CordRepRing* rep, index_type head,
- index_type tail, size_t extra) {
- CordRepRing* newrep = CordRepRing::New(rep->entries(head, tail), extra);
- newrep->Fill<true>(rep, head, tail);
- CordRep::Unref(rep);
- return newrep;
-}
-
-CordRepRing* CordRepRing::Mutable(CordRepRing* rep, size_t extra) {
- // Get current number of entries, and check for max capacity.
- size_t entries = rep->entries();
-
+#if defined(__cpp_sized_deallocation)
+ size_t size = AllocSize(rep->capacity_);
+ rep->~CordRepRing();
+ ::operator delete(rep, size);
+#else
+ rep->~CordRepRing();
+ ::operator delete(rep);
+#endif
+}
+
+void CordRepRing::Destroy(CordRepRing* rep) {
+ UnrefEntries(rep, rep->head(), rep->tail());
+ Delete(rep);
+}
+
+template <bool ref>
+void CordRepRing::Fill(const CordRepRing* src, index_type head,
+ index_type tail) {
+ this->length = src->length;
+ head_ = 0;
+ tail_ = advance(0, src->entries(head, tail));
+ begin_pos_ = src->begin_pos_;
+
+ // TODO(mvels): there may be opportunities here for large buffers.
+ auto* dst_pos = entry_end_pos();
+ auto* dst_child = entry_child();
+ auto* dst_offset = entry_data_offset();
+ src->ForEach(head, tail, [&](index_type index) {
+ *dst_pos++ = src->entry_end_pos(index);
+ CordRep* child = src->entry_child(index);
+ *dst_child++ = ref ? CordRep::Ref(child) : child;
+ *dst_offset++ = src->entry_data_offset(index);
+ });
+}
+
+CordRepRing* CordRepRing::Copy(CordRepRing* rep, index_type head,
+ index_type tail, size_t extra) {
+ CordRepRing* newrep = CordRepRing::New(rep->entries(head, tail), extra);
+ newrep->Fill<true>(rep, head, tail);
+ CordRep::Unref(rep);
+ return newrep;
+}
+
+CordRepRing* CordRepRing::Mutable(CordRepRing* rep, size_t extra) {
+ // Get current number of entries, and check for max capacity.
+ size_t entries = rep->entries();
+
if (!rep->refcount.IsMutable()) {
return Copy(rep, rep->head(), rep->tail(), extra);
- } else if (entries + extra > rep->capacity()) {
+ } else if (entries + extra > rep->capacity()) {
const size_t min_grow = rep->capacity() + rep->capacity() / 2;
const size_t min_extra = (std::max)(extra, min_grow - entries);
- CordRepRing* newrep = CordRepRing::New(entries, min_extra);
- newrep->Fill<false>(rep, rep->head(), rep->tail());
- CordRepRing::Delete(rep);
- return newrep;
- } else {
- return rep;
- }
-}
-
-Span<char> CordRepRing::GetAppendBuffer(size_t size) {
+ CordRepRing* newrep = CordRepRing::New(entries, min_extra);
+ newrep->Fill<false>(rep, rep->head(), rep->tail());
+ CordRepRing::Delete(rep);
+ return newrep;
+ } else {
+ return rep;
+ }
+}
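
The branch above grows a privately owned ring by at least half its current capacity before calling New(). A minimal standalone sketch of that arithmetic (hypothetical helper, not part of the library; assumes entries <= capacity):

#include <algorithm>
#include <cstddef>

// Lower bound on the capacity requested from CordRepRing::New() when a
// privately owned ring runs out of slots: grow to >= 1.5 * capacity, and
// never by less than the `extra` slots the caller asked for.
std::size_t GrownCapacityHint(std::size_t capacity, std::size_t entries,
                              std::size_t extra) {
  const std::size_t min_grow = capacity + capacity / 2;
  const std::size_t min_extra = (std::max)(extra, min_grow - entries);
  return entries + min_extra;  // e.g. capacity 8, entries 8, extra 1 -> 12
}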
+
+Span<char> CordRepRing::GetAppendBuffer(size_t size) {
assert(refcount.IsMutable());
- index_type back = retreat(tail_);
- CordRep* child = entry_child(back);
+ index_type back = retreat(tail_);
+ CordRep* child = entry_child(back);
if (child->tag >= FLAT && child->refcount.IsMutable()) {
- size_t capacity = child->flat()->Capacity();
- pos_type end_pos = entry_end_pos(back);
- size_t data_offset = entry_data_offset(back);
- size_t entry_length = Distance(entry_begin_pos(back), end_pos);
- size_t used = data_offset + entry_length;
- if (size_t n = (std::min)(capacity - used, size)) {
- child->length = data_offset + entry_length + n;
- entry_end_pos()[back] = end_pos + n;
- this->length += n;
- return {child->flat()->Data() + used, n};
- }
- }
- return {nullptr, 0};
-}
-
-Span<char> CordRepRing::GetPrependBuffer(size_t size) {
+ size_t capacity = child->flat()->Capacity();
+ pos_type end_pos = entry_end_pos(back);
+ size_t data_offset = entry_data_offset(back);
+ size_t entry_length = Distance(entry_begin_pos(back), end_pos);
+ size_t used = data_offset + entry_length;
+ if (size_t n = (std::min)(capacity - used, size)) {
+ child->length = data_offset + entry_length + n;
+ entry_end_pos()[back] = end_pos + n;
+ this->length += n;
+ return {child->flat()->Data() + used, n};
+ }
+ }
+ return {nullptr, 0};
+}
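
A usage sketch for the append buffer above (hypothetical helper assumed to live in cord_internal next to this code, operating on an unshared ring): claim whatever spare tail capacity exists, fill it completely, then route any remainder through the regular Append() overload, which is what Append(rep, string_view, extra) below does itself:

#include <cstring>  // memcpy

void AppendWithSpareCapacity(CordRepRing*& ring, absl::string_view data) {
  Span<char> avail = ring->GetAppendBuffer(data.size());
  if (!avail.empty()) {
    // The returned span must be fully written before any other operation.
    memcpy(avail.data(), data.data(), avail.size());
    data.remove_prefix(avail.size());
  }
  if (!data.empty()) ring = CordRepRing::Append(ring, data);
}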
+
+Span<char> CordRepRing::GetPrependBuffer(size_t size) {
assert(refcount.IsMutable());
- CordRep* child = entry_child(head_);
- size_t data_offset = entry_data_offset(head_);
+ CordRep* child = entry_child(head_);
+ size_t data_offset = entry_data_offset(head_);
if (data_offset && child->refcount.IsMutable() && child->tag >= FLAT) {
- size_t n = (std::min)(data_offset, size);
- this->length += n;
- begin_pos_ -= n;
- data_offset -= n;
- entry_data_offset()[head_] = static_cast<offset_type>(data_offset);
- return {child->flat()->Data() + data_offset, n};
- }
- return {nullptr, 0};
-}
-
-CordRepRing* CordRepRing::CreateFromLeaf(CordRep* child, size_t offset,
+ size_t n = (std::min)(data_offset, size);
+ this->length += n;
+ begin_pos_ -= n;
+ data_offset -= n;
+ entry_data_offset()[head_] = static_cast<offset_type>(data_offset);
+ return {child->flat()->Data() + data_offset, n};
+ }
+ return {nullptr, 0};
+}
+
+CordRepRing* CordRepRing::CreateFromLeaf(CordRep* child, size_t offset,
size_t len, size_t extra) {
- CordRepRing* rep = CordRepRing::New(1, extra);
- rep->head_ = 0;
- rep->tail_ = rep->advance(0);
+ CordRepRing* rep = CordRepRing::New(1, extra);
+ rep->head_ = 0;
+ rep->tail_ = rep->advance(0);
rep->length = len;
rep->entry_end_pos()[0] = len;
- rep->entry_child()[0] = child;
- rep->entry_data_offset()[0] = static_cast<offset_type>(offset);
- return Validate(rep);
-}
-
-CordRepRing* CordRepRing::CreateSlow(CordRep* child, size_t extra) {
- CordRepRing* rep = nullptr;
+ rep->entry_child()[0] = child;
+ rep->entry_data_offset()[0] = static_cast<offset_type>(offset);
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::CreateSlow(CordRep* child, size_t extra) {
+ CordRepRing* rep = nullptr;
Consume(child, [&](CordRep* child_arg, size_t offset, size_t len) {
if (IsFlatOrExternal(child_arg)) {
rep = rep ? AppendLeaf(rep, child_arg, offset, len)
: CreateFromLeaf(child_arg, offset, len, extra);
- } else if (rep) {
+ } else if (rep) {
rep = AddRing<AddMode::kAppend>(rep, child_arg->ring(), offset, len);
} else if (offset == 0 && child_arg->length == len) {
rep = Mutable(child_arg->ring(), extra);
- } else {
+ } else {
rep = SubRing(child_arg->ring(), offset, len, extra);
- }
- });
- return Validate(rep, nullptr, __LINE__);
-}
-
-CordRepRing* CordRepRing::Create(CordRep* child, size_t extra) {
- size_t length = child->length;
- if (IsFlatOrExternal(child)) {
- return CreateFromLeaf(child, 0, length, extra);
- }
+ }
+ });
+ return Validate(rep, nullptr, __LINE__);
+}
+
+CordRepRing* CordRepRing::Create(CordRep* child, size_t extra) {
+ size_t length = child->length;
+ if (IsFlatOrExternal(child)) {
+ return CreateFromLeaf(child, 0, length, extra);
+ }
if (child->IsRing()) {
- return Mutable(child->ring(), extra);
- }
- return CreateSlow(child, extra);
-}
-
-template <CordRepRing::AddMode mode>
-CordRepRing* CordRepRing::AddRing(CordRepRing* rep, CordRepRing* ring,
+ return Mutable(child->ring(), extra);
+ }
+ return CreateSlow(child, extra);
+}
+
+template <CordRepRing::AddMode mode>
+CordRepRing* CordRepRing::AddRing(CordRepRing* rep, CordRepRing* ring,
size_t offset, size_t len) {
- assert(offset < ring->length);
- constexpr bool append = mode == AddMode::kAppend;
- Position head = ring->Find(offset);
+ assert(offset < ring->length);
+ constexpr bool append = mode == AddMode::kAppend;
+ Position head = ring->Find(offset);
Position tail = ring->FindTail(head.index, offset + len);
- const index_type entries = ring->entries(head.index, tail.index);
-
- rep = Mutable(rep, entries);
-
- // The delta for making ring[head].end_pos into 'len - offset'
- const pos_type delta_length =
+ const index_type entries = ring->entries(head.index, tail.index);
+
+ rep = Mutable(rep, entries);
+
+ // The delta for making ring[head].end_pos into 'len - offset'
+ const pos_type delta_length =
(append ? rep->begin_pos_ + rep->length : rep->begin_pos_ - len) -
- ring->entry_begin_pos(head.index) - head.offset;
-
- // Start filling at `tail`, or `entries` before `head`
- Filler filler(rep, append ? rep->tail_ : rep->retreat(rep->head_, entries));
-
- if (ring->refcount.IsOne()) {
- // Copy entries from source stealing the ref and adjusting the end position.
- // Commit the filler as this is no-op.
- ring->ForEach(head.index, tail.index, [&](index_type ix) {
- filler.Add(ring->entry_child(ix), ring->entry_data_offset(ix),
- ring->entry_end_pos(ix) + delta_length);
- });
-
- // Unref entries we did not copy over, and delete source.
- if (head.index != ring->head_) UnrefEntries(ring, ring->head_, head.index);
- if (tail.index != ring->tail_) UnrefEntries(ring, tail.index, ring->tail_);
- CordRepRing::Delete(ring);
- } else {
- ring->ForEach(head.index, tail.index, [&](index_type ix) {
- CordRep* child = ring->entry_child(ix);
- filler.Add(child, ring->entry_data_offset(ix),
- ring->entry_end_pos(ix) + delta_length);
- CordRep::Ref(child);
- });
- CordRepRing::Unref(ring);
- }
-
- if (head.offset) {
- // Increase offset of first 'source' entry appended or prepended.
- // This is always the entry in `filler.head()`
- rep->AddDataOffset(filler.head(), head.offset);
- }
-
- if (tail.offset) {
- // Reduce length of last 'source' entry appended or prepended.
- // This is always the entry tailed by `filler.pos()`
- rep->SubLength(rep->retreat(filler.pos()), tail.offset);
- }
-
- // Commit changes
+ ring->entry_begin_pos(head.index) - head.offset;
+
+ // Start filling at `tail`, or `entries` before `head`
+ Filler filler(rep, append ? rep->tail_ : rep->retreat(rep->head_, entries));
+
+ if (ring->refcount.IsOne()) {
+ // Copy entries from source stealing the ref and adjusting the end position.
+ // Commit the filler as this is no-op.
+ ring->ForEach(head.index, tail.index, [&](index_type ix) {
+ filler.Add(ring->entry_child(ix), ring->entry_data_offset(ix),
+ ring->entry_end_pos(ix) + delta_length);
+ });
+
+ // Unref entries we did not copy over, and delete source.
+ if (head.index != ring->head_) UnrefEntries(ring, ring->head_, head.index);
+ if (tail.index != ring->tail_) UnrefEntries(ring, tail.index, ring->tail_);
+ CordRepRing::Delete(ring);
+ } else {
+ ring->ForEach(head.index, tail.index, [&](index_type ix) {
+ CordRep* child = ring->entry_child(ix);
+ filler.Add(child, ring->entry_data_offset(ix),
+ ring->entry_end_pos(ix) + delta_length);
+ CordRep::Ref(child);
+ });
+ CordRepRing::Unref(ring);
+ }
+
+ if (head.offset) {
+ // Increase offset of first 'source' entry appended or prepended.
+ // This is always the entry in `filler.head()`
+ rep->AddDataOffset(filler.head(), head.offset);
+ }
+
+ if (tail.offset) {
+ // Reduce length of last 'source' entry appended or prepended.
+ // This is always the entry tailed by `filler.pos()`
+ rep->SubLength(rep->retreat(filler.pos()), tail.offset);
+ }
+
+ // Commit changes
rep->length += len;
- if (append) {
- rep->tail_ = filler.pos();
- } else {
- rep->head_ = filler.head();
+ if (append) {
+ rep->tail_ = filler.pos();
+ } else {
+ rep->head_ = filler.head();
rep->begin_pos_ -= len;
- }
-
- return Validate(rep);
-}
-
-CordRepRing* CordRepRing::AppendSlow(CordRepRing* rep, CordRep* child) {
+ }
+
+ return Validate(rep);
+}
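
The delta_length rebasing above is easier to follow with concrete numbers. The figures below are purely illustrative; the only property relied on is that pos_type is unsigned, so the arithmetic wraps consistently:

#include <cstddef>

// Append case: destination has begin_pos_ = 0 and length = 10, so copied
// bytes must start at logical position 10. The first copied source byte sits
// at entry_begin_pos(head.index) + head.offset = 25.
//   delta_length = (0 + 10) - 25   (wraps around; pos_type is unsigned)
// A source entry ending at end_pos 32 is therefore stored with
// 32 + delta_length == 17, i.e. source position 25 lands on destination 10.
static_assert(std::size_t{0 + 10} - std::size_t{25} + std::size_t{32} ==
                  std::size_t{17},
              "unsigned wraparound keeps rebased end_pos values consistent");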
+
+CordRepRing* CordRepRing::AppendSlow(CordRepRing* rep, CordRep* child) {
Consume(child, [&rep](CordRep* child_arg, size_t offset, size_t len) {
if (child_arg->IsRing()) {
rep = AddRing<AddMode::kAppend>(rep, child_arg->ring(), offset, len);
- } else {
+ } else {
rep = AppendLeaf(rep, child_arg, offset, len);
- }
- });
- return rep;
-}
-
-CordRepRing* CordRepRing::AppendLeaf(CordRepRing* rep, CordRep* child,
+ }
+ });
+ return rep;
+}
+
+CordRepRing* CordRepRing::AppendLeaf(CordRepRing* rep, CordRep* child,
size_t offset, size_t len) {
- rep = Mutable(rep, 1);
- index_type back = rep->tail_;
- const pos_type begin_pos = rep->begin_pos_ + rep->length;
- rep->tail_ = rep->advance(rep->tail_);
+ rep = Mutable(rep, 1);
+ index_type back = rep->tail_;
+ const pos_type begin_pos = rep->begin_pos_ + rep->length;
+ rep->tail_ = rep->advance(rep->tail_);
rep->length += len;
rep->entry_end_pos()[back] = begin_pos + len;
- rep->entry_child()[back] = child;
- rep->entry_data_offset()[back] = static_cast<offset_type>(offset);
- return Validate(rep, nullptr, __LINE__);
-}
-
-CordRepRing* CordRepRing::Append(CordRepRing* rep, CordRep* child) {
- size_t length = child->length;
- if (IsFlatOrExternal(child)) {
- return AppendLeaf(rep, child, 0, length);
- }
+ rep->entry_child()[back] = child;
+ rep->entry_data_offset()[back] = static_cast<offset_type>(offset);
+ return Validate(rep, nullptr, __LINE__);
+}
+
+CordRepRing* CordRepRing::Append(CordRepRing* rep, CordRep* child) {
+ size_t length = child->length;
+ if (IsFlatOrExternal(child)) {
+ return AppendLeaf(rep, child, 0, length);
+ }
if (child->IsRing()) {
- return AddRing<AddMode::kAppend>(rep, child->ring(), 0, length);
- }
- return AppendSlow(rep, child);
-}
-
-CordRepRing* CordRepRing::PrependSlow(CordRepRing* rep, CordRep* child) {
+ return AddRing<AddMode::kAppend>(rep, child->ring(), 0, length);
+ }
+ return AppendSlow(rep, child);
+}
+
+CordRepRing* CordRepRing::PrependSlow(CordRepRing* rep, CordRep* child) {
ReverseConsume(child, [&](CordRep* child_arg, size_t offset, size_t len) {
if (IsFlatOrExternal(child_arg)) {
rep = PrependLeaf(rep, child_arg, offset, len);
- } else {
+ } else {
rep = AddRing<AddMode::kPrepend>(rep, child_arg->ring(), offset, len);
- }
- });
- return Validate(rep);
-}
-
-CordRepRing* CordRepRing::PrependLeaf(CordRepRing* rep, CordRep* child,
+ }
+ });
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::PrependLeaf(CordRepRing* rep, CordRep* child,
size_t offset, size_t len) {
- rep = Mutable(rep, 1);
- index_type head = rep->retreat(rep->head_);
- pos_type end_pos = rep->begin_pos_;
- rep->head_ = head;
+ rep = Mutable(rep, 1);
+ index_type head = rep->retreat(rep->head_);
+ pos_type end_pos = rep->begin_pos_;
+ rep->head_ = head;
rep->length += len;
rep->begin_pos_ -= len;
- rep->entry_end_pos()[head] = end_pos;
- rep->entry_child()[head] = child;
- rep->entry_data_offset()[head] = static_cast<offset_type>(offset);
- return Validate(rep);
-}
-
-CordRepRing* CordRepRing::Prepend(CordRepRing* rep, CordRep* child) {
- size_t length = child->length;
- if (IsFlatOrExternal(child)) {
- return PrependLeaf(rep, child, 0, length);
- }
+ rep->entry_end_pos()[head] = end_pos;
+ rep->entry_child()[head] = child;
+ rep->entry_data_offset()[head] = static_cast<offset_type>(offset);
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::Prepend(CordRepRing* rep, CordRep* child) {
+ size_t length = child->length;
+ if (IsFlatOrExternal(child)) {
+ return PrependLeaf(rep, child, 0, length);
+ }
if (child->IsRing()) {
- return AddRing<AddMode::kPrepend>(rep, child->ring(), 0, length);
- }
- return PrependSlow(rep, child);
-}
-
-CordRepRing* CordRepRing::Append(CordRepRing* rep, absl::string_view data,
- size_t extra) {
+ return AddRing<AddMode::kPrepend>(rep, child->ring(), 0, length);
+ }
+ return PrependSlow(rep, child);
+}
+
+CordRepRing* CordRepRing::Append(CordRepRing* rep, absl::string_view data,
+ size_t extra) {
if (rep->refcount.IsMutable()) {
- Span<char> avail = rep->GetAppendBuffer(data.length());
- if (!avail.empty()) {
- memcpy(avail.data(), data.data(), avail.length());
- data.remove_prefix(avail.length());
- }
- }
- if (data.empty()) return Validate(rep);
-
- const size_t flats = (data.length() - 1) / kMaxFlatLength + 1;
- rep = Mutable(rep, flats);
-
- Filler filler(rep, rep->tail_);
- pos_type pos = rep->begin_pos_ + rep->length;
-
- while (data.length() >= kMaxFlatLength) {
- auto* flat = CreateFlat(data.data(), kMaxFlatLength);
- filler.Add(flat, 0, pos += kMaxFlatLength);
- data.remove_prefix(kMaxFlatLength);
- }
-
- if (data.length()) {
- auto* flat = CreateFlat(data.data(), data.length(), extra);
- filler.Add(flat, 0, pos += data.length());
- }
-
- rep->length = pos - rep->begin_pos_;
- rep->tail_ = filler.pos();
-
- return Validate(rep);
-}
-
-CordRepRing* CordRepRing::Prepend(CordRepRing* rep, absl::string_view data,
- size_t extra) {
+ Span<char> avail = rep->GetAppendBuffer(data.length());
+ if (!avail.empty()) {
+ memcpy(avail.data(), data.data(), avail.length());
+ data.remove_prefix(avail.length());
+ }
+ }
+ if (data.empty()) return Validate(rep);
+
+ const size_t flats = (data.length() - 1) / kMaxFlatLength + 1;
+ rep = Mutable(rep, flats);
+
+ Filler filler(rep, rep->tail_);
+ pos_type pos = rep->begin_pos_ + rep->length;
+
+ while (data.length() >= kMaxFlatLength) {
+ auto* flat = CreateFlat(data.data(), kMaxFlatLength);
+ filler.Add(flat, 0, pos += kMaxFlatLength);
+ data.remove_prefix(kMaxFlatLength);
+ }
+
+ if (data.length()) {
+ auto* flat = CreateFlat(data.data(), data.length(), extra);
+ filler.Add(flat, 0, pos += data.length());
+ }
+
+ rep->length = pos - rep->begin_pos_;
+ rep->tail_ = filler.pos();
+
+ return Validate(rep);
+}
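
The `flats` computation above is a ceiling division: one flat per started chunk of kMaxFlatLength bytes. A compile-time sketch, using 4096 only as an illustrative stand-in for kMaxFlatLength:

#include <cstddef>

constexpr std::size_t CeilDiv(std::size_t n, std::size_t d) {
  return (n - 1) / d + 1;  // matches `flats` above; requires n > 0
}
static_assert(CeilDiv(1, 4096) == 1 && CeilDiv(4096, 4096) == 1 &&
                  CeilDiv(4097, 4096) == 2,
              "one flat per started kMaxFlatLength-sized chunk");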
+
+CordRepRing* CordRepRing::Prepend(CordRepRing* rep, absl::string_view data,
+ size_t extra) {
if (rep->refcount.IsMutable()) {
- Span<char> avail = rep->GetPrependBuffer(data.length());
- if (!avail.empty()) {
- const char* tail = data.data() + data.length() - avail.length();
- memcpy(avail.data(), tail, avail.length());
- data.remove_suffix(avail.length());
- }
- }
- if (data.empty()) return rep;
-
- const size_t flats = (data.length() - 1) / kMaxFlatLength + 1;
- rep = Mutable(rep, flats);
- pos_type pos = rep->begin_pos_;
- Filler filler(rep, rep->retreat(rep->head_, static_cast<index_type>(flats)));
-
- size_t first_size = data.size() - (flats - 1) * kMaxFlatLength;
- CordRepFlat* flat = CordRepFlat::New(first_size + extra);
- flat->length = first_size + extra;
- memcpy(flat->Data() + extra, data.data(), first_size);
- data.remove_prefix(first_size);
- filler.Add(flat, extra, pos);
- pos -= first_size;
-
- while (!data.empty()) {
- assert(data.size() >= kMaxFlatLength);
- flat = CreateFlat(data.data(), kMaxFlatLength);
- filler.Add(flat, 0, pos);
- pos -= kMaxFlatLength;
- data.remove_prefix(kMaxFlatLength);
- }
-
- rep->head_ = filler.head();
- rep->length += rep->begin_pos_ - pos;
- rep->begin_pos_ = pos;
-
- return Validate(rep);
-}
-
-// 32 entries is 32 * sizeof(pos_type) = 4 cache lines on x86
-static constexpr index_type kBinarySearchThreshold = 32;
-static constexpr index_type kBinarySearchEndCount = 8;
-
-template <bool wrap>
-CordRepRing::index_type CordRepRing::FindBinary(index_type head,
- index_type tail,
- size_t offset) const {
- index_type count = tail + (wrap ? capacity_ : 0) - head;
- do {
- count = (count - 1) / 2;
- assert(count < entries(head, tail_));
- index_type mid = wrap ? advance(head, count) : head + count;
- index_type after_mid = wrap ? advance(mid) : mid + 1;
- bool larger = (offset >= entry_end_offset(mid));
- head = larger ? after_mid : head;
- tail = larger ? tail : mid;
- assert(head != tail);
- } while (ABSL_PREDICT_TRUE(count > kBinarySearchEndCount));
- return head;
-}
-
-CordRepRing::Position CordRepRing::FindSlow(index_type head,
- size_t offset) const {
- index_type tail = tail_;
-
- // Binary search until we are good for linear search
- // Optimize for branchless / non wrapping ops
- if (tail > head) {
- index_type count = tail - head;
- if (count > kBinarySearchThreshold) {
- head = FindBinary<false>(head, tail, offset);
- }
- } else {
- index_type count = capacity_ + tail - head;
- if (count > kBinarySearchThreshold) {
- head = FindBinary<true>(head, tail, offset);
- }
- }
-
- pos_type pos = entry_begin_pos(head);
- pos_type end_pos = entry_end_pos(head);
- while (offset >= Distance(begin_pos_, end_pos)) {
- head = advance(head);
- pos = end_pos;
- end_pos = entry_end_pos(head);
- }
-
- return {head, offset - Distance(begin_pos_, pos)};
-}
-
-CordRepRing::Position CordRepRing::FindTailSlow(index_type head,
- size_t offset) const {
- index_type tail = tail_;
- const size_t tail_offset = offset - 1;
-
- // Binary search until we are good for linear search
- // Optimize for branchless / non wrapping ops
- if (tail > head) {
- index_type count = tail - head;
- if (count > kBinarySearchThreshold) {
- head = FindBinary<false>(head, tail, tail_offset);
- }
- } else {
- index_type count = capacity_ + tail - head;
- if (count > kBinarySearchThreshold) {
- head = FindBinary<true>(head, tail, tail_offset);
- }
- }
-
- size_t end_offset = entry_end_offset(head);
- while (tail_offset >= end_offset) {
- head = advance(head);
- end_offset = entry_end_offset(head);
- }
-
- return {advance(head), end_offset - offset};
-}
-
-char CordRepRing::GetCharacter(size_t offset) const {
- assert(offset < length);
-
- Position pos = Find(offset);
- size_t data_offset = entry_data_offset(pos.index) + pos.offset;
- return GetRepData(entry_child(pos.index))[data_offset];
-}
-
-CordRepRing* CordRepRing::SubRing(CordRepRing* rep, size_t offset,
+ Span<char> avail = rep->GetPrependBuffer(data.length());
+ if (!avail.empty()) {
+ const char* tail = data.data() + data.length() - avail.length();
+ memcpy(avail.data(), tail, avail.length());
+ data.remove_suffix(avail.length());
+ }
+ }
+ if (data.empty()) return rep;
+
+ const size_t flats = (data.length() - 1) / kMaxFlatLength + 1;
+ rep = Mutable(rep, flats);
+ pos_type pos = rep->begin_pos_;
+ Filler filler(rep, rep->retreat(rep->head_, static_cast<index_type>(flats)));
+
+ size_t first_size = data.size() - (flats - 1) * kMaxFlatLength;
+ CordRepFlat* flat = CordRepFlat::New(first_size + extra);
+ flat->length = first_size + extra;
+ memcpy(flat->Data() + extra, data.data(), first_size);
+ data.remove_prefix(first_size);
+ filler.Add(flat, extra, pos);
+ pos -= first_size;
+
+ while (!data.empty()) {
+ assert(data.size() >= kMaxFlatLength);
+ flat = CreateFlat(data.data(), kMaxFlatLength);
+ filler.Add(flat, 0, pos);
+ pos -= kMaxFlatLength;
+ data.remove_prefix(kMaxFlatLength);
+ }
+
+ rep->head_ = filler.head();
+ rep->length += rep->begin_pos_ - pos;
+ rep->begin_pos_ = pos;
+
+ return Validate(rep);
+}
+
+// 32 entries is 32 * sizeof(pos_type) = 4 cache lines on x86
+static constexpr index_type kBinarySearchThreshold = 32;
+static constexpr index_type kBinarySearchEndCount = 8;
+
+template <bool wrap>
+CordRepRing::index_type CordRepRing::FindBinary(index_type head,
+ index_type tail,
+ size_t offset) const {
+ index_type count = tail + (wrap ? capacity_ : 0) - head;
+ do {
+ count = (count - 1) / 2;
+ assert(count < entries(head, tail_));
+ index_type mid = wrap ? advance(head, count) : head + count;
+ index_type after_mid = wrap ? advance(mid) : mid + 1;
+ bool larger = (offset >= entry_end_offset(mid));
+ head = larger ? after_mid : head;
+ tail = larger ? tail : mid;
+ assert(head != tail);
+ } while (ABSL_PREDICT_TRUE(count > kBinarySearchEndCount));
+ return head;
+}
+
+CordRepRing::Position CordRepRing::FindSlow(index_type head,
+ size_t offset) const {
+ index_type tail = tail_;
+
+ // Binary search until we are good for linear search
+ // Optimize for branchless / non wrapping ops
+ if (tail > head) {
+ index_type count = tail - head;
+ if (count > kBinarySearchThreshold) {
+ head = FindBinary<false>(head, tail, offset);
+ }
+ } else {
+ index_type count = capacity_ + tail - head;
+ if (count > kBinarySearchThreshold) {
+ head = FindBinary<true>(head, tail, offset);
+ }
+ }
+
+ pos_type pos = entry_begin_pos(head);
+ pos_type end_pos = entry_end_pos(head);
+ while (offset >= Distance(begin_pos_, end_pos)) {
+ head = advance(head);
+ pos = end_pos;
+ end_pos = entry_end_pos(head);
+ }
+
+ return {head, offset - Distance(begin_pos_, pos)};
+}
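
A short trace of the linear phase above, with illustrative values (begin_pos_ == 0 and three entries whose end_pos values are 5, 6 and 11):

// Find(7), starting at the head index left by the binary-search phase:
//   entry[0]: end_pos 5,  7 >= 5  -> advance, pos = 5
//   entry[1]: end_pos 6,  7 >= 6  -> advance, pos = 6
//   entry[2]: end_pos 11, 7 <  11 -> stop
// Result: {index = 2, offset = 7 - 6 = 1}, i.e. one byte into entry[2].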
+
+CordRepRing::Position CordRepRing::FindTailSlow(index_type head,
+ size_t offset) const {
+ index_type tail = tail_;
+ const size_t tail_offset = offset - 1;
+
+ // Binary search until we are good for linear search
+ // Optimize for branchless / non wrapping ops
+ if (tail > head) {
+ index_type count = tail - head;
+ if (count > kBinarySearchThreshold) {
+ head = FindBinary<false>(head, tail, tail_offset);
+ }
+ } else {
+ index_type count = capacity_ + tail - head;
+ if (count > kBinarySearchThreshold) {
+ head = FindBinary<true>(head, tail, tail_offset);
+ }
+ }
+
+ size_t end_offset = entry_end_offset(head);
+ while (tail_offset >= end_offset) {
+ head = advance(head);
+ end_offset = entry_end_offset(head);
+ }
+
+ return {advance(head), end_offset - offset};
+}
+
+char CordRepRing::GetCharacter(size_t offset) const {
+ assert(offset < length);
+
+ Position pos = Find(offset);
+ size_t data_offset = entry_data_offset(pos.index) + pos.offset;
+ return GetRepData(entry_child(pos.index))[data_offset];
+}
+
+CordRepRing* CordRepRing::SubRing(CordRepRing* rep, size_t offset,
size_t len, size_t extra) {
- assert(offset <= rep->length);
+ assert(offset <= rep->length);
assert(offset <= rep->length - len);
-
+
if (len == 0) {
- CordRep::Unref(rep);
- return nullptr;
- }
-
- // Find position of first byte
- Position head = rep->Find(offset);
+ CordRep::Unref(rep);
+ return nullptr;
+ }
+
+ // Find position of first byte
+ Position head = rep->Find(offset);
Position tail = rep->FindTail(head.index, offset + len);
- const size_t new_entries = rep->entries(head.index, tail.index);
-
+ const size_t new_entries = rep->entries(head.index, tail.index);
+
if (rep->refcount.IsMutable() && extra <= (rep->capacity() - new_entries)) {
- // We adopt a privately owned rep and no extra entries needed.
- if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
- if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
- rep->head_ = head.index;
- rep->tail_ = tail.index;
- } else {
- // Copy subset to new rep
- rep = Copy(rep, head.index, tail.index, extra);
- head.index = rep->head_;
- tail.index = rep->tail_;
- }
-
- // Adjust begin_pos and length
+ // We adopt a privately owned rep and no extra entries needed.
+ if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
+ if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
+ rep->head_ = head.index;
+ rep->tail_ = tail.index;
+ } else {
+ // Copy subset to new rep
+ rep = Copy(rep, head.index, tail.index, extra);
+ head.index = rep->head_;
+ tail.index = rep->tail_;
+ }
+
+ // Adjust begin_pos and length
rep->length = len;
- rep->begin_pos_ += offset;
-
- // Adjust head and tail blocks
- if (head.offset) {
- rep->AddDataOffset(head.index, head.offset);
- }
- if (tail.offset) {
- rep->SubLength(rep->retreat(tail.index), tail.offset);
- }
-
- return Validate(rep);
-}
-
-CordRepRing* CordRepRing::RemovePrefix(CordRepRing* rep, size_t len,
- size_t extra) {
- assert(len <= rep->length);
- if (len == rep->length) {
- CordRep::Unref(rep);
- return nullptr;
- }
-
- Position head = rep->Find(len);
+ rep->begin_pos_ += offset;
+
+ // Adjust head and tail blocks
+ if (head.offset) {
+ rep->AddDataOffset(head.index, head.offset);
+ }
+ if (tail.offset) {
+ rep->SubLength(rep->retreat(tail.index), tail.offset);
+ }
+
+ return Validate(rep);
+}
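
A usage sketch for SubRing (hypothetical helper, not part of the API): SubRing adopts the reference it is given and may trim the input in place, so a caller that wants to keep the original takes an extra reference first, as in the header's examples:

#include <cassert>
#include <cstddef>

// Returns a ring holding bytes [offset, offset + len) of `ring`, leaving the
// caller's reference on `ring` untouched. Note SubRing returns nullptr when
// len == 0.
CordRepRing* Slice(CordRepRing* ring, size_t offset, size_t len) {
  assert(offset + len <= ring->length);
  return CordRepRing::SubRing(CordRep::Ref(ring)->ring(), offset, len);
}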
+
+CordRepRing* CordRepRing::RemovePrefix(CordRepRing* rep, size_t len,
+ size_t extra) {
+ assert(len <= rep->length);
+ if (len == rep->length) {
+ CordRep::Unref(rep);
+ return nullptr;
+ }
+
+ Position head = rep->Find(len);
if (rep->refcount.IsMutable()) {
- if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
- rep->head_ = head.index;
- } else {
- rep = Copy(rep, head.index, rep->tail_, extra);
- head.index = rep->head_;
- }
-
- // Adjust begin_pos and length
- rep->length -= len;
- rep->begin_pos_ += len;
-
- // Adjust head block
- if (head.offset) {
- rep->AddDataOffset(head.index, head.offset);
- }
-
- return Validate(rep);
-}
-
-CordRepRing* CordRepRing::RemoveSuffix(CordRepRing* rep, size_t len,
- size_t extra) {
- assert(len <= rep->length);
-
- if (len == rep->length) {
- CordRep::Unref(rep);
- return nullptr;
- }
-
- Position tail = rep->FindTail(rep->length - len);
+ if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
+ rep->head_ = head.index;
+ } else {
+ rep = Copy(rep, head.index, rep->tail_, extra);
+ head.index = rep->head_;
+ }
+
+ // Adjust begin_pos and length
+ rep->length -= len;
+ rep->begin_pos_ += len;
+
+ // Adjust head block
+ if (head.offset) {
+ rep->AddDataOffset(head.index, head.offset);
+ }
+
+ return Validate(rep);
+}
+
+CordRepRing* CordRepRing::RemoveSuffix(CordRepRing* rep, size_t len,
+ size_t extra) {
+ assert(len <= rep->length);
+
+ if (len == rep->length) {
+ CordRep::Unref(rep);
+ return nullptr;
+ }
+
+ Position tail = rep->FindTail(rep->length - len);
if (rep->refcount.IsMutable()) {
- // We adopt a privately owned rep, scrub.
- if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
- rep->tail_ = tail.index;
- } else {
- // Copy subset to new rep
- rep = Copy(rep, rep->head_, tail.index, extra);
- tail.index = rep->tail_;
- }
-
- // Adjust length
- rep->length -= len;
-
- // Adjust tail block
- if (tail.offset) {
- rep->SubLength(rep->retreat(tail.index), tail.offset);
- }
-
- return Validate(rep);
-}
-
-} // namespace cord_internal
-ABSL_NAMESPACE_END
-} // namespace absl
+ // We adopt a privately owned rep, scrub.
+ if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
+ rep->tail_ = tail.index;
+ } else {
+ // Copy subset to new rep
+ rep = Copy(rep, rep->head_, tail.index, extra);
+ tail.index = rep->tail_;
+ }
+
+ // Adjust length
+ rep->length -= len;
+
+ // Adjust tail block
+ if (tail.offset) {
+ rep->SubLength(rep->retreat(tail.index), tail.offset);
+ }
+
+ return Validate(rep);
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring.h b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring.h
index 2000e21ea0..f9b57a8bbf 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring.h
@@ -1,233 +1,233 @@
-// Copyright 2020 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
-#define ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
-
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <iosfwd>
-#include <limits>
-#include <memory>
-
-#include "absl/container/internal/layout.h"
-#include "absl/strings/internal/cord_internal.h"
-#include "absl/strings/internal/cord_rep_flat.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace cord_internal {
-
-// All operations modifying a ring buffer are implemented as static methods
-// requiring a CordRepRing instance with a reference adopted by the method.
-//
-// The methods return the modified ring buffer, which may be equal to the input
-// if the input was not shared, and having large enough capacity to accommodate
-// any newly added node(s). Otherwise, a copy of the input rep with the new
-// node(s) added is returned.
-//
-// Any modification on non shared ring buffers with enough capacity will then
-// require minimum atomic operations. Caller should where possible provide
-// reasonable `extra` hints for both anticipated extra `flat` byte space, as
-// well as anticipated extra nodes required for complex operations.
-//
-// Example of code creating a ring buffer, adding some data to it,
-// and discarding the buffer when done:
-//
-// void FunWithRings() {
-// // Create ring with 3 flats
-// CordRep* flat = CreateFlat("Hello");
-// CordRepRing* ring = CordRepRing::Create(flat, 2);
-// ring = CordRepRing::Append(ring, CreateFlat(" "));
-// ring = CordRepRing::Append(ring, CreateFlat("world"));
-// DoSomethingWithRing(ring);
-// CordRep::Unref(ring);
-// }
-//
-// Example of code Copying an existing ring buffer and modifying it:
-//
-// void MoreFunWithRings(CordRepRing* src) {
-// CordRepRing* ring = CordRep::Ref(src)->ring();
-// ring = CordRepRing::Append(ring, CreateFlat("Hello"));
-// ring = CordRepRing::Append(ring, CreateFlat(" "));
-// ring = CordRepRing::Append(ring, CreateFlat("world"));
-// DoSomethingWithRing(ring);
-// CordRep::Unref(ring);
-// }
-//
-class CordRepRing : public CordRep {
- public:
- // `pos_type` represents a 'logical position'. A CordRepRing instance has a
- // `begin_pos` (default 0), and each node inside the buffer will have an
- // `end_pos` which is the `end_pos` of the previous node (or `begin_pos`) plus
- // this node's length. The purpose is to allow for a binary search on this
- // position, while allowing O(1) prepend and append operations.
- using pos_type = size_t;
-
- // `index_type` is the type for the `head`, `tail` and `capacity` indexes.
- // Ring buffers are limited to having no more than four billion entries.
- using index_type = uint32_t;
-
- // `offset_type` is the type for the data offset inside a child rep's data.
- using offset_type = uint32_t;
-
- // Position holds the node index and relative offset into the node for
- // some physical offset in the contained data as returned by the Find()
- // and FindTail() methods.
- struct Position {
- index_type index;
- size_t offset;
- };
-
- // The maximum # of child nodes that can be hosted inside a CordRepRing.
- static constexpr size_t kMaxCapacity = (std::numeric_limits<uint32_t>::max)();
-
- // CordRepRing can not be default constructed, moved, copied or assigned.
- CordRepRing() = delete;
- CordRepRing(const CordRepRing&) = delete;
- CordRepRing& operator=(const CordRepRing&) = delete;
-
- // Returns true if this instance is valid, false if some or all of the
- // invariants are broken. Intended for debug purposes only.
- // `output` receives an explanation of the broken invariants.
- bool IsValid(std::ostream& output) const;
-
- // Returns the size in bytes for a CordRepRing with `capacity' entries.
- static constexpr size_t AllocSize(size_t capacity);
-
- // Returns the distance in bytes from `pos` to `end_pos`.
- static constexpr size_t Distance(pos_type pos, pos_type end_pos);
-
- // Creates a new ring buffer from the provided `rep`. Adopts a reference
- // on `rep`. The returned ring buffer has a capacity of at least `extra + 1`
- static CordRepRing* Create(CordRep* child, size_t extra = 0);
-
- // `head`, `tail` and `capacity` indexes defining the ring buffer boundaries.
- index_type head() const { return head_; }
- index_type tail() const { return tail_; }
- index_type capacity() const { return capacity_; }
-
- // Returns the number of entries in this instance.
- index_type entries() const { return entries(head_, tail_); }
-
- // Returns the logical begin position of this instance.
- pos_type begin_pos() const { return begin_pos_; }
-
- // Returns the number of entries for a given head-tail range.
- // Requires `head` and `tail` values to be less than `capacity()`.
- index_type entries(index_type head, index_type tail) const {
- assert(head < capacity_ && tail < capacity_);
- return tail - head + ((tail > head) ? 0 : capacity_);
- }
-
- // Returns the logical end position of entry `index`.
- pos_type const& entry_end_pos(index_type index) const {
- assert(IsValidIndex(index));
- return Layout::Partial().Pointer<0>(data_)[index];
- }
-
- // Returns the child pointer of entry `index`.
- CordRep* const& entry_child(index_type index) const {
- assert(IsValidIndex(index));
- return Layout::Partial(capacity()).Pointer<1>(data_)[index];
- }
-
- // Returns the data offset of entry `index`
- offset_type const& entry_data_offset(index_type index) const {
- assert(IsValidIndex(index));
- return Layout::Partial(capacity(), capacity()).Pointer<2>(data_)[index];
- }
-
- // Appends the provided child node to the `rep` instance.
- // Adopts a reference from `rep` and `child` which may not be null.
- // If the provided child is a FLAT or EXTERNAL node, or a SUBSTRING node
- // containing a FLAT or EXTERNAL node, then the flat or external node is added
- // 'as is', with an offset added for the SUBSTRING case.
- // If the provided child is a RING or CONCAT tree, or a SUBSTRING of a RING or
- // CONCAT tree, then all child nodes not excluded by any start offset or
- // length values are added recursively.
- static CordRepRing* Append(CordRepRing* rep, CordRep* child);
-
- // Appends the provided string data to the `rep` instance.
- // This function will attempt to utilize any remaining capacity in the last
- // node of the input if that node is not shared (directly or indirectly), and
- // of type FLAT. Remaining data will be added as one or more FLAT nodes.
- // Any last node added to the ring buffer will be allocated with up to
- // `extra` bytes of capacity for (anticipated) subsequent append actions.
- static CordRepRing* Append(CordRepRing* rep, string_view data,
- size_t extra = 0);
-
- // Prepends the provided child node to the `rep` instance.
- // Adopts a reference from `rep` and `child` which may not be null.
- // If the provided child is a FLAT or EXTERNAL node, or a SUBSTRING node
- // containing a FLAT or EXTERNAL node, then the flat or external node is
- // prepended 'as is', with an optional offset added for the SUBSTRING case.
- // If the provided child is a RING or CONCAT tree, or a SUBSTRING of a RING
- // or CONCAT tree, then all child nodes not excluded by any start offset or
- // length values are added recursively.
- static CordRepRing* Prepend(CordRepRing* rep, CordRep* child);
-
- // Prepends the provided string data to the `rep` instance.
- // This function will attempt to utilize any remaining capacity in the first
- // node of the input if that node is not shared (directly or indirectly), and
- // of type FLAT. Remaining data will be added as one or more FLAT nodes.
- // Any first node prepended to the ring buffer will be allocated with up to
- // `extra` bytes of capacity for (anticipated) subsequent prepend actions.
- static CordRepRing* Prepend(CordRepRing* rep, string_view data,
- size_t extra = 0);
-
- // Returns a span referencing potentially unused capacity in the last node.
- // The returned span may be empty if no such capacity is available, or if the
- // current instance is shared. Else, a span of size `n <= size` is returned.
- // If non empty, the ring buffer is adjusted to the new length, with the newly
- // added capacity left uninitialized. Callers should assign a value to the
- // entire span before any other operations on this instance.
- Span<char> GetAppendBuffer(size_t size);
-
- // Returns a span referencing potentially unused capacity in the first node.
- // This function is identical to GetAppendBuffer except that it returns a span
- // referencing up to `size` capacity directly before the existing data.
- Span<char> GetPrependBuffer(size_t size);
-
+// Copyright 2020 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iosfwd>
+#include <limits>
+#include <memory>
+
+#include "absl/container/internal/layout.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// All operations modifying a ring buffer are implemented as static methods
+// requiring a CordRepRing instance with a reference adopted by the method.
+//
+// The methods return the modified ring buffer, which may be equal to the input
+// if the input was not shared, and having large enough capacity to accommodate
+// any newly added node(s). Otherwise, a copy of the input rep with the new
+// node(s) added is returned.
+//
+// Any modification on non shared ring buffers with enough capacity will then
+// require minimum atomic operations. Caller should where possible provide
+// reasonable `extra` hints for both anticipated extra `flat` byte space, as
+// well as anticipated extra nodes required for complex operations.
+//
+// Example of code creating a ring buffer, adding some data to it,
+// and discarding the buffer when done:
+//
+// void FunWithRings() {
+// // Create ring with 3 flats
+// CordRep* flat = CreateFlat("Hello");
+// CordRepRing* ring = CordRepRing::Create(flat, 2);
+// ring = CordRepRing::Append(ring, CreateFlat(" "));
+// ring = CordRepRing::Append(ring, CreateFlat("world"));
+// DoSomethingWithRing(ring);
+// CordRep::Unref(ring);
+// }
+//
+// Example of code Copying an existing ring buffer and modifying it:
+//
+// void MoreFunWithRings(CordRepRing* src) {
+// CordRepRing* ring = CordRep::Ref(src)->ring();
+// ring = CordRepRing::Append(ring, CreateFlat("Hello"));
+// ring = CordRepRing::Append(ring, CreateFlat(" "));
+// ring = CordRepRing::Append(ring, CreateFlat("world"));
+// DoSomethingWithRing(ring);
+// CordRep::Unref(ring);
+// }
+//
+class CordRepRing : public CordRep {
+ public:
+ // `pos_type` represents a 'logical position'. A CordRepRing instance has a
+ // `begin_pos` (default 0), and each node inside the buffer will have an
+ // `end_pos` which is the `end_pos` of the previous node (or `begin_pos`) plus
+ // this node's length. The purpose is to allow for a binary search on this
+ // position, while allowing O(1) prepend and append operations.
+ using pos_type = size_t;
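
An illustrative layout (numbers are made up) satisfying the invariant described above:

// begin_pos() == 100, three entries with lengths 5, 1 and 5:
//   entry_end_pos(head)             == 105
//   entry_end_pos(advance(head))    == 106
//   entry_end_pos(advance(head, 2)) == 111
// entry_length(i) == Distance(entry_begin_pos(i), entry_end_pos(i)), and the
// monotonically increasing end_pos values are what Find() binary-searches.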
+
+ // `index_type` is the type for the `head`, `tail` and `capacity` indexes.
+ // Ring buffers are limited to having no more than four billion entries.
+ using index_type = uint32_t;
+
+ // `offset_type` is the type for the data offset inside a child rep's data.
+ using offset_type = uint32_t;
+
+ // Position holds the node index and relative offset into the node for
+ // some physical offset in the contained data as returned by the Find()
+ // and FindTail() methods.
+ struct Position {
+ index_type index;
+ size_t offset;
+ };
+
+ // The maximum # of child nodes that can be hosted inside a CordRepRing.
+ static constexpr size_t kMaxCapacity = (std::numeric_limits<uint32_t>::max)();
+
+ // CordRepRing can not be default constructed, moved, copied or assigned.
+ CordRepRing() = delete;
+ CordRepRing(const CordRepRing&) = delete;
+ CordRepRing& operator=(const CordRepRing&) = delete;
+
+ // Returns true if this instance is valid, false if some or all of the
+ // invariants are broken. Intended for debug purposes only.
+ // `output` receives an explanation of the broken invariants.
+ bool IsValid(std::ostream& output) const;
+
+ // Returns the size in bytes for a CordRepRing with `capacity' entries.
+ static constexpr size_t AllocSize(size_t capacity);
+
+ // Returns the distance in bytes from `pos` to `end_pos`.
+ static constexpr size_t Distance(pos_type pos, pos_type end_pos);
+
+ // Creates a new ring buffer from the provided `rep`. Adopts a reference
+ // on `rep`. The returned ring buffer has a capacity of at least `extra + 1`
+ static CordRepRing* Create(CordRep* child, size_t extra = 0);
+
+ // `head`, `tail` and `capacity` indexes defining the ring buffer boundaries.
+ index_type head() const { return head_; }
+ index_type tail() const { return tail_; }
+ index_type capacity() const { return capacity_; }
+
+ // Returns the number of entries in this instance.
+ index_type entries() const { return entries(head_, tail_); }
+
+ // Returns the logical begin position of this instance.
+ pos_type begin_pos() const { return begin_pos_; }
+
+ // Returns the number of entries for a given head-tail range.
+ // Requires `head` and `tail` values to be less than `capacity()`.
+ index_type entries(index_type head, index_type tail) const {
+ assert(head < capacity_ && tail < capacity_);
+ return tail - head + ((tail > head) ? 0 : capacity_);
+ }
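
Worked values for the wrap-around count above, assuming capacity() == 8:

//   head == 2, tail == 6  ->  6 - 2 + 0 == 4 entries (no wrap)
//   head == 6, tail == 2  ->  2 - 6 + 8 == 4 entries (tail wrapped past 0)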
+
+ // Returns the logical end position of entry `index`.
+ pos_type const& entry_end_pos(index_type index) const {
+ assert(IsValidIndex(index));
+ return Layout::Partial().Pointer<0>(data_)[index];
+ }
+
+ // Returns the child pointer of entry `index`.
+ CordRep* const& entry_child(index_type index) const {
+ assert(IsValidIndex(index));
+ return Layout::Partial(capacity()).Pointer<1>(data_)[index];
+ }
+
+ // Returns the data offset of entry `index`
+ offset_type const& entry_data_offset(index_type index) const {
+ assert(IsValidIndex(index));
+ return Layout::Partial(capacity(), capacity()).Pointer<2>(data_)[index];
+ }
+
+ // Appends the provided child node to the `rep` instance.
+ // Adopts a reference from `rep` and `child` which may not be null.
+ // If the provided child is a FLAT or EXTERNAL node, or a SUBSTRING node
+ // containing a FLAT or EXTERNAL node, then the flat or external node is added
+ // 'as is', with an offset added for the SUBSTRING case.
+ // If the provided child is a RING or CONCAT tree, or a SUBSTRING of a RING or
+ // CONCAT tree, then all child nodes not excluded by any start offset or
+ // length values are added recursively.
+ static CordRepRing* Append(CordRepRing* rep, CordRep* child);
+
+ // Appends the provided string data to the `rep` instance.
+ // This function will attempt to utilize any remaining capacity in the last
+ // node of the input if that node is not shared (directly or indirectly), and
+ // of type FLAT. Remaining data will be added as one or more FLAT nodes.
+ // Any last node added to the ring buffer will be allocated with up to
+ // `extra` bytes of capacity for (anticipated) subsequent append actions.
+ static CordRepRing* Append(CordRepRing* rep, string_view data,
+ size_t extra = 0);
+
+ // Prepends the provided child node to the `rep` instance.
+ // Adopts a reference from `rep` and `child` which may not be null.
+ // If the provided child is a FLAT or EXTERNAL node, or a SUBSTRING node
+ // containing a FLAT or EXTERNAL node, then the flat or external node is
+ // prepended 'as is', with an optional offset added for the SUBSTRING case.
+ // If the provided child is a RING or CONCAT tree, or a SUBSTRING of a RING
+ // or CONCAT tree, then all child nodes not excluded by any start offset or
+ // length values are added recursively.
+ static CordRepRing* Prepend(CordRepRing* rep, CordRep* child);
+
+ // Prepends the provided string data to the `rep` instance.
+ // This function will attempt to utilize any remaining capacity in the first
+ // node of the input if that node is not shared (directly or indirectly), and
+ // of type FLAT. Remaining data will be added as one or more FLAT nodes.
+ // Any first node prepended to the ring buffer will be allocated with up to
+ // `extra` bytes of capacity for (anticipated) subsequent prepend actions.
+ static CordRepRing* Prepend(CordRepRing* rep, string_view data,
+ size_t extra = 0);
+
+ // Returns a span referencing potentially unused capacity in the last node.
+ // The returned span may be empty if no such capacity is available, or if the
+ // current instance is shared. Else, a span of size `n <= size` is returned.
+ // If non empty, the ring buffer is adjusted to the new length, with the newly
+ // added capacity left uninitialized. Callers should assign a value to the
+ // entire span before any other operations on this instance.
+ Span<char> GetAppendBuffer(size_t size);
+
+ // Returns a span referencing potentially unused capacity in the first node.
+ // This function is identical to GetAppendBuffer except that it returns a span
+ // referencing up to `size` capacity directly before the existing data.
+ Span<char> GetPrependBuffer(size_t size);
+
// Returns a cord ring buffer containing `len` bytes of data starting at
- // `offset`. If the input is not shared, this function will remove all head
- // and tail child nodes outside of the requested range, and adjust the new
- // head and tail nodes as required. If the input is shared, this function
- // returns a new instance sharing some or all of the nodes from the input.
+ // `offset`. If the input is not shared, this function will remove all head
+ // and tail child nodes outside of the requested range, and adjust the new
+ // head and tail nodes as required. If the input is shared, this function
+ // returns a new instance sharing some or all of the nodes from the input.
static CordRepRing* SubRing(CordRepRing* r, size_t offset, size_t len,
- size_t extra = 0);
-
+ size_t extra = 0);
+
// Returns a cord ring buffer with the first `len` bytes removed.
- // If the input is not shared, this function will remove all head child nodes
- // fully inside the first `length` bytes, and adjust the new head as required.
- // If the input is shared, this function returns a new instance sharing some
- // or all of the nodes from the input.
+ // If the input is not shared, this function will remove all head child nodes
+ // fully inside the first `length` bytes, and adjust the new head as required.
+ // If the input is shared, this function returns a new instance sharing some
+ // or all of the nodes from the input.
static CordRepRing* RemoveSuffix(CordRepRing* r, size_t len,
- size_t extra = 0);
-
+ size_t extra = 0);
+
// Returns a cord ring buffer with the last `len` bytes removed.
- // If the input is not shared, this function will remove all head child nodes
- // fully inside the first `length` bytes, and adjust the new head as required.
- // If the input is shared, this function returns a new instance sharing some
- // or all of the nodes from the input.
- static CordRepRing* RemovePrefix(CordRepRing* r, size_t len,
- size_t extra = 0);
-
- // Returns the character at `offset`. Requires that `offset < length`.
- char GetCharacter(size_t offset) const;
-
+ // If the input is not shared, this function will remove all head child nodes
+ // fully inside the first `length` bytes, and adjust the new head as required.
+ // If the input is shared, this function returns a new instance sharing some
+ // or all of the nodes from the input.
+ static CordRepRing* RemovePrefix(CordRepRing* r, size_t len,
+ size_t extra = 0);
+
+ // Returns the character at `offset`. Requires that `offset < length`.
+ char GetCharacter(size_t offset) const;
+
// Returns true if this instance manages a single contiguous buffer, in which
// case the (optional) output parameter `fragment` is set. Otherwise, the
// function returns false, and `fragment` is left unchanged.
@@ -240,345 +240,345 @@ class CordRepRing : public CordRep {
// returns false, and `fragment` is left unchanged.
bool IsFlat(size_t offset, size_t len, absl::string_view* fragment) const;
- // Testing only: set capacity to requested capacity.
- void SetCapacityForTesting(size_t capacity);
-
- // Returns the CordRep data pointer for the provided CordRep.
- // Requires that the provided `rep` is either a FLAT or EXTERNAL CordRep.
- static const char* GetLeafData(const CordRep* rep);
-
- // Returns the CordRep data pointer for the provided CordRep.
- // Requires that `rep` is either a FLAT, EXTERNAL, or SUBSTRING CordRep.
- static const char* GetRepData(const CordRep* rep);
-
- // Advances the provided position, wrapping around capacity as needed.
- // Requires `index` < capacity()
- inline index_type advance(index_type index) const;
-
- // Advances the provided position by 'n`, wrapping around capacity as needed.
- // Requires `index` < capacity() and `n` <= capacity.
- inline index_type advance(index_type index, index_type n) const;
-
- // Retreats the provided position, wrapping around 0 as needed.
- // Requires `index` < capacity()
- inline index_type retreat(index_type index) const;
-
- // Retreats the provided position by 'n', wrapping around 0 as needed.
- // Requires `index` < capacity()
- inline index_type retreat(index_type index, index_type n) const;
-
- // Returns the logical begin position of entry `index`
- pos_type const& entry_begin_pos(index_type index) const {
- return (index == head_) ? begin_pos_ : entry_end_pos(retreat(index));
- }
-
- // Returns the physical start offset of entry `index`
- size_t entry_start_offset(index_type index) const {
- return Distance(begin_pos_, entry_begin_pos(index));
- }
-
- // Returns the physical end offset of entry `index`
- size_t entry_end_offset(index_type index) const {
- return Distance(begin_pos_, entry_end_pos(index));
- }
-
- // Returns the data length for entry `index`
- size_t entry_length(index_type index) const {
- return Distance(entry_begin_pos(index), entry_end_pos(index));
- }
-
- // Returns the data for entry `index`
- absl::string_view entry_data(index_type index) const;
-
- // Returns the position for `offset` as {index, prefix}. `index` holds the
- // index of the entry at the specified offset and `prefix` holds the relative
- // offset inside that entry.
- // Requires `offset` < length.
- //
- // For example we can implement GetCharacter(offset) as:
- // char GetCharacter(size_t offset) {
- // Position pos = this->Find(offset);
- // return this->entry_data(pos.pos)[pos.offset];
- // }
- inline Position Find(size_t offset) const;
-
- // Find starting at `head`
- inline Position Find(index_type head, size_t offset) const;
-
- // Returns the tail position for `offset` as {tail index, suffix}.
- // `tail index` holds the index of the entry holding the offset directly
- // before 'offset` advanced by one. 'suffix` holds the relative offset from
- // that relative offset in the entry to the end of the entry.
- // For example, FindTail(length) will return {tail(), 0}, FindTail(length - 5)
- // will return {retreat(tail), 5)} provided the preceding entry contains at
- // least 5 bytes of data.
- // Requires offset >= 1 && offset <= length.
- //
- // This function is very useful in functions that need to clip the end of some
- // ring buffer such as 'RemovePrefix'.
- // For example, we could implement RemovePrefix for non shared instances as:
- // void RemoveSuffix(size_t n) {
- // Position pos = FindTail(length - n);
- // UnrefEntries(pos.pos, this->tail_);
- // this->tail_ = pos.pos;
- // entry(retreat(pos.pos)).end_pos -= pos.offset;
- // }
- inline Position FindTail(size_t offset) const;
-
- // Find tail starting at `head`
- inline Position FindTail(index_type head, size_t offset) const;
-
- // Invokes f(index_type index) for each entry inside the range [head, tail>
- template <typename F>
- void ForEach(index_type head, index_type tail, F&& f) const {
- index_type n1 = (tail > head) ? tail : capacity_;
- for (index_type i = head; i < n1; ++i) f(i);
- if (tail <= head) {
- for (index_type i = 0; i < tail; ++i) f(i);
- }
- }
-
- // Invokes f(index_type index) for each entry inside this instance.
- template <typename F>
- void ForEach(F&& f) const {
- ForEach(head_, tail_, std::forward<F>(f));
- }
-
- // Dump this instance's data tp stream `s` in human readable format, excluding
- // the actual data content itself. Intended for debug purposes only.
- friend std::ostream& operator<<(std::ostream& s, const CordRepRing& rep);
-
- private:
- enum class AddMode { kAppend, kPrepend };
-
- using Layout = container_internal::Layout<pos_type, CordRep*, offset_type>;
-
- class Filler;
- class Transaction;
- class CreateTransaction;
-
- static constexpr size_t kLayoutAlignment = Layout::Partial().Alignment();
-
- // Creates a new CordRepRing.
- explicit CordRepRing(index_type capacity) : capacity_(capacity) {}
-
- // Returns true if `index` is a valid index into this instance.
- bool IsValidIndex(index_type index) const;
-
- // Debug use only: validates the provided CordRepRing invariants.
- // Verification of all CordRepRing methods can be enabled by defining
- // EXTRA_CORD_RING_VALIDATION, i.e.: `--copts=-DEXTRA_CORD_RING_VALIDATION`
- // Verification is VERY expensive, so only do it for debugging purposes.
- static CordRepRing* Validate(CordRepRing* rep, const char* file = nullptr,
- int line = 0);
-
- // Allocates a CordRepRing large enough to hold `capacity + extra' entries.
- // The returned capacity may be larger if the allocated memory allows for it.
- // The maximum capacity of a CordRepRing is capped at kMaxCapacity.
- // Throws `std::length_error` if `capacity + extra' exceeds kMaxCapacity.
- static CordRepRing* New(size_t capacity, size_t extra);
-
- // Deallocates (but does not destroy) the provided ring buffer.
- static void Delete(CordRepRing* rep);
-
- // Destroys the provided ring buffer, decrementing the reference count of all
- // contained child CordReps. The provided 1\`rep` should have a ref count of
+ // Testing only: set capacity to requested capacity.
+ void SetCapacityForTesting(size_t capacity);
+
+ // Returns the CordRep data pointer for the provided CordRep.
+ // Requires that the provided `rep` is either a FLAT or EXTERNAL CordRep.
+ static const char* GetLeafData(const CordRep* rep);
+
+ // Returns the CordRep data pointer for the provided CordRep.
+ // Requires that `rep` is either a FLAT, EXTERNAL, or SUBSTRING CordRep.
+ static const char* GetRepData(const CordRep* rep);
+
+ // Advances the provided position, wrapping around capacity as needed.
+ // Requires `index` < capacity()
+ inline index_type advance(index_type index) const;
+
+ // Advances the provided position by `n`, wrapping around capacity as needed.
+ // Requires `index` < capacity() and `n` <= capacity().
+ inline index_type advance(index_type index, index_type n) const;
+
+ // Retreats the provided position, wrapping around 0 as needed.
+ // Requires `index` < capacity()
+ inline index_type retreat(index_type index) const;
+
+ // Retreats the provided position by `n`, wrapping around 0 as needed.
+ // Requires `index` < capacity() and `n` <= capacity()
+ inline index_type retreat(index_type index, index_type n) const;
+
+ // Returns the logical begin position of entry `index`
+ pos_type const& entry_begin_pos(index_type index) const {
+ return (index == head_) ? begin_pos_ : entry_end_pos(retreat(index));
+ }
+
+ // Returns the physical start offset of entry `index`
+ size_t entry_start_offset(index_type index) const {
+ return Distance(begin_pos_, entry_begin_pos(index));
+ }
+
+ // Returns the physical end offset of entry `index`
+ size_t entry_end_offset(index_type index) const {
+ return Distance(begin_pos_, entry_end_pos(index));
+ }
+
+ // Returns the data length for entry `index`
+ size_t entry_length(index_type index) const {
+ return Distance(entry_begin_pos(index), entry_end_pos(index));
+ }
+
+ // Returns the data for entry `index`
+ absl::string_view entry_data(index_type index) const;
+
+ // Returns the position for `offset` as {index, prefix}. `index` holds the
+ // index of the entry at the specified offset and `prefix` holds the relative
+ // offset inside that entry.
+ // Requires `offset` < length.
+ //
+ // For example we can implement GetCharacter(offset) as:
+ // char GetCharacter(size_t offset) {
+ // Position pos = this->Find(offset);
+ // return this->entry_data(pos.pos)[pos.offset];
+ // }
+ inline Position Find(size_t offset) const;
+
+ // Find starting at `head`
+ inline Position Find(index_type head, size_t offset) const;
+
+ // Returns the tail position for `offset` as {tail index, suffix}.
+ // `tail index` holds the index of the entry holding the offset directly
+ // before `offset`, advanced by one. `suffix` holds the relative offset from
+ // that position in the entry to the end of the entry.
+ // For example, FindTail(length) will return {tail(), 0}, FindTail(length - 5)
+ // will return {retreat(tail), 5} provided the preceding entry contains at
+ // least 5 bytes of data.
+ // Requires offset >= 1 && offset <= length.
+ //
+ // This function is very useful in functions that need to clip the end of some
+ // ring buffer such as 'RemoveSuffix'.
+ // For example, we could implement RemoveSuffix for non-shared instances as:
+ // void RemoveSuffix(size_t n) {
+ // Position pos = FindTail(length - n);
+ // UnrefEntries(pos.pos, this->tail_);
+ // this->tail_ = pos.pos;
+ // entry(retreat(pos.pos)).end_pos -= pos.offset;
+ // }
+ inline Position FindTail(size_t offset) const;
+
+ // Find tail starting at `head`
+ inline Position FindTail(index_type head, size_t offset) const;
+
+ // Invokes f(index_type index) for each entry inside the range [head, tail).
+ template <typename F>
+ void ForEach(index_type head, index_type tail, F&& f) const {
+ index_type n1 = (tail > head) ? tail : capacity_;
+ for (index_type i = head; i < n1; ++i) f(i);
+ if (tail <= head) {
+ for (index_type i = 0; i < tail; ++i) f(i);
+ }
+ }
+
+ // Invokes f(index_type index) for each entry inside this instance.
+ template <typename F>
+ void ForEach(F&& f) const {
+ ForEach(head_, tail_, std::forward<F>(f));
+ }
+
+ // Dumps this instance's data to stream `s` in human-readable format, excluding
+ // the actual data content itself. Intended for debug purposes only.
+ friend std::ostream& operator<<(std::ostream& s, const CordRepRing& rep);
+
+ private:
+ enum class AddMode { kAppend, kPrepend };
+
+ using Layout = container_internal::Layout<pos_type, CordRep*, offset_type>;
+
+ class Filler;
+ class Transaction;
+ class CreateTransaction;
+
+ static constexpr size_t kLayoutAlignment = Layout::Partial().Alignment();
+
+ // Creates a new CordRepRing.
+ explicit CordRepRing(index_type capacity) : capacity_(capacity) {}
+
+ // Returns true if `index` is a valid index into this instance.
+ bool IsValidIndex(index_type index) const;
+
+ // Debug use only: validates the provided CordRepRing invariants.
+ // Verification of all CordRepRing methods can be enabled by defining
+ // EXTRA_CORD_RING_VALIDATION, i.e.: `--copts=-DEXTRA_CORD_RING_VALIDATION`
+ // Verification is VERY expensive, so only do it for debugging purposes.
+ static CordRepRing* Validate(CordRepRing* rep, const char* file = nullptr,
+ int line = 0);
+
+ // Allocates a CordRepRing large enough to hold `capacity + extra` entries.
+ // The returned capacity may be larger if the allocated memory allows for it.
+ // The maximum capacity of a CordRepRing is capped at kMaxCapacity.
+ // Throws `std::length_error` if `capacity + extra` exceeds kMaxCapacity.
+ static CordRepRing* New(size_t capacity, size_t extra);
+
+ // Deallocates (but does not destroy) the provided ring buffer.
+ static void Delete(CordRepRing* rep);
+
+ // Destroys the provided ring buffer, decrementing the reference count of all
+ // contained child CordReps. The provided `rep` should have a ref count of
// one (pre decrement destroy call observing `refcount.IsOne()`) or zero
// (post decrement destroy call observing `!refcount.Decrement()`).
- static void Destroy(CordRepRing* rep);
-
- // Returns a mutable reference to the logical end position array.
- pos_type* entry_end_pos() {
- return Layout::Partial().Pointer<0>(data_);
- }
-
- // Returns a mutable reference to the child pointer array.
- CordRep** entry_child() {
- return Layout::Partial(capacity()).Pointer<1>(data_);
- }
-
- // Returns a mutable reference to the data offset array.
- offset_type* entry_data_offset() {
- return Layout::Partial(capacity(), capacity()).Pointer<2>(data_);
- }
-
- // Find implementations for the non fast path 0 / length cases.
- Position FindSlow(index_type head, size_t offset) const;
- Position FindTailSlow(index_type head, size_t offset) const;
-
- // Finds the index of the first node that is inside a reasonable distance
- // of the node at `offset` from which we can continue with a linear search.
- template <bool wrap>
- index_type FindBinary(index_type head, index_type tail, size_t offset) const;
-
- // Fills the current (initialized) instance from the provided source, copying
- // entries [head, tail). Adds a reference to copied entries if `ref` is true.
- template <bool ref>
- void Fill(const CordRepRing* src, index_type head, index_type tail);
-
- // Create a copy of 'rep', copying all entries [head, tail), allocating room
- // for `extra` entries. Adds a reference on all copied entries.
- static CordRepRing* Copy(CordRepRing* rep, index_type head, index_type tail,
- size_t extra = 0);
-
- // Returns a Mutable CordRepRing reference from `rep` with room for at least
- // `extra` additional nodes. Adopts a reference count from `rep`.
- // This function will return `rep` if, and only if:
- // - rep.entries + extra <= rep.capacity
- // - rep.refcount == 1
- // Otherwise, this function will create a new copy of `rep` with additional
- // capacity to satisfy `extra` extra nodes, and unref the old `rep` instance.
- //
- // If a new CordRepRing can not be allocated, or the new capacity would exceed
- // the maxmimum capacity, then the input is consumed only, and an exception is
- // thrown.
- static CordRepRing* Mutable(CordRepRing* rep, size_t extra);
-
- // Slow path for Append(CordRepRing* rep, CordRep* child). This function is
- // exercised if the provided `child` in Append() is not a leaf node, i.e., a
- // ring buffer or old (concat) cord tree.
- static CordRepRing* AppendSlow(CordRepRing* rep, CordRep* child);
-
- // Appends the provided leaf node. Requires `child` to be FLAT or EXTERNAL.
- static CordRepRing* AppendLeaf(CordRepRing* rep, CordRep* child,
- size_t offset, size_t length);
-
- // Prepends the provided leaf node. Requires `child` to be FLAT or EXTERNAL.
- static CordRepRing* PrependLeaf(CordRepRing* rep, CordRep* child,
- size_t offset, size_t length);
-
- // Slow path for Prepend(CordRepRing* rep, CordRep* child). This function is
- // exercised if the provided `child` in Prepend() is not a leaf node, i.e., a
- // ring buffer or old (concat) cord tree.
- static CordRepRing* PrependSlow(CordRepRing* rep, CordRep* child);
-
- // Slow path for Create(CordRep* child, size_t extra). This function is
- // exercised if the provided `child` in Prepend() is not a leaf node, i.e., a
- // ring buffer or old (concat) cord tree.
- static CordRepRing* CreateSlow(CordRep* child, size_t extra);
-
- // Creates a new ring buffer from the provided `child` leaf node. Requires
- // `child` to be FLAT or EXTERNAL. on `rep`.
- // The returned ring buffer has a capacity of at least `1 + extra`
- static CordRepRing* CreateFromLeaf(CordRep* child, size_t offset,
- size_t length, size_t extra);
-
- // Appends or prepends (depending on AddMode) the ring buffer in `ring' to
+ static void Destroy(CordRepRing* rep);
+
+ // Returns a mutable reference to the logical end position array.
+ pos_type* entry_end_pos() {
+ return Layout::Partial().Pointer<0>(data_);
+ }
+
+ // Returns a mutable reference to the child pointer array.
+ CordRep** entry_child() {
+ return Layout::Partial(capacity()).Pointer<1>(data_);
+ }
+
+ // Returns a mutable reference to the data offset array.
+ offset_type* entry_data_offset() {
+ return Layout::Partial(capacity(), capacity()).Pointer<2>(data_);
+ }
+
+ // Find implementations for the non-fast-path 0 / length cases.
+ Position FindSlow(index_type head, size_t offset) const;
+ Position FindTailSlow(index_type head, size_t offset) const;
+
+ // Finds the index of the first node that is inside a reasonable distance
+ // of the node at `offset` from which we can continue with a linear search.
+ template <bool wrap>
+ index_type FindBinary(index_type head, index_type tail, size_t offset) const;
+
+ // Fills the current (initialized) instance from the provided source, copying
+ // entries [head, tail). Adds a reference to copied entries if `ref` is true.
+ template <bool ref>
+ void Fill(const CordRepRing* src, index_type head, index_type tail);
+
+ // Create a copy of 'rep', copying all entries [head, tail), allocating room
+ // for `extra` entries. Adds a reference on all copied entries.
+ static CordRepRing* Copy(CordRepRing* rep, index_type head, index_type tail,
+ size_t extra = 0);
+
+ // Returns a Mutable CordRepRing reference from `rep` with room for at least
+ // `extra` additional nodes. Adopts a reference count from `rep`.
+ // This function will return `rep` if, and only if:
+ // - rep.entries + extra <= rep.capacity
+ // - rep.refcount == 1
+ // Otherwise, this function will create a new copy of `rep` with additional
+ // capacity to satisfy `extra` extra nodes, and unref the old `rep` instance.
+ //
+ // If a new CordRepRing cannot be allocated, or the new capacity would exceed
+ // the maximum capacity, then the input is only consumed, and an exception is
+ // thrown.
+ static CordRepRing* Mutable(CordRepRing* rep, size_t extra);
+
+ // Slow path for Append(CordRepRing* rep, CordRep* child). This function is
+ // exercised if the provided `child` in Append() is not a leaf node, i.e., a
+ // ring buffer or old (concat) cord tree.
+ static CordRepRing* AppendSlow(CordRepRing* rep, CordRep* child);
+
+ // Appends the provided leaf node. Requires `child` to be FLAT or EXTERNAL.
+ static CordRepRing* AppendLeaf(CordRepRing* rep, CordRep* child,
+ size_t offset, size_t length);
+
+ // Prepends the provided leaf node. Requires `child` to be FLAT or EXTERNAL.
+ static CordRepRing* PrependLeaf(CordRepRing* rep, CordRep* child,
+ size_t offset, size_t length);
+
+ // Slow path for Prepend(CordRepRing* rep, CordRep* child). This function is
+ // exercised if the provided `child` in Prepend() is not a leaf node, i.e., a
+ // ring buffer or old (concat) cord tree.
+ static CordRepRing* PrependSlow(CordRepRing* rep, CordRep* child);
+
+ // Slow path for Create(CordRep* child, size_t extra). This function is
+ // exercised if the provided `child` in Create() is not a leaf node, i.e., a
+ // ring buffer or old (concat) cord tree.
+ static CordRepRing* CreateSlow(CordRep* child, size_t extra);
+
+ // Creates a new ring buffer from the provided `child` leaf node. Requires
+ // `child` to be FLAT or EXTERNAL.
+ // The returned ring buffer has a capacity of at least `1 + extra`.
+ static CordRepRing* CreateFromLeaf(CordRep* child, size_t offset,
+ size_t length, size_t extra);
+
+ // Appends or prepends (depending on AddMode) the ring buffer in `ring` to
// `rep` starting at `offset` with length `len`.
- template <AddMode mode>
- static CordRepRing* AddRing(CordRepRing* rep, CordRepRing* ring,
+ template <AddMode mode>
+ static CordRepRing* AddRing(CordRepRing* rep, CordRepRing* ring,
size_t offset, size_t len);
-
- // Increases the data offset for entry `index` by `n`.
- void AddDataOffset(index_type index, size_t n);
-
- // Descreases the length for entry `index` by `n`.
- void SubLength(index_type index, size_t n);
-
- index_type head_;
- index_type tail_;
- index_type capacity_;
- pos_type begin_pos_;
-
- alignas(kLayoutAlignment) char data_[kLayoutAlignment];
-
- friend struct CordRep;
-};
-
-constexpr size_t CordRepRing::AllocSize(size_t capacity) {
- return sizeof(CordRepRing) - sizeof(data_) +
- Layout(capacity, capacity, capacity).AllocSize();
-}
-
-inline constexpr size_t CordRepRing::Distance(pos_type pos, pos_type end_pos) {
- return (end_pos - pos);
-}
-
-inline const char* CordRepRing::GetLeafData(const CordRep* rep) {
- return rep->tag != EXTERNAL ? rep->flat()->Data() : rep->external()->base;
-}
-
-inline const char* CordRepRing::GetRepData(const CordRep* rep) {
- if (rep->tag >= FLAT) return rep->flat()->Data();
- if (rep->tag == EXTERNAL) return rep->external()->base;
- return GetLeafData(rep->substring()->child) + rep->substring()->start;
-}
-
-inline CordRepRing::index_type CordRepRing::advance(index_type index) const {
- assert(index < capacity_);
- return ++index == capacity_ ? 0 : index;
-}
-
-inline CordRepRing::index_type CordRepRing::advance(index_type index,
- index_type n) const {
- assert(index < capacity_ && n <= capacity_);
- return (index += n) >= capacity_ ? index - capacity_ : index;
-}
-
-inline CordRepRing::index_type CordRepRing::retreat(index_type index) const {
- assert(index < capacity_);
- return (index > 0 ? index : capacity_) - 1;
-}
-
-inline CordRepRing::index_type CordRepRing::retreat(index_type index,
- index_type n) const {
- assert(index < capacity_ && n <= capacity_);
- return index >= n ? index - n : capacity_ - n + index;
-}
-
-inline absl::string_view CordRepRing::entry_data(index_type index) const {
- size_t data_offset = entry_data_offset(index);
- return {GetRepData(entry_child(index)) + data_offset, entry_length(index)};
-}
-
-inline bool CordRepRing::IsValidIndex(index_type index) const {
- if (index >= capacity_) return false;
- return (tail_ > head_) ? (index >= head_ && index < tail_)
- : (index >= head_ || index < tail_);
-}
-
-#ifndef EXTRA_CORD_RING_VALIDATION
-inline CordRepRing* CordRepRing::Validate(CordRepRing* rep,
- const char* /*file*/, int /*line*/) {
- return rep;
-}
-#endif
-
-inline CordRepRing::Position CordRepRing::Find(size_t offset) const {
- assert(offset < length);
- return (offset == 0) ? Position{head_, 0} : FindSlow(head_, offset);
-}
-
-inline CordRepRing::Position CordRepRing::Find(index_type head,
- size_t offset) const {
- assert(offset < length);
- assert(IsValidIndex(head) && offset >= entry_start_offset(head));
- return (offset == 0) ? Position{head_, 0} : FindSlow(head, offset);
-}
-
-inline CordRepRing::Position CordRepRing::FindTail(size_t offset) const {
- assert(offset > 0 && offset <= length);
- return (offset == length) ? Position{tail_, 0} : FindTailSlow(head_, offset);
-}
-
-inline CordRepRing::Position CordRepRing::FindTail(index_type head,
- size_t offset) const {
- assert(offset > 0 && offset <= length);
- assert(IsValidIndex(head) && offset >= entry_start_offset(head) + 1);
- return (offset == length) ? Position{tail_, 0} : FindTailSlow(head, offset);
-}
-
-// Now that CordRepRing is defined, we can define CordRep's helper casts:
-inline CordRepRing* CordRep::ring() {
+
+ // Increases the data offset for entry `index` by `n`.
+ void AddDataOffset(index_type index, size_t n);
+
+ // Decreases the length for entry `index` by `n`.
+ void SubLength(index_type index, size_t n);
+
+ index_type head_;
+ index_type tail_;
+ index_type capacity_;
+ pos_type begin_pos_;
+
+ alignas(kLayoutAlignment) char data_[kLayoutAlignment];
+
+ friend struct CordRep;
+};
+
+constexpr size_t CordRepRing::AllocSize(size_t capacity) {
+ return sizeof(CordRepRing) - sizeof(data_) +
+ Layout(capacity, capacity, capacity).AllocSize();
+}
+
+inline constexpr size_t CordRepRing::Distance(pos_type pos, pos_type end_pos) {
+ return (end_pos - pos);
+}
+
+inline const char* CordRepRing::GetLeafData(const CordRep* rep) {
+ return rep->tag != EXTERNAL ? rep->flat()->Data() : rep->external()->base;
+}
+
+inline const char* CordRepRing::GetRepData(const CordRep* rep) {
+ if (rep->tag >= FLAT) return rep->flat()->Data();
+ if (rep->tag == EXTERNAL) return rep->external()->base;
+ return GetLeafData(rep->substring()->child) + rep->substring()->start;
+}
+
+inline CordRepRing::index_type CordRepRing::advance(index_type index) const {
+ assert(index < capacity_);
+ return ++index == capacity_ ? 0 : index;
+}
+
+inline CordRepRing::index_type CordRepRing::advance(index_type index,
+ index_type n) const {
+ assert(index < capacity_ && n <= capacity_);
+ return (index += n) >= capacity_ ? index - capacity_ : index;
+}
+
+inline CordRepRing::index_type CordRepRing::retreat(index_type index) const {
+ assert(index < capacity_);
+ return (index > 0 ? index : capacity_) - 1;
+}
+
+inline CordRepRing::index_type CordRepRing::retreat(index_type index,
+ index_type n) const {
+ assert(index < capacity_ && n <= capacity_);
+ return index >= n ? index - n : capacity_ - n + index;
+}
+
+inline absl::string_view CordRepRing::entry_data(index_type index) const {
+ size_t data_offset = entry_data_offset(index);
+ return {GetRepData(entry_child(index)) + data_offset, entry_length(index)};
+}
+
+inline bool CordRepRing::IsValidIndex(index_type index) const {
+ if (index >= capacity_) return false;
+ return (tail_ > head_) ? (index >= head_ && index < tail_)
+ : (index >= head_ || index < tail_);
+}
+
+#ifndef EXTRA_CORD_RING_VALIDATION
+inline CordRepRing* CordRepRing::Validate(CordRepRing* rep,
+ const char* /*file*/, int /*line*/) {
+ return rep;
+}
+#endif
+
+inline CordRepRing::Position CordRepRing::Find(size_t offset) const {
+ assert(offset < length);
+ return (offset == 0) ? Position{head_, 0} : FindSlow(head_, offset);
+}
+
+inline CordRepRing::Position CordRepRing::Find(index_type head,
+ size_t offset) const {
+ assert(offset < length);
+ assert(IsValidIndex(head) && offset >= entry_start_offset(head));
+ return (offset == 0) ? Position{head_, 0} : FindSlow(head, offset);
+}
+
+inline CordRepRing::Position CordRepRing::FindTail(size_t offset) const {
+ assert(offset > 0 && offset <= length);
+ return (offset == length) ? Position{tail_, 0} : FindTailSlow(head_, offset);
+}
+
+inline CordRepRing::Position CordRepRing::FindTail(index_type head,
+ size_t offset) const {
+ assert(offset > 0 && offset <= length);
+ assert(IsValidIndex(head) && offset >= entry_start_offset(head) + 1);
+ return (offset == length) ? Position{tail_, 0} : FindTailSlow(head, offset);
+}
+
+// Now that CordRepRing is defined, we can define CordRep's helper casts:
+inline CordRepRing* CordRep::ring() {
assert(IsRing());
- return static_cast<CordRepRing*>(this);
-}
-
-inline const CordRepRing* CordRep::ring() const {
+ return static_cast<CordRepRing*>(this);
+}
+
+inline const CordRepRing* CordRep::ring() const {
assert(IsRing());
- return static_cast<const CordRepRing*>(this);
-}
-
+ return static_cast<const CordRepRing*>(this);
+}
+
inline bool CordRepRing::IsFlat(absl::string_view* fragment) const {
if (entries() == 1) {
if (fragment) *fragment = entry_data(head());
@@ -598,10 +598,10 @@ inline bool CordRepRing::IsFlat(size_t offset, size_t len,
return false;
}
-std::ostream& operator<<(std::ostream& s, const CordRepRing& rep);
-
-} // namespace cord_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
+std::ostream& operator<<(std::ostream& s, const CordRepRing& rep);
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_
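The advance()/retreat() helpers above implement wrap-around index arithmetic over the ring capacity. Below is a minimal standalone sketch of that arithmetic in plain C++ (illustrative only, not the Abseil code; the names are hypothetical):

// Standalone illustration of the wrap-around arithmetic described above.
#include <cassert>
#include <cstddef>
#include <iostream>

using index_type = std::size_t;

// Mirrors advance(index, n): wrap past the end of the ring.
index_type Advance(index_type index, index_type n, index_type capacity) {
  assert(index < capacity && n <= capacity);
  index += n;
  return index >= capacity ? index - capacity : index;
}

// Mirrors retreat(index, n): wrap past the start of the ring.
index_type Retreat(index_type index, index_type n, index_type capacity) {
  assert(index < capacity && n <= capacity);
  return index >= n ? index - n : capacity - n + index;
}

int main() {
  const index_type capacity = 8;
  assert(Advance(6, 3, capacity) == 1);  // 6 -> 7 -> 0 -> 1
  assert(Retreat(1, 3, capacity) == 6);  // 1 -> 0 -> 7 -> 6
  std::cout << "ring index arithmetic ok\n";
  return 0;
}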
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h
index 7ceeaa000e..9578154b12 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h
@@ -1,118 +1,118 @@
-// Copyright 2021 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
-#define ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
-
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-
-#include "absl/strings/internal/cord_internal.h"
-#include "absl/strings/internal/cord_rep_ring.h"
-#include "absl/strings/string_view.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace cord_internal {
-
-// CordRepRingReader provides basic navigation over CordRepRing data.
-class CordRepRingReader {
- public:
- // Returns true if this instance is not empty.
- explicit operator bool() const { return ring_ != nullptr; }
-
- // Returns the ring buffer reference for this instance, or nullptr if empty.
- CordRepRing* ring() const { return ring_; }
-
- // Returns the current node index inside the ring buffer for this instance.
- // The returned value is undefined if this instance is empty.
- CordRepRing::index_type index() const { return index_; }
-
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordRepRingReader provides basic navigation over CordRepRing data.
+class CordRepRingReader {
+ public:
+ // Returns true if this instance is not empty.
+ explicit operator bool() const { return ring_ != nullptr; }
+
+ // Returns the ring buffer reference for this instance, or nullptr if empty.
+ CordRepRing* ring() const { return ring_; }
+
+ // Returns the current node index inside the ring buffer for this instance.
+ // The returned value is undefined if this instance is empty.
+ CordRepRing::index_type index() const { return index_; }
+
// Returns the current node inside the ring buffer for this instance.
// The returned value is undefined if this instance is empty.
CordRep* node() const { return ring_->entry_child(index_); }
- // Returns the length of the referenced ring buffer.
- // Requires the current instance to be non empty.
- size_t length() const {
- assert(ring_);
- return ring_->length;
- }
-
- // Returns the end offset of the last navigated-to chunk, which represents the
- // total bytes 'consumed' relative to the start of the ring. The returned
- // value is never zero. For example, initializing a reader with a ring buffer
- // with a first chunk of 19 bytes will return consumed() = 19.
- // Requires the current instance to be non empty.
- size_t consumed() const {
- assert(ring_);
- return ring_->entry_end_offset(index_);
- }
-
- // Returns the number of bytes remaining beyond the last navigated-to chunk.
- // Requires the current instance to be non empty.
- size_t remaining() const {
- assert(ring_);
- return length() - consumed();
- }
-
- // Resets this instance to an empty value
- void Reset() { ring_ = nullptr; }
-
- // Resets this instance to the start of `ring`. `ring` must not be null.
- // Returns a reference into the first chunk of the provided ring.
- absl::string_view Reset(CordRepRing* ring) {
- assert(ring);
- ring_ = ring;
- index_ = ring_->head();
- return ring_->entry_data(index_);
- }
-
- // Navigates to the next chunk inside the reference ring buffer.
- // Returns a reference into the navigated-to chunk.
- // Requires remaining() to be non zero.
- absl::string_view Next() {
- assert(remaining());
- index_ = ring_->advance(index_);
- return ring_->entry_data(index_);
- }
-
- // Navigates to the chunk at offset `offset`.
- // Returns a reference into the navigated-to chunk, adjusted for the relative
- // position of `offset` into that chunk. For example, calling Seek(13) on a
- // ring buffer containing 2 chunks of 10 and 20 bytes respectively will return
- // a string view into the second chunk starting at offset 3 with a size of 17.
- // Requires `offset` to be less than `length()`
- absl::string_view Seek(size_t offset) {
- assert(offset < length());
- size_t current = ring_->entry_end_offset(index_);
- CordRepRing::index_type hint = (offset >= current) ? index_ : ring_->head();
- const CordRepRing::Position head = ring_->Find(hint, offset);
- index_ = head.index;
- auto data = ring_->entry_data(head.index);
- data.remove_prefix(head.offset);
- return data;
- }
-
- private:
- CordRepRing* ring_ = nullptr;
- CordRepRing::index_type index_;
-};
-
-} // namespace cord_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
+ // Returns the length of the referenced ring buffer.
+ // Requires the current instance to be non empty.
+ size_t length() const {
+ assert(ring_);
+ return ring_->length;
+ }
+
+ // Returns the end offset of the last navigated-to chunk, which represents the
+ // total bytes 'consumed' relative to the start of the ring. The returned
+ // value is never zero. For example, a reader initialized with a ring buffer
+ // whose first chunk holds 19 bytes will report consumed() = 19.
+ // Requires the current instance to be non empty.
+ size_t consumed() const {
+ assert(ring_);
+ return ring_->entry_end_offset(index_);
+ }
+
+ // Returns the number of bytes remaining beyond the last navigated-to chunk.
+ // Requires the current instance to be non empty.
+ size_t remaining() const {
+ assert(ring_);
+ return length() - consumed();
+ }
+
+ // Resets this instance to an empty value
+ void Reset() { ring_ = nullptr; }
+
+ // Resets this instance to the start of `ring`. `ring` must not be null.
+ // Returns a reference into the first chunk of the provided ring.
+ absl::string_view Reset(CordRepRing* ring) {
+ assert(ring);
+ ring_ = ring;
+ index_ = ring_->head();
+ return ring_->entry_data(index_);
+ }
+
+ // Navigates to the next chunk inside the referenced ring buffer.
+ // Returns a reference into the navigated-to chunk.
+ // Requires remaining() to be non zero.
+ absl::string_view Next() {
+ assert(remaining());
+ index_ = ring_->advance(index_);
+ return ring_->entry_data(index_);
+ }
+
+ // Navigates to the chunk at offset `offset`.
+ // Returns a reference into the navigated-to chunk, adjusted for the relative
+ // position of `offset` into that chunk. For example, calling Seek(13) on a
+ // ring buffer containing 2 chunks of 10 and 20 bytes respectively will return
+ // a string view into the second chunk starting at offset 3 with a size of 17.
+ // Requires `offset` to be less than `length()`
+ absl::string_view Seek(size_t offset) {
+ assert(offset < length());
+ size_t current = ring_->entry_end_offset(index_);
+ CordRepRing::index_type hint = (offset >= current) ? index_ : ring_->head();
+ const CordRepRing::Position head = ring_->Find(hint, offset);
+ index_ = head.index;
+ auto data = ring_->entry_data(head.index);
+ data.remove_prefix(head.offset);
+ return data;
+ }
+
+ private:
+ CordRepRing* ring_ = nullptr;
+ CordRepRing::index_type index_;
+};
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_
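The reader above walks a ring chunk by chunk via Reset()/Next(), tracking consumed() and remaining(). A minimal standalone sketch of that reading pattern over a plain chunk vector (illustrative only, not the Abseil API):

// Standalone illustration of the Reset()/Next()/consumed()/remaining() pattern.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main() {
  const std::vector<std::string> chunks = {"hello, ", "ring ", "buffer"};
  std::size_t length = 0;
  for (const std::string& c : chunks) length += c.size();

  std::size_t index = 0;
  std::size_t consumed = chunks[index].size();  // Reset(): first chunk consumed
  std::string out = chunks[index];
  while (length - consumed > 0) {  // remaining() > 0
    ++index;                       // Next(): move to the following chunk
    consumed += chunks[index].size();
    out += chunks[index];
  }
  std::cout << out << " (" << consumed << " bytes)\n";
  return 0;
}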
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle/ya.make b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle/ya.make
index 47e1c6006c..b18d14d614 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle/ya.make
@@ -1,50 +1,50 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
contrib/restricted/abseil-cpp/absl/demangle
- contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/synchronization
contrib/restricted/abseil-cpp/absl/synchronization/internal
contrib/restricted/abseil-cpp/absl/time
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/strings/internal)
-
-SRCS(
+
+SRCS(
cordz_handle.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info/ya.make b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info/ya.make
index 56243dd401..e571f5dcf6 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info/ya.make
@@ -1,27 +1,27 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
contrib/restricted/abseil-cpp/absl/demangle
- contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/numeric
contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_cord_internal
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions
@@ -31,24 +31,24 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/time
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/strings/internal)
-
-SRCS(
+
+SRCS(
cordz_info.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_sample_token/ya.make b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_sample_token/ya.make
index 1ad95a8a05..3765399311 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_sample_token/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_sample_token/ya.make
@@ -1,27 +1,27 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
contrib/restricted/abseil-cpp/absl/demangle
- contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/numeric
contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_cord_internal
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions
@@ -32,24 +32,24 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/time
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/strings/internal)
-
-SRCS(
+
+SRCS(
cordz_sample_token.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/bind.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/bind.cc
index c988ba8fd2..9f5e8aae3d 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/bind.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/bind.cc
@@ -234,7 +234,7 @@ int FprintF(std::FILE* output, const UntypedFormatSpecImpl format,
errno = sink.error();
return -1;
}
- if (sink.count() > static_cast<size_t>(std::numeric_limits<int>::max())) {
+ if (sink.count() > static_cast<size_t>(std::numeric_limits<int>::max())) {
errno = EFBIG;
return -1;
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/bind.h b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/bind.h
index b26cff6648..bbca0fdcfb 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/bind.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/bind.h
@@ -133,11 +133,11 @@ class FormatSpecTemplate
#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
- template <
- FormatConversionCharSet... C,
- typename = typename std::enable_if<sizeof...(C) == sizeof...(Args)>::type,
- typename = typename std::enable_if<AllOf(Contains(Args,
- C)...)>::type>
+ template <
+ FormatConversionCharSet... C,
+ typename = typename std::enable_if<sizeof...(C) == sizeof...(Args)>::type,
+ typename = typename std::enable_if<AllOf(Contains(Args,
+ C)...)>::type>
FormatSpecTemplate(const ExtendedParsedFormat<C...>& pc) // NOLINT
: Base(&pc) {}
};
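The constructor in the hunk above participates in overload resolution only when the FormatConversionCharSet pack matches the argument pack in size and every element passes the Contains() check. A standalone sketch of that enable_if pattern, using hypothetical names (Spec, AllOf) rather than the Abseil types:

// Standalone illustration; Spec and AllOf are hypothetical stand-ins.
#include <type_traits>
#include <utility>

constexpr bool AllOf() { return true; }
template <typename... Rest>
constexpr bool AllOf(bool b, Rest... rest) {
  return b && AllOf(rest...);
}

template <int... Expected>
struct Spec {
  // Participates in overload resolution only when the packs have equal size
  // and every element matches, as with the FormatSpecTemplate ctor above.
  template <int... C,
            typename = typename std::enable_if<sizeof...(C) ==
                                               sizeof...(Expected)>::type,
            typename = typename std::enable_if<AllOf((C == Expected)...)>::type>
  Spec(std::integer_sequence<int, C...>) {}
};

int main() {
  Spec<1, 2> ok(std::integer_sequence<int, 1, 2>{});
  // Spec<1, 2> bad(std::integer_sequence<int, 1, 3>{});  // would not compile
  (void)ok;
  return 0;
}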
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc
index b1c4068475..73381fb0a4 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc
@@ -27,9 +27,9 @@
#include "absl/base/optimization.h"
#include "absl/functional/function_ref.h"
#include "absl/meta/type_traits.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
-#include "absl/numeric/internal/representation.h"
+#include "absl/numeric/internal/representation.h"
#include "absl/strings/numbers.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
@@ -40,8 +40,8 @@ namespace str_format_internal {
namespace {
-using ::absl::numeric_internal::IsDoubleDouble;
-
+using ::absl::numeric_internal::IsDoubleDouble;
+
// The code below wants to avoid heap allocations.
// To do so it needs to allocate memory on the stack.
// `StackArray` will allocate memory on the stack in the form of a uint32_t
@@ -115,15 +115,15 @@ inline uint64_t DivideBy10WithCarry(uint64_t *v, uint64_t carry) {
return next_carry % divisor;
}
-using MaxFloatType =
- typename std::conditional<IsDoubleDouble(), double, long double>::type;
-
+using MaxFloatType =
+ typename std::conditional<IsDoubleDouble(), double, long double>::type;
+
// Generates the decimal representation for an integer of the form `v * 2^exp`,
// where `v` and `exp` are both positive integers.
// It generates the digits from the left (ie the most significant digit first)
// to allow for direct printing into the sink.
//
-// Requires `0 <= exp` and `exp <= numeric_limits<MaxFloatType>::max_exponent`.
+// Requires `0 <= exp` and `exp <= numeric_limits<MaxFloatType>::max_exponent`.
class BinaryToDecimal {
static constexpr int ChunksNeeded(int exp) {
// We will left shift a uint128 by `exp` bits, so we need `128+exp` total
@@ -138,10 +138,10 @@ class BinaryToDecimal {
static void RunConversion(uint128 v, int exp,
absl::FunctionRef<void(BinaryToDecimal)> f) {
assert(exp > 0);
- assert(exp <= std::numeric_limits<MaxFloatType>::max_exponent);
+ assert(exp <= std::numeric_limits<MaxFloatType>::max_exponent);
static_assert(
- static_cast<int>(StackArray::kMaxCapacity) >=
- ChunksNeeded(std::numeric_limits<MaxFloatType>::max_exponent),
+ static_cast<int>(StackArray::kMaxCapacity) >=
+ ChunksNeeded(std::numeric_limits<MaxFloatType>::max_exponent),
"");
StackArray::RunWithCapacity(
@@ -238,14 +238,14 @@ class BinaryToDecimal {
// Converts a value of the form `x * 2^-exp` into a sequence of decimal digits.
// Requires `-exp < 0` and
-// `-exp >= limits<MaxFloatType>::min_exponent - limits<MaxFloatType>::digits`.
+// `-exp >= limits<MaxFloatType>::min_exponent - limits<MaxFloatType>::digits`.
class FractionalDigitGenerator {
public:
// Run the conversion for `v * 2^exp` and call `f(generator)`.
// This function will allocate enough stack space to perform the conversion.
static void RunConversion(
uint128 v, int exp, absl::FunctionRef<void(FractionalDigitGenerator)> f) {
- using Limits = std::numeric_limits<MaxFloatType>;
+ using Limits = std::numeric_limits<MaxFloatType>;
assert(-exp < 0);
assert(-exp >= Limits::min_exponent - 128);
static_assert(StackArray::kMaxCapacity >=
@@ -321,11 +321,11 @@ class FractionalDigitGenerator {
};
// Count the number of leading zero bits.
-int LeadingZeros(uint64_t v) { return countl_zero(v); }
+int LeadingZeros(uint64_t v) { return countl_zero(v); }
int LeadingZeros(uint128 v) {
auto high = static_cast<uint64_t>(v >> 64);
auto low = static_cast<uint64_t>(v);
- return high != 0 ? countl_zero(high) : 64 + countl_zero(low);
+ return high != 0 ? countl_zero(high) : 64 + countl_zero(low);
}
// Round up the text digits starting at `p`.
@@ -877,10 +877,10 @@ void FormatA(const HexFloatTypeParams float_traits, Int mantissa, int exp,
// This buffer holds the "0x1.ab1de3" portion of "0x1.ab1de3pe+2". Compute the
// size with long double which is the largest of the floats.
constexpr size_t kBufSizeForHexFloatRepr =
- 2 // 0x
- + std::numeric_limits<MaxFloatType>::digits / 4 // number of hex digits
- + 1 // round up
- + 1; // "." (dot)
+ 2 // 0x
+ + std::numeric_limits<MaxFloatType>::digits / 4 // number of hex digits
+ + 1 // round up
+ + 1; // "." (dot)
char digits_buffer[kBufSizeForHexFloatRepr];
char *digits_iter = digits_buffer;
const char *const digits =
@@ -1399,9 +1399,9 @@ bool FloatToSink(const Float v, const FormatConversionSpecImpl &conv,
bool ConvertFloatImpl(long double v, const FormatConversionSpecImpl &conv,
FormatSinkImpl *sink) {
- if (IsDoubleDouble()) {
- // This is the `double-double` representation of `long double`. We do not
- // handle it natively. Fallback to snprintf.
+ if (IsDoubleDouble()) {
+ // This is the `double-double` representation of `long double`. We do not
+ // handle it natively. Fall back to snprintf.
return FallbackToSnprintf(v, conv, sink);
}
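The hunk above selects MaxFloatType so that a double-double long double falls back to snprintf instead of the native conversion path. A standalone sketch of that selection; the digits comparison is an assumed implementation detail, included only to keep the sketch self-contained:

// Standalone sketch; the IsDoubleDouble() check here is an assumption.
#include <iostream>
#include <limits>
#include <type_traits>

constexpr bool IsDoubleDouble() {
  // Assumed check: a "double-double" long double carries exactly twice the
  // mantissa digits of a plain double.
  return std::numeric_limits<long double>::digits ==
         2 * std::numeric_limits<double>::digits;
}

// Same selection as MaxFloatType above: when long double is double-double,
// only plain double is handled natively and long double uses the fallback.
using MaxFloatType =
    typename std::conditional<IsDoubleDouble(), double, long double>::type;

int main() {
  std::cout << "double-double long double: " << IsDoubleDouble() << "\n"
            << "MaxFloatType mantissa digits: "
            << std::numeric_limits<MaxFloatType>::digits << "\n";
  return 0;
}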
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/ya.make b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/ya.make
index 1f07f02b2e..ae934819e5 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_format/ya.make
@@ -1,43 +1,43 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
- contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/strings
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
-SRCS(
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
+SRCS(
arg.cc
bind.cc
extension.cc
float_conversion.cc
output.cc
parser.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_split_internal.h b/contrib/restricted/abseil-cpp/absl/strings/internal/str_split_internal.h
index e766421617..bc503bc9a9 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_split_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_split_internal.h
@@ -51,9 +51,9 @@ ABSL_NAMESPACE_BEGIN
namespace strings_internal {
// This class is implicitly constructible from everything that absl::string_view
-// is implicitly constructible from, except for rvalue strings. This means it
-// can be used as a function parameter in places where passing a temporary
-// string might cause memory lifetime issues.
+// is implicitly constructible from, except for rvalue strings. This means it
+// can be used as a function parameter in places where passing a temporary
+// string might cause memory lifetime issues.
class ConvertibleToStringView {
public:
ConvertibleToStringView(const char* s) // NOLINT(runtime/explicit)
@@ -65,8 +65,8 @@ class ConvertibleToStringView {
: value_(s) {}
// Disable conversion from rvalue strings.
- ConvertibleToStringView(std::string&& s) = delete;
- ConvertibleToStringView(const std::string&& s) = delete;
+ ConvertibleToStringView(std::string&& s) = delete;
+ ConvertibleToStringView(const std::string&& s) = delete;
absl::string_view value() const { return value_; }
@@ -251,11 +251,11 @@ struct SplitterIsConvertibleTo
// the split strings: only strings for which the predicate returns true will be
// kept. A Predicate object is any unary functor that takes an absl::string_view
// and returns bool.
-//
-// The StringType parameter can be either string_view or string, depending on
-// whether the Splitter refers to a string stored elsewhere, or if the string
-// resides inside the Splitter itself.
-template <typename Delimiter, typename Predicate, typename StringType>
+//
+// The StringType parameter can be either string_view or string, depending on
+// whether the Splitter refers to a string stored elsewhere, or if the string
+// resides inside the Splitter itself.
+template <typename Delimiter, typename Predicate, typename StringType>
class Splitter {
public:
using DelimiterType = Delimiter;
@@ -263,12 +263,12 @@ class Splitter {
using const_iterator = strings_internal::SplitIterator<Splitter>;
using value_type = typename std::iterator_traits<const_iterator>::value_type;
- Splitter(StringType input_text, Delimiter d, Predicate p)
+ Splitter(StringType input_text, Delimiter d, Predicate p)
: text_(std::move(input_text)),
delimiter_(std::move(d)),
predicate_(std::move(p)) {}
- absl::string_view text() const { return text_; }
+ absl::string_view text() const { return text_; }
const Delimiter& delimiter() const { return delimiter_; }
const Predicate& predicate() const { return predicate_; }
@@ -418,7 +418,7 @@ class Splitter {
static iterator ToIter(iterator iter) { return iter; }
};
- StringType text_;
+ StringType text_;
Delimiter delimiter_;
Predicate predicate_;
};
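ConvertibleToStringView above deletes the rvalue std::string conversions so a temporary string cannot be passed where a view into it could dangle. A standalone sketch of the same idea with a hypothetical ViewParam type (not the Abseil class):

// Standalone sketch; ViewParam and FirstWord are hypothetical names.
#include <iostream>
#include <string>
#include <string_view>

struct ViewParam {
  ViewParam(const char* s) : value(s) {}
  ViewParam(const std::string& s) : value(s) {}  // lvalues are fine
  ViewParam(std::string&&) = delete;             // reject temporaries
  ViewParam(const std::string&&) = delete;
  std::string_view value;
};

// Returning a view is safe only because temporaries are rejected above.
std::string_view FirstWord(ViewParam text) {
  return text.value.substr(0, text.value.find(' '));
}

int main() {
  std::string s = "split me later";
  std::string_view w = FirstWord(s);  // ok: `s` outlives the view
  // FirstWord(std::string("temp"));  // rejected at compile time
  std::cout << w << "\n";
  return 0;
}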
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/string_constant.h b/contrib/restricted/abseil-cpp/absl/strings/internal/string_constant.h
index a11336b7f0..ade2a38ff5 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/string_constant.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/string_constant.h
@@ -1,64 +1,64 @@
-// Copyright 2020 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
-#define ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
-
-#include "absl/meta/type_traits.h"
-#include "absl/strings/string_view.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace strings_internal {
-
-// StringConstant<T> represents a compile time string constant.
-// It can be accessed via its `absl::string_view value` static member.
-// It is guaranteed that the `string_view` returned has constant `.data()`,
-// constant `.size()` and constant `value[i]` for all `0 <= i < .size()`
-//
-// The `T` is an opaque type. It is guaranteed that different string constants
-// will have different values of `T`. This allows users to associate the string
-// constant with other static state at compile time.
-//
-// Instances should be made using the `MakeStringConstant()` factory function
-// below.
-template <typename T>
-struct StringConstant {
- static constexpr absl::string_view value = T{}();
- constexpr absl::string_view operator()() const { return value; }
-
- // Check to be sure `view` points to constant data.
- // Otherwise, it can't be constant evaluated.
- static_assert(value.empty() || 2 * value[0] != 1,
- "The input string_view must point to constant data.");
-};
-
-template <typename T>
-constexpr absl::string_view StringConstant<T>::value; // NOLINT
-
-// Factory function for `StringConstant` instances.
-// It supports callables that have a constexpr default constructor and a
-// constexpr operator().
-// It must return an `absl::string_view` or `const char*` pointing to constant
-// data. This is validated at compile time.
-template <typename T>
-constexpr StringConstant<T> MakeStringConstant(T) {
- return {};
-}
-
-} // namespace strings_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
+#define ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
+
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace strings_internal {
+
+// StringConstant<T> represents a compile time string constant.
+// It can be accessed via its `absl::string_view value` static member.
+// It is guaranteed that the `string_view` returned has constant `.data()`,
+// constant `.size()` and constant `value[i]` for all `0 <= i < .size()`
+//
+// The `T` is an opaque type. It is guaranteed that different string constants
+// will have different values of `T`. This allows users to associate the string
+// constant with other static state at compile time.
+//
+// Instances should be made using the `MakeStringConstant()` factory function
+// below.
+template <typename T>
+struct StringConstant {
+ static constexpr absl::string_view value = T{}();
+ constexpr absl::string_view operator()() const { return value; }
+
+ // Check to be sure `value` points to constant data.
+ // Otherwise, it can't be constant evaluated.
+ static_assert(value.empty() || 2 * value[0] != 1,
+ "The input string_view must point to constant data.");
+};
+
+template <typename T>
+constexpr absl::string_view StringConstant<T>::value; // NOLINT
+
+// Factory function for `StringConstant` instances.
+// It supports callables that have a constexpr default constructor and a
+// constexpr operator().
+// It must return an `absl::string_view` or `const char*` pointing to constant
+// data. This is validated at compile time.
+template <typename T>
+constexpr StringConstant<T> MakeStringConstant(T) {
+ return {};
+}
+
+} // namespace strings_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
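A short usage sketch of the MakeStringConstant() factory documented above, assuming the header shown here is on the include path; it is an Abseil-internal API, so this is illustrative rather than supported usage, and CordTag is a hypothetical callable:

// Illustrative usage only; CordTag is a hypothetical callable.
#include <iostream>

#include "absl/strings/internal/string_constant.h"

struct CordTag {
  constexpr absl::string_view operator()() const { return "cord"; }
};

int main() {
  constexpr auto kTag = absl::strings_internal::MakeStringConstant(CordTag{});
  using Tag = decltype(kTag);
  static_assert(Tag::value.size() == 4, "evaluated at compile time");
  std::cout << Tag::value << "\n";  // prints "cord"
  return 0;
}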
diff --git a/contrib/restricted/abseil-cpp/absl/strings/match.cc b/contrib/restricted/abseil-cpp/absl/strings/match.cc
index 2d67250970..50f1fba32b 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/match.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/match.cc
@@ -19,22 +19,22 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-bool EqualsIgnoreCase(absl::string_view piece1,
- absl::string_view piece2) noexcept {
+bool EqualsIgnoreCase(absl::string_view piece1,
+ absl::string_view piece2) noexcept {
return (piece1.size() == piece2.size() &&
0 == absl::strings_internal::memcasecmp(piece1.data(), piece2.data(),
piece1.size()));
// memcasecmp uses absl::ascii_tolower().
}
-bool StartsWithIgnoreCase(absl::string_view text,
- absl::string_view prefix) noexcept {
+bool StartsWithIgnoreCase(absl::string_view text,
+ absl::string_view prefix) noexcept {
return (text.size() >= prefix.size()) &&
EqualsIgnoreCase(text.substr(0, prefix.size()), prefix);
}
-bool EndsWithIgnoreCase(absl::string_view text,
- absl::string_view suffix) noexcept {
+bool EndsWithIgnoreCase(absl::string_view text,
+ absl::string_view suffix) noexcept {
return (text.size() >= suffix.size()) &&
EqualsIgnoreCase(text.substr(text.size() - suffix.size()), suffix);
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/match.h b/contrib/restricted/abseil-cpp/absl/strings/match.h
index 038cbb3fa8..c79ed7edd4 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/match.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/match.h
@@ -43,20 +43,20 @@ ABSL_NAMESPACE_BEGIN
// StrContains()
//
// Returns whether a given string `haystack` contains the substring `needle`.
-inline bool StrContains(absl::string_view haystack,
- absl::string_view needle) noexcept {
+inline bool StrContains(absl::string_view haystack,
+ absl::string_view needle) noexcept {
return haystack.find(needle, 0) != haystack.npos;
}
-inline bool StrContains(absl::string_view haystack, char needle) noexcept {
- return haystack.find(needle) != haystack.npos;
-}
-
+inline bool StrContains(absl::string_view haystack, char needle) noexcept {
+ return haystack.find(needle) != haystack.npos;
+}
+
// StartsWith()
//
// Returns whether a given string `text` begins with `prefix`.
-inline bool StartsWith(absl::string_view text,
- absl::string_view prefix) noexcept {
+inline bool StartsWith(absl::string_view text,
+ absl::string_view prefix) noexcept {
return prefix.empty() ||
(text.size() >= prefix.size() &&
memcmp(text.data(), prefix.data(), prefix.size()) == 0);
@@ -65,8 +65,8 @@ inline bool StartsWith(absl::string_view text,
// EndsWith()
//
// Returns whether a given string `text` ends with `suffix`.
-inline bool EndsWith(absl::string_view text,
- absl::string_view suffix) noexcept {
+inline bool EndsWith(absl::string_view text,
+ absl::string_view suffix) noexcept {
return suffix.empty() ||
(text.size() >= suffix.size() &&
memcmp(text.data() + (text.size() - suffix.size()), suffix.data(),
@@ -77,22 +77,22 @@ inline bool EndsWith(absl::string_view text,
//
// Returns whether given ASCII strings `piece1` and `piece2` are equal, ignoring
// case in the comparison.
-bool EqualsIgnoreCase(absl::string_view piece1,
- absl::string_view piece2) noexcept;
+bool EqualsIgnoreCase(absl::string_view piece1,
+ absl::string_view piece2) noexcept;
// StartsWithIgnoreCase()
//
// Returns whether a given ASCII string `text` starts with `prefix`,
// ignoring case in the comparison.
-bool StartsWithIgnoreCase(absl::string_view text,
- absl::string_view prefix) noexcept;
+bool StartsWithIgnoreCase(absl::string_view text,
+ absl::string_view prefix) noexcept;
// EndsWithIgnoreCase()
//
// Returns whether a given ASCII string `text` ends with `suffix`, ignoring
// case in the comparison.
-bool EndsWithIgnoreCase(absl::string_view text,
- absl::string_view suffix) noexcept;
+bool EndsWithIgnoreCase(absl::string_view text,
+ absl::string_view suffix) noexcept;
ABSL_NAMESPACE_END
} // namespace absl
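The predicates declared above are thin wrappers over string_view comparisons; a quick usage sketch (nothing here beyond the functions shown in this header):

    #include "absl/strings/match.h"

    void MatchExamples() {
      bool a = absl::StrContains("abseil-cpp", "seil");        // true
      bool b = absl::StartsWith("absl::string_view", "absl");  // true
      bool c = absl::EndsWith("mutex.cc", ".cc");              // true
      bool d = absl::EqualsIgnoreCase("ABSL", "absl");         // true (ASCII only)
      (void)a; (void)b; (void)c; (void)d;
    }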
diff --git a/contrib/restricted/abseil-cpp/absl/strings/numbers.cc b/contrib/restricted/abseil-cpp/absl/strings/numbers.cc
index cbd84c918b..24f6bcfcb8 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/numbers.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/numbers.cc
@@ -32,7 +32,7 @@
#include "absl/base/attributes.h"
#include "absl/base/internal/raw_logging.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/strings/ascii.h"
#include "absl/strings/charconv.h"
#include "absl/strings/escaping.h"
@@ -46,13 +46,13 @@ ABSL_NAMESPACE_BEGIN
bool SimpleAtof(absl::string_view str, float* out) {
*out = 0.0;
str = StripAsciiWhitespace(str);
- // std::from_chars doesn't accept an initial +, but SimpleAtof does, so if one
- // is present, skip it, while avoiding accepting "+-0" as valid.
+ // std::from_chars doesn't accept an initial +, but SimpleAtof does, so if one
+ // is present, skip it, while avoiding accepting "+-0" as valid.
if (!str.empty() && str[0] == '+') {
str.remove_prefix(1);
- if (!str.empty() && str[0] == '-') {
- return false;
- }
+ if (!str.empty() && str[0] == '-') {
+ return false;
+ }
}
auto result = absl::from_chars(str.data(), str.data() + str.size(), *out);
if (result.ec == std::errc::invalid_argument) {
@@ -77,13 +77,13 @@ bool SimpleAtof(absl::string_view str, float* out) {
bool SimpleAtod(absl::string_view str, double* out) {
*out = 0.0;
str = StripAsciiWhitespace(str);
- // std::from_chars doesn't accept an initial +, but SimpleAtod does, so if one
- // is present, skip it, while avoiding accepting "+-0" as valid.
+ // std::from_chars doesn't accept an initial +, but SimpleAtod does, so if one
+ // is present, skip it, while avoiding accepting "+-0" as valid.
if (!str.empty() && str[0] == '+') {
str.remove_prefix(1);
- if (!str.empty() && str[0] == '-') {
- return false;
- }
+ if (!str.empty() && str[0] == '-') {
+ return false;
+ }
}
auto result = absl::from_chars(str.data(), str.data() + str.size(), *out);
if (result.ec == std::errc::invalid_argument) {
@@ -313,7 +313,7 @@ static std::pair<uint64_t, uint64_t> Mul32(std::pair<uint64_t, uint64_t> num,
uint64_t bits128_up = (bits96_127 >> 32) + (bits64_127 < bits64_95);
if (bits128_up == 0) return {bits64_127, bits0_63};
- auto shift = static_cast<unsigned>(bit_width(bits128_up));
+ auto shift = static_cast<unsigned>(bit_width(bits128_up));
uint64_t lo = (bits0_63 >> shift) + (bits64_127 << (64 - shift));
uint64_t hi = (bits64_127 >> shift) + (bits128_up << (64 - shift));
return {hi, lo};
@@ -344,7 +344,7 @@ static std::pair<uint64_t, uint64_t> PowFive(uint64_t num, int expfive) {
5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5};
result = Mul32(result, powers_of_five[expfive & 15]);
- int shift = countl_zero(result.first);
+ int shift = countl_zero(result.first);
if (shift != 0) {
result.first = (result.first << shift) + (result.second >> (64 - shift));
result.second = (result.second << shift);
@@ -746,18 +746,18 @@ struct LookupTables {
X / 35, X / 36, \
}
-// This kVmaxOverBase is generated with
-// for (int base = 2; base < 37; ++base) {
-// absl::uint128 max = std::numeric_limits<absl::uint128>::max();
-// auto result = max / base;
-// std::cout << " MakeUint128(" << absl::Uint128High64(result) << "u, "
-// << absl::Uint128Low64(result) << "u),\n";
-// }
-// See https://godbolt.org/z/aneYsb
-//
+// This kVmaxOverBase is generated with
+// for (int base = 2; base < 37; ++base) {
+// absl::uint128 max = std::numeric_limits<absl::uint128>::max();
+// auto result = max / base;
+// std::cout << " MakeUint128(" << absl::Uint128High64(result) << "u, "
+// << absl::Uint128Low64(result) << "u),\n";
+// }
+// See https://godbolt.org/z/aneYsb
+//
// uint128& operator/=(uint128) is not constexpr, so hardcode the resulting
// array to avoid a static initializer.
-template<>
+template<>
const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
0,
0,
@@ -798,111 +798,111 @@ const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
MakeUint128(512409557603043100u, 8198552921648689607u),
};
-// This kVmaxOverBase generated with
-// for (int base = 2; base < 37; ++base) {
-// absl::int128 max = std::numeric_limits<absl::int128>::max();
-// auto result = max / base;
-// std::cout << "\tMakeInt128(" << absl::Int128High64(result) << ", "
-// << absl::Int128Low64(result) << "u),\n";
-// }
-// See https://godbolt.org/z/7djYWz
-//
-// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
-// to avoid a static initializer.
-template<>
-const int128 LookupTables<int128>::kVmaxOverBase[] = {
- 0,
- 0,
- MakeInt128(4611686018427387903, 18446744073709551615u),
- MakeInt128(3074457345618258602, 12297829382473034410u),
- MakeInt128(2305843009213693951, 18446744073709551615u),
- MakeInt128(1844674407370955161, 11068046444225730969u),
- MakeInt128(1537228672809129301, 6148914691236517205u),
- MakeInt128(1317624576693539401, 2635249153387078802u),
- MakeInt128(1152921504606846975, 18446744073709551615u),
- MakeInt128(1024819115206086200, 16397105843297379214u),
- MakeInt128(922337203685477580, 14757395258967641292u),
- MakeInt128(838488366986797800, 13415813871788764811u),
- MakeInt128(768614336404564650, 12297829382473034410u),
- MakeInt128(709490156681136600, 11351842506898185609u),
- MakeInt128(658812288346769700, 10540996613548315209u),
- MakeInt128(614891469123651720, 9838263505978427528u),
- MakeInt128(576460752303423487, 18446744073709551615u),
- MakeInt128(542551296285575047, 9765923333140350855u),
- MakeInt128(512409557603043100, 8198552921648689607u),
- MakeInt128(485440633518672410, 17475862806672206794u),
- MakeInt128(461168601842738790, 7378697629483820646u),
- MakeInt128(439208192231179800, 7027331075698876806u),
- MakeInt128(419244183493398900, 6707906935894382405u),
- MakeInt128(401016175515425035, 2406097053092550210u),
- MakeInt128(384307168202282325, 6148914691236517205u),
- MakeInt128(368934881474191032, 5902958103587056517u),
- MakeInt128(354745078340568300, 5675921253449092804u),
- MakeInt128(341606371735362066, 17763531330238827482u),
- MakeInt128(329406144173384850, 5270498306774157604u),
- MakeInt128(318047311615681924, 7633135478776366185u),
- MakeInt128(307445734561825860, 4919131752989213764u),
- MakeInt128(297528130221121800, 4760450083537948804u),
- MakeInt128(288230376151711743, 18446744073709551615u),
- MakeInt128(279496122328932600, 4471937957262921603u),
- MakeInt128(271275648142787523, 14106333703424951235u),
- MakeInt128(263524915338707880, 4216398645419326083u),
- MakeInt128(256204778801521550, 4099276460824344803u),
-};
-
-// This kVminOverBase generated with
-// for (int base = 2; base < 37; ++base) {
-// absl::int128 min = std::numeric_limits<absl::int128>::min();
-// auto result = min / base;
-// std::cout << "\tMakeInt128(" << absl::Int128High64(result) << ", "
-// << absl::Int128Low64(result) << "u),\n";
-// }
-//
-// See https://godbolt.org/z/7djYWz
-//
-// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
-// to avoid a static initializer.
-template<>
-const int128 LookupTables<int128>::kVminOverBase[] = {
- 0,
- 0,
- MakeInt128(-4611686018427387904, 0u),
- MakeInt128(-3074457345618258603, 6148914691236517206u),
- MakeInt128(-2305843009213693952, 0u),
- MakeInt128(-1844674407370955162, 7378697629483820647u),
- MakeInt128(-1537228672809129302, 12297829382473034411u),
- MakeInt128(-1317624576693539402, 15811494920322472814u),
- MakeInt128(-1152921504606846976, 0u),
- MakeInt128(-1024819115206086201, 2049638230412172402u),
- MakeInt128(-922337203685477581, 3689348814741910324u),
- MakeInt128(-838488366986797801, 5030930201920786805u),
- MakeInt128(-768614336404564651, 6148914691236517206u),
- MakeInt128(-709490156681136601, 7094901566811366007u),
- MakeInt128(-658812288346769701, 7905747460161236407u),
- MakeInt128(-614891469123651721, 8608480567731124088u),
- MakeInt128(-576460752303423488, 0u),
- MakeInt128(-542551296285575048, 8680820740569200761u),
- MakeInt128(-512409557603043101, 10248191152060862009u),
- MakeInt128(-485440633518672411, 970881267037344822u),
- MakeInt128(-461168601842738791, 11068046444225730970u),
- MakeInt128(-439208192231179801, 11419412998010674810u),
- MakeInt128(-419244183493398901, 11738837137815169211u),
- MakeInt128(-401016175515425036, 16040647020617001406u),
- MakeInt128(-384307168202282326, 12297829382473034411u),
- MakeInt128(-368934881474191033, 12543785970122495099u),
- MakeInt128(-354745078340568301, 12770822820260458812u),
- MakeInt128(-341606371735362067, 683212743470724134u),
- MakeInt128(-329406144173384851, 13176245766935394012u),
- MakeInt128(-318047311615681925, 10813608594933185431u),
- MakeInt128(-307445734561825861, 13527612320720337852u),
- MakeInt128(-297528130221121801, 13686293990171602812u),
- MakeInt128(-288230376151711744, 0u),
- MakeInt128(-279496122328932601, 13974806116446630013u),
- MakeInt128(-271275648142787524, 4340410370284600381u),
- MakeInt128(-263524915338707881, 14230345428290225533u),
- MakeInt128(-256204778801521551, 14347467612885206813u),
-};
-
+// This kVmaxOverBase generated with
+// for (int base = 2; base < 37; ++base) {
+// absl::int128 max = std::numeric_limits<absl::int128>::max();
+// auto result = max / base;
+// std::cout << "\tMakeInt128(" << absl::Int128High64(result) << ", "
+// << absl::Int128Low64(result) << "u),\n";
+// }
+// See https://godbolt.org/z/7djYWz
+//
+// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
+// to avoid a static initializer.
+template<>
+const int128 LookupTables<int128>::kVmaxOverBase[] = {
+ 0,
+ 0,
+ MakeInt128(4611686018427387903, 18446744073709551615u),
+ MakeInt128(3074457345618258602, 12297829382473034410u),
+ MakeInt128(2305843009213693951, 18446744073709551615u),
+ MakeInt128(1844674407370955161, 11068046444225730969u),
+ MakeInt128(1537228672809129301, 6148914691236517205u),
+ MakeInt128(1317624576693539401, 2635249153387078802u),
+ MakeInt128(1152921504606846975, 18446744073709551615u),
+ MakeInt128(1024819115206086200, 16397105843297379214u),
+ MakeInt128(922337203685477580, 14757395258967641292u),
+ MakeInt128(838488366986797800, 13415813871788764811u),
+ MakeInt128(768614336404564650, 12297829382473034410u),
+ MakeInt128(709490156681136600, 11351842506898185609u),
+ MakeInt128(658812288346769700, 10540996613548315209u),
+ MakeInt128(614891469123651720, 9838263505978427528u),
+ MakeInt128(576460752303423487, 18446744073709551615u),
+ MakeInt128(542551296285575047, 9765923333140350855u),
+ MakeInt128(512409557603043100, 8198552921648689607u),
+ MakeInt128(485440633518672410, 17475862806672206794u),
+ MakeInt128(461168601842738790, 7378697629483820646u),
+ MakeInt128(439208192231179800, 7027331075698876806u),
+ MakeInt128(419244183493398900, 6707906935894382405u),
+ MakeInt128(401016175515425035, 2406097053092550210u),
+ MakeInt128(384307168202282325, 6148914691236517205u),
+ MakeInt128(368934881474191032, 5902958103587056517u),
+ MakeInt128(354745078340568300, 5675921253449092804u),
+ MakeInt128(341606371735362066, 17763531330238827482u),
+ MakeInt128(329406144173384850, 5270498306774157604u),
+ MakeInt128(318047311615681924, 7633135478776366185u),
+ MakeInt128(307445734561825860, 4919131752989213764u),
+ MakeInt128(297528130221121800, 4760450083537948804u),
+ MakeInt128(288230376151711743, 18446744073709551615u),
+ MakeInt128(279496122328932600, 4471937957262921603u),
+ MakeInt128(271275648142787523, 14106333703424951235u),
+ MakeInt128(263524915338707880, 4216398645419326083u),
+ MakeInt128(256204778801521550, 4099276460824344803u),
+};
+
+// This kVminOverBase generated with
+// for (int base = 2; base < 37; ++base) {
+// absl::int128 min = std::numeric_limits<absl::int128>::min();
+// auto result = min / base;
+// std::cout << "\tMakeInt128(" << absl::Int128High64(result) << ", "
+// << absl::Int128Low64(result) << "u),\n";
+// }
+//
+// See https://godbolt.org/z/7djYWz
+//
+// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
+// to avoid a static initializer.
+template<>
+const int128 LookupTables<int128>::kVminOverBase[] = {
+ 0,
+ 0,
+ MakeInt128(-4611686018427387904, 0u),
+ MakeInt128(-3074457345618258603, 6148914691236517206u),
+ MakeInt128(-2305843009213693952, 0u),
+ MakeInt128(-1844674407370955162, 7378697629483820647u),
+ MakeInt128(-1537228672809129302, 12297829382473034411u),
+ MakeInt128(-1317624576693539402, 15811494920322472814u),
+ MakeInt128(-1152921504606846976, 0u),
+ MakeInt128(-1024819115206086201, 2049638230412172402u),
+ MakeInt128(-922337203685477581, 3689348814741910324u),
+ MakeInt128(-838488366986797801, 5030930201920786805u),
+ MakeInt128(-768614336404564651, 6148914691236517206u),
+ MakeInt128(-709490156681136601, 7094901566811366007u),
+ MakeInt128(-658812288346769701, 7905747460161236407u),
+ MakeInt128(-614891469123651721, 8608480567731124088u),
+ MakeInt128(-576460752303423488, 0u),
+ MakeInt128(-542551296285575048, 8680820740569200761u),
+ MakeInt128(-512409557603043101, 10248191152060862009u),
+ MakeInt128(-485440633518672411, 970881267037344822u),
+ MakeInt128(-461168601842738791, 11068046444225730970u),
+ MakeInt128(-439208192231179801, 11419412998010674810u),
+ MakeInt128(-419244183493398901, 11738837137815169211u),
+ MakeInt128(-401016175515425036, 16040647020617001406u),
+ MakeInt128(-384307168202282326, 12297829382473034411u),
+ MakeInt128(-368934881474191033, 12543785970122495099u),
+ MakeInt128(-354745078340568301, 12770822820260458812u),
+ MakeInt128(-341606371735362067, 683212743470724134u),
+ MakeInt128(-329406144173384851, 13176245766935394012u),
+ MakeInt128(-318047311615681925, 10813608594933185431u),
+ MakeInt128(-307445734561825861, 13527612320720337852u),
+ MakeInt128(-297528130221121801, 13686293990171602812u),
+ MakeInt128(-288230376151711744, 0u),
+ MakeInt128(-279496122328932601, 13974806116446630013u),
+ MakeInt128(-271275648142787524, 4340410370284600381u),
+ MakeInt128(-263524915338707881, 14230345428290225533u),
+ MakeInt128(-256204778801521551, 14347467612885206813u),
+};
+
template <typename IntType>
const IntType LookupTables<IntType>::kVmaxOverBase[] =
X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::max());
@@ -1072,10 +1072,10 @@ bool safe_strto64_base(absl::string_view text, int64_t* value, int base) {
return safe_int_internal<int64_t>(text, value, base);
}
-bool safe_strto128_base(absl::string_view text, int128* value, int base) {
- return safe_int_internal<absl::int128>(text, value, base);
-}
-
+bool safe_strto128_base(absl::string_view text, int128* value, int base) {
+ return safe_int_internal<absl::int128>(text, value, base);
+}
+
bool safe_strtou32_base(absl::string_view text, uint32_t* value, int base) {
return safe_uint_internal<uint32_t>(text, value, base);
}
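The kVmaxOverBase/kVminOverBase tables above are hard-coded because int128/uint128 division is not constexpr, and the comments record the generator loop that produced them. For reference, a self-contained version of that generator (the same loop as in the comment, plus the includes and main it needs to build):

    #include <iostream>
    #include <limits>
    #include "absl/numeric/int128.h"

    int main() {
      for (int base = 2; base < 37; ++base) {
        absl::uint128 max = std::numeric_limits<absl::uint128>::max();
        auto result = max / base;
        std::cout << "  MakeUint128(" << absl::Uint128High64(result) << "u, "
                  << absl::Uint128Low64(result) << "u),\n";
      }
    }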
diff --git a/contrib/restricted/abseil-cpp/absl/strings/numbers.h b/contrib/restricted/abseil-cpp/absl/strings/numbers.h
index 899e623c8c..6952facbb2 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/numbers.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/numbers.h
@@ -50,7 +50,7 @@
#endif
#include "absl/base/macros.h"
#include "absl/base/port.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
@@ -147,11 +147,11 @@ inline void PutTwoDigits(size_t i, char* buf) {
}
// safe_strto?() functions for implementing SimpleAtoi()
-
+
bool safe_strto32_base(absl::string_view text, int32_t* value, int base);
bool safe_strto64_base(absl::string_view text, int64_t* value, int base);
-bool safe_strto128_base(absl::string_view text, absl::int128* value,
- int base);
+bool safe_strto128_base(absl::string_view text, absl::int128* value,
+ int base);
bool safe_strtou32_base(absl::string_view text, uint32_t* value, int base);
bool safe_strtou64_base(absl::string_view text, uint64_t* value, int base);
bool safe_strtou128_base(absl::string_view text, absl::uint128* value,
@@ -263,7 +263,7 @@ inline size_t FastHexToBufferZeroPad16(uint64_t val, char* out) {
}
#endif
// | 0x1 so that even 0 has 1 digit.
- return 16 - countl_zero(val | 0x1) / 4;
+ return 16 - countl_zero(val | 0x1) / 4;
}
} // namespace numbers_internal
@@ -274,11 +274,11 @@ ABSL_MUST_USE_RESULT bool SimpleAtoi(absl::string_view str, int_type* out) {
}
ABSL_MUST_USE_RESULT inline bool SimpleAtoi(absl::string_view str,
- absl::int128* out) {
- return numbers_internal::safe_strto128_base(str, out, 10);
-}
-
-ABSL_MUST_USE_RESULT inline bool SimpleAtoi(absl::string_view str,
+ absl::int128* out) {
+ return numbers_internal::safe_strto128_base(str, out, 10);
+}
+
+ABSL_MUST_USE_RESULT inline bool SimpleAtoi(absl::string_view str,
absl::uint128* out) {
return numbers_internal::safe_strtou128_base(str, out, 10);
}
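A short sketch of the public entry points these declarations back: SimpleAtoi has the int128/uint128 overloads shown above, and (as noted in numbers.cc) SimpleAtof/SimpleAtod accept a single leading '+' but reject "+-0":

    #include "absl/numeric/int128.h"
    #include "absl/strings/numbers.h"

    void NumbersExamples() {
      absl::int128 big;
      bool ok = absl::SimpleAtoi("170141183460469231731687303715884105727", &big);

      float f;
      bool plus_ok = absl::SimpleAtof("+3.5", &f);  // true, f == 3.5f
      bool rejected = absl::SimpleAtof("+-0", &f);  // false
      (void)ok; (void)plus_ok; (void)rejected;
    }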
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_join.h b/contrib/restricted/abseil-cpp/absl/strings/str_join.h
index 33534536cf..96eb8725ad 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_join.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_join.h
@@ -144,7 +144,7 @@ strings_internal::DereferenceFormatterImpl<Formatter> DereferenceFormatter(
std::forward<Formatter>(f));
}
-// Function overload of `DereferenceFormatter()` for using a default
+// Function overload of `DereferenceFormatter()` for using a default
// `AlphaNumFormatter()`.
inline strings_internal::DereferenceFormatterImpl<
strings_internal::AlphaNumFormatterImpl>
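The DereferenceFormatter() overload referenced above lets StrJoin format a container of pointers or smart pointers by their pointees, using the default AlphaNumFormatter. A minimal usage sketch:

    #include <memory>
    #include <string>
    #include <vector>
    #include "absl/strings/str_join.h"

    std::string JoinPointees() {
      std::vector<std::unique_ptr<int>> v;
      v.push_back(std::make_unique<int>(1));
      v.push_back(std::make_unique<int>(2));
      return absl::StrJoin(v, "-", absl::DereferenceFormatter());  // "1-2"
    }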
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_split.h b/contrib/restricted/abseil-cpp/absl/strings/str_split.h
index bfbca422a8..c896bcbf83 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_split.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_split.h
@@ -369,12 +369,12 @@ struct SkipWhitespace {
}
};
-template <typename T>
-using EnableSplitIfString =
- typename std::enable_if<std::is_same<T, std::string>::value ||
- std::is_same<T, const std::string>::value,
- int>::type;
-
+template <typename T>
+using EnableSplitIfString =
+ typename std::enable_if<std::is_same<T, std::string>::value ||
+ std::is_same<T, const std::string>::value,
+ int>::type;
+
//------------------------------------------------------------------------------
// StrSplit()
//------------------------------------------------------------------------------
@@ -495,50 +495,50 @@ using EnableSplitIfString =
// Try not to depend on this distinction because the bug may one day be fixed.
template <typename Delimiter>
strings_internal::Splitter<
- typename strings_internal::SelectDelimiter<Delimiter>::type, AllowEmpty,
- absl::string_view>
+ typename strings_internal::SelectDelimiter<Delimiter>::type, AllowEmpty,
+ absl::string_view>
StrSplit(strings_internal::ConvertibleToStringView text, Delimiter d) {
using DelimiterType =
typename strings_internal::SelectDelimiter<Delimiter>::type;
- return strings_internal::Splitter<DelimiterType, AllowEmpty,
- absl::string_view>(
- text.value(), DelimiterType(d), AllowEmpty());
-}
-
-template <typename Delimiter, typename StringType,
- EnableSplitIfString<StringType> = 0>
-strings_internal::Splitter<
- typename strings_internal::SelectDelimiter<Delimiter>::type, AllowEmpty,
- std::string>
-StrSplit(StringType&& text, Delimiter d) {
- using DelimiterType =
- typename strings_internal::SelectDelimiter<Delimiter>::type;
- return strings_internal::Splitter<DelimiterType, AllowEmpty, std::string>(
+ return strings_internal::Splitter<DelimiterType, AllowEmpty,
+ absl::string_view>(
+ text.value(), DelimiterType(d), AllowEmpty());
+}
+
+template <typename Delimiter, typename StringType,
+ EnableSplitIfString<StringType> = 0>
+strings_internal::Splitter<
+ typename strings_internal::SelectDelimiter<Delimiter>::type, AllowEmpty,
+ std::string>
+StrSplit(StringType&& text, Delimiter d) {
+ using DelimiterType =
+ typename strings_internal::SelectDelimiter<Delimiter>::type;
+ return strings_internal::Splitter<DelimiterType, AllowEmpty, std::string>(
std::move(text), DelimiterType(d), AllowEmpty());
}
template <typename Delimiter, typename Predicate>
strings_internal::Splitter<
- typename strings_internal::SelectDelimiter<Delimiter>::type, Predicate,
- absl::string_view>
+ typename strings_internal::SelectDelimiter<Delimiter>::type, Predicate,
+ absl::string_view>
StrSplit(strings_internal::ConvertibleToStringView text, Delimiter d,
Predicate p) {
using DelimiterType =
typename strings_internal::SelectDelimiter<Delimiter>::type;
- return strings_internal::Splitter<DelimiterType, Predicate,
- absl::string_view>(
- text.value(), DelimiterType(d), std::move(p));
-}
-
-template <typename Delimiter, typename Predicate, typename StringType,
- EnableSplitIfString<StringType> = 0>
-strings_internal::Splitter<
- typename strings_internal::SelectDelimiter<Delimiter>::type, Predicate,
- std::string>
-StrSplit(StringType&& text, Delimiter d, Predicate p) {
- using DelimiterType =
- typename strings_internal::SelectDelimiter<Delimiter>::type;
- return strings_internal::Splitter<DelimiterType, Predicate, std::string>(
+ return strings_internal::Splitter<DelimiterType, Predicate,
+ absl::string_view>(
+ text.value(), DelimiterType(d), std::move(p));
+}
+
+template <typename Delimiter, typename Predicate, typename StringType,
+ EnableSplitIfString<StringType> = 0>
+strings_internal::Splitter<
+ typename strings_internal::SelectDelimiter<Delimiter>::type, Predicate,
+ std::string>
+StrSplit(StringType&& text, Delimiter d, Predicate p) {
+ using DelimiterType =
+ typename strings_internal::SelectDelimiter<Delimiter>::type;
+ return strings_internal::Splitter<DelimiterType, Predicate, std::string>(
std::move(text), DelimiterType(d), std::move(p));
}
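The EnableSplitIfString overloads above exist so StrSplit can take ownership of an rvalue std::string; the returned pieces then remain valid even though the input was a temporary. A sketch using the stock SkipEmpty predicate from this header family:

    #include <string>
    #include <vector>
    #include "absl/strings/str_split.h"

    std::vector<std::string> SplitOwned() {
      // The temporary string is moved into the splitter, so "parts" does not
      // reference a destroyed buffer after this statement.
      std::vector<std::string> parts =
          absl::StrSplit(std::string("a,b,,c"), ',', absl::SkipEmpty());
      return parts;  // {"a", "b", "c"}
    }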
diff --git a/contrib/restricted/abseil-cpp/absl/strings/ya.make b/contrib/restricted/abseil-cpp/absl/strings/ya.make
index bc11193f12..278e4f8cf7 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/strings/ya.make
@@ -9,9 +9,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/numeric
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex.h
index 06fbd6d072..8503a65aa7 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex.h
@@ -38,19 +38,19 @@
#include "absl/base/optimization.h"
#include "absl/synchronization/internal/kernel_timeout.h"
-#ifdef ABSL_INTERNAL_HAVE_FUTEX
-#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line
-#elif defined(__BIONIC__)
-// Bionic supports all the futex operations we need even when some of the futex
-// definitions are missing.
-#define ABSL_INTERNAL_HAVE_FUTEX
-#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
-// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
-#define ABSL_INTERNAL_HAVE_FUTEX
-#endif
-
-#ifdef ABSL_INTERNAL_HAVE_FUTEX
-
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line
+#elif defined(__BIONIC__)
+// Bionic supports all the futex operations we need even when some of the futex
+// definitions are missing.
+#define ABSL_INTERNAL_HAVE_FUTEX
+#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
+// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
+#define ABSL_INTERNAL_HAVE_FUTEX
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
@@ -149,6 +149,6 @@ class Futex : public FutexImpl {};
ABSL_NAMESPACE_END
} // namespace absl
-#endif // ABSL_INTERNAL_HAVE_FUTEX
-
+#endif // ABSL_INTERNAL_HAVE_FUTEX
+
#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
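The block above is purely a feature-detection chain: ABSL_INTERNAL_HAVE_FUTEX must not be set externally and is defined only for Bionic or for Linux kernels new enough to have FUTEX_CLOCK_REALTIME. A sketch of how downstream code keys off it (the macro check is the whole point; no futex calls are shown here):

    #include "absl/synchronization/internal/futex.h"

    #ifdef ABSL_INTERNAL_HAVE_FUTEX
    // futex(2)-based waiters are available (Bionic, or Linux >= 2.6.28).
    #else
    // Other platforms fall back to semaphore- or condvar-based waiters.
    #endif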
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
index 27fec21681..3ed6c7ba14 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
@@ -37,7 +37,7 @@
#include <algorithm>
#include <array>
-#include <limits>
+#include <limits>
#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
index a6031787e0..30b4336dd1 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
@@ -68,12 +68,12 @@ ABSL_NAMESPACE_END
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity *identity) {
absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}
-ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
+ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t) {
bool timeout = false;
absl::base_internal::ThreadIdentity *identity;
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
index 7beae8ef1d..ef450b953a 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
@@ -96,20 +96,20 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity* identity);
-bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
+bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t);
} // extern "C"
void absl::synchronization_internal::PerThreadSem::Post(
absl::base_internal::ThreadIdentity* identity) {
- ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
}
bool absl::synchronization_internal::PerThreadSem::Wait(
absl::synchronization_internal::KernelTimeout t) {
- return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
+ return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
}
#endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
index be3df180d4..638490367b 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
@@ -36,7 +36,7 @@
#include <cstdint>
#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/futex.h"
+#include "absl/synchronization/internal/futex.h"
#include "absl/synchronization/internal/kernel_timeout.h"
// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
@@ -49,7 +49,7 @@
#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
-#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
+#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
#elif defined(ABSL_HAVE_SEMAPHORE_H)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
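Selection order in the chain above: an explicit -DABSL_FORCE_WAITER_MODE wins, then Win32, then futex (when ABSL_INTERNAL_HAVE_FUTEX was defined by the header included just before), then the semaphore fallback. A compile-time probe of the result, assuming the ABSL_WAITER_MODE_* constants defined earlier in this same header:

    #include "absl/synchronization/internal/waiter.h"

    #if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
    // On this platform absl::synchronization_internal::Waiter is futex-backed.
    #endif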
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
index 76ad41fe16..a68691f3a2 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
@@ -50,7 +50,7 @@
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/internal/thread_identity.h"
-#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/port.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
@@ -70,9 +70,9 @@ using absl::synchronization_internal::KernelTimeout;
using absl::synchronization_internal::PerThreadSem;
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
- std::this_thread::yield();
-}
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
+ std::this_thread::yield();
+}
} // extern "C"
namespace absl {
@@ -126,44 +126,44 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
symbolizer.Store(fn);
}
-namespace {
-// Represents the strategy for spin and yield.
-// See the comment in GetMutexGlobals() for more information.
-enum DelayMode { AGGRESSIVE, GENTLE };
-
+namespace {
+// Represents the strategy for spin and yield.
+// See the comment in GetMutexGlobals() for more information.
+enum DelayMode { AGGRESSIVE, GENTLE };
+
struct ABSL_CACHELINE_ALIGNED MutexGlobals {
absl::once_flag once;
int spinloop_iterations = 0;
- int32_t mutex_sleep_limit[2] = {};
+ int32_t mutex_sleep_limit[2] = {};
};
-const MutexGlobals &GetMutexGlobals() {
+const MutexGlobals &GetMutexGlobals() {
ABSL_CONST_INIT static MutexGlobals data;
absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
- const int num_cpus = absl::base_internal::NumCPUs();
- data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
- // If this a uniprocessor, only yield/sleep. Otherwise, if the mode is
- // aggressive then spin many times before yielding. If the mode is
- // gentle then spin only a few times before yielding. Aggressive spinning
- // is used to ensure that an Unlock() call, which must get the spin lock
- // for any thread to make progress gets it without undue delay.
- if (num_cpus > 1) {
- data.mutex_sleep_limit[AGGRESSIVE] = 5000;
- data.mutex_sleep_limit[GENTLE] = 250;
- } else {
- data.mutex_sleep_limit[AGGRESSIVE] = 0;
- data.mutex_sleep_limit[GENTLE] = 0;
- }
+ const int num_cpus = absl::base_internal::NumCPUs();
+ data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
+ // If this a uniprocessor, only yield/sleep. Otherwise, if the mode is
+ // aggressive then spin many times before yielding. If the mode is
+ // gentle then spin only a few times before yielding. Aggressive spinning
+ // is used to ensure that an Unlock() call, which must get the spin lock
+ // for any thread to make progress gets it without undue delay.
+ if (num_cpus > 1) {
+ data.mutex_sleep_limit[AGGRESSIVE] = 5000;
+ data.mutex_sleep_limit[GENTLE] = 250;
+ } else {
+ data.mutex_sleep_limit[AGGRESSIVE] = 0;
+ data.mutex_sleep_limit[GENTLE] = 0;
+ }
});
return data;
}
-} // namespace
+} // namespace
namespace synchronization_internal {
-// Returns the Mutex delay on iteration `c` depending on the given `mode`.
-// The returned value should be used as `c` for the next call to `MutexDelay`.
+// Returns the Mutex delay on iteration `c` depending on the given `mode`.
+// The returned value should be used as `c` for the next call to `MutexDelay`.
int MutexDelay(int32_t c, int mode) {
- const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
+ const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
if (c < limit) {
// Spin.
c++;
@@ -172,7 +172,7 @@ int MutexDelay(int32_t c, int mode) {
ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
if (c == limit) {
// Yield once.
- ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
c++;
} else {
// Then wait.
@@ -559,7 +559,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
}
// Post on "w"'s associated PerThreadSem.
-void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
@@ -717,7 +717,7 @@ static constexpr bool kDebugMode = false;
static constexpr bool kDebugMode = true;
#endif
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
static unsigned TsanFlags(Mutex::MuHow how) {
return how == kShared ? __tsan_mutex_read_lock : 0;
}
@@ -763,13 +763,13 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
synch_deadlock_detection.store(mode, std::memory_order_release);
}
-// Return true iff threads x and y are part of the same equivalence
-// class of waiters. An equivalence class is defined as the set of
-// waiters with the same condition, type of lock, and thread priority.
-//
-// Requires that x and y be waiting on the same Mutex queue.
-static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
- return x->waitp->how == y->waitp->how && x->priority == y->priority &&
+// Return true iff threads x and y are part of the same equivalence
+// class of waiters. An equivalence class is defined as the set of
+// waiters with the same condition, type of lock, and thread priority.
+//
+// Requires that x and y be waiting on the same Mutex queue.
+static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+ return x->waitp->how == y->waitp->how && x->priority == y->priority &&
Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
}
@@ -788,19 +788,19 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
// - invalid (iff x is not in a Mutex wait queue),
// - null, or
// - a pointer to a distinct thread waiting later in the same Mutex queue
-// such that all threads in [x, x->skip] have the same condition, priority
-// and lock type (MuEquivalentWaiter() is true for all pairs in [x,
-// x->skip]).
+// such that all threads in [x, x->skip] have the same condition, priority
+// and lock type (MuEquivalentWaiter() is true for all pairs in [x,
+// x->skip]).
// In addition, if x->skip is valid, (x->may_skip || x->skip == null)
//
-// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
+// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
// first runnable thread y from the front a Mutex queue to adjust the skip
// field of another thread x because if x->skip==y, x->skip must (have) become
// invalid before y is removed. The function TryRemove can remove a specified
// thread from an arbitrary position in the queue whether runnable or not, so
// it fixes up skip fields that would otherwise be left dangling.
// The statement
-// if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
+// if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
// maintains the invariant provided x is not the last waiter in a Mutex queue
// The statement
// if (x->skip != null) { x->skip = x->skip->skip; }
@@ -934,17 +934,17 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
if (s->priority > head->priority) { // s's priority is above head's
// try to put s in priority-fifo order, or failing that at the front.
if (!head->maybe_unlocking) {
- // No unlocker can be scanning the queue, so we can insert into the
- // middle of the queue.
- //
- // Within a skip chain, all waiters have the same priority, so we can
- // skip forward through the chains until we find one with a lower
- // priority than the waiter to be enqueued.
+ // No unlocker can be scanning the queue, so we can insert into the
+ // middle of the queue.
+ //
+ // Within a skip chain, all waiters have the same priority, so we can
+ // skip forward through the chains until we find one with a lower
+ // priority than the waiter to be enqueued.
PerThreadSynch *advance_to = head; // next value of enqueue_after
do {
enqueue_after = advance_to;
- // (side-effect: optimizes skip chain)
- advance_to = Skip(enqueue_after->next);
+ // (side-effect: optimizes skip chain)
+ advance_to = Skip(enqueue_after->next);
} while (s->priority <= advance_to->priority);
// termination guaranteed because s->priority > head->priority
// and head is the end of a skip chain
@@ -963,21 +963,21 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// enqueue_after can be: head, Skip(...), or cur.
// The first two imply enqueue_after->skip == nullptr, and
- // the last is used only if MuEquivalentWaiter(s, cur).
+ // the last is used only if MuEquivalentWaiter(s, cur).
// We require this because clearing enqueue_after->skip
// is impossible; enqueue_after's predecessors might also
// incorrectly skip over s if we were to allow other
// insertion points.
- ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
- MuEquivalentWaiter(enqueue_after, s),
- "Mutex Enqueue failure");
+ ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
+ MuEquivalentWaiter(enqueue_after, s),
+ "Mutex Enqueue failure");
if (enqueue_after != head && enqueue_after->may_skip &&
- MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
+ MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
// enqueue_after can skip to its new successor, s
enqueue_after->skip = enqueue_after->next;
}
- if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
+ if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
s->skip = s->next; // s may skip to its successor
}
} else { // enqueue not done any other way, so
@@ -987,7 +987,7 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
head->next = s;
s->readers = head->readers; // reader count is from previous head
s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
- if (head->may_skip && MuEquivalentWaiter(head, s)) {
+ if (head->may_skip && MuEquivalentWaiter(head, s)) {
// head now has successor; may skip
head->skip = s;
}
@@ -1007,7 +1007,7 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
pw->next = w->next; // snip w out of list
if (head == w) { // we removed the head
head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
- } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
+ } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
// pw can skip to its new successor
if (pw->next->skip !=
nullptr) { // either skip to its successors skip target
@@ -1077,13 +1077,13 @@ void Mutex::TryRemove(PerThreadSynch *s) {
PerThreadSynch *w;
if ((w = pw->next) != s) { // search for thread,
do { // processing at least one element
- // If the current element isn't equivalent to the waiter to be
- // removed, we can skip the entire chain.
- if (!MuEquivalentWaiter(s, w)) {
+ // If the current element isn't equivalent to the waiter to be
+ // removed, we can skip the entire chain.
+ if (!MuEquivalentWaiter(s, w)) {
pw = Skip(w); // so skip all that won't match
// we don't have to worry about dangling skip fields
// in the threads we skipped; none can point to s
- // because they are in a different equivalence class.
+ // because they are in a different equivalence class.
} else { // seeking same condition
FixSkip(w, s); // fix up any skip pointer from w to s
pw = w;
@@ -1374,9 +1374,9 @@ static GraphId DeadlockCheck(Mutex *mu) {
len += static_cast<int>(strlen(&b->buf[len]));
}
}
- ABSL_RAW_LOG(ERROR,
- "Acquiring absl::Mutex %p while holding %s; a cycle in the "
- "historical lock ordering graph has been observed",
+ ABSL_RAW_LOG(ERROR,
+ "Acquiring absl::Mutex %p while holding %s; a cycle in the "
+ "historical lock ordering graph has been observed",
static_cast<void *>(mu), b->buf);
ABSL_RAW_LOG(ERROR, "Cycle: ");
int path_len = deadlock_graph->FindPath(
@@ -1779,7 +1779,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
// All memory accesses are ignored inside of mutex operations + for unlock
// operation tsan considers that we've already released the mutex.
bool res = false;
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
const int flags = read_lock ? __tsan_mutex_read_lock : 0;
const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
#endif
@@ -2150,7 +2150,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
!old_h->may_skip) { // we used old_h as a terminator
old_h->may_skip = true; // allow old_h to skip once more
ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
- if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
+ if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
old_h->skip = old_h->next; // old_h not head & can skip to successor
}
}
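The DeadlockCheck logging above only fires when deadlock detection is enabled; the public knob is SetMutexDeadlockDetectionMode, declared in mutex.h. A minimal sketch of turning on reporting in a debug build:

    #include "absl/synchronization/mutex.h"

    void EnableDeadlockReporting() {
      // kReport logs the historical lock-ordering cycle; kAbort would crash.
      absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
    }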
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
index 38338f24df..d8bf58c871 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
@@ -31,23 +31,23 @@
//
// MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
// write access within the current scope.
-//
+//
// ReaderMutexLock
// - An RAII wrapper to acquire and release a `Mutex` for shared/read
// access within the current scope.
//
// WriterMutexLock
-// - Effectively an alias for `MutexLock` above, designed for use in
-// distinguishing reader and writer locks within code.
+// - Effectively an alias for `MutexLock` above, designed for use in
+// distinguishing reader and writer locks within code.
//
// In addition to simple mutex locks, this file also defines ways to perform
// locking under certain conditions.
//
-// Condition - (Preferred) Used to wait for a particular predicate that
-// depends on state protected by the `Mutex` to become true.
-// CondVar - A lower-level variant of `Condition` that relies on
-// application code to explicitly signal the `CondVar` when
-// a condition has been met.
+// Condition - (Preferred) Used to wait for a particular predicate that
+// depends on state protected by the `Mutex` to become true.
+// CondVar - A lower-level variant of `Condition` that relies on
+// application code to explicitly signal the `CondVar` when
+// a condition has been met.
//
// See below for more information on using `Condition` or `CondVar`.
//
@@ -147,7 +147,7 @@ class ABSL_LOCKABLE Mutex {
//
// Example usage:
// namespace foo {
- // ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);
+ // ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);
// }
explicit constexpr Mutex(absl::ConstInitType);
@@ -162,7 +162,7 @@ class ABSL_LOCKABLE Mutex {
// Mutex::Unlock()
//
// Releases this `Mutex` and returns it from the exclusive/write state to the
- // free state. Calling thread must hold the `Mutex` exclusively.
+ // free state. Calling thread must hold the `Mutex` exclusively.
void Unlock() ABSL_UNLOCK_FUNCTION();
// Mutex::TryLock()
@@ -457,9 +457,9 @@ class ABSL_LOCKABLE Mutex {
// Post()/Wait() versus associated PerThreadSem; in class for required
// friendship with PerThreadSem.
- static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
- static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
- synchronization_internal::KernelTimeout t);
+ static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
+ static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
+ synchronization_internal::KernelTimeout t);
// slow path acquire
void LockSlowLoop(SynchWaitParams *waitp, int flags);
@@ -505,36 +505,36 @@ class ABSL_LOCKABLE Mutex {
// Example:
//
// Class Foo {
-// public:
+// public:
// Foo::Bar* Baz() {
-// MutexLock lock(&mu_);
+// MutexLock lock(&mu_);
// ...
// return bar;
// }
//
// private:
-// Mutex mu_;
+// Mutex mu_;
// };
class ABSL_SCOPED_LOCKABLE MutexLock {
public:
- // Constructors
-
- // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
- // guaranteed to be locked when this object is constructed. Requires that
- // `mu` be dereferenceable.
+ // Constructors
+
+ // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
+ // guaranteed to be locked when this object is constructed. Requires that
+ // `mu` be dereferenceable.
explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
- // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
- // the above, the condition given by `cond` is also guaranteed to hold when
- // this object is constructed.
- explicit MutexLock(Mutex *mu, const Condition &cond)
- ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- this->mu_->LockWhen(cond);
- }
-
+ // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
+ // the above, the condition given by `cond` is also guaranteed to hold when
+ // this object is constructed.
+ explicit MutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->LockWhen(cond);
+ }
+
MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex)
MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
MutexLock& operator=(const MutexLock&) = delete;
@@ -556,12 +556,12 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
mu->ReaderLock();
}
- explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
- ABSL_SHARED_LOCK_FUNCTION(mu)
- : mu_(mu) {
- mu->ReaderLockWhen(cond);
- }
-
+ explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_SHARED_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->ReaderLockWhen(cond);
+ }
+
ReaderMutexLock(const ReaderMutexLock&) = delete;
ReaderMutexLock(ReaderMutexLock&&) = delete;
ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
@@ -584,12 +584,12 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
mu->WriterLock();
}
- explicit WriterMutexLock(Mutex *mu, const Condition &cond)
- ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- mu->WriterLockWhen(cond);
- }
-
+ explicit WriterMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->WriterLockWhen(cond);
+ }
+
WriterMutexLock(const WriterMutexLock&) = delete;
WriterMutexLock(WriterMutexLock&&) = delete;
WriterMutexLock& operator=(const WriterMutexLock&) = delete;
@@ -628,27 +628,27 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
// `noexcept`; until then this requirement cannot be enforced in the
// type system.)
//
-// Note: to use a `Condition`, you need only construct it and pass it to a
-// suitable `Mutex' member function, such as `Mutex::Await()`, or to the
-// constructor of one of the scope guard classes.
+// Note: to use a `Condition`, you need only construct it and pass it to a
+// suitable `Mutex' member function, such as `Mutex::Await()`, or to the
+// constructor of one of the scope guard classes.
//
-// Example using LockWhen/Unlock:
+// Example using LockWhen/Unlock:
//
// // assume count_ is not internal reference count
// int count_ ABSL_GUARDED_BY(mu_);
-// Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
-//
-// mu_.LockWhen(count_is_zero);
-// // ...
-// mu_.Unlock();
-//
-// Example using a scope guard:
-//
-// {
-// MutexLock lock(&mu_, count_is_zero);
-// // ...
-// }
-//
+// Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
+//
+// mu_.LockWhen(count_is_zero);
+// // ...
+// mu_.Unlock();
+//
+// Example using a scope guard:
+//
+// {
+// MutexLock lock(&mu_, count_is_zero);
+// // ...
+// }
+//
// When multiple threads are waiting on exactly the same condition, make sure
// that they are constructed with the same parameters (same pointer to function
// + arg, or same pointer to object + method), so that the mutex implementation
@@ -879,15 +879,15 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
this->mu_->Lock();
}
}
-
- explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
- ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- if (this->mu_ != nullptr) {
- this->mu_->LockWhen(cond);
- }
- }
-
+
+ explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ if (this->mu_ != nullptr) {
+ this->mu_->LockWhen(cond);
+ }
+ }
+
~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
if (this->mu_ != nullptr) { this->mu_->Unlock(); }
}
@@ -910,13 +910,13 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
: mu_(mu) {
this->mu_->Lock();
}
-
- explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
- ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- this->mu_->LockWhen(cond);
- }
-
+
+ explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->LockWhen(cond);
+ }
+
~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
if (this->mu_ != nullptr) { this->mu_->Unlock(); }
}
@@ -1076,7 +1076,7 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
} // extern "C"
#endif // ABSL_SYNCHRONIZATION_MUTEX_H_
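The Condition/MutexLock usage described in the comments above, written out as a self-contained sketch (the Counter class is illustrative; the mu_/count_ names mirror the commented example):

    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class Counter {
     public:
      void Increment() {
        absl::MutexLock lock(&mu_);
        ++count_;
      }
      void Decrement() {
        absl::MutexLock lock(&mu_);
        --count_;
      }
      // Blocks until count_ == 0 and holds mu_ for the rest of the scope,
      // via the MutexLock(Mutex*, const Condition&) constructor shown above.
      void WaitUntilZero() {
        const absl::Condition count_is_zero(
            +[](int *count) { return *count == 0; }, &count_);
        absl::MutexLock lock(&mu_, count_is_zero);
        // count_ == 0 here.
      }

     private:
      absl::Mutex mu_;
      int count_ ABSL_GUARDED_BY(mu_) = 0;
    };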
diff --git a/contrib/restricted/abseil-cpp/absl/time/clock.cc b/contrib/restricted/abseil-cpp/absl/time/clock.cc
index 7b204c4ee0..bce60d53d6 100644
--- a/contrib/restricted/abseil-cpp/absl/time/clock.cc
+++ b/contrib/restricted/abseil-cpp/absl/time/clock.cc
@@ -15,7 +15,7 @@
#include "absl/time/clock.h"
#include "absl/base/attributes.h"
-#include "absl/base/optimization.h"
+#include "absl/base/optimization.h"
#ifdef _WIN32
#include <windows.h>
@@ -152,109 +152,109 @@ static_assert(((kMinNSBetweenSamples << (kScale + 1)) >> (kScale + 1)) ==
// data from a sample of the kernel's time value
struct TimeSampleAtomic {
- std::atomic<uint64_t> raw_ns{0}; // raw kernel time
- std::atomic<uint64_t> base_ns{0}; // our estimate of time
- std::atomic<uint64_t> base_cycles{0}; // cycle counter reading
- std::atomic<uint64_t> nsscaled_per_cycle{0}; // cycle period
+ std::atomic<uint64_t> raw_ns{0}; // raw kernel time
+ std::atomic<uint64_t> base_ns{0}; // our estimate of time
+ std::atomic<uint64_t> base_cycles{0}; // cycle counter reading
+ std::atomic<uint64_t> nsscaled_per_cycle{0}; // cycle period
// cycles before we'll sample again (a scaled reciprocal of the period,
// to avoid a division on the fast path).
- std::atomic<uint64_t> min_cycles_per_sample{0};
+ std::atomic<uint64_t> min_cycles_per_sample{0};
};
// Same again, but with non-atomic types
struct TimeSample {
- uint64_t raw_ns = 0; // raw kernel time
- uint64_t base_ns = 0; // our estimate of time
- uint64_t base_cycles = 0; // cycle counter reading
- uint64_t nsscaled_per_cycle = 0; // cycle period
- uint64_t min_cycles_per_sample = 0; // approx cycles before next sample
+ uint64_t raw_ns = 0; // raw kernel time
+ uint64_t base_ns = 0; // our estimate of time
+ uint64_t base_cycles = 0; // cycle counter reading
+ uint64_t nsscaled_per_cycle = 0; // cycle period
+ uint64_t min_cycles_per_sample = 0; // approx cycles before next sample
};
-struct ABSL_CACHELINE_ALIGNED TimeState {
- std::atomic<uint64_t> seq{0};
- TimeSampleAtomic last_sample; // the last sample; under seq
-
- // The following counters are used only by the test code.
- int64_t stats_initializations{0};
- int64_t stats_reinitializations{0};
- int64_t stats_calibrations{0};
- int64_t stats_slow_paths{0};
- int64_t stats_fast_slow_paths{0};
-
- uint64_t last_now_cycles ABSL_GUARDED_BY(lock){0};
-
- // Used by GetCurrentTimeNanosFromKernel().
- // We try to read clock values at about the same time as the kernel clock.
- // This value gets adjusted up or down as estimate of how long that should
- // take, so we can reject attempts that take unusually long.
- std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};
- // Number of times in a row we've seen a kernel time call take substantially
- // less than approx_syscall_time_in_cycles.
- std::atomic<uint32_t> kernel_time_seen_smaller{0};
-
- // A reader-writer lock protecting the static locations below.
- // See SeqAcquire() and SeqRelease() above.
- absl::base_internal::SpinLock lock{absl::kConstInit,
- base_internal::SCHEDULE_KERNEL_ONLY};
-};
-ABSL_CONST_INIT static TimeState time_state{};
-
-// Return the time in ns as told by the kernel interface. Place in *cycleclock
-// the value of the cycleclock at about the time of the syscall.
-// This call represents the time base that this module synchronizes to.
-// Ensures that *cycleclock does not step back by up to (1 << 16) from
-// last_cycleclock, to discard small backward counter steps. (Larger steps are
-// assumed to be complete resyncs, which shouldn't happen. If they do, a full
-// reinitialization of the outer algorithm should occur.)
-static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
- uint64_t *cycleclock)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
- uint64_t local_approx_syscall_time_in_cycles = // local copy
- time_state.approx_syscall_time_in_cycles.load(std::memory_order_relaxed);
-
- int64_t current_time_nanos_from_system;
- uint64_t before_cycles;
- uint64_t after_cycles;
- uint64_t elapsed_cycles;
- int loops = 0;
- do {
- before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
- current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
- after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
- // elapsed_cycles is unsigned, so is large on overflow
- elapsed_cycles = after_cycles - before_cycles;
- if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
- ++loops == 20) { // clock changed frequencies? Back off.
- loops = 0;
- if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
- local_approx_syscall_time_in_cycles =
- (local_approx_syscall_time_in_cycles + 1) << 1;
- }
- time_state.approx_syscall_time_in_cycles.store(
- local_approx_syscall_time_in_cycles, std::memory_order_relaxed);
- }
- } while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
- last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));
-
- // Adjust approx_syscall_time_in_cycles to be within a factor of 2
- // of the typical time to execute one iteration of the loop above.
- if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
- // measured time is no smaller than half current approximation
- time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
- } else if (time_state.kernel_time_seen_smaller.fetch_add(
- 1, std::memory_order_relaxed) >= 3) {
- // smaller delays several times in a row; reduce approximation by 12.5%
- const uint64_t new_approximation =
- local_approx_syscall_time_in_cycles -
- (local_approx_syscall_time_in_cycles >> 3);
- time_state.approx_syscall_time_in_cycles.store(new_approximation,
- std::memory_order_relaxed);
- time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
- }
-
- *cycleclock = after_cycles;
- return current_time_nanos_from_system;
-}
-
+struct ABSL_CACHELINE_ALIGNED TimeState {
+ std::atomic<uint64_t> seq{0};
+ TimeSampleAtomic last_sample; // the last sample; under seq
+
+ // The following counters are used only by the test code.
+ int64_t stats_initializations{0};
+ int64_t stats_reinitializations{0};
+ int64_t stats_calibrations{0};
+ int64_t stats_slow_paths{0};
+ int64_t stats_fast_slow_paths{0};
+
+ uint64_t last_now_cycles ABSL_GUARDED_BY(lock){0};
+
+ // Used by GetCurrentTimeNanosFromKernel().
+ // We try to read clock values at about the same time as the kernel clock.
+ // This value gets adjusted up or down as an estimate of how long that should
+ // take, so we can reject attempts that take unusually long.
+ std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};
+ // Number of times in a row we've seen a kernel time call take substantially
+ // less than approx_syscall_time_in_cycles.
+ std::atomic<uint32_t> kernel_time_seen_smaller{0};
+
+ // A reader-writer lock protecting the static locations below.
+ // See SeqAcquire() and SeqRelease() above.
+ absl::base_internal::SpinLock lock{absl::kConstInit,
+ base_internal::SCHEDULE_KERNEL_ONLY};
+};
+ABSL_CONST_INIT static TimeState time_state{};
+
+// Return the time in ns as told by the kernel interface. Place in *cycleclock
+// the value of the cycleclock at about the time of the syscall.
+// This call represents the time base that this module synchronizes to.
+// Ensures that *cycleclock does not step back by up to (1 << 16) from
+// last_cycleclock, to discard small backward counter steps. (Larger steps are
+// assumed to be complete resyncs, which shouldn't happen. If they do, a full
+// reinitialization of the outer algorithm should occur.)
+static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
+ uint64_t *cycleclock)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
+ uint64_t local_approx_syscall_time_in_cycles = // local copy
+ time_state.approx_syscall_time_in_cycles.load(std::memory_order_relaxed);
+
+ int64_t current_time_nanos_from_system;
+ uint64_t before_cycles;
+ uint64_t after_cycles;
+ uint64_t elapsed_cycles;
+ int loops = 0;
+ do {
+ before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+ current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
+ after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+ // elapsed_cycles is unsigned, so is large on overflow
+ elapsed_cycles = after_cycles - before_cycles;
+ if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
+ ++loops == 20) { // clock changed frequencies? Back off.
+ loops = 0;
+ if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
+ local_approx_syscall_time_in_cycles =
+ (local_approx_syscall_time_in_cycles + 1) << 1;
+ }
+ time_state.approx_syscall_time_in_cycles.store(
+ local_approx_syscall_time_in_cycles, std::memory_order_relaxed);
+ }
+ } while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
+ last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));
+
+ // Adjust approx_syscall_time_in_cycles to be within a factor of 2
+ // of the typical time to execute one iteration of the loop above.
+ if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
+ // measured time is no smaller than half current approximation
+ time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
+ } else if (time_state.kernel_time_seen_smaller.fetch_add(
+ 1, std::memory_order_relaxed) >= 3) {
+ // smaller delays several times in a row; reduce approximation by 12.5%
+ const uint64_t new_approximation =
+ local_approx_syscall_time_in_cycles -
+ (local_approx_syscall_time_in_cycles >> 3);
+ time_state.approx_syscall_time_in_cycles.store(new_approximation,
+ std::memory_order_relaxed);
+ time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
+ }
+
+ *cycleclock = after_cycles;
+ return current_time_nanos_from_system;
+}
+
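[Editor's note] GetCurrentTimeNanosFromKernel() above brackets the kernel time read between two cycle-counter reads and accepts the pair only when the bracket is about as narrow as a typical syscall, so the returned nanoseconds and cycle count describe nearly the same instant; when the bracket keeps coming out wide, the acceptable width is grown. The self-contained sketch below shows the same bracket-and-retry idea with standard-library clocks standing in for the cycle counter and the kernel interface; ReadBracketed, CycleCounterNow and kMaxAttempts are invented names for the illustration, and the back-off policy is simplified relative to the code above.

#include <chrono>
#include <cstdint>
#include <utility>

// Illustrative stand-ins: steady_clock plays the cycle counter, system_clock
// plays the kernel time base.
static uint64_t CycleCounterNow() {
  return static_cast<uint64_t>(
      std::chrono::steady_clock::now().time_since_epoch().count());
}

static int64_t KernelTimeNanos() {
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             std::chrono::system_clock::now().time_since_epoch())
      .count();
}

// Read the "kernel" time bracketed by two "cycle counter" reads. Retry while
// the bracket is wide (for example because the thread was preempted between
// the reads), widening the acceptance threshold if it never fits.
static std::pair<int64_t, uint64_t> ReadBracketed(uint64_t max_bracket_cycles) {
  constexpr int kMaxAttempts = 20;  // invented bound for the sketch
  int64_t ns = 0;
  uint64_t after = 0;
  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    const uint64_t before = CycleCounterNow();
    ns = KernelTimeNanos();
    after = CycleCounterNow();
    if (after - before < max_bracket_cycles) break;  // tight enough: accept
    max_bracket_cycles += max_bracket_cycles / 2;    // simplified back-off
  }
  return {ns, after};
}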
static int64_t GetCurrentTimeNanosSlowPath() ABSL_ATTRIBUTE_COLD;
// Read the contents of *atomic into *sample.
@@ -321,15 +321,15 @@ int64_t GetCurrentTimeNanos() {
// Acquire pairs with the barrier in SeqRelease - if this load sees that
// store, the shared-data reads necessarily see that SeqRelease's updates
// to the same shared data.
- seq_read0 = time_state.seq.load(std::memory_order_acquire);
+ seq_read0 = time_state.seq.load(std::memory_order_acquire);
- base_ns = time_state.last_sample.base_ns.load(std::memory_order_relaxed);
- base_cycles =
- time_state.last_sample.base_cycles.load(std::memory_order_relaxed);
+ base_ns = time_state.last_sample.base_ns.load(std::memory_order_relaxed);
+ base_cycles =
+ time_state.last_sample.base_cycles.load(std::memory_order_relaxed);
nsscaled_per_cycle =
- time_state.last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
- min_cycles_per_sample = time_state.last_sample.min_cycles_per_sample.load(
- std::memory_order_relaxed);
+ time_state.last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
+ min_cycles_per_sample = time_state.last_sample.min_cycles_per_sample.load(
+ std::memory_order_relaxed);
// This acquire fence pairs with the release fence in SeqAcquire. Since it
// is sequenced between reads of shared data and seq_read1, the reads of
@@ -340,7 +340,7 @@ int64_t GetCurrentTimeNanos() {
// shared-data writes are effectively release ordered. Therefore if our
// shared-data reads see any of a particular update's shared-data writes,
// seq_read1 is guaranteed to see that update's SeqAcquire.
- seq_read1 = time_state.seq.load(std::memory_order_relaxed);
+ seq_read1 = time_state.seq.load(std::memory_order_relaxed);
// Fast path. Return if min_cycles_per_sample has not yet elapsed since the
// last sample, and we read a consistent sample. The fast path activates
@@ -353,9 +353,9 @@ int64_t GetCurrentTimeNanos() {
// last_sample was updated). This is harmless, because delta_cycles will wrap
// and report a time much much bigger than min_cycles_per_sample. In that case
// we will take the slow path.
- uint64_t delta_cycles;
+ uint64_t delta_cycles;
if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
- (delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) {
+ (delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) {
return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale);
}
return GetCurrentTimeNanosSlowPath();
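[Editor's note] The fast path above is the read side of a seqlock: two reads of time_state.seq bracket the relaxed loads of the shared sample, and the snapshot is used only if both reads return the same even value (odd means a writer is mid-update). A generic reader with the same shape, using the hypothetical names g_seq and g_value:

#include <atomic>
#include <cstdint>
#include <optional>

static std::atomic<uint64_t> g_seq{0};    // even = stable, odd = write in progress
static std::atomic<uint64_t> g_value{0};  // the seqlock-protected payload

// Returns the payload if a consistent snapshot was read, otherwise nullopt;
// a caller would then fall back to a slow path, as GetCurrentTimeNanos() does.
static std::optional<uint64_t> TryReadConsistent() {
  const uint64_t seq0 = g_seq.load(std::memory_order_acquire);
  const uint64_t value = g_value.load(std::memory_order_relaxed);
  // Pairs with the writer's release fence; orders the payload read before the
  // second sequence read.
  std::atomic_thread_fence(std::memory_order_acquire);
  const uint64_t seq1 = g_seq.load(std::memory_order_relaxed);
  if (seq0 == seq1 && (seq0 & 1) == 0) return value;
  return std::nullopt;
}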
@@ -395,25 +395,25 @@ static uint64_t UpdateLastSample(
// TODO(absl-team): Remove this attribute when our compiler is smart enough
// to do the right thing.
ABSL_ATTRIBUTE_NOINLINE
-static int64_t GetCurrentTimeNanosSlowPath()
- ABSL_LOCKS_EXCLUDED(time_state.lock) {
+static int64_t GetCurrentTimeNanosSlowPath()
+ ABSL_LOCKS_EXCLUDED(time_state.lock) {
// Serialize access to slow-path. Fast-path readers are not blocked yet, and
// code below must not modify last_sample until the seqlock is acquired.
- time_state.lock.Lock();
+ time_state.lock.Lock();
// Sample the kernel time base. This is the definition of
// "now" if we take the slow path.
uint64_t now_cycles;
- uint64_t now_ns =
- GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles);
- time_state.last_now_cycles = now_cycles;
+ uint64_t now_ns =
+ GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles);
+ time_state.last_now_cycles = now_cycles;
uint64_t estimated_base_ns;
// ----------
// Read the "last_sample" values again; this time holding the write lock.
struct TimeSample sample;
- ReadTimeSampleAtomic(&time_state.last_sample, &sample);
+ ReadTimeSampleAtomic(&time_state.last_sample, &sample);
// ----------
// Try running the fast path again; another thread may have updated the
@@ -424,13 +424,13 @@ static int64_t GetCurrentTimeNanosSlowPath()
// so that blocked readers can make progress without blocking new readers.
estimated_base_ns = sample.base_ns +
((delta_cycles * sample.nsscaled_per_cycle) >> kScale);
- time_state.stats_fast_slow_paths++;
+ time_state.stats_fast_slow_paths++;
} else {
estimated_base_ns =
UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample);
}
- time_state.lock.Unlock();
+ time_state.lock.Unlock();
return estimated_base_ns;
}
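[Editor's note] The slow path above does three things while holding time_state.lock: it re-reads the shared sample (another thread may have refreshed it while this one waited for the lock), retries the fast-path computation from that re-read sample, and only if that still fails does it fold the new kernel reading into the calibration. A compressed sketch of that control flow, with a std::mutex and invented names standing in for the real state:

#include <cstdint>
#include <mutex>

struct Calibration {
  uint64_t base_ns = 0;
  uint64_t base_cycles = 0;
  uint64_t nsscaled_per_cycle = 0;      // 0 means "no trusted slope yet"
  uint64_t min_cycles_per_sample = 0;
};

static std::mutex g_slow_lock;          // stands in for time_state.lock
static Calibration g_calibration;       // stands in for time_state.last_sample

// now_cycles and kernel_ns would come from the cycle counter and the kernel
// read; kScaleBits is an assumed fixed-point shift for the sketch.
static uint64_t SlowPath(uint64_t now_cycles, uint64_t kernel_ns,
                         unsigned kScaleBits = 30) {
  std::lock_guard<std::mutex> guard(g_slow_lock);
  const uint64_t delta = now_cycles - g_calibration.base_cycles;
  if (g_calibration.nsscaled_per_cycle != 0 &&
      delta < g_calibration.min_cycles_per_sample) {
    // The sample was refreshed while we waited for the lock: reuse it.
    return g_calibration.base_ns +
           ((delta * g_calibration.nsscaled_per_cycle) >> kScaleBits);
  }
  // Otherwise fall back to the kernel reading and start a fresh sample (the
  // real code recomputes the slope inside UpdateLastSample(), under a seqlock).
  g_calibration = Calibration{kernel_ns, now_cycles, 0, 0};
  return kernel_ns;
}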
@@ -441,10 +441,10 @@ static int64_t GetCurrentTimeNanosSlowPath()
static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
uint64_t delta_cycles,
const struct TimeSample *sample)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
uint64_t estimated_base_ns = now_ns;
- uint64_t lock_value =
- SeqAcquire(&time_state.seq); // acquire seqlock to block readers
+ uint64_t lock_value =
+ SeqAcquire(&time_state.seq); // acquire seqlock to block readers
// The 5s in the next if-statement limits the time for which we will trust
// the cycle counter and our last sample to give a reasonable result.
@@ -454,16 +454,16 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns ||
now_ns < sample->raw_ns || now_cycles < sample->base_cycles) {
// record this sample, and forget any previously known slope.
- time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
- time_state.last_sample.base_ns.store(estimated_base_ns,
- std::memory_order_relaxed);
- time_state.last_sample.base_cycles.store(now_cycles,
- std::memory_order_relaxed);
- time_state.last_sample.nsscaled_per_cycle.store(0,
- std::memory_order_relaxed);
- time_state.last_sample.min_cycles_per_sample.store(
- 0, std::memory_order_relaxed);
- time_state.stats_initializations++;
+ time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
+ time_state.last_sample.base_ns.store(estimated_base_ns,
+ std::memory_order_relaxed);
+ time_state.last_sample.base_cycles.store(now_cycles,
+ std::memory_order_relaxed);
+ time_state.last_sample.nsscaled_per_cycle.store(0,
+ std::memory_order_relaxed);
+ time_state.last_sample.min_cycles_per_sample.store(
+ 0, std::memory_order_relaxed);
+ time_state.stats_initializations++;
} else if (sample->raw_ns + 500 * 1000 * 1000 < now_ns &&
sample->base_cycles + 50 < now_cycles) {
// Enough time has passed to compute the cycle time.
@@ -506,32 +506,32 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
if (new_nsscaled_per_cycle != 0 &&
diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) {
// record the cycle time measurement
- time_state.last_sample.nsscaled_per_cycle.store(
+ time_state.last_sample.nsscaled_per_cycle.store(
new_nsscaled_per_cycle, std::memory_order_relaxed);
uint64_t new_min_cycles_per_sample =
SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle);
- time_state.last_sample.min_cycles_per_sample.store(
+ time_state.last_sample.min_cycles_per_sample.store(
new_min_cycles_per_sample, std::memory_order_relaxed);
- time_state.stats_calibrations++;
+ time_state.stats_calibrations++;
} else { // something went wrong; forget the slope
- time_state.last_sample.nsscaled_per_cycle.store(
- 0, std::memory_order_relaxed);
- time_state.last_sample.min_cycles_per_sample.store(
- 0, std::memory_order_relaxed);
+ time_state.last_sample.nsscaled_per_cycle.store(
+ 0, std::memory_order_relaxed);
+ time_state.last_sample.min_cycles_per_sample.store(
+ 0, std::memory_order_relaxed);
estimated_base_ns = now_ns;
- time_state.stats_reinitializations++;
+ time_state.stats_reinitializations++;
}
- time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
- time_state.last_sample.base_ns.store(estimated_base_ns,
- std::memory_order_relaxed);
- time_state.last_sample.base_cycles.store(now_cycles,
- std::memory_order_relaxed);
+ time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
+ time_state.last_sample.base_ns.store(estimated_base_ns,
+ std::memory_order_relaxed);
+ time_state.last_sample.base_cycles.store(now_cycles,
+ std::memory_order_relaxed);
} else {
// have a sample, but no slope; waiting for enough time for a calibration
- time_state.stats_slow_paths++;
+ time_state.stats_slow_paths++;
}
- SeqRelease(&time_state.seq, lock_value); // release the readers
+ SeqRelease(&time_state.seq, lock_value); // release the readers
return estimated_base_ns;
}
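[Editor's note] The calibration stored by UpdateLastSample() is a scaled reciprocal: nanoseconds-per-cycle is kept as an integer shifted left by kScale bits, so the fast path converts cycles to nanoseconds with a multiply and a shift instead of a division. The arithmetic, in isolation, looks roughly like the following; kScaleBits = 30 and the overflow guard are assumptions for the sketch (the real kScale and SafeDivideAndScale() are defined earlier in clock.cc and are not reproduced here).

#include <cstdint>

constexpr unsigned kScaleBits = 30;  // assumed fixed-point shift

// (ns << kScaleBits) / cycles, or 0 when the shift would overflow; this plays
// the role SafeDivideAndScale() plays in the code above.
static uint64_t ScaledDivide(uint64_t ns, uint64_t cycles) {
  if (cycles == 0 || ns > (UINT64_MAX >> kScaleBits)) return 0;
  return (ns << kScaleBits) / cycles;
}

// Fast-path conversion of a cycle delta back to nanoseconds: one multiply and
// one shift, matching base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale).
static uint64_t CyclesToNanos(uint64_t delta_cycles, uint64_t nsscaled_per_cycle) {
  return (delta_cycles * nsscaled_per_cycle) >> kScaleBits;
}

// Worked example: 3'000'000 cycles observed over 1'000'000 ns (a 3 GHz clock
// sampled for 1 ms) gives ScaledDivide(1'000'000, 3'000'000) ~= 357'913'941,
// and CyclesToNanos(3'000'000, 357'913'941) recovers ~999'999 ns.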
@@ -573,8 +573,8 @@ ABSL_NAMESPACE_END
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(
- absl::Duration duration) {
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(
+ absl::Duration duration) {
while (duration > absl::ZeroDuration()) {
absl::Duration to_sleep = std::min(duration, absl::MaxSleep());
absl::SleepOnce(to_sleep);
diff --git a/contrib/restricted/abseil-cpp/absl/time/clock.h b/contrib/restricted/abseil-cpp/absl/time/clock.h
index 5fe244d638..1cf616ff16 100644
--- a/contrib/restricted/abseil-cpp/absl/time/clock.h
+++ b/contrib/restricted/abseil-cpp/absl/time/clock.h
@@ -64,11 +64,11 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(absl::Duration duration);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(absl::Duration duration);
} // extern "C"
inline void absl::SleepFor(absl::Duration duration) {
- ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(duration);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(duration);
}
#endif // ABSL_TIME_CLOCK_H_
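[Editor's note] clock.h above exposes sleeping through a weak extern "C" symbol, AbslInternalSleepFor, so a different object file (for example a test or sanitizer runtime) can interpose its own implementation while ordinary callers keep using the inline absl::SleepFor() wrapper. Typical use of the public API is just:

#include "absl/time/clock.h"
#include "absl/time/time.h"

void ThrottleExample() {
  // Sleeps for at least 100 ms; the clock.cc hunk above shows how long
  // requests are split into bounded chunks and retried.
  absl::SleepFor(absl::Milliseconds(100));
}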
diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h
index 8aadde57ca..a0be43ae1f 100644
--- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h
+++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h
@@ -416,10 +416,10 @@ class civil_time {
// Assigning arithmetic.
CONSTEXPR_M civil_time& operator+=(diff_t n) noexcept {
- return *this = *this + n;
+ return *this = *this + n;
}
CONSTEXPR_M civil_time& operator-=(diff_t n) noexcept {
- return *this = *this - n;
+ return *this = *this - n;
}
CONSTEXPR_M civil_time& operator++() noexcept { return *this += 1; }
CONSTEXPR_M civil_time operator++(int) noexcept {
@@ -436,15 +436,15 @@ class civil_time {
// Binary arithmetic operators.
friend CONSTEXPR_F civil_time operator+(civil_time a, diff_t n) noexcept {
- return civil_time(step(T{}, a.f_, n));
+ return civil_time(step(T{}, a.f_, n));
}
friend CONSTEXPR_F civil_time operator+(diff_t n, civil_time a) noexcept {
- return a + n;
+ return a + n;
}
friend CONSTEXPR_F civil_time operator-(civil_time a, diff_t n) noexcept {
- return n != (std::numeric_limits<diff_t>::min)()
- ? civil_time(step(T{}, a.f_, -n))
- : civil_time(step(T{}, step(T{}, a.f_, -(n + 1)), 1));
+ return n != (std::numeric_limits<diff_t>::min)()
+ ? civil_time(step(T{}, a.f_, -n))
+ : civil_time(step(T{}, step(T{}, a.f_, -(n + 1)), 1));
}
friend CONSTEXPR_F diff_t operator-(civil_time lhs, civil_time rhs) noexcept {
return difference(T{}, lhs.f_, rhs.f_);
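[Editor's note] The operator-(civil_time, diff_t) overload above special-cases std::numeric_limits<diff_t>::min() because negating the most negative value of a signed type is undefined behaviour. The sketch below isolates that decomposition on plain int64_t offsets; Advance() is an invented stand-in for the internal step() helper, and it uses wrap-around addition only so the sketch itself stays well defined (the real step() advances civil-time fields and has no such overflow concern).

#include <cstdint>
#include <limits>

// Invented stand-in for step(T{}, fields, n); wrap-around addition keeps the
// sketch free of signed overflow.
static int64_t Advance(int64_t base, int64_t n) {
  return static_cast<int64_t>(static_cast<uint64_t>(base) +
                              static_cast<uint64_t>(n));
}

// Subtract n without ever computing -n, which would overflow when
// n == std::numeric_limits<int64_t>::min().
static int64_t SubtractOffset(int64_t base, int64_t n) {
  if (n != (std::numeric_limits<int64_t>::min)()) {
    return Advance(base, -n);
  }
  // -(n + 1) is representable; take the one remaining step separately.
  return Advance(Advance(base, -(n + 1)), 1);
}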
diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc
index 887dd097c6..fdd6ab25fd 100644
--- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc
+++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc
@@ -27,12 +27,12 @@
#include "absl/time/internal/cctz/include/cctz/civil_time.h"
#include "absl/time/internal/cctz/include/cctz/time_zone.h"
-#if defined(_AIX)
-extern "C" {
-extern long altzone;
-}
-#endif
-
+#if defined(_AIX)
+extern "C" {
+extern long altzone;
+}
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
@@ -50,7 +50,7 @@ auto tm_zone(const std::tm& tm) -> decltype(_tzname[0]) {
const bool is_dst = tm.tm_isdst > 0;
return _tzname[is_dst];
}
-#elif defined(__sun) || defined(_AIX)
+#elif defined(__sun) || defined(_AIX)
// Uses the globals: 'timezone', 'altzone' and 'tzname'.
auto tm_gmtoff(const std::tm& tm) -> decltype(timezone) {
const bool is_dst = tm.tm_isdst > 0;
diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h
index 31e8598257..4c49ecf1ca 100644
--- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h
+++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h
@@ -108,15 +108,15 @@ struct tzhead {
#ifndef TZ_MAX_TYPES
/* This must be at least 17 for Europe/Samara and Europe/Vilnius. */
#define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */
-#endif /* !defined TZ_MAX_TYPES */
+#endif /* !defined TZ_MAX_TYPES */
#ifndef TZ_MAX_CHARS
#define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */
- /* (limited by what unsigned chars can hold) */
-#endif /* !defined TZ_MAX_CHARS */
+ /* (limited by what unsigned chars can hold) */
+#endif /* !defined TZ_MAX_CHARS */
#ifndef TZ_MAX_LEAPS
#define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */
-#endif /* !defined TZ_MAX_LEAPS */
+#endif /* !defined TZ_MAX_LEAPS */
#endif /* !defined TZFILE_H */
diff --git a/contrib/restricted/abseil-cpp/absl/time/time.h b/contrib/restricted/abseil-cpp/absl/time/time.h
index 5abd815a79..1acb83aed0 100644
--- a/contrib/restricted/abseil-cpp/absl/time/time.h
+++ b/contrib/restricted/abseil-cpp/absl/time/time.h
@@ -488,12 +488,12 @@ Duration Hours(T n) {
//
// absl::Duration d = absl::Milliseconds(1500);
// int64_t isec = absl::ToInt64Seconds(d); // isec == 1
-ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Nanoseconds(Duration d);
-ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Microseconds(Duration d);
-ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d);
-ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Seconds(Duration d);
-ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Minutes(Duration d);
-ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Hours(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Nanoseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Microseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Seconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Minutes(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Hours(Duration d);
// ToDoubleNanoSeconds()
// ToDoubleMicroseconds()
@@ -510,12 +510,12 @@ ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Hours(Duration d);
//
// absl::Duration d = absl::Milliseconds(1500);
// double dsec = absl::ToDoubleSeconds(d); // dsec == 1.5
-ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleNanoseconds(Duration d);
-ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMicroseconds(Duration d);
-ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMilliseconds(Duration d);
-ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleSeconds(Duration d);
-ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMinutes(Duration d);
-ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleHours(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleNanoseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMicroseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMilliseconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleSeconds(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMinutes(Duration d);
+ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleHours(Duration d);
// FromChrono()
//
@@ -1230,15 +1230,15 @@ inline Time FromDateTime(int64_t year, int mon, int day, int hour,
//
// Converts the `tm_year`, `tm_mon`, `tm_mday`, `tm_hour`, `tm_min`, and
// `tm_sec` fields to an `absl::Time` using the given time zone. See ctime(3)
-// for a description of the expected values of the tm fields. If the civil time
-// is unique (see `absl::TimeZone::At(absl::CivilSecond)` above), the matching
-// time instant is returned. Otherwise, the `tm_isdst` field is consulted to
-// choose between the possible results. For a repeated civil time, `tm_isdst !=
-// 0` returns the matching DST instant, while `tm_isdst == 0` returns the
-// matching non-DST instant. For a skipped civil time there is no matching
-// instant, so `tm_isdst != 0` returns the DST instant, and `tm_isdst == 0`
-// returns the non-DST instant, that would have matched if the transition never
-// happened.
+// for a description of the expected values of the tm fields. If the civil time
+// is unique (see `absl::TimeZone::At(absl::CivilSecond)` above), the matching
+// time instant is returned. Otherwise, the `tm_isdst` field is consulted to
+// choose between the possible results. For a repeated civil time, `tm_isdst !=
+// 0` returns the matching DST instant, while `tm_isdst == 0` returns the
+// matching non-DST instant. For a skipped civil time there is no matching
+// instant, so `tm_isdst != 0` returns the DST instant, and `tm_isdst == 0`
+// returns the non-DST instant, that would have matched if the transition never
+// happened.
Time FromTM(const struct tm& tm, TimeZone tz);
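[Editor's note] A hedged usage sketch for the FromTM() behaviour described above, picking the repeated civil time at the end of US daylight saving; absl::LoadTimeZone(), absl::UTCTimeZone() and the zone name are taken from the wider absl time API rather than from this excerpt.

#include <ctime>
#include "absl/time/time.h"

absl::Time RepeatedCivilTimeExample() {
  absl::TimeZone nyc;
  if (!absl::LoadTimeZone("America/New_York", &nyc)) nyc = absl::UTCTimeZone();

  std::tm tm = {};
  tm.tm_year = 2021 - 1900;  // years since 1900
  tm.tm_mon = 10;            // November (tm_mon is 0-based)
  tm.tm_mday = 7;
  tm.tm_hour = 1;            // 01:30 occurs twice on 2021-11-07 in New York
  tm.tm_min = 30;
  tm.tm_isdst = 1;           // per the comment above: pick the DST (earlier) instant
  return absl::FromTM(tm, nyc);
}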
// ToTM()
diff --git a/contrib/restricted/abseil-cpp/absl/types/variant.h b/contrib/restricted/abseil-cpp/absl/types/variant.h
index ac93464bf8..5284dcb166 100644
--- a/contrib/restricted/abseil-cpp/absl/types/variant.h
+++ b/contrib/restricted/abseil-cpp/absl/types/variant.h
@@ -604,10 +604,10 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
// emplace() Functions
- // Constructs a value of the given alternative type T within the variant. The
- // existing value of the variant is destroyed first (provided that
- // `absl::valueless_by_exception()` is false). Requires that T is unambiguous
- // in the variant.
+ // Constructs a value of the given alternative type T within the variant. The
+ // existing value of the variant is destroyed first (provided that
+ // `absl::valueless_by_exception()` is false). Requires that T is unambiguous
+ // in the variant.
//
// Example:
//
@@ -627,9 +627,9 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
}
// Constructs a value of the given alternative type T within the variant using
- // an initializer list. The existing value of the variant is destroyed first
- // (provided that `absl::valueless_by_exception()` is false). Requires that T
- // is unambiguous in the variant.
+ // an initializer list. The existing value of the variant is destroyed first
+ // (provided that `absl::valueless_by_exception()` is false). Requires that T
+ // is unambiguous in the variant.
//
// Example:
//
@@ -648,7 +648,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
}
// Destroys the current value of the variant (provided that
- // `absl::valueless_by_exception()` is false) and constructs a new value at
+ // `absl::valueless_by_exception()` is false) and constructs a new value at
// the given index.
//
// Example:
@@ -667,7 +667,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
}
// Destroys the current value of the variant (provided that
- // `absl::valueless_by_exception()` is false) and constructs a new value at
+ // `absl::valueless_by_exception()` is false) and constructs a new value at
// the given index using an initializer list and the provided arguments.
//
// Example:
diff --git a/contrib/restricted/abseil-cpp/ya.make b/contrib/restricted/abseil-cpp/ya.make
index f69125bf2d..4bf2d79c59 100644
--- a/contrib/restricted/abseil-cpp/ya.make
+++ b/contrib/restricted/abseil-cpp/ya.make
@@ -46,7 +46,7 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/flags/usage_config
contrib/restricted/abseil-cpp/absl/functional
contrib/restricted/abseil-cpp/absl/hash
- contrib/restricted/abseil-cpp/absl/hash/internal
+ contrib/restricted/abseil-cpp/absl/hash/internal
contrib/restricted/abseil-cpp/absl/memory
contrib/restricted/abseil-cpp/absl/meta
contrib/restricted/abseil-cpp/absl/numeric
@@ -127,7 +127,7 @@ RECURSE(
absl/flags/usage_config
absl/functional
absl/hash
- absl/hash/internal
+ absl/hash/internal
absl/memory
absl/meta
absl/numeric
diff --git a/contrib/restricted/ya.make b/contrib/restricted/ya.make
index b52a206733..d0a8b759fa 100644
--- a/contrib/restricted/ya.make
+++ b/contrib/restricted/ya.make
@@ -34,7 +34,7 @@ RECURSE(
libffi
libiscsi
libntirpc
- librseq
+ librseq
libsigcxx
libsoup
libtorrent