| author | thegeorg <thegeorg@yandex-team.com> | 2023-03-25 20:23:17 +0300 |
|---|---|---|
| committer | thegeorg <thegeorg@yandex-team.com> | 2023-03-25 20:23:17 +0300 |
| commit | a50a4399c2600b05a086acdca3ba56c957d62196 (patch) | |
| tree | 2cf3f6cc37ccc6bd19c33a928e07dd6c083cea72 /contrib/restricted/abseil-cpp-tstring/y_absl/random/internal | |
| parent | 76f3ccf647d9cff0e38a7989dc89480854107b78 (diff) | |
| download | ydb-a50a4399c2600b05a086acdca3ba56c957d62196.tar.gz | |
Update contrib/restricted/abseil-cpp-tstring to 20230125.1
Diffstat (limited to 'contrib/restricted/abseil-cpp-tstring/y_absl/random/internal')
5 files changed, 16 insertions, 36 deletions
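Most of the churn below comes from upstream dropping the `__uint128_t` fast path in `pcg_engine.h` in favour of doing all 128-bit state arithmetic through `y_absl::uint128`, plus a round of signed-to-unsigned cleanups (`int`/`intptr_t` becoming `size_t`/`uintptr_t`/`ssize_t`). For orientation, here is a minimal standalone sketch of the xsl_rr_128_64 mixing step as it reads after the update; it is written against the public `y_absl::uint128` API with a hand-rolled rotate, so the function name, the inline `rotr`, and the include path are illustrative assumptions rather than the library's internal code.

```cpp
#include <cstdint>

#include "y_absl/numeric/int128.h"  // assumed header path in this fork

// xsl_rr_128_64-style mix: take the rotate count from the top 6 bits of the
// 128-bit state, xor-fold the high half into the low half, then rotate right.
inline uint64_t MixSketch(y_absl::uint128 state) {
  uint64_t rotate = static_cast<uint64_t>(state >> 122);  // always < 64
  state ^= state >> 64;
  uint64_t s = static_cast<uint64_t>(state);  // low 64 bits after folding
  int r = static_cast<int>(rotate & 63);
  return (s >> r) | (s << ((64 - r) & 63));  // rotr(s, r) without UB at r == 0
}
```

With the intrinsic-int128 branch removed, this path is taken unconditionally, which is why both `state_type` aliases collapse to `y_absl::uint128` in the hunks below.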
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/fast_uniform_bits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/fast_uniform_bits.h
index f4ae282010..9827d1899e 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/fast_uniform_bits.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/fast_uniform_bits.h
@@ -151,7 +151,8 @@ FastUniformBits<UIntType>::Generate(URBG& g,  // NOLINT(runtime/references)

   result_type r = static_cast<result_type>(g() - kMin);
   for (size_t n = 1; n < kIters; ++n) {
-    r = (r << kShift) + static_cast<result_type>(g() - kMin);
+    r = static_cast<result_type>(r << kShift) +
+        static_cast<result_type>(g() - kMin);
   }
   return r;
 }
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/nonsecure_base.h b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/nonsecure_base.h
index 593cd880c7..920dfe73b8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/nonsecure_base.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/nonsecure_base.h
@@ -44,7 +44,7 @@ class RandenPoolSeedSeq {
   // Generate random unsigned values directly into the buffer.
   template <typename Contiguous>
   void generate_impl(ContiguousTag, Contiguous begin, Contiguous end) {
-    const size_t n = std::distance(begin, end);
+    const size_t n = static_cast<size_t>(std::distance(begin, end));
     auto* a = &(*begin);
     RandenPool<uint8_t>::Fill(
         y_absl::MakeSpan(reinterpret_cast<uint8_t*>(a), sizeof(*a) * n));
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/pcg_engine.h b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/pcg_engine.h
index 678af064de..a6d61bc21b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/pcg_engine.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/pcg_engine.h
@@ -221,47 +221,26 @@ class pcg_engine {
 template <uint64_t kMultA, uint64_t kMultB, uint64_t kIncA, uint64_t kIncB>
 class pcg128_params {
  public:
-#if Y_ABSL_HAVE_INTRINSIC_INT128
-  using state_type = __uint128_t;
-  static inline constexpr state_type make_u128(uint64_t a, uint64_t b) {
-    return (static_cast<__uint128_t>(a) << 64) | b;
-  }
-#else
   using state_type = y_absl::uint128;
-  static inline constexpr state_type make_u128(uint64_t a, uint64_t b) {
-    return y_absl::MakeUint128(a, b);
-  }
-#endif
-
   static inline constexpr state_type multiplier() {
-    return make_u128(kMultA, kMultB);
+    return y_absl::MakeUint128(kMultA, kMultB);
   }
   static inline constexpr state_type increment() {
-    return make_u128(kIncA, kIncB);
+    return y_absl::MakeUint128(kIncA, kIncB);
   }
 };

 // Implementation of the PCG xsl_rr_128_64 128-bit mixing function, which
 // accepts an input of state_type and mixes it into an output of result_type.
 struct pcg_xsl_rr_128_64 {
-#if Y_ABSL_HAVE_INTRINSIC_INT128
-  using state_type = __uint128_t;
-#else
   using state_type = y_absl::uint128;
-#endif
   using result_type = uint64_t;

   inline uint64_t operator()(state_type state) {
     // This is equivalent to the xsl_rr_128_64 mixing function.
-#if Y_ABSL_HAVE_INTRINSIC_INT128
     uint64_t rotate = static_cast<uint64_t>(state >> 122u);
     state ^= state >> 64;
     uint64_t s = static_cast<uint64_t>(state);
-#else
-    uint64_t h = Uint128High64(state);
-    uint64_t rotate = h >> 58u;
-    uint64_t s = Uint128Low64(state) ^ h;
-#endif
     return rotr(s, static_cast<int>(rotate));
   }
 };
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/pool_urbg.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/pool_urbg.cc
index 4468bc479b..7c4c18744f 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/pool_urbg.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/pool_urbg.cc
@@ -131,7 +131,7 @@ void RandenPoolEntry::Fill(uint8_t* out, size_t bytes) {
 }

 // Number of pooled urbg entries.
-static constexpr int kPoolSize = 8;
+static constexpr size_t kPoolSize = 8;

 // Shared pool entries.
 static y_absl::once_flag pool_once;
@@ -147,15 +147,15 @@ Y_ABSL_CACHELINE_ALIGNED static RandenPoolEntry* shared_pools[kPoolSize];
 // on subsequent runs the order within the same program may be significantly
 // different. However, as other thread IDs are not assigned sequentially,
 // this is not expected to matter.
-int GetPoolID() {
+size_t GetPoolID() {
   static_assert(kPoolSize >= 1,
                 "At least one urbg instance is required for PoolURBG");

-  Y_ABSL_CONST_INIT static std::atomic<int64_t> sequence{0};
+  Y_ABSL_CONST_INIT static std::atomic<uint64_t> sequence{0};

 #ifdef Y_ABSL_HAVE_THREAD_LOCAL
-  static thread_local int my_pool_id = -1;
-  if (Y_ABSL_PREDICT_FALSE(my_pool_id < 0)) {
+  static thread_local size_t my_pool_id = kPoolSize;
+  if (Y_ABSL_PREDICT_FALSE(my_pool_id == kPoolSize)) {
     my_pool_id = (sequence++ % kPoolSize);
   }
   return my_pool_id;
@@ -171,8 +171,8 @@ int GetPoolID() {

   // Store the value in the pthread_{get/set}specific. However an uninitialized
   // value is 0, so add +1 to distinguish from the null value.
-  intptr_t my_pool_id =
-      reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+  uintptr_t my_pool_id =
+      reinterpret_cast<uintptr_t>(pthread_getspecific(tid_key));
   if (Y_ABSL_PREDICT_FALSE(my_pool_id == 0)) {
     // No allocated ID, allocate the next value, cache it, and return.
     my_pool_id = (sequence++ % kPoolSize) + 1;
@@ -194,7 +194,7 @@ RandenPoolEntry* PoolAlignedAlloc() {
   // Not all the platforms that we build for have std::aligned_alloc, however
   // since we never free these objects, we can over allocate and munge the
   // pointers to the correct alignment.
-  intptr_t x = reinterpret_cast<intptr_t>(
+  uintptr_t x = reinterpret_cast<uintptr_t>(
       new char[sizeof(RandenPoolEntry) + kAlignment]);
   auto y = x % kAlignment;
   void* aligned = reinterpret_cast<void*>(y == 0 ? x : (x + kAlignment - y));
@@ -215,7 +215,7 @@ void InitPoolURBG() {
           y_absl::MakeSpan(seed_material))) {
     random_internal::ThrowSeedGenException();
   }
-  for (int i = 0; i < kPoolSize; i++) {
+  for (size_t i = 0; i < kPoolSize; i++) {
     shared_pools[i] = PoolAlignedAlloc();
     shared_pools[i]->Init(
         y_absl::MakeSpan(&seed_material[i * kSeedSize], kSeedSize));
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/seed_material.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/seed_material.cc
index 9268912827..19cea29649 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/seed_material.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/seed_material.cc
@@ -173,12 +173,12 @@ bool ReadSeedMaterialFromDevURandom(y_absl::Span<uint32_t> values) {
   }

   while (success && buffer_size > 0) {
-    int bytes_read = read(dev_urandom, buffer, buffer_size);
+    ssize_t bytes_read = read(dev_urandom, buffer, buffer_size);
     int read_error = errno;
     success = (bytes_read > 0);
     if (success) {
       buffer += bytes_read;
-      buffer_size -= bytes_read;
+      buffer_size -= static_cast<size_t>(bytes_read);
     } else if (bytes_read == -1 && read_error == EINTR) {
       success = true;  // Need to try again.
     }
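The `PoolAlignedAlloc()` hunk keeps the trick its comment describes: because `std::aligned_alloc` is not available on every target and the pool entries are never freed, the code over-allocates by one alignment unit and rounds the pointer up by hand; the commit only moves the arithmetic from `intptr_t` to `uintptr_t`. Below is a self-contained sketch of that pattern under stated assumptions; `PoolEntry` and `AlignedAllocSketch` are made-up names standing in for `RandenPoolEntry` and the real allocator.

```cpp
#include <cstddef>
#include <cstdint>
#include <new>

// Stand-in for RandenPoolEntry; only its alignment requirement matters here.
struct alignas(64) PoolEntry {
  unsigned char state[256];
};

PoolEntry* AlignedAllocSketch() {
  static constexpr std::uintptr_t kAlignment = alignof(PoolEntry);
  // Over-allocate by kAlignment so some address inside the buffer is aligned.
  // The buffer is deliberately leaked, so losing the original pointer is fine.
  std::uintptr_t x = reinterpret_cast<std::uintptr_t>(
      new char[sizeof(PoolEntry) + kAlignment]);
  std::uintptr_t rem = x % kAlignment;  // unsigned arithmetic, as in the new code
  void* aligned =
      reinterpret_cast<void*>(rem == 0 ? x : (x + kAlignment - rem));
  return new (aligned) PoolEntry();  // construct in place at the aligned address
}
```

The same signed-to-unsigned theme shows up in `GetPoolID()`, where the thread-local slot index becomes a `size_t` with `kPoolSize` as its "unset" sentinel, and in `seed_material.cc`, where `read()`'s result is held in `ssize_t` before being subtracted from the `size_t` byte count.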