author    | prime <prime@yandex-team.ru>                 | 2022-02-10 16:46:00 +0300
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:46:00 +0300
commit    | 3695a7cd42b74a4987d8d5a8f2e2443556998943 (patch)
tree      | ee79ee9294a61ee00e647684b3700d0a87e102a3 /contrib/restricted/abseil-cpp/absl/time
parent    | 4d8b546b89b5afc08cf3667e176271c7ba935f33 (diff)
download  | ydb-3695a7cd42b74a4987d8d5a8f2e2443556998943.tar.gz
Restoring authorship annotation for <prime@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp/absl/time')
6 files changed, 187 insertions, 187 deletions
diff --git a/contrib/restricted/abseil-cpp/absl/time/clock.cc b/contrib/restricted/abseil-cpp/absl/time/clock.cc index 7b204c4ee0..bce60d53d6 100644 --- a/contrib/restricted/abseil-cpp/absl/time/clock.cc +++ b/contrib/restricted/abseil-cpp/absl/time/clock.cc @@ -15,7 +15,7 @@ #include "absl/time/clock.h" #include "absl/base/attributes.h" -#include "absl/base/optimization.h" +#include "absl/base/optimization.h" #ifdef _WIN32 #include <windows.h> @@ -152,109 +152,109 @@ static_assert(((kMinNSBetweenSamples << (kScale + 1)) >> (kScale + 1)) == // data from a sample of the kernel's time value struct TimeSampleAtomic { - std::atomic<uint64_t> raw_ns{0}; // raw kernel time - std::atomic<uint64_t> base_ns{0}; // our estimate of time - std::atomic<uint64_t> base_cycles{0}; // cycle counter reading - std::atomic<uint64_t> nsscaled_per_cycle{0}; // cycle period + std::atomic<uint64_t> raw_ns{0}; // raw kernel time + std::atomic<uint64_t> base_ns{0}; // our estimate of time + std::atomic<uint64_t> base_cycles{0}; // cycle counter reading + std::atomic<uint64_t> nsscaled_per_cycle{0}; // cycle period // cycles before we'll sample again (a scaled reciprocal of the period, // to avoid a division on the fast path). - std::atomic<uint64_t> min_cycles_per_sample{0}; + std::atomic<uint64_t> min_cycles_per_sample{0}; }; // Same again, but with non-atomic types struct TimeSample { - uint64_t raw_ns = 0; // raw kernel time - uint64_t base_ns = 0; // our estimate of time - uint64_t base_cycles = 0; // cycle counter reading - uint64_t nsscaled_per_cycle = 0; // cycle period - uint64_t min_cycles_per_sample = 0; // approx cycles before next sample + uint64_t raw_ns = 0; // raw kernel time + uint64_t base_ns = 0; // our estimate of time + uint64_t base_cycles = 0; // cycle counter reading + uint64_t nsscaled_per_cycle = 0; // cycle period + uint64_t min_cycles_per_sample = 0; // approx cycles before next sample }; -struct ABSL_CACHELINE_ALIGNED TimeState { - std::atomic<uint64_t> seq{0}; - TimeSampleAtomic last_sample; // the last sample; under seq - - // The following counters are used only by the test code. - int64_t stats_initializations{0}; - int64_t stats_reinitializations{0}; - int64_t stats_calibrations{0}; - int64_t stats_slow_paths{0}; - int64_t stats_fast_slow_paths{0}; - - uint64_t last_now_cycles ABSL_GUARDED_BY(lock){0}; - - // Used by GetCurrentTimeNanosFromKernel(). - // We try to read clock values at about the same time as the kernel clock. - // This value gets adjusted up or down as estimate of how long that should - // take, so we can reject attempts that take unusually long. - std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000}; - // Number of times in a row we've seen a kernel time call take substantially - // less than approx_syscall_time_in_cycles. - std::atomic<uint32_t> kernel_time_seen_smaller{0}; - - // A reader-writer lock protecting the static locations below. - // See SeqAcquire() and SeqRelease() above. - absl::base_internal::SpinLock lock{absl::kConstInit, - base_internal::SCHEDULE_KERNEL_ONLY}; -}; -ABSL_CONST_INIT static TimeState time_state{}; - -// Return the time in ns as told by the kernel interface. Place in *cycleclock -// the value of the cycleclock at about the time of the syscall. -// This call represents the time base that this module synchronizes to. -// Ensures that *cycleclock does not step back by up to (1 << 16) from -// last_cycleclock, to discard small backward counter steps. (Larger steps are -// assumed to be complete resyncs, which shouldn't happen. 
If they do, a full -// reinitialization of the outer algorithm should occur.) -static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock, - uint64_t *cycleclock) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) { - uint64_t local_approx_syscall_time_in_cycles = // local copy - time_state.approx_syscall_time_in_cycles.load(std::memory_order_relaxed); - - int64_t current_time_nanos_from_system; - uint64_t before_cycles; - uint64_t after_cycles; - uint64_t elapsed_cycles; - int loops = 0; - do { - before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW(); - current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM(); - after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW(); - // elapsed_cycles is unsigned, so is large on overflow - elapsed_cycles = after_cycles - before_cycles; - if (elapsed_cycles >= local_approx_syscall_time_in_cycles && - ++loops == 20) { // clock changed frequencies? Back off. - loops = 0; - if (local_approx_syscall_time_in_cycles < 1000 * 1000) { - local_approx_syscall_time_in_cycles = - (local_approx_syscall_time_in_cycles + 1) << 1; - } - time_state.approx_syscall_time_in_cycles.store( - local_approx_syscall_time_in_cycles, std::memory_order_relaxed); - } - } while (elapsed_cycles >= local_approx_syscall_time_in_cycles || - last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16)); - - // Adjust approx_syscall_time_in_cycles to be within a factor of 2 - // of the typical time to execute one iteration of the loop above. - if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) { - // measured time is no smaller than half current approximation - time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed); - } else if (time_state.kernel_time_seen_smaller.fetch_add( - 1, std::memory_order_relaxed) >= 3) { - // smaller delays several times in a row; reduce approximation by 12.5% - const uint64_t new_approximation = - local_approx_syscall_time_in_cycles - - (local_approx_syscall_time_in_cycles >> 3); - time_state.approx_syscall_time_in_cycles.store(new_approximation, - std::memory_order_relaxed); - time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed); - } - - *cycleclock = after_cycles; - return current_time_nanos_from_system; -} - +struct ABSL_CACHELINE_ALIGNED TimeState { + std::atomic<uint64_t> seq{0}; + TimeSampleAtomic last_sample; // the last sample; under seq + + // The following counters are used only by the test code. + int64_t stats_initializations{0}; + int64_t stats_reinitializations{0}; + int64_t stats_calibrations{0}; + int64_t stats_slow_paths{0}; + int64_t stats_fast_slow_paths{0}; + + uint64_t last_now_cycles ABSL_GUARDED_BY(lock){0}; + + // Used by GetCurrentTimeNanosFromKernel(). + // We try to read clock values at about the same time as the kernel clock. + // This value gets adjusted up or down as estimate of how long that should + // take, so we can reject attempts that take unusually long. + std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000}; + // Number of times in a row we've seen a kernel time call take substantially + // less than approx_syscall_time_in_cycles. + std::atomic<uint32_t> kernel_time_seen_smaller{0}; + + // A reader-writer lock protecting the static locations below. + // See SeqAcquire() and SeqRelease() above. + absl::base_internal::SpinLock lock{absl::kConstInit, + base_internal::SCHEDULE_KERNEL_ONLY}; +}; +ABSL_CONST_INIT static TimeState time_state{}; + +// Return the time in ns as told by the kernel interface. 
Place in *cycleclock +// the value of the cycleclock at about the time of the syscall. +// This call represents the time base that this module synchronizes to. +// Ensures that *cycleclock does not step back by up to (1 << 16) from +// last_cycleclock, to discard small backward counter steps. (Larger steps are +// assumed to be complete resyncs, which shouldn't happen. If they do, a full +// reinitialization of the outer algorithm should occur.) +static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock, + uint64_t *cycleclock) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) { + uint64_t local_approx_syscall_time_in_cycles = // local copy + time_state.approx_syscall_time_in_cycles.load(std::memory_order_relaxed); + + int64_t current_time_nanos_from_system; + uint64_t before_cycles; + uint64_t after_cycles; + uint64_t elapsed_cycles; + int loops = 0; + do { + before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW(); + current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM(); + after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW(); + // elapsed_cycles is unsigned, so is large on overflow + elapsed_cycles = after_cycles - before_cycles; + if (elapsed_cycles >= local_approx_syscall_time_in_cycles && + ++loops == 20) { // clock changed frequencies? Back off. + loops = 0; + if (local_approx_syscall_time_in_cycles < 1000 * 1000) { + local_approx_syscall_time_in_cycles = + (local_approx_syscall_time_in_cycles + 1) << 1; + } + time_state.approx_syscall_time_in_cycles.store( + local_approx_syscall_time_in_cycles, std::memory_order_relaxed); + } + } while (elapsed_cycles >= local_approx_syscall_time_in_cycles || + last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16)); + + // Adjust approx_syscall_time_in_cycles to be within a factor of 2 + // of the typical time to execute one iteration of the loop above. + if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) { + // measured time is no smaller than half current approximation + time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed); + } else if (time_state.kernel_time_seen_smaller.fetch_add( + 1, std::memory_order_relaxed) >= 3) { + // smaller delays several times in a row; reduce approximation by 12.5% + const uint64_t new_approximation = + local_approx_syscall_time_in_cycles - + (local_approx_syscall_time_in_cycles >> 3); + time_state.approx_syscall_time_in_cycles.store(new_approximation, + std::memory_order_relaxed); + time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed); + } + + *cycleclock = after_cycles; + return current_time_nanos_from_system; +} + static int64_t GetCurrentTimeNanosSlowPath() ABSL_ATTRIBUTE_COLD; // Read the contents of *atomic into *sample. @@ -321,15 +321,15 @@ int64_t GetCurrentTimeNanos() { // Acquire pairs with the barrier in SeqRelease - if this load sees that // store, the shared-data reads necessarily see that SeqRelease's updates // to the same shared data. 
- seq_read0 = time_state.seq.load(std::memory_order_acquire); + seq_read0 = time_state.seq.load(std::memory_order_acquire); - base_ns = time_state.last_sample.base_ns.load(std::memory_order_relaxed); - base_cycles = - time_state.last_sample.base_cycles.load(std::memory_order_relaxed); + base_ns = time_state.last_sample.base_ns.load(std::memory_order_relaxed); + base_cycles = + time_state.last_sample.base_cycles.load(std::memory_order_relaxed); nsscaled_per_cycle = - time_state.last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed); - min_cycles_per_sample = time_state.last_sample.min_cycles_per_sample.load( - std::memory_order_relaxed); + time_state.last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed); + min_cycles_per_sample = time_state.last_sample.min_cycles_per_sample.load( + std::memory_order_relaxed); // This acquire fence pairs with the release fence in SeqAcquire. Since it // is sequenced between reads of shared data and seq_read1, the reads of @@ -340,7 +340,7 @@ int64_t GetCurrentTimeNanos() { // shared-data writes are effectively release ordered. Therefore if our // shared-data reads see any of a particular update's shared-data writes, // seq_read1 is guaranteed to see that update's SeqAcquire. - seq_read1 = time_state.seq.load(std::memory_order_relaxed); + seq_read1 = time_state.seq.load(std::memory_order_relaxed); // Fast path. Return if min_cycles_per_sample has not yet elapsed since the // last sample, and we read a consistent sample. The fast path activates @@ -353,9 +353,9 @@ int64_t GetCurrentTimeNanos() { // last_sample was updated). This is harmless, because delta_cycles will wrap // and report a time much much bigger than min_cycles_per_sample. In that case // we will take the slow path. - uint64_t delta_cycles; + uint64_t delta_cycles; if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 && - (delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) { + (delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) { return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale); } return GetCurrentTimeNanosSlowPath(); @@ -395,25 +395,25 @@ static uint64_t UpdateLastSample( // TODO(absl-team): Remove this attribute when our compiler is smart enough // to do the right thing. ABSL_ATTRIBUTE_NOINLINE -static int64_t GetCurrentTimeNanosSlowPath() - ABSL_LOCKS_EXCLUDED(time_state.lock) { +static int64_t GetCurrentTimeNanosSlowPath() + ABSL_LOCKS_EXCLUDED(time_state.lock) { // Serialize access to slow-path. Fast-path readers are not blocked yet, and // code below must not modify last_sample until the seqlock is acquired. - time_state.lock.Lock(); + time_state.lock.Lock(); // Sample the kernel time base. This is the definition of // "now" if we take the slow path. uint64_t now_cycles; - uint64_t now_ns = - GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles); - time_state.last_now_cycles = now_cycles; + uint64_t now_ns = + GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles); + time_state.last_now_cycles = now_cycles; uint64_t estimated_base_ns; // ---------- // Read the "last_sample" values again; this time holding the write lock. struct TimeSample sample; - ReadTimeSampleAtomic(&time_state.last_sample, &sample); + ReadTimeSampleAtomic(&time_state.last_sample, &sample); // ---------- // Try running the fast path again; another thread may have updated the @@ -424,13 +424,13 @@ static int64_t GetCurrentTimeNanosSlowPath() // so that blocked readers can make progress without blocking new readers. 
estimated_base_ns = sample.base_ns + ((delta_cycles * sample.nsscaled_per_cycle) >> kScale); - time_state.stats_fast_slow_paths++; + time_state.stats_fast_slow_paths++; } else { estimated_base_ns = UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample); } - time_state.lock.Unlock(); + time_state.lock.Unlock(); return estimated_base_ns; } @@ -441,10 +441,10 @@ static int64_t GetCurrentTimeNanosSlowPath() static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns, uint64_t delta_cycles, const struct TimeSample *sample) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) { + ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) { uint64_t estimated_base_ns = now_ns; - uint64_t lock_value = - SeqAcquire(&time_state.seq); // acquire seqlock to block readers + uint64_t lock_value = + SeqAcquire(&time_state.seq); // acquire seqlock to block readers // The 5s in the next if-statement limits the time for which we will trust // the cycle counter and our last sample to give a reasonable result. @@ -454,16 +454,16 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns, sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns || now_ns < sample->raw_ns || now_cycles < sample->base_cycles) { // record this sample, and forget any previously known slope. - time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed); - time_state.last_sample.base_ns.store(estimated_base_ns, - std::memory_order_relaxed); - time_state.last_sample.base_cycles.store(now_cycles, - std::memory_order_relaxed); - time_state.last_sample.nsscaled_per_cycle.store(0, - std::memory_order_relaxed); - time_state.last_sample.min_cycles_per_sample.store( - 0, std::memory_order_relaxed); - time_state.stats_initializations++; + time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed); + time_state.last_sample.base_ns.store(estimated_base_ns, + std::memory_order_relaxed); + time_state.last_sample.base_cycles.store(now_cycles, + std::memory_order_relaxed); + time_state.last_sample.nsscaled_per_cycle.store(0, + std::memory_order_relaxed); + time_state.last_sample.min_cycles_per_sample.store( + 0, std::memory_order_relaxed); + time_state.stats_initializations++; } else if (sample->raw_ns + 500 * 1000 * 1000 < now_ns && sample->base_cycles + 50 < now_cycles) { // Enough time has passed to compute the cycle time. 
@@ -506,32 +506,32 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns, if (new_nsscaled_per_cycle != 0 && diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) { // record the cycle time measurement - time_state.last_sample.nsscaled_per_cycle.store( + time_state.last_sample.nsscaled_per_cycle.store( new_nsscaled_per_cycle, std::memory_order_relaxed); uint64_t new_min_cycles_per_sample = SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle); - time_state.last_sample.min_cycles_per_sample.store( + time_state.last_sample.min_cycles_per_sample.store( new_min_cycles_per_sample, std::memory_order_relaxed); - time_state.stats_calibrations++; + time_state.stats_calibrations++; } else { // something went wrong; forget the slope - time_state.last_sample.nsscaled_per_cycle.store( - 0, std::memory_order_relaxed); - time_state.last_sample.min_cycles_per_sample.store( - 0, std::memory_order_relaxed); + time_state.last_sample.nsscaled_per_cycle.store( + 0, std::memory_order_relaxed); + time_state.last_sample.min_cycles_per_sample.store( + 0, std::memory_order_relaxed); estimated_base_ns = now_ns; - time_state.stats_reinitializations++; + time_state.stats_reinitializations++; } - time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed); - time_state.last_sample.base_ns.store(estimated_base_ns, - std::memory_order_relaxed); - time_state.last_sample.base_cycles.store(now_cycles, - std::memory_order_relaxed); + time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed); + time_state.last_sample.base_ns.store(estimated_base_ns, + std::memory_order_relaxed); + time_state.last_sample.base_cycles.store(now_cycles, + std::memory_order_relaxed); } else { // have a sample, but no slope; waiting for enough time for a calibration - time_state.stats_slow_paths++; + time_state.stats_slow_paths++; } - SeqRelease(&time_state.seq, lock_value); // release the readers + SeqRelease(&time_state.seq, lock_value); // release the readers return estimated_base_ns; } @@ -573,8 +573,8 @@ ABSL_NAMESPACE_END extern "C" { -ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)( - absl::Duration duration) { +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)( + absl::Duration duration) { while (duration > absl::ZeroDuration()) { absl::Duration to_sleep = std::min(duration, absl::MaxSleep()); absl::SleepOnce(to_sleep); diff --git a/contrib/restricted/abseil-cpp/absl/time/clock.h b/contrib/restricted/abseil-cpp/absl/time/clock.h index 5fe244d638..1cf616ff16 100644 --- a/contrib/restricted/abseil-cpp/absl/time/clock.h +++ b/contrib/restricted/abseil-cpp/absl/time/clock.h @@ -64,11 +64,11 @@ ABSL_NAMESPACE_END // By changing our extension points to be extern "C", we dodge this // check. 
extern "C" { -void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(absl::Duration duration); +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(absl::Duration duration); } // extern "C" inline void absl::SleepFor(absl::Duration duration) { - ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(duration); + ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(duration); } #endif // ABSL_TIME_CLOCK_H_ diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h index 8aadde57ca..a0be43ae1f 100644 --- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h +++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h @@ -416,10 +416,10 @@ class civil_time { // Assigning arithmetic. CONSTEXPR_M civil_time& operator+=(diff_t n) noexcept { - return *this = *this + n; + return *this = *this + n; } CONSTEXPR_M civil_time& operator-=(diff_t n) noexcept { - return *this = *this - n; + return *this = *this - n; } CONSTEXPR_M civil_time& operator++() noexcept { return *this += 1; } CONSTEXPR_M civil_time operator++(int) noexcept { @@ -436,15 +436,15 @@ class civil_time { // Binary arithmetic operators. friend CONSTEXPR_F civil_time operator+(civil_time a, diff_t n) noexcept { - return civil_time(step(T{}, a.f_, n)); + return civil_time(step(T{}, a.f_, n)); } friend CONSTEXPR_F civil_time operator+(diff_t n, civil_time a) noexcept { - return a + n; + return a + n; } friend CONSTEXPR_F civil_time operator-(civil_time a, diff_t n) noexcept { - return n != (std::numeric_limits<diff_t>::min)() - ? civil_time(step(T{}, a.f_, -n)) - : civil_time(step(T{}, step(T{}, a.f_, -(n + 1)), 1)); + return n != (std::numeric_limits<diff_t>::min)() + ? civil_time(step(T{}, a.f_, -n)) + : civil_time(step(T{}, step(T{}, a.f_, -(n + 1)), 1)); } friend CONSTEXPR_F diff_t operator-(civil_time lhs, civil_time rhs) noexcept { return difference(T{}, lhs.f_, rhs.f_); diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc index 887dd097c6..fdd6ab25fd 100644 --- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc +++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc @@ -27,12 +27,12 @@ #include "absl/time/internal/cctz/include/cctz/civil_time.h" #include "absl/time/internal/cctz/include/cctz/time_zone.h" -#if defined(_AIX) -extern "C" { -extern long altzone; -} -#endif - +#if defined(_AIX) +extern "C" { +extern long altzone; +} +#endif + namespace absl { ABSL_NAMESPACE_BEGIN namespace time_internal { @@ -50,7 +50,7 @@ auto tm_zone(const std::tm& tm) -> decltype(_tzname[0]) { const bool is_dst = tm.tm_isdst > 0; return _tzname[is_dst]; } -#elif defined(__sun) || defined(_AIX) +#elif defined(__sun) || defined(_AIX) // Uses the globals: 'timezone', 'altzone' and 'tzname'. 
auto tm_gmtoff(const std::tm& tm) -> decltype(timezone) { const bool is_dst = tm.tm_isdst > 0; diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h index 31e8598257..4c49ecf1ca 100644 --- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h +++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h @@ -108,15 +108,15 @@ struct tzhead { #ifndef TZ_MAX_TYPES /* This must be at least 17 for Europe/Samara and Europe/Vilnius. */ #define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */ -#endif /* !defined TZ_MAX_TYPES */ +#endif /* !defined TZ_MAX_TYPES */ #ifndef TZ_MAX_CHARS #define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */ - /* (limited by what unsigned chars can hold) */ -#endif /* !defined TZ_MAX_CHARS */ + /* (limited by what unsigned chars can hold) */ +#endif /* !defined TZ_MAX_CHARS */ #ifndef TZ_MAX_LEAPS #define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */ -#endif /* !defined TZ_MAX_LEAPS */ +#endif /* !defined TZ_MAX_LEAPS */ #endif /* !defined TZFILE_H */ diff --git a/contrib/restricted/abseil-cpp/absl/time/time.h b/contrib/restricted/abseil-cpp/absl/time/time.h index 5abd815a79..1acb83aed0 100644 --- a/contrib/restricted/abseil-cpp/absl/time/time.h +++ b/contrib/restricted/abseil-cpp/absl/time/time.h @@ -488,12 +488,12 @@ Duration Hours(T n) { // // absl::Duration d = absl::Milliseconds(1500); // int64_t isec = absl::ToInt64Seconds(d); // isec == 1 -ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Nanoseconds(Duration d); -ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Microseconds(Duration d); -ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d); -ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Seconds(Duration d); -ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Minutes(Duration d); -ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Hours(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Nanoseconds(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Microseconds(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Seconds(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Minutes(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Hours(Duration d); // ToDoubleNanoSeconds() // ToDoubleMicroseconds() @@ -510,12 +510,12 @@ ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Hours(Duration d); // // absl::Duration d = absl::Milliseconds(1500); // double dsec = absl::ToDoubleSeconds(d); // dsec == 1.5 -ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleNanoseconds(Duration d); -ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMicroseconds(Duration d); -ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMilliseconds(Duration d); -ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleSeconds(Duration d); -ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMinutes(Duration d); -ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleHours(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleNanoseconds(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMicroseconds(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMilliseconds(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleSeconds(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleMinutes(Duration d); +ABSL_ATTRIBUTE_PURE_FUNCTION double ToDoubleHours(Duration d); // FromChrono() // @@ -1230,15 +1230,15 @@ inline Time FromDateTime(int64_t year, int mon, int day, int hour, // 
// Converts the `tm_year`, `tm_mon`, `tm_mday`, `tm_hour`, `tm_min`, and // `tm_sec` fields to an `absl::Time` using the given time zone. See ctime(3) -// for a description of the expected values of the tm fields. If the civil time -// is unique (see `absl::TimeZone::At(absl::CivilSecond)` above), the matching -// time instant is returned. Otherwise, the `tm_isdst` field is consulted to -// choose between the possible results. For a repeated civil time, `tm_isdst != -// 0` returns the matching DST instant, while `tm_isdst == 0` returns the -// matching non-DST instant. For a skipped civil time there is no matching -// instant, so `tm_isdst != 0` returns the DST instant, and `tm_isdst == 0` -// returns the non-DST instant, that would have matched if the transition never -// happened. +// for a description of the expected values of the tm fields. If the civil time +// is unique (see `absl::TimeZone::At(absl::CivilSecond)` above), the matching +// time instant is returned. Otherwise, the `tm_isdst` field is consulted to +// choose between the possible results. For a repeated civil time, `tm_isdst != +// 0` returns the matching DST instant, while `tm_isdst == 0` returns the +// matching non-DST instant. For a skipped civil time there is no matching +// instant, so `tm_isdst != 0` returns the DST instant, and `tm_isdst == 0` +// returns the non-DST instant, that would have matched if the transition never +// happened. Time FromTM(const struct tm& tm, TimeZone tz); // ToTM() |
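
The GetCurrentTimeNanos() changes in the clock.cc hunks above all sit on a seqlock: readers load the sequence counter, perform relaxed reads of the shared sample, and only trust the result if the counter was even and unchanged; otherwise they fall to the slow path. Below is a minimal standalone sketch of that reader/writer protocol, using a std::mutex as a stand-in for the SpinLock that serializes writers; the names here are illustrative, not Abseil's internal API.

#include <atomic>
#include <cstdint>
#include <mutex>

// Shared data lives in relaxed atomics; `seq` is odd while a writer is
// mid-update, and readers only trust data observed under a stable, even `seq`.
struct SampleAtomic {
  std::atomic<uint64_t> base_ns{0};
  std::atomic<uint64_t> base_cycles{0};
};

std::atomic<uint64_t> seq{0};
SampleAtomic last_sample;
std::mutex writer_lock;  // stand-in for the writer-serializing spinlock

void Publish(uint64_t ns, uint64_t cycles) {
  std::lock_guard<std::mutex> guard(writer_lock);
  seq.fetch_add(1, std::memory_order_relaxed);          // odd: a writer is active
  std::atomic_thread_fence(std::memory_order_release);  // pairs with the acquire fence in TryRead()
  last_sample.base_ns.store(ns, std::memory_order_relaxed);
  last_sample.base_cycles.store(cycles, std::memory_order_relaxed);
  seq.fetch_add(1, std::memory_order_release);          // back to even: publishes the update
}

bool TryRead(uint64_t* ns, uint64_t* cycles) {
  uint64_t seq0 = seq.load(std::memory_order_acquire);  // pairs with the final increment in Publish()
  *ns = last_sample.base_ns.load(std::memory_order_relaxed);
  *cycles = last_sample.base_cycles.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire);  // pairs with the release fence in Publish()
  uint64_t seq1 = seq.load(std::memory_order_relaxed);
  return seq0 == seq1 && (seq0 & 1) == 0;  // consistent only if no writer intervened
}

int main() {
  Publish(1000000000, 42000000);
  uint64_t ns, cycles;
  return TryRead(&ns, &cycles) ? 0 : 1;
}

The fast path in the hunk above has the same shape as TryRead(), with one extra condition: the elapsed cycle count must still be below min_cycles_per_sample before it extrapolates instead of resampling the kernel.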
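
When that consistency check passes, the fast path converts elapsed cycles to nanoseconds without dividing: nsscaled_per_cycle holds the cycle period in fixed point (nanoseconds per cycle, scaled by 2^kScale), so the estimate is base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale). A self-contained illustration, assuming a made-up kScale and a 3 GHz counter purely for the arithmetic:

#include <cstdint>
#include <cstdio>

// Illustrative fixed-point extrapolation; kScale and the clock rate here are
// invented for the example, not Abseil's actual constants.
constexpr int kScale = 30;

int main() {
  // A 3 GHz cycle counter ticks every 1/3 ns, so store (1/3) * 2^kScale.
  uint64_t nsscaled_per_cycle = (uint64_t{1} << kScale) / 3;

  uint64_t base_ns = 1000000000;                 // kernel time at the last sample
  uint64_t base_cycles = 42000000;               // cycle counter at the last sample
  uint64_t now_cycles = base_cycles + 6000000;   // ~2 ms worth of cycles later

  uint64_t delta_cycles = now_cycles - base_cycles;
  uint64_t now_ns = base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale);
  std::printf("estimated now = %llu ns (about 2 ms past the sample)\n",
              static_cast<unsigned long long>(now_ns));
}

The slow path's UpdateLastSample(), shown in the later hunks, is what refreshes that slope: it recalibrates nsscaled_per_cycle from two kernel samples, derives min_cycles_per_sample from it via SafeDivideAndScale(kMinNSBetweenSamples, ...), and zeroes both when the measurement looks wrong so readers keep going back to the kernel until the next calibration.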
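
GetCurrentTimeNanosFromKernel() brackets the kernel clock read between two cycle-counter reads and rejects any sample whose bracket exceeds an adaptive estimate of the syscall cost (approx_syscall_time_in_cycles), growing the estimate when reads keep failing and shrinking it by 12.5% after several clearly faster reads. The sketch below mirrors that retry-and-adapt loop with portable stand-ins (std::chrono in place of the real cycle counter and kernel clock) and drops the backward-step check; every name and constant here is illustrative.

#include <atomic>
#include <chrono>
#include <cstdint>

static uint64_t CounterNow() {  // stand-in for the cycle counter
  return static_cast<uint64_t>(
      std::chrono::steady_clock::now().time_since_epoch().count());
}
static int64_t KernelNowNs() {  // stand-in for the kernel time base
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             std::chrono::system_clock::now().time_since_epoch())
      .count();
}

std::atomic<uint64_t> approx_syscall_cost{10 * 1000};  // adaptive upper bound
std::atomic<uint32_t> seen_smaller{0};

int64_t SampleKernelTime(uint64_t* counter_at_sample) {
  uint64_t budget = approx_syscall_cost.load(std::memory_order_relaxed);
  uint64_t before, after, elapsed;
  int64_t ns;
  int rejected = 0;
  do {
    before = CounterNow();
    ns = KernelNowNs();
    after = CounterNow();
    elapsed = after - before;  // unsigned, so an overflow just looks "large"
    if (elapsed >= budget && ++rejected == 20) {
      // Rejected many times in a row: the counter may have changed frequency,
      // so widen the budget instead of spinning forever.
      rejected = 0;
      budget = (budget + 1) << 1;
      approx_syscall_cost.store(budget, std::memory_order_relaxed);
    }
  } while (elapsed >= budget);

  // Track reads that were much faster than the budget; after a few in a row,
  // tighten the budget by 12.5% so samples stay well bracketed.
  if (elapsed >= budget / 2) {
    seen_smaller.store(0, std::memory_order_relaxed);
  } else if (seen_smaller.fetch_add(1, std::memory_order_relaxed) >= 3) {
    approx_syscall_cost.store(budget - budget / 8, std::memory_order_relaxed);
    seen_smaller.store(0, std::memory_order_relaxed);
  }

  *counter_at_sample = after;
  return ns;
}

int main() {
  uint64_t counter = 0;
  return SampleKernelTime(&counter) > 0 ? 0 : 1;
}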
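
The clock.h hunk routes absl::SleepFor() through an extern "C" symbol, and the matching definition in clock.cc is marked ABSL_ATTRIBUTE_WEAK and sleeps in chunks of at most absl::MaxSleep(); since the default is weak, a program can supply its own definition at link time to take over sleeping. Ordinary callers just use the public wrapper:

#include "absl/time/clock.h"
#include "absl/time/time.h"

int main() {
  // Forwards to the (weak, extern "C") AbslInternalSleepFor extension point.
  absl::SleepFor(absl::Milliseconds(20));
}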
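
In the civil_time_detail.h hunk, operator-(civil_time, diff_t) special-cases n == std::numeric_limits<diff_t>::min(): negating the minimum of a signed type overflows, so the code steps by -(n + 1) and then by one more unit instead. A tiny standalone illustration of the trick (plain int64_t, not cctz's field types):

#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  int64_t n = std::numeric_limits<int64_t>::min();
  // int64_t bad = -n;      // undefined behavior: +2^63 does not fit in int64_t
  int64_t part = -(n + 1);  // 2^63 - 1, representable
  std::printf("-(n + 1) = %lld; one more unit step covers the rest of |n|\n",
              static_cast<long long>(part));
}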
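
The ToInt64*() and ToDouble*() declarations in the time.h hunk behave as the surrounding comments say: the integer forms truncate toward zero, the double forms keep the fractional part. A minimal usage example:

#include <cstdio>
#include "absl/time/time.h"

int main() {
  absl::Duration d = absl::Milliseconds(1500);
  std::printf("%lld s truncated, %.1f s exact\n",
              static_cast<long long>(absl::ToInt64Seconds(d)),  // 1
              absl::ToDoubleSeconds(d));                        // 1.5
}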
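
FromTM()'s tm_isdst handling, documented in the last time.h hunk, only matters when the civil time is ambiguous. The sketch below picks one concrete repeated civil time (01:30 on 2021-11-07 in America/New_York, when clocks fall back at 02:00) to show the two instants it can select; the zone and date are just an example.

#include <cstdio>
#include <ctime>
#include "absl/time/time.h"

int main() {
  absl::TimeZone nyc;
  if (!absl::LoadTimeZone("America/New_York", &nyc)) return 1;

  std::tm tm = {};
  tm.tm_year = 2021 - 1900;
  tm.tm_mon = 10;  // November (tm_mon is zero-based)
  tm.tm_mday = 7;
  tm.tm_hour = 1;
  tm.tm_min = 30;

  tm.tm_isdst = 1;                            // earlier instant (EDT, UTC-4)
  absl::Time on_dst = absl::FromTM(tm, nyc);
  tm.tm_isdst = 0;                            // later instant (EST, UTC-5)
  absl::Time off_dst = absl::FromTM(tm, nyc);

  std::printf("the two readings of 01:30 differ by %lld minutes\n",
              static_cast<long long>(absl::ToInt64Minutes(off_dst - on_dst)));  // 60
}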