// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_RANDOM_INTERNAL_FAST_UNIFORM_BITS_H_
#define ABSL_RANDOM_INTERNAL_FAST_UNIFORM_BITS_H_

#include <cstddef>
#include <cstdint>
#include <limits>
#include <type_traits>

#include "absl/base/config.h"
#include "absl/meta/type_traits.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
// Returns true if the input value is zero or a power of two. Useful for
// determining whether the range of output values produced by a URBG spans a
// power-of-two number of values.
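//
// For example: IsPowerOfTwoOrZero(0), IsPowerOfTwoOrZero(1), and
// IsPowerOfTwoOrZero(64) are true, while IsPowerOfTwoOrZero(12) is false.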
template <typename UIntType>
constexpr bool IsPowerOfTwoOrZero(UIntType n) {
  return (n == 0) || ((n & (n - 1)) == 0);
}

// Computes the length of the range of values producible by the URBG, or returns
// zero if that would encompass the entire range of representable values in
// URBG::result_type.
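//
// For example, for a hypothetical URBG with `min() == 1` and `max() == 6`
// (a six-sided die), RangeSize() is 6; for a URBG whose outputs cover every
// value of result_type, RangeSize() is 0.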
template <typename URBG>
constexpr typename URBG::result_type RangeSize() {
  using result_type = typename URBG::result_type;
  static_assert((URBG::max)() != (URBG::min)(), "URBG range cannot be 0.");
  return ((URBG::max)() == (std::numeric_limits<result_type>::max)() &&
          (URBG::min)() == std::numeric_limits<result_type>::lowest())
             ? result_type{0}
             : ((URBG::max)() - (URBG::min)() + result_type{1});
}

// Computes the floor of the base-2 logarithm of `n`,
// i.e. std::floor(std::log2(n)).
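//
// For example: IntegerLog2(1) == 0, IntegerLog2(8) == 3, and
// IntegerLog2(1000) == 9.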
template <typename UIntType>
constexpr UIntType IntegerLog2(UIntType n) {
  return (n <= 1) ? 0 : 1 + IntegerLog2(n >> 1);
}

// Returns the number of random bits contributed by a single call to the URBG:
// the full width of `URBG::result_type` when the generator covers its entire
// representable range, and `IntegerLog2(RangeSize<URBG>())` otherwise.
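//
// For example, a hypothetical URBG with `min() == 1` and `max() == 6` has a
// range of 6 values and so contributes NumBits() == 2 usable bits per call,
// while one covering the full range of a 32-bit result_type contributes 32.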
template <typename URBG>
constexpr size_t NumBits() {
  return RangeSize<URBG>() == 0
             ? std::numeric_limits<typename URBG::result_type>::digits
             : IntegerLog2(RangeSize<URBG>());
}

// Given a shift value `n`, constructs a mask with exactly the low `n` bits set.
// If `n == 0`, all bits are set.
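//
// For example, MaskFromShift<uint8_t>(3) == 0b00000111, and
// MaskFromShift<uint8_t>(0) == 0b11111111 (all bits set).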
template <typename UIntType>
constexpr UIntType MaskFromShift(size_t n) {
  return ((n % std::numeric_limits<UIntType>::digits) == 0)
             ? ~UIntType{0}
             : (UIntType{1} << n) - UIntType{1};
}

// Tags used to dispatch FastUniformBits::Generate to the simple or the more
// complex entropy extraction algorithm.
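//
// For example, std::mt19937 (whose outputs span a power-of-two range)
// dispatches to SimplifiedLoopTag, whereas std::minstd_rand (whose range is
// 2^31 - 2) dispatches to RejectionLoopTag.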
struct SimplifiedLoopTag {};
struct RejectionLoopTag {};

// FastUniformBits implements a fast path to acquire uniform independent bits
// from a type which conforms to the [rand.req.urbg] concept.
// Parameterized by:
//  `UIntType`: the result (output) type
//
// The std::independent_bits_engine [rand.adapt.ibits] adaptor can be
// instantiated from an existing generator through a copy or a move. It does
// not, however, facilitate the production of pseudorandom bits from an un-owned
// generator that will outlive the std::independent_bits_engine instance.
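//
// Example (illustrative; any conforming URBG, e.g. std::mt19937, works):
//
//   std::mt19937 gen;
//   FastUniformBits<uint64_t> fast_bits;
//   uint64_t value = fast_bits(gen);  // 64 uniformly distributed bits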
template <typename UIntType = uint64_t>
class FastUniformBits {
 public:
  using result_type = UIntType;

  static constexpr result_type(min)() { return 0; }
  static constexpr result_type(max)() {
    return (std::numeric_limits<result_type>::max)();
  }

  template <typename URBG>
  result_type operator()(URBG& g);  // NOLINT(runtime/references)

 private:
  static_assert(std::is_unsigned<UIntType>::value,
                "Class-template FastUniformBits<> must be parameterized using "
                "an unsigned type.");

  // Generate() generates a random value, dispatched on whether
  // the underlying URBG must use rejection sampling to generate a value,
  // or whether a simplified loop will suffice.
  template <typename URBG>
  result_type Generate(URBG& g,  // NOLINT(runtime/references)
                       SimplifiedLoopTag);

  template <typename URBG>
  result_type Generate(URBG& g,  // NOLINT(runtime/references)
                       RejectionLoopTag);
};

template <typename UIntType>
template <typename URBG>
typename FastUniformBits<UIntType>::result_type
FastUniformBits<UIntType>::operator()(URBG& g) {  // NOLINT(runtime/references)
  // Select the entropy extraction strategy: a simplified loop when the width
  // of the URBG range is a power of 2 (or the URBG covers the full range of
  // its result_type), and a rejection-sampling loop otherwise.
  static_assert((URBG::max)() > (URBG::min)(),
                "URBG::max and URBG::min may not be equal.");

  using tag = absl::conditional_t<IsPowerOfTwoOrZero(RangeSize<URBG>()),
                                  SimplifiedLoopTag, RejectionLoopTag>;
  return Generate(g, tag{});
}

template <typename UIntType>
template <typename URBG>
typename FastUniformBits<UIntType>::result_type
FastUniformBits<UIntType>::Generate(URBG& g,  // NOLINT(runtime/references)
                                    SimplifiedLoopTag) {
  // The simplified version of FastUniformBits works only on URBGs that have
  // a range that is a power of 2. In this case we simply loop and shift without
  // attempting to balance the bits across calls.
  static_assert(IsPowerOfTwoOrZero(RangeSize<URBG>()),
                "incorrect Generate tag for URBG instance");

  static constexpr size_t kResultBits =
      std::numeric_limits<result_type>::digits;
  static constexpr size_t kUrbgBits = NumBits<URBG>();
  static constexpr size_t kIters =
      (kResultBits / kUrbgBits) + (kResultBits % kUrbgBits != 0);
  static constexpr size_t kShift = (kIters == 1) ? 0 : kUrbgBits;
  static constexpr auto kMin = (URBG::min)();
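  // For example, with `result_type == uint64_t` and a URBG contributing 32
  // bits per call (`kUrbgBits == 32`): `kIters == 2` and `kShift == 32`, so
  // two successive draws are concatenated to form the 64-bit result.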

  result_type r = static_cast<result_type>(g() - kMin);
  for (size_t n = 1; n < kIters; ++n) {
    r = (r << kShift) + static_cast<result_type>(g() - kMin);
  }
  return r;
}

template <typename UIntType>
template <typename URBG>
typename FastUniformBits<UIntType>::result_type
FastUniformBits<UIntType>::Generate(URBG& g,  // NOLINT(runtime/references)
                                    RejectionLoopTag) {
  static_assert(!IsPowerOfTwoOrZero(RangeSize<URBG>()),
                "incorrect Generate tag for URBG instance");
  using urbg_result_type = typename URBG::result_type;

  // See [rand.adapt.ibits] for more details on the constants calculated below.
  //
  // It is preferable to use roughly the same number of bits from each generator
  // call; however, this is only possible when the number of bits provided by
  // the URBG is a divisor of the number of bits in `result_type`. In all other
  // cases the number of bits used cannot always be the same, but it can be
  // guaranteed to be off by at most 1. Thus we run two loops, one extracting a
  // smaller number of bits per call (`kSmallBits`) and one extracting a larger
  // number (`kLargeBits == kSmallBits + 1`). The loops run `kSmallIters` and
  // `kLargeIters` (i.e., `kTotalIters - kSmallIters`) times respectively, such
  // that
  //
  //    `kResultBits == kSmallIters * kSmallBits
  //                    + kLargeIters * kLargeBits`
  //
  // where `kResultBits` is the total number of bits in `result_type`.
  //
  static constexpr size_t kResultBits =
      std::numeric_limits<result_type>::digits;                      // w
  static constexpr urbg_result_type kUrbgRange = RangeSize<URBG>();  // R
  static constexpr size_t kUrbgBits = NumBits<URBG>();               // m

  // compute the initial estimate of the bits used.
  // [rand.adapt.ibits] 2 (c)
  static constexpr size_t kA =  // ceil(w/m)
      (kResultBits / kUrbgBits) + ((kResultBits % kUrbgBits) != 0);  // n'

  static constexpr size_t kABits = kResultBits / kA;  // w0'
  static constexpr urbg_result_type kARejection =
      ((kUrbgRange >> kABits) << kABits);  // y0'

  // refine the selection to reduce the rejection frequency.
  static constexpr size_t kTotalIters =
      ((kUrbgRange - kARejection) <= (kARejection / kA)) ? kA : (kA + 1);  // n

  // [rand.adapt.ibits] 2 (b)
  static constexpr size_t kSmallIters =
      kTotalIters - (kResultBits % kTotalIters);                   // n0
  static constexpr size_t kSmallBits = kResultBits / kTotalIters;  // w0
  static constexpr urbg_result_type kSmallRejection =
      ((kUrbgRange >> kSmallBits) << kSmallBits);  // y0

  static constexpr size_t kLargeBits = kSmallBits + 1;  // w0+1
  static constexpr urbg_result_type kLargeRejection =
      ((kUrbgRange >> kLargeBits) << kLargeBits);  // y1

  //
  // Because `kLargeBits == kSmallBits + 1`, it follows that
  //
  //     `kResultBits == kTotalIters * kSmallBits + kLargeIters`
  //
  // and therefore
  //
  //     `kLargeIters == kResultBits % kTotalIters`
  //
  // Intuitively, each iteration with the large width accounts for one unit
  // of the remainder when `kResultBits` is divided by `kTotalIters`. As
  // mentioned above, when `kTotalIters` divides `kResultBits` evenly there is
  // no need for any large iterations (i.e., one loop suffices), and indeed,
  // in that case, `kLargeIters` is zero.
  static_assert(kResultBits == kSmallIters * kSmallBits +
                                   (kTotalIters - kSmallIters) * kLargeBits,
                "Error in looping constant calculations.");

  // The small shift is essentially `kSmallBits`; however, when generating a
  // result_type narrower than the URBG's output, a single call can supply all
  // of the bits (`kSmallBits == kResultBits`), in which case the shift must be
  // reduced to 0 to avoid shifting by the full width of result_type.
  static constexpr size_t kSmallShift = kSmallBits % kResultBits;
  static constexpr auto kSmallMask =
      MaskFromShift<urbg_result_type>(kSmallShift);
  static constexpr size_t kLargeShift = kLargeBits % kResultBits;
  static constexpr auto kLargeMask =
      MaskFromShift<urbg_result_type>(kLargeShift);

  static constexpr auto kMin = (URBG::min)();
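  // Worked example (illustrative), assuming a std::minstd_rand-like URBG
  // (`min() == 1`, `max() == 2^31 - 2`) and `result_type == uint64_t`:
  //   kUrbgRange == 2^31 - 2, kUrbgBits == 30, kResultBits == 64,
  //   kA == 3, kABits == 21, kTotalIters == 3, kSmallIters == 2,
  //   kSmallBits == 21, kLargeBits == 22  (2 * 21 + 1 * 22 == 64 bits).
  // Each draw below is rejection-sampled so that the retained low bits are
  // uniformly distributed before being appended to the result.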

  result_type s = 0;
  for (size_t n = 0; n < kSmallIters; ++n) {
    urbg_result_type v;
    do {
      v = g() - kMin;
    } while (v >= kSmallRejection);

    s = (s << kSmallShift) + static_cast<result_type>(v & kSmallMask);
  }

  for (size_t n = kSmallIters; n < kTotalIters; ++n) {
    urbg_result_type v;
    do {
      v = g() - kMin;
    } while (v >= kLargeRejection);

    s = (s << kLargeShift) + static_cast<result_type>(v & kLargeMask);
  }
  return s;
}

}  // namespace random_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_RANDOM_INTERNAL_FAST_UNIFORM_BITS_H_