1 // Copyright 2018-2020 Developers of the Rand project.
2 // Copyright 2017 The Rust Project Developers.
3 //
4 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
5 // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
7 // option. This file may not be copied, modified, or distributed
8 // except according to those terms.
9
10 //! A distribution uniformly sampling numbers within a given range.
11 //!
12 //! [`Uniform`] is the standard distribution to sample uniformly from a range;
13 //! e.g. `Uniform::new_inclusive(1, 6)` can sample integers from 1 to 6, like a
14 //! standard die. [`Rng::gen_range`] supports any type supported by
15 //! [`Uniform`].
16 //!
17 //! This distribution is provided with support for several primitive types
18 //! (all integer and floating-point types) as well as [`std::time::Duration`],
19 //! and supports extension to user-defined types via a type-specific *back-end*
20 //! implementation.
21 //!
22 //! The types [`UniformInt`], [`UniformFloat`] and [`UniformDuration`] are the
23 //! back-ends supporting sampling from primitive integer and floating-point
24 //! ranges as well as from [`std::time::Duration`]; these types do not normally
25 //! need to be used directly (unless implementing a derived back-end).
26 //!
27 //! # Example usage
28 //!
29 //! ```
30 //! use rand::{Rng, thread_rng};
31 //! use rand::distributions::Uniform;
32 //!
33 //! let mut rng = thread_rng();
34 //! let side = Uniform::new(-10.0, 10.0);
35 //!
36 //! // sample between 1 and 10 points
37 //! for _ in 0..rng.gen_range(1..=10) {
//!     // sample a point from the square with sides from -10 to 10, in two dimensions
39 //! let (x, y) = (rng.sample(side), rng.sample(side));
40 //! println!("Point: {}, {}", x, y);
41 //! }
42 //! ```
43 //!
44 //! # Extending `Uniform` to support a custom type
45 //!
46 //! To extend [`Uniform`] to support your own types, write a back-end which
47 //! implements the [`UniformSampler`] trait, then implement the [`SampleUniform`]
48 //! helper trait to "register" your back-end. See the `MyF32` example below.
49 //!
50 //! At a minimum, the back-end needs to store any parameters needed for sampling
51 //! (e.g. the target range) and implement `new`, `new_inclusive` and `sample`.
52 //! Those methods should include an assert to check the range is valid (i.e.
53 //! `low < high`). The example below merely wraps another back-end.
54 //!
55 //! The `new`, `new_inclusive` and `sample_single` functions use arguments of
//! type `SampleBorrow<X>` in order to support passing in values by reference or
57 //! by value. In the implementation of these functions, you can choose to
58 //! simply use the reference returned by [`SampleBorrow::borrow`], or you can choose
59 //! to copy or clone the value, whatever is appropriate for your type.
60 //!
61 //! ```
62 //! use rand::prelude::*;
63 //! use rand::distributions::uniform::{Uniform, SampleUniform,
64 //! UniformSampler, UniformFloat, SampleBorrow};
65 //!
66 //! struct MyF32(f32);
67 //!
68 //! #[derive(Clone, Copy, Debug)]
69 //! struct UniformMyF32(UniformFloat<f32>);
70 //!
71 //! impl UniformSampler for UniformMyF32 {
72 //! type X = MyF32;
73 //! fn new<B1, B2>(low: B1, high: B2) -> Self
74 //! where B1: SampleBorrow<Self::X> + Sized,
75 //! B2: SampleBorrow<Self::X> + Sized
76 //! {
77 //! UniformMyF32(UniformFloat::<f32>::new(low.borrow().0, high.borrow().0))
78 //! }
79 //! fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
80 //! where B1: SampleBorrow<Self::X> + Sized,
81 //! B2: SampleBorrow<Self::X> + Sized
82 //! {
83 //! UniformSampler::new(low, high)
84 //! }
85 //! fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
86 //! MyF32(self.0.sample(rng))
87 //! }
88 //! }
89 //!
90 //! impl SampleUniform for MyF32 {
91 //! type Sampler = UniformMyF32;
92 //! }
93 //!
94 //! let (low, high) = (MyF32(17.0f32), MyF32(22.0f32));
95 //! let uniform = Uniform::new(low, high);
96 //! let x = uniform.sample(&mut thread_rng());
97 //! ```
98 //!
99 //! [`SampleUniform`]: crate::distributions::uniform::SampleUniform
100 //! [`UniformSampler`]: crate::distributions::uniform::UniformSampler
101 //! [`UniformInt`]: crate::distributions::uniform::UniformInt
102 //! [`UniformFloat`]: crate::distributions::uniform::UniformFloat
103 //! [`UniformDuration`]: crate::distributions::uniform::UniformDuration
104 //! [`SampleBorrow::borrow`]: crate::distributions::uniform::SampleBorrow::borrow
105
106 #[cfg(not(feature = "std"))] use core::time::Duration;
107 #[cfg(feature = "std")] use std::time::Duration;
108 use core::ops::{Range, RangeInclusive};
109
110 use crate::distributions::float::IntoFloat;
111 use crate::distributions::utils::{BoolAsSIMD, FloatAsSIMD, FloatSIMDUtils, WideningMultiply};
112 use crate::distributions::Distribution;
113 use crate::{Rng, RngCore};
114
115 #[cfg(not(feature = "std"))]
116 #[allow(unused_imports)] // rustc doesn't detect that this is actually used
117 use crate::distributions::utils::Float;
118
119 #[cfg(feature = "simd_support")] use packed_simd::*;
120
121 #[cfg(feature = "serde1")]
122 use serde::{Serialize, Deserialize};
123
/// Sample values uniformly between two bounds.
///
/// [`Uniform::new`] and [`Uniform::new_inclusive`] construct a uniform
/// distribution sampling from the given range; these functions may do extra
/// work up front to make sampling of multiple values faster. If only one sample
/// from the range is required, [`Rng::gen_range`] can be more efficient.
///
/// When sampling from a constant range, many calculations can happen at
/// compile-time and all methods should be fast; for floating-point ranges and
/// the full range of integer types this should have comparable performance to
/// the `Standard` distribution.
///
/// Steps are taken to avoid bias which might be present in naive
/// implementations; for example `rng.gen::<u8>() % 170` samples from the range
/// `[0, 169]` but is twice as likely to select numbers less than 85 than other
/// values. Further, the implementations here give more weight to the high-bits
/// generated by the RNG than the low bits, since with some RNGs the low-bits
/// are of lower quality than the high bits.
///
/// Implementations must sample in `[low, high)` range for
/// `Uniform::new(low, high)`, i.e., excluding `high`. In particular, care must
/// be taken to ensure that rounding never results in values `< low` or `>= high`.
///
/// # Example
///
/// ```
/// use rand::distributions::{Distribution, Uniform};
///
/// let between = Uniform::from(10..10000);
/// let mut rng = rand::thread_rng();
/// let mut sum = 0;
/// for _ in 0..1000 {
///     sum += between.sample(&mut rng);
/// }
/// println!("{}", sum);
/// ```
///
/// For a single sample, [`Rng::gen_range`] may be preferred:
///
/// ```
/// use rand::Rng;
///
/// let mut rng = rand::thread_rng();
/// println!("{}", rng.gen_range(0..10));
/// ```
///
/// [`new`]: Uniform::new
/// [`new_inclusive`]: Uniform::new_inclusive
/// [`Rng::gen_range`]: Rng::gen_range
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Uniform<X: SampleUniform>(X::Sampler);
176
177 impl<X: SampleUniform> Uniform<X> {
178 /// Create a new `Uniform` instance which samples uniformly from the half
179 /// open range `[low, high)` (excluding `high`). Panics if `low >= high`.
new<B1, B2>(low: B1, high: B2) -> Uniform<X> where B1: SampleBorrow<X> + Sized, B2: SampleBorrow<X> + Sized,180 pub fn new<B1, B2>(low: B1, high: B2) -> Uniform<X>
181 where
182 B1: SampleBorrow<X> + Sized,
183 B2: SampleBorrow<X> + Sized,
184 {
185 Uniform(X::Sampler::new(low, high))
186 }
187
188 /// Create a new `Uniform` instance which samples uniformly from the closed
189 /// range `[low, high]` (inclusive). Panics if `low > high`.
new_inclusive<B1, B2>(low: B1, high: B2) -> Uniform<X> where B1: SampleBorrow<X> + Sized, B2: SampleBorrow<X> + Sized,190 pub fn new_inclusive<B1, B2>(low: B1, high: B2) -> Uniform<X>
191 where
192 B1: SampleBorrow<X> + Sized,
193 B2: SampleBorrow<X> + Sized,
194 {
195 Uniform(X::Sampler::new_inclusive(low, high))
196 }
197 }
198
impl<X: SampleUniform> Distribution<X> for Uniform<X> {
    // Drawing a value simply delegates to the back-end sampler stored in
    // `self.0`; all range preparation happened at construction time.
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> X {
        self.0.sample(rng)
    }
}
204
/// Helper trait for creating objects using the correct implementation of
/// [`UniformSampler`] for the sampling type.
///
/// Implementing this trait "registers" a back-end, making the type usable
/// with [`Uniform`] and [`Rng::gen_range`].
///
/// See the [module documentation] on how to implement [`Uniform`] range
/// sampling for a custom type.
///
/// [module documentation]: crate::distributions::uniform
pub trait SampleUniform: Sized {
    /// The `UniformSampler` implementation supporting type `X`.
    type Sampler: UniformSampler<X = Self>;
}
216
/// Helper trait handling actual uniform sampling.
///
/// See the [module documentation] on how to implement [`Uniform`] range
/// sampling for a custom type.
///
/// Implementation of [`sample_single`] is optional, and is only useful when
/// the implementation can be faster than `Self::new(low, high).sample(rng)`.
///
/// [module documentation]: crate::distributions::uniform
/// [`sample_single`]: UniformSampler::sample_single
pub trait UniformSampler: Sized {
    /// The type sampled by this implementation.
    type X;

    /// Construct self, with inclusive lower bound and exclusive upper bound
    /// `[low, high)`.
    ///
    /// Usually users should not call this directly but instead use
    /// `Uniform::new`, which asserts that `low < high` before calling this.
    fn new<B1, B2>(low: B1, high: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized;

    /// Construct self, with inclusive bounds `[low, high]`.
    ///
    /// Usually users should not call this directly but instead use
    /// `Uniform::new_inclusive`, which asserts that `low <= high` before
    /// calling this.
    fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized;

    /// Sample a value.
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X;

    /// Sample a single value uniformly from a range with inclusive lower bound
    /// and exclusive upper bound `[low, high)`.
    ///
    /// By default this is implemented using
    /// `UniformSampler::new(low, high).sample(rng)`. However, for some types
    /// more optimal implementations for single usage may be provided via this
    /// method (which is the case for integers and floats).
    /// Results may not be identical.
    ///
    /// Note that to use this method in a generic context, the type needs to be
    /// retrieved via `SampleUniform::Sampler` as follows:
    /// ```
    /// use rand::{thread_rng, distributions::uniform::{SampleUniform, UniformSampler}};
    /// # #[allow(unused)]
    /// fn sample_from_range<T: SampleUniform>(lb: T, ub: T) -> T {
    ///     let mut rng = thread_rng();
    ///     <T as SampleUniform>::Sampler::sample_single(lb, ub, &mut rng)
    /// }
    /// ```
    fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        // Default: build a full sampler and draw once. Back-ends may
        // override this with a cheaper one-shot algorithm.
        let uniform: Self = UniformSampler::new(low, high);
        uniform.sample(rng)
    }

    /// Sample a single value uniformly from a range with inclusive lower bound
    /// and inclusive upper bound `[low, high]`.
    ///
    /// By default this is implemented using
    /// `UniformSampler::new_inclusive(low, high).sample(rng)`. However, for
    /// some types more optimal implementations for single usage may be provided
    /// via this method.
    /// Results may not be identical.
    fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R)
        -> Self::X
        where B1: SampleBorrow<Self::X> + Sized,
              B2: SampleBorrow<Self::X> + Sized
    {
        // Default: build a full sampler and draw once. Back-ends may
        // override this with a cheaper one-shot algorithm.
        let uniform: Self = UniformSampler::new_inclusive(low, high);
        uniform.sample(rng)
    }
}
299
300 impl<X: SampleUniform> From<Range<X>> for Uniform<X> {
from(r: ::core::ops::Range<X>) -> Uniform<X>301 fn from(r: ::core::ops::Range<X>) -> Uniform<X> {
302 Uniform::new(r.start, r.end)
303 }
304 }
305
306 impl<X: SampleUniform> From<RangeInclusive<X>> for Uniform<X> {
from(r: ::core::ops::RangeInclusive<X>) -> Uniform<X>307 fn from(r: ::core::ops::RangeInclusive<X>) -> Uniform<X> {
308 Uniform::new_inclusive(r.start(), r.end())
309 }
310 }
311
312
/// Helper trait similar to [`Borrow`] but implemented
/// only for SampleUniform and references to SampleUniform in
/// order to resolve ambiguity issues.
///
/// This lets range constructors accept their bounds either by value or by
/// reference, without the inference ambiguity a blanket `Borrow` bound
/// would cause.
///
/// [`Borrow`]: std::borrow::Borrow
pub trait SampleBorrow<Borrowed> {
    /// Immutably borrows from an owned value. See [`Borrow::borrow`]
    ///
    /// [`Borrow::borrow`]: std::borrow::Borrow::borrow
    fn borrow(&self) -> &Borrowed;
}
// By-value bounds: `borrow` is simply the identity on the reference.
impl<Borrowed> SampleBorrow<Borrowed> for Borrowed
where Borrowed: SampleUniform
{
    #[inline(always)]
    fn borrow(&self) -> &Borrowed {
        self
    }
}
// By-reference bounds: `self` is `&&Borrowed`, so one deref yields the
// underlying `&Borrowed`.
impl<'a, Borrowed> SampleBorrow<Borrowed> for &'a Borrowed
where Borrowed: SampleUniform
{
    #[inline(always)]
    fn borrow(&self) -> &Borrowed {
        *self
    }
}
340
/// Range that supports generating a single sample efficiently.
///
/// Any type implementing this trait can be used to specify the sampled range
/// for `Rng::gen_range`.
///
/// Implemented below for `Range<T>` and `RangeInclusive<T>` where
/// `T: SampleUniform`.
pub trait SampleRange<T> {
    /// Generate a sample from the given range.
    fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> T;

    /// Check whether the range is empty.
    fn is_empty(&self) -> bool;
}
352
impl<T: SampleUniform + PartialOrd> SampleRange<T> for Range<T> {
    #[inline]
    fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> T {
        T::Sampler::sample_single(self.start, self.end, rng)
    }

    #[inline]
    fn is_empty(&self) -> bool {
        // Deliberately `!(start < end)` rather than `start >= end`: for
        // partially-ordered types (e.g. floats containing NaN) incomparable
        // bounds must also count as an empty range.
        !(self.start < self.end)
    }
}
364
impl<T: SampleUniform + PartialOrd> SampleRange<T> for RangeInclusive<T> {
    #[inline]
    fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> T {
        T::Sampler::sample_single_inclusive(self.start(), self.end(), rng)
    }

    #[inline]
    fn is_empty(&self) -> bool {
        // Deliberately `!(start <= end)` rather than `start > end`: for
        // partially-ordered types (e.g. floats containing NaN) incomparable
        // bounds must also count as an empty range.
        !(self.start() <= self.end())
    }
}
376
377
378 ////////////////////////////////////////////////////////////////////////////////
379
380 // What follows are all back-ends.
381
382
/// The back-end implementing [`UniformSampler`] for integer types.
///
/// Unless you are implementing [`UniformSampler`] for your own type, this type
/// should not be used directly, use [`Uniform`] instead.
///
/// # Implementation notes
///
/// For simplicity, we use the same generic struct `UniformInt<X>` for all
/// integer types `X`. This gives us only one field type, `X`; to store unsigned
/// values of this size, we use the fact that these conversions are no-ops.
///
/// For a closed range, the number of possible numbers we should generate is
/// `range = (high - low + 1)`. To avoid bias, we must ensure that the size of
/// our sample space, `zone`, is a multiple of `range`; other values must be
/// rejected (by replacing with a new random sample).
///
/// As a special case, we use `range = 0` to represent the full range of the
/// result type (i.e. for `new_inclusive($ty::MIN, $ty::MAX)`).
///
/// The optimum `zone` is the largest product of `range` which fits in our
/// (unsigned) target type. We calculate this by calculating how many numbers we
/// must reject: `reject = (MAX + 1) % range = (MAX - range + 1) % range`. Any (large)
/// product of `range` will suffice, thus in `sample_single` we multiply by a
/// power of 2 via bit-shifting (faster but may cause more rejections).
///
/// The smallest integer PRNGs generate is `u32`. For 8- and 16-bit outputs we
/// use `u32` for our `zone` and samples (because it's not slower and because
/// it reduces the chance of having to reject a sample). In this case we cannot
/// store `zone` in the target type since it is too large, however we know
/// `ints_to_reject < range <= $unsigned::MAX`.
///
/// An alternative to using a modulus is widening multiply: After a widening
/// multiply by `range`, the result is in the high word. Then comparing the low
/// word against `zone` makes sure our distribution is uniform.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct UniformInt<X> {
    // Inclusive lower bound of the sampled range.
    low: X,
    // Size of the range, stored bit-equal in `X`; 0 encodes the full domain.
    range: X,
    z: X, // either ints_to_reject or zone depending on implementation
}
424
macro_rules! uniform_int_impl {
    // $ty: target type; $unsigned: bit-equal unsigned type; $u_large: the
    // (possibly wider) unsigned type used for sampling arithmetic.
    ($ty:ty, $unsigned:ident, $u_large:ident) => {
        impl SampleUniform for $ty {
            type Sampler = UniformInt<$ty>;
        }

        impl UniformSampler for UniformInt<$ty> {
            // We play free and fast with unsigned vs signed here
            // (when $ty is signed), but that's fine, since the
            // contract of this macro is for $ty and $unsigned to be
            // "bit-equal", so casting between them is a no-op.

            type X = $ty;

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(low < high, "Uniform::new called with `low >= high`");
                // `[low, high)` is equivalent to `[low, high - 1]`.
                UniformSampler::new_inclusive(low, high - 1)
            }

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(
                    low <= high,
                    "Uniform::new_inclusive called with `low > high`"
                );
                let unsigned_max = ::core::$u_large::MAX;

                // `range` wraps to 0 when the bounds span the whole domain;
                // that special case is handled in `sample`.
                let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned;
                let ints_to_reject = if range > 0 {
                    let range = $u_large::from(range);
                    // `(MAX + 1) % range`, computed without overflowing.
                    (unsigned_max - range + 1) % range
                } else {
                    0
                };

                UniformInt {
                    low,
                    // These are really $unsigned values, but store as $ty:
                    range: range as $ty,
                    z: ints_to_reject as $unsigned as $ty,
                }
            }

            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                let range = self.range as $unsigned as $u_large;
                if range > 0 {
                    let unsigned_max = ::core::$u_large::MAX;
                    // `zone` is the largest multiple of `range`, minus one,
                    // that fits in $u_large; low words of the widening
                    // multiply above it are rejected to keep the result
                    // unbiased.
                    let zone = unsigned_max - (self.z as $unsigned as $u_large);
                    loop {
                        let v: $u_large = rng.gen();
                        let (hi, lo) = v.wmul(range);
                        if lo <= zone {
                            return self.low.wrapping_add(hi as $ty);
                        }
                    }
                } else {
                    // Sample from the entire integer range.
                    rng.gen()
                }
            }

            #[inline]
            fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(low < high, "UniformSampler::sample_single: low >= high");
                Self::sample_single_inclusive(low, high - 1, rng)
            }

            #[inline]
            fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(low <= high, "UniformSampler::sample_single_inclusive: low > high");
                let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned as $u_large;
                // If the above resulted in wrap-around to 0, the range is $ty::MIN..=$ty::MAX,
                // and any integer will do.
                if range == 0 {
                    return rng.gen();
                }

                let zone = if ::core::$unsigned::MAX <= ::core::u16::MAX as $unsigned {
                    // Using a modulus is faster than the approximation for
                    // i8 and i16. I suppose we trade the cost of one
                    // modulus for near-perfect branch prediction.
                    let unsigned_max: $u_large = ::core::$u_large::MAX;
                    let ints_to_reject = (unsigned_max - range + 1) % range;
                    unsigned_max - ints_to_reject
                } else {
                    // conservative but fast approximation. `- 1` is necessary to allow the
                    // same comparison without bias.
                    (range << range.leading_zeros()).wrapping_sub(1)
                };

                // Widening-multiply rejection loop, as in `sample` above.
                loop {
                    let v: $u_large = rng.gen();
                    let (hi, lo) = v.wmul(range);
                    if lo <= zone {
                        return low.wrapping_add(hi as $ty);
                    }
                }
            }
        }
    };
}
554
// Register all primitive integer types. Arguments: target type, bit-equal
// unsigned type, and the unsigned type used for sampling arithmetic (at
// least `u32`, the smallest output size of supported PRNGs).
uniform_int_impl! { i8, u8, u32 }
uniform_int_impl! { i16, u16, u32 }
uniform_int_impl! { i32, u32, u32 }
uniform_int_impl! { i64, u64, u64 }
#[cfg(not(target_os = "emscripten"))]
uniform_int_impl! { i128, u128, u128 }
uniform_int_impl! { isize, usize, usize }
uniform_int_impl! { u8, u8, u32 }
uniform_int_impl! { u16, u16, u32 }
uniform_int_impl! { u32, u32, u32 }
uniform_int_impl! { u64, u64, u64 }
uniform_int_impl! { usize, usize, usize }
#[cfg(not(target_os = "emscripten"))]
uniform_int_impl! { u128, u128, u128 }
569
#[cfg(feature = "simd_support")]
macro_rules! uniform_simd_int_impl {
    // $ty: vector type to implement; $unsigned: its bit-equal unsigned
    // vector; $u_scalar: the unsigned scalar lane type.
    ($ty:ident, $unsigned:ident, $u_scalar:ident) => {
        // The "pick the largest zone that can fit in an `u32`" optimization
        // is less useful here. Multiple lanes complicate things, we don't
        // know the PRNG's minimal output size, and casting to a larger vector
        // is generally a bad idea for SIMD performance. The user can still
        // implement it manually.

        // TODO: look into `Uniform::<u32x4>::new(0u32, 100)` functionality
        // perhaps `impl SampleUniform for $u_scalar`?
        impl SampleUniform for $ty {
            type Sampler = UniformInt<$ty>;
        }

        impl UniformSampler for UniformInt<$ty> {
            type X = $ty;

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
                where B1: SampleBorrow<Self::X> + Sized,
                      B2: SampleBorrow<Self::X> + Sized
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                // Every lane must satisfy `low < high`.
                assert!(low.lt(high).all(), "Uniform::new called with `low >= high`");
                UniformSampler::new_inclusive(low, high - 1)
            }

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
                where B1: SampleBorrow<Self::X> + Sized,
                      B2: SampleBorrow<Self::X> + Sized
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(low.le(high).all(),
                        "Uniform::new_inclusive called with `low > high`");
                let unsigned_max = ::core::$u_scalar::MAX;

                // NOTE: these may need to be replaced with explicitly
                // wrapping operations if `packed_simd` changes
                let range: $unsigned = ((high - low) + 1).cast();
                // `% 0` will panic at runtime.
                let not_full_range = range.gt($unsigned::splat(0));
                // replacing 0 with `unsigned_max` allows a faster `select`
                // with bitwise OR
                let modulo = not_full_range.select(range, $unsigned::splat(unsigned_max));
                // wrapping addition
                let ints_to_reject = (unsigned_max - range + 1) % modulo;
                // When `range` is 0, `lo` of `v.wmul(range)` will always be
                // zero which means only one sample is needed.
                let zone = unsigned_max - ints_to_reject;

                UniformInt {
                    low,
                    // These are really $unsigned values, but store as $ty:
                    range: range.cast(),
                    z: zone.cast(),
                }
            }

            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                let range: $unsigned = self.range.cast();
                let zone: $unsigned = self.z.cast();

                // This might seem very slow, generating a whole new
                // SIMD vector for every sample rejection. For most uses
                // though, the chance of rejection is small and provides good
                // general performance. With multiple lanes, that chance is
                // multiplied. To mitigate this, we replace only the lanes of
                // the vector which fail, iteratively reducing the chance of
                // rejection. The replacement method does however add a little
                // overhead. Benchmarking or calculating probabilities might
                // reveal contexts where this replacement method is slower.
                let mut v: $unsigned = rng.gen();
                loop {
                    let (hi, lo) = v.wmul(range);
                    let mask = lo.le(zone);
                    if mask.all() {
                        let hi: $ty = hi.cast();
                        // wrapping addition
                        let result = self.low + hi;
                        // `select` here compiles to a blend operation
                        // When `range.eq(0).none()` the compare and blend
                        // operations are avoided.
                        let v: $ty = v.cast();
                        return range.gt($unsigned::splat(0)).select(result, v);
                    }
                    // Replace only the failing lanes
                    v = mask.select(v, rng.gen());
                }
            }
        }
    };

    // bulk implementation: expand one unsigned and one signed impl per
    // `(unsigned, signed)` pair, all sharing the same unsigned scalar type.
    ($(($unsigned:ident, $signed:ident),)+ $u_scalar:ident) => {
        $(
            uniform_simd_int_impl!($unsigned, $unsigned, $u_scalar);
            uniform_simd_int_impl!($signed, $unsigned, $u_scalar);
        )+
    };
}
676
// Register all supported SIMD vector widths, grouped by scalar lane type.
#[cfg(feature = "simd_support")]
uniform_simd_int_impl! {
    (u64x2, i64x2),
    (u64x4, i64x4),
    (u64x8, i64x8),
    u64
}

#[cfg(feature = "simd_support")]
uniform_simd_int_impl! {
    (u32x2, i32x2),
    (u32x4, i32x4),
    (u32x8, i32x8),
    (u32x16, i32x16),
    u32
}

#[cfg(feature = "simd_support")]
uniform_simd_int_impl! {
    (u16x2, i16x2),
    (u16x4, i16x4),
    (u16x8, i16x8),
    (u16x16, i16x16),
    (u16x32, i16x32),
    u16
}

#[cfg(feature = "simd_support")]
uniform_simd_int_impl! {
    (u8x2, i8x2),
    (u8x4, i8x4),
    (u8x8, i8x8),
    (u8x16, i8x16),
    (u8x32, i8x32),
    (u8x64, i8x64),
    u8
}
714
// `char` uses a dedicated back-end so sampling can skip the surrogate gap.
impl SampleUniform for char {
    type Sampler = UniformChar;
}
718
/// The back-end implementing [`UniformSampler`] for `char`.
///
/// Unless you are implementing [`UniformSampler`] for your own type, this type
/// should not be used directly, use [`Uniform`] instead.
///
/// This differs from integer range sampling since the range `0xD800..=0xDFFF`
/// is used for surrogate pairs in UCS and UTF-16, and consequently does not
/// contain valid Unicode code points. We must therefore avoid sampling values
/// in this range.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct UniformChar {
    // Integer sampler over the "compressed" code-point space, i.e. code
    // points with the surrogate gap removed (see `char_to_comp_u32`).
    sampler: UniformInt<u32>,
}
733
/// First code point of the UTF-16 surrogate range (inclusive).
const CHAR_SURROGATE_START: u32 = 0xD800;
/// Number of code points in the UTF-16 surrogate range.
const CHAR_SURROGATE_LEN: u32 = 0xE000 - CHAR_SURROGATE_START;

/// Map a `char` to its index in the "compressed" code-point space, i.e.
/// the space with the (never-valid) surrogate range collapsed away.
fn char_to_comp_u32(c: char) -> u32 {
    let v = c as u32;
    if v < CHAR_SURROGATE_START {
        v
    } else {
        // `c` lies above the gap; `char` guarantees it is not inside it.
        v - CHAR_SURROGATE_LEN
    }
}
746
impl UniformSampler for UniformChar {
    type X = char;

    #[inline] // if the range is constant, this helps LLVM to do the
              // calculations at compile-time.
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        // Map both bounds into the compressed code-point space (surrogate
        // gap removed) and sample plain integers there.
        let low = char_to_comp_u32(*low_b.borrow());
        let high = char_to_comp_u32(*high_b.borrow());
        let sampler = UniformInt::<u32>::new(low, high);
        UniformChar { sampler }
    }

    #[inline] // if the range is constant, this helps LLVM to do the
              // calculations at compile-time.
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        // Same compression as `new`, but with inclusive integer bounds.
        let low = char_to_comp_u32(*low_b.borrow());
        let high = char_to_comp_u32(*high_b.borrow());
        let sampler = UniformInt::<u32>::new_inclusive(low, high);
        UniformChar { sampler }
    }

    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
        let mut x = self.sampler.sample(rng);
        // Undo the compression applied by `char_to_comp_u32`: re-insert the
        // surrogate gap before converting back to `char`.
        if x >= CHAR_SURROGATE_START {
            x += CHAR_SURROGATE_LEN;
        }
        // SAFETY: x must not be in surrogate range or greater than char::MAX.
        // This relies on range constructors which accept char arguments.
        // Validity of input char values is assumed.
        unsafe { core::char::from_u32_unchecked(x) }
    }
}
787
/// The back-end implementing [`UniformSampler`] for floating-point types.
///
/// Unless you are implementing [`UniformSampler`] for your own type, this type
/// should not be used directly, use [`Uniform`] instead.
///
/// # Implementation notes
///
/// Instead of generating a float in the `[0, 1)` range using [`Standard`], the
/// `UniformFloat` implementation converts the output of a PRNG itself. This
/// way one or two steps can be optimized out.
///
/// The floats are first converted to a value in the `[1, 2)` interval using a
/// transmute-based method, and then mapped to the expected range with a
/// multiply and addition. Values produced this way have what equals 23 bits of
/// random digits for an `f32`, and 52 for an `f64`.
///
/// [`new`]: UniformSampler::new
/// [`new_inclusive`]: UniformSampler::new_inclusive
/// [`Standard`]: crate::distributions::Standard
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct UniformFloat<X> {
    // Lower bound of the output range.
    low: X,
    // Scale factor applied to the generated `[0, 1)` value before adding
    // `low` (adjusted at construction so results never reach `high`).
    scale: X,
}
813
// Generates the `SampleUniform`/`UniformSampler` implementations for a
// floating-point type `$ty`. `$uty` is the unsigned integer type of the same
// bit width (the RNG output type), `$f_scalar`/`$u_scalar` are the scalar
// equivalents (identical to `$ty`/`$uty` for non-SIMD types), and
// `$bits_to_discard` is the number of bits dropped from the RNG output so
// that exactly mantissa-many random bits remain (32 - 23 for `f32`,
// 64 - 52 for `f64`).
macro_rules! uniform_float_impl {
    ($ty:ty, $uty:ident, $f_scalar:ident, $u_scalar:ident, $bits_to_discard:expr) => {
        impl SampleUniform for $ty {
            type Sampler = UniformFloat<$ty>;
        }

        impl UniformSampler for UniformFloat<$ty> {
            type X = $ty;

            // Construct a sampler for the half-open range `[low, high)`.
            fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                debug_assert!(
                    low.all_finite(),
                    "Uniform::new called with `low` non-finite."
                );
                debug_assert!(
                    high.all_finite(),
                    "Uniform::new called with `high` non-finite."
                );
                assert!(low.all_lt(high), "Uniform::new called with `low >= high`");
                // The largest `value0_1` that `sample` can produce: the
                // representable value just below 1.0.
                let max_rand = <$ty>::splat(
                    (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0,
                );

                let mut scale = high - low;
                assert!(scale.all_finite(), "Uniform::new: range overflow");

                // Shrink `scale` in every lane where rounding could make the
                // largest possible sample reach or exceed the exclusive bound.
                loop {
                    let mask = (scale * max_rand + low).ge_mask(high);
                    if mask.none() {
                        break;
                    }
                    scale = scale.decrease_masked(mask);
                }

                debug_assert!(<$ty>::splat(0.0).all_le(scale));

                UniformFloat { low, scale }
            }

            // Construct a sampler for the closed range `[low, high]`.
            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                debug_assert!(
                    low.all_finite(),
                    "Uniform::new_inclusive called with `low` non-finite."
                );
                debug_assert!(
                    high.all_finite(),
                    "Uniform::new_inclusive called with `high` non-finite."
                );
                assert!(
                    low.all_le(high),
                    "Uniform::new_inclusive called with `low > high`"
                );
                // The largest `value0_1` that `sample` can produce: the
                // representable value just below 1.0.
                let max_rand = <$ty>::splat(
                    (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0,
                );

                // Dividing by `max_rand` stretches the scale so the maximum
                // sample can land on `high` itself (inclusive bound).
                let mut scale = (high - low) / max_rand;
                assert!(scale.all_finite(), "Uniform::new_inclusive: range overflow");

                // Shrink `scale` in every lane where rounding could push the
                // largest possible sample strictly beyond `high`.
                loop {
                    let mask = (scale * max_rand + low).gt_mask(high);
                    if mask.none() {
                        break;
                    }
                    scale = scale.decrease_masked(mask);
                }

                debug_assert!(<$ty>::splat(0.0).all_le(scale));

                UniformFloat { low, scale }
            }

            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                // Generate a value in the range [1, 2)
                let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard).into_float_with_exponent(0);

                // Get a value in the range [0, 1) in order to avoid
                // overflowing into infinity when multiplying with scale
                let value0_1 = value1_2 - 1.0;

                // We don't use `f64::mul_add`, because it is not available with
                // `no_std`. Furthermore, it is slower for some targets (but
                // faster for others). However, the order of multiplication and
                // addition is important, because on some platforms (e.g. ARM)
                // it will be optimized to a single (non-FMA) instruction.
                value0_1 * self.scale + self.low
            }

            // One-shot sampling from `[low, high)` without constructing a
            // distribution object. Bound validation is deferred into the
            // rejection loop so the common (valid, in-range) case stays fast.
            #[inline]
            fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                debug_assert!(
                    low.all_finite(),
                    "UniformSampler::sample_single called with `low` non-finite."
                );
                debug_assert!(
                    high.all_finite(),
                    "UniformSampler::sample_single called with `high` non-finite."
                );
                assert!(
                    low.all_lt(high),
                    "UniformSampler::sample_single: low >= high"
                );
                let mut scale = high - low;
                assert!(scale.all_finite(), "UniformSampler::sample_single: range overflow");

                loop {
                    // Generate a value in the range [1, 2)
                    let value1_2 =
                        (rng.gen::<$uty>() >> $bits_to_discard).into_float_with_exponent(0);

                    // Get a value in the range [0, 1) in order to avoid
                    // overflowing into infinity when multiplying with scale
                    let value0_1 = value1_2 - 1.0;

                    // Doing multiply before addition allows some architectures
                    // to use a single instruction.
                    let res = value0_1 * scale + low;

                    debug_assert!(low.all_le(res) || !scale.all_finite());
                    if res.all_lt(high) {
                        return res;
                    }

                    // This handles a number of edge cases.
                    // * `low` or `high` is NaN. In this case `scale` and
                    //   `res` are going to end up as NaN.
                    // * `low` is negative infinity and `high` is finite.
                    //   `scale` is going to be infinite and `res` will be
                    //   NaN.
                    // * `high` is positive infinity and `low` is finite.
                    //   `scale` is going to be infinite and `res` will
                    //   be infinite or NaN (if value0_1 is 0).
                    // * `low` is negative infinity and `high` is positive
                    //   infinity. `scale` will be infinite and `res` will
                    //   be NaN.
                    // * `low` and `high` are finite, but `high - low`
                    //   overflows to infinite. `scale` will be infinite
                    //   and `res` will be infinite or NaN (if value0_1 is 0).
                    // So if `high` or `low` are non-finite, we are guaranteed
                    // to fail the `res < high` check above and end up here.
                    //
                    // While we technically should check for non-finite `low`
                    // and `high` before entering the loop, by doing the checks
                    // here instead, we allow the common case to avoid these
                    // checks. But we are still guaranteed that if `low` or
                    // `high` are non-finite we'll end up here and can do the
                    // appropriate checks.
                    //
                    // Likewise `high - low` overflowing to infinity is also
                    // rare, so handle it here after the common case.
                    let mask = !scale.finite_mask();
                    if mask.any() {
                        assert!(
                            low.all_finite() && high.all_finite(),
                            "Uniform::sample_single: low and high must be finite"
                        );
                        scale = scale.decrease_masked(mask);
                    }
                }
            }
        }
    };
}
995
// Scalar back-ends: keep only mantissa-many random bits
// (23 for `f32`, 52 for `f64`).
uniform_float_impl! { f32, u32, f32, u32, 32 - 23 }
uniform_float_impl! { f64, u64, f64, u64, 64 - 52 }

// SIMD back-ends, sampling all lanes in parallel (feature-gated).
#[cfg(feature = "simd_support")]
uniform_float_impl! { f32x2, u32x2, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { f32x4, u32x4, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { f32x8, u32x8, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { f32x16, u32x16, f32, u32, 32 - 23 }

#[cfg(feature = "simd_support")]
uniform_float_impl! { f64x2, u64x2, f64, u64, 64 - 52 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { f64x4, u64x4, f64, u64, 64 - 52 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { f64x8, u64x8, f64, u64, 64 - 52 }
1014
1015
/// The back-end implementing [`UniformSampler`] for `Duration`.
///
/// Unless you are implementing [`UniformSampler`] for your own types, this type
/// should not be used directly, use [`Uniform`] instead.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct UniformDuration {
    // Sampling strategy chosen by `new_inclusive` based on the range's size.
    mode: UniformDurationMode,
    // Nanosecond part of the lower bound; read only by the `Large` mode in
    // `sample`, where generated nanoseconds are offset relative to `low`.
    offset: u32,
}
1026
// Sampling strategy for `UniformDuration`, selected at construction time.
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
enum UniformDurationMode {
    // Both bounds share the same whole-second value (after borrow
    // adjustment): only the nanosecond part needs to be sampled.
    Small {
        secs: u64,
        nanos: Uniform<u32>,
    },
    // The whole range fits in a `u64` nanosecond count: sample one integer
    // and split it back into seconds + nanoseconds.
    Medium {
        nanos: Uniform<u64>,
    },
    // The range is too large for a `u64` nanosecond count: sample seconds
    // and nanoseconds independently, rejecting out-of-range combinations.
    Large {
        max_secs: u64,
        max_nanos: u32,
        secs: Uniform<u64>,
    },
}
1043
// `Uniform<Duration>` delegates to the `UniformDuration` back-end.
impl SampleUniform for Duration {
    type Sampler = UniformDuration;
}
1047
impl UniformSampler for UniformDuration {
    type X = Duration;

    // Construct a sampler for the half-open range `[low, high)` by delegating
    // to `new_inclusive` with the upper bound reduced by one nanosecond.
    #[inline]
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        assert!(low < high, "Uniform::new called with `low >= high`");
        UniformDuration::new_inclusive(low, high - Duration::new(0, 1))
    }

    // Construct a sampler for the closed range `[low, high]`, picking the
    // cheapest `UniformDurationMode` that can represent the range.
    #[inline]
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        assert!(
            low <= high,
            "Uniform::new_inclusive called with `low > high`"
        );

        let low_s = low.as_secs();
        let low_n = low.subsec_nanos();
        let mut high_s = high.as_secs();
        let mut high_n = high.subsec_nanos();

        // "Borrow" one second into the nanosecond field so that
        // `high_n >= low_n` holds for the arithmetic below.
        if high_n < low_n {
            high_s -= 1;
            high_n += 1_000_000_000;
        }

        let mode = if low_s == high_s {
            // Seconds are fixed; only the nanosecond part varies.
            UniformDurationMode::Small {
                secs: low_s,
                nanos: Uniform::new_inclusive(low_n, high_n),
            }
        } else {
            // Try to express the upper bound as a total nanosecond count;
            // `None` means it overflows u64.
            let max = high_s
                .checked_mul(1_000_000_000)
                .and_then(|n| n.checked_add(u64::from(high_n)));

            if let Some(higher_bound) = max {
                let lower_bound = low_s * 1_000_000_000 + u64::from(low_n);
                UniformDurationMode::Medium {
                    nanos: Uniform::new_inclusive(lower_bound, higher_bound),
                }
            } else {
                // An offset is applied to simplify generation of nanoseconds
                let max_nanos = high_n - low_n;
                UniformDurationMode::Large {
                    max_secs: high_s,
                    max_nanos,
                    secs: Uniform::new_inclusive(low_s, high_s),
                }
            }
        };
        UniformDuration {
            mode,
            offset: low_n,
        }
    }

    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Duration {
        match self.mode {
            UniformDurationMode::Small { secs, nanos } => {
                let n = nanos.sample(rng);
                Duration::new(secs, n)
            }
            UniformDurationMode::Medium { nanos } => {
                // Split the sampled total-nanosecond count into (secs, nanos).
                let nanos = nanos.sample(rng);
                Duration::new(nanos / 1_000_000_000, (nanos % 1_000_000_000) as u32)
            }
            UniformDurationMode::Large {
                max_secs,
                max_nanos,
                secs,
            } => {
                // constant folding means this is at least as fast as `Rng::sample(Range)`
                let nano_range = Uniform::new(0, 1_000_000_000);
                loop {
                    let s = secs.sample(rng);
                    let n = nano_range.sample(rng);
                    // Reject combinations past the upper bound; `self.offset`
                    // shifts nanoseconds relative to `low`'s sub-second part
                    // (`Duration::new` normalizes sums >= 1e9 ns into secs).
                    if !(s == max_secs && n > max_nanos) {
                        let sum = n + self.offset;
                        break Duration::new(s, sum);
                    }
                }
            }
        }
    }
}
1147
1148 #[cfg(test)]
1149 mod tests {
1150 use super::*;
1151 use crate::rngs::mock::StepRng;
1152
    #[test]
    #[cfg(feature = "serde1")]
    // Round-trips a `UniformDuration` through bincode and checks that the
    // selected mode and all internal sampler parameters survive unchanged.
    fn test_serialization_uniform_duration() {
        let distr = UniformDuration::new(std::time::Duration::from_secs(10), std::time::Duration::from_secs(60));
        let de_distr: UniformDuration = bincode::deserialize(&bincode::serialize(&distr).unwrap()).unwrap();
        assert_eq!(
            distr.offset, de_distr.offset
        );
        // Compare the private fields of the inner `Uniform` samplers directly;
        // mismatched variants fall through to the panic arm.
        match (distr.mode, de_distr.mode) {
            (UniformDurationMode::Small {secs: a_secs, nanos: a_nanos}, UniformDurationMode::Small {secs, nanos}) => {
                assert_eq!(a_secs, secs);

                assert_eq!(a_nanos.0.low, nanos.0.low);
                assert_eq!(a_nanos.0.range, nanos.0.range);
                assert_eq!(a_nanos.0.z, nanos.0.z);
            }
            (UniformDurationMode::Medium {nanos: a_nanos} , UniformDurationMode::Medium {nanos}) => {
                assert_eq!(a_nanos.0.low, nanos.0.low);
                assert_eq!(a_nanos.0.range, nanos.0.range);
                assert_eq!(a_nanos.0.z, nanos.0.z);
            }
            (UniformDurationMode::Large {max_secs:a_max_secs, max_nanos:a_max_nanos, secs:a_secs}, UniformDurationMode::Large {max_secs, max_nanos, secs} ) => {
                assert_eq!(a_max_secs, max_secs);
                assert_eq!(a_max_nanos, max_nanos);

                assert_eq!(a_secs.0.low, secs.0.low);
                assert_eq!(a_secs.0.range, secs.0.range);
                assert_eq!(a_secs.0.z, secs.0.z);
            }
            _ => panic!("`UniformDurationMode` was not serialized/deserialized correctly")
        }
    }
1185
    #[test]
    #[cfg(feature = "serde1")]
    // Round-trips `Uniform<i32>` and `Uniform<f32>` through bincode and
    // compares the back-end's private fields before and after.
    fn test_uniform_serialization() {
        let unit_box: Uniform<i32> = Uniform::new(-1, 1);
        let de_unit_box: Uniform<i32> = bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap();

        // Integer back-end: (low, range, z) triple.
        assert_eq!(unit_box.0.low, de_unit_box.0.low);
        assert_eq!(unit_box.0.range, de_unit_box.0.range);
        assert_eq!(unit_box.0.z, de_unit_box.0.z);

        let unit_box: Uniform<f32> = Uniform::new(-1., 1.);
        let de_unit_box: Uniform<f32> = bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap();

        // Float back-end: (low, scale) pair.
        assert_eq!(unit_box.0.low, de_unit_box.0.low);
        assert_eq!(unit_box.0.scale, de_unit_box.0.scale);
    }
1202
1203 #[should_panic]
1204 #[test]
test_uniform_bad_limits_equal_int()1205 fn test_uniform_bad_limits_equal_int() {
1206 Uniform::new(10, 10);
1207 }
1208
1209 #[test]
test_uniform_good_limits_equal_int()1210 fn test_uniform_good_limits_equal_int() {
1211 let mut rng = crate::test::rng(804);
1212 let dist = Uniform::new_inclusive(10, 10);
1213 for _ in 0..20 {
1214 assert_eq!(rng.sample(dist), 10);
1215 }
1216 }
1217
1218 #[should_panic]
1219 #[test]
test_uniform_bad_limits_flipped_int()1220 fn test_uniform_bad_limits_flipped_int() {
1221 Uniform::new(10, 5);
1222 }
1223
    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    // For every integer type (scalar and SIMD), samples obtained via `new`,
    // `new_inclusive`, borrowed-argument constructors, and the one-shot
    // samplers must respect the requested half-open / inclusive bounds.
    fn test_integers() {
        #[cfg(not(target_os = "emscripten"))] use core::{i128, u128};
        use core::{i16, i32, i64, i8, isize};
        use core::{u16, u32, u64, u8, usize};

        let mut rng = crate::test::rng(251);
        macro_rules! t {
            // Base case: one type, a list of (low, high) ranges, and the
            // `<=`/`<` comparison closures appropriate for the type.
            ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{
                for &(low, high) in $v.iter() {
                    let my_uniform = Uniform::new(low, high);
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $lt(v, high));
                    }

                    let my_uniform = Uniform::new_inclusive(low, high);
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $le(v, high));
                    }

                    let my_uniform = Uniform::new(&low, high);
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $lt(v, high));
                    }

                    let my_uniform = Uniform::new_inclusive(&low, &high);
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $le(v, high));
                    }

                    for _ in 0..1000 {
                        let v = <$ty as SampleUniform>::Sampler::sample_single(low, high, &mut rng);
                        assert!($le(low, v) && $lt(v, high));
                    }

                    for _ in 0..1000 {
                        let v = <$ty as SampleUniform>::Sampler::sample_single_inclusive(low, high, &mut rng);
                        assert!($le(low, v) && $le(v, high));
                    }
                }
            }};

            // scalar bulk
            ($($ty:ident),*) => {{
                $(t!(
                    $ty,
                    [(0, 10), (10, 127), ($ty::MIN, $ty::MAX)],
                    |x, y| x <= y,
                    |x, y| x < y
                );)*
            }};

            // simd bulk
            ($($ty:ident),* => $scalar:ident) => {{
                $(t!(
                    $ty,
                    [
                        ($ty::splat(0), $ty::splat(10)),
                        ($ty::splat(10), $ty::splat(127)),
                        ($ty::splat($scalar::MIN), $ty::splat($scalar::MAX)),
                    ],
                    |x: $ty, y| x.le(y).all(),
                    |x: $ty, y| x.lt(y).all()
                );)*
            }};
        }
        t!(i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
        #[cfg(not(target_os = "emscripten"))]
        t!(i128, u128);

        #[cfg(feature = "simd_support")]
        {
            t!(u8x2, u8x4, u8x8, u8x16, u8x32, u8x64 => u8);
            t!(i8x2, i8x4, i8x8, i8x16, i8x32, i8x64 => i8);
            t!(u16x2, u16x4, u16x8, u16x16, u16x32 => u16);
            t!(i16x2, i16x4, i16x8, i16x16, i16x32 => i16);
            t!(u32x2, u32x4, u32x8, u32x16 => u32);
            t!(i32x2, i32x4, i32x8, i32x16 => i32);
            t!(u64x2, u64x4, u64x8 => u64);
            t!(i64x2, i64x4, i64x8 => i64);
        }
    }
1311
1312 #[test]
1313 #[cfg_attr(miri, ignore)] // Miri is too slow
test_char()1314 fn test_char() {
1315 let mut rng = crate::test::rng(891);
1316 let mut max = core::char::from_u32(0).unwrap();
1317 for _ in 0..100 {
1318 let c = rng.gen_range('A'..='Z');
1319 assert!(('A'..='Z').contains(&c));
1320 max = max.max(c);
1321 }
1322 assert_eq!(max, 'Z');
1323 let d = Uniform::new(
1324 core::char::from_u32(0xD7F0).unwrap(),
1325 core::char::from_u32(0xE010).unwrap(),
1326 );
1327 for _ in 0..100 {
1328 let c = d.sample(&mut rng);
1329 assert!((c as u32) < 0xD800 || (c as u32) > 0xDFFF);
1330 }
1331 }
1332
    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    // Exercises the float back-ends on ranges chosen to stress rounding:
    // tiny bit-level spans, subnormals, signed zeros, and near-overflow
    // spans. `zero_rng`/`max_rng` force the RNG to its extreme outputs so
    // the minimum/maximum possible samples are checked exactly.
    fn test_floats() {
        let mut rng = crate::test::rng(252);
        let mut zero_rng = StepRng::new(0, 0);
        let mut max_rng = StepRng::new(0xffff_ffff_ffff_ffff, 0);
        macro_rules! t {
            ($ty:ty, $f_scalar:ident, $bits_shifted:expr) => {{
                let v: &[($f_scalar, $f_scalar)] = &[
                    (0.0, 100.0),
                    (-1e35, -1e25),
                    (1e-35, 1e-25),
                    (-1e35, 1e35),
                    (<$f_scalar>::from_bits(0), <$f_scalar>::from_bits(3)),
                    (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)),
                    (-<$f_scalar>::from_bits(5), 0.0),
                    (-<$f_scalar>::from_bits(7), -0.0),
                    (0.1 * ::core::$f_scalar::MAX, ::core::$f_scalar::MAX),
                    (-::core::$f_scalar::MAX * 0.2, ::core::$f_scalar::MAX * 0.7),
                ];
                for &(low_scalar, high_scalar) in v.iter() {
                    // Place the interesting range in one lane at a time; the
                    // other lanes use the benign range [0, 1).
                    for lane in 0..<$ty>::lanes() {
                        let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
                        let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
                        let my_uniform = Uniform::new(low, high);
                        let my_incl_uniform = Uniform::new_inclusive(low, high);
                        for _ in 0..100 {
                            let v = rng.sample(my_uniform).extract(lane);
                            assert!(low_scalar <= v && v < high_scalar);
                            let v = rng.sample(my_incl_uniform).extract(lane);
                            assert!(low_scalar <= v && v <= high_scalar);
                            let v = <$ty as SampleUniform>::Sampler
                                ::sample_single(low, high, &mut rng).extract(lane);
                            assert!(low_scalar <= v && v < high_scalar);
                        }

                        assert_eq!(
                            rng.sample(Uniform::new_inclusive(low, low)).extract(lane),
                            low_scalar
                        );

                        // The all-zeros RNG must produce exactly `low`; the
                        // all-ones RNG must still respect the upper bound.
                        assert_eq!(zero_rng.sample(my_uniform).extract(lane), low_scalar);
                        assert_eq!(zero_rng.sample(my_incl_uniform).extract(lane), low_scalar);
                        assert_eq!(<$ty as SampleUniform>::Sampler
                            ::sample_single(low, high, &mut zero_rng)
                            .extract(lane), low_scalar);
                        assert!(max_rng.sample(my_uniform).extract(lane) < high_scalar);
                        assert!(max_rng.sample(my_incl_uniform).extract(lane) <= high_scalar);

                        // Don't run this test for really tiny differences between high and low
                        // since for those rounding might result in selecting high for a very
                        // long time.
                        if (high_scalar - low_scalar) > 0.0001 {
                            let mut lowering_max_rng = StepRng::new(
                                0xffff_ffff_ffff_ffff,
                                (-1i64 << $bits_shifted) as u64,
                            );
                            assert!(
                                <$ty as SampleUniform>::Sampler
                                    ::sample_single(low, high, &mut lowering_max_rng)
                                    .extract(lane) < high_scalar
                            );
                        }
                    }
                }

                // Degenerate inclusive ranges at the extremes of the type
                // must return the bound itself.
                assert_eq!(
                    rng.sample(Uniform::new_inclusive(
                        ::core::$f_scalar::MAX,
                        ::core::$f_scalar::MAX
                    )),
                    ::core::$f_scalar::MAX
                );
                assert_eq!(
                    rng.sample(Uniform::new_inclusive(
                        -::core::$f_scalar::MAX,
                        -::core::$f_scalar::MAX
                    )),
                    -::core::$f_scalar::MAX
                );
            }};
        }

        t!(f32, f32, 32 - 23);
        t!(f64, f64, 64 - 52);
        #[cfg(feature = "simd_support")]
        {
            t!(f32x2, f32, 32 - 23);
            t!(f32x4, f32, 32 - 23);
            t!(f32x8, f32, 32 - 23);
            t!(f32x16, f32, 32 - 23);
            t!(f64x2, f64, 64 - 52);
            t!(f64x4, f64, 64 - 52);
            t!(f64x8, f64, 64 - 52);
        }
    }
1429
1430 #[test]
1431 #[should_panic]
test_float_overflow()1432 fn test_float_overflow() {
1433 Uniform::from(::core::f64::MIN..::core::f64::MAX);
1434 }
1435
1436 #[test]
1437 #[should_panic]
test_float_overflow_single()1438 fn test_float_overflow_single() {
1439 let mut rng = crate::test::rng(252);
1440 rng.gen_range(::core::f64::MIN..::core::f64::MAX);
1441 }
1442
    #[test]
    #[cfg(all(
        feature = "std",
        not(target_arch = "wasm32"),
        not(target_arch = "asmjs")
    ))]
    // Every invalid float range (NaN bounds, flipped bounds, infinite
    // bounds, overflowing span) must cause a panic in `new`,
    // `new_inclusive`, and the one-shot sampler. Requires `std` for
    // `catch_unwind`.
    fn test_float_assertions() {
        use super::SampleUniform;
        use std::panic::catch_unwind;
        // Helper invoking the one-shot sampler so panics can be caught.
        fn range<T: SampleUniform>(low: T, high: T) {
            let mut rng = crate::test::rng(253);
            T::Sampler::sample_single(low, high, &mut rng);
        }

        macro_rules! t {
            ($ty:ident, $f_scalar:ident) => {{
                let v: &[($f_scalar, $f_scalar)] = &[
                    (::std::$f_scalar::NAN, 0.0),
                    (1.0, ::std::$f_scalar::NAN),
                    (::std::$f_scalar::NAN, ::std::$f_scalar::NAN),
                    (1.0, 0.5),
                    (::std::$f_scalar::MAX, -::std::$f_scalar::MAX),
                    (::std::$f_scalar::INFINITY, ::std::$f_scalar::INFINITY),
                    (
                        ::std::$f_scalar::NEG_INFINITY,
                        ::std::$f_scalar::NEG_INFINITY,
                    ),
                    (::std::$f_scalar::NEG_INFINITY, 5.0),
                    (5.0, ::std::$f_scalar::INFINITY),
                    (::std::$f_scalar::NAN, ::std::$f_scalar::INFINITY),
                    (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NAN),
                    (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::INFINITY),
                ];
                for &(low_scalar, high_scalar) in v.iter() {
                    // Inject the invalid bounds into one lane at a time.
                    for lane in 0..<$ty>::lanes() {
                        let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
                        let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
                        assert!(catch_unwind(|| range(low, high)).is_err());
                        assert!(catch_unwind(|| Uniform::new(low, high)).is_err());
                        assert!(catch_unwind(|| Uniform::new_inclusive(low, high)).is_err());
                        assert!(catch_unwind(|| range(low, low)).is_err());
                        assert!(catch_unwind(|| Uniform::new(low, low)).is_err());
                    }
                }
            }};
        }

        t!(f32, f32);
        t!(f64, f64);
        #[cfg(feature = "simd_support")]
        {
            t!(f32x2, f32);
            t!(f32x4, f32);
            t!(f32x8, f32);
            t!(f32x16, f32);
            t!(f64x2, f64);
            t!(f64x4, f64);
            t!(f64x8, f64);
        }
    }
1503
1504
1505 #[test]
1506 #[cfg_attr(miri, ignore)] // Miri is too slow
test_durations()1507 fn test_durations() {
1508 #[cfg(not(feature = "std"))] use core::time::Duration;
1509 #[cfg(feature = "std")] use std::time::Duration;
1510
1511 let mut rng = crate::test::rng(253);
1512
1513 let v = &[
1514 (Duration::new(10, 50000), Duration::new(100, 1234)),
1515 (Duration::new(0, 100), Duration::new(1, 50)),
1516 (
1517 Duration::new(0, 0),
1518 Duration::new(u64::max_value(), 999_999_999),
1519 ),
1520 ];
1521 for &(low, high) in v.iter() {
1522 let my_uniform = Uniform::new(low, high);
1523 for _ in 0..1000 {
1524 let v = rng.sample(my_uniform);
1525 assert!(low <= v && v < high);
1526 }
1527 }
1528 }
1529
    #[test]
    // Demonstrates (and verifies) extending `Uniform` to a user-defined
    // type by delegating to the `UniformFloat<f32>` back-end.
    fn test_custom_uniform() {
        use crate::distributions::uniform::{
            SampleBorrow, SampleUniform, UniformFloat, UniformSampler,
        };
        // Newtype around f32 standing in for a user's custom type.
        #[derive(Clone, Copy, PartialEq, PartialOrd)]
        struct MyF32 {
            x: f32,
        }
        // Back-end: wraps the float sampler and converts at the boundary.
        #[derive(Clone, Copy, Debug)]
        struct UniformMyF32(UniformFloat<f32>);
        impl UniformSampler for UniformMyF32 {
            type X = MyF32;

            fn new<B1, B2>(low: B1, high: B2) -> Self
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                UniformMyF32(UniformFloat::<f32>::new(low.borrow().x, high.borrow().x))
            }

            fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                // NOTE: delegates to the half-open constructor rather than
                // `UniformFloat::new_inclusive` — adequate for this test.
                UniformSampler::new(low, high)
            }

            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                MyF32 {
                    x: self.0.sample(rng),
                }
            }
        }
        impl SampleUniform for MyF32 {
            type Sampler = UniformMyF32;
        }

        let (low, high) = (MyF32 { x: 17.0f32 }, MyF32 { x: 22.0f32 });
        let uniform = Uniform::new(low, high);
        let mut rng = crate::test::rng(804);
        for _ in 0..100 {
            let x: MyF32 = rng.sample(uniform);
            assert!(low <= x && x < high);
        }
    }
1578
1579 #[test]
test_uniform_from_std_range()1580 fn test_uniform_from_std_range() {
1581 let r = Uniform::from(2u32..7);
1582 assert_eq!(r.0.low, 2);
1583 assert_eq!(r.0.range, 5);
1584 let r = Uniform::from(2.0f64..7.0);
1585 assert_eq!(r.0.low, 2.0);
1586 assert_eq!(r.0.scale, 5.0);
1587 }
1588
1589 #[test]
test_uniform_from_std_range_inclusive()1590 fn test_uniform_from_std_range_inclusive() {
1591 let r = Uniform::from(2u32..=6);
1592 assert_eq!(r.0.low, 2);
1593 assert_eq!(r.0.range, 5);
1594 let r = Uniform::from(2.0f64..=7.0);
1595 assert_eq!(r.0.low, 2.0);
1596 assert!(r.0.scale > 5.0);
1597 assert!(r.0.scale < 5.0 + 1e-14);
1598 }
1599
    #[test]
    // Pins the exact values produced for a fixed seed, guarding against
    // unintended changes to the sampling algorithms (value-stability is
    // part of this crate's compatibility contract).
    fn value_stability() {
        // Samples three values via the one-shot sampler and three via a
        // distribution object, comparing each batch to golden values.
        fn test_samples<T: SampleUniform + Copy + core::fmt::Debug + PartialEq>(
            lb: T, ub: T, expected_single: &[T], expected_multiple: &[T],
        ) where Uniform<T>: Distribution<T> {
            let mut rng = crate::test::rng(897);
            let mut buf = [lb; 3];

            for x in &mut buf {
                *x = T::Sampler::sample_single(lb, ub, &mut rng);
            }
            assert_eq!(&buf, expected_single);

            let distr = Uniform::new(lb, ub);
            for x in &mut buf {
                *x = rng.sample(&distr);
            }
            assert_eq!(&buf, expected_multiple);
        }

        // We test on a sub-set of types; possibly we should do more.
        // TODO: SIMD types

        test_samples(11u8, 219, &[17, 66, 214], &[181, 93, 165]);
        test_samples(11u32, 219, &[17, 66, 214], &[181, 93, 165]);

        test_samples(0f32, 1e-2f32, &[0.0003070104, 0.0026630748, 0.00979833], &[
            0.008194133,
            0.00398172,
            0.007428536,
        ]);
        test_samples(
            -1e10f64,
            1e10f64,
            &[-4673848682.871551, 6388267422.932352, 4857075081.198343],
            &[1173375212.1808167, 1917642852.109581, 2365076174.3153973],
        );

        test_samples(
            Duration::new(2, 0),
            Duration::new(4, 0),
            &[
                Duration::new(2, 532615131),
                Duration::new(3, 638826742),
                Duration::new(3, 485707508),
            ],
            &[
                Duration::new(3, 117337521),
                Duration::new(3, 191764285),
                Duration::new(3, 236507617),
            ],
        );
    }
1653 }
1654