1 // Copyright 2018 Developers of the Rand project. 2 // Copyright 2017 The Rust Project Developers. 3 // 4 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or 5 // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license 6 // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your 7 // option. This file may not be copied, modified, or distributed 8 // except according to those terms. 9 10 //! A distribution uniformly sampling numbers within a given range. 11 //! 12 //! [`Uniform`] is the standard distribution to sample uniformly from a range; 13 //! e.g. `Uniform::new_inclusive(1, 6)` can sample integers from 1 to 6, like a 14 //! standard die. [`Rng::gen_range`] supports any type supported by 15 //! [`Uniform`]. 16 //! 17 //! This distribution is provided with support for several primitive types 18 //! (all integer and floating-point types) as well as [`std::time::Duration`], 19 //! and supports extension to user-defined types via a type-specific *back-end* 20 //! implementation. 21 //! 22 //! The types [`UniformInt`], [`UniformFloat`] and [`UniformDuration`] are the 23 //! back-ends supporting sampling from primitive integer and floating-point 24 //! ranges as well as from [`std::time::Duration`]; these types do not normally 25 //! need to be used directly (unless implementing a derived back-end). 26 //! 27 //! # Example usage 28 //! 29 //! ``` 30 //! use rand::{Rng, thread_rng}; 31 //! use rand::distributions::Uniform; 32 //! 33 //! let mut rng = thread_rng(); 34 //! let side = Uniform::new(-10.0, 10.0); 35 //! 36 //! // sample between 1 and 10 points 37 //! for _ in 0..rng.gen_range(1, 11) { 38 //! // sample a point from the square with sides -10 - 10 in two dimensions 39 //! let (x, y) = (rng.sample(side), rng.sample(side)); 40 //! println!("Point: {}, {}", x, y); 41 //! } 42 //! ``` 43 //! 44 //! # Extending `Uniform` to support a custom type 45 //! 46 //! 
To extend [`Uniform`] to support your own types, write a back-end which 47 //! implements the [`UniformSampler`] trait, then implement the [`SampleUniform`] 48 //! helper trait to "register" your back-end. See the `MyF32` example below. 49 //! 50 //! At a minimum, the back-end needs to store any parameters needed for sampling 51 //! (e.g. the target range) and implement `new`, `new_inclusive` and `sample`. 52 //! Those methods should include an assert to check the range is valid (i.e. 53 //! `low < high`). The example below merely wraps another back-end. 54 //! 55 //! The `new`, `new_inclusive` and `sample_single` functions use arguments of 56 //! type SampleBorrow<X> in order to support passing in values by reference or 57 //! by value. In the implementation of these functions, you can choose to 58 //! simply use the reference returned by [`SampleBorrow::borrow`], or you can choose 59 //! to copy or clone the value, whatever is appropriate for your type. 60 //! 61 //! ``` 62 //! use rand::prelude::*; 63 //! use rand::distributions::uniform::{Uniform, SampleUniform, 64 //! UniformSampler, UniformFloat, SampleBorrow}; 65 //! 66 //! struct MyF32(f32); 67 //! 68 //! #[derive(Clone, Copy, Debug)] 69 //! struct UniformMyF32 { 70 //! inner: UniformFloat<f32>, 71 //! } 72 //! 73 //! impl UniformSampler for UniformMyF32 { 74 //! type X = MyF32; 75 //! fn new<B1, B2>(low: B1, high: B2) -> Self 76 //! where B1: SampleBorrow<Self::X> + Sized, 77 //! B2: SampleBorrow<Self::X> + Sized 78 //! { 79 //! UniformMyF32 { 80 //! inner: UniformFloat::<f32>::new(low.borrow().0, high.borrow().0), 81 //! } 82 //! } 83 //! fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self 84 //! where B1: SampleBorrow<Self::X> + Sized, 85 //! B2: SampleBorrow<Self::X> + Sized 86 //! { 87 //! UniformSampler::new(low, high) 88 //! } 89 //! fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X { 90 //! MyF32(self.inner.sample(rng)) 91 //! } 92 //! } 93 //! 94 //! impl SampleUniform for MyF32 { 95 //! 
type Sampler = UniformMyF32; 96 //! } 97 //! 98 //! let (low, high) = (MyF32(17.0f32), MyF32(22.0f32)); 99 //! let uniform = Uniform::new(low, high); 100 //! let x = uniform.sample(&mut thread_rng()); 101 //! ``` 102 //! 103 //! [`SampleUniform`]: crate::distributions::uniform::SampleUniform 104 //! [`UniformSampler`]: crate::distributions::uniform::UniformSampler 105 //! [`UniformInt`]: crate::distributions::uniform::UniformInt 106 //! [`UniformFloat`]: crate::distributions::uniform::UniformFloat 107 //! [`UniformDuration`]: crate::distributions::uniform::UniformDuration 108 //! [`SampleBorrow::borrow`]: crate::distributions::uniform::SampleBorrow::borrow 109 110 #[cfg(feature = "std")] 111 use std::time::Duration; 112 #[cfg(all(not(feature = "std"), rustc_1_25))] 113 use core::time::Duration; 114 115 use Rng; 116 use distributions::Distribution; 117 use distributions::float::IntoFloat; 118 use distributions::utils::{WideningMultiply, FloatSIMDUtils, FloatAsSIMD, BoolAsSIMD}; 119 120 #[cfg(not(feature = "std"))] 121 #[allow(unused_imports)] // rustc doesn't detect that this is actually used 122 use distributions::utils::Float; 123 124 125 #[cfg(feature="simd_support")] 126 use packed_simd::*; 127 128 /// Sample values uniformly between two bounds. 129 /// 130 /// [`Uniform::new`] and [`Uniform::new_inclusive`] construct a uniform 131 /// distribution sampling from the given range; these functions may do extra 132 /// work up front to make sampling of multiple values faster. 133 /// 134 /// When sampling from a constant range, many calculations can happen at 135 /// compile-time and all methods should be fast; for floating-point ranges and 136 /// the full range of integer types this should have comparable performance to 137 /// the `Standard` distribution. 
138 /// 139 /// Steps are taken to avoid bias which might be present in naive 140 /// implementations; for example `rng.gen::<u8>() % 170` samples from the range 141 /// `[0, 169]` but is twice as likely to select numbers less than 85 than other 142 /// values. Further, the implementations here give more weight to the high-bits 143 /// generated by the RNG than the low bits, since with some RNGs the low-bits 144 /// are of lower quality than the high bits. 145 /// 146 /// Implementations must sample in `[low, high)` range for 147 /// `Uniform::new(low, high)`, i.e., excluding `high`. In particular care must 148 /// be taken to ensure that rounding never results values `< low` or `>= high`. 149 /// 150 /// # Example 151 /// 152 /// ``` 153 /// use rand::distributions::{Distribution, Uniform}; 154 /// 155 /// fn main() { 156 /// let between = Uniform::from(10..10000); 157 /// let mut rng = rand::thread_rng(); 158 /// let mut sum = 0; 159 /// for _ in 0..1000 { 160 /// sum += between.sample(&mut rng); 161 /// } 162 /// println!("{}", sum); 163 /// } 164 /// ``` 165 /// 166 /// [`new`]: Uniform::new 167 /// [`new_inclusive`]: Uniform::new_inclusive 168 #[derive(Clone, Copy, Debug)] 169 pub struct Uniform<X: SampleUniform> { 170 inner: X::Sampler, 171 } 172 173 impl<X: SampleUniform> Uniform<X> { 174 /// Create a new `Uniform` instance which samples uniformly from the half 175 /// open range `[low, high)` (excluding `high`). Panics if `low >= high`. new<B1, B2>(low: B1, high: B2) -> Uniform<X> where B1: SampleBorrow<X> + Sized, B2: SampleBorrow<X> + Sized176 pub fn new<B1, B2>(low: B1, high: B2) -> Uniform<X> 177 where B1: SampleBorrow<X> + Sized, 178 B2: SampleBorrow<X> + Sized 179 { 180 Uniform { inner: X::Sampler::new(low, high) } 181 } 182 183 /// Create a new `Uniform` instance which samples uniformly from the closed 184 /// range `[low, high]` (inclusive). Panics if `low > high`. 
new_inclusive<B1, B2>(low: B1, high: B2) -> Uniform<X> where B1: SampleBorrow<X> + Sized, B2: SampleBorrow<X> + Sized185 pub fn new_inclusive<B1, B2>(low: B1, high: B2) -> Uniform<X> 186 where B1: SampleBorrow<X> + Sized, 187 B2: SampleBorrow<X> + Sized 188 { 189 Uniform { inner: X::Sampler::new_inclusive(low, high) } 190 } 191 } 192 193 impl<X: SampleUniform> Distribution<X> for Uniform<X> { sample<R: Rng + ?Sized>(&self, rng: &mut R) -> X194 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> X { 195 self.inner.sample(rng) 196 } 197 } 198 199 /// Helper trait for creating objects using the correct implementation of 200 /// [`UniformSampler`] for the sampling type. 201 /// 202 /// See the [module documentation] on how to implement [`Uniform`] range 203 /// sampling for a custom type. 204 /// 205 /// [module documentation]: crate::distributions::uniform 206 pub trait SampleUniform: Sized { 207 /// The `UniformSampler` implementation supporting type `X`. 208 type Sampler: UniformSampler<X = Self>; 209 } 210 211 /// Helper trait handling actual uniform sampling. 212 /// 213 /// See the [module documentation] on how to implement [`Uniform`] range 214 /// sampling for a custom type. 215 /// 216 /// Implementation of [`sample_single`] is optional, and is only useful when 217 /// the implementation can be faster than `Self::new(low, high).sample(rng)`. 218 /// 219 /// [module documentation]: crate::distributions::uniform 220 /// [`sample_single`]: UniformSampler::sample_single 221 pub trait UniformSampler: Sized { 222 /// The type sampled by this implementation. 223 type X; 224 225 /// Construct self, with inclusive lower bound and exclusive upper bound 226 /// `[low, high)`. 227 /// 228 /// Usually users should not call this directly but instead use 229 /// `Uniform::new`, which asserts that `low < high` before calling this. 
new<B1, B2>(low: B1, high: B2) -> Self where B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> + Sized230 fn new<B1, B2>(low: B1, high: B2) -> Self 231 where B1: SampleBorrow<Self::X> + Sized, 232 B2: SampleBorrow<Self::X> + Sized; 233 234 /// Construct self, with inclusive bounds `[low, high]`. 235 /// 236 /// Usually users should not call this directly but instead use 237 /// `Uniform::new_inclusive`, which asserts that `low <= high` before 238 /// calling this. new_inclusive<B1, B2>(low: B1, high: B2) -> Self where B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> + Sized239 fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self 240 where B1: SampleBorrow<Self::X> + Sized, 241 B2: SampleBorrow<Self::X> + Sized; 242 243 /// Sample a value. sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X244 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X; 245 246 /// Sample a single value uniformly from a range with inclusive lower bound 247 /// and exclusive upper bound `[low, high)`. 248 /// 249 /// Usually users should not call this directly but instead use 250 /// `Uniform::sample_single`, which asserts that `low < high` before calling 251 /// this. 252 /// 253 /// Via this method, implementations can provide a method optimized for 254 /// sampling only a single value from the specified range. The default 255 /// implementation simply calls `UniformSampler::new` then `sample` on the 256 /// result. 
sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X where B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> + Sized257 fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) 258 -> Self::X 259 where B1: SampleBorrow<Self::X> + Sized, 260 B2: SampleBorrow<Self::X> + Sized 261 { 262 let uniform: Self = UniformSampler::new(low, high); 263 uniform.sample(rng) 264 } 265 } 266 267 impl<X: SampleUniform> From<::core::ops::Range<X>> for Uniform<X> { from(r: ::core::ops::Range<X>) -> Uniform<X>268 fn from(r: ::core::ops::Range<X>) -> Uniform<X> { 269 Uniform::new(r.start, r.end) 270 } 271 } 272 273 #[cfg(rustc_1_27)] 274 impl<X: SampleUniform> From<::core::ops::RangeInclusive<X>> for Uniform<X> { from(r: ::core::ops::RangeInclusive<X>) -> Uniform<X>275 fn from(r: ::core::ops::RangeInclusive<X>) -> Uniform<X> { 276 Uniform::new_inclusive(r.start(), r.end()) 277 } 278 } 279 280 /// Helper trait similar to [`Borrow`] but implemented 281 /// only for SampleUniform and references to SampleUniform in 282 /// order to resolve ambiguity issues. 283 /// 284 /// [`Borrow`]: std::borrow::Borrow 285 pub trait SampleBorrow<Borrowed> { 286 /// Immutably borrows from an owned value. See [`Borrow::borrow`] 287 /// 288 /// [`Borrow::borrow`]: std::borrow::Borrow::borrow borrow(&self) -> &Borrowed289 fn borrow(&self) -> &Borrowed; 290 } 291 impl<Borrowed> SampleBorrow<Borrowed> for Borrowed where Borrowed: SampleUniform { 292 #[inline(always)] borrow(&self) -> &Borrowed293 fn borrow(&self) -> &Borrowed { self } 294 } 295 impl<'a, Borrowed> SampleBorrow<Borrowed> for &'a Borrowed where Borrowed: SampleUniform { 296 #[inline(always)] borrow(&self) -> &Borrowed297 fn borrow(&self) -> &Borrowed { *self } 298 } 299 300 //////////////////////////////////////////////////////////////////////////////// 301 302 // What follows are all back-ends. 303 304 305 /// The back-end implementing [`UniformSampler`] for integer types. 
306 /// 307 /// Unless you are implementing [`UniformSampler`] for your own type, this type 308 /// should not be used directly, use [`Uniform`] instead. 309 /// 310 /// # Implementation notes 311 /// 312 /// For a closed range, the number of possible numbers we should generate is 313 /// `range = (high - low + 1)`. It is not possible to end up with a uniform 314 /// distribution if we map *all* the random integers that can be generated to 315 /// this range. We have to map integers from a `zone` that is a multiple of the 316 /// range. The rest of the integers, that cause a bias, are rejected. 317 /// 318 /// The problem with `range` is that to cover the full range of the type, it has 319 /// to store `unsigned_max + 1`, which can't be represented. But if the range 320 /// covers the full range of the type, no modulus is needed. A range of size 0 321 /// can't exist, so we use that to represent this special case. Wrapping 322 /// arithmetic even makes representing `unsigned_max + 1` as 0 simple. 323 /// 324 /// We don't calculate `zone` directly, but first calculate the number of 325 /// integers to reject. To handle `unsigned_max + 1` not fitting in the type, 326 /// we use: 327 /// `ints_to_reject = (unsigned_max + 1) % range;` 328 /// `ints_to_reject = (unsigned_max - range + 1) % range;` 329 /// 330 /// The smallest integer PRNGs generate is `u32`. That is why for small integer 331 /// sizes (`i8`/`u8` and `i16`/`u16`) there is an optimization: don't pick the 332 /// largest zone that can fit in the small type, but pick the largest zone that 333 /// can fit in an `u32`. `ints_to_reject` is always less than half the size of 334 /// the small integer. This means the first bit of `zone` is always 1, and so 335 /// are all the other preceding bits of a larger integer. The easiest way to 336 /// grow the `zone` for the larger type is to simply sign extend it. 
337 /// 338 /// An alternative to using a modulus is widening multiply: After a widening 339 /// multiply by `range`, the result is in the high word. Then comparing the low 340 /// word against `zone` makes sure our distribution is uniform. 341 #[derive(Clone, Copy, Debug)] 342 pub struct UniformInt<X> { 343 low: X, 344 range: X, 345 zone: X, 346 } 347 348 macro_rules! uniform_int_impl { 349 ($ty:ty, $signed:ty, $unsigned:ident, 350 $i_large:ident, $u_large:ident) => { 351 impl SampleUniform for $ty { 352 type Sampler = UniformInt<$ty>; 353 } 354 355 impl UniformSampler for UniformInt<$ty> { 356 // We play free and fast with unsigned vs signed here 357 // (when $ty is signed), but that's fine, since the 358 // contract of this macro is for $ty and $unsigned to be 359 // "bit-equal", so casting between them is a no-op. 360 361 type X = $ty; 362 363 #[inline] // if the range is constant, this helps LLVM to do the 364 // calculations at compile-time. 365 fn new<B1, B2>(low_b: B1, high_b: B2) -> Self 366 where B1: SampleBorrow<Self::X> + Sized, 367 B2: SampleBorrow<Self::X> + Sized 368 { 369 let low = *low_b.borrow(); 370 let high = *high_b.borrow(); 371 assert!(low < high, "Uniform::new called with `low >= high`"); 372 UniformSampler::new_inclusive(low, high - 1) 373 } 374 375 #[inline] // if the range is constant, this helps LLVM to do the 376 // calculations at compile-time. 
377 fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self 378 where B1: SampleBorrow<Self::X> + Sized, 379 B2: SampleBorrow<Self::X> + Sized 380 { 381 let low = *low_b.borrow(); 382 let high = *high_b.borrow(); 383 assert!(low <= high, 384 "Uniform::new_inclusive called with `low > high`"); 385 let unsigned_max = ::core::$unsigned::MAX; 386 387 let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned; 388 let ints_to_reject = 389 if range > 0 { 390 (unsigned_max - range + 1) % range 391 } else { 392 0 393 }; 394 let zone = unsigned_max - ints_to_reject; 395 396 UniformInt { 397 low: low, 398 // These are really $unsigned values, but store as $ty: 399 range: range as $ty, 400 zone: zone as $ty 401 } 402 } 403 404 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X { 405 let range = self.range as $unsigned as $u_large; 406 if range > 0 { 407 // Grow `zone` to fit a type of at least 32 bits, by 408 // sign-extending it (the first bit is always 1, so are all 409 // the preceding bits of the larger type). 410 // For types that already have the right size, all the 411 // casting is a no-op. 412 let zone = self.zone as $signed as $i_large as $u_large; 413 loop { 414 let v: $u_large = rng.gen(); 415 let (hi, lo) = v.wmul(range); 416 if lo <= zone { 417 return self.low.wrapping_add(hi as $ty); 418 } 419 } 420 } else { 421 // Sample from the entire integer range. 422 rng.gen() 423 } 424 } 425 426 fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) 427 -> Self::X 428 where B1: SampleBorrow<Self::X> + Sized, 429 B2: SampleBorrow<Self::X> + Sized 430 { 431 let low = *low_b.borrow(); 432 let high = *high_b.borrow(); 433 assert!(low < high, 434 "Uniform::sample_single called with low >= high"); 435 let range = high.wrapping_sub(low) as $unsigned as $u_large; 436 let zone = 437 if ::core::$unsigned::MAX <= ::core::u16::MAX as $unsigned { 438 // Using a modulus is faster than the approximation for 439 // i8 and i16. 
I suppose we trade the cost of one 440 // modulus for near-perfect branch prediction. 441 let unsigned_max: $u_large = ::core::$u_large::MAX; 442 let ints_to_reject = (unsigned_max - range + 1) % range; 443 unsigned_max - ints_to_reject 444 } else { 445 // conservative but fast approximation. `- 1` is necessary to allow the 446 // same comparison without bias. 447 (range << range.leading_zeros()).wrapping_sub(1) 448 }; 449 450 loop { 451 let v: $u_large = rng.gen(); 452 let (hi, lo) = v.wmul(range); 453 if lo <= zone { 454 return low.wrapping_add(hi as $ty); 455 } 456 } 457 } 458 } 459 } 460 } 461 462 uniform_int_impl! { i8, i8, u8, i32, u32 } 463 uniform_int_impl! { i16, i16, u16, i32, u32 } 464 uniform_int_impl! { i32, i32, u32, i32, u32 } 465 uniform_int_impl! { i64, i64, u64, i64, u64 } 466 #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] 467 uniform_int_impl! { i128, i128, u128, u128, u128 } 468 uniform_int_impl! { isize, isize, usize, isize, usize } 469 uniform_int_impl! { u8, i8, u8, i32, u32 } 470 uniform_int_impl! { u16, i16, u16, i32, u32 } 471 uniform_int_impl! { u32, i32, u32, i32, u32 } 472 uniform_int_impl! { u64, i64, u64, i64, u64 } 473 uniform_int_impl! { usize, isize, usize, isize, usize } 474 #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] 475 uniform_int_impl! { u128, u128, u128, i128, u128 } 476 477 #[cfg(all(feature = "simd_support", feature = "nightly"))] 478 macro_rules! uniform_simd_int_impl { 479 ($ty:ident, $unsigned:ident, $u_scalar:ident) => { 480 // The "pick the largest zone that can fit in an `u32`" optimization 481 // is less useful here. Multiple lanes complicate things, we don't 482 // know the PRNG's minimal output size, and casting to a larger vector 483 // is generally a bad idea for SIMD performance. The user can still 484 // implement it manually. 485 486 // TODO: look into `Uniform::<u32x4>::new(0u32, 100)` functionality 487 // perhaps `impl SampleUniform for $u_scalar`? 
488 impl SampleUniform for $ty { 489 type Sampler = UniformInt<$ty>; 490 } 491 492 impl UniformSampler for UniformInt<$ty> { 493 type X = $ty; 494 495 #[inline] // if the range is constant, this helps LLVM to do the 496 // calculations at compile-time. 497 fn new<B1, B2>(low_b: B1, high_b: B2) -> Self 498 where B1: SampleBorrow<Self::X> + Sized, 499 B2: SampleBorrow<Self::X> + Sized 500 { 501 let low = *low_b.borrow(); 502 let high = *high_b.borrow(); 503 assert!(low.lt(high).all(), "Uniform::new called with `low >= high`"); 504 UniformSampler::new_inclusive(low, high - 1) 505 } 506 507 #[inline] // if the range is constant, this helps LLVM to do the 508 // calculations at compile-time. 509 fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self 510 where B1: SampleBorrow<Self::X> + Sized, 511 B2: SampleBorrow<Self::X> + Sized 512 { 513 let low = *low_b.borrow(); 514 let high = *high_b.borrow(); 515 assert!(low.le(high).all(), 516 "Uniform::new_inclusive called with `low > high`"); 517 let unsigned_max = ::core::$u_scalar::MAX; 518 519 // NOTE: these may need to be replaced with explicitly 520 // wrapping operations if `packed_simd` changes 521 let range: $unsigned = ((high - low) + 1).cast(); 522 // `% 0` will panic at runtime. 523 let not_full_range = range.gt($unsigned::splat(0)); 524 // replacing 0 with `unsigned_max` allows a faster `select` 525 // with bitwise OR 526 let modulo = not_full_range.select(range, $unsigned::splat(unsigned_max)); 527 // wrapping addition 528 let ints_to_reject = (unsigned_max - range + 1) % modulo; 529 // When `range` is 0, `lo` of `v.wmul(range)` will always be 530 // zero which means only one sample is needed. 
531 let zone = unsigned_max - ints_to_reject; 532 533 UniformInt { 534 low: low, 535 // These are really $unsigned values, but store as $ty: 536 range: range.cast(), 537 zone: zone.cast(), 538 } 539 } 540 541 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X { 542 let range: $unsigned = self.range.cast(); 543 let zone: $unsigned = self.zone.cast(); 544 545 // This might seem very slow, generating a whole new 546 // SIMD vector for every sample rejection. For most uses 547 // though, the chance of rejection is small and provides good 548 // general performance. With multiple lanes, that chance is 549 // multiplied. To mitigate this, we replace only the lanes of 550 // the vector which fail, iteratively reducing the chance of 551 // rejection. The replacement method does however add a little 552 // overhead. Benchmarking or calculating probabilities might 553 // reveal contexts where this replacement method is slower. 554 let mut v: $unsigned = rng.gen(); 555 loop { 556 let (hi, lo) = v.wmul(range); 557 let mask = lo.le(zone); 558 if mask.all() { 559 let hi: $ty = hi.cast(); 560 // wrapping addition 561 let result = self.low + hi; 562 // `select` here compiles to a blend operation 563 // When `range.eq(0).none()` the compare and blend 564 // operations are avoided. 565 let v: $ty = v.cast(); 566 return range.gt($unsigned::splat(0)).select(result, v); 567 } 568 // Replace only the failing lanes 569 v = mask.select(v, rng.gen()); 570 } 571 } 572 } 573 }; 574 575 // bulk implementation 576 ($(($unsigned:ident, $signed:ident),)+ $u_scalar:ident) => { 577 $( 578 uniform_simd_int_impl!($unsigned, $unsigned, $u_scalar); 579 uniform_simd_int_impl!($signed, $unsigned, $u_scalar); 580 )+ 581 }; 582 } 583 584 #[cfg(all(feature = "simd_support", feature = "nightly"))] 585 uniform_simd_int_impl! { 586 (u64x2, i64x2), 587 (u64x4, i64x4), 588 (u64x8, i64x8), 589 u64 590 } 591 592 #[cfg(all(feature = "simd_support", feature = "nightly"))] 593 uniform_simd_int_impl! 
{ 594 (u32x2, i32x2), 595 (u32x4, i32x4), 596 (u32x8, i32x8), 597 (u32x16, i32x16), 598 u32 599 } 600 601 #[cfg(all(feature = "simd_support", feature = "nightly"))] 602 uniform_simd_int_impl! { 603 (u16x2, i16x2), 604 (u16x4, i16x4), 605 (u16x8, i16x8), 606 (u16x16, i16x16), 607 (u16x32, i16x32), 608 u16 609 } 610 611 #[cfg(all(feature = "simd_support", feature = "nightly"))] 612 uniform_simd_int_impl! { 613 (u8x2, i8x2), 614 (u8x4, i8x4), 615 (u8x8, i8x8), 616 (u8x16, i8x16), 617 (u8x32, i8x32), 618 (u8x64, i8x64), 619 u8 620 } 621 622 623 /// The back-end implementing [`UniformSampler`] for floating-point types. 624 /// 625 /// Unless you are implementing [`UniformSampler`] for your own type, this type 626 /// should not be used directly, use [`Uniform`] instead. 627 /// 628 /// # Implementation notes 629 /// 630 /// Instead of generating a float in the `[0, 1)` range using [`Standard`], the 631 /// `UniformFloat` implementation converts the output of an PRNG itself. This 632 /// way one or two steps can be optimized out. 633 /// 634 /// The floats are first converted to a value in the `[1, 2)` interval using a 635 /// transmute-based method, and then mapped to the expected range with a 636 /// multiply and addition. Values produced this way have what equals 22 bits of 637 /// random digits for an `f32`, and 52 for an `f64`. 638 /// 639 /// [`new`]: UniformSampler::new 640 /// [`new_inclusive`]: UniformSampler::new_inclusive 641 /// [`Standard`]: crate::distributions::Standard 642 #[derive(Clone, Copy, Debug)] 643 pub struct UniformFloat<X> { 644 low: X, 645 scale: X, 646 } 647 648 macro_rules! 
uniform_float_impl { 649 ($ty:ty, $uty:ident, $f_scalar:ident, $u_scalar:ident, $bits_to_discard:expr) => { 650 impl SampleUniform for $ty { 651 type Sampler = UniformFloat<$ty>; 652 } 653 654 impl UniformSampler for UniformFloat<$ty> { 655 type X = $ty; 656 657 fn new<B1, B2>(low_b: B1, high_b: B2) -> Self 658 where B1: SampleBorrow<Self::X> + Sized, 659 B2: SampleBorrow<Self::X> + Sized 660 { 661 let low = *low_b.borrow(); 662 let high = *high_b.borrow(); 663 assert!(low.all_lt(high), 664 "Uniform::new called with `low >= high`"); 665 assert!(low.all_finite() && high.all_finite(), 666 "Uniform::new called with non-finite boundaries"); 667 let max_rand = <$ty>::splat((::core::$u_scalar::MAX >> $bits_to_discard) 668 .into_float_with_exponent(0) - 1.0); 669 670 let mut scale = high - low; 671 672 loop { 673 let mask = (scale * max_rand + low).ge_mask(high); 674 if mask.none() { 675 break; 676 } 677 scale = scale.decrease_masked(mask); 678 } 679 680 debug_assert!(<$ty>::splat(0.0).all_le(scale)); 681 682 UniformFloat { low, scale } 683 } 684 685 fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self 686 where B1: SampleBorrow<Self::X> + Sized, 687 B2: SampleBorrow<Self::X> + Sized 688 { 689 let low = *low_b.borrow(); 690 let high = *high_b.borrow(); 691 assert!(low.all_le(high), 692 "Uniform::new_inclusive called with `low > high`"); 693 assert!(low.all_finite() && high.all_finite(), 694 "Uniform::new_inclusive called with non-finite boundaries"); 695 let max_rand = <$ty>::splat((::core::$u_scalar::MAX >> $bits_to_discard) 696 .into_float_with_exponent(0) - 1.0); 697 698 let mut scale = (high - low) / max_rand; 699 700 loop { 701 let mask = (scale * max_rand + low).gt_mask(high); 702 if mask.none() { 703 break; 704 } 705 scale = scale.decrease_masked(mask); 706 } 707 708 debug_assert!(<$ty>::splat(0.0).all_le(scale)); 709 710 UniformFloat { low, scale } 711 } 712 713 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X { 714 // Generate a value in the range [1, 
2) 715 let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard) 716 .into_float_with_exponent(0); 717 718 // Get a value in the range [0, 1) in order to avoid 719 // overflowing into infinity when multiplying with scale 720 let value0_1 = value1_2 - 1.0; 721 722 // We don't use `f64::mul_add`, because it is not available with 723 // `no_std`. Furthermore, it is slower for some targets (but 724 // faster for others). However, the order of multiplication and 725 // addition is important, because on some platforms (e.g. ARM) 726 // it will be optimized to a single (non-FMA) instruction. 727 value0_1 * self.scale + self.low 728 } 729 730 #[inline] 731 fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) 732 -> Self::X 733 where B1: SampleBorrow<Self::X> + Sized, 734 B2: SampleBorrow<Self::X> + Sized 735 { 736 let low = *low_b.borrow(); 737 let high = *high_b.borrow(); 738 assert!(low.all_lt(high), 739 "Uniform::sample_single called with low >= high"); 740 let mut scale = high - low; 741 742 loop { 743 // Generate a value in the range [1, 2) 744 let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard) 745 .into_float_with_exponent(0); 746 747 // Get a value in the range [0, 1) in order to avoid 748 // overflowing into infinity when multiplying with scale 749 let value0_1 = value1_2 - 1.0; 750 751 // Doing multiply before addition allows some architectures 752 // to use a single instruction. 753 let res = value0_1 * scale + low; 754 755 debug_assert!(low.all_le(res) || !scale.all_finite()); 756 if res.all_lt(high) { 757 return res; 758 } 759 760 // This handles a number of edge cases. 761 // * `low` or `high` is NaN. In this case `scale` and 762 // `res` are going to end up as NaN. 763 // * `low` is negative infinity and `high` is finite. 764 // `scale` is going to be infinite and `res` will be 765 // NaN. 766 // * `high` is positive infinity and `low` is finite. 
767 // `scale` is going to be infinite and `res` will 768 // be infinite or NaN (if value0_1 is 0). 769 // * `low` is negative infinity and `high` is positive 770 // infinity. `scale` will be infinite and `res` will 771 // be NaN. 772 // * `low` and `high` are finite, but `high - low` 773 // overflows to infinite. `scale` will be infinite 774 // and `res` will be infinite or NaN (if value0_1 is 0). 775 // So if `high` or `low` are non-finite, we are guaranteed 776 // to fail the `res < high` check above and end up here. 777 // 778 // While we technically should check for non-finite `low` 779 // and `high` before entering the loop, by doing the checks 780 // here instead, we allow the common case to avoid these 781 // checks. But we are still guaranteed that if `low` or 782 // `high` are non-finite we'll end up here and can do the 783 // appropriate checks. 784 // 785 // Likewise `high - low` overflowing to infinity is also 786 // rare, so handle it here after the common case. 787 let mask = !scale.finite_mask(); 788 if mask.any() { 789 assert!(low.all_finite() && high.all_finite(), 790 "Uniform::sample_single called with non-finite boundaries"); 791 scale = scale.decrease_masked(mask); 792 } 793 } 794 } 795 } 796 } 797 } 798 799 uniform_float_impl! { f32, u32, f32, u32, 32 - 23 } 800 uniform_float_impl! { f64, u64, f64, u64, 64 - 52 } 801 802 #[cfg(feature="simd_support")] 803 uniform_float_impl! { f32x2, u32x2, f32, u32, 32 - 23 } 804 #[cfg(feature="simd_support")] 805 uniform_float_impl! { f32x4, u32x4, f32, u32, 32 - 23 } 806 #[cfg(feature="simd_support")] 807 uniform_float_impl! { f32x8, u32x8, f32, u32, 32 - 23 } 808 #[cfg(feature="simd_support")] 809 uniform_float_impl! { f32x16, u32x16, f32, u32, 32 - 23 } 810 811 #[cfg(feature="simd_support")] 812 uniform_float_impl! { f64x2, u64x2, f64, u64, 64 - 52 } 813 #[cfg(feature="simd_support")] 814 uniform_float_impl! { f64x4, u64x4, f64, u64, 64 - 52 } 815 #[cfg(feature="simd_support")] 816 uniform_float_impl! 
#[cfg(feature="simd_support")]
uniform_float_impl! { f64x8, u64x8, f64, u64, 64 - 52 }



/// The back-end implementing [`UniformSampler`] for `Duration`.
///
/// Unless you are implementing [`UniformSampler`] for your own types, this type
/// should not be used directly, use [`Uniform`] instead.
#[cfg(any(feature = "std", rustc_1_25))]
#[derive(Clone, Copy, Debug)]
pub struct UniformDuration {
    mode: UniformDurationMode,
    offset: u32,
}

#[cfg(any(feature = "std", rustc_1_25))]
#[derive(Debug, Copy, Clone)]
enum UniformDurationMode {
    Small {
        secs: u64,
        nanos: Uniform<u32>,
    },
    Medium {
        nanos: Uniform<u64>,
    },
    Large {
        max_secs: u64,
        max_nanos: u32,
        secs: Uniform<u64>,
    }
}

#[cfg(any(feature = "std", rustc_1_25))]
impl SampleUniform for Duration {
    type Sampler = UniformDuration;
}

#[cfg(any(feature = "std", rustc_1_25))]
impl UniformSampler for UniformDuration {
    type X = Duration;

    #[inline]
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
        where B1: SampleBorrow<Self::X> + Sized,
              B2: SampleBorrow<Self::X> + Sized
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        assert!(low < high, "Uniform::new called with `low >= high`");
        UniformDuration::new_inclusive(low, high - Duration::new(0, 1))
    }

    #[inline]
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
        where B1: SampleBorrow<Self::X> + Sized,
              B2: SampleBorrow<Self::X> + Sized
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        assert!(low <= high, "Uniform::new_inclusive called with `low > high`");

        let low_s = low.as_secs();
        let low_n = low.subsec_nanos();
        let mut high_s = high.as_secs();
        let mut high_n = high.subsec_nanos();

        if high_n < low_n {
            high_s = high_s - 1;
            high_n = high_n + 1_000_000_000;
        }

        let mode = if low_s == high_s {
            UniformDurationMode::Small {
                secs: low_s,
                nanos: Uniform::new_inclusive(low_n, high_n),
            }
        } else {
            let max = high_s
                .checked_mul(1_000_000_000)
                .and_then(|n| n.checked_add(high_n as u64));

            if let Some(higher_bound) = max {
                let lower_bound = low_s * 1_000_000_000 + low_n as u64;
                UniformDurationMode::Medium {
                    nanos: Uniform::new_inclusive(lower_bound, higher_bound),
                }
            } else {
                // An offset is applied to simplify generation of nanoseconds
                let max_nanos = high_n - low_n;
                UniformDurationMode::Large {
                    max_secs: high_s,
                    max_nanos,
                    secs: Uniform::new_inclusive(low_s, high_s),
                }
            }
        };
        UniformDuration {
            mode,
            offset: low_n,
        }
    }

    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Duration {
        match self.mode {
            UniformDurationMode::Small { secs, nanos } => {
                let n = nanos.sample(rng);
                Duration::new(secs, n)
            }
            UniformDurationMode::Medium { nanos } => {
                let nanos = nanos.sample(rng);
                Duration::new(nanos / 1_000_000_000, (nanos % 1_000_000_000) as u32)
            }
            UniformDurationMode::Large { max_secs, max_nanos, secs } => {
                // constant folding means this is at least as fast as `gen_range`
                let nano_range = Uniform::new(0, 1_000_000_000);
                loop {
                    let s = secs.sample(rng);
                    let n = nano_range.sample(rng);
                    if !(s == max_secs && n > max_nanos) {
                        let sum = n + self.offset;
                        break Duration::new(s, sum);
                    }
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use Rng;
    use rngs::mock::StepRng;
    use distributions::uniform::Uniform;
    use distributions::utils::FloatAsSIMD;
    #[cfg(feature="simd_support")] use packed_simd::*;
952 953 #[should_panic] 954 #[test] test_uniform_bad_limits_equal_int()955 fn test_uniform_bad_limits_equal_int() { 956 Uniform::new(10, 10); 957 } 958 959 #[test] test_uniform_good_limits_equal_int()960 fn test_uniform_good_limits_equal_int() { 961 let mut rng = ::test::rng(804); 962 let dist = Uniform::new_inclusive(10, 10); 963 for _ in 0..20 { 964 assert_eq!(rng.sample(dist), 10); 965 } 966 } 967 968 #[should_panic] 969 #[test] test_uniform_bad_limits_flipped_int()970 fn test_uniform_bad_limits_flipped_int() { 971 Uniform::new(10, 5); 972 } 973 974 #[test] test_integers()975 fn test_integers() { 976 use core::{i8, i16, i32, i64, isize}; 977 use core::{u8, u16, u32, u64, usize}; 978 #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] 979 use core::{i128, u128}; 980 981 let mut rng = ::test::rng(251); 982 macro_rules! t { 983 ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{ 984 for &(low, high) in $v.iter() { 985 let my_uniform = Uniform::new(low, high); 986 for _ in 0..1000 { 987 let v: $ty = rng.sample(my_uniform); 988 assert!($le(low, v) && $lt(v, high)); 989 } 990 991 let my_uniform = Uniform::new_inclusive(low, high); 992 for _ in 0..1000 { 993 let v: $ty = rng.sample(my_uniform); 994 assert!($le(low, v) && $le(v, high)); 995 } 996 997 let my_uniform = Uniform::new(&low, high); 998 for _ in 0..1000 { 999 let v: $ty = rng.sample(my_uniform); 1000 assert!($le(low, v) && $lt(v, high)); 1001 } 1002 1003 let my_uniform = Uniform::new_inclusive(&low, &high); 1004 for _ in 0..1000 { 1005 let v: $ty = rng.sample(my_uniform); 1006 assert!($le(low, v) && $le(v, high)); 1007 } 1008 1009 for _ in 0..1000 { 1010 let v: $ty = rng.gen_range(low, high); 1011 assert!($le(low, v) && $lt(v, high)); 1012 } 1013 } 1014 }}; 1015 1016 // scalar bulk 1017 ($($ty:ident),*) => {{ 1018 $(t!( 1019 $ty, 1020 [(0, 10), (10, 127), ($ty::MIN, $ty::MAX)], 1021 |x, y| x <= y, 1022 |x, y| x < y 1023 );)* 1024 }}; 1025 1026 // simd bulk 1027 ($($ty:ident),* => $scalar:ident) => {{ 1028 $(t!( 
1029 $ty, 1030 [ 1031 ($ty::splat(0), $ty::splat(10)), 1032 ($ty::splat(10), $ty::splat(127)), 1033 ($ty::splat($scalar::MIN), $ty::splat($scalar::MAX)), 1034 ], 1035 |x: $ty, y| x.le(y).all(), 1036 |x: $ty, y| x.lt(y).all() 1037 );)* 1038 }}; 1039 } 1040 t!(i8, i16, i32, i64, isize, 1041 u8, u16, u32, u64, usize); 1042 #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] 1043 t!(i128, u128); 1044 1045 #[cfg(all(feature = "simd_support", feature = "nightly"))] 1046 { 1047 t!(u8x2, u8x4, u8x8, u8x16, u8x32, u8x64 => u8); 1048 t!(i8x2, i8x4, i8x8, i8x16, i8x32, i8x64 => i8); 1049 t!(u16x2, u16x4, u16x8, u16x16, u16x32 => u16); 1050 t!(i16x2, i16x4, i16x8, i16x16, i16x32 => i16); 1051 t!(u32x2, u32x4, u32x8, u32x16 => u32); 1052 t!(i32x2, i32x4, i32x8, i32x16 => i32); 1053 t!(u64x2, u64x4, u64x8 => u64); 1054 t!(i64x2, i64x4, i64x8 => i64); 1055 } 1056 } 1057 1058 #[test] test_floats()1059 fn test_floats() { 1060 let mut rng = ::test::rng(252); 1061 let mut zero_rng = StepRng::new(0, 0); 1062 let mut max_rng = StepRng::new(0xffff_ffff_ffff_ffff, 0); 1063 macro_rules! 
t { 1064 ($ty:ty, $f_scalar:ident, $bits_shifted:expr) => {{ 1065 let v: &[($f_scalar, $f_scalar)]= 1066 &[(0.0, 100.0), 1067 (-1e35, -1e25), 1068 (1e-35, 1e-25), 1069 (-1e35, 1e35), 1070 (<$f_scalar>::from_bits(0), <$f_scalar>::from_bits(3)), 1071 (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)), 1072 (-<$f_scalar>::from_bits(5), 0.0), 1073 (-<$f_scalar>::from_bits(7), -0.0), 1074 (10.0, ::core::$f_scalar::MAX), 1075 (-100.0, ::core::$f_scalar::MAX), 1076 (-::core::$f_scalar::MAX / 5.0, ::core::$f_scalar::MAX), 1077 (-::core::$f_scalar::MAX, ::core::$f_scalar::MAX / 5.0), 1078 (-::core::$f_scalar::MAX * 0.8, ::core::$f_scalar::MAX * 0.7), 1079 (-::core::$f_scalar::MAX, ::core::$f_scalar::MAX), 1080 ]; 1081 for &(low_scalar, high_scalar) in v.iter() { 1082 for lane in 0..<$ty>::lanes() { 1083 let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar); 1084 let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar); 1085 let my_uniform = Uniform::new(low, high); 1086 let my_incl_uniform = Uniform::new_inclusive(low, high); 1087 for _ in 0..100 { 1088 let v = rng.sample(my_uniform).extract(lane); 1089 assert!(low_scalar <= v && v < high_scalar); 1090 let v = rng.sample(my_incl_uniform).extract(lane); 1091 assert!(low_scalar <= v && v <= high_scalar); 1092 let v = rng.gen_range(low, high).extract(lane); 1093 assert!(low_scalar <= v && v < high_scalar); 1094 } 1095 1096 assert_eq!(rng.sample(Uniform::new_inclusive(low, low)).extract(lane), low_scalar); 1097 1098 assert_eq!(zero_rng.sample(my_uniform).extract(lane), low_scalar); 1099 assert_eq!(zero_rng.sample(my_incl_uniform).extract(lane), low_scalar); 1100 assert_eq!(zero_rng.gen_range(low, high).extract(lane), low_scalar); 1101 assert!(max_rng.sample(my_uniform).extract(lane) < high_scalar); 1102 assert!(max_rng.sample(my_incl_uniform).extract(lane) <= high_scalar); 1103 1104 // Don't run this test for really tiny differences between high and low 1105 // since for those rounding might 
result in selecting high for a very 1106 // long time. 1107 if (high_scalar - low_scalar) > 0.0001 { 1108 let mut lowering_max_rng = 1109 StepRng::new(0xffff_ffff_ffff_ffff, 1110 (-1i64 << $bits_shifted) as u64); 1111 assert!(lowering_max_rng.gen_range(low, high).extract(lane) < high_scalar); 1112 } 1113 } 1114 } 1115 1116 assert_eq!(rng.sample(Uniform::new_inclusive(::core::$f_scalar::MAX, 1117 ::core::$f_scalar::MAX)), 1118 ::core::$f_scalar::MAX); 1119 assert_eq!(rng.sample(Uniform::new_inclusive(-::core::$f_scalar::MAX, 1120 -::core::$f_scalar::MAX)), 1121 -::core::$f_scalar::MAX); 1122 }} 1123 } 1124 1125 t!(f32, f32, 32 - 23); 1126 t!(f64, f64, 64 - 52); 1127 #[cfg(feature="simd_support")] 1128 { 1129 t!(f32x2, f32, 32 - 23); 1130 t!(f32x4, f32, 32 - 23); 1131 t!(f32x8, f32, 32 - 23); 1132 t!(f32x16, f32, 32 - 23); 1133 t!(f64x2, f64, 64 - 52); 1134 t!(f64x4, f64, 64 - 52); 1135 t!(f64x8, f64, 64 - 52); 1136 } 1137 } 1138 1139 #[test] 1140 #[cfg(all(feature="std", 1141 not(target_arch = "wasm32"), 1142 not(target_arch = "asmjs")))] test_float_assertions()1143 fn test_float_assertions() { 1144 use std::panic::catch_unwind; 1145 use super::SampleUniform; 1146 fn range<T: SampleUniform>(low: T, high: T) { 1147 let mut rng = ::test::rng(253); 1148 rng.gen_range(low, high); 1149 } 1150 1151 macro_rules! 
t { 1152 ($ty:ident, $f_scalar:ident) => {{ 1153 let v: &[($f_scalar, $f_scalar)] = 1154 &[(::std::$f_scalar::NAN, 0.0), 1155 (1.0, ::std::$f_scalar::NAN), 1156 (::std::$f_scalar::NAN, ::std::$f_scalar::NAN), 1157 (1.0, 0.5), 1158 (::std::$f_scalar::MAX, -::std::$f_scalar::MAX), 1159 (::std::$f_scalar::INFINITY, ::std::$f_scalar::INFINITY), 1160 (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NEG_INFINITY), 1161 (::std::$f_scalar::NEG_INFINITY, 5.0), 1162 (5.0, ::std::$f_scalar::INFINITY), 1163 (::std::$f_scalar::NAN, ::std::$f_scalar::INFINITY), 1164 (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NAN), 1165 (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::INFINITY), 1166 ]; 1167 for &(low_scalar, high_scalar) in v.iter() { 1168 for lane in 0..<$ty>::lanes() { 1169 let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar); 1170 let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar); 1171 assert!(catch_unwind(|| range(low, high)).is_err()); 1172 assert!(catch_unwind(|| Uniform::new(low, high)).is_err()); 1173 assert!(catch_unwind(|| Uniform::new_inclusive(low, high)).is_err()); 1174 assert!(catch_unwind(|| range(low, low)).is_err()); 1175 assert!(catch_unwind(|| Uniform::new(low, low)).is_err()); 1176 } 1177 } 1178 }} 1179 } 1180 1181 t!(f32, f32); 1182 t!(f64, f64); 1183 #[cfg(feature="simd_support")] 1184 { 1185 t!(f32x2, f32); 1186 t!(f32x4, f32); 1187 t!(f32x8, f32); 1188 t!(f32x16, f32); 1189 t!(f64x2, f64); 1190 t!(f64x4, f64); 1191 t!(f64x8, f64); 1192 } 1193 } 1194 1195 1196 #[test] 1197 #[cfg(any(feature = "std", rustc_1_25))] test_durations()1198 fn test_durations() { 1199 #[cfg(feature = "std")] 1200 use std::time::Duration; 1201 #[cfg(all(not(feature = "std"), rustc_1_25))] 1202 use core::time::Duration; 1203 1204 let mut rng = ::test::rng(253); 1205 1206 let v = &[(Duration::new(10, 50000), Duration::new(100, 1234)), 1207 (Duration::new(0, 100), Duration::new(1, 50)), 1208 (Duration::new(0, 0), 
Duration::new(u64::max_value(), 999_999_999))]; 1209 for &(low, high) in v.iter() { 1210 let my_uniform = Uniform::new(low, high); 1211 for _ in 0..1000 { 1212 let v = rng.sample(my_uniform); 1213 assert!(low <= v && v < high); 1214 } 1215 } 1216 } 1217 1218 #[test] test_custom_uniform()1219 fn test_custom_uniform() { 1220 use distributions::uniform::{UniformSampler, UniformFloat, SampleUniform, SampleBorrow}; 1221 #[derive(Clone, Copy, PartialEq, PartialOrd)] 1222 struct MyF32 { 1223 x: f32, 1224 } 1225 #[derive(Clone, Copy, Debug)] 1226 struct UniformMyF32 { 1227 inner: UniformFloat<f32>, 1228 } 1229 impl UniformSampler for UniformMyF32 { 1230 type X = MyF32; 1231 fn new<B1, B2>(low: B1, high: B2) -> Self 1232 where B1: SampleBorrow<Self::X> + Sized, 1233 B2: SampleBorrow<Self::X> + Sized 1234 { 1235 UniformMyF32 { 1236 inner: UniformFloat::<f32>::new(low.borrow().x, high.borrow().x), 1237 } 1238 } 1239 fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self 1240 where B1: SampleBorrow<Self::X> + Sized, 1241 B2: SampleBorrow<Self::X> + Sized 1242 { 1243 UniformSampler::new(low, high) 1244 } 1245 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X { 1246 MyF32 { x: self.inner.sample(rng) } 1247 } 1248 } 1249 impl SampleUniform for MyF32 { 1250 type Sampler = UniformMyF32; 1251 } 1252 1253 let (low, high) = (MyF32{ x: 17.0f32 }, MyF32{ x: 22.0f32 }); 1254 let uniform = Uniform::new(low, high); 1255 let mut rng = ::test::rng(804); 1256 for _ in 0..100 { 1257 let x: MyF32 = rng.sample(uniform); 1258 assert!(low <= x && x < high); 1259 } 1260 } 1261 1262 #[test] test_uniform_from_std_range()1263 fn test_uniform_from_std_range() { 1264 let r = Uniform::from(2u32..7); 1265 assert_eq!(r.inner.low, 2); 1266 assert_eq!(r.inner.range, 5); 1267 let r = Uniform::from(2.0f64..7.0); 1268 assert_eq!(r.inner.low, 2.0); 1269 assert_eq!(r.inner.scale, 5.0); 1270 } 1271 1272 #[cfg(rustc_1_27)] 1273 #[test] test_uniform_from_std_range_inclusive()1274 fn 
test_uniform_from_std_range_inclusive() { 1275 let r = Uniform::from(2u32..=6); 1276 assert_eq!(r.inner.low, 2); 1277 assert_eq!(r.inner.range, 5); 1278 let r = Uniform::from(2.0f64..=7.0); 1279 assert_eq!(r.inner.low, 2.0); 1280 assert!(r.inner.scale > 5.0); 1281 assert!(r.inner.scale < 5.0 + 1e-14); 1282 } 1283 } 1284