]> git.proxmox.com Git - rustc.git/blob - vendor/rand/src/distributions/uniform.rs
New upstream version 1.51.0+dfsg1
[rustc.git] / vendor / rand / src / distributions / uniform.rs
1 // Copyright 2018 Developers of the Rand project.
2 // Copyright 2017 The Rust Project Developers.
3 //
4 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
5 // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
7 // option. This file may not be copied, modified, or distributed
8 // except according to those terms.
9
10 //! A distribution uniformly sampling numbers within a given range.
11 //!
12 //! [`Uniform`] is the standard distribution to sample uniformly from a range;
13 //! e.g. `Uniform::new_inclusive(1, 6)` can sample integers from 1 to 6, like a
14 //! standard die. [`Rng::gen_range`] supports any type supported by
15 //! [`Uniform`].
16 //!
17 //! This distribution is provided with support for several primitive types
18 //! (all integer and floating-point types) as well as [`std::time::Duration`],
19 //! and supports extension to user-defined types via a type-specific *back-end*
20 //! implementation.
21 //!
22 //! The types [`UniformInt`], [`UniformFloat`] and [`UniformDuration`] are the
23 //! back-ends supporting sampling from primitive integer and floating-point
24 //! ranges as well as from [`std::time::Duration`]; these types do not normally
25 //! need to be used directly (unless implementing a derived back-end).
26 //!
27 //! # Example usage
28 //!
29 //! ```
30 //! use rand::{Rng, thread_rng};
31 //! use rand::distributions::Uniform;
32 //!
33 //! let mut rng = thread_rng();
34 //! let side = Uniform::new(-10.0, 10.0);
35 //!
36 //! // sample between 1 and 10 points
37 //! for _ in 0..rng.gen_range(1, 11) {
38 //! // sample a point from the square with sides -10 to 10 in two dimensions
39 //! let (x, y) = (rng.sample(side), rng.sample(side));
40 //! println!("Point: {}, {}", x, y);
41 //! }
42 //! ```
43 //!
44 //! # Extending `Uniform` to support a custom type
45 //!
46 //! To extend [`Uniform`] to support your own types, write a back-end which
47 //! implements the [`UniformSampler`] trait, then implement the [`SampleUniform`]
48 //! helper trait to "register" your back-end. See the `MyF32` example below.
49 //!
50 //! At a minimum, the back-end needs to store any parameters needed for sampling
51 //! (e.g. the target range) and implement `new`, `new_inclusive` and `sample`.
52 //! Those methods should include an assert to check the range is valid (i.e.
53 //! `low < high`). The example below merely wraps another back-end.
54 //!
55 //! The `new`, `new_inclusive` and `sample_single` functions use arguments of
56 //! type `SampleBorrow<X>` in order to support passing in values by reference or
57 //! by value. In the implementation of these functions, you can choose to
58 //! simply use the reference returned by [`SampleBorrow::borrow`], or you can choose
59 //! to copy or clone the value, whatever is appropriate for your type.
60 //!
61 //! ```
62 //! use rand::prelude::*;
63 //! use rand::distributions::uniform::{Uniform, SampleUniform,
64 //! UniformSampler, UniformFloat, SampleBorrow};
65 //!
66 //! struct MyF32(f32);
67 //!
68 //! #[derive(Clone, Copy, Debug)]
69 //! struct UniformMyF32(UniformFloat<f32>);
70 //!
71 //! impl UniformSampler for UniformMyF32 {
72 //! type X = MyF32;
73 //! fn new<B1, B2>(low: B1, high: B2) -> Self
74 //! where B1: SampleBorrow<Self::X> + Sized,
75 //! B2: SampleBorrow<Self::X> + Sized
76 //! {
77 //! UniformMyF32(UniformFloat::<f32>::new(low.borrow().0, high.borrow().0))
78 //! }
79 //! fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
80 //! where B1: SampleBorrow<Self::X> + Sized,
81 //! B2: SampleBorrow<Self::X> + Sized
82 //! {
83 //! UniformSampler::new(low, high)
84 //! }
85 //! fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
86 //! MyF32(self.0.sample(rng))
87 //! }
88 //! }
89 //!
90 //! impl SampleUniform for MyF32 {
91 //! type Sampler = UniformMyF32;
92 //! }
93 //!
94 //! let (low, high) = (MyF32(17.0f32), MyF32(22.0f32));
95 //! let uniform = Uniform::new(low, high);
96 //! let x = uniform.sample(&mut thread_rng());
97 //! ```
98 //!
99 //! [`SampleUniform`]: crate::distributions::uniform::SampleUniform
100 //! [`UniformSampler`]: crate::distributions::uniform::UniformSampler
101 //! [`UniformInt`]: crate::distributions::uniform::UniformInt
102 //! [`UniformFloat`]: crate::distributions::uniform::UniformFloat
103 //! [`UniformDuration`]: crate::distributions::uniform::UniformDuration
104 //! [`SampleBorrow::borrow`]: crate::distributions::uniform::SampleBorrow::borrow
105
106 #[cfg(not(feature = "std"))] use core::time::Duration;
107 #[cfg(feature = "std")] use std::time::Duration;
108
109 use crate::distributions::float::IntoFloat;
110 use crate::distributions::utils::{BoolAsSIMD, FloatAsSIMD, FloatSIMDUtils, WideningMultiply};
111 use crate::distributions::Distribution;
112 use crate::Rng;
113
114 #[cfg(not(feature = "std"))]
115 #[allow(unused_imports)] // rustc doesn't detect that this is actually used
116 use crate::distributions::utils::Float;
117
118
119 #[cfg(feature = "simd_support")] use packed_simd::*;
120
121 /// Sample values uniformly between two bounds.
122 ///
123 /// [`Uniform::new`] and [`Uniform::new_inclusive`] construct a uniform
124 /// distribution sampling from the given range; these functions may do extra
125 /// work up front to make sampling of multiple values faster.
126 ///
127 /// When sampling from a constant range, many calculations can happen at
128 /// compile-time and all methods should be fast; for floating-point ranges and
129 /// the full range of integer types this should have comparable performance to
130 /// the `Standard` distribution.
131 ///
132 /// Steps are taken to avoid bias which might be present in naive
133 /// implementations; for example `rng.gen::<u8>() % 170` samples from the range
134 /// `[0, 169]` but is twice as likely to select numbers less than 85 than other
135 /// values. Further, the implementations here give more weight to the high-bits
136 /// generated by the RNG than the low bits, since with some RNGs the low-bits
137 /// are of lower quality than the high bits.
138 ///
139 /// Implementations must sample in `[low, high)` range for
140 /// `Uniform::new(low, high)`, i.e., excluding `high`. In particular care must
141 /// be taken to ensure that rounding never results in values `< low` or `>= high`.
142 ///
143 /// # Example
144 ///
145 /// ```
146 /// use rand::distributions::{Distribution, Uniform};
147 ///
148 /// fn main() {
149 /// let between = Uniform::from(10..10000);
150 /// let mut rng = rand::thread_rng();
151 /// let mut sum = 0;
152 /// for _ in 0..1000 {
153 /// sum += between.sample(&mut rng);
154 /// }
155 /// println!("{}", sum);
156 /// }
157 /// ```
158 ///
159 /// [`new`]: Uniform::new
160 /// [`new_inclusive`]: Uniform::new_inclusive
#[derive(Clone, Copy, Debug)]
// Wraps the type-specific back-end sampler (`UniformInt`, `UniformFloat`,
// `UniformDuration`, ...), pre-computed for the requested range.
pub struct Uniform<X: SampleUniform>(X::Sampler);
163
164 impl<X: SampleUniform> Uniform<X> {
165 /// Create a new `Uniform` instance which samples uniformly from the half
166 /// open range `[low, high)` (excluding `high`). Panics if `low >= high`.
167 pub fn new<B1, B2>(low: B1, high: B2) -> Uniform<X>
168 where
169 B1: SampleBorrow<X> + Sized,
170 B2: SampleBorrow<X> + Sized,
171 {
172 Uniform(X::Sampler::new(low, high))
173 }
174
175 /// Create a new `Uniform` instance which samples uniformly from the closed
176 /// range `[low, high]` (inclusive). Panics if `low > high`.
177 pub fn new_inclusive<B1, B2>(low: B1, high: B2) -> Uniform<X>
178 where
179 B1: SampleBorrow<X> + Sized,
180 B2: SampleBorrow<X> + Sized,
181 {
182 Uniform(X::Sampler::new_inclusive(low, high))
183 }
184 }
185
impl<X: SampleUniform> Distribution<X> for Uniform<X> {
    // Forward directly to the wrapped back-end sampler.
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> X {
        self.0.sample(rng)
    }
}
191
/// Helper trait for creating objects using the correct implementation of
/// [`UniformSampler`] for the sampling type.
///
/// See the [module documentation] on how to implement [`Uniform`] range
/// sampling for a custom type.
///
/// [module documentation]: crate::distributions::uniform
pub trait SampleUniform: Sized {
    /// The `UniformSampler` implementation supporting type `X`.
    ///
    /// This is how `Uniform::<X>::new(..)` finds its back-end.
    type Sampler: UniformSampler<X = Self>;
}
203
/// Helper trait handling actual uniform sampling.
///
/// See the [module documentation] on how to implement [`Uniform`] range
/// sampling for a custom type.
///
/// Implementation of [`sample_single`] is optional, and is only useful when
/// the implementation can be faster than `Self::new(low, high).sample(rng)`.
///
/// [module documentation]: crate::distributions::uniform
/// [`sample_single`]: UniformSampler::sample_single
pub trait UniformSampler: Sized {
    /// The type sampled by this implementation.
    type X;

    /// Construct self, with inclusive lower bound and exclusive upper bound
    /// `[low, high)`.
    ///
    /// Usually users should not call this directly but instead use
    /// `Uniform::new`, which asserts that `low < high` before calling this.
    fn new<B1, B2>(low: B1, high: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized;

    /// Construct self, with inclusive bounds `[low, high]`.
    ///
    /// Usually users should not call this directly but instead use
    /// `Uniform::new_inclusive`, which asserts that `low <= high` before
    /// calling this.
    fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized;

    /// Sample a value.
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X;

    /// Sample a single value uniformly from a range with inclusive lower bound
    /// and exclusive upper bound `[low, high)`.
    ///
    /// By default this is implemented using
    /// `UniformSampler::new(low, high).sample(rng)`. However, for some types
    /// more optimal implementations for single usage may be provided via this
    /// method (which is the case for integers and floats).
    /// Results may not be identical.
    ///
    /// Note that to use this method in a generic context, the type needs to be
    /// retrieved via `SampleUniform::Sampler` as follows:
    /// ```
    /// use rand::{thread_rng, distributions::uniform::{SampleUniform, UniformSampler}};
    /// # #[allow(unused)]
    /// fn sample_from_range<T: SampleUniform>(lb: T, ub: T) -> T {
    ///     let mut rng = thread_rng();
    ///     <T as SampleUniform>::Sampler::sample_single(lb, ub, &mut rng)
    /// }
    /// ```
    fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        // Default: pay the full set-up cost of `new` for a single sample.
        // Back-ends override this when a cheaper one-shot path exists.
        let uniform: Self = UniformSampler::new(low, high);
        uniform.sample(rng)
    }
}
269
270 impl<X: SampleUniform> From<::core::ops::Range<X>> for Uniform<X> {
271 fn from(r: ::core::ops::Range<X>) -> Uniform<X> {
272 Uniform::new(r.start, r.end)
273 }
274 }
275
276 impl<X: SampleUniform> From<::core::ops::RangeInclusive<X>> for Uniform<X> {
277 fn from(r: ::core::ops::RangeInclusive<X>) -> Uniform<X> {
278 Uniform::new_inclusive(r.start(), r.end())
279 }
280 }
281
/// Helper trait similar to [`Borrow`] but implemented
/// only for SampleUniform and references to SampleUniform in
/// order to resolve ambiguity issues.
///
/// [`Borrow`]: std::borrow::Borrow
pub trait SampleBorrow<Borrowed> {
    /// Immutably borrows from an owned value. See [`Borrow::borrow`]
    ///
    /// [`Borrow::borrow`]: std::borrow::Borrow::borrow
    fn borrow(&self) -> &Borrowed;
}

// An owned value borrows as itself.
impl<Borrowed> SampleBorrow<Borrowed> for Borrowed
where Borrowed: SampleUniform
{
    #[inline(always)]
    fn borrow(&self) -> &Borrowed {
        self
    }
}

// A reference borrows by handing out the inner reference.
impl<'a, Borrowed> SampleBorrow<Borrowed> for &'a Borrowed
where Borrowed: SampleUniform
{
    #[inline(always)]
    fn borrow(&self) -> &Borrowed {
        *self
    }
}
309
310 ////////////////////////////////////////////////////////////////////////////////
311
312 // What follows are all back-ends.
313
314
315 /// The back-end implementing [`UniformSampler`] for integer types.
316 ///
317 /// Unless you are implementing [`UniformSampler`] for your own type, this type
318 /// should not be used directly, use [`Uniform`] instead.
319 ///
320 /// # Implementation notes
321 ///
322 /// For simplicity, we use the same generic struct `UniformInt<X>` for all
323 /// integer types `X`. This gives us only one field type, `X`; to store unsigned
324 /// values of this size, we make use of the fact that these conversions are no-ops.
325 ///
326 /// For a closed range, the number of possible numbers we should generate is
327 /// `range = (high - low + 1)`. To avoid bias, we must ensure that the size of
328 /// our sample space, `zone`, is a multiple of `range`; other values must be
329 /// rejected (by replacing with a new random sample).
330 ///
331 /// As a special case, we use `range = 0` to represent the full range of the
332 /// result type (i.e. for `new_inclusive($ty::MIN, $ty::MAX)`).
333 ///
334 /// The optimum `zone` is the largest product of `range` which fits in our
335 /// (unsigned) target type. We calculate this by calculating how many numbers we
336 /// must reject: `reject = (MAX + 1) % range = (MAX - range + 1) % range`. Any (large)
337 /// product of `range` will suffice, thus in `sample_single` we multiply by a
338 /// power of 2 via bit-shifting (faster but may cause more rejections).
339 ///
340 /// The smallest integer PRNGs generate is `u32`. For 8- and 16-bit outputs we
341 /// use `u32` for our `zone` and samples (because it's not slower and because
342 /// it reduces the chance of having to reject a sample). In this case we cannot
343 /// store `zone` in the target type since it is too large, however we know
344 /// `ints_to_reject < range <= $unsigned::MAX`.
345 ///
346 /// An alternative to using a modulus is widening multiply: After a widening
347 /// multiply by `range`, the result is in the high word. Then comparing the low
348 /// word against `zone` makes sure our distribution is uniform.
#[derive(Clone, Copy, Debug)]
pub struct UniformInt<X> {
    low: X,   // inclusive lower bound of the range
    range: X, // number of values in the range; 0 encodes the full type range
    z: X, // either ints_to_reject or zone depending on implementation
}
355
macro_rules! uniform_int_impl {
    // `$ty`: the sampled type; `$unsigned`: its bit-equal unsigned twin;
    // `$u_large`: the (possibly wider) unsigned type actually sampled from
    // the RNG (`u32` for 8/16-bit types, see module notes).
    ($ty:ty, $unsigned:ident, $u_large:ident) => {
        impl SampleUniform for $ty {
            type Sampler = UniformInt<$ty>;
        }

        impl UniformSampler for UniformInt<$ty> {
            // We play free and fast with unsigned vs signed here
            // (when $ty is signed), but that's fine, since the
            // contract of this macro is for $ty and $unsigned to be
            // "bit-equal", so casting between them is a no-op.

            type X = $ty;

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(low < high, "Uniform::new called with `low >= high`");
                // `high - 1` cannot underflow: the assert guarantees low < high.
                UniformSampler::new_inclusive(low, high - 1)
            }

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(
                    low <= high,
                    "Uniform::new_inclusive called with `low > high`"
                );
                let unsigned_max = ::core::$u_large::MAX;

                // Wrapping arithmetic: for the full type range this wraps to
                // 0, the sentinel meaning "sample the whole output space"
                // (see the struct-level implementation notes).
                let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned;
                let ints_to_reject = if range > 0 {
                    let range = $u_large::from(range);
                    // Equivalent to (MAX + 1) % range, written to avoid overflow.
                    (unsigned_max - range + 1) % range
                } else {
                    0
                };

                UniformInt {
                    low: low,
                    // These are really $unsigned values, but store as $ty:
                    range: range as $ty,
                    z: ints_to_reject as $unsigned as $ty,
                }
            }

            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                let range = self.range as $unsigned as $u_large;
                if range > 0 {
                    let unsigned_max = ::core::$u_large::MAX;
                    let zone = unsigned_max - (self.z as $unsigned as $u_large);
                    loop {
                        let v: $u_large = rng.gen();
                        // Widening multiply: `hi` is the candidate offset into
                        // the range; `lo` is checked against `zone` to reject
                        // the biased tail of the sample space.
                        let (hi, lo) = v.wmul(range);
                        if lo <= zone {
                            return self.low.wrapping_add(hi as $ty);
                        }
                    }
                } else {
                    // Sample from the entire integer range.
                    rng.gen()
                }
            }

            fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(low < high, "UniformSampler::sample_single: low >= high");
                let range = high.wrapping_sub(low) as $unsigned as $u_large;
                let zone = if ::core::$unsigned::MAX <= ::core::u16::MAX as $unsigned {
                    // Using a modulus is faster than the approximation for
                    // i8 and i16. I suppose we trade the cost of one
                    // modulus for near-perfect branch prediction.
                    let unsigned_max: $u_large = ::core::$u_large::MAX;
                    let ints_to_reject = (unsigned_max - range + 1) % range;
                    unsigned_max - ints_to_reject
                } else {
                    // conservative but fast approximation. `- 1` is necessary to allow the
                    // same comparison without bias.
                    (range << range.leading_zeros()).wrapping_sub(1)
                };

                loop {
                    let v: $u_large = rng.gen();
                    let (hi, lo) = v.wmul(range);
                    if lo <= zone {
                        return low.wrapping_add(hi as $ty);
                    }
                }
            }
        }
    };
}
465
// 8- and 16-bit types sample via u32 (see the implementation notes on
// `UniformInt`); wider types use an unsigned type of their own width.
// 128-bit impls are gated out on emscripten, which lacks i128/u128 support.
uniform_int_impl! { i8, u8, u32 }
uniform_int_impl! { i16, u16, u32 }
uniform_int_impl! { i32, u32, u32 }
uniform_int_impl! { i64, u64, u64 }
#[cfg(not(target_os = "emscripten"))]
uniform_int_impl! { i128, u128, u128 }
uniform_int_impl! { isize, usize, usize }
uniform_int_impl! { u8, u8, u32 }
uniform_int_impl! { u16, u16, u32 }
uniform_int_impl! { u32, u32, u32 }
uniform_int_impl! { u64, u64, u64 }
uniform_int_impl! { usize, usize, usize }
#[cfg(not(target_os = "emscripten"))]
uniform_int_impl! { u128, u128, u128 }
480
#[cfg(all(feature = "simd_support", feature = "nightly"))]
macro_rules! uniform_simd_int_impl {
    // `$ty`: sampled vector type; `$unsigned`: bit-equal unsigned vector;
    // `$u_scalar`: the unsigned scalar type of one lane.
    ($ty:ident, $unsigned:ident, $u_scalar:ident) => {
        // The "pick the largest zone that can fit in an `u32`" optimization
        // is less useful here. Multiple lanes complicate things, we don't
        // know the PRNG's minimal output size, and casting to a larger vector
        // is generally a bad idea for SIMD performance. The user can still
        // implement it manually.

        // TODO: look into `Uniform::<u32x4>::new(0u32, 100)` functionality
        // perhaps `impl SampleUniform for $u_scalar`?
        impl SampleUniform for $ty {
            type Sampler = UniformInt<$ty>;
        }

        impl UniformSampler for UniformInt<$ty> {
            type X = $ty;

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
                where B1: SampleBorrow<Self::X> + Sized,
                      B2: SampleBorrow<Self::X> + Sized
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                // Lane-wise validation: every lane of `low` must be below `high`.
                assert!(low.lt(high).all(), "Uniform::new called with `low >= high`");
                UniformSampler::new_inclusive(low, high - 1)
            }

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
                where B1: SampleBorrow<Self::X> + Sized,
                      B2: SampleBorrow<Self::X> + Sized
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(low.le(high).all(),
                        "Uniform::new_inclusive called with `low > high`");
                let unsigned_max = ::core::$u_scalar::MAX;

                // NOTE: these may need to be replaced with explicitly
                // wrapping operations if `packed_simd` changes
                let range: $unsigned = ((high - low) + 1).cast();
                // `% 0` will panic at runtime.
                let not_full_range = range.gt($unsigned::splat(0));
                // replacing 0 with `unsigned_max` allows a faster `select`
                // with bitwise OR
                let modulo = not_full_range.select(range, $unsigned::splat(unsigned_max));
                // wrapping addition
                let ints_to_reject = (unsigned_max - range + 1) % modulo;
                // When `range` is 0, `lo` of `v.wmul(range)` will always be
                // zero which means only one sample is needed.
                let zone = unsigned_max - ints_to_reject;

                UniformInt {
                    low: low,
                    // These are really $unsigned values, but store as $ty:
                    range: range.cast(),
                    z: zone.cast(),
                }
            }

            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                let range: $unsigned = self.range.cast();
                let zone: $unsigned = self.z.cast();

                // This might seem very slow, generating a whole new
                // SIMD vector for every sample rejection. For most uses
                // though, the chance of rejection is small and provides good
                // general performance. With multiple lanes, that chance is
                // multiplied. To mitigate this, we replace only the lanes of
                // the vector which fail, iteratively reducing the chance of
                // rejection. The replacement method does however add a little
                // overhead. Benchmarking or calculating probabilities might
                // reveal contexts where this replacement method is slower.
                let mut v: $unsigned = rng.gen();
                loop {
                    let (hi, lo) = v.wmul(range);
                    let mask = lo.le(zone);
                    if mask.all() {
                        let hi: $ty = hi.cast();
                        // wrapping addition
                        let result = self.low + hi;
                        // `select` here compiles to a blend operation
                        // When `range.eq(0).none()` the compare and blend
                        // operations are avoided.
                        let v: $ty = v.cast();
                        return range.gt($unsigned::splat(0)).select(result, v);
                    }
                    // Replace only the failing lanes
                    v = mask.select(v, rng.gen());
                }
            }
        }
    };

    // bulk implementation: expand the single-type arm for each
    // (unsigned, signed) vector pair sharing one scalar lane type.
    ($(($unsigned:ident, $signed:ident),)+ $u_scalar:ident) => {
        $(
            uniform_simd_int_impl!($unsigned, $unsigned, $u_scalar);
            uniform_simd_int_impl!($signed, $unsigned, $u_scalar);
        )+
    };
}
587
// Instantiate the SIMD back-ends for every packed_simd vector width,
// grouped by scalar lane type (64-, 32-, 16- and 8-bit lanes).
#[cfg(all(feature = "simd_support", feature = "nightly"))]
uniform_simd_int_impl! {
    (u64x2, i64x2),
    (u64x4, i64x4),
    (u64x8, i64x8),
    u64
}

#[cfg(all(feature = "simd_support", feature = "nightly"))]
uniform_simd_int_impl! {
    (u32x2, i32x2),
    (u32x4, i32x4),
    (u32x8, i32x8),
    (u32x16, i32x16),
    u32
}

#[cfg(all(feature = "simd_support", feature = "nightly"))]
uniform_simd_int_impl! {
    (u16x2, i16x2),
    (u16x4, i16x4),
    (u16x8, i16x8),
    (u16x16, i16x16),
    (u16x32, i16x32),
    u16
}

#[cfg(all(feature = "simd_support", feature = "nightly"))]
uniform_simd_int_impl! {
    (u8x2, i8x2),
    (u8x4, i8x4),
    (u8x8, i8x8),
    (u8x16, i8x16),
    (u8x32, i8x32),
    (u8x64, i8x64),
    u8
}
625
626
627 /// The back-end implementing [`UniformSampler`] for floating-point types.
628 ///
629 /// Unless you are implementing [`UniformSampler`] for your own type, this type
630 /// should not be used directly, use [`Uniform`] instead.
631 ///
632 /// # Implementation notes
633 ///
634 /// Instead of generating a float in the `[0, 1)` range using [`Standard`], the
635 /// `UniformFloat` implementation converts the output of a PRNG itself. This
636 /// way one or two steps can be optimized out.
637 ///
638 /// The floats are first converted to a value in the `[1, 2)` interval using a
639 /// transmute-based method, and then mapped to the expected range with a
640 /// multiply and addition. Values produced this way have what equals 23 bits of
641 /// random digits for an `f32`, and 52 for an `f64`.
642 ///
643 /// [`new`]: UniformSampler::new
644 /// [`new_inclusive`]: UniformSampler::new_inclusive
645 /// [`Standard`]: crate::distributions::Standard
#[derive(Clone, Copy, Debug)]
pub struct UniformFloat<X> {
    low: X,   // inclusive lower bound
    scale: X, // multiplier mapping the generated [0, 1) value onto the range
}
651
macro_rules! uniform_float_impl {
    // `$ty`: sampled (possibly SIMD) float type; `$uty`: matching unsigned
    // type for raw RNG output; `$f_scalar`/`$u_scalar`: scalar lane types;
    // `$bits_to_discard`: sign+exponent bits shifted away to keep only the
    // mantissa bits (9 for f32, 12 for f64).
    ($ty:ty, $uty:ident, $f_scalar:ident, $u_scalar:ident, $bits_to_discard:expr) => {
        impl SampleUniform for $ty {
            type Sampler = UniformFloat<$ty>;
        }

        impl UniformSampler for UniformFloat<$ty> {
            type X = $ty;

            fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(low.all_lt(high), "Uniform::new called with `low >= high`");
                assert!(
                    low.all_finite() && high.all_finite(),
                    "Uniform::new called with non-finite boundaries"
                );
                // Largest value the generated [0, 1) fraction can take.
                let max_rand = <$ty>::splat(
                    (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0,
                );

                let mut scale = high - low;

                // Shrink `scale` (per lane) until rounding can no longer
                // produce a sample `>= high` — the range excludes `high`.
                loop {
                    let mask = (scale * max_rand + low).ge_mask(high);
                    if mask.none() {
                        break;
                    }
                    scale = scale.decrease_masked(mask);
                }

                debug_assert!(<$ty>::splat(0.0).all_le(scale));

                UniformFloat { low, scale }
            }

            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(
                    low.all_le(high),
                    "Uniform::new_inclusive called with `low > high`"
                );
                assert!(
                    low.all_finite() && high.all_finite(),
                    "Uniform::new_inclusive called with non-finite boundaries"
                );
                let max_rand = <$ty>::splat(
                    (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0,
                );

                // Dividing by `max_rand` lets the maximum fraction land
                // exactly on `high` (inclusive range).
                let mut scale = (high - low) / max_rand;

                // Shrink `scale` until rounding can no longer overshoot `high`.
                loop {
                    let mask = (scale * max_rand + low).gt_mask(high);
                    if mask.none() {
                        break;
                    }
                    scale = scale.decrease_masked(mask);
                }

                debug_assert!(<$ty>::splat(0.0).all_le(scale));

                UniformFloat { low, scale }
            }

            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                // Generate a value in the range [1, 2)
                let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard).into_float_with_exponent(0);

                // Get a value in the range [0, 1) in order to avoid
                // overflowing into infinity when multiplying with scale
                let value0_1 = value1_2 - 1.0;

                // We don't use `f64::mul_add`, because it is not available with
                // `no_std`. Furthermore, it is slower for some targets (but
                // faster for others). However, the order of multiplication and
                // addition is important, because on some platforms (e.g. ARM)
                // it will be optimized to a single (non-FMA) instruction.
                value0_1 * self.scale + self.low
            }

            #[inline]
            fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                assert!(
                    low.all_lt(high),
                    "UniformSampler::sample_single: low >= high"
                );
                let mut scale = high - low;

                loop {
                    // Generate a value in the range [1, 2)
                    let value1_2 =
                        (rng.gen::<$uty>() >> $bits_to_discard).into_float_with_exponent(0);

                    // Get a value in the range [0, 1) in order to avoid
                    // overflowing into infinity when multiplying with scale
                    let value0_1 = value1_2 - 1.0;

                    // Doing multiply before addition allows some architectures
                    // to use a single instruction.
                    let res = value0_1 * scale + low;

                    debug_assert!(low.all_le(res) || !scale.all_finite());
                    if res.all_lt(high) {
                        return res;
                    }

                    // This handles a number of edge cases.
                    // * `low` or `high` is NaN. In this case `scale` and
                    //   `res` are going to end up as NaN.
                    // * `low` is negative infinity and `high` is finite.
                    //   `scale` is going to be infinite and `res` will be
                    //   NaN.
                    // * `high` is positive infinity and `low` is finite.
                    //   `scale` is going to be infinite and `res` will
                    //   be infinite or NaN (if value0_1 is 0).
                    // * `low` is negative infinity and `high` is positive
                    //   infinity. `scale` will be infinite and `res` will
                    //   be NaN.
                    // * `low` and `high` are finite, but `high - low`
                    //   overflows to infinite. `scale` will be infinite
                    //   and `res` will be infinite or NaN (if value0_1 is 0).
                    // So if `high` or `low` are non-finite, we are guaranteed
                    // to fail the `res < high` check above and end up here.
                    //
                    // While we technically should check for non-finite `low`
                    // and `high` before entering the loop, by doing the checks
                    // here instead, we allow the common case to avoid these
                    // checks. But we are still guaranteed that if `low` or
                    // `high` are non-finite we'll end up here and can do the
                    // appropriate checks.
                    //
                    // Likewise `high - low` overflowing to infinity is also
                    // rare, so handle it here after the common case.
                    let mask = !scale.finite_mask();
                    if mask.any() {
                        assert!(
                            low.all_finite() && high.all_finite(),
                            "Uniform::sample_single: low and high must be finite"
                        );
                        scale = scale.decrease_masked(mask);
                    }
                }
            }
        }
    };
}
814
// The last argument discards the sign and exponent bits, keeping only the
// mantissa: 32 - 23 = 9 bits for f32, 64 - 52 = 12 bits for f64.
uniform_float_impl! { f32, u32, f32, u32, 32 - 23 }
uniform_float_impl! { f64, u64, f64, u64, 64 - 52 }

#[cfg(feature = "simd_support")]
uniform_float_impl! { f32x2, u32x2, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { f32x4, u32x4, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { f32x8, u32x8, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { f32x16, u32x16, f32, u32, 32 - 23 }

#[cfg(feature = "simd_support")]
uniform_float_impl! { f64x2, u64x2, f64, u64, 64 - 52 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { f64x4, u64x4, f64, u64, 64 - 52 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { f64x8, u64x8, f64, u64, 64 - 52 }
833
834
835 /// The back-end implementing [`UniformSampler`] for `Duration`.
836 ///
837 /// Unless you are implementing [`UniformSampler`] for your own types, this type
838 /// should not be used directly, use [`Uniform`] instead.
#[derive(Clone, Copy, Debug)]
pub struct UniformDuration {
    mode: UniformDurationMode,
    offset: u32, // low.subsec_nanos(); added back to samples in `Large` mode
}
844
#[derive(Debug, Copy, Clone)]
enum UniformDurationMode {
    // Both bounds fall within the same whole second: only the nanosecond
    // part needs to be sampled.
    Small {
        secs: u64,
        nanos: Uniform<u32>,
    },
    // The whole span expressed in nanoseconds fits in a u64: sample one
    // integer and split it back into seconds/nanoseconds.
    Medium {
        nanos: Uniform<u64>,
    },
    // Span too large for u64 nanoseconds: sample seconds and nanoseconds
    // independently, rejecting combinations past the upper bound.
    Large {
        max_secs: u64,
        max_nanos: u32,
        secs: Uniform<u64>,
    },
}
860
// Register `UniformDuration` as the back-end for `Duration` ranges.
impl SampleUniform for Duration {
    type Sampler = UniformDuration;
}
864
impl UniformSampler for UniformDuration {
    type X = Duration;

    #[inline]
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        assert!(low < high, "Uniform::new called with `low >= high`");
        // [low, high) is equivalent to the closed range [low, high - 1ns].
        UniformDuration::new_inclusive(low, high - Duration::new(0, 1))
    }

    #[inline]
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        assert!(
            low <= high,
            "Uniform::new_inclusive called with `low > high`"
        );

        let low_s = low.as_secs();
        let low_n = low.subsec_nanos();
        let mut high_s = high.as_secs();
        let mut high_n = high.subsec_nanos();

        // Borrow one second from `high` so that `high_n >= low_n`; this keeps
        // `high_n - low_n` (used for the `Large` offset) from underflowing.
        if high_n < low_n {
            high_s -= 1;
            high_n += 1_000_000_000;
        }

        let mode = if low_s == high_s {
            // Seconds are fixed; only nanoseconds vary.
            UniformDurationMode::Small {
                secs: low_s,
                nanos: Uniform::new_inclusive(low_n, high_n),
            }
        } else {
            // Try to express the whole range as u64 nanoseconds.
            let max = high_s
                .checked_mul(1_000_000_000)
                .and_then(|n| n.checked_add(u64::from(high_n)));

            if let Some(higher_bound) = max {
                let lower_bound = low_s * 1_000_000_000 + u64::from(low_n);
                UniformDurationMode::Medium {
                    nanos: Uniform::new_inclusive(lower_bound, higher_bound),
                }
            } else {
                // An offset is applied to simplify generation of nanoseconds
                let max_nanos = high_n - low_n;
                UniformDurationMode::Large {
                    max_secs: high_s,
                    max_nanos,
                    secs: Uniform::new_inclusive(low_s, high_s),
                }
            }
        };
        UniformDuration {
            mode,
            offset: low_n,
        }
    }

    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Duration {
        match self.mode {
            UniformDurationMode::Small { secs, nanos } => {
                let n = nanos.sample(rng);
                Duration::new(secs, n)
            }
            UniformDurationMode::Medium { nanos } => {
                // Split the single u64 nanosecond sample back into parts.
                let nanos = nanos.sample(rng);
                Duration::new(nanos / 1_000_000_000, (nanos % 1_000_000_000) as u32)
            }
            UniformDurationMode::Large {
                max_secs,
                max_nanos,
                secs,
            } => {
                // constant folding means this is at least as fast as `gen_range`
                let nano_range = Uniform::new(0, 1_000_000_000);
                loop {
                    let s = secs.sample(rng);
                    let n = nano_range.sample(rng);
                    // Reject samples past the (offset-adjusted) upper bound.
                    if !(s == max_secs && n > max_nanos) {
                        let sum = n + self.offset;
                        break Duration::new(s, sum);
                    }
                }
            }
        }
    }
}
964
965 #[cfg(test)]
966 mod tests {
967 use super::*;
968 use crate::rngs::mock::StepRng;
969
970 #[should_panic]
971 #[test]
972 fn test_uniform_bad_limits_equal_int() {
973 Uniform::new(10, 10);
974 }
975
976 #[test]
977 fn test_uniform_good_limits_equal_int() {
978 let mut rng = crate::test::rng(804);
979 let dist = Uniform::new_inclusive(10, 10);
980 for _ in 0..20 {
981 assert_eq!(rng.sample(dist), 10);
982 }
983 }
984
985 #[should_panic]
986 #[test]
987 fn test_uniform_bad_limits_flipped_int() {
988 Uniform::new(10, 5);
989 }
990
    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_integers() {
        #[cfg(not(target_os = "emscripten"))] use core::{i128, u128};
        use core::{i16, i32, i64, i8, isize};
        use core::{u16, u32, u64, u8, usize};

        let mut rng = crate::test::rng(251);
        macro_rules! t {
            // Single type: for each (low, high) pair, check `new`,
            // `new_inclusive`, the borrowed-argument constructors and
            // `gen_range`, using the supplied `$le`/`$lt` comparators
            // (closures, so SIMD lane-wise comparisons work too).
            ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{
                for &(low, high) in $v.iter() {
                    let my_uniform = Uniform::new(low, high);
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $lt(v, high));
                    }

                    let my_uniform = Uniform::new_inclusive(low, high);
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $le(v, high));
                    }

                    let my_uniform = Uniform::new(&low, high);
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $lt(v, high));
                    }

                    let my_uniform = Uniform::new_inclusive(&low, &high);
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $le(v, high));
                    }

                    for _ in 0..1000 {
                        let v: $ty = rng.gen_range(low, high);
                        assert!($le(low, v) && $lt(v, high));
                    }
                }
            }};

            // scalar bulk
            ($($ty:ident),*) => {{
                $(t!(
                    $ty,
                    [(0, 10), (10, 127), ($ty::MIN, $ty::MAX)],
                    |x, y| x <= y,
                    |x, y| x < y
                );)*
            }};

            // simd bulk
            ($($ty:ident),* => $scalar:ident) => {{
                $(t!(
                    $ty,
                    [
                        ($ty::splat(0), $ty::splat(10)),
                        ($ty::splat(10), $ty::splat(127)),
                        ($ty::splat($scalar::MIN), $ty::splat($scalar::MAX)),
                    ],
                    |x: $ty, y| x.le(y).all(),
                    |x: $ty, y| x.lt(y).all()
                );)*
            }};
        }
        t!(i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
        #[cfg(not(target_os = "emscripten"))]
        t!(i128, u128);

        #[cfg(all(feature = "simd_support", feature = "nightly"))]
        {
            t!(u8x2, u8x4, u8x8, u8x16, u8x32, u8x64 => u8);
            t!(i8x2, i8x4, i8x8, i8x16, i8x32, i8x64 => i8);
            t!(u16x2, u16x4, u16x8, u16x16, u16x32 => u16);
            t!(i16x2, i16x4, i16x8, i16x16, i16x32 => i16);
            t!(u32x2, u32x4, u32x8, u32x16 => u32);
            t!(i32x2, i32x4, i32x8, i32x16 => i32);
            t!(u64x2, u64x4, u64x8 => u64);
            t!(i64x2, i64x4, i64x8 => i64);
        }
    }
1073
    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_floats() {
        let mut rng = crate::test::rng(252);
        // RNGs pinned to all-zero / all-one output, to probe the extreme
        // ends of the generated value range deterministically.
        let mut zero_rng = StepRng::new(0, 0);
        let mut max_rng = StepRng::new(0xffff_ffff_ffff_ffff, 0);
        macro_rules! t {
            ($ty:ty, $f_scalar:ident, $bits_shifted:expr) => {{
                // (low, high) pairs covering tiny subnormal-bit ranges,
                // signed zero, and ranges touching the finite maximum.
                let v: &[($f_scalar, $f_scalar)] = &[
                    (0.0, 100.0),
                    (-1e35, -1e25),
                    (1e-35, 1e-25),
                    (-1e35, 1e35),
                    (<$f_scalar>::from_bits(0), <$f_scalar>::from_bits(3)),
                    (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)),
                    (-<$f_scalar>::from_bits(5), 0.0),
                    (-<$f_scalar>::from_bits(7), -0.0),
                    (10.0, ::core::$f_scalar::MAX),
                    (-100.0, ::core::$f_scalar::MAX),
                    (-::core::$f_scalar::MAX / 5.0, ::core::$f_scalar::MAX),
                    (-::core::$f_scalar::MAX, ::core::$f_scalar::MAX / 5.0),
                    (-::core::$f_scalar::MAX * 0.8, ::core::$f_scalar::MAX * 0.7),
                    (-::core::$f_scalar::MAX, ::core::$f_scalar::MAX),
                ];
                for &(low_scalar, high_scalar) in v.iter() {
                    // Exercise each SIMD lane in turn (scalars have 1 lane).
                    for lane in 0..<$ty>::lanes() {
                        let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
                        let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
                        let my_uniform = Uniform::new(low, high);
                        let my_incl_uniform = Uniform::new_inclusive(low, high);
                        for _ in 0..100 {
                            let v = rng.sample(my_uniform).extract(lane);
                            assert!(low_scalar <= v && v < high_scalar);
                            let v = rng.sample(my_incl_uniform).extract(lane);
                            assert!(low_scalar <= v && v <= high_scalar);
                            let v = rng.gen_range(low, high).extract(lane);
                            assert!(low_scalar <= v && v < high_scalar);
                        }

                        // A degenerate inclusive range yields its bound.
                        assert_eq!(
                            rng.sample(Uniform::new_inclusive(low, low)).extract(lane),
                            low_scalar
                        );

                        // All-zero RNG output must map to the low bound;
                        // all-one output must stay within the bounds.
                        assert_eq!(zero_rng.sample(my_uniform).extract(lane), low_scalar);
                        assert_eq!(zero_rng.sample(my_incl_uniform).extract(lane), low_scalar);
                        assert_eq!(zero_rng.gen_range(low, high).extract(lane), low_scalar);
                        assert!(max_rng.sample(my_uniform).extract(lane) < high_scalar);
                        assert!(max_rng.sample(my_incl_uniform).extract(lane) <= high_scalar);

                        // Don't run this test for really tiny differences between high and low
                        // since for those rounding might result in selecting high for a very
                        // long time.
                        if (high_scalar - low_scalar) > 0.0001 {
                            let mut lowering_max_rng = StepRng::new(
                                0xffff_ffff_ffff_ffff,
                                (-1i64 << $bits_shifted) as u64,
                            );
                            assert!(
                                lowering_max_rng.gen_range(low, high).extract(lane) < high_scalar
                            );
                        }
                    }
                }

                // Degenerate ranges at the extremes of the finite domain.
                assert_eq!(
                    rng.sample(Uniform::new_inclusive(
                        ::core::$f_scalar::MAX,
                        ::core::$f_scalar::MAX
                    )),
                    ::core::$f_scalar::MAX
                );
                assert_eq!(
                    rng.sample(Uniform::new_inclusive(
                        -::core::$f_scalar::MAX,
                        -::core::$f_scalar::MAX
                    )),
                    -::core::$f_scalar::MAX
                );
            }};
        }

        t!(f32, f32, 32 - 23);
        t!(f64, f64, 64 - 52);
        #[cfg(feature = "simd_support")]
        {
            t!(f32x2, f32, 32 - 23);
            t!(f32x4, f32, 32 - 23);
            t!(f32x8, f32, 32 - 23);
            t!(f32x16, f32, 32 - 23);
            t!(f64x2, f64, 64 - 52);
            t!(f64x4, f64, 64 - 52);
            t!(f64x8, f64, 64 - 52);
        }
    }
1169
    #[test]
    #[cfg(all(
        feature = "std",
        not(target_arch = "wasm32"),
        not(target_arch = "asmjs")
    ))]
    fn test_float_assertions() {
        use super::SampleUniform;
        use std::panic::catch_unwind;
        // Helper so each `gen_range` panic can be caught individually.
        fn range<T: SampleUniform>(low: T, high: T) {
            let mut rng = crate::test::rng(253);
            rng.gen_range(low, high);
        }

        macro_rules! t {
            ($ty:ident, $f_scalar:ident) => {{
                // Invalid (low, high) pairs: NaNs, reversed bounds and
                // non-finite endpoints must all cause a panic.
                let v: &[($f_scalar, $f_scalar)] = &[
                    (::std::$f_scalar::NAN, 0.0),
                    (1.0, ::std::$f_scalar::NAN),
                    (::std::$f_scalar::NAN, ::std::$f_scalar::NAN),
                    (1.0, 0.5),
                    (::std::$f_scalar::MAX, -::std::$f_scalar::MAX),
                    (::std::$f_scalar::INFINITY, ::std::$f_scalar::INFINITY),
                    (
                        ::std::$f_scalar::NEG_INFINITY,
                        ::std::$f_scalar::NEG_INFINITY,
                    ),
                    (::std::$f_scalar::NEG_INFINITY, 5.0),
                    (5.0, ::std::$f_scalar::INFINITY),
                    (::std::$f_scalar::NAN, ::std::$f_scalar::INFINITY),
                    (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NAN),
                    (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::INFINITY),
                ];
                for &(low_scalar, high_scalar) in v.iter() {
                    // A bad value in any single SIMD lane must trigger the panic.
                    for lane in 0..<$ty>::lanes() {
                        let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
                        let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
                        assert!(catch_unwind(|| range(low, high)).is_err());
                        assert!(catch_unwind(|| Uniform::new(low, high)).is_err());
                        assert!(catch_unwind(|| Uniform::new_inclusive(low, high)).is_err());
                        assert!(catch_unwind(|| range(low, low)).is_err());
                        assert!(catch_unwind(|| Uniform::new(low, low)).is_err());
                    }
                }
            }};
        }

        t!(f32, f32);
        t!(f64, f64);
        #[cfg(feature = "simd_support")]
        {
            t!(f32x2, f32);
            t!(f32x4, f32);
            t!(f32x8, f32);
            t!(f32x16, f32);
            t!(f64x2, f64);
            t!(f64x4, f64);
            t!(f64x8, f64);
        }
    }
1230
1231
1232 #[test]
1233 #[cfg_attr(miri, ignore)] // Miri is too slow
1234 fn test_durations() {
1235 #[cfg(not(feature = "std"))] use core::time::Duration;
1236 #[cfg(feature = "std")] use std::time::Duration;
1237
1238 let mut rng = crate::test::rng(253);
1239
1240 let v = &[
1241 (Duration::new(10, 50000), Duration::new(100, 1234)),
1242 (Duration::new(0, 100), Duration::new(1, 50)),
1243 (
1244 Duration::new(0, 0),
1245 Duration::new(u64::max_value(), 999_999_999),
1246 ),
1247 ];
1248 for &(low, high) in v.iter() {
1249 let my_uniform = Uniform::new(low, high);
1250 for _ in 0..1000 {
1251 let v = rng.sample(my_uniform);
1252 assert!(low <= v && v < high);
1253 }
1254 }
1255 }
1256
1257 #[test]
1258 fn test_custom_uniform() {
1259 use crate::distributions::uniform::{
1260 SampleBorrow, SampleUniform, UniformFloat, UniformSampler,
1261 };
1262 #[derive(Clone, Copy, PartialEq, PartialOrd)]
1263 struct MyF32 {
1264 x: f32,
1265 }
1266 #[derive(Clone, Copy, Debug)]
1267 struct UniformMyF32(UniformFloat<f32>);
1268 impl UniformSampler for UniformMyF32 {
1269 type X = MyF32;
1270
1271 fn new<B1, B2>(low: B1, high: B2) -> Self
1272 where
1273 B1: SampleBorrow<Self::X> + Sized,
1274 B2: SampleBorrow<Self::X> + Sized,
1275 {
1276 UniformMyF32(UniformFloat::<f32>::new(low.borrow().x, high.borrow().x))
1277 }
1278
1279 fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
1280 where
1281 B1: SampleBorrow<Self::X> + Sized,
1282 B2: SampleBorrow<Self::X> + Sized,
1283 {
1284 UniformSampler::new(low, high)
1285 }
1286
1287 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
1288 MyF32 {
1289 x: self.0.sample(rng),
1290 }
1291 }
1292 }
1293 impl SampleUniform for MyF32 {
1294 type Sampler = UniformMyF32;
1295 }
1296
1297 let (low, high) = (MyF32 { x: 17.0f32 }, MyF32 { x: 22.0f32 });
1298 let uniform = Uniform::new(low, high);
1299 let mut rng = crate::test::rng(804);
1300 for _ in 0..100 {
1301 let x: MyF32 = rng.sample(uniform);
1302 assert!(low <= x && x < high);
1303 }
1304 }
1305
1306 #[test]
1307 fn test_uniform_from_std_range() {
1308 let r = Uniform::from(2u32..7);
1309 assert_eq!(r.0.low, 2);
1310 assert_eq!(r.0.range, 5);
1311 let r = Uniform::from(2.0f64..7.0);
1312 assert_eq!(r.0.low, 2.0);
1313 assert_eq!(r.0.scale, 5.0);
1314 }
1315
1316 #[test]
1317 fn test_uniform_from_std_range_inclusive() {
1318 let r = Uniform::from(2u32..=6);
1319 assert_eq!(r.0.low, 2);
1320 assert_eq!(r.0.range, 5);
1321 let r = Uniform::from(2.0f64..=7.0);
1322 assert_eq!(r.0.low, 2.0);
1323 assert!(r.0.scale > 5.0);
1324 assert!(r.0.scale < 5.0 + 1e-14);
1325 }
1326
    #[test]
    fn value_stability() {
        // Pins the exact values produced from a fixed seed; a failure here
        // means the generation algorithm's output changed.
        fn test_samples<T: SampleUniform + Copy + core::fmt::Debug + PartialEq>(
            lb: T, ub: T, expected_single: &[T], expected_multiple: &[T],
        ) where Uniform<T>: Distribution<T> {
            let mut rng = crate::test::rng(897);
            let mut buf = [lb; 3];

            // One-shot path via `sample_single`.
            for x in &mut buf {
                *x = T::Sampler::sample_single(lb, ub, &mut rng);
            }
            assert_eq!(&buf, expected_single);

            // Pre-constructed distribution path.
            let distr = Uniform::new(lb, ub);
            for x in &mut buf {
                *x = rng.sample(&distr);
            }
            assert_eq!(&buf, expected_multiple);
        }

        // We test on a sub-set of types; possibly we should do more.
        // TODO: SIMD types

        test_samples(11u8, 219, &[17, 66, 214], &[181, 93, 165]);
        test_samples(11u32, 219, &[17, 66, 214], &[181, 93, 165]);

        test_samples(0f32, 1e-2f32, &[0.0003070104, 0.0026630748, 0.00979833], &[
            0.008194133,
            0.00398172,
            0.007428536,
        ]);
        test_samples(
            -1e10f64,
            1e10f64,
            &[-4673848682.871551, 6388267422.932352, 4857075081.198343],
            &[1173375212.1808167, 1917642852.109581, 2365076174.3153973],
        );

        test_samples(
            Duration::new(2, 0),
            Duration::new(4, 0),
            &[
                Duration::new(2, 532615131),
                Duration::new(3, 638826742),
                Duration::new(3, 485707508),
            ],
            &[
                Duration::new(3, 117337521),
                Duration::new(3, 191764285),
                Duration::new(3, 236507617),
            ],
        );
    }
1380 }