1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Sampling from random distributions.
13 //! This is a generalization of `Rand` to allow parameters to control the
14 //! exact properties of the generated values, e.g. the mean and standard
15 //! deviation of a normal distribution. The `Sample` trait is the most
16 //! general, and allows for generating values that change some state
17 //! internally. The `IndependentSample` trait is for generating values
18 //! that do not need to record state.
22 use core
::marker
::PhantomData
;
26 pub use self::range
::Range
;
27 pub use self::gamma
::{Gamma, ChiSquared, FisherF, StudentT}
;
28 pub use self::normal
::{Normal, LogNormal}
;
29 pub use self::exponential
::Exp
;
36 /// Types that can be used to create a random instance of `Support`.
37 pub trait Sample
<Support
> {
38 /// Generate a random value of `Support`, using `rng` as the
39 /// source of randomness.
40 fn sample
<R
: Rng
>(&mut self, rng
: &mut R
) -> Support
;
43 /// `Sample`s that do not require keeping track of state.
45 /// Since no state is recorded, each sample is (statistically)
46 /// independent of all others, assuming the `Rng` used has this
48 // FIXME maybe having this separate is overkill (the only reason is to
49 // take &self rather than &mut self)? or maybe this should be the
50 // trait called `Sample` and the other should be `DependentSample`.
51 pub trait IndependentSample
<Support
>: Sample
<Support
> {
52 /// Generate a random value.
53 fn ind_sample
<R
: Rng
>(&self, &mut R
) -> Support
;
/// A wrapper for generating types that implement `Rand` via the
/// `Sample` & `IndependentSample` traits.
pub struct RandSample<Sup> {
    // Zero-sized marker: ties the generated type `Sup` to this struct
    // without storing a value of that type.
    _marker: PhantomData<Sup>,
}
60 impl<Sup
> RandSample
<Sup
> {
61 pub fn new() -> RandSample
<Sup
> {
62 RandSample { _marker: PhantomData }
66 impl<Sup
: Rand
> Sample
<Sup
> for RandSample
<Sup
> {
67 fn sample
<R
: Rng
>(&mut self, rng
: &mut R
) -> Sup { self.ind_sample(rng) }
70 impl<Sup
: Rand
> IndependentSample
<Sup
> for RandSample
<Sup
> {
71 fn ind_sample
<R
: Rng
>(&self, rng
: &mut R
) -> Sup
{
/// A value with a particular weight for use with `WeightedChoice`.
pub struct Weighted<T> {
    /// The numerical weight of this item
    pub weight: usize,
    /// The actual item which is being weighted
    pub item: T,
}
84 /// A distribution that selects from a finite collection of weighted items.
86 /// Each item has an associated weight that influences how likely it
87 /// is to be chosen: higher weight is more likely.
89 /// The `Clone` restriction is a limitation of the `Sample` and
90 /// `IndependentSample` traits. Note that `&T` is (cheaply) `Clone` for
91 /// all `T`, as is `usize`, so one can store references or indices into
93 pub struct WeightedChoice
<'a
, T
:'a
> {
94 items
: &'a
mut [Weighted
<T
>],
95 weight_range
: Range
<usize>
98 impl<'a
, T
: Clone
> WeightedChoice
<'a
, T
> {
99 /// Create a new `WeightedChoice`.
103 /// - the total weight is 0
104 /// - the total weight is larger than a `usize` can contain.
105 pub fn new(items
: &'a
mut [Weighted
<T
>]) -> WeightedChoice
<'a
, T
> {
106 // strictly speaking, this is subsumed by the total weight == 0 case
107 assert
!(!items
.is_empty(), "WeightedChoice::new called with no items");
109 let mut running_total
= 0_usize
;
111 // we convert the list from individual weights to cumulative
112 // weights so we can binary search. This *could* drop elements
113 // with weight == 0 as an optimisation.
114 for item
in &mut *items
{
115 running_total
= match running_total
.checked_add(item
.weight
) {
117 None
=> panic
!("WeightedChoice::new called with a total weight \
118 larger than a usize can contain")
121 item
.weight
= running_total
;
123 assert
!(running_total
!= 0, "WeightedChoice::new called with a total weight of 0");
127 // we're likely to be generating numbers in this range
128 // relatively often, so might as well cache it
129 weight_range
: Range
::new(0, running_total
)
134 impl<'a
, T
: Clone
> Sample
<T
> for WeightedChoice
<'a
, T
> {
135 fn sample
<R
: Rng
>(&mut self, rng
: &mut R
) -> T { self.ind_sample(rng) }
138 impl<'a
, T
: Clone
> IndependentSample
<T
> for WeightedChoice
<'a
, T
> {
139 fn ind_sample
<R
: Rng
>(&self, rng
: &mut R
) -> T
{
140 // we want to find the first element that has cumulative
141 // weight > sample_weight, which we do by binary since the
142 // cumulative weights of self.items are sorted.
144 // choose a weight in [0, total_weight)
145 let sample_weight
= self.weight_range
.ind_sample(rng
);
147 // short circuit when it's the first item
148 if sample_weight
< self.items
[0].weight
{
149 return self.items
[0].item
.clone();
153 let mut modifier
= self.items
.len();
155 // now we know that every possibility has an element to the
156 // left, so we can just search for the last element that has
157 // cumulative weight <= sample_weight, then the next one will
158 // be "it". (Note that this greatest element will never be the
159 // last element of the vector, since sample_weight is chosen
160 // in [0, total_weight) and the cumulative weight of the last
161 // one is exactly the total weight.)
163 let i
= idx
+ modifier
/ 2;
164 if self.items
[i
].weight
<= sample_weight
{
165 // we're small, so look to the right, but allow this
166 // exact element still.
168 // we need the `/ 2` to round up otherwise we'll drop
169 // the trailing elements when `modifier` is odd.
172 // otherwise we're too big, so go left. (i.e. do
177 return self.items
[idx
+ 1].item
.clone();
183 /// Sample a random number using the Ziggurat method (specifically the
184 /// ZIGNOR variant from Doornik 2005). Most of the arguments are
185 /// directly from the paper:
187 /// * `rng`: source of randomness
188 /// * `symmetric`: whether this is a symmetric distribution, or one-sided with P(x < 0) = 0.
189 /// * `X`: the $x_i$ abscissae.
190 /// * `F`: precomputed values of the PDF at the $x_i$, (i.e. $f(x_i)$)
191 /// * `F_DIFF`: precomputed values of $f(x_i) - f(x_{i+1})$
192 /// * `pdf`: the probability density function
193 /// * `zero_case`: manual sampling from the tail when we chose the
194 /// bottom box (i.e. i == 0)
196 // the perf improvement (25-50%) is definitely worth the extra code
197 // size from force-inlining.
199 fn ziggurat
<R
: Rng
, P
, Z
>(
202 x_tab
: ziggurat_tables
::ZigTable
,
203 f_tab
: ziggurat_tables
::ZigTable
,
206 -> f64 where P
: FnMut(f64) -> f64, Z
: FnMut(&mut R
, f64) -> f64 {
207 const SCALE
: f64 = (1u64 << 53) as f64;
209 // reimplement the f64 generation as an optimisation suggested
210 // by the Doornik paper: we have a lot of precision-space
211 // (i.e. there are 11 bits of the 64 of a u64 to use after
212 // creating a f64), so we might as well reuse some to save
213 // generating a whole extra random number. (Seems to be 15%
216 // This unfortunately misses out on the benefits of direct
217 // floating point generation if an RNG like dSMFT is
218 // used. (That is, such RNGs create floats directly, highly
219 // efficiently and overload next_f32/f64, so by not calling it
220 // this may be slower than it would be otherwise.)
221 // FIXME: investigate/optimise for the above.
222 let bits
: u64 = rng
.gen();
223 let i
= (bits
& 0xff) as usize;
224 let f
= (bits
>> 11) as f64 / SCALE
;
226 // u is either U(-1, 1) or U(0, 1) depending on if this is a
227 // symmetric distribution or not.
228 let u
= if symmetric {2.0 * f - 1.0}
else {f}
;
229 let x
= u
* x_tab
[i
];
231 let test_x
= if symmetric { x.abs() }
else {x}
;
233 // algebraically equivalent to |u| < x_tab[i+1]/x_tab[i] (or u < x_tab[i+1]/x_tab[i])
234 if test_x
< x_tab
[i
+ 1] {
238 return zero_case(rng
, u
);
240 // algebraically equivalent to f1 + DRanU()*(f0 - f1) < 1
241 if f_tab
[i
+ 1] + (f_tab
[i
] - f_tab
[i
+ 1]) * rng
.gen
::<f64>() < pdf(x
) {
249 use std
::prelude
::v1
::*;
252 use super::{RandSample, WeightedChoice, Weighted, Sample, IndependentSample}
;
254 #[derive(PartialEq, Debug)]
255 struct ConstRand(usize);
256 impl Rand
for ConstRand
{
257 fn rand
<R
: Rng
>(_
: &mut R
) -> ConstRand
{
263 struct CountingRng { i: u32 }
264 impl Rng
for CountingRng
{
265 fn next_u32(&mut self) -> u32 {
269 fn next_u64(&mut self) -> u64 {
270 self.next_u32() as u64
#[test]
fn test_rand_sample() {
    let mut rand_sample = RandSample::<ConstRand>::new();

    // ConstRand ignores the RNG, so both sampling paths must yield 0.
    assert_eq!(rand_sample.sample(&mut ::test::rng()), ConstRand(0));
    assert_eq!(rand_sample.ind_sample(&mut ::test::rng()), ConstRand(0));
}
#[test]
fn test_weighted_choice() {
    // this makes assumptions about the internal implementation of
    // WeightedChoice, specifically: it doesn't reorder the items,
    // it doesn't do weird things to the RNG (so 0 maps to 0, 1 to
    // 1, internally; modulo a modulo operation).

    macro_rules! t {
        ($items:expr, $expected:expr) => {{
            let mut items = $items;
            let wc = WeightedChoice::new(&mut items);
            let expected = $expected;

            let mut rng = CountingRng { i: 0 };

            for &val in &expected {
                assert_eq!(wc.ind_sample(&mut rng), val)
            }
        }}
    }

    t!(vec!(Weighted { weight: 1, item: 10}), [10]);

    // zero-weight items are never selected.
    t!(vec!(Weighted { weight: 0, item: 20},
            Weighted { weight: 2, item: 21},
            Weighted { weight: 0, item: 22},
            Weighted { weight: 1, item: 23}),
       [21, 21, 23]);

    t!(vec!(Weighted { weight: 4, item: 30},
            Weighted { weight: 3, item: 31}),
       [30,30,30,30, 31,31,31]);

    // check that we're binary searching
    // correctly with some vectors of odd
    // and even length.
    t!(vec!(Weighted { weight: 1, item: 40},
            Weighted { weight: 1, item: 41},
            Weighted { weight: 1, item: 42},
            Weighted { weight: 1, item: 43},
            Weighted { weight: 1, item: 44}),
       [40, 41, 42, 43, 44]);
    t!(vec!(Weighted { weight: 1, item: 50},
            Weighted { weight: 1, item: 51},
            Weighted { weight: 1, item: 52},
            Weighted { weight: 1, item: 53},
            Weighted { weight: 1, item: 54},
            Weighted { weight: 1, item: 55},
            Weighted { weight: 1, item: 56}),
       [50, 51, 52, 53, 54, 55, 56]);
}
// `new` must panic when handed an empty slice.
#[test] #[should_panic]
fn test_weighted_choice_no_items() {
    WeightedChoice::<isize>::new(&mut []);
}
// `new` must panic when every weight is zero (total weight == 0).
#[test] #[should_panic]
fn test_weighted_choice_zero_weight() {
    WeightedChoice::new(&mut [Weighted { weight: 0, item: 0},
                              Weighted { weight: 0, item: 1}]);
}
// `new` must panic when the cumulative weight overflows a `usize`.
#[test] #[should_panic]
fn test_weighted_choice_weight_overflows() {
    let x = (!0) as usize / 2; // x + x + 2 is the overflow
    WeightedChoice::new(&mut [Weighted { weight: x, item: 0 },
                              Weighted { weight: 1, item: 1 },
                              Weighted { weight: x, item: 2 },
                              Weighted { weight: 1, item: 3 }]);
}