// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Helper functions for implementing `RngCore` functions.
//!
//! For cross-platform reproducibility, these functions all use Little Endian:
//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
//!
//! Byte-swapping (like the std `to_le` functions) is only needed to convert
//! to/from byte sequences, and since its purpose is reproducibility,
//! non-reproducible sources (e.g. `OsRng`) need not bother with it.

use core::intrinsics::transmute;
use core::ptr::copy_nonoverlapping;
use core::slice;
use core::cmp::min;
use core::mem::size_of;
use RngCore;


/// Implement `next_u64` via `next_u32`, little-endian order.
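///
/// # Example
///
/// A sketch of how an `RngCore` impl might forward to this helper (the
/// `CountingRng` type is hypothetical, not part of this crate):
///
/// ```ignore
/// impl RngCore for CountingRng {
///     fn next_u32(&mut self) -> u32 {
///         self.state += 1;
///         self.state
///     }
///
///     fn next_u64(&mut self) -> u64 {
///         // Starting from state 0, this yields (2 << 32) | 1.
///         impls::next_u64_via_u32(self)
///     }
///     // fill_bytes and try_fill_bytes omitted
/// }
/// ```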
pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    // Use LE; we explicitly generate one value before the next.
    let x = u64::from(rng.next_u32());
    let y = u64::from(rng.next_u32());
    (y << 32) | x
}

/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
///
/// The fastest way to fill a slice is usually to work with integers for as
/// long as possible. That is why this method mostly uses `next_u64`, and only
/// calls `next_u32` once, when 4 or fewer bytes remain at the end of the
/// slice.
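///
/// # Example
///
/// A sketch of forwarding `fill_bytes` from an `RngCore` impl:
///
/// ```ignore
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
///     impls::fill_bytes_via_next(self, dest)
/// }
/// ```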
pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
    let mut left = dest;
    while left.len() >= 8 {
        // `{left}` moves the reference out so that `left` can be reassigned.
        let (l, r) = {left}.split_at_mut(8);
        left = r;
        let chunk: [u8; 8] = unsafe {
            transmute(rng.next_u64().to_le())
        };
        l.copy_from_slice(&chunk);
    }
    let n = left.len();
    if n > 4 {
        let chunk: [u8; 8] = unsafe {
            transmute(rng.next_u64().to_le())
        };
        left.copy_from_slice(&chunk[..n]);
    } else if n > 0 {
        let chunk: [u8; 4] = unsafe {
            transmute(rng.next_u32().to_le())
        };
        left.copy_from_slice(&chunk[..n]);
    }
}

macro_rules! impl_uint_from_fill {
    ($rng:expr, $ty:ty, $N:expr) => ({
        debug_assert!($N == size_of::<$ty>());

        // Zero an integer, then let `fill_bytes` write its bytes in place.
        let mut int: $ty = 0;
        unsafe {
            let ptr = &mut int as *mut $ty as *mut u8;
            let slice = slice::from_raw_parts_mut(ptr, $N);
            $rng.fill_bytes(slice);
        }
        int
    });
}

macro_rules! fill_via_chunks {
    ($src:expr, $dst:expr, $ty:ty, $size:expr) => ({
        // Bytes to copy, and the number of source words that covers
        // (rounded up).
        let chunk_size_u8 = min($src.len() * $size, $dst.len());
        let chunk_size = (chunk_size_u8 + $size - 1) / $size;
        if cfg!(target_endian="little") {
            // On LE platforms the words already have the right byte order,
            // so copy them as a single block.
            unsafe {
                copy_nonoverlapping(
                    $src.as_ptr() as *const u8,
                    $dst.as_mut_ptr(),
                    chunk_size_u8);
            }
        } else {
            // On BE platforms, byte-swap each word to LE before copying.
            for (&n, chunk) in $src.iter().zip($dst.chunks_mut($size)) {
                let tmp = n.to_le();
                let src_ptr = &tmp as *const $ty as *const u8;
                unsafe {
                    copy_nonoverlapping(src_ptr,
                                        chunk.as_mut_ptr(),
                                        chunk.len());
                }
            }
        }

        (chunk_size, chunk_size_u8)
    });
}

/// Implement `fill_bytes` by reading chunks from the output buffer of a
/// block-based RNG.
///
/// The return values are `(consumed_u32, filled_u8)`.
///
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u32` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 4` rounded up.
///
/// # Example
/// (from `IsaacRng`)
///
/// ```ignore
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
///     let mut read_len = 0;
///     while read_len < dest.len() {
///         if self.index >= self.rsl.len() {
///             self.isaac();
///         }
///
///         let (consumed_u32, filled_u8) =
///             impls::fill_via_u32_chunks(&mut self.rsl[self.index..],
///                                        &mut dest[read_len..]);
///
///         self.index += consumed_u32;
///         read_len += filled_u8;
///     }
/// }
/// ```
pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
    fill_via_chunks!(src, dest, u32, 4)
}

/// Implement `fill_bytes` by reading chunks from the output buffer of a
/// block-based RNG.
///
/// The return values are `(consumed_u64, filled_u8)`.
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u64` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 8` rounded up.
///
/// See `fill_via_u32_chunks` for an example.
pub fn fill_via_u64_chunks(src: &[u64], dest: &mut [u8]) -> (usize, usize) {
    fill_via_chunks!(src, dest, u64, 8)
}

/// Implement `next_u32` via `fill_bytes`, little-endian order.
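///
/// # Example
///
/// A sketch of forwarding `next_u32` from an `RngCore` impl (the twin helper
/// `next_u64_via_fill` below is used the same way):
///
/// ```ignore
/// fn next_u32(&mut self) -> u32 {
///     impls::next_u32_via_fill(self)
/// }
/// ```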
pub fn next_u32_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u32 {
    impl_uint_from_fill!(rng, u32, 4)
}

/// Implement `next_u64` via `fill_bytes`, little-endian order.
pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    impl_uint_from_fill!(rng, u64, 8)
}

// TODO: implement tests for the above
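
// A minimal sketch of the tests this TODO asks for; the `StepRng` helper
// below is hypothetical, not part of this crate, and the expected values
// follow directly from the little-endian conventions documented above.
#[cfg(test)]
mod tests {
    use super::*;
    use {Error, RngCore};

    /// Deterministic counter RNG, used only to exercise the helpers.
    struct StepRng(u32);

    impl RngCore for StepRng {
        fn next_u32(&mut self) -> u32 {
            self.0 += 1;
            self.0
        }
        fn next_u64(&mut self) -> u64 {
            next_u64_via_u32(self)
        }
        fn fill_bytes(&mut self, dest: &mut [u8]) {
            fill_bytes_via_next(self, dest)
        }
        fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
            Ok(self.fill_bytes(dest))
        }
    }

    #[test]
    fn next_u64_via_u32_is_little_endian() {
        // next_u32 yields 1 then 2; LE order gives (2 << 32) | 1.
        let mut rng = StepRng(0);
        assert_eq!(rng.next_u64(), (2u64 << 32) | 1);
    }

    #[test]
    fn fill_via_u32_chunks_partial_dest() {
        // Three u32 words cover at most 12 bytes; an 11-byte dest consumes
        // all three words but reports only 11 filled bytes.
        let src = [1u32, 2, 3];
        let mut dest = [0u8; 11];
        assert_eq!(fill_via_u32_chunks(&src, &mut dest), (3, 11));
        // Little-endian: the least-significant byte of the first word leads.
        assert_eq!(dest[0], 1);
    }
}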