]>
Commit | Line | Data |
---|---|---|
416331ca XL |
1 | // Copyright 2018 Developers of the Rand project. |
2 | // | |
3 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | |
4 | // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license | |
5 | // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your | |
6 | // option. This file may not be copied, modified, or distributed | |
7 | // except according to those terms. | |
8 | ||
9 | //! The `BlockRngCore` trait and implementation helpers | |
10 | //! | |
11 | //! The [`BlockRngCore`] trait exists to assist in the implementation of RNGs | |
12 | //! which generate a block of data in a cache instead of returning generated | |
13 | //! values directly. | |
14 | //! | |
15 | //! Usage of this trait is optional, but provides two advantages: | |
16 | //! implementations only need to concern themselves with generation of the | |
17 | //! block, not the various [`RngCore`] methods (especially [`fill_bytes`], where | |
18 | //! the optimal implementations are not trivial), and this allows | |
19 | //! `ReseedingRng` (see [`rand`](https://docs.rs/rand) crate) perform periodic | |
20 | //! reseeding with very low overhead. | |
21 | //! | |
22 | //! # Example | |
23 | //! | |
//! ```no_run
//! use rand_core::SeedableRng;
//! use rand_core::block::{BlockRngCore, BlockRng};
//!
//! struct MyRngCore;
//!
//! impl BlockRngCore for MyRngCore {
//!     type Item = u32;
//!     type Results = [u32; 16];
//!
//!     fn generate(&mut self, results: &mut Self::Results) {
//!         unimplemented!()
//!     }
//! }
//!
//! impl SeedableRng for MyRngCore {
//!     type Seed = [u8; 32];
//!     fn from_seed(seed: Self::Seed) -> Self {
//!         unimplemented!()
//!     }
//! }
//!
//! // optionally, also implement CryptoRng for MyRngCore
//!
//! // Final RNG.
//! type MyRng = BlockRng<MyRngCore>;
//! ```
49 | //! | |
50 | //! [`BlockRngCore`]: crate::block::BlockRngCore | |
51 | //! [`fill_bytes`]: RngCore::fill_bytes | |
52 | ||
53 | use core::convert::AsRef; | |
54 | use core::{fmt, ptr}; | |
60c5eb7d XL |
55 | #[cfg(feature="serde1")] use serde::{Serialize, Deserialize}; |
56 | use crate::{RngCore, CryptoRng, SeedableRng, Error}; | |
57 | use crate::impls::{fill_via_u32_chunks, fill_via_u64_chunks}; | |
416331ca XL |
58 | |
59 | /// A trait for RNGs which do not generate random numbers individually, but in | |
60 | /// blocks (typically `[u32; N]`). This technique is commonly used by | |
61 | /// cryptographic RNGs to improve performance. | |
62 | /// | |
63 | /// See the [module][crate::block] documentation for details. | |
/// A trait for RNGs which do not generate random numbers individually, but in
/// blocks (typically `[u32; N]`). This technique is commonly used by
/// cryptographic RNGs to improve performance.
///
/// See the [module][crate::block] documentation for details.
pub trait BlockRngCore {
    /// Results element type, e.g. `u32`.
    type Item;

    /// Results type. This is the 'block' an RNG implementing `BlockRngCore`
    /// generates, which will usually be an array like `[u32; 16]`.
    ///
    /// The `Default` bound lets wrappers construct an initial (empty) buffer
    /// without calling `generate`.
    type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default;

    /// Generate a new block of results, writing into `results`.
    fn generate(&mut self, results: &mut Self::Results);
}
75 | ||
76 | ||
77 | /// A wrapper type implementing [`RngCore`] for some type implementing | |
78 | /// [`BlockRngCore`] with `u32` array buffer; i.e. this can be used to implement | |
79 | /// a full RNG from just a `generate` function. | |
80 | /// | |
81 | /// The `core` field may be accessed directly but the results buffer may not. | |
82 | /// PRNG implementations can simply use a type alias | |
83 | /// (`pub type MyRng = BlockRng<MyRngCore>;`) but might prefer to use a | |
84 | /// wrapper type (`pub struct MyRng(BlockRng<MyRngCore>);`); the latter must | |
85 | /// re-implement `RngCore` but hides the implementation details and allows | |
86 | /// extra functionality to be defined on the RNG | |
87 | /// (e.g. `impl MyRng { fn set_stream(...){...} }`). | |
88 | /// | |
89 | /// `BlockRng` has heavily optimized implementations of the [`RngCore`] methods | |
90 | /// reading values from the results buffer, as well as | |
91 | /// calling [`BlockRngCore::generate`] directly on the output array when | |
92 | /// [`fill_bytes`] / [`try_fill_bytes`] is called on a large array. These methods | |
93 | /// also handle the bookkeeping of when to generate a new batch of values. | |
94 | /// | |
/// No whole generated `u32` values are thrown away and all values are consumed
96 | /// in-order. [`next_u32`] simply takes the next available `u32` value. | |
97 | /// [`next_u64`] is implemented by combining two `u32` values, least | |
98 | /// significant first. [`fill_bytes`] and [`try_fill_bytes`] consume a whole | |
99 | /// number of `u32` values, converting each `u32` to a byte slice in | |
100 | /// little-endian order. If the requested byte length is not a multiple of 4, | |
101 | /// some bytes will be discarded. | |
102 | /// | |
103 | /// See also [`BlockRng64`] which uses `u64` array buffers. Currently there is | |
104 | /// no direct support for other buffer types. | |
105 | /// | |
106 | /// For easy initialization `BlockRng` also implements [`SeedableRng`]. | |
107 | /// | |
108 | /// [`next_u32`]: RngCore::next_u32 | |
109 | /// [`next_u64`]: RngCore::next_u64 | |
110 | /// [`fill_bytes`]: RngCore::fill_bytes | |
111 | /// [`try_fill_bytes`]: RngCore::try_fill_bytes | |
#[derive(Clone)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct BlockRng<R: BlockRngCore + ?Sized> {
    // Buffer of generated values; private so unconsumed output cannot be
    // observed or modified from outside.
    results: R::Results,
    // Position of the next unconsumed value; `index >= results.len()`
    // means the buffer is empty and must be regenerated before use.
    index: usize,
    /// The *core* part of the RNG, implementing the `generate` function.
    pub core: R,
}
120 | ||
121 | // Custom Debug implementation that does not expose the contents of `results`. | |
122 | impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng<R> { | |
123 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { | |
124 | fmt.debug_struct("BlockRng") | |
125 | .field("core", &self.core) | |
126 | .field("result_len", &self.results.as_ref().len()) | |
127 | .field("index", &self.index) | |
128 | .finish() | |
129 | } | |
130 | } | |
131 | ||
132 | impl<R: BlockRngCore> BlockRng<R> { | |
133 | /// Create a new `BlockRng` from an existing RNG implementing | |
134 | /// `BlockRngCore`. Results will be generated on first use. | |
135 | #[inline] | |
136 | pub fn new(core: R) -> BlockRng<R>{ | |
137 | let results_empty = R::Results::default(); | |
138 | BlockRng { | |
139 | core, | |
140 | index: results_empty.as_ref().len(), | |
141 | results: results_empty, | |
142 | } | |
143 | } | |
144 | ||
145 | /// Get the index into the result buffer. | |
146 | /// | |
147 | /// If this is equal to or larger than the size of the result buffer then | |
148 | /// the buffer is "empty" and `generate()` must be called to produce new | |
149 | /// results. | |
150 | #[inline(always)] | |
151 | pub fn index(&self) -> usize { | |
152 | self.index | |
153 | } | |
154 | ||
155 | /// Reset the number of available results. | |
156 | /// This will force a new set of results to be generated on next use. | |
157 | #[inline] | |
158 | pub fn reset(&mut self) { | |
159 | self.index = self.results.as_ref().len(); | |
160 | } | |
161 | ||
162 | /// Generate a new set of results immediately, setting the index to the | |
163 | /// given value. | |
164 | #[inline] | |
165 | pub fn generate_and_set(&mut self, index: usize) { | |
166 | assert!(index < self.results.as_ref().len()); | |
167 | self.core.generate(&mut self.results); | |
168 | self.index = index; | |
169 | } | |
170 | } | |
171 | ||
172 | impl<R: BlockRngCore<Item=u32>> RngCore for BlockRng<R> | |
173 | where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]> | |
174 | { | |
175 | #[inline] | |
176 | fn next_u32(&mut self) -> u32 { | |
177 | if self.index >= self.results.as_ref().len() { | |
178 | self.generate_and_set(0); | |
179 | } | |
180 | ||
181 | let value = self.results.as_ref()[self.index]; | |
182 | self.index += 1; | |
183 | value | |
184 | } | |
185 | ||
186 | #[inline] | |
187 | fn next_u64(&mut self) -> u64 { | |
188 | let read_u64 = |results: &[u32], index| { | |
189 | if cfg!(any(target_endian = "little")) { | |
190 | // requires little-endian CPU | |
60c5eb7d | 191 | #[allow(clippy::cast_ptr_alignment)] // false positive |
416331ca XL |
192 | let ptr: *const u64 = results[index..=index+1].as_ptr() as *const u64; |
193 | unsafe { ptr::read_unaligned(ptr) } | |
194 | } else { | |
195 | let x = u64::from(results[index]); | |
196 | let y = u64::from(results[index + 1]); | |
197 | (y << 32) | x | |
198 | } | |
199 | }; | |
200 | ||
201 | let len = self.results.as_ref().len(); | |
202 | ||
203 | let index = self.index; | |
204 | if index < len-1 { | |
205 | self.index += 2; | |
206 | // Read an u64 from the current index | |
207 | read_u64(self.results.as_ref(), index) | |
208 | } else if index >= len { | |
209 | self.generate_and_set(2); | |
210 | read_u64(self.results.as_ref(), 0) | |
211 | } else { | |
212 | let x = u64::from(self.results.as_ref()[len-1]); | |
213 | self.generate_and_set(1); | |
214 | let y = u64::from(self.results.as_ref()[0]); | |
215 | (y << 32) | x | |
216 | } | |
217 | } | |
218 | ||
219 | #[inline] | |
220 | fn fill_bytes(&mut self, dest: &mut [u8]) { | |
221 | let mut read_len = 0; | |
222 | while read_len < dest.len() { | |
223 | if self.index >= self.results.as_ref().len() { | |
224 | self.generate_and_set(0); | |
225 | } | |
226 | let (consumed_u32, filled_u8) = | |
227 | fill_via_u32_chunks(&self.results.as_ref()[self.index..], | |
228 | &mut dest[read_len..]); | |
229 | ||
230 | self.index += consumed_u32; | |
231 | read_len += filled_u8; | |
232 | } | |
233 | } | |
234 | ||
235 | #[inline(always)] | |
236 | fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { | |
60c5eb7d XL |
237 | self.fill_bytes(dest); |
238 | Ok(()) | |
416331ca XL |
239 | } |
240 | } | |
241 | ||
242 | impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> { | |
243 | type Seed = R::Seed; | |
244 | ||
245 | #[inline(always)] | |
246 | fn from_seed(seed: Self::Seed) -> Self { | |
247 | Self::new(R::from_seed(seed)) | |
248 | } | |
249 | ||
250 | #[inline(always)] | |
251 | fn seed_from_u64(seed: u64) -> Self { | |
252 | Self::new(R::seed_from_u64(seed)) | |
253 | } | |
254 | ||
255 | #[inline(always)] | |
256 | fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> { | |
257 | Ok(Self::new(R::from_rng(rng)?)) | |
258 | } | |
259 | } | |
260 | ||
261 | ||
262 | ||
263 | /// A wrapper type implementing [`RngCore`] for some type implementing | |
264 | /// [`BlockRngCore`] with `u64` array buffer; i.e. this can be used to implement | |
265 | /// a full RNG from just a `generate` function. | |
266 | /// | |
267 | /// This is similar to [`BlockRng`], but specialized for algorithms that operate | |
268 | /// on `u64` values. | |
269 | /// | |
270 | /// No whole generated `u64` values are thrown away and all values are consumed | |
271 | /// in-order. [`next_u64`] simply takes the next available `u64` value. | |
272 | /// [`next_u32`] is however a bit special: half of a `u64` is consumed, leaving | |
273 | /// the other half in the buffer. If the next function called is [`next_u32`] | |
274 | /// then the other half is then consumed, however both [`next_u64`] and | |
275 | /// [`fill_bytes`] discard the rest of any half-consumed `u64`s when called. | |
276 | /// | |
277 | /// [`fill_bytes`] and [`try_fill_bytes`] consume a whole number of `u64` | |
278 | /// values. If the requested length is not a multiple of 8, some bytes will be | |
279 | /// discarded. | |
280 | /// | |
281 | /// [`next_u32`]: RngCore::next_u32 | |
282 | /// [`next_u64`]: RngCore::next_u64 | |
283 | /// [`fill_bytes`]: RngCore::fill_bytes | |
284 | /// [`try_fill_bytes`]: RngCore::try_fill_bytes | |
#[derive(Clone)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct BlockRng64<R: BlockRngCore + ?Sized> {
    // Buffer of generated values; private so unconsumed output cannot be
    // observed or modified from outside.
    results: R::Results,
    // Position of the next unconsumed `u64`; `index >= results.len()`
    // means the buffer is empty and must be regenerated before use.
    index: usize,
    half_used: bool, // true if only half of the previous result is used
    /// The *core* part of the RNG, implementing the `generate` function.
    pub core: R,
}
294 | ||
295 | // Custom Debug implementation that does not expose the contents of `results`. | |
296 | impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> { | |
297 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { | |
298 | fmt.debug_struct("BlockRng64") | |
299 | .field("core", &self.core) | |
300 | .field("result_len", &self.results.as_ref().len()) | |
301 | .field("index", &self.index) | |
302 | .field("half_used", &self.half_used) | |
303 | .finish() | |
304 | } | |
305 | } | |
306 | ||
307 | impl<R: BlockRngCore> BlockRng64<R> { | |
308 | /// Create a new `BlockRng` from an existing RNG implementing | |
309 | /// `BlockRngCore`. Results will be generated on first use. | |
310 | #[inline] | |
311 | pub fn new(core: R) -> BlockRng64<R>{ | |
312 | let results_empty = R::Results::default(); | |
313 | BlockRng64 { | |
314 | core, | |
315 | index: results_empty.as_ref().len(), | |
316 | half_used: false, | |
317 | results: results_empty, | |
318 | } | |
319 | } | |
320 | ||
321 | /// Get the index into the result buffer. | |
322 | /// | |
323 | /// If this is equal to or larger than the size of the result buffer then | |
324 | /// the buffer is "empty" and `generate()` must be called to produce new | |
325 | /// results. | |
326 | #[inline(always)] | |
327 | pub fn index(&self) -> usize { | |
328 | self.index | |
329 | } | |
330 | ||
331 | /// Reset the number of available results. | |
332 | /// This will force a new set of results to be generated on next use. | |
333 | #[inline] | |
334 | pub fn reset(&mut self) { | |
335 | self.index = self.results.as_ref().len(); | |
336 | self.half_used = false; | |
337 | } | |
338 | ||
339 | /// Generate a new set of results immediately, setting the index to the | |
340 | /// given value. | |
341 | #[inline] | |
342 | pub fn generate_and_set(&mut self, index: usize) { | |
343 | assert!(index < self.results.as_ref().len()); | |
344 | self.core.generate(&mut self.results); | |
345 | self.index = index; | |
346 | self.half_used = false; | |
347 | } | |
348 | } | |
349 | ||
350 | impl<R: BlockRngCore<Item=u64>> RngCore for BlockRng64<R> | |
351 | where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]> | |
352 | { | |
353 | #[inline] | |
354 | fn next_u32(&mut self) -> u32 { | |
355 | let mut index = self.index * 2 - self.half_used as usize; | |
356 | if index >= self.results.as_ref().len() * 2 { | |
357 | self.core.generate(&mut self.results); | |
358 | self.index = 0; | |
359 | // `self.half_used` is by definition `false` | |
360 | self.half_used = false; | |
361 | index = 0; | |
362 | } | |
363 | ||
364 | self.half_used = !self.half_used; | |
365 | self.index += self.half_used as usize; | |
366 | ||
367 | // Index as if this is a u32 slice. | |
368 | unsafe { | |
369 | let results = | |
370 | &*(self.results.as_ref() as *const [u64] as *const [u32]); | |
371 | if cfg!(target_endian = "little") { | |
372 | *results.get_unchecked(index) | |
373 | } else { | |
374 | *results.get_unchecked(index ^ 1) | |
375 | } | |
376 | } | |
377 | } | |
378 | ||
379 | #[inline] | |
380 | fn next_u64(&mut self) -> u64 { | |
381 | if self.index >= self.results.as_ref().len() { | |
382 | self.core.generate(&mut self.results); | |
383 | self.index = 0; | |
384 | } | |
385 | ||
386 | let value = self.results.as_ref()[self.index]; | |
387 | self.index += 1; | |
388 | self.half_used = false; | |
389 | value | |
390 | } | |
391 | ||
392 | #[inline] | |
393 | fn fill_bytes(&mut self, dest: &mut [u8]) { | |
394 | let mut read_len = 0; | |
395 | self.half_used = false; | |
396 | while read_len < dest.len() { | |
397 | if self.index as usize >= self.results.as_ref().len() { | |
398 | self.core.generate(&mut self.results); | |
399 | self.index = 0; | |
400 | } | |
401 | ||
402 | let (consumed_u64, filled_u8) = | |
403 | fill_via_u64_chunks(&self.results.as_ref()[self.index as usize..], | |
404 | &mut dest[read_len..]); | |
405 | ||
406 | self.index += consumed_u64; | |
407 | read_len += filled_u8; | |
408 | } | |
409 | } | |
410 | ||
411 | #[inline(always)] | |
412 | fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { | |
60c5eb7d XL |
413 | self.fill_bytes(dest); |
414 | Ok(()) | |
416331ca XL |
415 | } |
416 | } | |
417 | ||
418 | impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> { | |
419 | type Seed = R::Seed; | |
420 | ||
421 | #[inline(always)] | |
422 | fn from_seed(seed: Self::Seed) -> Self { | |
423 | Self::new(R::from_seed(seed)) | |
424 | } | |
425 | ||
426 | #[inline(always)] | |
427 | fn seed_from_u64(seed: u64) -> Self { | |
428 | Self::new(R::seed_from_u64(seed)) | |
429 | } | |
430 | ||
431 | #[inline(always)] | |
432 | fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> { | |
433 | Ok(Self::new(R::from_rng(rng)?)) | |
434 | } | |
435 | } | |
436 | ||
437 | impl<R: BlockRngCore + CryptoRng> CryptoRng for BlockRng<R> {} |