// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Helper functions for implementing `RngCore` functions.
//!
//! For cross-platform reproducibility, these functions all use Little Endian:
//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
//!
//! Byte-swapping (like the std `to_le` functions) is only needed to convert
//! to/from byte sequences, and since its purpose is reproducibility,
//! non-reproducible sources (e.g. `OsRng`) need not bother with it.
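//!
//! For illustration, a sketch of both directions of this convention
//! (`rng` here stands for any `RngCore`; this is a sketch, not a doctest):
//!
//! ```ignore
//! let x = u64::from(rng.next_u32()); // generated first, least-significant half
//! let y = u64::from(rng.next_u32()); // generated second, most-significant half
//! let value = (y << 32) | x;         // next_u64 from two next_u32 calls
//! let low = value as u32;            // the reverse: next_u32 from next_u64
//! ```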

use core::ptr::copy_nonoverlapping;
use core::slice;
use core::cmp::min;
use core::mem::size_of;
use crate::RngCore;


/// Implement `next_u64` via `next_u32`, little-endian order.
pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    // Use LE; we explicitly generate one value before the next.
    let x = u64::from(rng.next_u32());
    let y = u64::from(rng.next_u32());
    (y << 32) | x
}

/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
///
/// The fastest way to fill a slice is usually to work as long as possible with
/// integers. That is why this method mostly uses `next_u64`, and only when
/// there are 4 or fewer bytes remaining at the end of the slice does it use
/// `next_u32` once.
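///
/// A typical use, as a sketch (illustrative only; the surrounding `RngCore`
/// impl and its generator type are assumed, not part of this module):
///
/// ```ignore
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
///     impls::fill_bytes_via_next(self, dest)
/// }
/// ```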
pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
    let mut left = dest;
    while left.len() >= 8 {
        let (l, r) = {left}.split_at_mut(8);
        left = r;
        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
        l.copy_from_slice(&chunk);
    }
    let n = left.len();
    if n > 4 {
        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
        left.copy_from_slice(&chunk[..n]);
    } else if n > 0 {
        let chunk: [u8; 4] = rng.next_u32().to_le_bytes();
        left.copy_from_slice(&chunk[..n]);
    }
}

// Build an unsigned integer of `$N` bytes by filling its memory in place via
// the RNG's `fill_bytes`.
macro_rules! impl_uint_from_fill {
    ($rng:expr, $ty:ty, $N:expr) => ({
        debug_assert!($N == size_of::<$ty>());

        let mut int: $ty = 0;
        unsafe {
            let ptr = &mut int as *mut $ty as *mut u8;
            let slice = slice::from_raw_parts_mut(ptr, $N);
            $rng.fill_bytes(slice);
        }
        int
    });
}

// Copy words from `$src` into the byte slice `$dst` in little-endian order,
// returning `(words consumed, bytes filled)`.
macro_rules! fill_via_chunks {
    ($src:expr, $dst:expr, $ty:ty, $size:expr) => ({
        let chunk_size_u8 = min($src.len() * $size, $dst.len());
        let chunk_size = (chunk_size_u8 + $size - 1) / $size;
        if cfg!(target_endian="little") {
            // The words are already little-endian in memory; copy bytes directly.
            unsafe {
                copy_nonoverlapping(
                    $src.as_ptr() as *const u8,
                    $dst.as_mut_ptr(),
                    chunk_size_u8);
            }
        } else {
            // Byte-swap each word to little-endian before copying it out.
            for (&n, chunk) in $src.iter().zip($dst.chunks_mut($size)) {
                let tmp = n.to_le();
                let src_ptr = &tmp as *const $ty as *const u8;
                unsafe {
                    copy_nonoverlapping(src_ptr,
                                        chunk.as_mut_ptr(),
                                        chunk.len());
                }
            }
        }

        (chunk_size, chunk_size_u8)
    });
}

/// Implement `fill_bytes` by reading chunks from the output buffer of a
/// block-based RNG.
///
/// The return values are `(consumed_u32, filled_u8)`.
///
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u32` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 4` rounded up.
///
/// # Example
/// (from `IsaacRng`)
///
/// ```ignore
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
///     let mut read_len = 0;
///     while read_len < dest.len() {
///         if self.index >= self.rsl.len() {
///             self.isaac();
///         }
///
///         let (consumed_u32, filled_u8) =
///             impls::fill_via_u32_chunks(&mut self.rsl[self.index..],
///                                        &mut dest[read_len..]);
///
///         self.index += consumed_u32;
///         read_len += filled_u8;
///     }
/// }
/// ```
pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
    fill_via_chunks!(src, dest, u32, 4)
}

/// Implement `fill_bytes` by reading chunks from the output buffer of a
/// block-based RNG.
///
/// The return values are `(consumed_u64, filled_u8)`.
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u64` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 8` rounded up.
///
/// See `fill_via_u32_chunks` for an example.
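///
/// A parallel sketch for the `u64` case (illustrative only; `self.results`
/// is a hypothetical `u64` output buffer, not part of this crate):
///
/// ```ignore
/// let (consumed_u64, filled_u8) =
///     impls::fill_via_u64_chunks(&self.results[self.index..],
///                                &mut dest[read_len..]);
/// self.index += consumed_u64;
/// read_len += filled_u8;
/// ```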
pub fn fill_via_u64_chunks(src: &[u64], dest: &mut [u8]) -> (usize, usize) {
    fill_via_chunks!(src, dest, u64, 8)
}

/// Implement `next_u32` via `fill_bytes`, little-endian order.
pub fn next_u32_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u32 {
    impl_uint_from_fill!(rng, u32, 4)
}

/// Implement `next_u64` via `fill_bytes`, little-endian order.
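///
/// A typical use, as a sketch (illustrative only; the surrounding `RngCore`
/// impl, whose native output is bytes, is assumed):
///
/// ```ignore
/// fn next_u64(&mut self) -> u64 {
///     impls::next_u64_via_fill(self)
/// }
/// ```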
pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    impl_uint_from_fill!(rng, u64, 8)
}

// TODO: implement tests for the above
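
// A minimal sketch of such tests (illustrative; `CountingRng` below is a
// hypothetical stub defined only for these tests, not part of this crate).
// It exercises the little-endian guarantees documented above.
#[cfg(test)]
mod tests {
    use super::{fill_bytes_via_next, next_u64_via_u32};
    use crate::{Error, RngCore};

    /// Deterministic stub: `next_u32` returns 1, 2, 3, ... in sequence.
    struct CountingRng(u32);

    impl RngCore for CountingRng {
        fn next_u32(&mut self) -> u32 {
            self.0 += 1;
            self.0
        }
        fn next_u64(&mut self) -> u64 {
            next_u64_via_u32(self)
        }
        fn fill_bytes(&mut self, dest: &mut [u8]) {
            fill_bytes_via_next(self, dest)
        }
        fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
            self.fill_bytes(dest);
            Ok(())
        }
    }

    #[test]
    fn next_u64_via_u32_is_little_endian() {
        let mut rng = CountingRng(0);
        // x = 1 (generated first, low half), y = 2 (high half).
        assert_eq!(rng.next_u64(), (2u64 << 32) | 1);
    }

    #[test]
    fn fill_bytes_via_next_uses_le_words() {
        let mut rng = CountingRng(0);
        let mut buf = [0u8; 10];
        rng.fill_bytes(&mut buf);
        // The first 8 bytes are the LE encoding of the first `u64`; the
        // 2-byte tail comes from one extra `next_u32` call.
        let first = (2u64 << 32) | 1;
        assert_eq!(&buf[..8], &first.to_le_bytes()[..]);
        assert_eq!(&buf[8..], &3u32.to_le_bytes()[..2]);
    }
}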