// vendor/rand_core-0.5.1/src/impls.rs
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Helper functions for implementing `RngCore` functions.
//!
//! For cross-platform reproducibility, these functions all use Little Endian:
//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
//!
//! Byte-swapping (like the std `to_le` functions) is only needed to convert
//! to/from byte sequences, and since its purpose is reproducibility,
//! non-reproducible sources (e.g. `OsRng`) need not bother with it.
use core::cmp::min;
use core::mem::size_of;
use core::ptr::copy_nonoverlapping;
use core::slice;

use crate::RngCore;
27 /// Implement `next_u64` via `next_u32`, little-endian order.
28 pub fn next_u64_via_u32
<R
: RngCore
+ ?Sized
>(rng
: &mut R
) -> u64 {
29 // Use LE; we explicitly generate one value before the next.
30 let x
= u64::from(rng
.next_u32());
31 let y
= u64::from(rng
.next_u32());
35 /// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
37 /// The fastest way to fill a slice is usually to work as long as possible with
38 /// integers. That is why this method mostly uses `next_u64`, and only when
39 /// there are 4 or less bytes remaining at the end of the slice it uses
41 pub fn fill_bytes_via_next
<R
: RngCore
+ ?Sized
>(rng
: &mut R
, dest
: &mut [u8]) {
43 while left
.len() >= 8 {
44 let (l
, r
) = {left}
.split_at_mut(8);
46 let chunk
: [u8; 8] = rng
.next_u64().to_le_bytes();
47 l
.copy_from_slice(&chunk
);
51 let chunk
: [u8; 8] = rng
.next_u64().to_le_bytes();
52 left
.copy_from_slice(&chunk
[..n
]);
54 let chunk
: [u8; 4] = rng
.next_u32().to_le_bytes();
55 left
.copy_from_slice(&chunk
[..n
]);
/// Implement an integer-returning `next_*` method via `fill_bytes`,
/// little-endian order.
///
/// `$N` must equal `size_of::<$ty>()`; this is checked with a debug assert.
/// Evaluates to the filled integer, byte-swapped to LE interpretation.
macro_rules! impl_uint_from_fill {
    ($rng:expr, $ty:ty, $N:expr) => ({
        debug_assert!($N == size_of::<$ty>());

        let mut int: $ty = 0;
        unsafe {
            // SAFETY: `int` is a live, writable `$N`-byte object for the
            // whole scope of `slice`, and the raw slice does not escape it.
            let ptr = &mut int as *mut $ty as *mut u8;
            let slice = slice::from_raw_parts_mut(ptr, $N);
            $rng.fill_bytes(slice);
        }
        int.to_le()
    });
}
/// Copy words from a `&[$ty]` source buffer into a byte slice,
/// little-endian order.
///
/// Evaluates to `(consumed, filled_u8)`: the number of source words consumed
/// (rounded up for a trailing partial word) and the number of bytes written.
macro_rules! fill_via_chunks {
    ($src:expr, $dst:expr, $ty:ty, $size:expr) => ({
        let chunk_size_u8 = min($src.len() * $size, $dst.len());
        let chunk_size = (chunk_size_u8 + $size - 1) / $size;
        if cfg!(target_endian="little") {
            // On LE targets the in-memory bytes of the words are already in
            // output order; copy them in one shot.
            unsafe {
                // SAFETY: `chunk_size_u8` is bounded by both buffers' lengths,
                // and `$src`/`$dst` are distinct slices (no overlap).
                copy_nonoverlapping(
                    $src.as_ptr() as *const u8,
                    $dst.as_mut_ptr(),
                    chunk_size_u8);
            }
        } else {
            // On BE targets, byte-swap each word to LE before copying it out.
            for (&n, chunk) in $src.iter().zip($dst.chunks_mut($size)) {
                let tmp = n.to_le();
                let src_ptr = &tmp as *const $ty as *const u8;
                unsafe {
                    // SAFETY: `chunk.len() <= $size == size_of::<$ty>()`, so
                    // the read stays inside `tmp`.
                    copy_nonoverlapping(src_ptr,
                                        chunk.as_mut_ptr(),
                                        chunk.len());
                }
            }
        }

        (chunk_size, chunk_size_u8)
    });
}
100 /// Implement `fill_bytes` by reading chunks from the output buffer of a block
103 /// The return values are `(consumed_u32, filled_u8)`.
105 /// `filled_u8` is the number of filled bytes in `dest`, which may be less than
106 /// the length of `dest`.
107 /// `consumed_u32` is the number of words consumed from `src`, which is the same
108 /// as `filled_u8 / 4` rounded up.
111 /// (from `IsaacRng`)
114 /// fn fill_bytes(&mut self, dest: &mut [u8]) {
115 /// let mut read_len = 0;
116 /// while read_len < dest.len() {
117 /// if self.index >= self.rsl.len() {
121 /// let (consumed_u32, filled_u8) =
122 /// impls::fill_via_u32_chunks(&mut self.rsl[self.index..],
123 /// &mut dest[read_len..]);
125 /// self.index += consumed_u32;
126 /// read_len += filled_u8;
130 pub fn fill_via_u32_chunks(src
: &[u32], dest
: &mut [u8]) -> (usize, usize) {
131 fill_via_chunks
!(src
, dest
, u32, 4)
134 /// Implement `fill_bytes` by reading chunks from the output buffer of a block
137 /// The return values are `(consumed_u64, filled_u8)`.
138 /// `filled_u8` is the number of filled bytes in `dest`, which may be less than
139 /// the length of `dest`.
140 /// `consumed_u64` is the number of words consumed from `src`, which is the same
141 /// as `filled_u8 / 8` rounded up.
143 /// See `fill_via_u32_chunks` for an example.
144 pub fn fill_via_u64_chunks(src
: &[u64], dest
: &mut [u8]) -> (usize, usize) {
145 fill_via_chunks
!(src
, dest
, u64, 8)
148 /// Implement `next_u32` via `fill_bytes`, little-endian order.
149 pub fn next_u32_via_fill
<R
: RngCore
+ ?Sized
>(rng
: &mut R
) -> u32 {
150 impl_uint_from_fill
!(rng
, u32, 4)
153 /// Implement `next_u64` via `fill_bytes`, little-endian order.
154 pub fn next_u64_via_fill
<R
: RngCore
+ ?Sized
>(rng
: &mut R
) -> u64 {
155 impl_uint_from_fill
!(rng
, u64, 8)
158 // TODO: implement tests for the above