1 // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // https://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10 //
11 // Based on jitterentropy-library, http://www.chronox.de/jent.html.
12 // Copyright Stephan Mueller <smueller@chronox.de>, 2014 - 2017.
13 //
14 // With permission from Stephan Mueller to relicense the Rust translation under
15 // the MIT license.
16
17 //! Non-physical true random number generator based on timing jitter.
18
19 // Note: the C implementation of `Jitterentropy` relies on being compiled
20 // without optimizations. This implementation goes to some lengths to make the
21 // compiler not optimize out code which does influence timing jitter, but is
22 // technically dead code.
23
24 use rand_core::{RngCore, CryptoRng, Error, ErrorKind, impls};
25
26 use core::{fmt, mem, ptr};
27 #[cfg(all(feature="std", not(target_arch = "wasm32")))]
28 use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
29
30 const MEMORY_BLOCKS: usize = 64;
31 const MEMORY_BLOCKSIZE: usize = 32;
32 const MEMORY_SIZE: usize = MEMORY_BLOCKS * MEMORY_BLOCKSIZE;
33
34 /// A true random number generator based on jitter in the CPU execution time,
35 /// and jitter in memory access time.
36 ///
37 /// This is a true random number generator, as opposed to pseudo-random
38 /// generators. Random numbers generated by `JitterRng` can be seen as fresh
39 /// entropy. A consequence is that it is orders of magnitude slower than [`OsRng`]
40 /// and PRNGs (about 10<sup>3</sup>..10<sup>6</sup> slower).
41 ///
42 /// There are very few situations where using this RNG is appropriate. Only very
43 /// few applications require true entropy. A normal PRNG can be statistically
44 /// indistinguishable from true randomness, and a cryptographic PRNG should be
45 /// just as impossible to predict.
46 ///
47 /// Use of `JitterRng` is recommended for initializing cryptographic PRNGs when
48 /// [`OsRng`] is not available.
49 ///
50 /// `JitterRng` can be used without the standard library, but not conveniently:
51 /// you must provide a high-precision timer and carefully follow the
52 /// instructions of [`new_with_timer`].
53 ///
54 /// This implementation is based on
55 /// [Jitterentropy](http://www.chronox.de/jent.html) version 2.1.0.
56 ///
57 /// Note: There is no accurate timer available on Wasm platforms; this helps
58 /// prevent fingerprinting and timing side-channel attacks. Therefore
59 /// [`JitterRng::new()`] is not available on Wasm.
60 ///
61 /// # Quality testing
62 ///
63 /// [`JitterRng::new()`] has built-in, but limited, quality testing. However,
64 /// before using `JitterRng` on untested hardware, or after changes that could
65 /// affect how the code is optimized (such as a new LLVM version), it is
66 /// recommended to run the much more stringent
67 /// [NIST SP 800-90B Entropy Estimation Suite](
68 /// https://github.com/usnistgov/SP800-90B_EntropyAssessment).
69 ///
70 /// Use the following code using [`timer_stats`] to collect the data:
71 ///
72 /// ```no_run
73 /// use rand::jitter::JitterRng;
74 /// #
75 /// # use std::error::Error;
76 /// # use std::fs::File;
77 /// # use std::io::Write;
78 /// #
79 /// # fn try_main() -> Result<(), Box<Error>> {
80 /// let mut rng = JitterRng::new()?;
81 ///
82 /// // 1_000_000 results are required for the
83 /// // NIST SP 800-90B Entropy Estimation Suite
84 /// const ROUNDS: usize = 1_000_000;
85 /// let mut deltas_variable: Vec<u8> = Vec::with_capacity(ROUNDS);
86 /// let mut deltas_minimal: Vec<u8> = Vec::with_capacity(ROUNDS);
87 ///
88 /// for _ in 0..ROUNDS {
89 /// deltas_variable.push(rng.timer_stats(true) as u8);
90 /// deltas_minimal.push(rng.timer_stats(false) as u8);
91 /// }
92 ///
93 /// // Write out after the statistics collection loop, to not disturb the
94 /// // test results.
95 /// File::create("jitter_rng_var.bin")?.write(&deltas_variable)?;
96 /// File::create("jitter_rng_min.bin")?.write(&deltas_minimal)?;
97 /// #
98 /// # Ok(())
99 /// # }
100 /// #
101 /// # fn main() {
102 /// # try_main().unwrap();
103 /// # }
104 /// ```
105 ///
106 /// This will produce two files: `jitter_rng_var.bin` and `jitter_rng_min.bin`.
107 /// Run the Entropy Estimation Suite in three configurations, as outlined below.
108 /// Every run has two steps: one to produce an estimate, another to
109 /// validate it.
110 ///
111 /// 1. Estimate the expected amount of entropy that is at least available with
112 /// each round of the entropy collector. This number should be greater than
113 /// the amount estimated with `64 / test_timer()`.
114 /// ```sh
115 /// python noniid_main.py -v jitter_rng_var.bin 8
116 /// restart.py -v jitter_rng_var.bin 8 <min-entropy>
117 /// ```
118 /// 2. Estimate the expected amount of entropy that is available in the last 4
119 /// bits of the timer delta after running noise sources. Note that a value of
120 /// `3.70` is the minimum estimated entropy for true randomness.
121 /// ```sh
122 /// python noniid_main.py -v -u 4 jitter_rng_var.bin 4
123 /// restart.py -v -u 4 jitter_rng_var.bin 4 <min-entropy>
124 /// ```
125 /// 3. Estimate the expected amount of entropy that is available to the entropy
126 /// collector if both noise sources only run their minimal number of times.
127 /// This measures the absolute worst-case, and gives a lower bound for the
128 /// available entropy.
129 /// ```sh
130 /// python noniid_main.py -v -u 4 jitter_rng_min.bin 4
131 /// restart.py -v -u 4 jitter_rng_min.bin 4 <min-entropy>
132 /// ```
133 ///
134 /// [`OsRng`]: struct.OsRng.html
135 /// [`JitterRng::new()`]: struct.JitterRng.html#method.new
136 /// [`new_with_timer`]: struct.JitterRng.html#method.new_with_timer
137 /// [`timer_stats`]: struct.JitterRng.html#method.timer_stats
138 pub struct JitterRng {
139 data: u64, // Actual random number
140 // Number of rounds to run the entropy collector per 64 bits
141 rounds: u8,
142 // Timer used by `measure_jitter`
143 timer: fn() -> u64,
144 // Memory for the Memory Access noise source
145 mem_prev_index: u16,
146 // Make `next_u32` not waste 32 bits
147 data_half_used: bool,
148 }
149
150 // Note: `JitterRng` maintains a small 64-bit entropy pool. With every
151 // `generate`, 64 new bits should be integrated into the pool. If a round of
152 // `generate` were to collect less than the expected 64 bits, then the returned
153 // value, and the new state of the entropy pool, would be in some way related to
154 // the initial state. It is therefore better if the initial state of the entropy
155 // pool is different on each call to `generate`. This has a few implications:
156 // - `generate` should be called once before using `JitterRng` to produce the
157 // first usable value (this is done by default in `new`);
158 // - We do not zero the entropy pool after generating a result. The reference
159 // implementation also does not support zeroing, but recommends generating a
160 // new value without using it if you want to protect a previously generated
161 // 'secret' value from someone inspecting the memory;
162 // - Implementing `Clone` seems acceptable, as it would not cause the systematic
163 // bias a constant might cause. Only instead of one value that could be
164 // potentially related to the same initial state, there are now two.
165
166 // Entropy collector state.
167 // These values are not necessary to preserve across runs.
168 struct EcState {
169 // Previous time stamp to determine the timer delta
170 prev_time: u64,
171 // Deltas used for the stuck test
172 last_delta: i32,
173 last_delta2: i32,
174 // Memory for the Memory Access noise source
175 mem: [u8; MEMORY_SIZE],
176 }
177
178 impl EcState {
179 // Stuck test by checking the:
180 // - 1st derivative of the jitter measurement (time delta)
181 // - 2nd derivative of the jitter measurement (delta of time deltas)
182 // - 3rd derivative of the jitter measurement (delta of delta of time
183 //   deltas)
184 //
185 // All values must always be non-zero.
186 // This test is a heuristic to see whether the last measurement holds
187 // entropy.
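//
// For example, if two consecutive time deltas are identical, `delta2` below
// becomes zero and the measurement is reported as stuck.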
188 fn stuck(&mut self, current_delta: i32) -> bool {
189 let delta2 = self.last_delta - current_delta;
190 let delta3 = delta2 - self.last_delta2;
191
192 self.last_delta = current_delta;
193 self.last_delta2 = delta2;
194
195 current_delta == 0 || delta2 == 0 || delta3 == 0
196 }
197 }
198
199 // Custom Debug implementation that does not expose the internal state
200 impl fmt::Debug for JitterRng {
201 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
202 write!(f, "JitterRng {{}}")
203 }
204 }
205
206 impl Clone for JitterRng {
207 fn clone(&self) -> JitterRng {
208 JitterRng {
209 data: self.data,
210 rounds: self.rounds,
211 timer: self.timer,
212 mem_prev_index: self.mem_prev_index,
213 // The 32 bits that may still be unused from the previous round are
214 // for the original to use, not for the clone.
215 data_half_used: false,
216 }
217 }
218 }
219
220 /// An error that can occur when [`JitterRng::test_timer`] fails.
221 ///
222 /// [`JitterRng::test_timer`]: struct.JitterRng.html#method.test_timer
223 #[derive(Debug, Clone, PartialEq, Eq)]
224 pub enum TimerError {
225 /// No timer available.
226 NoTimer,
227 /// Timer too coarse to use as an entropy source.
228 CoarseTimer,
229 /// Timer is not monotonically increasing.
230 NotMonotonic,
231 /// Variations of time deltas too small.
232 TinyVariantions,
233 /// Too many stuck results (indicating no added entropy).
234 TooManyStuck,
235 #[doc(hidden)]
236 __Nonexhaustive,
237 }
238
239 impl TimerError {
240 fn description(&self) -> &'static str {
241 match *self {
242 TimerError::NoTimer => "no timer available",
243 TimerError::CoarseTimer => "coarse timer",
244 TimerError::NotMonotonic => "timer not monotonic",
245 TimerError::TinyVariantions => "time delta variations too small",
246 TimerError::TooManyStuck => "too many stuck results",
247 TimerError::__Nonexhaustive => unreachable!(),
248 }
249 }
250 }
251
252 impl fmt::Display for TimerError {
253 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
254 write!(f, "{}", self.description())
255 }
256 }
257
258 #[cfg(feature="std")]
259 impl ::std::error::Error for TimerError {
260 fn description(&self) -> &str {
261 self.description()
262 }
263 }
264
265 impl From<TimerError> for Error {
266 fn from(err: TimerError) -> Error {
267 // Timer check is already quite permissive of failures so we don't
268 // expect false-positive failures, i.e. any error is irrecoverable.
269 Error::with_cause(ErrorKind::Unavailable,
270 "timer jitter failed basic quality tests", err)
271 }
272 }
273
274 // Initialised to zero (timer test not yet run); set to a positive round count later.
275 #[cfg(all(feature="std", not(target_arch = "wasm32")))]
276 static JITTER_ROUNDS: AtomicUsize = ATOMIC_USIZE_INIT;
277
278 impl JitterRng {
279 /// Create a new `JitterRng`. Makes use of `std::time` for a timer, or a
280 /// platform-specific function with higher accuracy if necessary and
281 /// available.
282 ///
283 /// During initialization CPU execution timing jitter is measured a few
284 /// hundred times. If this does not pass basic quality tests, an error is
285 /// returned. The test result is cached to make subsequent calls faster.
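///
/// # Example
///
/// A minimal usage sketch (illustrative only; requires the `std` feature and
/// a timer that passes the quality tests):
///
/// ```no_run
/// use rand::RngCore;
/// use rand::jitter::JitterRng;
///
/// # fn try_main() -> Result<(), rand::Error> {
/// let mut rng = JitterRng::new()?;
/// // `new` has already primed the entropy pool, so the RNG is ready for use.
/// println!("random u64: {}", rng.next_u64());
/// # Ok(())
/// # }
/// # fn main() { let _ = try_main(); }
/// ```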
286 #[cfg(all(feature="std", not(target_arch = "wasm32")))]
287 pub fn new() -> Result<JitterRng, TimerError> {
288 let mut state = JitterRng::new_with_timer(platform::get_nstime);
289 let mut rounds = JITTER_ROUNDS.load(Ordering::Relaxed) as u8;
290 if rounds == 0 {
291 // No result yet: run test.
292 // This allows the timer test to run multiple times; we don't care.
293 rounds = state.test_timer()?;
294 JITTER_ROUNDS.store(rounds as usize, Ordering::Relaxed);
295 info!("JitterRng: using {} rounds per u64 output", rounds);
296 }
297 state.set_rounds(rounds);
298
299 // Fill `data` with a non-zero value.
300 state.gen_entropy();
301 Ok(state)
302 }
303
304 /// Create a new `JitterRng`.
305 /// A custom timer can be supplied, making it possible to use `JitterRng` in
306 /// `no_std` environments.
307 ///
308 /// The timer must have nanosecond precision.
309 ///
310 /// This method is lower-level than `new()`. It is the responsibility of
311 /// the caller to run [`test_timer`] before using any numbers generated with
312 /// `JitterRng`, and optionally call [`set_rounds`]. It is also important to
313 /// consume at least one `u64` to initialize the entropy collection pool
314 /// before using the first result.
315 ///
316 /// # Example
317 ///
318 /// ```
319 /// # use rand::{Rng, Error};
320 /// use rand::jitter::JitterRng;
321 ///
322 /// # fn try_inner() -> Result<(), Error> {
323 /// fn get_nstime() -> u64 {
324 /// use std::time::{SystemTime, UNIX_EPOCH};
325 ///
326 /// let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
327 /// // The correct way to calculate the current time is
328 /// // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
329 /// // But this is faster, and the difference in terms of entropy is
330 /// // negligible (log2(10^9) == 29.9).
331 /// dur.as_secs() << 30 | dur.subsec_nanos() as u64
332 /// }
333 ///
334 /// let mut rng = JitterRng::new_with_timer(get_nstime);
335 /// let rounds = rng.test_timer()?;
336 /// rng.set_rounds(rounds); // optional
337 /// let _ = rng.gen::<u64>();
338 ///
339 /// // Ready for use
340 /// let v: u64 = rng.gen();
341 /// # Ok(())
342 /// # }
343 ///
344 /// # let _ = try_inner();
345 /// ```
346 ///
347 /// [`test_timer`]: struct.JitterRng.html#method.test_timer
348 /// [`set_rounds`]: struct.JitterRng.html#method.set_rounds
349 pub fn new_with_timer(timer: fn() -> u64) -> JitterRng {
350 JitterRng {
351 data: 0,
352 rounds: 64,
353 timer,
354 mem_prev_index: 0,
355 data_half_used: false,
356 }
357 }
358
359 /// Configures how many rounds are used to generate each 64-bit value.
360 /// This must be greater than zero, and has a big impact on performance
361 /// and output quality.
362 ///
363 /// [`new_with_timer`] conservatively uses 64 rounds, but often fewer rounds
364 /// can be used. The `test_timer()` function returns the minimum number of
365 /// rounds required for full strength (platform dependent), so one may use
366 /// `rng.set_rounds(rng.test_timer()?);` or cache the value.
367 ///
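/// # Example
///
/// A minimal sketch (illustrative only; `new()` already runs the timer test
/// and sets the rounds, so re-running `test_timer()` here merely demonstrates
/// the call):
///
/// ```no_run
/// # use rand::Error;
/// use rand::jitter::JitterRng;
///
/// # fn try_main() -> Result<(), Error> {
/// let mut rng = JitterRng::new()?;
/// let rounds = rng.test_timer()?;
/// rng.set_rounds(rounds);
/// # Ok(())
/// # }
/// # fn main() { let _ = try_main(); }
/// ```
///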
368 /// [`new_with_timer`]: struct.JitterRng.html#method.new_with_timer
369 pub fn set_rounds(&mut self, rounds: u8) {
370 assert!(rounds > 0);
371 self.rounds = rounds;
372 }
373
374 // Calculate a random loop count used for the next round of an entropy
375 // collection, based on bits from a fresh value from the timer.
376 //
377 // The timer is folded to produce a number that contains at most `n_bits`
378 // bits.
379 //
380 // Note: A constant should be added to the resulting random loop count to
381 // prevent loops that run 0 times.
382 #[inline(never)]
383 fn random_loop_cnt(&mut self, n_bits: u32) -> u32 {
384 let mut rounds = 0;
385
386 let mut time = (self.timer)();
387 // Mix with the current state of the random number to balance the random
388 // loop counter a bit more.
389 time ^= self.data;
390
391 // We fold the time value as much as possible to ensure that as many
392 // bits of the time stamp are included as possible.
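// For example, with `n_bits = 4` the 64-bit value is split into 16 chunks
// of 4 bits that are all XORed together, so the result fits in 4 bits.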
393 let folds = (64 + n_bits - 1) / n_bits;
394 let mask = (1 << n_bits) - 1;
395 for _ in 0..folds {
396 rounds ^= time & mask;
397 time >>= n_bits;
398 }
399
400 rounds as u32
401 }
402
403 // CPU jitter noise source
404 // Noise source based on the CPU execution time jitter
405 //
406 // This function injects the individual bits of the time value into the
407 // entropy pool using an LFSR.
408 //
409 // The code is deliberately inefficient with respect to the bit shifting.
410 // This function not only acts as folding operation, but this function's
411 // execution is used to measure the CPU execution time jitter. Any change to
412 // the loop in this function implies that careful retesting must be done.
413 #[inline(never)]
414 fn lfsr_time(&mut self, time: u64, var_rounds: bool) {
415 fn lfsr(mut data: u64, time: u64) -> u64 {
416 for i in 1..65 {
417 let mut tmp = time << (64 - i);
418 tmp >>= 64 - 1;
419
420 // Fibonacci LFSR with polynomial of
421 // x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
422 // primitive according to
423 // http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf
424 // (the shift values are the polynomial values minus one
425 // due to counting bits from 0 to 63). As the current
426 // position is always the LSB, the polynomial only needs
427 // to shift data in from the left without wrap.
428 data ^= tmp;
429 data ^= (data >> 63) & 1;
430 data ^= (data >> 60) & 1;
431 data ^= (data >> 55) & 1;
432 data ^= (data >> 30) & 1;
433 data ^= (data >> 27) & 1;
434 data ^= (data >> 22) & 1;
435 data = data.rotate_left(1);
436 }
437 data
438 }
439
440 // Note: in the reference implementation only the last round affects
441 // `self.data`, all the other results are ignored. To make sure the
442 // other rounds are not optimised out, we first run all but the last
443 // round on a throw-away value instead of the real `self.data`.
444 let mut lfsr_loop_cnt = 0;
445 if var_rounds { lfsr_loop_cnt = self.random_loop_cnt(4) };
446
447 let mut throw_away: u64 = 0;
448 for _ in 0..lfsr_loop_cnt {
449 throw_away = lfsr(throw_away, time);
450 }
451 black_box(throw_away);
452
453 self.data = lfsr(self.data, time);
454 }
455
456 // Memory Access noise source
457 // This is a noise source based on variations in memory access times
458 //
459 // This function performs memory accesses which will add to the timing
460 // variations due to an unknown amount of CPU wait states that need to be
461 // added when accessing memory. The memory size should be larger than the L1
462 // caches as outlined in the documentation and the associated testing.
463 //
464 // The L1 cache has a very high bandwidth, albeit its access rate is usually
465 // slower than accessing CPU registers. Therefore, L1 accesses only add
466 // minimal variations as the CPU hardly has to wait. Starting with L2,
467 // significant variations are added because L2 typically does not belong to
468 // the CPU any more and therefore a wider range of CPU wait states is
469 // necessary for accesses. L3 and real memory accesses have an even wider
470 // range of wait states. However, to reliably access either L3 or memory,
471 // the `self.mem` memory must be quite large which is usually not desirable.
472 #[inline(never)]
473 fn memaccess(&mut self, mem: &mut [u8; MEMORY_SIZE], var_rounds: bool) {
474 let mut acc_loop_cnt = 128;
475 if var_rounds { acc_loop_cnt += self.random_loop_cnt(4) };
476
477 let mut index = self.mem_prev_index as usize;
478 for _ in 0..acc_loop_cnt {
479 // Addition of memblocksize - 1 to index with wrap around logic to
480 // ensure that every memory location is hit evenly.
481 // The modulus also allows the compiler to remove the indexing
482 // bounds check.
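// With `MEMORY_BLOCKSIZE = 32` and `MEMORY_SIZE = 2048` the index advances
// by 31 on each access; because gcd(31, 2048) = 1 it cycles through every
// byte of `mem` before repeating.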
483 index = (index + MEMORY_BLOCKSIZE - 1) % MEMORY_SIZE;
484
485 // memory access: just add 1 to one byte
486 // memory access implies read from and write to memory location
487 mem[index] = mem[index].wrapping_add(1);
488 }
489 self.mem_prev_index = index as u16;
490 }
491
492 // This is the heart of the entropy generation: calculate time deltas and
493 // use the CPU jitter in the time deltas. The jitter is injected into the
494 // entropy pool.
495 //
496 // Ensure that `ec.prev_time` is primed before using the output of this
497 // function. This can be done by calling this function and not using its
498 // result.
499 fn measure_jitter(&mut self, ec: &mut EcState) -> Option<()> {
500 // Invoke one noise source before time measurement to add variations
501 self.memaccess(&mut ec.mem, true);
502
503 // Get time stamp and calculate time delta to previous
504 // invocation to measure the timing variations
505 let time = (self.timer)();
506 // Note: wrapping_sub combined with a cast to `i64` generates a correct
507 // delta, even in the unlikely case this is a timer that is not strictly
508 // monotonic.
509 let current_delta = time.wrapping_sub(ec.prev_time) as i64 as i32;
510 ec.prev_time = time;
511
512 // Call the next noise source which also injects the data
513 self.lfsr_time(current_delta as u64, true);
514
515 // Check whether we have a stuck measurement (i.e. does the last
516 // measurement hold entropy?).
517 if ec.stuck(current_delta) { return None };
518
519 // Rotate the data buffer by a prime number (any odd number would
520 // do) to ensure that every bit position of the input time stamp
521 // has an even chance of being merged with a bit position in the
522 // entropy pool. We do not use one here as the adjacent bits in
523 // successive time deltas may have some form of dependency. The
524 // chosen value of 7 implies that the low 7 bits of the next
525 // time delta value are concatenated with the current time delta.
526 self.data = self.data.rotate_left(7);
527
528 Some(())
529 }
530
531 // Shuffle the pool a bit by mixing some value with a bijective function
532 // (XOR) into the pool.
533 //
534 // The function generates a mixer value that depends on the bits set and
535 // the location of the set bits in the random number generated by the
536 // entropy source. Therefore, based on the generated random number, this
537 // mixer value can have 2^64 different values. That mixer value is
538 // initialized with SHA-1 initialization constants. After obtaining the
539 // mixer value, it is XORed into the random number.
540 //
541 // The mixer value is not assumed to contain any entropy. But due to the
542 // XOR operation, it can also not destroy any entropy present in the
543 // entropy pool.
544 #[inline(never)]
545 fn stir_pool(&mut self) {
546 // This constant is derived from the first two 32 bit initialization
547 // vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1
548 // The order does not really matter as we do not rely on the specific
549 // numbers. We just pick the SHA-1 constants as they have a good mix of
550 // bit set and unset.
551 const CONSTANT: u64 = 0x67452301efcdab89;
552
553 // The start value of the mixer variable is derived from the third
554 // and fourth 32 bit initialization vector of SHA-1 as defined in
555 // FIPS 180-4 section 5.3.1
556 let mut mixer = 0x98badcfe10325476;
557
558 // This is a constant time function to prevent leaking timing
559 // information about the random number.
560 // The normal code is:
561 // ```
562 // for i in 0..64 {
563 // if ((self.data >> i) & 1) == 1 { mixer ^= CONSTANT; }
564 // }
565 // ```
566 // This is a bit fragile, as LLVM really wants to use branches here, and
567 // we rely on it to not recognise the opportunity.
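// `apply` below is either 0 or 1, so `apply.wrapping_sub(1)` is all zeros
// or all ones; negating it yields a mask that selects `CONSTANT` exactly
// when the inspected bit of `self.data` is set, without a branch.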
568 for i in 0..64 {
569 let apply = (self.data >> i) & 1;
570 let mask = !apply.wrapping_sub(1);
571 mixer ^= CONSTANT & mask;
572 mixer = mixer.rotate_left(1);
573 }
574
575 self.data ^= mixer;
576 }
577
578 fn gen_entropy(&mut self) -> u64 {
579 trace!("JitterRng: collecting entropy");
580
581 // Prime `ec.prev_time`, and run the noise sources to make sure the
582 // first loop round collects the expected entropy.
583 let mut ec = EcState {
584 prev_time: (self.timer)(),
585 last_delta: 0,
586 last_delta2: 0,
587 mem: [0; MEMORY_SIZE],
588 };
589 let _ = self.measure_jitter(&mut ec);
590
591 for _ in 0..self.rounds {
592 // If a stuck measurement is received, repeat the measurement.
593 // Note: we do not guard against an infinite loop; that could only
594 // happen if the timer suddenly became broken.
595 while self.measure_jitter(&mut ec).is_none() {}
596 }
597
598 // Do a single read from `self.mem` to make sure the Memory Access noise
599 // source is not optimised out.
600 black_box(ec.mem[0]);
601
602 self.stir_pool();
603 self.data
604 }
605
606 /// Basic quality tests on the timer, by measuring CPU timing jitter a few
607 /// hundred times.
608 ///
609 /// If successful, this will return the estimated number of rounds necessary
610 /// to collect 64 bits of entropy. Otherwise a [`TimerError`] with the cause
611 /// of the failure will be returned.
612 ///
613 /// [`TimerError`]: enum.TimerError.html
614 pub fn test_timer(&mut self) -> Result<u8, TimerError> {
615 debug!("JitterRng: testing timer ...");
616 // We could add a check for system capabilities such as `clock_getres`
617 // or check for `CONFIG_X86_TSC`, but it does not make much sense as the
618 // following sanity checks verify that we have a high-resolution timer.
619
620 let mut delta_sum = 0;
621 let mut old_delta = 0;
622
623 let mut time_backwards = 0;
624 let mut count_mod = 0;
625 let mut count_stuck = 0;
626
627 let mut ec = EcState {
628 prev_time: (self.timer)(),
629 last_delta: 0,
630 last_delta2: 0,
631 mem: [0; MEMORY_SIZE],
632 };
633
634 // TESTLOOPCOUNT needs some loops to identify edge systems.
635 // 100 is definitely too little.
636 const TESTLOOPCOUNT: u64 = 300;
637 const CLEARCACHE: u64 = 100;
638
639 for i in 0..(CLEARCACHE + TESTLOOPCOUNT) {
640 // Measure time delta of core entropy collection logic
641 let time = (self.timer)();
642 self.memaccess(&mut ec.mem, true);
643 self.lfsr_time(time, true);
644 let time2 = (self.timer)();
645
646 // Test whether timer works
647 if time == 0 || time2 == 0 {
648 return Err(TimerError::NoTimer);
649 }
650 let delta = time2.wrapping_sub(time) as i64 as i32;
651
652 // Test whether timer is fine grained enough to provide delta even
653 // when called shortly after each other -- this implies that we also
654 // have a high resolution timer
655 if delta == 0 {
656 return Err(TimerError::CoarseTimer);
657 }
658
659 // Up to here we did not modify any variable that will be
660 // evaluated later, but we already performed some work. Thus we
661 // already have had an impact on the caches, branch prediction,
662 // etc. with the goal to clear it to get the worst case
663 // measurements.
664 if i < CLEARCACHE { continue; }
665
666 if ec.stuck(delta) { count_stuck += 1; }
667
668 // Test whether we have an increasing timer.
669 if !(time2 > time) { time_backwards += 1; }
670
671 // Count the number of times the delta is a multiple of 100; if this
672 // happens too often the counter seems to increment only in steps of 100ns.
673 if (delta % 100) == 0 { count_mod += 1; }
674
675 // Ensure that we have a varying delta timer which is necessary for
676 // the calculation of entropy -- perform this check only after the
677 // first loop is executed as we need to prime the old_delta value
678 delta_sum += (delta - old_delta).abs() as u64;
679 old_delta = delta;
680 }
681
682 // Do a single read from `self.mem` to make sure the Memory Access noise
683 // source is not optimised out.
684 black_box(ec.mem[0]);
685
686 // We allow the time to run backwards for up to three times.
687 // This can happen if the clock is being adjusted by NTP operations.
688 // If such an operation just happens to interfere with our test, it
689 // should not fail. The value of 3 should cover the NTP case being
690 // performed during our test run.
691 if time_backwards > 3 {
692 return Err(TimerError::NotMonotonic);
693 }
694
695 // Test that the available amount of entropy per round does not get too
696 // low. We expect 1 bit of entropy per round as a reasonable minimum
697 // (although less is possible, it means the collector loop has to run
698 // much more often).
699 // `assert!(delta_average >= log2(1))`
700 // `assert!(delta_sum / TESTLOOPCOUNT >= 1)`
701 // `assert!(delta_sum >= TESTLOOPCOUNT)`
702 if delta_sum < TESTLOOPCOUNT {
703 return Err(TimerError::TinyVariantions);
704 }
705
706 // Ensure that we have variations in the time stamp below 100 for at
707 // least 10% of all checks -- on some platforms, the counter increments
708 // in multiples of 100, but not always
709 if count_mod > (TESTLOOPCOUNT * 9 / 10) {
710 return Err(TimerError::CoarseTimer);
711 }
712
713 // If we have more than 90% stuck results, then this Jitter RNG is
714 // likely to not work well.
715 if count_stuck > (TESTLOOPCOUNT * 9 / 10) {
716 return Err(TimerError::TooManyStuck);
717 }
718
719 // Estimate the number of `measure_jitter` rounds necessary for 64 bits
720 // of entropy.
721 //
722 // We don't try very hard to come up with a good estimate of the
723 // available bits of entropy per round here for two reasons:
724 // 1. Simple estimates of the available bits (like Shannon entropy) are
725 // too optimistic.
726 // 2. Unless we want to waste a lot of time during initialization, only
727 //    a small number of samples are available.
728 //
729 // Therefore we use a very simple and conservative estimate:
730 // `let bits_of_entropy = log2(delta_average) / 2`.
731 //
732 // The number of rounds `measure_jitter` should run to collect 64 bits
733 // of entropy is `64 / bits_of_entropy`.
734 let delta_average = delta_sum / TESTLOOPCOUNT;
735
736 if delta_average >= 16 {
737 let log2 = 64 - delta_average.leading_zeros();
738 // Do something similar to roundup(64/(log2/2)):
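// (e.g. a delta average of 64 gives log2 = 7, hence 19 rounds)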
739 Ok( ((64u32 * 2 + log2 - 1) / log2) as u8)
740 } else {
741 // For values < 16 the rounding error becomes too large, so use a
742 // lookup table.
743 // Values 0 and 1 are invalid, and filtered out by the
744 // `delta_sum < TESTLOOPCOUNT` test above.
745 let log2_lookup = [0, 0, 128, 81, 64, 56, 50, 46,
746 43, 41, 39, 38, 36, 35, 34, 33];
747 Ok(log2_lookup[delta_average as usize])
748 }
749 }
750
751 /// Statistical test: return the timer delta of one normal run of the
752 /// `JitterRng` entropy collector.
753 ///
754 /// Setting `var_rounds` to `true` will execute the memory access and the
755 /// CPU jitter noise sources a variable number of times (just like a real
756 /// `JitterRng` round).
757 ///
758 /// Setting `var_rounds` to `false` will execute the noise sources the
759 /// minimal number of times. This can be used to measure the minimum amount
760 /// of entropy one round of the entropy collector can collect in the worst
761 /// case.
762 ///
763 /// See [Quality testing](struct.JitterRng.html#quality-testing) on how to
764 /// use `timer_stats` to test the quality of `JitterRng`.
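///
/// # Example
///
/// A minimal sketch that prints a few deltas (illustrative only; the Quality
/// testing section of the struct documentation shows the full collection loop
/// used for the NIST suite):
///
/// ```no_run
/// # use rand::Error;
/// use rand::jitter::JitterRng;
///
/// # fn try_main() -> Result<(), Error> {
/// let mut rng = JitterRng::new()?;
/// for _ in 0..5 {
///     println!("variable rounds: {}", rng.timer_stats(true));
///     println!("minimal rounds:  {}", rng.timer_stats(false));
/// }
/// # Ok(())
/// # }
/// # fn main() { let _ = try_main(); }
/// ```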
765 pub fn timer_stats(&mut self, var_rounds: bool) -> i64 {
766 let mut mem = [0; MEMORY_SIZE];
767
768 let time = (self.timer)();
769 self.memaccess(&mut mem, var_rounds);
770 self.lfsr_time(time, var_rounds);
771 let time2 = (self.timer)();
772 time2.wrapping_sub(time) as i64
773 }
774 }
775
776 #[cfg(feature="std")]
777 mod platform {
778 #[cfg(not(any(target_os = "macos", target_os = "ios",
779 target_os = "windows",
780 target_arch = "wasm32")))]
781 pub fn get_nstime() -> u64 {
782 use std::time::{SystemTime, UNIX_EPOCH};
783
784 let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
785 // The correct way to calculate the current time is
786 // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
787 // But this is faster, and the difference in terms of entropy is
788 // negligible (log2(10^9) == 29.9).
789 dur.as_secs() << 30 | dur.subsec_nanos() as u64
790 }
791
792 #[cfg(any(target_os = "macos", target_os = "ios"))]
793 pub fn get_nstime() -> u64 {
794 extern crate libc;
795 // On Mac OS and iOS std::time::SystemTime only has 1000ns resolution.
796 // We use `mach_absolute_time` instead. This provides a CPU-dependent
797 // unit; to get real nanoseconds the result should be multiplied by
798 // numer/denom from `mach_timebase_info`.
799 // But we are not interested in the exact nanoseconds, just entropy. So
800 // we use the raw result.
801 unsafe { libc::mach_absolute_time() }
802 }
803
804 #[cfg(target_os = "windows")]
805 pub fn get_nstime() -> u64 {
806 extern crate winapi;
807 unsafe {
808 let mut t = super::mem::zeroed();
809 winapi::um::profileapi::QueryPerformanceCounter(&mut t);
810 *t.QuadPart() as u64
811 }
812 }
813 }
814
815 // A function that is opaque to the optimizer to assist in avoiding dead-code
816 // elimination. Taken from `bencher`.
817 fn black_box<T>(dummy: T) -> T {
818 unsafe {
819 let ret = ptr::read_volatile(&dummy);
820 mem::forget(dummy);
821 ret
822 }
823 }
824
825 impl RngCore for JitterRng {
826 fn next_u32(&mut self) -> u32 {
827 // We want to use both parts of the generated entropy
828 if self.data_half_used {
829 self.data_half_used = false;
830 (self.data >> 32) as u32
831 } else {
832 self.data = self.next_u64();
833 self.data_half_used = true;
834 self.data as u32
835 }
836 }
837
838 fn next_u64(&mut self) -> u64 {
839 self.data_half_used = false;
840 self.gen_entropy()
841 }
842
843 fn fill_bytes(&mut self, dest: &mut [u8]) {
844 // Fill using `next_u32`. This is faster for filling small slices (four
845 // bytes or less), while the overhead is negligible.
846 //
847 // This is done especially for wrappers that implement `next_u32`
848 // themselves via `fill_bytes`.
849 impls::fill_bytes_via_next(self, dest)
850 }
851
852 fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
853 Ok(self.fill_bytes(dest))
854 }
855 }
856
857 impl CryptoRng for JitterRng {}
858
859 #[cfg(test)]
860 mod test_jitter_init {
861 use jitter::JitterRng;
862
863 #[cfg(all(feature="std", not(target_arch = "wasm32")))]
864 #[test]
865 fn test_jitter_init() {
866 use RngCore;
867 // Because this is a debug build, measurements here are not representative
868 // of the final release build.
869 // Don't fail this test if initializing `JitterRng` fails because of a
870 // bad timer (the timer from the standard library may not have enough
871 // accuracy on all platforms).
872 match JitterRng::new() {
873 Ok(ref mut rng) => {
874 // false positives are possible, but extremely unlikely
875 assert!(rng.next_u32() | rng.next_u32() != 0);
876 },
877 Err(_) => {},
878 }
879 }
880
881 #[test]
882 fn test_jitter_bad_timer() {
883 fn bad_timer() -> u64 { 0 }
884 let mut rng = JitterRng::new_with_timer(bad_timer);
885 assert!(rng.test_timer().is_err());
886 }
887 }