1 // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10 //
11 // Based on jitterentropy-library, http://www.chronox.de/jent.html.
12 // Copyright Stephan Mueller <smueller@chronox.de>, 2014 - 2017.
13 //
14 // With permission from Stephan Mueller to relicense the Rust translation under
15 // the MIT license.
16
17 //! Non-physical true random number generator based on timing jitter.
18
19 use Rng;
20
21 use core::{fmt, mem, ptr};
22 #[cfg(feature="std")]
23 use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
24
25 const MEMORY_BLOCKS: usize = 64;
26 const MEMORY_BLOCKSIZE: usize = 32;
27 const MEMORY_SIZE: usize = MEMORY_BLOCKS * MEMORY_BLOCKSIZE;
28
29 /// A true random number generator based on jitter in the CPU execution time,
30 /// and jitter in memory access time.
31 ///
32 /// This is a true random number generator, as opposed to pseudo-random
33 /// generators. Random numbers generated by `JitterRng` can be seen as fresh
/// entropy. A consequence is that it is orders of magnitude slower than `OsRng`
35 /// and PRNGs (about 10^3 .. 10^6 slower).
36 ///
37 /// There are very few situations where using this RNG is appropriate. Only very
38 /// few applications require true entropy. A normal PRNG can be statistically
39 /// indistinguishable, and a cryptographic PRNG should also be as impossible to
40 /// predict.
41 ///
42 /// Use of `JitterRng` is recommended for initializing cryptographic PRNGs when
43 /// `OsRng` is not available.
44 ///
45 /// This implementation is based on
46 /// [Jitterentropy](http://www.chronox.de/jent.html) version 2.1.0.
47 //
48 // Note: the C implementation relies on being compiled without optimizations.
49 // This implementation goes through lengths to make the compiler not optimise
50 // out what is technically dead code, but that does influence timing jitter.
pub struct JitterRng {
    // The entropy pool; doubles as the most recently generated 64-bit
    // random value.
    data: u64,
    // Number of rounds to run the entropy collector per 64 bits
    rounds: u32,
    // Timer and previous time stamp, used by `measure_jitter`
    timer: fn() -> u64,
    prev_time: u64,
    // Deltas used for the stuck test
    last_delta: i64,
    last_delta2: i64,
    // Memory for the Memory Access noise source
    mem_prev_index: usize,
    mem: [u8; MEMORY_SIZE],
    // Make `next_u32` not waste 32 bits: holds the unused high half of the
    // last `next_u64` result, if any.
    data_remaining: Option<u32>,
}
67
68 // Custom Debug implementation that does not expose the internal state
69 impl fmt::Debug for JitterRng {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result70 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
71 write!(f, "JitterRng {{}}")
72 }
73 }
74
75 /// An error that can occur when `test_timer` fails.
/// An error that can occur when `test_timer` fails.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TimerError {
    /// No timer available.
    NoTimer,
    /// Timer too coarse to use as an entropy source.
    CoarseTimer,
    /// Timer is not monotonically increasing.
    NotMonotonic,
    /// Variations of deltas of time too small.
    // NOTE(review): the variant name is misspelled ("Variantions"), but it
    // is public API, so renaming it would be a breaking change.
    TinyVariantions,
    /// Too many stuck results (indicating no added entropy).
    TooManyStuck,
    // Hidden variant reserved so new error kinds can be added without a
    // breaking change (pre-`#[non_exhaustive]` idiom).
    #[doc(hidden)]
    __Nonexhaustive,
}
91
92 impl TimerError {
description(&self) -> &'static str93 fn description(&self) -> &'static str {
94 match *self {
95 TimerError::NoTimer => "no timer available",
96 TimerError::CoarseTimer => "coarse timer",
97 TimerError::NotMonotonic => "timer not monotonic",
98 TimerError::TinyVariantions => "time delta variations too small",
99 TimerError::TooManyStuck => "too many stuck results",
100 TimerError::__Nonexhaustive => unreachable!(),
101 }
102 }
103 }
104
105 impl fmt::Display for TimerError {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result106 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
107 write!(f, "{}", self.description())
108 }
109 }
110
#[cfg(feature="std")]
impl ::std::error::Error for TimerError {
    fn description(&self) -> &str {
        // Explicitly call the *inherent* `description` method. (Inherent
        // methods win over trait methods in method resolution, so the
        // original `self.description()` also did not recurse; the explicit
        // path just makes that unmistakable.)
        TimerError::description(self)
    }
}
117
// Zero means "timer test not yet run"; `test_timer` always returns a
// positive round count, so any non-zero value is a valid cached result.
119 #[cfg(feature="std")]
120 static JITTER_ROUNDS: AtomicUsize = ATOMIC_USIZE_INIT;
121
122 impl JitterRng {
123 /// Create a new `JitterRng`.
124 /// Makes use of `std::time` for a timer.
125 ///
126 /// During initialization CPU execution timing jitter is measured a few
127 /// hundred times. If this does not pass basic quality tests, an error is
128 /// returned. The test result is cached to make subsequent calls faster.
#[cfg(feature="std")]
pub fn new() -> Result<JitterRng, TimerError> {
    let mut rng = JitterRng::new_with_timer(platform::get_nstime);

    // Reuse a previously cached rounds value when available; zero in the
    // cache means the timer test has not produced a result yet.
    let cached = JITTER_ROUNDS.load(Ordering::Relaxed) as u32;
    let rounds = if cached != 0 {
        cached
    } else {
        // Racing threads may each run the test; that is harmless, the
        // last store simply wins.
        let tested = rng.test_timer()?;
        JITTER_ROUNDS.store(tested as usize, Ordering::Relaxed);
        tested
    };

    rng.set_rounds(rounds);
    Ok(rng)
}
142
/// Create a new `JitterRng`.
/// A custom timer can be supplied, making it possible to use `JitterRng` in
/// `no_std` environments.
///
/// The timer must have nanosecond precision.
///
/// This method is more low-level than `new()`. It is the responsibility of
/// the caller to run `test_timer` before using any numbers generated with
/// `JitterRng`, and optionally call `set_rounds()`.
pub fn new_with_timer(timer: fn() -> u64) -> JitterRng {
    let mut ec = JitterRng {
        data: 0,
        // Conservative default; `new()` replaces this with the value
        // estimated by `test_timer`.
        rounds: 64,
        timer: timer,
        prev_time: 0,
        last_delta: 0,
        last_delta2: 0,
        mem_prev_index: 0,
        mem: [0; MEMORY_SIZE],
        data_remaining: None,
    };

    // Fill `data`, `prev_time`, `last_delta` and `last_delta2` with
    // non-zero values.
    ec.prev_time = timer();
    ec.gen_entropy();

    // Do a single read from `self.mem` to make sure the Memory Access noise
    // source is not optimised out.
    // Note: this read is important, it affects optimisations for the entire
    // module!
    black_box(ec.mem[0]);

    ec
}
178
179 /// Configures how many rounds are used to generate each 64-bit value.
180 /// This must be greater than zero, and has a big impact on performance
181 /// and output quality.
182 ///
183 /// `new_with_timer` conservatively uses 64 rounds, but often less rounds
184 /// can be used. The `test_timer()` function returns the minimum number of
185 /// rounds required for full strength (platform dependent), so one may use
186 /// `rng.set_rounds(rng.test_timer()?);` or cache the value.
set_rounds(&mut self, rounds: u32)187 pub fn set_rounds(&mut self, rounds: u32) {
188 assert!(rounds > 0);
189 self.rounds = rounds;
190 }
191
// Calculate a random loop count used for the next round of an entropy
// collection, based on bits from a fresh value from the timer.
//
// The timer is folded to produce a number that contains at most `n_bits`
// bits.
//
// Note: A constant should be added to the resulting random loop count to
// prevent loops that run 0 times.
#[inline(never)]
fn random_loop_cnt(&mut self, n_bits: u32) -> u32 {
    let mut rounds = 0;

    let mut time = (self.timer)();
    // Mix in the current state of the random number to balance the random
    // loop counter a bit more.
    time ^= self.data;

    // We fold the time value as much as possible to ensure that as many
    // bits of the time stamp are included as possible.
    // `folds` is ceil(64 / n_bits); each fold XORs the next `n_bits`-wide
    // chunk of the timer value into `rounds`.
    let folds = (64 + n_bits - 1) / n_bits;
    let mask = (1 << n_bits) - 1;
    for _ in 0..folds {
        rounds ^= time & mask;
        time = time >> n_bits;
    }

    rounds as u32
}
220
// CPU jitter noise source
// Noise source based on the CPU execution time jitter
//
// This function injects the individual bits of the time value into the
// entropy pool using an LFSR.
//
// The code is deliberately inefficient with respect to the bit shifting.
// This function not only acts as folding operation, but this function's
// execution is used to measure the CPU execution time jitter. Any change to
// the loop in this function implies that careful retesting must be done.
#[inline(never)]
fn lfsr_time(&mut self, time: u64, var_rounds: bool) {
    // One full pass of the 64-round LFSR fold, mixing `time` into `data`.
    fn lfsr(mut data: u64, time: u64) -> u64{
        for i in 1..65 {
            // Isolate bit (i - 1) of `time` in the LSB position.
            let mut tmp = time << (64 - i);
            tmp = tmp >> (64 - 1);

            // Fibonacci LFSR with polynomial of
            // x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
            // primitive according to
            // http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf
            // (the shift values are the polynomial values minus one
            // due to counting bits from 0 to 63). As the current
            // position is always the LSB, the polynomial only needs
            // to shift data in from the left without wrap.
            data ^= tmp;
            data ^= (data >> 63) & 1;
            data ^= (data >> 60) & 1;
            data ^= (data >> 55) & 1;
            data ^= (data >> 30) & 1;
            data ^= (data >> 27) & 1;
            data ^= (data >> 22) & 1;
            data = data.rotate_left(1);
        }
        data
    }

    // Note: in the reference implementation only the last round effects
    // `self.data`, all the other results are ignored. To make sure the
    // other rounds are not optimised out, we first run all but the last
    // round on a throw-away value instead of the real `self.data`.
    let mut lfsr_loop_cnt = 0;
    if var_rounds { lfsr_loop_cnt = self.random_loop_cnt(4) };

    let mut throw_away: u64 = 0;
    for _ in 0..lfsr_loop_cnt {
        throw_away = lfsr(throw_away, time);
    }
    // Keep the throw-away work observable so the loop above cannot be
    // eliminated as dead code.
    black_box(throw_away);

    self.data = lfsr(self.data, time);
}
273
// Memory Access noise source
// This is a noise source based on variations in memory access times
//
// This function performs memory accesses which will add to the timing
// variations due to an unknown amount of CPU wait states that need to be
// added when accessing memory. The memory size should be larger than the L1
// caches as outlined in the documentation and the associated testing.
//
// The L1 cache has a very high bandwidth, albeit its access rate is usually
// slower than accessing CPU registers. Therefore, L1 accesses only add
// minimal variations as the CPU has hardly to wait. Starting with L2,
// significant variations are added because L2 typically does not belong to
// the CPU any more and therefore a wider range of CPU wait states is
// necessary for accesses. L3 and real memory accesses have even a wider
// range of wait states. However, to reliably access either L3 or memory,
// the `self.mem` memory must be quite large which is usually not desirable.
#[inline(never)]
fn memaccess(&mut self, var_rounds: bool) {
    // At least 128 accesses; optionally add a timer-derived extra count.
    let mut acc_loop_cnt = 128;
    if var_rounds { acc_loop_cnt += self.random_loop_cnt(4) };

    let mut index = self.mem_prev_index;
    for _ in 0..acc_loop_cnt {
        // Addition of memblocksize - 1 to index with wrap around logic to
        // ensure that every memory location is hit evenly.
        // The modulus also allows the compiler to remove the indexing
        // bounds check.
        index = (index + MEMORY_BLOCKSIZE - 1) % MEMORY_SIZE;

        // memory access: just add 1 to one byte
        // memory access implies read from and write to memory location
        let tmp = self.mem[index];
        self.mem[index] = tmp.wrapping_add(1);
    }
    // Resume from this position on the next call so accesses keep walking
    // the whole buffer.
    self.mem_prev_index = index;
}
310
311
// Stuck test by checking the:
// - 1st derivation of the jitter measurement (time delta)
// - 2nd derivation of the jitter measurement (delta of time deltas)
// - 3rd derivation of the jitter measurement (delta of delta of time
//   deltas)
//
// All values must always be non-zero.
// This test is a heuristic to see whether the last measurement holds
// entropy.
fn stuck(&mut self, current_delta: i64) -> bool {
    // `delta3` must use the *previous* second derivative, so it is
    // computed before `self.last_delta2` is updated below.
    let delta2 = self.last_delta - current_delta;
    let delta3 = delta2 - self.last_delta2;

    // Remember this measurement's derivatives for the next call.
    self.last_delta = current_delta;
    self.last_delta2 = delta2;

    // Any zero derivative indicates a possibly entropy-free measurement.
    current_delta == 0 || delta2 == 0 || delta3 == 0
}
330
// This is the heart of the entropy generation: calculate time deltas and
// use the CPU jitter in the time deltas. The jitter is injected into the
// entropy pool.
//
// Returns `None` when the stuck test rejected this measurement, `Some(())`
// otherwise.
//
// Ensure that `self.prev_time` is primed before using the output of this
// function. This can be done by calling this function and not using its
// result.
fn measure_jitter(&mut self) -> Option<()> {
    // Invoke one noise source before time measurement to add variations
    self.memaccess(true);

    // Get time stamp and calculate time delta to previous
    // invocation to measure the timing variations
    let time = (self.timer)();
    // Note: wrapping_sub combined with a cast to `i64` generates a correct
    // delta, even in the unlikely case this is a timer that is not strictly
    // monotonic.
    let current_delta = time.wrapping_sub(self.prev_time) as i64;
    self.prev_time = time;

    // Call the next noise source which also injects the data
    self.lfsr_time(current_delta as u64, true);

    // Check whether we have a stuck measurement (i.e. does the last
    // measurement hold entropy?).
    if self.stuck(current_delta) { return None };

    // Rotate the data buffer by a prime number (any odd number would
    // do) to ensure that every bit position of the input time stamp
    // has an even chance of being merged with a bit position in the
    // entropy pool. We do not use one here as the adjacent bits in
    // successive time deltas may have some form of dependency. The
    // chosen value of 7 implies that the low 7 bits of the next
    // time delta value is concatenated with the current time delta.
    self.data = self.data.rotate_left(7);

    Some(())
}
369
// Shuffle the pool a bit by mixing some value with a bijective function
// (XOR) into the pool.
//
// The function generates a mixer value that depends on the bits set and
// the location of the set bits in the random number generated by the
// entropy source. Therefore, based on the generated random number, this
// mixer value can have 2^64 different values. That mixer value is
// initialized with the first two SHA-1 constants. After obtaining the
// mixer value, it is XORed into the random number.
//
// The mixer value is not assumed to contain any entropy. But due to the
// XOR operation, it can also not destroy any entropy present in the
// entropy pool.
#[inline(never)]
fn stir_pool(&mut self) {
    // This constant is derived from the first two 32 bit initialization
    // vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1
    // The order does not really matter as we do not rely on the specific
    // numbers. We just pick the SHA-1 constants as they have a good mix of
    // bit set and unset.
    const CONSTANT: u64 = 0x67452301efcdab89;

    // The start value of the mixer variable is derived from the third
    // and fourth 32 bit initialization vector of SHA-1 as defined in
    // FIPS 180-4 section 5.3.1
    let mut mixer = 0x98badcfe10325476;

    // This is a constant time function to prevent leaking timing
    // information about the random number.
    // The normal code is:
    // ```
    // for i in 0..64 {
    //     if ((self.data >> i) & 1) == 1 { mixer ^= CONSTANT; }
    // }
    // ```
    // This is a bit fragile, as LLVM really wants to use branches here, and
    // we rely on it to not recognise the opportunity.
    for i in 0..64 {
        let apply = (self.data >> i) & 1;
        // Branch-free select: `apply == 1` gives `!(1 - 1) = !0` (all
        // ones); `apply == 0` gives `!(0 - 1) = !u64::MAX` (all zeros).
        let mask = !apply.wrapping_sub(1);
        mixer ^= CONSTANT & mask;
        mixer = mixer.rotate_left(1);
    }

    self.data ^= mixer;
}
416
gen_entropy(&mut self) -> u64417 fn gen_entropy(&mut self) -> u64 {
418 // Prime `self.prev_time`, and run the noice sources to make sure the
419 // first loop round collects the expected entropy.
420 let _ = self.measure_jitter();
421
422 for _ in 0..self.rounds {
423 // If a stuck measurement is received, repeat measurement
424 // Note: we do not guard against an infinite loop, that would mean
425 // the timer suddenly became broken.
426 while self.measure_jitter().is_none() {}
427 }
428
429 self.stir_pool();
430 self.data
431 }
432
433 /// Basic quality tests on the timer, by measuring CPU timing jitter a few
434 /// hundred times.
435 ///
/// If successful, this will return the estimated number of rounds necessary
437 /// to collect 64 bits of entropy. Otherwise a `TimerError` with the cause
438 /// of the failure will be returned.
test_timer(&mut self) -> Result<u32, TimerError>439 pub fn test_timer(&mut self) -> Result<u32, TimerError> {
440 // We could add a check for system capabilities such as `clock_getres`
441 // or check for `CONFIG_X86_TSC`, but it does not make much sense as the
442 // following sanity checks verify that we have a high-resolution timer.
443
444 #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
445 return Err(TimerError::NoTimer);
446
447 let mut delta_sum = 0;
448 let mut old_delta = 0;
449
450 let mut time_backwards = 0;
451 let mut count_mod = 0;
452 let mut count_stuck = 0;
453
454 // TESTLOOPCOUNT needs some loops to identify edge systems.
455 // 100 is definitely too little.
456 const TESTLOOPCOUNT: u64 = 300;
457 const CLEARCACHE: u64 = 100;
458
459 for i in 0..(CLEARCACHE + TESTLOOPCOUNT) {
460 // Measure time delta of core entropy collection logic
461 let time = (self.timer)();
462 self.memaccess(true);
463 self.lfsr_time(time, true);
464 let time2 = (self.timer)();
465
466 // Test whether timer works
467 if time == 0 || time2 == 0 {
468 return Err(TimerError::NoTimer);
469 }
470 let delta = time2.wrapping_sub(time) as i64;
471
472 // Test whether timer is fine grained enough to provide delta even
473 // when called shortly after each other -- this implies that we also
474 // have a high resolution timer
475 if delta == 0 {
476 return Err(TimerError::CoarseTimer);
477 }
478
479 // Up to here we did not modify any variable that will be
480 // evaluated later, but we already performed some work. Thus we
481 // already have had an impact on the caches, branch prediction,
482 // etc. with the goal to clear it to get the worst case
483 // measurements.
484 if i < CLEARCACHE { continue; }
485
486 if self.stuck(delta) { count_stuck += 1; }
487
488 // Test whether we have an increasing timer.
489 if !(time2 > time) { time_backwards += 1; }
490
491 // Count the number of times the counter increases in steps of 100ns
492 // or greater.
493 if (delta % 100) == 0 { count_mod += 1; }
494
495 // Ensure that we have a varying delta timer which is necessary for
496 // the calculation of entropy -- perform this check only after the
497 // first loop is executed as we need to prime the old_delta value
498 delta_sum += (delta - old_delta).abs() as u64;
499 old_delta = delta;
500 }
501
502 // We allow the time to run backwards for up to three times.
503 // This can happen if the clock is being adjusted by NTP operations.
504 // If such an operation just happens to interfere with our test, it
505 // should not fail. The value of 3 should cover the NTP case being
506 // performed during our test run.
507 if time_backwards > 3 {
508 return Err(TimerError::NotMonotonic);
509 }
510
511 // Test that the available amount of entropy per round does not get to
512 // low. We expect 1 bit of entropy per round as a reasonable minimum
513 // (although less is possible, it means the collector loop has to run
514 // much more often).
515 // `assert!(delta_average >= log2(1))`
516 // `assert!(delta_sum / TESTLOOPCOUNT >= 1)`
517 // `assert!(delta_sum >= TESTLOOPCOUNT)`
518 if delta_sum < TESTLOOPCOUNT {
519 return Err(TimerError::TinyVariantions);
520 }
521
522 // Ensure that we have variations in the time stamp below 100 for at
523 // least 10% of all checks -- on some platforms, the counter increments
524 // in multiples of 100, but not always
525 if count_mod > (TESTLOOPCOUNT * 9 / 10) {
526 return Err(TimerError::CoarseTimer);
527 }
528
529 // If we have more than 90% stuck results, then this Jitter RNG is
530 // likely to not work well.
531 if count_stuck > (TESTLOOPCOUNT * 9 / 10) {
532 return Err(TimerError::TooManyStuck);
533 }
534
535 // Estimate the number of `measure_jitter` rounds necessary for 64 bits
536 // of entropy.
537 //
538 // We don't try very hard to come up with a good estimate of the
539 // available bits of entropy per round here for two reasons:
540 // 1. Simple estimates of the available bits (like Shannon entropy) are
541 // too optimistic.
542 // 2) Unless we want to waste a lot of time during intialization, there
543 // only a small number of samples are available.
544 //
545 // Therefore we use a very simple and conservative estimate:
546 // `let bits_of_entropy = log2(delta_average) / 2`.
547 //
548 // The number of rounds `measure_jitter` should run to collect 64 bits
549 // of entropy is `64 / bits_of_entropy`.
550 //
551 // To have smaller rounding errors, intermediate values are multiplied
552 // by `FACTOR`. To compensate for `log2` and division rounding down,
553 // add 1.
554 let delta_average = delta_sum / TESTLOOPCOUNT;
555 // println!("delta_average: {}", delta_average);
556
557 const FACTOR: u32 = 3;
558 fn log2(x: u64) -> u32 { 64 - x.leading_zeros() }
559
560 // pow(δ, FACTOR) must be representable; if you have overflow reduce FACTOR
561 Ok(64 * 2 * FACTOR / (log2(delta_average.pow(FACTOR)) + 1))
562 }
563
564 /// Statistical test: return the timer delta of one normal run of the
565 /// `JitterEntropy` entropy collector.
566 ///
567 /// Setting `var_rounds` to `true` will execute the memory access and the
/// CPU jitter noise sources a variable amount of times (just like a real
569 /// `JitterEntropy` round).
570 ///
/// Setting `var_rounds` to `false` will execute the noise sources the
572 /// minimal number of times. This can be used to measure the minimum amount
573 /// of entropy one round of entropy collector can collect in the worst case.
574 ///
575 /// # Example
576 ///
577 /// Use `timer_stats` to run the [NIST SP 800-90B Entropy Estimation Suite]
578 /// (https://github.com/usnistgov/SP800-90B_EntropyAssessment).
579 ///
580 /// This is the recommended way to test the quality of `JitterRng`. It
581 /// should be run before using the RNG on untested hardware, after changes
/// that could affect how the code is optimised, and after major compiler
/// changes, like a new LLVM version.
584 ///
/// First generate two files `jitter_rng_var.bin` and `jitter_rng_min.bin`.
586 ///
587 /// Execute `python noniid_main.py -v jitter_rng_var.bin 8`, and validate it
588 /// with `restart.py -v jitter_rng_var.bin 8 <min-entropy>`.
589 /// This number is the expected amount of entropy that is at least available
590 /// for each round of the entropy collector. This number should be greater
591 /// than the amount estimated with `64 / test_timer()`.
592 ///
593 /// Execute `python noniid_main.py -v -u 4 jitter_rng_var.bin 4`, and
594 /// validate it with `restart.py -v -u 4 jitter_rng_var.bin 4 <min-entropy>`.
595 /// This number is the expected amount of entropy that is available in the
/// last 4 bits of the timer delta after running noise sources. Note that
597 /// a value of 3.70 is the minimum estimated entropy for true randomness.
598 ///
/// Execute `python noniid_main.py -v -u 4 jitter_rng_min.bin 4`, and
/// validate it with `restart.py -v -u 4 jitter_rng_min.bin 4 <min-entropy>`.
601 /// This number is the expected amount of entropy that is available to the
/// entropy collector if both noise sources only run their minimal number of
603 /// times. This measures the absolute worst-case, and gives a lower bound
604 /// for the available entropy.
605 ///
606 /// ```rust,no_run
607 /// use rand::JitterRng;
608 ///
609 /// # use std::error::Error;
610 /// # use std::fs::File;
611 /// # use std::io::Write;
612 /// #
613 /// # fn try_main() -> Result<(), Box<Error>> {
614 /// fn get_nstime() -> u64 {
615 /// use std::time::{SystemTime, UNIX_EPOCH};
616 ///
617 /// let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
618 /// // The correct way to calculate the current time is
619 /// // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
620 /// // But this is faster, and the difference in terms of entropy is
621 /// // negligible (log2(10^9) == 29.9).
622 /// dur.as_secs() << 30 | dur.subsec_nanos() as u64
623 /// }
624 ///
625 /// // Do not initialize with `JitterRng::new`, but with `new_with_timer`.
/// // 'new' always runs `test_timer`, and can therefore fail to
627 /// // initialize. We want to be able to get the statistics even when the
628 /// // timer test fails.
629 /// let mut rng = JitterRng::new_with_timer(get_nstime);
630 ///
631 /// // 1_000_000 results are required for the NIST SP 800-90B Entropy
632 /// // Estimation Suite
633 /// // FIXME: this number is smaller here, otherwise the Doc-test is too slow
634 /// const ROUNDS: usize = 10_000;
635 /// let mut deltas_variable: Vec<u8> = Vec::with_capacity(ROUNDS);
636 /// let mut deltas_minimal: Vec<u8> = Vec::with_capacity(ROUNDS);
637 ///
638 /// for _ in 0..ROUNDS {
639 /// deltas_variable.push(rng.timer_stats(true) as u8);
640 /// deltas_minimal.push(rng.timer_stats(false) as u8);
641 /// }
642 ///
643 /// // Write out after the statistics collection loop, to not disturb the
644 /// // test results.
645 /// File::create("jitter_rng_var.bin")?.write(&deltas_variable)?;
646 /// File::create("jitter_rng_min.bin")?.write(&deltas_minimal)?;
647 /// #
648 /// # Ok(())
649 /// # }
650 /// #
651 /// # fn main() {
652 /// # try_main().unwrap();
653 /// # }
654 /// ```
#[cfg(feature="std")]
pub fn timer_stats(&mut self, var_rounds: bool) -> i64 {
    // Time one full entropy-collection round: both noise sources,
    // bracketed by two timer reads; return the elapsed delta.
    let t0 = platform::get_nstime();
    self.memaccess(var_rounds);
    self.lfsr_time(t0, var_rounds);
    let t1 = platform::get_nstime();
    t1.wrapping_sub(t0) as i64
}
663 }
664
#[cfg(feature="std")]
mod platform {
    // Platform-specific high-resolution timers for `JitterRng::new` and
    // `timer_stats`. Exactly one `get_nstime` is compiled per target.

    // Default: wall clock from `std::time` (all targets except the ones
    // special-cased below).
    #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "windows", all(target_arch = "wasm32", not(target_os = "emscripten")))))]
    pub fn get_nstime() -> u64 {
        use std::time::{SystemTime, UNIX_EPOCH};

        let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        // The correct way to calculate the current time is
        // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
        // But this is faster, and the difference in terms of entropy is
        // negligible (log2(10^9) == 29.9). The nanoseconds (< 10^9) fit in
        // the low 30 bits.
        dur.as_secs() << 30 | dur.subsec_nanos() as u64
    }

    #[cfg(any(target_os = "macos", target_os = "ios"))]
    pub fn get_nstime() -> u64 {
        extern crate libc;
        // On Mac OS and iOS std::time::SystemTime only has 1000ns resolution.
        // We use `mach_absolute_time` instead. This provides a CPU dependent
        // unit, to get real nanoseconds the result should be multiplied by
        // numer/denom from `mach_timebase_info`.
        // But we are not interested in the exact nanoseconds, just entropy.
        // So we use the raw result.
        unsafe { libc::mach_absolute_time() }
    }

    #[cfg(target_os = "windows")]
    pub fn get_nstime() -> u64 {
        extern crate winapi;
        unsafe {
            // High-resolution tick count; the unit is irrelevant here,
            // only the variation between readings matters.
            let mut t = super::mem::zeroed();
            winapi::um::profileapi::QueryPerformanceCounter(&mut t);
            *t.QuadPart() as u64
        }
    }

    #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
    pub fn get_nstime() -> u64 {
        // Never called: `test_timer` returns `TimerError::NoTimer` on this
        // target before any timer read can happen.
        unreachable!()
    }
}
706
707 // A function that is opaque to the optimizer to assist in avoiding dead-code
708 // elimination. Taken from `bencher`.
black_box<T>(dummy: T) -> T709 fn black_box<T>(dummy: T) -> T {
710 unsafe {
711 let ret = ptr::read_volatile(&dummy);
712 mem::forget(dummy);
713 ret
714 }
715 }
716
717 impl Rng for JitterRng {
next_u32(&mut self) -> u32718 fn next_u32(&mut self) -> u32 {
719 // We want to use both parts of the generated entropy
720 if let Some(high) = self.data_remaining.take() {
721 high
722 } else {
723 let data = self.next_u64();
724 self.data_remaining = Some((data >> 32) as u32);
725 data as u32
726 }
727 }
728
next_u64(&mut self) -> u64729 fn next_u64(&mut self) -> u64 {
730 self.gen_entropy()
731 }
732
fill_bytes(&mut self, dest: &mut [u8])733 fn fill_bytes(&mut self, dest: &mut [u8]) {
734 let mut left = dest;
735 while left.len() >= 8 {
736 let (l, r) = {left}.split_at_mut(8);
737 left = r;
738 let chunk: [u8; 8] = unsafe {
739 mem::transmute(self.next_u64().to_le())
740 };
741 l.copy_from_slice(&chunk);
742 }
743 let n = left.len();
744 if n > 0 {
745 let chunk: [u8; 8] = unsafe {
746 mem::transmute(self.next_u64().to_le())
747 };
748 left.copy_from_slice(&chunk[..n]);
749 }
750 }
751 }
752
753 // There are no tests included because (1) this is an "external" RNG, so output
754 // is not reproducible and (2) `test_timer` *will* fail on some platforms.
755