1 // Copyright 2018 Developers of the Rand project.
2 //
3 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
4 // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
5 // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
6 // option. This file may not be copied, modified, or distributed
7 // except according to those terms.
8 //
9 // Based on jitterentropy-library, http://www.chronox.de/jent.html.
10 // Copyright Stephan Mueller <smueller@chronox.de>, 2014 - 2017.
11 //
12 // With permission from Stephan Mueller to relicense the Rust translation under
13 // the MIT license.
14
15 //! Non-physical true random number generator based on timing jitter.
16 //!
17 //! This is a true random number generator, as opposed to pseudo-random
18 //! generators. Random numbers generated by `JitterRng` can be seen as fresh
19 //! entropy. A consequence is that it is orders of magnitude slower than `OsRng`
20 //! and PRNGs (about 10<sup>3</sup>..10<sup>6</sup> slower).
21 //!
22 //! There are very few situations where using this RNG is appropriate. Only very
23 //! few applications require true entropy. A normal PRNG can be statistically
24 //! indistinguishable, and a cryptographic PRNG should also be as impossible to
25 //! predict.
26 //!
27 //! Use of `JitterRng` is recommended for initializing cryptographic PRNGs when
28 //! `OsRng` is not available.
29 //!
//! `JitterRng` can be used without the standard library, but not conveniently:
//! you must provide a high-precision timer and carefully follow the
//! instructions of [`JitterRng::new_with_timer`].
33 //!
34 //! This implementation is based on [Jitterentropy] version 2.1.0.
35 //!
36 //! Note: There is no accurate timer available on WASM platforms, to help
37 //! prevent fingerprinting or timing side-channel attacks. Therefore
38 //! [`JitterRng::new()`] is not available on WASM. It is also unavailable
39 //! with disabled `std` feature.
40 //!
41 //! [Jitterentropy]: http://www.chronox.de/jent.html
42
43 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
44 html_favicon_url = "https://www.rust-lang.org/favicon.ico",
45 html_root_url = "https://rust-random.github.io/rand/")]
46
47 #![deny(missing_docs)]
48 #![deny(missing_debug_implementations)]
49 #![doc(test(attr(allow(unused_variables), deny(warnings))))]
50
51 // Note: the C implementation of `Jitterentropy` relies on being compiled
52 // without optimizations. This implementation goes through lengths to make the
53 // compiler not optimize out code which does influence timing jitter, but is
54 // technically dead code.
55 #![no_std]
56 pub extern crate rand_core;
57 #[cfg(feature = "std")]
58 extern crate std;
59 #[cfg(feature = "log")]
60 #[macro_use] extern crate log;
61 #[cfg(any(target_os = "macos", target_os = "ios"))]
62 extern crate libc;
63 #[cfg(target_os = "windows")]
64 extern crate winapi;
65
66
67 #[cfg(not(feature = "log"))]
68 #[macro_use] mod dummy_log;
69 #[cfg(feature = "std")]
70 mod platform;
71 mod error;
72
73 use rand_core::{RngCore, CryptoRng, Error, impls};
74 pub use error::TimerError;
75
76 use core::{fmt, mem, ptr};
77 #[cfg(feature = "std")]
78 use std::sync::atomic::{AtomicUsize, Ordering};
79 #[cfg(feature = "std")]
80 #[allow(deprecated)] // Required for compatibility with Rust < 1.24.
81 use std::sync::atomic::ATOMIC_USIZE_INIT;
82
// Layout of the scratch buffer used by the Memory Access noise source
// (see `JitterRng::memaccess`): `MEMORY_BLOCKS` blocks of `MEMORY_BLOCKSIZE`
// bytes each, 2048 bytes in total. Accesses step through it in strides of
// `MEMORY_BLOCKSIZE - 1` so every byte is eventually hit.
const MEMORY_BLOCKS: usize = 64;
const MEMORY_BLOCKSIZE: usize = 32;
const MEMORY_SIZE: usize = MEMORY_BLOCKS * MEMORY_BLOCKSIZE;
86
/// A true random number generator based on jitter in the CPU execution time,
/// and jitter in memory access time.
pub struct JitterRng {
    // The 64-bit entropy pool; also the most recently generated value.
    data: u64, // Actual random number
    // Number of rounds to run the entropy collector per 64 bits
    rounds: u8,
    // Timer used by `measure_jitter`; must return a nanosecond-resolution
    // time stamp.
    timer: fn() -> u64,
    // Index into the Memory Access noise source's scratch buffer where the
    // previous `memaccess` round left off (fits in u16: buffer is 2048 bytes).
    mem_prev_index: u16,
    // Make `next_u32` not waste 32 bits: when set, the high half of `data`
    // is still unused and will be handed out by the next `next_u32` call.
    data_half_used: bool,
}
100
101 // Note: `JitterRng` maintains a small 64-bit entropy pool. With every
102 // `generate` 64 new bits should be integrated in the pool. If a round of
103 // `generate` were to collect less than the expected 64 bit, then the returned
104 // value, and the new state of the entropy pool, would be in some way related to
105 // the initial state. It is therefore better if the initial state of the entropy
106 // pool is different on each call to `generate`. This has a few implications:
107 // - `generate` should be called once before using `JitterRng` to produce the
108 // first usable value (this is done by default in `new`);
109 // - We do not zero the entropy pool after generating a result. The reference
110 // implementation also does not support zeroing, but recommends generating a
111 // new value without using it if you want to protect a previously generated
112 // 'secret' value from someone inspecting the memory;
113 // - Implementing `Clone` seems acceptable, as it would not cause the systematic
114 // bias a constant might cause. Only instead of one value that could be
115 // potentially related to the same initial state, there are now two.
116
// Entropy collector state.
// These values are not necessary to preserve across runs.
struct EcState {
    // Previous time stamp to determine the timer delta
    prev_time: u64,
    // Deltas used for the stuck test: the previous time delta and the
    // previous delta-of-deltas (see `EcState::stuck`).
    last_delta: i32,
    last_delta2: i32,
    // Memory for the Memory Access noise source
    mem: [u8; MEMORY_SIZE],
}
128
129 impl EcState {
130 // Stuck test by checking the:
131 // - 1st derivation of the jitter measurement (time delta)
132 // - 2nd derivation of the jitter measurement (delta of time deltas)
133 // - 3rd derivation of the jitter measurement (delta of delta of time
134 // deltas)
135 //
136 // All values must always be non-zero.
137 // This test is a heuristic to see whether the last measurement holds
138 // entropy.
stuck(&mut self, current_delta: i32) -> bool139 fn stuck(&mut self, current_delta: i32) -> bool {
140 let delta2 = self.last_delta - current_delta;
141 let delta3 = delta2 - self.last_delta2;
142
143 self.last_delta = current_delta;
144 self.last_delta2 = delta2;
145
146 current_delta == 0 || delta2 == 0 || delta3 == 0
147 }
148 }
149
// Custom Debug implementation that does not expose the internal state
impl fmt::Debug for JitterRng {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Deliberately print no fields: `data` is the entropy pool and must
        // not leak into debug output or logs.
        write!(f, "JitterRng {{}}")
    }
}
156
// `Clone` is implemented by hand (rather than derived) so that
// `data_half_used` can be reset in the clone; see the field comment below.
impl Clone for JitterRng {
    fn clone(&self) -> JitterRng {
        JitterRng {
            data: self.data,
            rounds: self.rounds,
            timer: self.timer,
            mem_prev_index: self.mem_prev_index,
            // The 32 bits that may still be unused from the previous round are
            // for the original to use, not for the clone.
            data_half_used: false,
        }
    }
}
170
// Cached result of `test_timer()`, shared by every `JitterRng` created
// through `new()`. Initialised to zero, meaning "not yet tested"; after a
// successful test it holds the (always positive) number of rounds per u64.
#[cfg(all(feature = "std", not(target_arch = "wasm32")))]
#[allow(deprecated)]
static JITTER_ROUNDS: AtomicUsize = ATOMIC_USIZE_INIT;
175
impl JitterRng {
    /// Create a new `JitterRng`. Makes use of `std::time` for a timer, or a
    /// platform-specific function with higher accuracy if necessary and
    /// available.
    ///
    /// During initialization CPU execution timing jitter is measured a few
    /// hundred times. If this does not pass basic quality tests, an error is
    /// returned. The test result is cached to make subsequent calls faster.
    #[cfg(all(feature = "std", not(target_arch = "wasm32")))]
    pub fn new() -> Result<JitterRng, TimerError> {
        if cfg!(target_arch = "wasm32") {
            return Err(TimerError::NoTimer);
        }
        let mut state = JitterRng::new_with_timer(platform::get_nstime);
        let mut rounds = JITTER_ROUNDS.load(Ordering::Relaxed) as u8;
        if rounds == 0 {
            // No cached result yet: run the timer test.
            // This allows the timer test to run multiple times; we don't care.
            rounds = state.test_timer()?;
            JITTER_ROUNDS.store(rounds as usize, Ordering::Relaxed);
            info!("JitterRng: using {} rounds per u64 output", rounds);
        }
        state.set_rounds(rounds);

        // Fill `data` with a non-zero value.
        state.gen_entropy();
        Ok(state)
    }

    /// Create a new `JitterRng`.
    /// A custom timer can be supplied, making it possible to use `JitterRng` in
    /// `no_std` environments.
    ///
    /// The timer must have nanosecond precision.
    ///
    /// This method is more low-level than `new()`. It is the responsibility of
    /// the caller to run [`test_timer`] before using any numbers generated with
    /// `JitterRng`, and optionally call [`set_rounds`]. Also it is important to
    /// consume at least one `u64` before using the first result to initialize
    /// the entropy collection pool.
    ///
    /// # Example
    ///
    /// ```
    /// # use rand_jitter::rand_core::{RngCore, Error};
    /// use rand_jitter::JitterRng;
    ///
    /// # fn try_inner() -> Result<(), Error> {
    /// fn get_nstime() -> u64 {
    ///     use std::time::{SystemTime, UNIX_EPOCH};
    ///
    ///     let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    ///     // The correct way to calculate the current time is
    ///     // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
    ///     // But this is faster, and the difference in terms of entropy is
    ///     // negligible (log2(10^9) == 29.9).
    ///     dur.as_secs() << 30 | dur.subsec_nanos() as u64
    /// }
    ///
    /// let mut rng = JitterRng::new_with_timer(get_nstime);
    /// let rounds = rng.test_timer()?;
    /// rng.set_rounds(rounds); // optional
    /// let _ = rng.next_u64();
    ///
    /// // Ready for use
    /// let v: u64 = rng.next_u64();
    /// # Ok(())
    /// # }
    ///
    /// # let _ = try_inner();
    /// ```
    ///
    /// [`test_timer`]: JitterRng::test_timer
    /// [`set_rounds`]: JitterRng::set_rounds
    pub fn new_with_timer(timer: fn() -> u64) -> JitterRng {
        JitterRng {
            data: 0,
            rounds: 64,
            timer,
            mem_prev_index: 0,
            data_half_used: false,
        }
    }

    /// Configures how many rounds are used to generate each 64-bit value.
    /// This must be greater than zero, and has a big impact on performance
    /// and output quality.
    ///
    /// [`new_with_timer`] conservatively uses 64 rounds, but often less rounds
    /// can be used. The `test_timer()` function returns the minimum number of
    /// rounds required for full strength (platform dependent), so one may use
    /// `rng.set_rounds(rng.test_timer()?);` or cache the value.
    ///
    /// # Panics
    ///
    /// Panics if `rounds` is zero.
    ///
    /// [`new_with_timer`]: JitterRng::new_with_timer
    pub fn set_rounds(&mut self, rounds: u8) {
        assert!(rounds > 0);
        self.rounds = rounds;
    }

    // Calculate a random loop count used for the next round of an entropy
    // collection, based on bits from a fresh value from the timer.
    //
    // The timer is folded to produce a number that contains at most `n_bits`
    // bits.
    //
    // Note: A constant should be added to the resulting random loop count to
    // prevent loops that run 0 times.
    #[inline(never)]
    fn random_loop_cnt(&mut self, n_bits: u32) -> u32 {
        let mut rounds = 0;

        let mut time = (self.timer)();
        // Mix with the current state of the random number to balance the
        // random loop counter a bit more.
        time ^= self.data;

        // We fold the time value as much as possible to ensure that as many
        // bits of the time stamp are included as possible.
        let folds = (64 + n_bits - 1) / n_bits;
        let mask = (1 << n_bits) - 1;
        for _ in 0..folds {
            rounds ^= time & mask;
            time >>= n_bits;
        }

        rounds as u32
    }

    // CPU jitter noise source
    // Noise source based on the CPU execution time jitter
    //
    // This function injects the individual bits of the time value into the
    // entropy pool using an LFSR.
    //
    // The code is deliberately inefficient with respect to the bit shifting.
    // This function not only acts as folding operation, but this function's
    // execution is used to measure the CPU execution time jitter. Any change to
    // the loop in this function implies that careful retesting must be done.
    #[inline(never)]
    fn lfsr_time(&mut self, time: u64, var_rounds: bool) {
        fn lfsr(mut data: u64, time: u64) -> u64{
            for i in 1..65 {
                // Isolate bit `i - 1` of `time` (counted from the LSB).
                let mut tmp = time << (64 - i);
                tmp >>= 64 - 1;

                // Fibonacci LSFR with polynomial of
                // x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
                // primitive according to
                // http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf
                // (the shift values are the polynomial values minus one
                // due to counting bits from 0 to 63). As the current
                // position is always the LSB, the polynomial only needs
                // to shift data in from the left without wrap.
                data ^= tmp;
                data ^= (data >> 63) & 1;
                data ^= (data >> 60) & 1;
                data ^= (data >> 55) & 1;
                data ^= (data >> 30) & 1;
                data ^= (data >> 27) & 1;
                data ^= (data >> 22) & 1;
                data = data.rotate_left(1);
            }
            data
        }

        // Note: in the reference implementation only the last round effects
        // `self.data`, all the other results are ignored. To make sure the
        // other rounds are not optimised out, we first run all but the last
        // round on a throw-away value instead of the real `self.data`.
        let mut lfsr_loop_cnt = 0;
        if var_rounds { lfsr_loop_cnt = self.random_loop_cnt(4) };

        let mut throw_away: u64 = 0;
        for _ in 0..lfsr_loop_cnt {
            throw_away = lfsr(throw_away, time);
        }
        // Keep the throw-away rounds observable so they are not optimised out.
        black_box(throw_away);

        self.data = lfsr(self.data, time);
    }

    // Memory Access noise source
    // This is a noise source based on variations in memory access times
    //
    // This function performs memory accesses which will add to the timing
    // variations due to an unknown amount of CPU wait states that need to be
    // added when accessing memory. The memory size should be larger than the L1
    // caches as outlined in the documentation and the associated testing.
    //
    // The L1 cache has a very high bandwidth, albeit its access rate is usually
    // slower than accessing CPU registers. Therefore, L1 accesses only add
    // minimal variations as the CPU has hardly to wait. Starting with L2,
    // significant variations are added because L2 typically does not belong to
    // the CPU any more and therefore a wider range of CPU wait states is
    // necessary for accesses. L3 and real memory accesses have even a wider
    // range of wait states. However, to reliably access either L3 or memory,
    // the `self.mem` memory must be quite large which is usually not desirable.
    #[inline(never)]
    fn memaccess(&mut self, mem: &mut [u8; MEMORY_SIZE], var_rounds: bool) {
        let mut acc_loop_cnt = 128;
        if var_rounds { acc_loop_cnt += self.random_loop_cnt(4) };

        let mut index = self.mem_prev_index as usize;
        for _ in 0..acc_loop_cnt {
            // Addition of memblocksize - 1 to index with wrap around logic to
            // ensure that every memory location is hit evenly.
            // The modulus also allows the compiler to remove the indexing
            // bounds check.
            index = (index + MEMORY_BLOCKSIZE - 1) % MEMORY_SIZE;

            // memory access: just add 1 to one byte
            // memory access implies read from and write to memory location
            mem[index] = mem[index].wrapping_add(1);
        }
        self.mem_prev_index = index as u16;
    }

    // This is the heart of the entropy generation: calculate time deltas and
    // use the CPU jitter in the time deltas. The jitter is injected into the
    // entropy pool.
    //
    // Ensure that `ec.prev_time` is primed before using the output of this
    // function. This can be done by calling this function and not using its
    // result.
    //
    // Returns `None` when the measurement was "stuck" (holds no entropy) and
    // must be repeated.
    fn measure_jitter(&mut self, ec: &mut EcState) -> Option<()> {
        // Invoke one noise source before time measurement to add variations
        self.memaccess(&mut ec.mem, true);

        // Get time stamp and calculate time delta to previous
        // invocation to measure the timing variations
        let time = (self.timer)();
        // Note: wrapping_sub combined with a cast to `i64` generates a correct
        // delta, even in the unlikely case this is a timer that is not strictly
        // monotonic.
        let current_delta = time.wrapping_sub(ec.prev_time) as i64 as i32;
        ec.prev_time = time;

        // Call the next noise source which also injects the data
        self.lfsr_time(current_delta as u64, true);

        // Check whether we have a stuck measurement (i.e. does the last
        // measurement holds entropy?).
        if ec.stuck(current_delta) { return None };

        // Rotate the data buffer by a prime number (any odd number would
        // do) to ensure that every bit position of the input time stamp
        // has an even chance of being merged with a bit position in the
        // entropy pool. We do not use one here as the adjacent bits in
        // successive time deltas may have some form of dependency. The
        // chosen value of 7 implies that the low 7 bits of the next
        // time delta value is concatenated with the current time delta.
        self.data = self.data.rotate_left(7);

        Some(())
    }

    // Shuffle the pool a bit by mixing some value with a bijective function
    // (XOR) into the pool.
    //
    // The function generates a mixer value that depends on the bits set and
    // the location of the set bits in the random number generated by the
    // entropy source. Therefore, based on the generated random number, this
    // mixer value can have 2^64 different values. That mixer value is
    // initialized with the first two SHA-1 constants. After obtaining the
    // mixer value, it is XORed into the random number.
    //
    // The mixer value is not assumed to contain any entropy. But due to the
    // XOR operation, it can also not destroy any entropy present in the
    // entropy pool.
    #[inline(never)]
    fn stir_pool(&mut self) {
        // This constant is derived from the first two 32 bit initialization
        // vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1
        // The order does not really matter as we do not rely on the specific
        // numbers. We just pick the SHA-1 constants as they have a good mix of
        // bit set and unset.
        const CONSTANT: u64 = 0x67452301efcdab89;

        // The start value of the mixer variable is derived from the third
        // and fourth 32 bit initialization vector of SHA-1 as defined in
        // FIPS 180-4 section 5.3.1
        let mut mixer = 0x98badcfe10325476;

        // This is a constant time function to prevent leaking timing
        // information about the random number.
        // The normal code is:
        // ```
        // for i in 0..64 {
        //     if ((self.data >> i) & 1) == 1 { mixer ^= CONSTANT; }
        // }
        // ```
        // This is a bit fragile, as LLVM really wants to use branches here, and
        // we rely on it to not recognise the opportunity.
        for i in 0..64 {
            // `mask` is all-ones when bit `i` of `data` is set, all-zeros
            // otherwise; XORing `CONSTANT & mask` is the branchless form of
            // the conditional XOR above.
            let apply = (self.data >> i) & 1;
            let mask = !apply.wrapping_sub(1);
            mixer ^= CONSTANT & mask;
            mixer = mixer.rotate_left(1);
        }

        self.data ^= mixer;
    }

    // Collect 64 bits of entropy: run `self.rounds` successful jitter
    // measurements, stir the pool, and return its new contents.
    fn gen_entropy(&mut self) -> u64 {
        trace!("JitterRng: collecting entropy");

        // Prime `ec.prev_time`, and run the noise sources to make sure the
        // first loop round collects the expected entropy.
        let mut ec = EcState {
            prev_time: (self.timer)(),
            last_delta: 0,
            last_delta2: 0,
            mem: [0; MEMORY_SIZE],
        };
        let _ = self.measure_jitter(&mut ec);

        for _ in 0..self.rounds {
            // If a stuck measurement is received, repeat measurement
            // Note: we do not guard against an infinite loop, that would mean
            // the timer suddenly became broken.
            while self.measure_jitter(&mut ec).is_none() {}
        }

        // Do a single read from `self.mem` to make sure the Memory Access noise
        // source is not optimised out.
        black_box(ec.mem[0]);

        self.stir_pool();
        self.data
    }

    /// Basic quality tests on the timer, by measuring CPU timing jitter a few
    /// hundred times.
    ///
    /// If successful, this will return the estimated number of rounds necessary
    /// to collect 64 bits of entropy. Otherwise a [`TimerError`] with the cause
    /// of the failure will be returned.
    pub fn test_timer(&mut self) -> Result<u8, TimerError> {
        debug!("JitterRng: testing timer ...");
        // We could add a check for system capabilities such as `clock_getres`
        // or check for `CONFIG_X86_TSC`, but it does not make much sense as the
        // following sanity checks verify that we have a high-resolution timer.

        let mut delta_sum = 0;
        let mut old_delta = 0;

        let mut time_backwards = 0;
        let mut count_mod = 0;
        let mut count_stuck = 0;

        let mut ec = EcState {
            prev_time: (self.timer)(),
            last_delta: 0,
            last_delta2: 0,
            mem: [0; MEMORY_SIZE],
        };

        // TESTLOOPCOUNT needs some loops to identify edge systems.
        // 100 is definitely too little.
        const TESTLOOPCOUNT: u64 = 300;
        const CLEARCACHE: u64 = 100;

        for i in 0..(CLEARCACHE + TESTLOOPCOUNT) {
            // Measure time delta of core entropy collection logic
            let time = (self.timer)();
            self.memaccess(&mut ec.mem, true);
            self.lfsr_time(time, true);
            let time2 = (self.timer)();

            // Test whether timer works
            if time == 0 || time2 == 0 {
                return Err(TimerError::NoTimer);
            }
            let delta = time2.wrapping_sub(time) as i64 as i32;

            // Test whether timer is fine grained enough to provide delta even
            // when called shortly after each other -- this implies that we also
            // have a high resolution timer
            if delta == 0 {
                return Err(TimerError::CoarseTimer);
            }

            // Up to here we did not modify any variable that will be
            // evaluated later, but we already performed some work. Thus we
            // already have had an impact on the caches, branch prediction,
            // etc. with the goal to clear it to get the worst case
            // measurements.
            if i < CLEARCACHE { continue; }

            if ec.stuck(delta) { count_stuck += 1; }

            // Test whether we have an increasing timer.
            if !(time2 > time) { time_backwards += 1; }

            // Count the number of times the counter increases in steps of 100ns
            // or greater.
            if (delta % 100) == 0 { count_mod += 1; }

            // Ensure that we have a varying delta timer which is necessary for
            // the calculation of entropy -- perform this check only after the
            // first loop is executed as we need to prime the old_delta value
            delta_sum += (delta - old_delta).abs() as u64;
            old_delta = delta;
        }

        // Do a single read from `self.mem` to make sure the Memory Access noise
        // source is not optimised out.
        black_box(ec.mem[0]);

        // We allow the time to run backwards for up to three times.
        // This can happen if the clock is being adjusted by NTP operations.
        // If such an operation just happens to interfere with our test, it
        // should not fail. The value of 3 should cover the NTP case being
        // performed during our test run.
        if time_backwards > 3 {
            return Err(TimerError::NotMonotonic);
        }

        // Test that the available amount of entropy per round does not get to
        // low. We expect 1 bit of entropy per round as a reasonable minimum
        // (although less is possible, it means the collector loop has to run
        // much more often).
        // `assert!(delta_average >= log2(1))`
        // `assert!(delta_sum / TESTLOOPCOUNT >= 1)`
        // `assert!(delta_sum >= TESTLOOPCOUNT)`
        if delta_sum < TESTLOOPCOUNT {
            return Err(TimerError::TinyVariantions);
        }

        // Ensure that we have variations in the time stamp below 100 for at
        // least 10% of all checks -- on some platforms, the counter increments
        // in multiples of 100, but not always
        if count_mod > (TESTLOOPCOUNT * 9 / 10) {
            return Err(TimerError::CoarseTimer);
        }

        // If we have more than 90% stuck results, then this Jitter RNG is
        // likely to not work well.
        if count_stuck > (TESTLOOPCOUNT * 9 / 10) {
            return Err(TimerError::TooManyStuck);
        }

        // Estimate the number of `measure_jitter` rounds necessary for 64 bits
        // of entropy.
        //
        // We don't try very hard to come up with a good estimate of the
        // available bits of entropy per round here for two reasons:
        // 1. Simple estimates of the available bits (like Shannon entropy) are
        //    too optimistic.
        // 2. Unless we want to waste a lot of time during initialization, only
        //    a small number of samples are available.
        //
        // Therefore we use a very simple and conservative estimate:
        // `let bits_of_entropy = log2(delta_average) / 2`.
        //
        // The number of rounds `measure_jitter` should run to collect 64 bits
        // of entropy is `64 / bits_of_entropy`.
        let delta_average = delta_sum / TESTLOOPCOUNT;

        if delta_average >= 16 {
            let log2 = 64 - delta_average.leading_zeros();
            // Do something similar to roundup(64/(log2/2)):
            Ok( ((64u32 * 2 + log2 - 1) / log2) as u8)
        } else {
            // For values < 16 the rounding error becomes too large, use a
            // lookup table.
            // Values 0 and 1 are invalid, and filtered out by the
            // `delta_sum < TESTLOOPCOUNT` test above.
            let log2_lookup = [0,  0,  128, 81, 64, 56, 50, 46,
                               43, 41, 39,  38, 36, 35, 34, 33];
            Ok(log2_lookup[delta_average as usize])
        }
    }

    /// Statistical test: return the timer delta of one normal run of the
    /// `JitterRng` entropy collector.
    ///
    /// Setting `var_rounds` to `true` will execute the memory access and the
    /// CPU jitter noise sources a variable amount of times (just like a real
    /// `JitterRng` round).
    ///
    /// Setting `var_rounds` to `false` will execute the noise sources the
    /// minimal number of times. This can be used to measure the minimum amount
    /// of entropy one round of the entropy collector can collect in the worst
    /// case.
    ///
    /// See this crate's README on how to use `timer_stats` to test the quality
    /// of `JitterRng`.
    pub fn timer_stats(&mut self, var_rounds: bool) -> i64 {
        let mut mem = [0; MEMORY_SIZE];

        let time = (self.timer)();
        self.memaccess(&mut mem, var_rounds);
        self.lfsr_time(time, var_rounds);
        let time2 = (self.timer)();
        time2.wrapping_sub(time) as i64
    }
}
674
// A function that is opaque to the optimizer, used to keep code alive that
// would otherwise be considered dead and eliminated. Taken from `bencher`.
fn black_box<T>(dummy: T) -> T {
    // SAFETY: `dummy` is a valid, initialized value of `T` for the whole
    // duration of the volatile read. The original is `mem::forget`-ed after
    // being read out, so the value is neither dropped twice nor duplicated.
    unsafe {
        let result = ptr::read_volatile(&dummy);
        mem::forget(dummy);
        result
    }
}
684
685 impl RngCore for JitterRng {
next_u32(&mut self) -> u32686 fn next_u32(&mut self) -> u32 {
687 // We want to use both parts of the generated entropy
688 if self.data_half_used {
689 self.data_half_used = false;
690 (self.data >> 32) as u32
691 } else {
692 self.data = self.next_u64();
693 self.data_half_used = true;
694 self.data as u32
695 }
696 }
697
next_u64(&mut self) -> u64698 fn next_u64(&mut self) -> u64 {
699 self.data_half_used = false;
700 self.gen_entropy()
701 }
702
fill_bytes(&mut self, dest: &mut [u8])703 fn fill_bytes(&mut self, dest: &mut [u8]) {
704 // Fill using `next_u32`. This is faster for filling small slices (four
705 // bytes or less), while the overhead is negligible.
706 //
707 // This is done especially for wrappers that implement `next_u32`
708 // themselves via `fill_bytes`.
709 impls::fill_bytes_via_next(self, dest)
710 }
711
try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error>712 fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
713 Ok(self.fill_bytes(dest))
714 }
715 }
716
// Marker trait only (no methods): flags `JitterRng` as suitable for
// cryptographic use.
impl CryptoRng for JitterRng {}
718
719