/*	$NetBSD: kern_rndq.c,v 1.89 2016/05/21 15:27:15 riastradh Exp $	*/

/*-
 * Copyright (c) 1997-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon.
 * This code uses ideas and algorithms from the Linux driver written by
 * Ted Ts'o.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.89 2016/05/21 15:27:15 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/fcntl.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/rnd.h>
#include <sys/rndpool.h>
#include <sys/rndsink.h>
#include <sys/rndsource.h>
#include <sys/rngtest.h>
#include <sys/systm.h>

#include <dev/rnd_private.h>

#ifdef COMPAT_50
#include <compat/sys/rnd.h>
#endif

#if defined(__HAVE_CPU_RNG) && !defined(_RUMPKERNEL)
#include <machine/cpu_rng.h>
#endif

#if defined(__HAVE_CPU_COUNTER)
#include <machine/cpu_counter.h>
#endif

#ifdef RND_DEBUG
#define	DPRINTF(l,x)      if (rnd_debug & (l)) rnd_printf x
int	rnd_debug = 0;
#else
#define	DPRINTF(l,x)
#endif

/*
 * Define RND_VERBOSE to log source attachment and sample processing.
 */
#if 0
#define	RND_VERBOSE
#endif

#ifdef RND_VERBOSE
#define	rnd_printf_verbose(fmt, ...)	rnd_printf(fmt, ##__VA_ARGS__)
#else
#define	rnd_printf_verbose(fmt, ...)	((void)0)
#endif

#ifdef RND_VERBOSE
static unsigned int deltacnt;
#endif

/*
 * This is a little bit of state information attached to each device that we
 * collect entropy from.  This is simply a collection buffer, and when it
 * is full it will be "detached" from the source and added to the entropy
 * pool after entropy is distilled as much as possible.
 */
#define	RND_SAMPLE_COUNT	64	/* collect N samples, then compress */
typedef struct _rnd_sample_t {
	SIMPLEQ_ENTRY(_rnd_sample_t) next;
	krndsource_t	*source;
	int		cursor;
	int		entropy;
	uint32_t	ts[RND_SAMPLE_COUNT];
	uint32_t	values[RND_SAMPLE_COUNT];
} rnd_sample_t;
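
/*
 * With RND_SAMPLE_COUNT = 64, each buffer accumulates 64 (timestamp,
 * value) pairs -- 512 bytes of raw material -- before it is queued
 * for processing.
 */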

SIMPLEQ_HEAD(rnd_sampleq, _rnd_sample_t);

/*
 * The sample queue.  Samples are put into the queue and processed in a
 * softint in order to limit the latency of adding a sample.
 */
static struct {
	kmutex_t		lock;
	struct rnd_sampleq	q;
} rnd_samples __cacheline_aligned;

/*
 * Memory pool for sample buffers
 */
static pool_cache_t rnd_mempc __read_mostly;

/*
 * Global entropy pool and sources.
 */
static struct {
	kmutex_t		lock;
	rndpool_t		pool;
	LIST_HEAD(, krndsource)	sources;
	kcondvar_t		cv;
} rnd_global __cacheline_aligned;

/*
 * This source is used to easily "remove" queue entries when the source
 * which actually generated the events is going away.
 */
static krndsource_t rnd_source_no_collect = {
	/* LIST_ENTRY list */
	.name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't',
		   0, 0, 0, 0, 0, 0, 0 },
	.total = 0,
	.type = RND_TYPE_UNKNOWN,
	.flags = (RND_FLAG_NO_COLLECT |
		  RND_FLAG_NO_ESTIMATE),
	.state = NULL,
	.test_cnt = 0,
	.test = NULL
};

krndsource_t rnd_printf_source, rnd_autoconf_source;

static void *rnd_process __read_mostly;
static void *rnd_wakeup __read_mostly;

static inline uint32_t	rnd_counter(void);
static        void	rnd_intr(void *);
static	      void	rnd_wake(void *);
static	      void	rnd_process_events(void);
static	      void	rnd_add_data_ts(krndsource_t *, const void *const,
					uint32_t, uint32_t, uint32_t, bool);
static inline void	rnd_schedule_process(void);

int			rnd_ready = 0;
int			rnd_initial_entropy = 0;

static volatile unsigned	rnd_printing = 0;

#ifdef DIAGNOSTIC
static int		rnd_tested = 0;
static rngtest_t	rnd_rt;
static uint8_t		rnd_testbits[sizeof(rnd_rt.rt_b)];
#endif

static rndsave_t	*boot_rsp;

static inline void
rnd_printf(const char *fmt, ...)
{
	va_list ap;

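	/*
	 * Best-effort reentrancy guard: printing may itself feed the
	 * entropy machinery (cf. rnd_printf_source), so if a print is
	 * already in progress, on this CPU or another, just drop the
	 * message rather than risk recursion or deadlock.
	 */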
	if (atomic_cas_uint(&rnd_printing, 0, 1) != 0)
		return;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	rnd_printing = 0;
}

/*
 * Generate a 32-bit counter.
 */
static inline uint32_t
rnd_counter(void)
{
	struct bintime bt;
	uint32_t ret;

#if defined(__HAVE_CPU_COUNTER)
	if (cpu_hascounter())
		return cpu_counter32();
#endif
	if (!rnd_ready)
		/* Too early to call binuptime.  */
		return 0;

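	/* Fold all 128 bits of the bintime (64-bit sec + 64-bit frac) into 32. */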
	binuptime(&bt);
	ret = bt.sec;
	ret ^= bt.sec >> 32;
	ret ^= bt.frac;
	ret ^= bt.frac >> 32;

	return ret;
}

/*
 * We may be called from low IPL -- protect our softint.
 */

static inline void
rnd_schedule_softint(void *softint)
{

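	/*
	 * softint_schedule() acts on the current CPU, so pin ourselves
	 * (no preemption, hence no migration) for the duration; this
	 * makes the call safe from thread context at low IPL as well
	 * as from interrupt context.
	 */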
	kpreempt_disable();
	softint_schedule(softint);
	kpreempt_enable();
}

static inline void
rnd_schedule_process(void)
{

	if (__predict_true(rnd_process)) {
		rnd_schedule_softint(rnd_process);
		return;
	}
	rnd_process_events();
}

static inline void
rnd_schedule_wakeup(void)
{

	if (__predict_true(rnd_wakeup)) {
		rnd_schedule_softint(rnd_wakeup);
		return;
	}
	rndsinks_distribute();
}

/*
 * Tell any sources with "feed me" callbacks that we are hungry.
 */
void
rnd_getmore(size_t byteswanted)
{
	krndsource_t *rs, *next;

	/*
	 * Due to buffering in rnd_process_events, even if the entropy
	 * sources provide the requested number of bytes, users may not
	 * be woken because the data may be stuck in unfilled buffers.
	 * So ask for enough data to fill all the buffers.
	 *
	 * XXX Just get rid of this buffering and solve the
	 * /dev/random-as-side-channel-for-keystroke-timings a
	 * different way.
	 */
	byteswanted = MAX(byteswanted,
	    MAX(RND_POOLBITS/NBBY, sizeof(uint32_t)*RND_SAMPLE_COUNT));
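	/*
	 * That is: at least one full pool's worth of bytes, and at
	 * least one full sample buffer (sizeof(uint32_t) *
	 * RND_SAMPLE_COUNT = 256 bytes), whichever is larger.
	 */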

	mutex_spin_enter(&rnd_global.lock);
	LIST_FOREACH_SAFE(rs, &rnd_global.sources, list, next) {
		/* Skip if the source is disabled.  */
		if (!RND_ENABLED(rs))
			continue;

		/* Skip if there's no callback.  */
		if (!ISSET(rs->flags, RND_FLAG_HASCB))
			continue;
		KASSERT(rs->get != NULL);

		/* Skip if there are too many users right now.  */
		if (rs->refcnt == UINT_MAX)
			continue;

		/*
		 * Hold a reference while we release rnd_global.lock to
		 * call the callback.  The callback may in turn call
		 * rnd_add_data, which acquires rnd_global.lock.
		 */
		rs->refcnt++;
		mutex_spin_exit(&rnd_global.lock);
		rs->get(byteswanted, rs->getarg);
		mutex_spin_enter(&rnd_global.lock);
		if (--rs->refcnt == 0)
			cv_broadcast(&rnd_global.cv);

		/* Dribble some goo to the console.  */
		rnd_printf_verbose("rnd: entropy estimate %u bits\n",
		    rndpool_get_entropy_count(&rnd_global.pool));
		rnd_printf_verbose("rnd: asking source %s for %zu bytes\n",
		    rs->name, byteswanted);
	}
	mutex_spin_exit(&rnd_global.lock);

	/*
	 * Check whether we got entropy samples to process.  In that
	 * case, we may need to distribute entropy to waiters.  Do
	 * that, if we can do it asynchronously.
	 *
	 * - Conditionally because we don't want a softint loop.
	 * - Asynchronously because if we did it synchronously, we may
	 *   end up with lock recursion on rndsinks_lock.
	 */
	if (!SIMPLEQ_EMPTY(&rnd_samples.q) && rnd_process != NULL)
		rnd_schedule_process();
}

/*
 * Use the timing/value of the event to estimate the entropy gathered.
 * If all the differentials (first, second, and third) are non-zero, return
 * non-zero.  If any of these are zero, return zero.
 */
static inline uint32_t
rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta)
{
	int32_t delta2, delta3;

	d->insamples++;

	/*
	 * Calculate the second and third order differentials
	 */
	delta2 = d->dx - delta;
	if (delta2 < 0)
		delta2 = -delta2;

	delta3 = d->d2x - delta2;
	if (delta3 < 0)
		delta3 = -delta3;

	d->x = v;
	d->dx = delta;
	d->d2x = delta2;

	/*
	 * If any delta is 0, we got no entropy.  If all are non-zero, we
	 * might have something.
	 */
	if (delta == 0 || delta2 == 0 || delta3 == 0)
		return 0;

	d->outbits++;
	return 1;
}
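
/*
 * Illustrative trace: a source whose inter-sample delta keeps changing
 * (say 2, then 4, then 8) generally keeps all three differentials
 * non-zero and is credited one bit per sample.  A strictly periodic
 * source settles into a constant first-order delta, which zeroes the
 * second-order differential, so it earns nothing.
 */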

/*
 * Delta estimator for 32-bit timestamps.  Must handle wrap.
 */
static inline uint32_t
rnd_dt_estimate(krndsource_t *rs, uint32_t t)
{
	int32_t delta;
	uint32_t ret;
	rnd_delta_t *d = &rs->time_delta;

	if (t < d->x) {
		delta = UINT32_MAX - d->x + t;
	} else {
		delta = d->x - t;
	}

	if (delta < 0) {
		delta = -delta;
	}

	ret = rnd_delta_estimate(d, t, delta);

	KASSERT(d->x == t);
	KASSERT(d->dx == delta);
#ifdef RND_VERBOSE
	if (deltacnt++ % 1151 == 0) {
		rnd_printf_verbose("rnd_dt_estimate: %s x = %lld, dx = %lld, "
		       "d2x = %lld\n", rs->name,
		       (long long int)d->x, (long long int)d->dx,
		       (long long int)d->d2x);
	}
#endif
	return ret;
}

/*
 * Delta estimator for 32-bit values.  Unlike timestamps, values are
 * not assumed to wrap.
 */
static inline uint32_t
rnd_dv_estimate(krndsource_t *rs, uint32_t v)
{
	int32_t delta;
	uint32_t ret;
	rnd_delta_t *d = &rs->value_delta;

	delta = d->x - v;

	if (delta < 0) {
		delta = -delta;
	}
	ret = rnd_delta_estimate(d, v, (uint32_t)delta);

	KASSERT(d->x == v);
	KASSERT(d->dx == delta);
#ifdef RND_VERBOSE
	if (deltacnt++ % 1151 == 0) {
		rnd_printf_verbose("rnd_dv_estimate: %s x = %lld, dx = %lld, "
		       " d2x = %lld\n", rs->name,
		       (long long int)d->x,
		       (long long int)d->dx,
		       (long long int)d->d2x);
	}
#endif
	return ret;
}

#if defined(__HAVE_CPU_RNG) && !defined(_RUMPKERNEL)
static struct {
	kmutex_t	lock;	/* unfortunately, must protect krndsource */
	krndsource_t	source;
} rnd_cpu __cacheline_aligned;

static void
rnd_cpu_get(size_t bytes, void *priv)
{
	krndsource_t *cpusrcp = priv;
	cpu_rng_t buf[2 * RND_ENTROPY_THRESHOLD / sizeof(cpu_rng_t)];
	cpu_rng_t *bufp;
	size_t cnt = __arraycount(buf);
	size_t entropy = 0;
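	/*
	 * The buffer is sized to twice RND_ENTROPY_THRESHOLD so a
	 * single callback can deliver more than the initial-entropy
	 * threshold in one pass.
	 */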

	KASSERT(cpusrcp == &rnd_cpu.source);

	for (bufp = buf; bufp < buf + cnt; bufp++) {
		entropy += cpu_rng(bufp);
	}
	if (__predict_true(entropy)) {
		mutex_spin_enter(&rnd_cpu.lock);
		rnd_add_data_sync(cpusrcp, buf, sizeof(buf), entropy);
		explicit_memset(buf, 0, sizeof(buf));
		mutex_spin_exit(&rnd_cpu.lock);
	}
}

#endif

#if defined(__HAVE_CPU_COUNTER)
static struct {
	kmutex_t	lock;
	int		iter;
	struct callout	callout;
	krndsource_t	source;
} rnd_skew __cacheline_aligned;

static void rnd_skew_intr(void *);

static void
rnd_skew_enable(krndsource_t *rs, bool enabled)
{

	if (enabled) {
		rnd_skew_intr(rs);
	} else {
		callout_stop(&rnd_skew.callout);
	}
}

static void
rnd_skew_get(size_t bytes, void *priv)
{
	krndsource_t *skewsrcp __diagused = priv;

	KASSERT(skewsrcp == &rnd_skew.source);

	/* Measure 100 times */
	rnd_skew.iter = 100;
	callout_schedule(&rnd_skew.callout, 1);
}

static void
rnd_skew_intr(void *arg)
{
	/*
	 * Even on systems with seemingly stable clocks, the
	 * delta-time entropy estimator seems to think we get 1 bit here
	 * about every 2 calls.
	 */
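	/*
	 * Odd iterations take a counter sample and back off for hz/10
	 * ticks; even iterations just reschedule after one tick.  What
	 * lands in the pool is the jitter between the CPU cycle
	 * counter and the callout clock.
	 */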
	mutex_spin_enter(&rnd_skew.lock);

	if (RND_ENABLED(&rnd_skew.source)) {
		int next_ticks = 1;
		if (rnd_skew.iter & 1) {
			rnd_add_uint32(&rnd_skew.source, rnd_counter());
			next_ticks = hz / 10;
		}
		if (--rnd_skew.iter > 0) {
			callout_schedule(&rnd_skew.callout, next_ticks);
		}
	}
	mutex_spin_exit(&rnd_skew.lock);
}
#endif

void
rnd_init_softint(void)
{

	rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
	    rnd_intr, NULL);
	rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE,
	    rnd_wake, NULL);
	rnd_schedule_process();
}

/*
 * Entropy was just added to the pool.  If we crossed the threshold for
 * the first time, set rnd_initial_entropy = 1.
 */
static void
rnd_entropy_added(void)
{
	uint32_t pool_entropy;

	KASSERT(mutex_owned(&rnd_global.lock));

	if (__predict_true(rnd_initial_entropy))
		return;
	pool_entropy = rndpool_get_entropy_count(&rnd_global.pool);
	if (pool_entropy > RND_ENTROPY_THRESHOLD * NBBY) {
		rnd_printf_verbose("rnd: have initial entropy (%u)\n",
		    pool_entropy);
		rnd_initial_entropy = 1;
	}
}

/*
 * initialize the global random pool for our use.
 * rnd_init() must be called very early on in the boot process, so
 * the pool is ready for other devices to attach as sources.
 */
void
rnd_init(void)
{
	uint32_t c;

	if (rnd_ready)
		return;

	/*
	 * take a counter early, hoping that there's some variance in
	 * the following operations
	 */
	c = rnd_counter();

	rndsinks_init();

	/* Initialize the sample queue.  */
	mutex_init(&rnd_samples.lock, MUTEX_DEFAULT, IPL_VM);
	SIMPLEQ_INIT(&rnd_samples.q);

	/* Initialize the global pool and sources list.  */
	mutex_init(&rnd_global.lock, MUTEX_DEFAULT, IPL_VM);
	rndpool_init(&rnd_global.pool);
	LIST_INIT(&rnd_global.sources);
	cv_init(&rnd_global.cv, "rndsrc");

	rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0,
				    "rndsample", NULL, IPL_VM,
				    NULL, NULL, NULL);

	/*
	 * Set a resource limit.  rnd_process_events() runs every tick
	 * and drains the sample queue, but without a limit a burst of
	 * rnd_add_*() calls could otherwise eat all kernel memory.
	 */
	pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0);

	/*
	 * Mix *something*, *anything* into the pool to help it get started.
	 * However, it's not safe for rnd_counter() to call binuptime() yet,
	 * so on some platforms we might just end up with zeros anyway.
	 * XXX more things to add would be nice.
	 */
	if (c) {
		mutex_spin_enter(&rnd_global.lock);
		rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
		c = rnd_counter();
		rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
		mutex_spin_exit(&rnd_global.lock);
	}

	/*
	 * Attach CPU RNG if available.
	 */
#if defined(__HAVE_CPU_RNG) && !defined(_RUMPKERNEL)
	if (cpu_rng_init()) {
		/* IPL_VM because taken while rnd_global.lock is held.  */
		mutex_init(&rnd_cpu.lock, MUTEX_DEFAULT, IPL_VM);
		rndsource_setcb(&rnd_cpu.source, rnd_cpu_get, &rnd_cpu.source);
		rnd_attach_source(&rnd_cpu.source, "cpurng",
		    RND_TYPE_RNG, RND_FLAG_COLLECT_VALUE|
		    RND_FLAG_HASCB|RND_FLAG_HASENABLE);
		rnd_cpu_get(RND_ENTROPY_THRESHOLD, &rnd_cpu.source);
	}
#endif

	/*
	 * If we have a cycle counter, take its error with respect
	 * to the callout mechanism as a source of entropy, a la
	 * TrueRand.
	 */
#if defined(__HAVE_CPU_COUNTER)
	/* IPL_VM because taken while rnd_global.lock is held.  */
	mutex_init(&rnd_skew.lock, MUTEX_DEFAULT, IPL_VM);
	callout_init(&rnd_skew.callout, CALLOUT_MPSAFE);
	callout_setfunc(&rnd_skew.callout, rnd_skew_intr, NULL);
	rndsource_setcb(&rnd_skew.source, rnd_skew_get, &rnd_skew.source);
	rndsource_setenable(&rnd_skew.source, rnd_skew_enable);
	rnd_attach_source(&rnd_skew.source, "callout", RND_TYPE_SKEW,
	    RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|
	    RND_FLAG_HASCB|RND_FLAG_HASENABLE);
	rnd_skew.iter = 100;
	rnd_skew_intr(NULL);
#endif

	rnd_printf_verbose("rnd: initialised (%u)%s", RND_POOLBITS,
	    c ? " with counter\n" : "\n");
	if (boot_rsp != NULL) {
		mutex_spin_enter(&rnd_global.lock);
		rndpool_add_data(&rnd_global.pool, boot_rsp->data,
		    sizeof(boot_rsp->data),
		    MIN(boot_rsp->entropy, RND_POOLBITS / 2));
		rnd_entropy_added();
		mutex_spin_exit(&rnd_global.lock);
		rnd_printf("rnd: seeded with %u bits\n",
		    MIN(boot_rsp->entropy, RND_POOLBITS / 2));
		explicit_memset(boot_rsp, 0, sizeof(*boot_rsp));
	}
	rnd_attach_source(&rnd_printf_source, "printf", RND_TYPE_UNKNOWN,
			  RND_FLAG_NO_ESTIMATE);
	rnd_attach_source(&rnd_autoconf_source, "autoconf",
			  RND_TYPE_UNKNOWN,
			  RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME);
	rnd_ready = 1;
}

static rnd_sample_t *
rnd_sample_allocate(krndsource_t *source)
{
	rnd_sample_t *c;

	c = pool_cache_get(rnd_mempc, PR_WAITOK);
	if (c == NULL)
		return NULL;

	c->source = source;
	c->cursor = 0;
	c->entropy = 0;

	return c;
}

/*
 * Don't wait on allocation.  To be used in an interrupt context.
 */
static rnd_sample_t *
rnd_sample_allocate_isr(krndsource_t *source)
{
	rnd_sample_t *c;

	c = pool_cache_get(rnd_mempc, PR_NOWAIT);
	if (c == NULL)
		return NULL;

	c->source = source;
	c->cursor = 0;
	c->entropy = 0;

	return c;
}

static void
rnd_sample_free(rnd_sample_t *c)
{

	explicit_memset(c, 0, sizeof(*c));
	pool_cache_put(rnd_mempc, c);
}

/*
 * Add a source to our list of sources.
 */
void
rnd_attach_source(krndsource_t *rs, const char *name, uint32_t type,
    uint32_t flags)
{
	uint32_t ts;

	ts = rnd_counter();

	strlcpy(rs->name, name, sizeof(rs->name));
	memset(&rs->time_delta, 0, sizeof(rs->time_delta));
	rs->time_delta.x = ts;
	memset(&rs->value_delta, 0, sizeof(rs->value_delta));
	rs->total = 0;

	/*
	 * Some source setup, by type
	 */
	rs->test = NULL;
	rs->test_cnt = -1;

	if (flags == 0) {
		flags = RND_FLAG_DEFAULT;
	}

	switch (type) {
	case RND_TYPE_NET:		/* Don't collect by default */
		flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);
		break;
	case RND_TYPE_RNG:		/* Space for statistical testing */
		rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP);
		rs->test_cnt = 0;
		/* FALLTHRU */
	case RND_TYPE_VM:		/* Process samples in bulk always */
		flags |= RND_FLAG_FAST;
		break;
	default:
		break;
	}

	rs->type = type;
	rs->flags = flags;
	rs->refcnt = 1;

	rs->state = rnd_sample_allocate(rs);

	mutex_spin_enter(&rnd_global.lock);
	LIST_INSERT_HEAD(&rnd_global.sources, rs, list);

#ifdef RND_VERBOSE
	rnd_printf_verbose("rnd: %s attached as an entropy source (",
	    rs->name);
	if (!(flags & RND_FLAG_NO_COLLECT)) {
		rnd_printf_verbose("collecting");
		if (flags & RND_FLAG_NO_ESTIMATE)
			rnd_printf_verbose(" without estimation");
	} else {
		rnd_printf_verbose("off");
	}
	rnd_printf_verbose(")\n");
#endif

	/*
	 * Again, put some more initial junk in the pool.
	 * FreeBSD claims to have an analysis showing 4 bits of
	 * entropy per source-attach timestamp.  I am skeptical,
	 * but we count 1 bit per source here.
	 */
	rndpool_add_data(&rnd_global.pool, &ts, sizeof(ts), 1);
	mutex_spin_exit(&rnd_global.lock);
}
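
/*
 * Typical driver usage, as a sketch (the sc_rndsource softc member
 * and the use of device_xname() below are illustrative assumptions,
 * not requirements of this API):
 *
 *	rnd_attach_source(&sc->sc_rndsource, device_xname(self),
 *	    RND_TYPE_NET, RND_FLAG_DEFAULT);
 *	...
 *	rnd_add_uint32(&sc->sc_rndsource, status);	(e.g. per interrupt)
 *	...
 *	rnd_detach_source(&sc->sc_rndsource);
 */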

/*
 * Remove a source from our list of sources.
 */
void
rnd_detach_source(krndsource_t *source)
{
	rnd_sample_t *sample;

	mutex_spin_enter(&rnd_global.lock);
	LIST_REMOVE(source, list);
	if (0 < --source->refcnt) {
		do {
			cv_wait(&rnd_global.cv, &rnd_global.lock);
		} while (0 < source->refcnt);
	}
	mutex_spin_exit(&rnd_global.lock);

	/*
	 * If there are samples queued up "remove" them from the sample queue
	 * by setting the source to the no-collect pseudosource.
	 */
	mutex_spin_enter(&rnd_samples.lock);
	sample = SIMPLEQ_FIRST(&rnd_samples.q);
	while (sample != NULL) {
		if (sample->source == source)
			sample->source = &rnd_source_no_collect;

		sample = SIMPLEQ_NEXT(sample, next);
	}
	mutex_spin_exit(&rnd_samples.lock);

	if (source->state) {
		rnd_sample_free(source->state);
		source->state = NULL;
	}

	if (source->test) {
		kmem_free(source->test, sizeof(rngtest_t));
	}

	rnd_printf_verbose("rnd: %s detached as an entropy source\n",
	    source->name);
}

static inline uint32_t
rnd_estimate(krndsource_t *rs, uint32_t ts, uint32_t val)
{
	uint32_t entropy = 0, dt_est, dv_est;

	dt_est = rnd_dt_estimate(rs, ts);
	dv_est = rnd_dv_estimate(rs, val);

	if (!(rs->flags & RND_FLAG_NO_ESTIMATE)) {
		if (rs->flags & RND_FLAG_ESTIMATE_TIME) {
			entropy += dt_est;
		}

		if (rs->flags & RND_FLAG_ESTIMATE_VALUE) {
			entropy += dv_est;
		}
	}
	return entropy;
}

/*
 * Add a 32-bit value to the entropy pool.  The rs parameter should point to
 * the source-specific source structure.
 */
void
_rnd_add_uint32(krndsource_t *rs, uint32_t val)
{
	uint32_t ts;
	uint32_t entropy = 0;

	if (rs->flags & RND_FLAG_NO_COLLECT)
		return;

	/*
	 * Sample the counter as soon as possible to avoid
	 * entropy overestimation.
	 */
	ts = rnd_counter();

	/*
	 * Calculate estimates - we may not use them, but if we do
	 * not calculate them, the estimators' history becomes invalid.
	 */
	entropy = rnd_estimate(rs, ts, val);

	rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts, true);
}

void
_rnd_add_uint64(krndsource_t *rs, uint64_t val)
{
	uint32_t ts;
	uint32_t entropy = 0;

	if (rs->flags & RND_FLAG_NO_COLLECT)
		return;

	/*
	 * Sample the counter as soon as possible to avoid
	 * entropy overestimation.
	 */
	ts = rnd_counter();

	/*
	 * Calculate estimates - we may not use them, but if we do
	 * not calculate them, the estimators' history becomes invalid.
	 */
	entropy = rnd_estimate(rs, ts, (uint32_t)(val & (uint64_t)0xffffffff));

	rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts, true);
}

void
rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len,
	     uint32_t entropy)
{

	/*
	 * This interface is meant for feeding data which is,
	 * itself, random.  Don't estimate entropy based on
	 * timestamp, just directly add the data.
	 */
	if (__predict_false(rs == NULL)) {
		mutex_spin_enter(&rnd_global.lock);
		rndpool_add_data(&rnd_global.pool, data, len, entropy);
		mutex_spin_exit(&rnd_global.lock);
	} else {
		rnd_add_data_ts(rs, data, len, entropy, rnd_counter(), true);
	}
}

void
rnd_add_data_sync(krndsource_t *rs, const void *data, uint32_t len,
    uint32_t entropy)
{

	KASSERT(rs != NULL);
	rnd_add_data_ts(rs, data, len, entropy, rnd_counter(), false);
}

static void
rnd_add_data_ts(krndsource_t *rs, const void *const data, uint32_t len,
    uint32_t entropy, uint32_t ts, bool schedule)
{
	rnd_sample_t *state = NULL;
	const uint8_t *p = data;
	uint32_t dint;
	int todo, done, filled = 0;
	int sample_count;
	struct rnd_sampleq tmp_samples = SIMPLEQ_HEAD_INITIALIZER(tmp_samples);

	if (rs &&
	    (rs->flags & RND_FLAG_NO_COLLECT ||
		__predict_false(!(rs->flags &
			(RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE))))) {
		return;
	}
	todo = len / sizeof(dint);
	/*
	 * Let's try to be efficient: if we are warm, and a source
	 * is adding entropy at a rate of at least 1 bit every 10 seconds,
	 * mark it as "fast" and add its samples in bulk.
	 */
	if (__predict_true(rs->flags & RND_FLAG_FAST) ||
	    (todo >= RND_SAMPLE_COUNT)) {
		sample_count = RND_SAMPLE_COUNT;
	} else {
		if (!(rs->flags & RND_FLAG_HASCB) &&
		    !cold && rnd_initial_entropy) {
			struct timeval upt;

			getmicrouptime(&upt);
			if ((upt.tv_sec > 0 && rs->total > upt.tv_sec * 10) ||
			    (upt.tv_sec > 10 && rs->total > upt.tv_sec) ||
			    (upt.tv_sec > 100 &&
			      rs->total > upt.tv_sec / 10)) {
				rnd_printf_verbose("rnd: source %s is fast"
				    " (%d samples at once,"
				    " %d bits in %lld seconds), "
				    "processing samples in bulk.\n",
				    rs->name, todo, rs->total,
				    (long long int)upt.tv_sec);
				rs->flags |= RND_FLAG_FAST;
			}
		}
		sample_count = 2;
	}

	/*
	 * Loop over data packaging it into sample buffers.
	 * If a sample buffer allocation fails, drop all data.
	 */
	for (done = 0; done < todo ; done++) {
		state = rs->state;
		if (state == NULL) {
			state = rnd_sample_allocate_isr(rs);
			if (__predict_false(state == NULL)) {
				break;
			}
			rs->state = state;
		}

		state->ts[state->cursor] = ts;
		(void)memcpy(&dint, &p[done*4], 4);
		state->values[state->cursor] = dint;
		state->cursor++;

		if (state->cursor == sample_count) {
			SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next);
			filled++;
			rs->state = NULL;
		}
	}

	if (__predict_false(state == NULL)) {
		while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
			SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
			rnd_sample_free(state);
		}
		return;
	}

	/*
	 * Claim all the entropy on the last one we send to
	 * the pool, so we don't rely on it being evenly distributed
	 * in the supplied data.
	 *
	 * XXX The rndpool code must accept samples with more
	 * XXX claimed entropy than bits for this to work right.
	 */
	state->entropy += entropy;
	rs->total += entropy;

	/*
	 * If we didn't finish any sample buffers, we're done.
	 */
	if (!filled) {
		return;
	}

	mutex_spin_enter(&rnd_samples.lock);
	while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
		SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
		SIMPLEQ_INSERT_HEAD(&rnd_samples.q, state, next);
	}
	mutex_spin_exit(&rnd_samples.lock);

	/* Cause processing of queued samples, if caller wants it.  */
	if (schedule)
		rnd_schedule_process();
}

static int
rnd_hwrng_test(rnd_sample_t *sample)
{
	krndsource_t *source = sample->source;
	size_t cmplen;
	uint8_t *v1, *v2;
	size_t resid, totest;

	KASSERT(source->type == RND_TYPE_RNG);

	/*
	 * Continuous-output test: compare two halves of the
	 * sample buffer to each other.  The sample buffer (64 32-bit
	 * words, i.e. 256 bytes) should be much larger than a typical
	 * hardware RNG output, so this seems a reasonable way to do it
	 * without retaining extra data.
	 */
	cmplen = sizeof(sample->values) / 2;
	v1 = (uint8_t *)sample->values;
	v2 = (uint8_t *)sample->values + cmplen;

	if (__predict_false(!memcmp(v1, v2, cmplen))) {
		rnd_printf("rnd: source \"%s\""
		    " failed continuous-output test.\n",
		    source->name);
		return 1;
	}

	/*
	 * FIPS 140 statistical RNG test.  We must accumulate 20,000 bits.
	 */
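	/*
	 * That is 2,500 bytes; at up to RND_SAMPLE_COUNT * 4 = 256
	 * bytes per sample buffer, roughly ten buffers accumulate
	 * before rngtest() can run.
	 */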
	if (__predict_true(source->test_cnt == -1)) {
		/* already passed the test */
		return 0;
	}
	resid = FIPS140_RNG_TEST_BYTES - source->test_cnt;
	totest = MIN(RND_SAMPLE_COUNT * 4, resid);
	memcpy(source->test->rt_b + source->test_cnt, sample->values, totest);
	resid -= totest;
	source->test_cnt += totest;
	if (resid == 0) {
		strlcpy(source->test->rt_name, source->name,
			sizeof(source->test->rt_name));
		if (rngtest(source->test)) {
			rnd_printf("rnd: source \"%s\""
			    " failed statistical test.",
			    source->name);
			return 1;
		}
		source->test_cnt = -1;
		explicit_memset(source->test, 0, sizeof(*source->test));
	}
	return 0;
}

/*
 * Process the events in the ring buffer.  Called by rnd_timeout or
 * by the add routines directly if the callout has never fired (that
 * is, if we are "cold" -- just booted).
 */
static void
rnd_process_events(void)
{
	rnd_sample_t *sample = NULL;
	krndsource_t *source;
	static krndsource_t *last_source;
	uint32_t entropy;
	size_t pool_entropy;
	int wake = 0;
	struct rnd_sampleq dq_samples = SIMPLEQ_HEAD_INITIALIZER(dq_samples);
	struct rnd_sampleq df_samples = SIMPLEQ_HEAD_INITIALIZER(df_samples);

	/*
	 * Drain to the on-stack queue and drop the lock.
	 */
	mutex_spin_enter(&rnd_samples.lock);
	while ((sample = SIMPLEQ_FIRST(&rnd_samples.q))) {
		SIMPLEQ_REMOVE_HEAD(&rnd_samples.q, next);
		/*
		 * We repeat this check here, since it is possible
		 * the source was disabled before we were called, but
		 * after the entry was queued.
		 */
		if (__predict_false(!(sample->source->flags &
			    (RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE)))) {
			SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
		} else {
			SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next);
		}
	}
	mutex_spin_exit(&rnd_samples.lock);

	/* Don't thrash the rndpool mtx either.  Hold, add all samples. */
	mutex_spin_enter(&rnd_global.lock);

	pool_entropy = rndpool_get_entropy_count(&rnd_global.pool);

	while ((sample = SIMPLEQ_FIRST(&dq_samples))) {
		int sample_count;

		SIMPLEQ_REMOVE_HEAD(&dq_samples, next);
		source = sample->source;
		entropy = sample->entropy;
		sample_count = sample->cursor;

		/*
		 * Don't provide a side channel for timing attacks on
		 * low-rate sources: require mixing with some other
		 * source before we schedule a wakeup.
		 */
		if (!wake &&
		    (source != last_source || source->flags & RND_FLAG_FAST)) {
			wake++;
		}
		last_source = source;

		/*
		 * If the source has been disabled, ignore samples from
		 * it.
		 */
		if (source->flags & RND_FLAG_NO_COLLECT)
			goto skip;

		/*
		 * Hardware generators are great but sometimes they
		 * have...hardware issues.  Don't use any data from
		 * them unless it passes some tests.
		 */
		if (source->type == RND_TYPE_RNG) {
			if (__predict_false(rnd_hwrng_test(sample))) {
				source->flags |= RND_FLAG_NO_COLLECT;
				rnd_printf("rnd: disabling source \"%s\".\n",
				    source->name);
				goto skip;
			}
		}

		if (source->flags & RND_FLAG_COLLECT_VALUE) {
			rndpool_add_data(&rnd_global.pool, sample->values,
			    sample_count * sizeof(sample->values[1]),
			    0);
		}
		if (source->flags & RND_FLAG_COLLECT_TIME) {
			rndpool_add_data(&rnd_global.pool, sample->ts,
			    sample_count * sizeof(sample->ts[1]),
			    0);
		}

		pool_entropy += entropy;
		source->total += sample->entropy;
skip:		SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
	}
	rndpool_set_entropy_count(&rnd_global.pool, pool_entropy);
	rnd_entropy_added();
	mutex_spin_exit(&rnd_global.lock);

	/*
	 * If we filled the pool past the threshold, wake anyone
	 * waiting for entropy.
	 */
	if (pool_entropy > RND_ENTROPY_THRESHOLD * NBBY) {
		wake++;
	}

	/* Now we hold no locks: clean up. */
	while ((sample = SIMPLEQ_FIRST(&df_samples))) {
		SIMPLEQ_REMOVE_HEAD(&df_samples, next);
		rnd_sample_free(sample);
	}

	/*
	 * Wake up any potential readers waiting.
	 */
	if (wake) {
		rnd_schedule_wakeup();
	}
}

static void
rnd_intr(void *arg)
{

	rnd_process_events();
}

static void
rnd_wake(void *arg)
{

	rndsinks_distribute();
}

static uint32_t
rnd_extract_data(void *p, uint32_t len, uint32_t flags)
{
	static int timed_in;
	uint32_t retval;

	mutex_spin_enter(&rnd_global.lock);
	if (__predict_false(!timed_in)) {
		if (boottime.tv_sec) {
			rndpool_add_data(&rnd_global.pool, &boottime,
			    sizeof(boottime), 0);
		}
		timed_in++;
	}
	if (__predict_false(!rnd_initial_entropy)) {
		uint32_t c;

		rnd_printf_verbose("rnd: WARNING! initial entropy low (%u).\n",
		    rndpool_get_entropy_count(&rnd_global.pool));
		/* Try once again to put something in the pool */
		c = rnd_counter();
		rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
	}

#ifdef DIAGNOSTIC
	while (!rnd_tested) {
		int entropy_count =
		    rndpool_get_entropy_count(&rnd_global.pool);
		rnd_printf_verbose("rnd: starting statistical RNG test,"
		    " entropy = %d.\n",
		    entropy_count);
		if (rndpool_extract_data(&rnd_global.pool, rnd_rt.rt_b,
			sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY)
		    != sizeof(rnd_rt.rt_b)) {
			panic("rnd: could not get bits for statistical test");
		}
		/*
		 * Stash the tested bits so we can put them back in the
		 * pool, restoring the entropy count.  DO NOT rely on
		 * rngtest to maintain the bits pristine -- we could end
		 * up adding back non-random data claiming it was pure
		 * entropy.
		 */
		memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b));
		strlcpy(rnd_rt.rt_name, "entropy pool",
		    sizeof(rnd_rt.rt_name));
		if (rngtest(&rnd_rt)) {
			/*
			 * The probability of a Type I error is 3/10000,
			 * but note this can only happen at boot time.
			 * The relevant standard says to reset the module,
			 * but developers objected...
			 */
			rnd_printf("rnd: WARNING, ENTROPY POOL FAILED "
			    "STATISTICAL TEST!\n");
			continue;
		}
		explicit_memset(&rnd_rt, 0, sizeof(rnd_rt));
		rndpool_add_data(&rnd_global.pool, rnd_testbits,
		    sizeof(rnd_testbits), entropy_count);
		explicit_memset(rnd_testbits, 0, sizeof(rnd_testbits));
		rnd_printf_verbose("rnd: statistical RNG test done,"
		    " entropy = %d.\n",
		    rndpool_get_entropy_count(&rnd_global.pool));
		rnd_tested++;
	}
#endif
	retval = rndpool_extract_data(&rnd_global.pool, p, len, flags);
	mutex_spin_exit(&rnd_global.lock);

	return retval;
}

/*
 * Fill the buffer with as much entropy as we can.  Return true if it
 * has full entropy and false if not.
 */
bool
rnd_extract(void *buffer, size_t bytes)
{
	const size_t extracted = rnd_extract_data(buffer, bytes,
	    RND_EXTRACT_GOOD);

	if (extracted < bytes) {
		rnd_getmore(bytes - extracted);
		(void)rnd_extract_data((uint8_t *)buffer + extracted,
		    bytes - extracted, RND_EXTRACT_ANY);
		return false;
	}

	return true;
}

/*
 * If we have as much entropy as is requested, fill the buffer with it
 * and return true.  Otherwise, leave the buffer alone and return
 * false.
 */

CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL);
CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD));
CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <=
	    (0xffffffffUL / NBBY));
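
/*
 * The assertions above guarantee that the bits_needed computation in
 * rnd_tryextract() -- (bytes + RND_ENTROPY_THRESHOLD) * NBBY with
 * bytes <= RNDSINK_MAX_BYTES -- cannot overflow a uint32_t.
 */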

bool
rnd_tryextract(void *buffer, size_t bytes)
{
	uint32_t bits_needed, bytes_requested;

	KASSERT(bytes <= RNDSINK_MAX_BYTES);
	bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY);

	mutex_spin_enter(&rnd_global.lock);
	if (bits_needed <= rndpool_get_entropy_count(&rnd_global.pool)) {
		const uint32_t extracted __diagused =
		    rndpool_extract_data(&rnd_global.pool, buffer, bytes,
			RND_EXTRACT_GOOD);

		KASSERT(extracted == bytes);
		bytes_requested = 0;
	} else {
		/* XXX Figure the threshold into this...  */
		bytes_requested = howmany((bits_needed -
			rndpool_get_entropy_count(&rnd_global.pool)), NBBY);
		KASSERT(0 < bytes_requested);
	}
	mutex_spin_exit(&rnd_global.lock);

	if (0 < bytes_requested)
		rnd_getmore(bytes_requested);

	return bytes_requested == 0;
}

void
rnd_seed(void *base, size_t len)
{
	SHA1_CTX s;
	uint8_t digest[SHA1_DIGEST_LENGTH];

	if (len != sizeof(*boot_rsp)) {
		rnd_printf("rnd: bad seed length %d\n", (int)len);
		return;
	}

	boot_rsp = (rndsave_t *)base;
	SHA1Init(&s);
	SHA1Update(&s, (uint8_t *)&boot_rsp->entropy,
	    sizeof(boot_rsp->entropy));
	SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data));
	SHA1Final(digest, &s);

	if (memcmp(digest, boot_rsp->digest, sizeof(digest))) {
		rnd_printf("rnd: bad seed checksum\n");
		return;
	}

	/*
	 * It's not really well-defined whether bootloader-supplied
	 * modules run before or after rnd_init().  Handle both cases.
	 */
	if (rnd_ready) {
		rnd_printf_verbose("rnd: ready,"
		    " feeding in seed data directly.\n");
		mutex_spin_enter(&rnd_global.lock);
		rndpool_add_data(&rnd_global.pool, boot_rsp->data,
		    sizeof(boot_rsp->data),
		    MIN(boot_rsp->entropy, RND_POOLBITS / 2));
		explicit_memset(boot_rsp, 0, sizeof(*boot_rsp));
		mutex_spin_exit(&rnd_global.lock);
	} else {
		rnd_printf_verbose("rnd: not ready, deferring seed feed.\n");
	}
}

static void
krndsource_to_rndsource(krndsource_t *kr, rndsource_t *r)
{

	memset(r, 0, sizeof(*r));
	strlcpy(r->name, kr->name, sizeof(r->name));
	r->total = kr->total;
	r->type = kr->type;
	r->flags = kr->flags;
}

static void
krndsource_to_rndsource_est(krndsource_t *kr, rndsource_est_t *re)
{

	memset(re, 0, sizeof(*re));
	krndsource_to_rndsource(kr, &re->rt);
	re->dt_samples = kr->time_delta.insamples;
	re->dt_total = kr->time_delta.outbits;
	re->dv_samples = kr->value_delta.insamples;
	re->dv_total = kr->value_delta.outbits;
}

static void
krs_setflags(krndsource_t *kr, uint32_t flags, uint32_t mask)
{
	uint32_t oflags = kr->flags;

	kr->flags &= ~mask;
	kr->flags |= (flags & mask);

	if (oflags & RND_FLAG_HASENABLE &&
	    ((oflags & RND_FLAG_NO_COLLECT) !=
		(flags & RND_FLAG_NO_COLLECT))) {
		kr->enable(kr, !(flags & RND_FLAG_NO_COLLECT));
	}
}
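
/*
 * Note: the enable callback fires only when the NO_COLLECT bit
 * actually changes, so toggling other flags through RNDCTL will not
 * spuriously restart or stop a source.
 */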

int
rnd_system_ioctl(struct file *fp, u_long cmd, void *addr)
{
	krndsource_t *kr;
	rndstat_t *rst;
	rndstat_name_t *rstnm;
	rndstat_est_t *rset;
	rndstat_est_name_t *rsetnm;
	rndctl_t *rctl;
	rnddata_t *rnddata;
	uint32_t count, start;
	int ret = 0;
	int estimate_ok = 0, estimate = 0;

	switch (cmd) {
	case RNDGETENTCNT:
		break;

	case RNDGETPOOLSTAT:
	case RNDGETSRCNUM:
	case RNDGETSRCNAME:
	case RNDGETESTNUM:
	case RNDGETESTNAME:
		ret = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
		if (ret)
			return ret;
		break;

	case RNDCTL:
		ret = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
		if (ret)
			return ret;
		break;

	case RNDADDDATA:
		ret = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
		if (ret)
			return ret;
		estimate_ok = !kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL);
		break;

	default:
#ifdef COMPAT_50
		return compat_50_rnd_ioctl(fp, cmd, addr);
#else
		return ENOTTY;
#endif
	}

	switch (cmd) {
	case RNDGETENTCNT:
		mutex_spin_enter(&rnd_global.lock);
		*(uint32_t *)addr =
		    rndpool_get_entropy_count(&rnd_global.pool);
		mutex_spin_exit(&rnd_global.lock);
		break;

	case RNDGETPOOLSTAT:
		mutex_spin_enter(&rnd_global.lock);
		rndpool_get_stats(&rnd_global.pool, addr,
		    sizeof(rndpoolstat_t));
		mutex_spin_exit(&rnd_global.lock);
		break;

	case RNDGETSRCNUM:
		rst = (rndstat_t *)addr;

		if (rst->count == 0)
			break;

		if (rst->count > RND_MAXSTATCOUNT)
			return EINVAL;

		mutex_spin_enter(&rnd_global.lock);
		/*
		 * Find the starting source by running through the
		 * list of sources.
		 */
		kr = LIST_FIRST(&rnd_global.sources);
		start = rst->start;
		while (kr != NULL && start >= 1) {
			kr = LIST_NEXT(kr, list);
			start--;
		}

		/*
		 * Return up to as many structures as the user asked
		 * for.  If we run out of sources, a count of zero
		 * will be returned, without an error.
		 */
		for (count = 0; count < rst->count && kr != NULL; count++) {
			krndsource_to_rndsource(kr, &rst->source[count]);
			kr = LIST_NEXT(kr, list);
		}

		rst->count = count;

		mutex_spin_exit(&rnd_global.lock);
		break;

	case RNDGETESTNUM:
		rset = (rndstat_est_t *)addr;

		if (rset->count == 0)
			break;

		if (rset->count > RND_MAXSTATCOUNT)
			return EINVAL;

		mutex_spin_enter(&rnd_global.lock);
		/*
		 * Find the starting source by running through the
		 * list of sources.
		 */
		kr = LIST_FIRST(&rnd_global.sources);
		start = rset->start;
		while (kr != NULL && start > 0) {
			kr = LIST_NEXT(kr, list);
			start--;
		}

		/*
		 * Return up to as many structures as the user asked
		 * for.  If we run out of sources, a count of zero
		 * will be returned, without an error.
		 */
		for (count = 0; count < rset->count && kr != NULL; count++) {
			krndsource_to_rndsource_est(kr, &rset->source[count]);
			kr = LIST_NEXT(kr, list);
		}

		rset->count = count;

		mutex_spin_exit(&rnd_global.lock);
		break;

	case RNDGETSRCNAME:
		/*
		 * Scan through the list, trying to find the name.
		 */
		mutex_spin_enter(&rnd_global.lock);
		rstnm = (rndstat_name_t *)addr;
		kr = LIST_FIRST(&rnd_global.sources);
		while (kr != NULL) {
			if (strncmp(kr->name, rstnm->name,
				MIN(sizeof(kr->name),
				    sizeof(rstnm->name))) == 0) {
				krndsource_to_rndsource(kr, &rstnm->source);
				mutex_spin_exit(&rnd_global.lock);
				return 0;
			}
			kr = LIST_NEXT(kr, list);
		}
		mutex_spin_exit(&rnd_global.lock);

		ret = ENOENT;		/* name not found */

		break;

	case RNDGETESTNAME:
		/*
		 * Scan through the list, trying to find the name.
		 */
		mutex_spin_enter(&rnd_global.lock);
		rsetnm = (rndstat_est_name_t *)addr;
		kr = LIST_FIRST(&rnd_global.sources);
		while (kr != NULL) {
			if (strncmp(kr->name, rsetnm->name,
				MIN(sizeof(kr->name), sizeof(rsetnm->name)))
			    == 0) {
				krndsource_to_rndsource_est(kr,
				    &rsetnm->source);
				mutex_spin_exit(&rnd_global.lock);
				return 0;
			}
			kr = LIST_NEXT(kr, list);
		}
		mutex_spin_exit(&rnd_global.lock);

		ret = ENOENT;		/* name not found */

		break;

	case RNDCTL:
		/*
		 * Set flags to enable/disable entropy counting and/or
		 * collection.
		 */
		mutex_spin_enter(&rnd_global.lock);
		rctl = (rndctl_t *)addr;
		kr = LIST_FIRST(&rnd_global.sources);

		/*
		 * Flags set apply to all sources of this type.
		 */
		if (rctl->type != 0xff) {
			while (kr != NULL) {
				if (kr->type == rctl->type) {
					krs_setflags(kr, rctl->flags,
					    rctl->mask);
				}
				kr = LIST_NEXT(kr, list);
			}
			mutex_spin_exit(&rnd_global.lock);
			return 0;
		}

		/*
		 * Scan through the list, trying to find the name.
		 */
		while (kr != NULL) {
			if (strncmp(kr->name, rctl->name,
				MIN(sizeof(kr->name), sizeof(rctl->name)))
			    == 0) {
				krs_setflags(kr, rctl->flags, rctl->mask);
				mutex_spin_exit(&rnd_global.lock);
				return 0;
			}
			kr = LIST_NEXT(kr, list);
		}

		mutex_spin_exit(&rnd_global.lock);
		ret = ENOENT;		/* name not found */

		break;

	case RNDADDDATA:
		/*
		 * Don't seed twice if our bootloader has
		 * seed loading support.
		 */
		if (!boot_rsp) {
			rnddata = (rnddata_t *)addr;

			if (rnddata->len > sizeof(rnddata->data))
				return EINVAL;

			if (estimate_ok) {
				/*
				 * Do not accept absurd entropy estimates, and
				 * do not flood the pool with entropy such that
				 * new samples are discarded henceforth.
				 */
				estimate = MIN((rnddata->len * NBBY) / 2,
				    MIN(rnddata->entropy, RND_POOLBITS / 2));
			} else {
				estimate = 0;
			}

			mutex_spin_enter(&rnd_global.lock);
			rndpool_add_data(&rnd_global.pool, rnddata->data,
			    rnddata->len, estimate);
			rnd_entropy_added();
			mutex_spin_exit(&rnd_global.lock);

			rndsinks_distribute();
		} else {
			rnd_printf_verbose("rnd"
			    ": already seeded by boot loader\n");
		}
		break;

	default:
		return ENOTTY;
	}

	return ret;
}