xref: /openbsd/sys/dev/rnd.c (revision e289b4af)
1 /*	$OpenBSD: rnd.c,v 1.211 2020/05/16 15:53:48 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2011 Theo de Raadt.
5  * Copyright (c) 2008 Damien Miller.
6  * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
7  * Copyright (c) 2013 Markus Friedl.
8  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, and the entire permission notice in its entirety,
16  *    including the disclaimer of warranties.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. The name of the author may not be used to endorse or promote
21  *    products derived from this software without specific prior
22  *    written permission.
23  *
24  * ALTERNATIVELY, this product may be distributed under the terms of
25  * the GNU Public License, in which case the provisions of the GPL are
26  * required INSTEAD OF the above restrictions.  (This clause is
27  * necessary due to a potential bad interaction between the GPL and
28  * the restrictions contained in a BSD-style copyright.)
29  *
30  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
31  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
32  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
33  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
34  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
35  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
36  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
38  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
40  * OF THE POSSIBILITY OF SUCH DAMAGE.
41  */
42 
43 /*
44  * The bootblocks pre-fill the kernel .openbsd.randomdata section with seed
45  * material (on-disk from previous boot, hopefully mixed with a hardware rng).
46  * The first arc4random(9) call initializes this seed material as a chacha
47  * state.  Calls can be done early in kernel bootstrap code -- early use is
48  * encouraged.
49  *
50  * After the kernel timeout subsystem is initialized, random_start() prepares
51  * the entropy collection mechanism enqueue_randomness() and timeout-driven
52  * mixing into the chacha state.  The first submissions come from device
53  * probes, later on interrupt-time submissions are more common.  Entropy
54  * data (and timing information) is XOR spread over the entropy input ring
55  * rnd_event_space[] for later integration.
56  *
57  * Based upon timeouts, data in the entropy input ring rnd_event_space[] is
58  * drawn down, CRC bit-distributed and mixed into entropy_pool[].
59  *
60  * From time to time, entropy_pool[] is SHA512-whitened, mixed with time
61  * information again, XOR'd with the inner and outer states of the existing
62  * chacha state, to create a new chacha state.
63  */
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/event.h>
68 #include <sys/ioctl.h>
69 #include <sys/malloc.h>
70 #include <sys/timeout.h>
71 #include <sys/mutex.h>
72 #include <sys/task.h>
73 #include <sys/msgbuf.h>
74 #include <sys/mount.h>
75 #include <sys/syscallargs.h>
76 
77 #include <crypto/sha2.h>
78 
79 #define KEYSTREAM_ONLY
80 #include <crypto/chacha_private.h>
81 
82 #include <dev/rndvar.h>
83 
84 #include <uvm/uvm_param.h>
85 #include <uvm/uvm_extern.h>
86 
87 /*
88  * For the purposes of better mixing, we use the CRC-32 polynomial as
89  * well to make a twisted Generalized Feedback Shift Register
90  *
91  * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
92  * Transactions on Modeling and Computer Simulation 2(3):179-194.
93  * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
94  * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
95  */
96 
97 /*
98  * Stirring polynomials over GF(2) for various pool sizes. Used in
99  * add_entropy_words() below.
100  *
101  * The polynomial terms are chosen to be evenly spaced (minimum RMS
102  * distance from evenly spaced; except for the last tap, which is 1 to
103  * get the twisting happening as fast as possible.
104  *
105  * The resultant polynomial is:
106  *   2^POOLWORDS + 2^POOL_TAP1 + 2^POOL_TAP2 + 2^POOL_TAP3 + 2^POOL_TAP4 + 1
107  */
108 #define POOLWORDS	2048
109 #define POOLBYTES	(POOLWORDS*4)
110 #define POOLMASK	(POOLWORDS - 1)
111 #define	POOL_TAP1	1638
112 #define	POOL_TAP2	1231
113 #define	POOL_TAP3	819
114 #define	POOL_TAP4	411
115 
116 /*
117  * Raw entropy collection from device drivers; at interrupt context or not.
118  * enqueue_randomness() is used to submit data into the entropy input ring.
119  */
120 
121 #define QEVLEN	128		 /* must be a power of 2 */
122 #define QEVSLOW (QEVLEN * 3 / 4) /* yet another 0.75 for 60-minutes hour /-; */
123 
#define KEYSZ	32		/* ChaCha20 key size, bytes */
#define IVSZ	8		/* ChaCha20 IV size, bytes */
#define BLOCKSZ	64		/* ChaCha20 block size, bytes */
#define RSBUFSZ	(16*BLOCKSZ)	/* keystream buffer size, bytes */
/*
 * extract_entropy() output size.  Parenthesized so the macro expands
 * safely inside arithmetic expressions (the previous bare KEYSZ + IVSZ
 * would mis-expand in contexts like `2 * EBUFSIZE`).
 */
#define EBUFSIZE	(KEYSZ + IVSZ)
129 
/*
 * One sample in the entropy input ring: a timestamp-derived word and a
 * caller-supplied word.  Producers accumulate with '+=' (see
 * enqueue_randomness()), so a wrapped/overwritten slot still carries
 * its previous contents mixed in.
 */
struct rand_event {
	u_int re_time;
	u_int re_val;
} rnd_event_space[QEVLEN];

u_int rnd_event_cons;		/* consumer index, advanced by rnd_get() */
u_int rnd_event_prod;		/* producer index, advanced by rnd_put() */

struct mutex rnd_enqlck = MUTEX_INITIALIZER(IPL_HIGH);	/* serializes producers */
struct mutex rnd_deqlck = MUTEX_INITIALIZER(IPL_HIGH);	/* serializes the consumer */

struct timeout rnd_timeout;	/* fires dequeue_randomness() */

static u_int32_t entropy_pool[POOLWORDS];	/* CRC-mixed entropy pool */
/* boot-loader seed; copied into entropy_pool on first use, wiped in random_start() */
u_int32_t entropy_pool0[POOLWORDS] __attribute__((section(".openbsd.randomdata")));
u_int	entropy_add_ptr;	/* next write slot in entropy_pool (walks backwards) */
u_char	entropy_input_rotate;	/* rotation applied to incoming words */
147 
148 void	dequeue_randomness(void *);
149 void	add_entropy_words(const u_int32_t *, u_int);
150 void	extract_entropy(u_int8_t *)
151     __attribute__((__bounded__(__minbytes__,1,EBUFSIZE)));
152 
153 int	filt_randomread(struct knote *, long);
154 void	filt_randomdetach(struct knote *);
155 int	filt_randomwrite(struct knote *, long);
156 
157 static void _rs_seed(u_char *, size_t);
158 static void _rs_clearseed(const void *p, size_t s);
159 
/* kqueue filter tables for /dev/random; selected in randomkqfilter() */
const struct filterops randomread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomread,
};

const struct filterops randomwrite_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomwrite,
};
173 
174 static inline struct rand_event *
175 rnd_get(void)
176 {
177 	u_int idx;
178 
179 	/* nothing to do if queue is empty */
180 	if (rnd_event_prod == rnd_event_cons)
181 		return NULL;
182 
183 	if (rnd_event_prod - rnd_event_cons > QEVLEN)
184 		rnd_event_cons = rnd_event_prod - QEVLEN;
185 	idx = rnd_event_cons++;
186 	return &rnd_event_space[idx & (QEVLEN - 1)];
187 }
188 
189 static inline struct rand_event *
190 rnd_put(void)
191 {
192 	u_int idx = rnd_event_prod++;
193 
194 	/* allow wrapping. caller will mix it in. */
195 	return &rnd_event_space[idx & (QEVLEN - 1)];
196 }
197 
198 static inline u_int
199 rnd_qlen(void)
200 {
201 	return rnd_event_prod - rnd_event_cons;
202 }
203 
/*
 * This function mixes entropy and timing into the entropy input ring.
 * Safe before the timeout subsystem is up: in that case the timestamp
 * stays zero and only 'val' contributes.
 */
void
enqueue_randomness(u_int val)
{
	struct rand_event *rep;
	struct timespec	ts;
	u_int qlen;

	timespecclear(&ts);
	if (timeout_initialized(&rnd_timeout))
		nanotime(&ts);

	mtx_enter(&rnd_enqlck);
	rep = rnd_put();
	/* accumulate ('+=') so a wrapped slot keeps its old contents mixed in */
	rep->re_time += ts.tv_nsec ^ (ts.tv_sec << 20);
	rep->re_val += val;
	qlen = rnd_qlen();
	mtx_leave(&rnd_enqlck);

	/* ring filling up: schedule a drain (dequeue_randomness) soon */
	if (qlen > QEVSLOW/2 && timeout_initialized(&rnd_timeout) &&
	    !timeout_pending(&rnd_timeout))
		timeout_add(&rnd_timeout, 1);
}
229 
/*
 * This function merges entropy ring information into the buffer using
 * a polynomial to spread the bits.  Each input word is rotated, XOR'd
 * with the pool words at the POOL_TAP* offsets, then CRC-twisted into
 * entropy_pool[] (see the stirring-polynomial comment above).
 */
void
add_entropy_words(const u_int32_t *buf, u_int n)
{
	/* derived from IEEE 802.3 CRC-32 */
	static const u_int32_t twist_table[8] = {
		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
	};

	for (; n--; buf++) {
		/* rotate left; the '& 31' keeps the right-shift defined when rotate == 0 */
		u_int32_t w = (*buf << entropy_input_rotate) |
		    (*buf >> ((32 - entropy_input_rotate) & 31));
		/* walk the pool backwards, wrapping via POOLMASK */
		u_int i = entropy_add_ptr =
		    (entropy_add_ptr - 1) & POOLMASK;
		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		entropy_input_rotate =
		    (entropy_input_rotate + (i ? 7 : 14)) & 31;

		/* XOR pool contents corresponding to polynomial terms */
		w ^= entropy_pool[(i + POOL_TAP1) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP2) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP3) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP4) & POOLMASK] ^
		     entropy_pool[(i + 1) & POOLMASK] ^
		     entropy_pool[i]; /* + 2^POOLWORDS */

		entropy_pool[i] = (w >> 3) ^ twist_table[w & 7];
	}
}
268 
/*
 * Pulls entropy out of the queue and merges it into the pool
 * with the CRC.  Runs as the rnd_timeout handler, and is also called
 * directly (v == NULL) from random_start(), extract_entropy() and the
 * suspend/resume paths.
 */
/* ARGSUSED */
void
dequeue_randomness(void *v)
{
	struct rand_event *rep;
	u_int32_t buf[2];

	/* a drain is happening right now; cancel any pending one */
	if (timeout_initialized(&rnd_timeout))
		timeout_del(&rnd_timeout);

	mtx_enter(&rnd_deqlck);
	while ((rep = rnd_get())) {
		buf[0] = rep->re_time;
		buf[1] = rep->re_val;
		/* mix outside the lock; producers use rnd_enqlck, not this one */
		mtx_leave(&rnd_deqlck);
		add_entropy_words(buf, 2);
		mtx_enter(&rnd_deqlck);
	}
	mtx_leave(&rnd_deqlck);
}
293 
/*
 * Grabs a chunk from the entropy_pool[] and slams it through SHA512 when
 * requested.  Writes exactly EBUFSIZE (KEYSZ + IVSZ) bytes into 'buf'
 * (enforced for callers by the __bounded__ attribute on the prototype).
 */
void
extract_entropy(u_int8_t *buf)
{
	static u_int32_t extract_pool[POOLWORDS];
	u_char digest[SHA512_DIGEST_LENGTH];
	SHA2_CTX shactx;

#if SHA512_DIGEST_LENGTH < EBUFSIZE
#error "need more bigger hash output"
#endif

	/*
	 * INTENTIONALLY not protected by any lock.  Races during
	 * memcpy() result in acceptable input data; races during
	 * SHA512Update() would create nasty data dependencies.  We
	 * do not rely on this as a benefit, but if it happens, cool.
	 */
	memcpy(extract_pool, entropy_pool, sizeof(extract_pool));

	/* Hash the pool to get the output */
	SHA512Init(&shactx);
	SHA512Update(&shactx, (u_int8_t *)extract_pool, sizeof(extract_pool));
	SHA512Final(digest, &shactx);

	/* Copy data to destination buffer */
	memcpy(buf, digest, EBUFSIZE);

	/* Modify pool so next hash will produce different results */
	enqueue_randomness(EBUFSIZE);
	dequeue_randomness(NULL);

	/* Wipe data from memory */
	explicit_bzero(extract_pool, sizeof(extract_pool));
	explicit_bzero(digest, sizeof(digest));
}
333 
/* random keystream by ChaCha */

void rnd_reinit(void *v);		/* timeout to start reinit */
void rnd_init(void *);			/* actually do the reinit */

/* protects the keystream state: rs, rs_buf, rs_have, rs_count */
struct mutex rndlock = MUTEX_INITIALIZER(IPL_HIGH);
struct timeout rndreinit_timeout;	/* periodic reseed trigger, see rnd_reinit() */
struct task rnd_task = TASK_INITIALIZER(rnd_init, NULL);	/* runs rnd_init() on systq */

static chacha_ctx rs;		/* chacha context for random keystream */
/* keystream blocks (also chacha seed from boot) */
static u_char rs_buf[RSBUFSZ];
/* boot-loader keystream seed; copied into rs_buf once, wiped in random_start() */
u_char rs_buf0[RSBUFSZ] __attribute__((section(".openbsd.randomdata")));
static size_t rs_have;		/* valid bytes at end of rs_buf */
static size_t rs_count;		/* bytes till reseed */
349 
/*
 * Called at system suspend: fold the current time into the pool, force
 * a reseed on the next keystream request (rs_count = 0), and overwrite
 * entropy_pool[] with fresh keystream so its pre-suspend contents are
 * not left in memory.
 */
void
suspend_randomness(void)
{
	struct timespec ts;

	getnanotime(&ts);
	enqueue_randomness(ts.tv_sec);
	enqueue_randomness(ts.tv_nsec);

	dequeue_randomness(NULL);
	rs_count = 0;
	arc4random_buf(entropy_pool, sizeof(entropy_pool));
}
363 
/*
 * Called at system resume: mix in an optional seed preserved across
 * the suspend (buf/buflen may be NULL/0), fold in the current time,
 * and force a reseed on the next keystream request.
 */
void
resume_randomness(char *buf, size_t buflen)
{
	struct timespec ts;

	if (buf && buflen)
		_rs_seed(buf, buflen);
	getnanotime(&ts);
	enqueue_randomness(ts.tv_sec);
	enqueue_randomness(ts.tv_nsec);

	dequeue_randomness(NULL);
	rs_count = 0;
}
378 
379 static inline void _rs_rekey(u_char *dat, size_t datlen);
380 
/*
 * (Re)key the ChaCha context from the first KEYSZ + IVSZ bytes of buf.
 * NOTE(review): callers appear to serialize via rndlock (or run
 * single-threaded at early boot) -- confirm before adding new callers.
 */
static inline void
_rs_init(u_char *buf, size_t n)
{
	KASSERT(n >= KEYSZ + IVSZ);
	chacha_keysetup(&rs, buf, KEYSZ * 8);
	chacha_ivsetup(&rs, buf + KEYSZ, NULL);
}
388 
/*
 * Mix 'buf' into the keystream state via a rekey, discard any
 * already-generated keystream, and restart the reseed byte countdown.
 */
static void
_rs_seed(u_char *buf, size_t n)
{
	_rs_rekey(buf, n);

	/* invalidate rs_buf */
	rs_have = 0;
	memset(rs_buf, 0, sizeof(rs_buf));

	/* bytes of output allowed before the next automatic reseed */
	rs_count = 1600000;
}
400 
/*
 * Reseed the keystream from SHA512 output of the entropy pool, XOR'd
 * with the current time.  'do_lock' selects whether rndlock is taken
 * here (rnd_init()) or is already held by the caller
 * (_rs_stir_if_needed()).
 */
static void
_rs_stir(int do_lock)
{
	struct timespec ts;
	u_int8_t buf[EBUFSIZE], *p;
	int i;

	/*
	 * Use SHA512 PRNG data and a system timespec; early in the boot
	 * process this is the best we can do -- some architectures do
	 * not collect entropy very well during this time, but may have
	 * clock information which is better than nothing.
	 */
	extract_entropy(buf);

	nanotime(&ts);
	for (p = (u_int8_t *)&ts, i = 0; i < sizeof(ts); i++)
		buf[i] ^= p[i];

	if (do_lock)
		mtx_enter(&rndlock);
	_rs_seed(buf, sizeof(buf));
	if (do_lock)
		mtx_leave(&rndlock);

	/* wipe the seed material from the stack */
	explicit_bzero(buf, sizeof(buf));
}
428 
/*
 * Lazily bootstrap from the bootblock seeds on first use, then reseed
 * via _rs_stir() whenever 'len' output bytes would exhaust the
 * rs_count budget.  Called with rndlock held (hence _rs_stir(0)).
 */
static inline void
_rs_stir_if_needed(size_t len)
{
	static int rs_initialized;

	if (!rs_initialized) {
		/* first use: adopt the seed material the bootblocks left behind */
		memcpy(entropy_pool, entropy_pool0, sizeof(entropy_pool));
		memcpy(rs_buf, rs_buf0, sizeof(rs_buf));
		/* seeds cannot be cleaned yet, random_start() will do so */
		_rs_init(rs_buf, KEYSZ + IVSZ);
		rs_count = 1024 * 1024 * 1024;	/* until main() runs */
		rs_initialized = 1;
	} else if (rs_count <= len)
		_rs_stir(0);
	else
		rs_count -= len;
}
446 
/*
 * Zero a boot seed living in the .openbsd.randomdata section.  The
 * seed is not written through its normal mapping; instead each
 * underlying physical page is mapped at a temporary writable virtual
 * address and bzero'd through that mapping, page by page.
 */
static void
_rs_clearseed(const void *p, size_t s)
{
	struct kmem_dyn_mode kd_avoidalias;
	vaddr_t va = trunc_page((vaddr_t)p);
	vsize_t off = (vaddr_t)p - va;	/* offset of p within its first page */
	vsize_t len;
	vaddr_t rwva;
	paddr_t pa;

	while (s > 0) {
		pmap_extract(pmap_kernel(), va, &pa);

		/* prefer a VA that will not cache-alias pa */
		memset(&kd_avoidalias, 0, sizeof(kd_avoidalias));
		kd_avoidalias.kd_prefer = pa;
		kd_avoidalias.kd_waitok = 1;
		rwva = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none,
		    &kd_avoidalias);
		if (!rwva)
			panic("_rs_clearseed");

		pmap_kenter_pa(rwva, pa, PROT_READ | PROT_WRITE);
		pmap_update(pmap_kernel());

		/* clear up to the end of this page (or of the seed) */
		len = MIN(s, PAGE_SIZE - off);
		explicit_bzero((void *)(rwva + off), len);

		/* tear the temporary mapping back down */
		pmap_kremove(rwva, PAGE_SIZE);
		km_free((void *)rwva, PAGE_SIZE, &kv_any, &kp_none);

		va += PAGE_SIZE;
		s -= len;
		off = 0;	/* subsequent pages are cleared from their start */
	}
}
482 
/*
 * Refill rs_buf with fresh keystream, optionally XOR caller-supplied
 * data into its head, then immediately rekey from (and erase) the
 * first KEYSZ + IVSZ bytes so the previous state cannot be
 * reconstructed (backtracking resistance).
 */
static inline void
_rs_rekey(u_char *dat, size_t datlen)
{
#ifndef KEYSTREAM_ONLY
	memset(rs_buf, 0, sizeof(rs_buf));
#endif
	/* fill rs_buf with the keystream */
	chacha_encrypt_bytes(&rs, rs_buf, rs_buf, sizeof(rs_buf));
	/* mix in optional user provided data */
	if (dat) {
		size_t i, m;

		m = MIN(datlen, KEYSZ + IVSZ);
		for (i = 0; i < m; i++)
			rs_buf[i] ^= dat[i];
	}
	/* immediately reinit for backtracking resistance */
	_rs_init(rs_buf, KEYSZ + IVSZ);
	memset(rs_buf, 0, KEYSZ + IVSZ);
	/* the rest of rs_buf remains available as output */
	rs_have = sizeof(rs_buf) - KEYSZ - IVSZ;
}
504 
/*
 * Copy n keystream bytes to _buf.  rs_buf is consumed from the tail
 * end (the last rs_have bytes are the valid ones) and each byte is
 * zeroed as it is handed out; rekey whenever the buffer runs dry.
 * Caller holds rndlock.
 */
static inline void
_rs_random_buf(void *_buf, size_t n)
{
	u_char *buf = (u_char *)_buf;
	size_t m;

	_rs_stir_if_needed(n);
	while (n > 0) {
		if (rs_have > 0) {
			m = MIN(n, rs_have);
			memcpy(buf, rs_buf + sizeof(rs_buf) - rs_have, m);
			/* wipe what was just consumed */
			memset(rs_buf + sizeof(rs_buf) - rs_have, 0, m);
			buf += m;
			n -= m;
			rs_have -= m;
		}
		if (rs_have == 0)
			_rs_rekey(NULL, 0);
	}
}
525 
/*
 * Produce one 32-bit keystream word; same consume-from-tail-and-wipe
 * scheme as _rs_random_buf().  Caller holds rndlock.
 */
static inline void
_rs_random_u32(u_int32_t *val)
{
	_rs_stir_if_needed(sizeof(*val));
	if (rs_have < sizeof(*val))
		_rs_rekey(NULL, 0);
	memcpy(val, rs_buf + sizeof(rs_buf) - rs_have, sizeof(*val));
	memset(rs_buf + sizeof(rs_buf) - rs_have, 0, sizeof(*val));
	rs_have -= sizeof(*val);
}
536 
537 /* Return one word of randomness from a ChaCha20 generator */
538 u_int32_t
539 arc4random(void)
540 {
541 	u_int32_t ret;
542 
543 	mtx_enter(&rndlock);
544 	_rs_random_u32(&ret);
545 	mtx_leave(&rndlock);
546 	return ret;
547 }
548 
/*
 * Fill a buffer of arbitrary length with ChaCha20-derived randomness,
 * serialized against all other keystream consumers by rndlock.
 */
void
arc4random_buf(void *buf, size_t n)
{
	mtx_enter(&rndlock);
	_rs_random_buf(buf, n);
	mtx_leave(&rndlock);
}
559 
560 /*
561  * Allocate a new ChaCha20 context for the caller to use.
562  */
563 struct arc4random_ctx *
564 arc4random_ctx_new()
565 {
566 	char keybuf[KEYSZ + IVSZ];
567 
568 	chacha_ctx *ctx = malloc(sizeof(chacha_ctx), M_TEMP, M_WAITOK);
569 	arc4random_buf(keybuf, KEYSZ + IVSZ);
570 	chacha_keysetup(ctx, keybuf, KEYSZ * 8);
571 	chacha_ivsetup(ctx, keybuf + KEYSZ, NULL);
572 	explicit_bzero(keybuf, sizeof(keybuf));
573 	return (struct arc4random_ctx *)ctx;
574 }
575 
/*
 * Free a ChaCha20 context created by arc4random_ctx_new().  The key
 * schedule is wiped before the memory is returned.
 */
void
arc4random_ctx_free(struct arc4random_ctx *ctx)
{
	explicit_bzero(ctx, sizeof(chacha_ctx));
	free(ctx, M_TEMP, sizeof(chacha_ctx));
}
585 
/*
 * Use a given ChaCha20 context to fill a buffer.  With KEYSTREAM_ONLY
 * defined (top of file) the prior contents of 'buf' are ignored and
 * pure keystream is written.
 */
void
arc4random_ctx_buf(struct arc4random_ctx *ctx, void *buf, size_t n)
{
	chacha_encrypt_bytes((chacha_ctx *)ctx, buf, buf, n);
}
594 
595 /*
596  * Calculate a uniformly distributed random number less than upper_bound
597  * avoiding "modulo bias".
598  *
599  * Uniformity is achieved by generating new random numbers until the one
600  * returned is outside the range [0, 2**32 % upper_bound).  This
601  * guarantees the selected random number will be inside
602  * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
603  * after reduction modulo upper_bound.
604  */
605 u_int32_t
606 arc4random_uniform(u_int32_t upper_bound)
607 {
608 	u_int32_t r, min;
609 
610 	if (upper_bound < 2)
611 		return 0;
612 
613 	/* 2**32 % x == (2**32 - x) % x */
614 	min = -upper_bound % upper_bound;
615 
616 	/*
617 	 * This could theoretically loop forever but each retry has
618 	 * p > 0.5 (worst case, usually far better) of selecting a
619 	 * number inside the range we need, so it should rarely need
620 	 * to re-roll.
621 	 */
622 	for (;;) {
623 		r = arc4random();
624 		if (r >= min)
625 			break;
626 	}
627 
628 	return r % upper_bound;
629 }
630 
/*
 * Task handler (and direct-call entry): perform the actual reseed.
 * Takes rndlock itself (_rs_stir(1)).
 */
/* ARGSUSED */
void
rnd_init(void *null)
{
	_rs_stir(1);
}
637 
/*
 * Called by timeout to mark arc4 for stirring.  The stir itself runs
 * from the systq task (rnd_init()); this handler only queues the task
 * and reschedules itself.
 */
void
rnd_reinit(void *v)
{
	task_add(systq, &rnd_task);
	/* 10 minutes, per dm@'s suggestion */
	timeout_add_sec(&rndreinit_timeout, 10 * 60);
}
648 
/*
 * Start periodic services inside the random subsystem, which pull
 * entropy forward, hash it, and re-seed the random stream as needed.
 */
void
random_start(void)
{
	extern char etext[];

#if !defined(NO_PROPOLICE)
	extern long __guard_local;

	/* a zero stack-protector guard implies the bootloader supplied no seed */
	if (__guard_local == 0)
		printf("warning: no entropy supplied by boot loader\n");
#endif

	/* the boot seeds have served their purpose; wipe them from memory */
	_rs_clearseed(entropy_pool0, sizeof(entropy_pool0));
	_rs_clearseed(rs_buf0, sizeof(rs_buf0));

	/* Message buffer may contain data from previous boot */
	if (msgbufp->msg_magic == MSG_MAGIC)
		add_entropy_words((u_int32_t *)msgbufp->msg_bufc,
		    msgbufp->msg_bufs / sizeof(u_int32_t));
	/* mix in 8192 bytes of kernel text starting 128KB below etext */
	add_entropy_words((u_int32_t *)etext - 32*1024,
	    8192/sizeof(u_int32_t));

	dequeue_randomness(NULL);
	rnd_init(NULL);
	timeout_set(&rndreinit_timeout, rnd_reinit, NULL);
	rnd_reinit(NULL);
	/* from here on, enqueue_randomness() can schedule drains */
	timeout_set(&rnd_timeout, dequeue_randomness, NULL);
}
681 
/* /dev/random open: always succeeds, no per-open state to set up. */
int
randomopen(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}
687 
/* /dev/random close: nothing was allocated at open, nothing to free. */
int
randomclose(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}
693 
694 /*
695  * Maximum number of bytes to serve directly from the main ChaCha
696  * pool. Larger requests are served from a discrete ChaCha instance keyed
697  * from the main pool.
698  */
699 #define RND_MAIN_MAX_BYTES	2048
700 
/*
 * Read handler for /dev/random: serves keystream in POOLBYTES chunks.
 */
int
randomread(dev_t dev, struct uio *uio, int ioflag)
{
	u_char		lbuf[KEYSZ+IVSZ];
	chacha_ctx	lctx;
	size_t		total = uio->uio_resid;
	u_char		*buf;
	int		myctx = 0, ret = 0;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);
	/*
	 * Large requests get a private ChaCha instance keyed from the
	 * main generator, so one big read does not hold rndlock and
	 * drain the shared keystream for its whole duration.
	 */
	if (total > RND_MAIN_MAX_BYTES) {
		arc4random_buf(lbuf, sizeof(lbuf));
		chacha_keysetup(&lctx, lbuf, KEYSZ * 8);
		chacha_ivsetup(&lctx, lbuf + KEYSZ, NULL);
		explicit_bzero(lbuf, sizeof(lbuf));
		myctx = 1;
	}

	while (ret == 0 && uio->uio_resid > 0) {
		size_t	n = ulmin(POOLBYTES, uio->uio_resid);

		if (myctx) {
#ifndef KEYSTREAM_ONLY
			memset(buf, 0, n);
#endif
			chacha_encrypt_bytes(&lctx, buf, buf, n);
		} else
			arc4random_buf(buf, n);
		ret = uiomove(buf, n, uio);
		/* be polite to other threads during long transfers */
		if (ret == 0 && uio->uio_resid > 0)
			yield();
	}
	/* wipe the private context and staging buffer before returning */
	if (myctx)
		explicit_bzero(&lctx, sizeof(lctx));
	explicit_bzero(buf, POOLBYTES);
	free(buf, M_TEMP, POOLBYTES);
	return ret;
}
742 
/*
 * Write handler for /dev/random: user data is CRC-mixed into the
 * entropy pool, and a reseed (rnd_init) is performed once any data
 * has been absorbed.
 */
int
randomwrite(dev_t dev, struct uio *uio, int flags)
{
	int		ret = 0, newdata = 0;
	u_int32_t	*buf;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);

	while (ret == 0 && uio->uio_resid > 0) {
		size_t	n = ulmin(POOLBYTES, uio->uio_resid);

		ret = uiomove(buf, n, uio);
		if (ret != 0)
			break;
		/* zero-pad a partial trailing word up to u_int32_t alignment */
		while (n % sizeof(u_int32_t))
			((u_int8_t *)buf)[n++] = 0;
		add_entropy_words(buf, n / 4);
		if (uio->uio_resid > 0)
			yield();
		newdata = 1;
	}

	if (newdata)
		rnd_init(NULL);

	/* wipe the copy of the user's data before freeing */
	explicit_bzero(buf, POOLBYTES);
	free(buf, M_TEMP, POOLBYTES);
	return ret;
}
775 
776 int
777 randomkqfilter(dev_t dev, struct knote *kn)
778 {
779 	switch (kn->kn_filter) {
780 	case EVFILT_READ:
781 		kn->kn_fop = &randomread_filtops;
782 		break;
783 	case EVFILT_WRITE:
784 		kn->kn_fop = &randomwrite_filtops;
785 		break;
786 	default:
787 		return (EINVAL);
788 	}
789 
790 	return (0);
791 }
792 
/* kqueue detach: the filters keep no per-knote state, nothing to undo. */
void
filt_randomdetach(struct knote *kn)
{
}
797 
/*
 * kqueue read filter: randomness is always available; report
 * RND_MAIN_MAX_BYTES as the readable byte count.
 */
int
filt_randomread(struct knote *kn, long hint)
{
	kn->kn_data = RND_MAIN_MAX_BYTES;
	return (1);
}
804 
/*
 * kqueue write filter: writes are always accepted; report one pool's
 * worth (POOLBYTES) of writable space.
 */
int
filt_randomwrite(struct knote *kn, long hint)
{
	kn->kn_data = POOLBYTES;
	return (1);
}
811 
812 int
813 randomioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
814 {
815 	switch (cmd) {
816 	case FIOASYNC:
817 		/* No async flag in softc so this is a no-op. */
818 		break;
819 	case FIONBIO:
820 		/* Handled in the upper FS layer. */
821 		break;
822 	default:
823 		return ENOTTY;
824 	}
825 	return 0;
826 }
827 
828 int
829 sys_getentropy(struct proc *p, void *v, register_t *retval)
830 {
831 	struct sys_getentropy_args /* {
832 		syscallarg(void *) buf;
833 		syscallarg(size_t) nbyte;
834 	} */ *uap = v;
835 	char buf[256];
836 	int error;
837 
838 	if (SCARG(uap, nbyte) > sizeof(buf))
839 		return (EIO);
840 	arc4random_buf(buf, SCARG(uap, nbyte));
841 	if ((error = copyout(buf, SCARG(uap, buf), SCARG(uap, nbyte))) != 0)
842 		return (error);
843 	explicit_bzero(buf, sizeof(buf));
844 	retval[0] = 0;
845 	return (0);
846 }
847