xref: /openbsd/sys/dev/rnd.c (revision 09467b48)
1 /*	$OpenBSD: rnd.c,v 1.221 2020/06/15 14:52:19 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2011,2020 Theo de Raadt.
5  * Copyright (c) 2008 Damien Miller.
6  * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
7  * Copyright (c) 2013 Markus Friedl.
8  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, and the entire permission notice in its entirety,
16  *    including the disclaimer of warranties.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. The name of the author may not be used to endorse or promote
21  *    products derived from this software without specific prior
22  *    written permission.
23  *
24  * ALTERNATIVELY, this product may be distributed under the terms of
25  * the GNU Public License, in which case the provisions of the GPL are
26  * required INSTEAD OF the above restrictions.  (This clause is
27  * necessary due to a potential bad interaction between the GPL and
28  * the restrictions contained in a BSD-style copyright.)
29  *
30  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
31  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
32  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
33  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
34  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
35  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
36  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
38  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
40  * OF THE POSSIBILITY OF SUCH DAMAGE.
41  */
42 
43 /*
44  * The bootblocks pre-fill the kernel .openbsd.randomdata section with seed
45  * material (on-disk from previous boot, hopefully mixed with a hardware rng).
46  * The first arc4random(9) call initializes this seed material as a chacha
47  * state.  Calls can be done early in kernel bootstrap code -- early use is
48  * encouraged.
49  *
50  * After the kernel timeout subsystem is initialized, random_start() prepares
51  * the entropy collection mechanism enqueue_randomness() and timeout-driven
52  * mixing into the chacha state.  The first submissions come from device
53  * probes, later on interrupt-time submissions are more common.  Entropy
54  * data (and timing information) get mixed over the entropy input ring
55  * rnd_event_space[] -- the goal is to collect damage.
56  *
57  * Based upon timeouts, a selection of the entropy ring rnd_event_space[]
58  * CRC bit-distributed and XOR mixed into entropy_pool[].
59  *
60  * From time to time, entropy_pool[] is SHA512-whitened, mixed with time
61  * information again, XOR'd with the inner and outer states of the existing
62  * chacha state, to create a new chacha state.
63  *
64  * During early boot (until cold=0), enqueue operations are immediately
65  * dequeued, and mixed into the chacha.
66  */
67 
68 #include <sys/param.h>
69 #include <sys/event.h>
70 #include <sys/ioctl.h>
71 #include <sys/malloc.h>
72 #include <sys/timeout.h>
73 #include <sys/atomic.h>
74 #include <sys/task.h>
75 #include <sys/msgbuf.h>
76 #include <sys/mount.h>
77 #include <sys/syscallargs.h>
78 
79 #include <crypto/sha2.h>
80 
81 #define KEYSTREAM_ONLY
82 #include <crypto/chacha_private.h>
83 
84 #include <uvm/uvm_extern.h>
85 
86 /*
87  * For the purposes of better mixing, we use the CRC-32 polynomial as
88  * well to make a twisted Generalized Feedback Shift Register
89  *
90  * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
91  * Transactions on Modeling and Computer Simulation 2(3):179-194.
92  * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
93  * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
94  */
95 
96 /*
97  * Stirring polynomials over GF(2) for various pool sizes. Used in
98  * add_entropy_words() below.
99  *
100  * The polynomial terms are chosen to be evenly spaced (minimum RMS
101  * distance from evenly spaced; except for the last tap, which is 1 to
102  * get the twisting happening as fast as possible.
103  *
104  * The resultant polynomial is:
105  *   2^POOLWORDS + 2^POOL_TAP1 + 2^POOL_TAP2 + 2^POOL_TAP3 + 2^POOL_TAP4 + 1
106  */
107 #define POOLWORDS	2048
108 #define POOLBYTES	(POOLWORDS*4)
109 #define POOLMASK	(POOLWORDS - 1)
110 #define	POOL_TAP1	1638
111 #define	POOL_TAP2	1231
112 #define	POOL_TAP3	819
113 #define	POOL_TAP4	411
114 
115 /*
116  * Raw entropy collection from device drivers; at interrupt context or not.
117  * enqueue_randomness() is used to submit data into the entropy input ring.
118  */
119 
#define QEVLEN	128		 /* entropy event ring size; must be a power of 2 */
#define QEVCONSUME 8		 /* how many events to consume a time */

#define KEYSZ	32		/* ChaCha key bytes */
#define IVSZ	8		/* ChaCha IV bytes */
#define BLOCKSZ	64		/* ChaCha block bytes */
#define RSBUFSZ	(16*BLOCKSZ)	/* keystream staging buffer size */
/* Parenthesized: an unparenthesized sum breaks under * or / at use sites. */
#define EBUFSIZE (KEYSZ + IVSZ)
128 
/*
 * One slot of the entropy input ring.  Producers accumulate (+=) a
 * messy timestamp and a caller-supplied value into each slot, so
 * colliding writers only add more damage rather than losing data.
 */
struct rand_event {
	u_int	re_time;
	u_int	re_val;
} rnd_event_space[QEVLEN];

u_int	rnd_event_cons;		/* consumer index into rnd_event_space[] */
u_int	rnd_event_prod;		/* producer index into rnd_event_space[] */
int	rnd_cold = 1;		/* boot-time: enqueue drains/reseeds inline */
int	rnd_slowextract = 1;	/* backoff multiplier for dequeue timeout */
138 
void	rnd_reinit(void *v);		/* timeout to start reinit */
void	rnd_init(void *);			/* actually do the reinit */

/* CRC-mixed entropy pool; entropy_pool0 holds the bootloader-provided seed */
static u_int32_t entropy_pool[POOLWORDS];
u_int32_t entropy_pool0[POOLWORDS] __attribute__((section(".openbsd.randomdata")));

void	dequeue_randomness(void *);
void	add_entropy_words(const u_int32_t *, u_int);
void	extract_entropy(u_int8_t *)
    __attribute__((__bounded__(__minbytes__,1,EBUFSIZE)));

/* fires dequeue_randomness() to fold ring entries into entropy_pool[] */
struct timeout rnd_timeout = TIMEOUT_INITIALIZER(dequeue_randomness, NULL);

/* kqueue filter callbacks for /dev/random */
int	filt_randomread(struct knote *, long);
void	filt_randomdetach(struct knote *);
int	filt_randomwrite(struct knote *, long);

static void _rs_seed(u_char *, size_t);
static void _rs_clearseed(const void *p, size_t s);
158 
/* kqueue filter tables for /dev/random; detach is shared, attach unused */
const struct filterops randomread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomread,
};

const struct filterops randomwrite_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomwrite,
};
172 
/*
 * This function mixes entropy and timing into the entropy input ring.
 * Safe to call from interrupt context; uses no locks, only an atomic
 * increment to claim a ring slot.
 */
void
enqueue_randomness(u_int val)
{
	struct rand_event *rep;
	int e;

	/* Claim the next slot; the ring may wrap and reuse old slots. */
	e = (atomic_inc_int_nv(&rnd_event_prod) - 1) & (QEVLEN-1);
	rep = &rnd_event_space[e];
	/* Accumulate rather than assign, so racing writers only add damage. */
	rep->re_time += cpu_rnd_messybits();
	rep->re_val += val;

	if (rnd_cold) {
		/* Early boot: no timeouts yet, drain and reseed inline. */
		dequeue_randomness(NULL);
		rnd_init(NULL);
		if (!cold)
			rnd_cold = 0;
	} else if (!timeout_pending(&rnd_timeout) &&
	    (rnd_event_prod - rnd_event_cons) > QEVCONSUME) {
		/* Exponential backoff, capped; _rs_stir() resets it to 1. */
		rnd_slowextract = min(rnd_slowextract * 2, 5000);
		timeout_add_msec(&rnd_timeout, rnd_slowextract * 10);
	}
}
198 
/*
 * This function merges entropy ring information into the pool using
 * a polynomial to spread the bits.  n counts 32-bit words in buf.
 */
void
add_entropy_words(const u_int32_t *buf, u_int n)
{
	/* derived from IEEE 802.3 CRC-32 */
	static const u_int32_t twist_table[8] = {
		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
	};
	static u_int	entropy_add_ptr;
	static u_char	entropy_input_rotate;

	for (; n--; buf++) {
		/* Rotate the input word so bits land in varying positions. */
		u_int32_t w = (*buf << entropy_input_rotate) |
		    (*buf >> ((32 - entropy_input_rotate) & 31));
		/* Walk the pool backwards, one word per input word. */
		u_int i = entropy_add_ptr =
		    (entropy_add_ptr - 1) & POOLMASK;
		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		entropy_input_rotate =
		    (entropy_input_rotate + (i ? 7 : 14)) & 31;

		/* XOR pool contents corresponding to polynomial terms */
		w ^= entropy_pool[(i + POOL_TAP1) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP2) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP3) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP4) & POOLMASK] ^
		     entropy_pool[(i + 1) & POOLMASK] ^
		     entropy_pool[i]; /* + 2^POOLWORDS */

		entropy_pool[i] = (w >> 3) ^ twist_table[w & 7];
	}
}
239 
240 /*
241  * Pulls entropy out of the queue and merges it into the poll with the
242  * CRC.  This takes a mix of fresh entries from the producer end of the
243  * queue and entries from the consumer end of the queue which are
244  * likely to have collected more damage.
245  */
246 /* ARGSUSED */
247 void
248 dequeue_randomness(void *v)
249 {
250 	u_int32_t buf[2];
251 	u_int startp, startc, i;
252 
253 	if (!rnd_cold)
254 		timeout_del(&rnd_timeout);
255 
256 	/* Some very new damage */
257 	startp = rnd_event_prod - QEVCONSUME;
258 	for (i = 0; i < QEVCONSUME; i++) {
259 		u_int e = (startp + i) & (QEVLEN-1);
260 
261 		buf[0] = rnd_event_space[e].re_time;
262 		buf[1] = rnd_event_space[e].re_val;
263 		add_entropy_words(buf, 2);
264 	}
265 	/* and some probably more damaged */
266 	startc = rnd_event_cons;
267 	for (i = 0; i < QEVCONSUME; i++) {
268 		u_int e = (startc + i) & (QEVLEN-1);
269 
270 		buf[0] = rnd_event_space[e].re_time;
271 		buf[1] = rnd_event_space[e].re_val;
272 		add_entropy_words(buf, 2);
273 	}
274 	rnd_event_cons = startp + QEVCONSUME;
275 }
276 
/*
 * Grabs a chunk from the entropy_pool[] and slams it through SHA512 when
 * requested.  Writes exactly EBUFSIZE bytes into buf.
 */
void
extract_entropy(u_int8_t *buf)
{
	static u_int32_t extract_pool[POOLWORDS];
	u_char digest[SHA512_DIGEST_LENGTH];
	SHA2_CTX shactx;

#if SHA512_DIGEST_LENGTH < EBUFSIZE
#error "need more bigger hash output"
#endif

	/*
	 * INTENTIONALLY not protected by any lock.  Races during
	 * memcpy() result in acceptable input data; races during
	 * SHA512Update() would create nasty data dependencies.  We
	 * do not rely on this as a benefit, but if it happens, cool.
	 */
	memcpy(extract_pool, entropy_pool, sizeof(extract_pool));

	/* Hash the pool to get the output */
	SHA512Init(&shactx);
	SHA512Update(&shactx, (u_int8_t *)extract_pool, sizeof(extract_pool));
	SHA512Final(digest, &shactx);

	/* Copy data to destination buffer */
	memcpy(buf, digest, EBUFSIZE);

	/*
	 * Modify pool so next hash will produce different results.
	 * During boot-time enqueue/dequeue stage, avoid recursion.
	 */
	if (!rnd_cold)
		enqueue_randomness(extract_pool[0]);
	dequeue_randomness(NULL);

	/* Wipe data from memory */
	explicit_bzero(extract_pool, sizeof(extract_pool));
	explicit_bzero(digest, sizeof(digest));
}
320 
/* random keystream by ChaCha */

struct mutex rndlock = MUTEX_INITIALIZER(IPL_HIGH);	/* guards rs state */
struct timeout rndreinit_timeout = TIMEOUT_INITIALIZER(rnd_reinit, NULL);
struct task rnd_task = TASK_INITIALIZER(rnd_init, NULL);	/* process-context stir */

static chacha_ctx rs;		/* chacha context for random keystream */
/* keystream blocks (also chacha seed from boot) */
static u_char rs_buf[RSBUFSZ];
u_char rs_buf0[RSBUFSZ] __attribute__((section(".openbsd.randomdata")));
static size_t rs_have;		/* valid bytes at end of rs_buf */
static size_t rs_count;		/* bytes till reseed */
333 
/*
 * Called around system suspend: mix the current time into the input
 * ring, drain it into the pool, force a reseed on next extraction, and
 * overwrite the pool with keystream so its pre-suspend contents are
 * not retained unchanged.
 */
void
suspend_randomness(void)
{
	struct timespec ts;

	getnanotime(&ts);
	enqueue_randomness(ts.tv_sec);
	enqueue_randomness(ts.tv_nsec);

	dequeue_randomness(NULL);
	rs_count = 0;		/* next use will stir */
	arc4random_buf(entropy_pool, sizeof(entropy_pool));
}
347 
/*
 * Counterpart of suspend_randomness(): optionally seed from
 * caller-provided material, mix the current time into the input ring,
 * and force a reseed on the next extraction.
 */
void
resume_randomness(char *buf, size_t buflen)
{
	struct timespec ts;

	if (buf && buflen)
		_rs_seed(buf, buflen);
	getnanotime(&ts);
	enqueue_randomness(ts.tv_sec);
	enqueue_randomness(ts.tv_nsec);

	dequeue_randomness(NULL);
	rs_count = 0;		/* next use will stir */
}
362 
static inline void _rs_rekey(u_char *dat, size_t datlen);

/* Load a KEYSZ-byte key and IVSZ-byte IV from buf into the chacha state. */
static inline void
_rs_init(u_char *buf, size_t n)
{
	KASSERT(n >= KEYSZ + IVSZ);
	chacha_keysetup(&rs, buf, KEYSZ * 8);
	chacha_ivsetup(&rs, buf + KEYSZ, NULL);
}
372 
/* Mix n bytes of seed material into the cipher and discard stale keystream. */
static void
_rs_seed(u_char *buf, size_t n)
{
	_rs_rekey(buf, n);

	/* invalidate rs_buf */
	rs_have = 0;
	memset(rs_buf, 0, sizeof(rs_buf));

	rs_count = 1600000;	/* output bytes until the next automatic stir */
}
384 
385 static void
386 _rs_stir(int do_lock)
387 {
388 	struct timespec ts;
389 	u_int8_t buf[EBUFSIZE], *p;
390 	int i;
391 
392 	/*
393 	 * Use SHA512 PRNG data and a system timespec; early in the boot
394 	 * process this is the best we can do -- some architectures do
395 	 * not collect entropy very well during this time, but may have
396 	 * clock information which is better than nothing.
397 	 */
398 	extract_entropy(buf);
399 
400 	nanotime(&ts);
401 	for (p = (u_int8_t *)&ts, i = 0; i < sizeof(ts); i++)
402 		buf[i] ^= p[i];
403 
404 	if (do_lock)
405 		mtx_enter(&rndlock);
406 	_rs_seed(buf, sizeof(buf));
407 	if (do_lock)
408 		mtx_leave(&rndlock);
409 	explicit_bzero(buf, sizeof(buf));
410 
411 	/* encourage fast-dequeue again */
412 	rnd_slowextract = 1;
413 }
414 
/*
 * Lazy one-time bootstrap from the linker-section seeds, then stir
 * whenever rs_count worth of output has been consumed.
 */
static inline void
_rs_stir_if_needed(size_t len)
{
	static int rs_initialized;

	if (!rs_initialized) {
		/* First use ever: adopt the bootloader-provided seeds. */
		memcpy(entropy_pool, entropy_pool0, sizeof(entropy_pool));
		memcpy(rs_buf, rs_buf0, sizeof(rs_buf));
		/* seeds cannot be cleaned yet, random_start() will do so */
		_rs_init(rs_buf, KEYSZ + IVSZ);
		rs_count = 1024 * 1024 * 1024;	/* until main() runs */
		rs_initialized = 1;
	} else if (rs_count <= len)
		_rs_stir(0);
	else
		rs_count -= len;
}
432 
/*
 * Zero the boot seed material at [p, p+s).  The seed pages may not be
 * writable through their normal mapping, so each backing physical page
 * is temporarily mapped read-write at a fresh virtual address and
 * wiped through that alias.
 */
static void
_rs_clearseed(const void *p, size_t s)
{
	struct kmem_dyn_mode kd_avoidalias;
	vaddr_t va = trunc_page((vaddr_t)p);
	vsize_t off = (vaddr_t)p - va;	/* offset of p within its first page */
	vsize_t len;
	vaddr_t rwva;
	paddr_t pa;

	while (s > 0) {
		pmap_extract(pmap_kernel(), va, &pa);

		/* Prefer a VA chosen against pa to avoid cache aliasing. */
		memset(&kd_avoidalias, 0, sizeof(kd_avoidalias));
		kd_avoidalias.kd_prefer = pa;
		kd_avoidalias.kd_waitok = 1;
		rwva = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none,
		    &kd_avoidalias);
		if (!rwva)
			panic("_rs_clearseed");

		pmap_kenter_pa(rwva, pa, PROT_READ | PROT_WRITE);
		pmap_update(pmap_kernel());

		/* Wipe at most to the end of the current page. */
		len = MIN(s, PAGE_SIZE - off);
		explicit_bzero((void *)(rwva + off), len);

		pmap_kremove(rwva, PAGE_SIZE);
		km_free((void *)rwva, PAGE_SIZE, &kv_any, &kp_none);

		va += PAGE_SIZE;
		s -= len;
		off = 0;	/* subsequent pages are wiped from the start */
	}
}
468 
/*
 * Refill rs_buf with keystream, optionally XOR'ing in up to
 * KEYSZ + IVSZ bytes of caller data, then immediately re-key the
 * cipher from the front of the buffer and erase those bytes, giving
 * backtracking resistance.
 */
static inline void
_rs_rekey(u_char *dat, size_t datlen)
{
#ifndef KEYSTREAM_ONLY
	memset(rs_buf, 0, sizeof(rs_buf));
#endif
	/* fill rs_buf with the keystream */
	chacha_encrypt_bytes(&rs, rs_buf, rs_buf, sizeof(rs_buf));
	/* mix in optional user provided data */
	if (dat) {
		size_t i, m;

		m = MIN(datlen, KEYSZ + IVSZ);
		for (i = 0; i < m; i++)
			rs_buf[i] ^= dat[i];
	}
	/* immediately reinit for backtracking resistance */
	_rs_init(rs_buf, KEYSZ + IVSZ);
	memset(rs_buf, 0, KEYSZ + IVSZ);
	rs_have = sizeof(rs_buf) - KEYSZ - IVSZ;
}
490 
/*
 * Copy n bytes of keystream into _buf, consuming rs_buf from its tail
 * and rekeying whenever it runs dry.  Callers in this file hold
 * rndlock around this.
 */
static inline void
_rs_random_buf(void *_buf, size_t n)
{
	u_char *buf = (u_char *)_buf;
	size_t m;

	_rs_stir_if_needed(n);
	while (n > 0) {
		if (rs_have > 0) {
			m = MIN(n, rs_have);
			/* Hand out bytes, then wipe them from rs_buf. */
			memcpy(buf, rs_buf + sizeof(rs_buf) - rs_have, m);
			memset(rs_buf + sizeof(rs_buf) - rs_have, 0, m);
			buf += m;
			n -= m;
			rs_have -= m;
		}
		if (rs_have == 0)
			_rs_rekey(NULL, 0);
	}
}
511 
/*
 * Produce one 32-bit word of keystream; rekeys first if fewer than
 * four valid bytes remain.  Callers in this file hold rndlock.
 */
static inline void
_rs_random_u32(u_int32_t *val)
{
	_rs_stir_if_needed(sizeof(*val));
	if (rs_have < sizeof(*val))
		_rs_rekey(NULL, 0);
	memcpy(val, rs_buf + sizeof(rs_buf) - rs_have, sizeof(*val));
	/* Wipe the consumed bytes so they can never be handed out again. */
	memset(rs_buf + sizeof(rs_buf) - rs_have, 0, sizeof(*val));
	rs_have -= sizeof(*val);
}
522 
523 /* Return one word of randomness from a ChaCha20 generator */
524 u_int32_t
525 arc4random(void)
526 {
527 	u_int32_t ret;
528 
529 	mtx_enter(&rndlock);
530 	_rs_random_u32(&ret);
531 	mtx_leave(&rndlock);
532 	return ret;
533 }
534 
/*
 * Fill a buffer of arbitrary length with ChaCha20-derived randomness.
 * Serialized by rndlock against all other keystream consumers.
 */
void
arc4random_buf(void *buf, size_t n)
{
	mtx_enter(&rndlock);
	_rs_random_buf(buf, n);
	mtx_leave(&rndlock);
}
545 
546 /*
547  * Allocate a new ChaCha20 context for the caller to use.
548  */
549 struct arc4random_ctx *
550 arc4random_ctx_new()
551 {
552 	char keybuf[KEYSZ + IVSZ];
553 
554 	chacha_ctx *ctx = malloc(sizeof(chacha_ctx), M_TEMP, M_WAITOK);
555 	arc4random_buf(keybuf, KEYSZ + IVSZ);
556 	chacha_keysetup(ctx, keybuf, KEYSZ * 8);
557 	chacha_ivsetup(ctx, keybuf + KEYSZ, NULL);
558 	explicit_bzero(keybuf, sizeof(keybuf));
559 	return (struct arc4random_ctx *)ctx;
560 }
561 
/*
 * Free a ChaCha20 context created by arc4random_ctx_new().
 * The key material is wiped before the memory is released.
 */
void
arc4random_ctx_free(struct arc4random_ctx *ctx)
{
	explicit_bzero(ctx, sizeof(chacha_ctx));
	free(ctx, M_TEMP, sizeof(chacha_ctx));
}
571 
/*
 * Use a given ChaCha20 context to fill a buffer.
 */
void
arc4random_ctx_buf(struct arc4random_ctx *ctx, void *buf, size_t n)
{
#ifndef KEYSTREAM_ONLY
	/* Encrypting zeros yields pure keystream. */
	memset(buf, 0, n);
#endif
	chacha_encrypt_bytes((chacha_ctx *)ctx, buf, buf, n);
}
583 
/*
 * Calculate a uniformly distributed random number less than upper_bound
 * avoiding "modulo bias".
 *
 * Uniformity is achieved by generating new random numbers until the one
 * returned is outside the range [0, 2**32 % upper_bound).  This
 * guarantees the selected random number will be inside
 * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
 * after reduction modulo upper_bound.
 */
u_int32_t
arc4random_uniform(u_int32_t upper_bound)
{
	u_int32_t rv, threshold;

	if (upper_bound < 2)
		return 0;

	/* 2**32 % x == (2**32 - x) % x */
	threshold = -upper_bound % upper_bound;

	/*
	 * Retry until the sample lands at or above the threshold.  Each
	 * attempt succeeds with p > 0.5 (worst case, usually far better),
	 * so the expected number of re-rolls is small.
	 */
	do {
		rv = arc4random();
	} while (rv < threshold);

	return rv % upper_bound;
}
619 
/* Task and bootstrap entry point: stir the keystream, taking rndlock. */
/* ARGSUSED */
void
rnd_init(void *null)
{
	_rs_stir(1);
}
626 
/*
 * Called by timeout to mark arc4 for stirring; the actual stir happens
 * in process context via rnd_task, and the timeout re-arms itself.
 */
void
rnd_reinit(void *v)
{
	task_add(systq, &rnd_task);
	/* 10 minutes, per dm@'s suggestion */
	timeout_add_sec(&rndreinit_timeout, 10 * 60);
}
637 
/*
 * Start periodic services inside the random subsystem, which pull
 * entropy forward, hash it, and re-seed the random stream as needed.
 */
void
random_start(int goodseed)
{
	extern char etext[];

#if !defined(NO_PROPOLICE)
	extern long __guard_local;

	/* A zero stack-protector guard suggests the bootloader gave no seed. */
	if (__guard_local == 0)
		printf("warning: no entropy supplied by boot loader\n");
#endif

	/* The boot-time seeds have been consumed; scrub them from memory. */
	_rs_clearseed(entropy_pool0, sizeof(entropy_pool0));
	_rs_clearseed(rs_buf0, sizeof(rs_buf0));

	/* Message buffer may contain data from previous boot */
	if (msgbufp->msg_magic == MSG_MAGIC)
		add_entropy_words((u_int32_t *)msgbufp->msg_bufc,
		    msgbufp->msg_bufs / sizeof(u_int32_t));
	/* Mix in 8KB of kernel text from below etext. */
	add_entropy_words((u_int32_t *)etext - 32*1024,
	    8192/sizeof(u_int32_t));

	dequeue_randomness(NULL);
	rnd_init(NULL);
	rnd_reinit(NULL);	/* kicks off the periodic re-stir */

	if (goodseed)
		printf("random: good seed from bootblocks\n");
	else {
		/* XXX kernel should work harder here */
		printf("random: boothowto does not indicate good seed\n");
	}
}
675 
/* Open /dev/random: always succeeds, no per-open state to set up. */
int
randomopen(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}
681 
/* Close /dev/random: nothing to tear down. */
int
randomclose(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}
687 
/*
 * Maximum number of bytes to serve directly from the main ChaCha
 * pool. Larger requests are served from a discrete ChaCha instance keyed
 * from the main pool (see randomread()).
 */
#define RND_MAIN_MAX_BYTES	2048
694 
695 int
696 randomread(dev_t dev, struct uio *uio, int ioflag)
697 {
698 	struct arc4random_ctx *lctx = NULL;
699 	size_t		total = uio->uio_resid;
700 	u_char		*buf;
701 	int		ret = 0;
702 
703 	if (uio->uio_resid == 0)
704 		return 0;
705 
706 	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);
707 	if (total > RND_MAIN_MAX_BYTES)
708 		lctx = arc4random_ctx_new();
709 
710 	while (ret == 0 && uio->uio_resid > 0) {
711 		size_t	n = ulmin(POOLBYTES, uio->uio_resid);
712 
713 		if (lctx != NULL)
714 			arc4random_ctx_buf(lctx, buf, n);
715 		else
716 			arc4random_buf(buf, n);
717 		ret = uiomove(buf, n, uio);
718 		if (ret == 0 && uio->uio_resid > 0)
719 			yield();
720 	}
721 	if (lctx != NULL)
722 		arc4random_ctx_free(lctx);
723 	explicit_bzero(buf, POOLBYTES);
724 	free(buf, M_TEMP, POOLBYTES);
725 	return ret;
726 }
727 
/*
 * Write to /dev/random: the data is mixed into the entropy pool, and
 * if anything was contributed the output stream is reseeded.
 */
int
randomwrite(dev_t dev, struct uio *uio, int flags)
{
	int		ret = 0, newdata = 0;
	u_int32_t	*buf;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);

	while (ret == 0 && uio->uio_resid > 0) {
		size_t	n = ulmin(POOLBYTES, uio->uio_resid);

		ret = uiomove(buf, n, uio);
		if (ret != 0)
			break;
		/* Zero-pad a partial trailing word before mixing. */
		while (n % sizeof(u_int32_t))
			((u_int8_t *)buf)[n++] = 0;
		add_entropy_words(buf, n / 4);
		if (uio->uio_resid > 0)
			yield();
		newdata = 1;
	}

	/* If anything was mixed in, fold it into the keystream now. */
	if (newdata)
		rnd_init(NULL);

	explicit_bzero(buf, POOLBYTES);
	free(buf, M_TEMP, POOLBYTES);
	return ret;
}
760 
761 int
762 randomkqfilter(dev_t dev, struct knote *kn)
763 {
764 	switch (kn->kn_filter) {
765 	case EVFILT_READ:
766 		kn->kn_fop = &randomread_filtops;
767 		break;
768 	case EVFILT_WRITE:
769 		kn->kn_fop = &randomwrite_filtops;
770 		break;
771 	default:
772 		return (EINVAL);
773 	}
774 
775 	return (0);
776 }
777 
/* No attach-time state exists, so detach is a no-op. */
void
filt_randomdetach(struct knote *kn)
{
}
782 
/* /dev/random is always readable; advertise the direct-read limit. */
int
filt_randomread(struct knote *kn, long hint)
{
	kn->kn_data = RND_MAIN_MAX_BYTES;
	return (1);
}
789 
/* /dev/random is always writable; advertise the staging buffer size. */
int
filt_randomwrite(struct knote *kn, long hint)
{
	kn->kn_data = POOLBYTES;
	return (1);
}
796 
797 int
798 randomioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
799 {
800 	switch (cmd) {
801 	case FIOASYNC:
802 		/* No async flag in softc so this is a no-op. */
803 		break;
804 	case FIONBIO:
805 		/* Handled in the upper FS layer. */
806 		break;
807 	default:
808 		return ENOTTY;
809 	}
810 	return 0;
811 }
812 
813 int
814 sys_getentropy(struct proc *p, void *v, register_t *retval)
815 {
816 	struct sys_getentropy_args /* {
817 		syscallarg(void *) buf;
818 		syscallarg(size_t) nbyte;
819 	} */ *uap = v;
820 	char buf[256];
821 	int error;
822 
823 	if (SCARG(uap, nbyte) > sizeof(buf))
824 		return (EIO);
825 	arc4random_buf(buf, SCARG(uap, nbyte));
826 	if ((error = copyout(buf, SCARG(uap, buf), SCARG(uap, nbyte))) != 0)
827 		return (error);
828 	explicit_bzero(buf, sizeof(buf));
829 	retval[0] = 0;
830 	return (0);
831 }
832