/*	$OpenBSD: rnd.c,v 1.218 2020/05/29 01:13:14 deraadt Exp $	*/

/*
 * Copyright (c) 2011 Theo de Raadt.
 * Copyright (c) 2008 Damien Miller.
 * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
 * Copyright (c) 2013 Markus Friedl.
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The bootblocks pre-fill the kernel .openbsd.randomdata section with seed
 * material (on-disk from previous boot, hopefully mixed with a hardware rng).
 * The first arc4random(9) call initializes this seed material as a chacha
 * state.  Calls can be done early in kernel bootstrap code -- early use is
 * encouraged.
 *
 * After the kernel timeout subsystem is initialized, random_start() prepares
 * the entropy collection mechanism enqueue_randomness() and timeout-driven
 * mixing into the chacha state.  The first submissions come from device
 * probes; later on, interrupt-time submissions are more common.  Entropy
 * data (and timing information) get mixed over the entropy input ring
 * rnd_event_space[] -- the goal is to collect damage.
 *
 * Based upon timeouts, a selection of the entropy ring rnd_event_space[]
 * is CRC bit-distributed and XOR mixed into entropy_pool[].
 *
 * From time to time, entropy_pool[] is SHA512-whitened, mixed with time
 * information again, XOR'd with the inner and outer states of the existing
 * chacha state, to create a new chacha state.
 *
 * During early boot (until cold=0), enqueue operations are immediately
 * dequeued, and mixed into the chacha.
 */
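
/*
 * Editor's sketch (not part of the original source): because the boot
 * seed becomes a chacha state on first use, kernel code may pull
 * randomness long before random_start() runs.  A hypothetical early
 * consumer -- the softc and its fields are made-up names -- needs only
 * the arc4random(9) API:
 *
 *	void
 *	exdev_attach(struct exdev_softc *sc)
 *	{
 *		sc->sc_cookie = arc4random();
 *		arc4random_buf(sc->sc_key, sizeof(sc->sc_key));
 *	}
 */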

#include <sys/param.h>
#include <sys/event.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/atomic.h>
#include <sys/task.h>
#include <sys/msgbuf.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <crypto/sha2.h>

#define KEYSTREAM_ONLY
#include <crypto/chacha_private.h>

#include <dev/rndvar.h>
#include <uvm/uvm_extern.h>

/*
 * For the purposes of better mixing, we use the CRC-32 polynomial as
 * well to make a twisted Generalized Feedback Shift Register
 *
 * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
 * Transactions on Modeling and Computer Simulation 2(3):179-194.
 * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
 * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
 */

/*
 * Stirring polynomials over GF(2) for various pool sizes. Used in
 * add_entropy_words() below.
 *
 * The polynomial terms are chosen to be evenly spaced (minimum RMS
 * distance from evenly spaced; except for the last tap, which is 1 to
 * get the twisting happening as fast as possible).
 *
 * The resultant polynomial is:
 *   2^POOLWORDS + 2^POOL_TAP1 + 2^POOL_TAP2 + 2^POOL_TAP3 + 2^POOL_TAP4 + 1
 */
#define POOLWORDS	2048
#define POOLBYTES	(POOLWORDS*4)
#define POOLMASK	(POOLWORDS - 1)
#define	POOL_TAP1	1638
#define	POOL_TAP2	1231
#define	POOL_TAP3	819
#define	POOL_TAP4	411
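
/*
 * Editor's summary of the stir implemented by add_entropy_words()
 * below, with all indices reduced mod POOLWORDS:
 *
 *	pool[i] <- twist(rot(input) ^ pool[i+1638] ^ pool[i+1231] ^
 *	    pool[i+819] ^ pool[i+411] ^ pool[i+1] ^ pool[i])
 *
 * where rot() is the running input rotation and
 * twist(w) = (w >> 3) ^ twist_table[w & 7] is a GF(2)-linear map that
 * feeds the shifted-out bits back through the CRC-32 polynomial.
 */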

/*
 * Raw entropy collection from device drivers; at interrupt context or not.
 * enqueue_randomness() is used to submit data into the entropy input ring.
 */

#define QEVLEN	128		 /* must be a power of 2 */
#define QEVCONSUME 8		 /* how many events to consume at a time */

#define KEYSZ	32
#define IVSZ	8
#define BLOCKSZ	64
#define RSBUFSZ	(16*BLOCKSZ)
#define EBUFSIZE (KEYSZ + IVSZ)

struct rand_event {
	u_int	re_time;
	u_int	re_val;
} rnd_event_space[QEVLEN];

u_int	rnd_event_cons;
u_int	rnd_event_prod;
int	rnd_cold = 1;
int	rnd_slowextract = 1;

void	rnd_reinit(void *v);		/* timeout to start reinit */
void	rnd_init(void *);		/* actually do the reinit */

static u_int32_t entropy_pool[POOLWORDS];
u_int32_t entropy_pool0[POOLWORDS] __attribute__((section(".openbsd.randomdata")));

void	dequeue_randomness(void *);
void	add_entropy_words(const u_int32_t *, u_int);
void	extract_entropy(u_int8_t *)
    __attribute__((__bounded__(__minbytes__,1,EBUFSIZE)));

struct timeout rnd_timeout = TIMEOUT_INITIALIZER(dequeue_randomness, NULL);

int	filt_randomread(struct knote *, long);
void	filt_randomdetach(struct knote *);
int	filt_randomwrite(struct knote *, long);

static void _rs_seed(u_char *, size_t);
static void _rs_clearseed(const void *p, size_t s);

const struct filterops randomread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomread,
};

const struct filterops randomwrite_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomwrite,
};

/*
 * This function mixes entropy and timing into the entropy input ring.
 */
void
enqueue_randomness(u_int val)
{
	struct rand_event *rep;
	struct timespec	ts;
	int e;

	nanotime(&ts);
	e = (atomic_inc_int_nv(&rnd_event_prod) - 1) & (QEVLEN-1);
	rep = &rnd_event_space[e];
	rep->re_time += ts.tv_nsec ^ (ts.tv_sec << 20);
	rep->re_val += val;

	if (rnd_cold) {
		dequeue_randomness(NULL);
		rnd_init(NULL);
		if (!cold)
			rnd_cold = 0;
	} else if (!timeout_pending(&rnd_timeout) &&
	    (rnd_event_prod - rnd_event_cons) > QEVCONSUME) {
		rnd_slowextract = min(rnd_slowextract * 2, 5000);
		timeout_add_msec(&rnd_timeout, rnd_slowextract * 10);
	}
}
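
/*
 * Editor's sketch of a typical producer (driver and register names are
 * hypothetical): an interrupt handler mixes a cheap device-local value
 * into the ring.  Because of the nanotime() stamp above, the timing of
 * the call contributes as much as the value itself:
 *
 *	int
 *	exdev_intr(void *arg)
 *	{
 *		struct exdev_softc *sc = arg;
 *
 *		enqueue_randomness(exdev_read_status(sc));
 *		... service the device ...
 *		return (1);
 *	}
 */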

/*
 * This function merges entropy ring information into the entropy pool,
 * using a polynomial to spread the bits.
 */
void
add_entropy_words(const u_int32_t *buf, u_int n)
{
	/* derived from IEEE 802.3 CRC-32 */
	static const u_int32_t twist_table[8] = {
		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
	};
	static u_int	entropy_add_ptr;
	static u_char	entropy_input_rotate;

	for (; n--; buf++) {
		u_int32_t w = (*buf << entropy_input_rotate) |
		    (*buf >> ((32 - entropy_input_rotate) & 31));
		u_int i = entropy_add_ptr =
		    (entropy_add_ptr - 1) & POOLMASK;
		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		entropy_input_rotate =
		    (entropy_input_rotate + (i ? 7 : 14)) & 31;

		/* XOR pool contents corresponding to polynomial terms */
		w ^= entropy_pool[(i + POOL_TAP1) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP2) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP3) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP4) & POOLMASK] ^
		     entropy_pool[(i + 1) & POOLMASK] ^
		     entropy_pool[i]; /* + 2^POOLWORDS */

		entropy_pool[i] = (w >> 3) ^ twist_table[w & 7];
	}
}
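
/*
 * Editor's note on the twist step above: twist_table[] is built from
 * the reflected CRC-32 polynomial 0xedb88320, shifted according to
 * which of the three ejected low bits are set, e.g.:
 *
 *	twist_table[4] == 0xedb88320
 *	twist_table[2] == 0xedb88320 >> 1
 *	twist_table[1] == 0xedb88320 >> 2
 *	twist_table[3] == twist_table[1] ^ twist_table[2]
 *
 * so (w >> 3) ^ twist_table[w & 7] folds the bits shifted out of w back
 * in along the CRC-32 polynomial instead of discarding them.
 */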

/*
 * Pulls entropy out of the queue and merges it into the pool with the
 * CRC.  This takes a mix of fresh entries from the producer end of the
 * queue and entries from the consumer end of the queue which are
 * likely to have collected more damage.
 */
/* ARGSUSED */
void
dequeue_randomness(void *v)
{
	u_int32_t buf[2];
	u_int startp, startc, i;

	if (!rnd_cold)
		timeout_del(&rnd_timeout);

	/* Some very new damage */
	startp = rnd_event_prod - QEVCONSUME;
	for (i = 0; i < QEVCONSUME; i++) {
		u_int e = (startp + i) & (QEVLEN-1);

		buf[0] = rnd_event_space[e].re_time;
		buf[1] = rnd_event_space[e].re_val;
		add_entropy_words(buf, 2);
	}
	/* and some probably more damaged */
	startc = rnd_event_cons;
	for (i = 0; i < QEVCONSUME; i++) {
		u_int e = (startc + i) & (QEVLEN-1);

		buf[0] = rnd_event_space[e].re_time;
		buf[1] = rnd_event_space[e].re_val;
		add_entropy_words(buf, 2);
	}
	rnd_event_cons = startp + QEVCONSUME;
}

/*
 * Grabs a chunk from the entropy_pool[] and slams it through SHA512 when
 * requested.
 */
void
extract_entropy(u_int8_t *buf)
{
	static u_int32_t extract_pool[POOLWORDS];
	u_char digest[SHA512_DIGEST_LENGTH];
	SHA2_CTX shactx;

#if SHA512_DIGEST_LENGTH < EBUFSIZE
#error "need a bigger hash output"
#endif

	/*
	 * INTENTIONALLY not protected by any lock.  Races during
	 * memcpy() result in acceptable input data; races during
	 * SHA512Update() would create nasty data dependencies.  We
	 * do not rely on this as a benefit, but if it happens, cool.
	 */
	memcpy(extract_pool, entropy_pool, sizeof(extract_pool));

	/* Hash the pool to get the output */
	SHA512Init(&shactx);
	SHA512Update(&shactx, (u_int8_t *)extract_pool, sizeof(extract_pool));
	SHA512Final(digest, &shactx);

	/* Copy data to destination buffer */
	memcpy(buf, digest, EBUFSIZE);

	/*
	 * Modify pool so next hash will produce different results.
	 * During boot-time enqueue/dequeue stage, avoid recursion.
	 */
	if (!rnd_cold)
		enqueue_randomness(extract_pool[0]);
	dequeue_randomness(NULL);

	/* Wipe data from memory */
	explicit_bzero(extract_pool, sizeof(extract_pool));
	explicit_bzero(digest, sizeof(digest));
}

/* random keystream by ChaCha */

struct mutex rndlock = MUTEX_INITIALIZER(IPL_HIGH);
struct timeout rndreinit_timeout = TIMEOUT_INITIALIZER(rnd_reinit, NULL);
struct task rnd_task = TASK_INITIALIZER(rnd_init, NULL);

static chacha_ctx rs;		/* chacha context for random keystream */
/* keystream blocks (also chacha seed from boot) */
static u_char rs_buf[RSBUFSZ];
u_char rs_buf0[RSBUFSZ] __attribute__((section(".openbsd.randomdata")));
static size_t rs_have;		/* valid bytes at end of rs_buf */
static size_t rs_count;		/* bytes till reseed */
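
/*
 * Editor's sketch of the rs_buf life cycle enforced by the code below:
 * _rs_rekey() refills all RSBUFSZ bytes with keystream, immediately
 * spends the first KEYSZ + IVSZ bytes as the next chacha key/iv and
 * zeroes them, and the remainder is handed out (and wiped) on demand:
 *
 *	rs_buf: [ key+iv, zeroed after rekey | rs_have unspent bytes ]
 *
 * rs_have counts the still-valid bytes at the tail of rs_buf; bytes
 * already given to callers are erased, so a later compromise of this
 * memory cannot reconstruct earlier output (fast key erasure).
 */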

void
suspend_randomness(void)
{
	struct timespec ts;

	getnanotime(&ts);
	enqueue_randomness(ts.tv_sec);
	enqueue_randomness(ts.tv_nsec);

	dequeue_randomness(NULL);
	rs_count = 0;
	arc4random_buf(entropy_pool, sizeof(entropy_pool));
}

void
resume_randomness(char *buf, size_t buflen)
{
	struct timespec ts;

	if (buf && buflen)
		_rs_seed(buf, buflen);
	getnanotime(&ts);
	enqueue_randomness(ts.tv_sec);
	enqueue_randomness(ts.tv_nsec);

	dequeue_randomness(NULL);
	rs_count = 0;
}

static inline void _rs_rekey(u_char *dat, size_t datlen);

static inline void
_rs_init(u_char *buf, size_t n)
{
	KASSERT(n >= KEYSZ + IVSZ);
	chacha_keysetup(&rs, buf, KEYSZ * 8);
	chacha_ivsetup(&rs, buf + KEYSZ, NULL);
}

static void
_rs_seed(u_char *buf, size_t n)
{
	_rs_rekey(buf, n);

	/* invalidate rs_buf */
	rs_have = 0;
	memset(rs_buf, 0, sizeof(rs_buf));

	rs_count = 1600000;
}

static void
_rs_stir(int do_lock)
{
	struct timespec ts;
	u_int8_t buf[EBUFSIZE], *p;
	int i;

	/*
	 * Use SHA512 PRNG data and a system timespec; early in the boot
	 * process this is the best we can do -- some architectures do
	 * not collect entropy very well during this time, but may have
	 * clock information which is better than nothing.
	 */
	extract_entropy(buf);

	nanotime(&ts);
	for (p = (u_int8_t *)&ts, i = 0; i < sizeof(ts); i++)
		buf[i] ^= p[i];

	if (do_lock)
		mtx_enter(&rndlock);
	_rs_seed(buf, sizeof(buf));
	if (do_lock)
		mtx_leave(&rndlock);
	explicit_bzero(buf, sizeof(buf));

	/* encourage fast-dequeue again */
	rnd_slowextract = 1;
}

static inline void
_rs_stir_if_needed(size_t len)
{
	static int rs_initialized;

	if (!rs_initialized) {
		memcpy(entropy_pool, entropy_pool0, sizeof(entropy_pool));
		memcpy(rs_buf, rs_buf0, sizeof(rs_buf));
		/* seeds cannot be cleaned yet, random_start() will do so */
		_rs_init(rs_buf, KEYSZ + IVSZ);
		rs_count = 1024 * 1024 * 1024;	/* until main() runs */
		rs_initialized = 1;
	} else if (rs_count <= len)
		_rs_stir(0);
	else
		rs_count -= len;
}

static void
_rs_clearseed(const void *p, size_t s)
{
	struct kmem_dyn_mode kd_avoidalias;
	vaddr_t va = trunc_page((vaddr_t)p);
	vsize_t off = (vaddr_t)p - va;
	vsize_t len;
	vaddr_t rwva;
	paddr_t pa;

	while (s > 0) {
		pmap_extract(pmap_kernel(), va, &pa);

		memset(&kd_avoidalias, 0, sizeof(kd_avoidalias));
		kd_avoidalias.kd_prefer = pa;
		kd_avoidalias.kd_waitok = 1;
		rwva = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none,
		    &kd_avoidalias);
		if (!rwva)
			panic("_rs_clearseed");

		pmap_kenter_pa(rwva, pa, PROT_READ | PROT_WRITE);
		pmap_update(pmap_kernel());

		len = MIN(s, PAGE_SIZE - off);
		explicit_bzero((void *)(rwva + off), len);

		pmap_kremove(rwva, PAGE_SIZE);
		km_free((void *)rwva, PAGE_SIZE, &kv_any, &kp_none);

		va += PAGE_SIZE;
		s -= len;
		off = 0;
	}
}

static inline void
_rs_rekey(u_char *dat, size_t datlen)
{
#ifndef KEYSTREAM_ONLY
	memset(rs_buf, 0, sizeof(rs_buf));
#endif
	/* fill rs_buf with the keystream */
	chacha_encrypt_bytes(&rs, rs_buf, rs_buf, sizeof(rs_buf));
	/* mix in optional user provided data */
	if (dat) {
		size_t i, m;

		m = MIN(datlen, KEYSZ + IVSZ);
		for (i = 0; i < m; i++)
			rs_buf[i] ^= dat[i];
	}
	/* immediately reinit for backtracking resistance */
	_rs_init(rs_buf, KEYSZ + IVSZ);
	memset(rs_buf, 0, KEYSZ + IVSZ);
	rs_have = sizeof(rs_buf) - KEYSZ - IVSZ;
}

static inline void
_rs_random_buf(void *_buf, size_t n)
{
	u_char *buf = (u_char *)_buf;
	size_t m;

	_rs_stir_if_needed(n);
	while (n > 0) {
		if (rs_have > 0) {
			m = MIN(n, rs_have);
			memcpy(buf, rs_buf + sizeof(rs_buf) - rs_have, m);
			memset(rs_buf + sizeof(rs_buf) - rs_have, 0, m);
			buf += m;
			n -= m;
			rs_have -= m;
		}
		if (rs_have == 0)
			_rs_rekey(NULL, 0);
	}
}

static inline void
_rs_random_u32(u_int32_t *val)
{
	_rs_stir_if_needed(sizeof(*val));
	if (rs_have < sizeof(*val))
		_rs_rekey(NULL, 0);
	memcpy(val, rs_buf + sizeof(rs_buf) - rs_have, sizeof(*val));
	memset(rs_buf + sizeof(rs_buf) - rs_have, 0, sizeof(*val));
	rs_have -= sizeof(*val);
}

/* Return one word of randomness from a ChaCha20 generator */
u_int32_t
arc4random(void)
{
	u_int32_t ret;

	mtx_enter(&rndlock);
	_rs_random_u32(&ret);
	mtx_leave(&rndlock);
	return ret;
}

/*
 * Fill a buffer of arbitrary length with ChaCha20-derived randomness.
 */
void
arc4random_buf(void *buf, size_t n)
{
	mtx_enter(&rndlock);
	_rs_random_buf(buf, n);
	mtx_leave(&rndlock);
}

/*
 * Allocate a new ChaCha20 context for the caller to use.
 */
struct arc4random_ctx *
arc4random_ctx_new(void)
{
	char keybuf[KEYSZ + IVSZ];

	chacha_ctx *ctx = malloc(sizeof(chacha_ctx), M_TEMP, M_WAITOK);
	arc4random_buf(keybuf, KEYSZ + IVSZ);
	chacha_keysetup(ctx, keybuf, KEYSZ * 8);
	chacha_ivsetup(ctx, keybuf + KEYSZ, NULL);
	explicit_bzero(keybuf, sizeof(keybuf));
	return (struct arc4random_ctx *)ctx;
}

/*
 * Free a ChaCha20 context created by arc4random_ctx_new()
 */
void
arc4random_ctx_free(struct arc4random_ctx *ctx)
{
	explicit_bzero(ctx, sizeof(chacha_ctx));
	free(ctx, M_TEMP, sizeof(chacha_ctx));
}

/*
 * Use a given ChaCha20 context to fill a buffer
 */
void
arc4random_ctx_buf(struct arc4random_ctx *ctx, void *buf, size_t n)
{
#ifndef KEYSTREAM_ONLY
	memset(buf, 0, n);
#endif
	chacha_encrypt_bytes((chacha_ctx *)ctx, buf, buf, n);
}
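
/*
 * Editor's usage sketch for the context API above (this is the pattern
 * randomread() uses below): key a private generator with one locked
 * draw from the main pool, then produce any amount of output without
 * holding rndlock:
 *
 *	struct arc4random_ctx *ctx = arc4random_ctx_new();
 *	arc4random_ctx_buf(ctx, buf, buflen);
 *	arc4random_ctx_free(ctx);
 */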

/*
 * Calculate a uniformly distributed random number less than upper_bound
 * avoiding "modulo bias".
 *
 * Uniformity is achieved by generating new random numbers until the one
 * returned is outside the range [0, 2**32 % upper_bound).  This
 * guarantees the selected random number will be inside
 * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
 * after reduction modulo upper_bound.
 */
u_int32_t
arc4random_uniform(u_int32_t upper_bound)
{
	u_int32_t r, min;

	if (upper_bound < 2)
		return 0;

	/* 2**32 % x == (2**32 - x) % x */
	min = -upper_bound % upper_bound;

	/*
	 * This could theoretically loop forever but each retry has
	 * p > 0.5 (worst case, usually far better) of selecting a
	 * number inside the range we need, so it should rarely need
	 * to re-roll.
	 */
	for (;;) {
		r = arc4random();
		if (r >= min)
			break;
	}

	return r % upper_bound;
}
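
/*
 * Editor's worked example for the rejection bound above: with
 * upper_bound == 10, min = -10U % 10 = (2**32 - 10) % 10 = 6, which
 * equals 2**32 % 10.  Rejecting r < 6 leaves 2**32 - 6 candidates,
 * exactly (2**32 - 6) / 10 full residue classes, so
 *
 *	arc4random_uniform(10)
 *
 * returns each value in 0..9 with identical probability.
 */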

/* ARGSUSED */
void
rnd_init(void *null)
{
	_rs_stir(1);
}

/*
 * Called by timeout to mark arc4 for stirring.
 */
void
rnd_reinit(void *v)
{
	task_add(systq, &rnd_task);
	/* 10 minutes, per dm@'s suggestion */
	timeout_add_sec(&rndreinit_timeout, 10 * 60);
}

/*
 * Start periodic services inside the random subsystem, which pull
 * entropy forward, hash it, and re-seed the random stream as needed.
 */
void
random_start(int goodseed)
{
	extern char etext[];

#if !defined(NO_PROPOLICE)
	extern long __guard_local;

	if (__guard_local == 0)
		printf("warning: no entropy supplied by boot loader\n");
#endif

	_rs_clearseed(entropy_pool0, sizeof(entropy_pool0));
	_rs_clearseed(rs_buf0, sizeof(rs_buf0));

	/* Message buffer may contain data from previous boot */
	if (msgbufp->msg_magic == MSG_MAGIC)
		add_entropy_words((u_int32_t *)msgbufp->msg_bufc,
		    msgbufp->msg_bufs / sizeof(u_int32_t));
	add_entropy_words((u_int32_t *)etext - 32*1024,
	    8192/sizeof(u_int32_t));

	dequeue_randomness(NULL);
	rnd_init(NULL);
	rnd_reinit(NULL);

	if (goodseed)
		printf("random: good seed from bootblocks\n");
	else {
		/* XXX kernel should work harder here */
		printf("random: boothowto does not indicate good seed\n");
	}
}

int
randomopen(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}

int
randomclose(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}

/*
 * Maximum number of bytes to serve directly from the main ChaCha
 * pool. Larger requests are served from a discrete ChaCha instance keyed
 * from the main pool.
 */
#define RND_MAIN_MAX_BYTES	2048
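
/*
 * Editor's note: the split is visible from userland via the rnd(4)
 * device nodes.  A short read is served straight from the main pool,
 * while a bulk read such as
 *
 *	$ dd if=/dev/random bs=65536 count=16 of=keys.bin
 *
 * is generated by a throwaway context keyed once from the main pool,
 * so bulk consumers do not hold rndlock while producing output.
 */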

int
randomread(dev_t dev, struct uio *uio, int ioflag)
{
	struct arc4random_ctx *lctx = NULL;
	size_t		total = uio->uio_resid;
	u_char		*buf;
	int		ret = 0;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);
	if (total > RND_MAIN_MAX_BYTES)
		lctx = arc4random_ctx_new();

	while (ret == 0 && uio->uio_resid > 0) {
		size_t	n = ulmin(POOLBYTES, uio->uio_resid);

		if (lctx != NULL)
			arc4random_ctx_buf(lctx, buf, n);
		else
			arc4random_buf(buf, n);
		ret = uiomove(buf, n, uio);
		if (ret == 0 && uio->uio_resid > 0)
			yield();
	}
	if (lctx != NULL)
		arc4random_ctx_free(lctx);
	explicit_bzero(buf, POOLBYTES);
	free(buf, M_TEMP, POOLBYTES);
	return ret;
}

int
randomwrite(dev_t dev, struct uio *uio, int flags)
{
	int		ret = 0, newdata = 0;
	u_int32_t	*buf;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);

	while (ret == 0 && uio->uio_resid > 0) {
		size_t	n = ulmin(POOLBYTES, uio->uio_resid);

		ret = uiomove(buf, n, uio);
		if (ret != 0)
			break;
		while (n % sizeof(u_int32_t))
			((u_int8_t *)buf)[n++] = 0;
		add_entropy_words(buf, n / 4);
		if (uio->uio_resid > 0)
			yield();
		newdata = 1;
	}

	if (newdata)
		rnd_init(NULL);

	explicit_bzero(buf, POOLBYTES);
	free(buf, M_TEMP, POOLBYTES);
	return ret;
}

int
randomkqfilter(dev_t dev, struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &randomread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &randomwrite_filtops;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

void
filt_randomdetach(struct knote *kn)
{
}

int
filt_randomread(struct knote *kn, long hint)
{
	kn->kn_data = RND_MAIN_MAX_BYTES;
	return (1);
}

int
filt_randomwrite(struct knote *kn, long hint)
{
	kn->kn_data = POOLBYTES;
	return (1);
}

int
randomioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	switch (cmd) {
	case FIOASYNC:
		/* No async flag in softc so this is a no-op. */
		break;
	case FIONBIO:
		/* Handled in the upper FS layer. */
		break;
	default:
		return ENOTTY;
	}
	return 0;
}

int
sys_getentropy(struct proc *p, void *v, register_t *retval)
{
	struct sys_getentropy_args /* {
		syscallarg(void *) buf;
		syscallarg(size_t) nbyte;
	} */ *uap = v;
	char buf[256];
	int error;

	if (SCARG(uap, nbyte) > sizeof(buf))
		return (EIO);
	arc4random_buf(buf, SCARG(uap, nbyte));
	if ((error = copyout(buf, SCARG(uap, buf), SCARG(uap, nbyte))) != 0)
		return (error);
	explicit_bzero(buf, sizeof(buf));
	retval[0] = 0;
	return (0);
}
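
/*
 * Editor's userland sketch of the syscall above, through the libc
 * wrapper getentropy(2); requests are capped at 256 bytes, matching
 * sizeof(buf) in sys_getentropy():
 *
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	char seed[32];
 *
 *	if (getentropy(seed, sizeof(seed)) == -1)
 *		err(1, "getentropy");
 */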
835