/*	$OpenBSD: rnd.c,v 1.230 2024/12/30 02:46:00 guenther Exp $	*/

/*
 * Copyright (c) 2011,2020 Theo de Raadt.
 * Copyright (c) 2008 Damien Miller.
 * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
 * Copyright (c) 2013 Markus Friedl.
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The bootblocks pre-fill the kernel .openbsd.randomdata section with seed
 * material (on-disk from previous boot, hopefully mixed with a hardware rng).
 * The first arc4random(9) call initializes this seed material as a chacha
 * state.  Calls can be done early in kernel bootstrap code -- early use is
 * encouraged.
 *
 * After the kernel timeout subsystem is initialized, random_start() prepares
 * the entropy collection mechanism enqueue_randomness() and timeout-driven
 * mixing into the chacha state.  The first submissions come from device
 * probes; later on, interrupt-time submissions are more common.  Entropy
 * data (and timing information) get mixed over the entropy input ring
 * rnd_event_space[] -- the goal is to collect damage.
 *
 * Based upon timeouts, a selection of the entropy ring rnd_event_space[]
 * is CRC bit-distributed and XOR mixed into entropy_pool[].
 *
 * From time to time, entropy_pool[] is SHA512-whitened, mixed with time
 * information again, and XOR'd with the inner and outer states of the
 * existing chacha state, to create a new chacha state.
 *
 * During early boot (until cold=0), enqueue operations are immediately
 * dequeued, and mixed into the chacha.
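 *
 * In short, data flows through the subsystem like so:
 *   enqueue_randomness() -> rnd_event_space[] -> dequeue_randomness()
 *     -> entropy_pool[] -> extract_entropy()/SHA512 -> _rs_seed()
 *     -> chacha state -> arc4random(9) and friends.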
 */

#include <sys/param.h>
#include <sys/event.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/atomic.h>
#include <sys/task.h>
#include <sys/msgbuf.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/syslimits.h>

#include <crypto/sha2.h>

#define KEYSTREAM_ONLY
#include <crypto/chacha_private.h>

#include <uvm/uvm_extern.h>

/*
 * For the purposes of better mixing, we use the CRC-32 polynomial as
 * well to make a twisted Generalized Feedback Shift Register.
 *
 * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
 * Transactions on Modeling and Computer Simulation 2(3):179-194.
 * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
 * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266.)
 */
/*
 * Stirring polynomial over GF(2). Used in add_entropy_words() below.
 *
 * The polynomial terms are chosen to be evenly spaced (minimum RMS
 * distance from evenly spaced), except for the last tap, which is 1,
 * to get the twisting happening as fast as possible.
 *
 * The resultant polynomial is:
 *   2^POOLWORDS + 2^POOL_TAP1 + 2^POOL_TAP2 + 2^POOL_TAP3 + 2^POOL_TAP4 + 1
 */
#define POOLWORDS	2048
#define POOLBYTES	(POOLWORDS*4)
#define POOLMASK	(POOLWORDS - 1)
#define	POOL_TAP1	1638
#define	POOL_TAP2	1231
#define	POOL_TAP3	819
#define	POOL_TAP4	411
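
/*
 * With the values above, that works out to
 *   x^2048 + x^1638 + x^1231 + x^819 + x^411 + 1  over GF(2).
 */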

/*
 * Raw entropy collection from device drivers; at interrupt context or not.
 * enqueue_randomness() is used to submit data into the entropy input ring.
 */

#define QEVLEN	128		 /* must be a power of 2 */
#define QEVCONSUME 8		 /* how many events to consume at a time */

#define KEYSZ	32
#define IVSZ	8
#define BLOCKSZ	64
#define RSBUFSZ	(16*BLOCKSZ)
#define EBUFSIZE	(KEYSZ + IVSZ)

struct rand_event {
	u_int	re_time;
	u_int	re_val;
} rnd_event_space[QEVLEN];

u_int	rnd_event_cons;
u_int	rnd_event_prod;
int	rnd_cold = 1;
int	rnd_slowextract = 1;

void	rnd_reinit(void *v);		/* timeout to start reinit */
void	rnd_init(void *);		/* actually do the reinit */

static u_int32_t entropy_pool[POOLWORDS];
u_int32_t entropy_pool0[POOLWORDS] __attribute__((section(".openbsd.randomdata")));

void	dequeue_randomness(void *);
void	add_entropy_words(const u_int32_t *, u_int);
void	extract_entropy(u_int8_t *)
    __attribute__((__bounded__(__minbytes__,1,EBUFSIZE)));

struct timeout rnd_timeout = TIMEOUT_INITIALIZER(dequeue_randomness, NULL);

int	filt_randomread(struct knote *, long);
void	filt_randomdetach(struct knote *);
int	filt_randomwrite(struct knote *, long);

static void _rs_seed(u_char *, size_t);
static void _rs_clearseed(const void *p, size_t s);

const struct filterops randomread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomread,
};

const struct filterops randomwrite_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomwrite,
};

/*
 * This function mixes entropy and timing into the entropy input ring.
 */
static void
add_event_data(u_int val)
{
	struct rand_event *rep;
	int e;

	e = (atomic_inc_int_nv(&rnd_event_prod) - 1) & (QEVLEN-1);
	rep = &rnd_event_space[e];
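	/*
	 * Accumulate into the slot rather than overwrite it, so an
	 * entry that has not been consumed yet keeps gathering noise.
	 */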
	rep->re_time += cpu_rnd_messybits();
	rep->re_val += val;
}

void
enqueue_randomness(u_int val)
{
	add_event_data(val);

	if (rnd_cold) {
		dequeue_randomness(NULL);
		rnd_init(NULL);
		if (!cold)
			rnd_cold = 0;
	} else if (!timeout_pending(&rnd_timeout) &&
	    (rnd_event_prod - rnd_event_cons) > QEVCONSUME) {
		rnd_slowextract = min(rnd_slowextract * 2, 5000);
		timeout_add_msec(&rnd_timeout, rnd_slowextract * 10);
	}
}
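
/*
 * A sketch of the expected caller: a device driver feeding an
 * unpredictable value at interrupt time.  The driver name and softc
 * field below are hypothetical; any hard-to-predict integer will do,
 * since the submission time is mixed in as well.
 *
 *	int
 *	xxintr(void *arg)
 *	{
 *		struct xx_softc *sc = arg;
 *
 *		enqueue_randomness(sc->sc_isr_status);
 *		...
 *	}
 */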

/*
 * This function merges the supplied words into the entropy pool,
 * using a polynomial to spread the bits.
 */
void
add_entropy_words(const u_int32_t *buf, u_int n)
{
	/* derived from IEEE 802.3 CRC-32 */
	static const u_int32_t twist_table[8] = {
		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
	};
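	/*
	 * Each entry above is an XOR combination of right-shifted
	 * copies of the reflected CRC-32 polynomial 0xedb88320, so
	 * "(w >> 3) ^ twist_table[w & 7]" below folds the low three
	 * bits of w back into the word, CRC-style.
	 */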
	static u_int	entropy_add_ptr;
	static u_char	entropy_input_rotate;

	for (; n--; buf++) {
		u_int32_t w = (*buf << entropy_input_rotate) |
		    (*buf >> ((32 - entropy_input_rotate) & 31));
		u_int i = entropy_add_ptr =
		    (entropy_add_ptr - 1) & POOLMASK;
		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		entropy_input_rotate =
		    (entropy_input_rotate + (i ? 7 : 14)) & 31;

		/* XOR pool contents corresponding to polynomial terms */
		w ^= entropy_pool[(i + POOL_TAP1) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP2) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP3) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP4) & POOLMASK] ^
		     entropy_pool[(i + 1) & POOLMASK] ^
		     entropy_pool[i]; /* + 2^POOLWORDS */

		entropy_pool[i] = (w >> 3) ^ twist_table[w & 7];
	}
}

/*
 * Pulls entropy out of the queue and merges it into the pool with the
 * CRC.  This takes a mix of fresh entries from the producer end of the
 * queue and entries from the consumer end of the queue which are
 * likely to have collected more damage.
 */
void
dequeue_randomness(void *v)
{
	u_int32_t buf[2];
	u_int startp, startc, i;

	/* Some very new damage */
	startp = rnd_event_prod - QEVCONSUME;
	for (i = 0; i < QEVCONSUME; i++) {
		u_int e = (startp + i) & (QEVLEN-1);

		buf[0] = rnd_event_space[e].re_time;
		buf[1] = rnd_event_space[e].re_val;
		add_entropy_words(buf, 2);
	}
	/* and some probably more damaged */
	startc = atomic_add_int_nv(&rnd_event_cons, QEVCONSUME) - QEVCONSUME;
	for (i = 0; i < QEVCONSUME; i++) {
		u_int e = (startc + i) & (QEVLEN-1);

		buf[0] = rnd_event_space[e].re_time;
		buf[1] = rnd_event_space[e].re_val;
		add_entropy_words(buf, 2);
	}
}

/*
 * Grabs a chunk from the entropy_pool[] and slams it through SHA512 when
 * requested.
 */
void
extract_entropy(u_int8_t *buf)
{
	static u_int32_t extract_pool[POOLWORDS];
	u_char digest[SHA512_DIGEST_LENGTH];
	SHA2_CTX shactx;

#if SHA512_DIGEST_LENGTH < EBUFSIZE
#error "need a bigger hash output"
#endif

	/*
	 * INTENTIONALLY not protected by any lock.  Races during
	 * memcpy() result in acceptable input data; races during
	 * SHA512Update() would create nasty data dependencies.  We
	 * do not rely on this as a benefit, but if it happens, cool.
	 */
	memcpy(extract_pool, entropy_pool, sizeof(extract_pool));

	/* Hash the pool to get the output */
	SHA512Init(&shactx);
	SHA512Update(&shactx, (u_int8_t *)extract_pool, sizeof(extract_pool));
	SHA512Final(digest, &shactx);

	/* Copy data to destination buffer */
	memcpy(buf, digest, EBUFSIZE);

	/*
	 * Modify pool so next hash will produce different results.
	 */
	add_event_data(extract_pool[0]);
	dequeue_randomness(NULL);

	/* Wipe data from memory */
	explicit_bzero(extract_pool, sizeof(extract_pool));
	explicit_bzero(digest, sizeof(digest));
}

/* random keystream by ChaCha */

struct mutex rndlock = MUTEX_INITIALIZER(IPL_HIGH);
struct timeout rndreinit_timeout = TIMEOUT_INITIALIZER(rnd_reinit, NULL);
struct task rnd_task = TASK_INITIALIZER(rnd_init, NULL);

static chacha_ctx rs;		/* chacha context for random keystream */
/* keystream blocks (also chacha seed from boot) */
static u_char rs_buf[RSBUFSZ];
u_char rs_buf0[RSBUFSZ] __attribute__((section(".openbsd.randomdata")));
static size_t rs_have;		/* valid bytes at end of rs_buf */
static size_t rs_count;		/* bytes till reseed */

void
suspend_randomness(void)
{
	struct timespec ts;

	getnanotime(&ts);
	enqueue_randomness(ts.tv_sec);
	enqueue_randomness(ts.tv_nsec);

	dequeue_randomness(NULL);
	rs_count = 0;
	arc4random_buf(entropy_pool, sizeof(entropy_pool));
}

void
resume_randomness(char *buf, size_t buflen)
{
	struct timespec ts;

	if (buf && buflen)
		_rs_seed(buf, buflen);
	getnanotime(&ts);
	enqueue_randomness(ts.tv_sec);
	enqueue_randomness(ts.tv_nsec);

	dequeue_randomness(NULL);
	rs_count = 0;
}

static inline void _rs_rekey(u_char *dat, size_t datlen);

static inline void
_rs_init(u_char *buf, size_t n)
{
	KASSERT(n >= KEYSZ + IVSZ);
	chacha_keysetup(&rs, buf, KEYSZ * 8);
	chacha_ivsetup(&rs, buf + KEYSZ, NULL);
}

static void
_rs_seed(u_char *buf, size_t n)
{
	_rs_rekey(buf, n);

	/* invalidate rs_buf */
	rs_have = 0;
	memset(rs_buf, 0, sizeof(rs_buf));

	rs_count = 1600000;
}

static void
_rs_stir(int do_lock)
{
	struct timespec ts;
	u_int8_t buf[EBUFSIZE], *p;
	int i;

	/*
	 * Use SHA512 PRNG data and a system timespec; early in the boot
	 * process this is the best we can do -- some architectures do
	 * not collect entropy very well during this time, but may have
	 * clock information which is better than nothing.
	 */
	extract_entropy(buf);

	nanotime(&ts);
	for (p = (u_int8_t *)&ts, i = 0; i < sizeof(ts); i++)
		buf[i] ^= p[i];

	if (do_lock)
		mtx_enter(&rndlock);
	_rs_seed(buf, sizeof(buf));
	if (do_lock)
		mtx_leave(&rndlock);
	explicit_bzero(buf, sizeof(buf));

	/* encourage fast-dequeue again */
	rnd_slowextract = 1;
}

static inline void
_rs_stir_if_needed(size_t len)
{
	static int rs_initialized;

	if (!rs_initialized) {
		memcpy(entropy_pool, entropy_pool0, sizeof(entropy_pool));
		memcpy(rs_buf, rs_buf0, sizeof(rs_buf));
		/* seeds cannot be cleaned yet, random_start() will do so */
		_rs_init(rs_buf, KEYSZ + IVSZ);
		rs_count = 1024 * 1024 * 1024;	/* until main() runs */
		rs_initialized = 1;
	} else if (rs_count <= len)
		_rs_stir(0);
	else
		rs_count -= len;
}
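
/*
 * Zero out the boot-loader-provided seed material in place.  The
 * .openbsd.randomdata section may be mapped read-only by this point,
 * so each underlying physical page is given a temporary writable
 * kernel mapping and wiped through that alias.
 */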
static void
_rs_clearseed(const void *p, size_t s)
{
	struct kmem_dyn_mode kd_avoidalias;
	vaddr_t va = trunc_page((vaddr_t)p);
	vsize_t off = (vaddr_t)p - va;
	vsize_t len;
	vaddr_t rwva;
	paddr_t pa;

	while (s > 0) {
		pmap_extract(pmap_kernel(), va, &pa);

		memset(&kd_avoidalias, 0, sizeof(kd_avoidalias));
		kd_avoidalias.kd_prefer = pa;
		kd_avoidalias.kd_waitok = 1;
		rwva = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none,
		    &kd_avoidalias);
		if (!rwva)
			panic("_rs_clearseed");

		pmap_kenter_pa(rwva, pa, PROT_READ | PROT_WRITE);
		pmap_update(pmap_kernel());

		len = MIN(s, PAGE_SIZE - off);
		explicit_bzero((void *)(rwva + off), len);

		pmap_kremove(rwva, PAGE_SIZE);
		km_free((void *)rwva, PAGE_SIZE, &kv_any, &kp_none);

		va += PAGE_SIZE;
		s -= len;
		off = 0;
	}
}

static inline void
_rs_rekey(u_char *dat, size_t datlen)
{
#ifndef KEYSTREAM_ONLY
	memset(rs_buf, 0, sizeof(rs_buf));
#endif
	/* fill rs_buf with the keystream */
	chacha_encrypt_bytes(&rs, rs_buf, rs_buf, sizeof(rs_buf));
	/* mix in optional user provided data */
	if (dat) {
		size_t i, m;

		m = MIN(datlen, KEYSZ + IVSZ);
		for (i = 0; i < m; i++)
			rs_buf[i] ^= dat[i];
	}
	/* immediately reinit for backtracking resistance */
	_rs_init(rs_buf, KEYSZ + IVSZ);
	memset(rs_buf, 0, KEYSZ + IVSZ);
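	/*
	 * The first KEYSZ + IVSZ bytes of keystream were consumed as
	 * the new key/IV and wiped above; only the remainder of rs_buf
	 * is available as output.
	 */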
	rs_have = sizeof(rs_buf) - KEYSZ - IVSZ;
}

static inline void
_rs_random_buf(void *_buf, size_t n)
{
	u_char *buf = (u_char *)_buf;
	size_t m;

	_rs_stir_if_needed(n);
	while (n > 0) {
		if (rs_have > 0) {
			m = MIN(n, rs_have);
			memcpy(buf, rs_buf + sizeof(rs_buf) - rs_have, m);
			memset(rs_buf + sizeof(rs_buf) - rs_have, 0, m);
			buf += m;
			n -= m;
			rs_have -= m;
		}
		if (rs_have == 0)
			_rs_rekey(NULL, 0);
	}
}

static inline void
_rs_random_u32(u_int32_t *val)
{
	_rs_stir_if_needed(sizeof(*val));
	if (rs_have < sizeof(*val))
		_rs_rekey(NULL, 0);
	memcpy(val, rs_buf + sizeof(rs_buf) - rs_have, sizeof(*val));
	memset(rs_buf + sizeof(rs_buf) - rs_have, 0, sizeof(*val));
	rs_have -= sizeof(*val);
}

/* Return one word of randomness from a ChaCha20 generator */
u_int32_t
arc4random(void)
{
	u_int32_t ret;

	mtx_enter(&rndlock);
	_rs_random_u32(&ret);
	mtx_leave(&rndlock);
	return ret;
}

/*
 * Fill a buffer of arbitrary length with ChaCha20-derived randomness.
 */
void
arc4random_buf(void *buf, size_t n)
{
	mtx_enter(&rndlock);
	_rs_random_buf(buf, n);
	mtx_leave(&rndlock);
}

/*
 * Allocate a new ChaCha20 context for the caller to use.
 */
struct arc4random_ctx *
arc4random_ctx_new(void)
{
	char keybuf[KEYSZ + IVSZ];

	chacha_ctx *ctx = malloc(sizeof(chacha_ctx), M_TEMP, M_WAITOK);
	arc4random_buf(keybuf, KEYSZ + IVSZ);
	chacha_keysetup(ctx, keybuf, KEYSZ * 8);
	chacha_ivsetup(ctx, keybuf + KEYSZ, NULL);
	explicit_bzero(keybuf, sizeof(keybuf));
	return (struct arc4random_ctx *)ctx;
}

/*
 * Free a ChaCha20 context created by arc4random_ctx_new().
 */
void
arc4random_ctx_free(struct arc4random_ctx *ctx)
{
	explicit_bzero(ctx, sizeof(chacha_ctx));
	free(ctx, M_TEMP, sizeof(chacha_ctx));
}

/*
 * Use a given ChaCha20 context to fill a buffer.
 */
void
arc4random_ctx_buf(struct arc4random_ctx *ctx, void *buf, size_t n)
{
#ifndef KEYSTREAM_ONLY
	memset(buf, 0, n);
#endif
	chacha_encrypt_bytes((chacha_ctx *)ctx, buf, buf, n);
}

/*
 * Calculate a uniformly distributed random number less than upper_bound
 * avoiding "modulo bias".
 *
 * Uniformity is achieved by generating new random numbers until the one
 * returned is outside the range [0, 2**32 % upper_bound).  This
 * guarantees the selected random number will be inside
 * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
 * after reduction modulo upper_bound.
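 *
 * For example, with upper_bound 10: 2**32 % 10 == 6, so draws in
 * [0, 6) are rejected, and each r in [6, 2**32) maps to r % 10 with
 * exactly (2**32 - 6) / 10 preimages per result.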
 */
u_int32_t
arc4random_uniform(u_int32_t upper_bound)
{
	u_int32_t r, min;

	if (upper_bound < 2)
		return 0;

	/* 2**32 % x == (2**32 - x) % x */
	min = -upper_bound % upper_bound;

	/*
	 * This could theoretically loop forever but each retry has
	 * p > 0.5 (worst case, usually far better) of selecting a
	 * number inside the range we need, so it should rarely need
	 * to re-roll.
	 */
	for (;;) {
		r = arc4random();
		if (r >= min)
			break;
	}

	return r % upper_bound;
}

void
rnd_init(void *null)
{
	_rs_stir(1);
}

/*
 * Called by timeout to mark arc4 for stirring.
 */
void
rnd_reinit(void *v)
{
	task_add(systq, &rnd_task);
	/* 10 minutes, per dm@'s suggestion */
	timeout_add_sec(&rndreinit_timeout, 10 * 60);
}

/*
 * Start periodic services inside the random subsystem, which pull
 * entropy forward, hash it, and re-seed the random stream as needed.
 */
void
random_start(int goodseed)
{
	extern char etext[];

#if !defined(NO_PROPOLICE)
	extern long __guard_local;

	if (__guard_local == 0)
		printf("warning: no entropy supplied by boot loader\n");
#endif

	_rs_clearseed(entropy_pool0, sizeof(entropy_pool0));
	_rs_clearseed(rs_buf0, sizeof(rs_buf0));

	/* Message buffer may contain data from previous boot */
	if (msgbufp->msg_magic == MSG_MAGIC)
		add_entropy_words((u_int32_t *)msgbufp->msg_bufc,
		    msgbufp->msg_bufs / sizeof(u_int32_t));
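	/* Also mix in a window of the kernel image below etext */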
	add_entropy_words((u_int32_t *)etext - 32*1024,
	    8192/sizeof(u_int32_t));

	dequeue_randomness(NULL);
	rnd_init(NULL);
	rnd_reinit(NULL);

	if (goodseed)
		printf("random: good seed from bootblocks\n");
	else {
		/* XXX kernel should work harder here */
		printf("random: boothowto does not indicate good seed\n");
	}
}

int
randomopen(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}

int
randomclose(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}

/*
 * Maximum number of bytes to serve directly from the main ChaCha
 * pool. Larger requests are served from a discrete ChaCha instance keyed
 * from the main pool.
 */
#define RND_MAIN_MAX_BYTES	2048

int
randomread(dev_t dev, struct uio *uio, int ioflag)
{
	struct arc4random_ctx *lctx = NULL;
	size_t		total = uio->uio_resid;
	u_char		*buf;
	int		ret = 0;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);
	if (total > RND_MAIN_MAX_BYTES)
		lctx = arc4random_ctx_new();

	while (ret == 0 && uio->uio_resid > 0) {
		size_t	n = ulmin(POOLBYTES, uio->uio_resid);

		if (lctx != NULL)
			arc4random_ctx_buf(lctx, buf, n);
		else
			arc4random_buf(buf, n);
		ret = uiomove(buf, n, uio);
		if (ret == 0 && uio->uio_resid > 0)
			yield();
	}
	if (lctx != NULL)
		arc4random_ctx_free(lctx);
	explicit_bzero(buf, POOLBYTES);
	free(buf, M_TEMP, POOLBYTES);
	return ret;
}

int
randomwrite(dev_t dev, struct uio *uio, int flags)
{
	int		ret = 0, newdata = 0;
	u_int32_t	*buf;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);

	while (ret == 0 && uio->uio_resid > 0) {
		size_t	n = ulmin(POOLBYTES, uio->uio_resid);

		ret = uiomove(buf, n, uio);
		if (ret != 0)
			break;
		while (n % sizeof(u_int32_t))
			((u_int8_t *)buf)[n++] = 0;
		add_entropy_words(buf, n / 4);
		if (uio->uio_resid > 0)
			yield();
		newdata = 1;
	}

	if (newdata)
		rnd_init(NULL);

	explicit_bzero(buf, POOLBYTES);
	free(buf, M_TEMP, POOLBYTES);
	return ret;
}

int
randomkqfilter(dev_t dev, struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &randomread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &randomwrite_filtops;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

void
filt_randomdetach(struct knote *kn)
{
}

int
filt_randomread(struct knote *kn, long hint)
{
	kn->kn_data = RND_MAIN_MAX_BYTES;
	return (1);
}

int
filt_randomwrite(struct knote *kn, long hint)
{
	kn->kn_data = POOLBYTES;
	return (1);
}

int
randomioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	switch (cmd) {
	case FIOASYNC:
		/* No async flag in softc so this is a no-op. */
		break;
	default:
		return ENOTTY;
	}
	return 0;
}

int
sys_getentropy(struct proc *p, void *v, register_t *retval)
{
	struct sys_getentropy_args /* {
		syscallarg(void *) buf;
		syscallarg(size_t) nbyte;
	} */ *uap = v;
	char buf[GETENTROPY_MAX];
	int error;

	if (SCARG(uap, nbyte) > sizeof(buf))
		return (EINVAL);
	arc4random_buf(buf, SCARG(uap, nbyte));
	if ((error = copyout(buf, SCARG(uap, buf), SCARG(uap, nbyte))) != 0)
		return (error);
	explicit_bzero(buf, sizeof(buf));
	*retval = 0;
	return (0);
}