/*
 * Copyright (c) 2004-2014 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Alex Hornung <alex@alexhornung.com>
 * by Robin J Carey
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*			   --- NOTES ---
 *
 * Note: The word "entropy" is often incorrectly used to describe
 * random data.  The word "entropy" originates in physics.  A more
 * accurate description here would be something along the lines of
 * "seed", "unpredictable numbers" or "unpredictable data".
 *
 * Note: Some /dev/[u]random implementations save a "seed" between
 * boots, which represents a security hazard since an adversary
 * could acquire this data (it is stored in a file).  If the
 * unpredictable data used in the routines below is only generated
 * during Kernel operation, then an adversary can only acquire that
 * data through a Kernel security compromise and/or a cryptographic
 * algorithm failure/cryptanalysis.
 *
 * Note: On FreeBSD-4.11, interrupts have to be manually enabled
 * using the rndcontrol(8) command.
 *
 *		--- DESIGN (FreeBSD-4.11 based) ---
 *
 *   The rnddev module automatically initializes itself the first time
 * it is used (client calls any public rnddev_*() interface routine).
 * Both CSPRNGs are initially seeded from the precise nano[up]time() routines.
 * Tests show this method produces good enough results, suitable for the
 * intended use.  It is necessary for both CSPRNGs to be completely seeded,
 * initially.
 *
 *   After initialization and during Kernel operation the only suitable
 * unpredictable data available is:
 *
 *	(1) Keyboard scan-codes.
 *	(2) Nanouptime acquired by a Keyboard/Read-Event.
 *	(3) Suitable interrupt source; hard-disk/ATA-device.
 *
 *      (X) Mouse-event (xyz-data unsuitable); NOT IMPLEMENTED.
 *
 *   This data is added to both CSPRNGs in real time, as it becomes
 * available.  Additionally, unpredictable (?) data may be acquired
 * from a true-random number generator if such a device is available
 * to the system (not advisable!).
 *   Nanouptime() acquired by a Read-Event is a very important aspect of
 * this design, since it ensures that unpredictable data is added to
 * the CSPRNGs even if there are no other sources.
 *   The nanouptime() Kernel routine is used since time relative to
 * boot is less adversary-known than time itself.
 *
 *   This design has been thoroughly tested with debug logging
 * and the output from both /dev/random and /dev/urandom has
 * been tested with the DIEHARD test-suite; both pass.
 *
 * MODIFICATIONS MADE TO ORIGINAL "kern_random.c":
 *
 * 6th July 2005:
 *
 * o Changed ReadSeed() function to schedule future read-seed-events
 *   by at least one second. Previous implementation used a randomised
 *   scheduling { 0, 1, 2, 3 seconds }.
 * o Changed SEED_NANOUP() function to use a "previous" accumulator
 *   algorithm similar to ReadSeed(). This ensures that there is no
 *   way that an adversary can tell what number is being added to the
 *   CSPRNGs, since the number added to the CSPRNGs at Event-Time is
 *   the sum of nanouptime()@Event and an unknown/secret number.
 * o Changed rnddev_add_interrupt() function to schedule future
 *   interrupt-events by at least one second. Previous implementation
 *   had no scheduling algorithm, which allowed an "interrupt storm"
 *   to occur, resulting in skewed data entering the CSPRNGs.
 *
 *
 * 9th July 2005:
 *
 * o Some small cleanups and change all internal functions to be
 *   static/private.
 * o Removed ReadSeed() since its functionality is already performed
 *   by another function { rnddev_add_interrupt_OR_read() } and removed
 *   the silly rndByte accumulator/feedback-thing (since multiplying by
 *   rndByte could yield a value of 0).
 * o Made the IBAA/L14 public interface static/private, local to this
 *   file (the original C modules are unchanged).
 *
 * 16th July 2005:
 *
 * o SEED_NANOUP() -> NANOUP_EVENT() function rename.
 * o Make NANOUP_EVENT() handle the time-buffering directly so that all
 *   time-stamp-events use this single time-buffer (including keyboard).
 *   This removes the dependency on the "time_second" Kernel variable.
 * o Removed second-time-buffer code in rnddev_add_interrupt_OR_read (void).
 * o Rewrote the time-buffering algorithm in NANOUP_EVENT() to use a
 *   randomised time-delay range.
 *
 * 12th Dec 2005:
 *
 * o Updated to (hopefully final) L15 algorithm.
 *
 * 12th June 2006:
 *
 * o Added missing (u_char *) cast in RnddevRead() function.
 * o Changed copyright to 3-clause BSD license and cleaned up the layout
 *   of this file.
 *
 * For a proper changelog, refer to the version control history of this
 * file.
 *
 * January 2020:
 *
 * o Made the random number generator per-cpu.
 *
 * o Certain entropy sources, such as RDRAND, are per-cpu.
 *
 * o Fixed sources, such as entropy saved across reboots, are split
 *   across available cpus.  Interrupt and generic sources are dribbled
 *   across available cpus.
 *
 * o In addition, we chain random generator data into the buffer randomness
 *   from cpu to cpu to force the cpus to diverge quickly from their initial
 *   states.  This ensures that no cpu is starved.  This is done at
 *   early-init and also at regular intervals by rand_thread_loop().
 *
 *   The chaining forces the cpus to diverge quickly and also ensures that
 *   all entropy data ultimately affects all cpus regardless of which cpu
 *   the entropy was injected into.  The combination should be pretty killer.
 */

#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/event.h>
#include <sys/random.h>
#include <sys/systimer.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/sysmsg.h>
#include <sys/spinlock.h>
#include <sys/csprng.h>
#include <sys/malloc.h>
#include <machine/atomic.h>
#include <machine/clock.h>

#include <sys/spinlock2.h>
#include <sys/signal2.h>

static struct csprng_state *csprng_pcpu;
static struct csprng_state csprng_boot;

static MALLOC_DEFINE(M_NRANDOM, "nrandom", "csprng");

static int add_buffer_randomness_state(struct csprng_state *state,
			    const char *buf, int bytes, int srcid);

static
struct csprng_state *
iterate_csprng_state(int bytes __unused)
{
	static unsigned int csprng_iterator;
	unsigned int n;

	if (csprng_pcpu) {
		n = csprng_iterator++ % ncpus;
		return &csprng_pcpu[n];
	}
	return NULL;
}
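
/*
 * NOTE: csprng_iterator is intentionally left unlocked; concurrent
 * callers may race the increment, which merely perturbs the round-robin
 * order and is harmless for distribution purposes.  A sketch of the
 * fan-out, assuming ncpus == 4:
 *
 *	call #:	1  2  3  4  5  6 ...
 *	state:	0  1  2  3  0  1 ...
 */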

/*
 * Portability note: The u_char/unsigned char type is used where
 * uint8_t from <stdint.h> or u_int8_t from <sys/types.h> should
 * really be used.  On FreeBSD, it is safe to assume that these
 * different types are equivalent (on all architectures).
 * The FreeBSD <sys/crypto/rc4> module also makes this assumption.
 */

/*------------------------------ IBAA ----------------------------------*/

/*-------------------------- IBAA CSPRNG -------------------------------*/

/*
 * NOTE: The original source code from which this source code (IBAA)
 *       was taken has no copyright/license. The algorithm has no patent
 *       and is freely/publicly available from:
 *
 *           http://www.burtleburtle.net/bob/rand/isaac.html
 */

typedef	u_int32_t	u4;   /* unsigned four bytes, 32 bits */

static void IBAA
(
	u4 *m,		/* Memory: array of SIZE ALPHA-bit terms */
	u4 *r,		/* Results: the sequence, same size as m */
	u4 *aa,		/* Accumulator: a single value */
	u4 *bb,		/* the previous result */
	u4 *counter	/* counter */
)
{
	u4 a, b, x, y, i;

	a = *aa;
	b = *bb + *counter;
	++*counter;
	for (i = 0; i < SIZE; ++i) {
		x = m[i];
		a = barrel(a) + m[ind(i + (SIZE / 2))];	/* set a */
		m[i] = y = m[ind(x)] + a + b;		/* set m */
		r[i] = b = m[ind(y >> ALPHA)] + x;	/* set r */
	}
	*bb = b; *aa = a;
}
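
/*
 * The per-step recurrence above, in Jenkins' notation:
 *
 *	a = barrel(a) + m[ind(i + SIZE/2)];	(new accumulator)
 *	m[i] = y = m[ind(x)] + a + b;		(new memory term)
 *	r[i] = b = m[ind(y >> ALPHA)] + x;	(new result/feedback)
 *
 * ind() and barrel() are assumed to come from the csprng header:
 * ind() masks its argument down to a valid index into the SIZE-entry
 * table, and barrel() is a fixed rotate-style mix of the accumulator.
 * The (*counter) term added to b decouples successive batches even if
 * the memory array were ever to revisit a state.
 */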

/*-------------------------- IBAA CSPRNG -------------------------------*/

static void	IBAA_Init(struct ibaa_state *ibaa);
static void	IBAA_Call(struct ibaa_state *ibaa);
static void	IBAA_Seed(struct ibaa_state *ibaa, u_int32_t val);
static u_char	IBAA_Byte(struct ibaa_state *ibaa);

/*
 * Initialize IBAA.
 */
static void
IBAA_Init(struct ibaa_state *ibaa)
{
	size_t	i;

	for (i = 0; i < SIZE; ++i) {
		ibaa->IBAA_memory[i] = i;
	}
	ibaa->memIndex = 0;
	ibaa->IBAA_aa = 0;
	ibaa->IBAA_bb = 0;
	ibaa->IBAA_counter = 0;
	/* force IBAA_Call() */
	ibaa->IBAA_byte_index = sizeof(ibaa->IBAA_results);
}

/*
 * PRIVATE: Call IBAA to produce 256 32-bit u4 results.
 */
static void
IBAA_Call (struct ibaa_state *ibaa)
{
	IBAA(ibaa->IBAA_memory, ibaa->IBAA_results,
	     &ibaa->IBAA_aa, &ibaa->IBAA_bb,
	     &ibaa->IBAA_counter);
	ibaa->IBAA_byte_index = 0;
}

/*
 * Add a 32-bit u4 seed value into IBAA's memory.  Mix the low 4 bits
 * with 4 bits of PRNG output to reduce the possibility of a
 * seeding-based attack.
 */
static void
IBAA_Seed (struct ibaa_state *ibaa, const u_int32_t val)
{
	u4 *iptr;

	iptr = &ibaa->IBAA_memory[ibaa->memIndex & MASK];
	*iptr = ((*iptr << 3) | (*iptr >> 29)) + (val ^ (IBAA_Byte(ibaa) & 15));
	++ibaa->memIndex;
}

static void
IBAA_Vector (struct ibaa_state *ibaa, const char *buf, int bytes)
{
	int i;

	while (bytes >= sizeof(int)) {
		IBAA_Seed(ibaa, *(const int *)buf);
		buf += sizeof(int);
		bytes -= sizeof(int);
	}

	/*
	 * Warm up the generator to get rid of weak initial states.
	 */
	for (i = 0; i < 10; ++i)
		IBAA_Call(ibaa);
}
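
/*
 * NOTE: IBAA_Vector() folds the buffer in word-at-a-time; any trailing
 * bytes short of sizeof(int) are silently dropped, so callers that care
 * about every byte should pad to a multiple of sizeof(int).  The ten
 * IBAA_Call()s afterwards cycle ten full result batches so a freshly
 * seeded state never emits its (largely predictable) initial output.
 */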

/*
 * Extract a byte from IBAA's 256 32-bit u4 results array.
 *
 * NOTE: This code is designed to prevent MP races from taking
 * IBAA_byte_index out of bounds.
 */
static u_char
IBAA_Byte(struct ibaa_state *ibaa)
{
	u_char result;
	int index;

	index = ibaa->IBAA_byte_index;
	if (index == sizeof(ibaa->IBAA_results)) {
		IBAA_Call(ibaa);
		index = 0;
	}
	result = ((u_char *)ibaa->IBAA_results)[index];
	ibaa->IBAA_byte_index = index + 1;

	return result;
}

/*------------------------------ IBAA ----------------------------------*/


/*------------------------------- L15 ----------------------------------*/

/*
 * IMPORTANT NOTE: LByteType must be exactly 8 bits in size or this software
 * will not function correctly.
 */
typedef unsigned char	LByteType;

/*
 * PRIVATE FUNCS:
 */

static void		L15_Swap(struct l15_state *l15, const LByteType pos1,
				const LByteType pos2);
static void		L15_InitState(struct l15_state *l15);
static void		L15_KSA(struct l15_state *l15,
				const LByteType * const key,
				const size_t keyLen);
static void		L15_Discard(struct l15_state *l15,
				const LByteType numCalls);

/*
 * PUBLIC INTERFACE:
 */
static void		L15_Init(struct l15_state *l15,
				const LByteType * const key,
				const size_t keyLen);
static LByteType	L15_Byte(struct l15_state *l15);
static void		L15_Vector(struct l15_state *l15,
				const LByteType * const key,
				const size_t keyLen);

static __inline void
L15_Swap(struct l15_state *l15, const LByteType pos1, const LByteType pos2)
{
	LByteType save1;

	save1 = l15->L15_state[pos1];
	l15->L15_state[pos1] = l15->L15_state[pos2];
	l15->L15_state[pos2] = save1;
}

static void
L15_InitState (struct l15_state *l15)
{
	int i;

	for (i = 0; i < L15_STATE_SIZE; ++i)
		l15->L15_state[i] = i;
}

#define  L_SCHEDULE(xx)						\
								\
for (i = 0; i < L15_STATE_SIZE; ++i) {				\
    L15_Swap(l15, i, (l15->stateIndex += (l15->L15_state[i] + (xx))));	\
}

static void
L15_KSA (struct l15_state *l15, const LByteType * const key,
	 const size_t keyLen)
{
	size_t	i, keyIndex;

	for (keyIndex = 0; keyIndex < keyLen; ++keyIndex) {
		L_SCHEDULE(key[keyIndex]);
	}
	L_SCHEDULE(keyLen);
}
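
/*
 * L15_KSA() is an RC4-style key schedule: each key byte (and finally the
 * key length itself) advances stateIndex and swaps two table entries.
 * Because L15_Swap() takes LByteType positions, the index arithmetic
 * wraps modulo 256 automatically -- which assumes L15_STATE_SIZE is 256.
 */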

static void
L15_Discard(struct l15_state *l15, const LByteType numCalls)
{
	LByteType i;

	for (i = 0; i < numCalls; ++i) {
		(void)L15_Byte(l15);
	}
}


/*
 * PUBLIC INTERFACE:
 */
static void
L15_Init(struct l15_state *l15, const LByteType *key,
	 const size_t keyLen)
{
	l15->stateIndex = 0;
	l15->L15_x = 0;
	l15->L15_start_x = 0;
	l15->L15_y = L15_STATE_SIZE - 1;
	L15_InitState(l15);
	L15_KSA(l15, key, keyLen);
	L15_Discard(l15, L15_Byte(l15));
}

static LByteType
L15_Byte(struct l15_state *l15)
{
	LByteType z;

	L15_Swap(l15, l15->L15_state[l15->L15_x], l15->L15_y);
	z = (l15->L15_state[l15->L15_x++] + l15->L15_state[l15->L15_y--]);
	if (l15->L15_x == l15->L15_start_x) {
		--l15->L15_y;
	}
	return (l15->L15_state[z]);
}
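
/*
 * L15_Byte() walks L15_x forward and L15_y backward through the table,
 * swapping as it goes; the extra --L15_y when L15_x laps L15_start_x
 * keeps the two walks from settling into a fixed relative phase.  Note
 * the output is the indirected lookup L15_state[z], not z itself, which
 * hides the raw index sum from the consumer.
 */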

static void
L15_Vector(struct l15_state *l15, const LByteType * const key,
	   const size_t keyLen)
{
	L15_KSA(l15, key, keyLen);
}

/*------------------------------- L15 ----------------------------------*/

/************************************************************************
 *				KERNEL INTERFACE			*
 ************************************************************************
 *
 * By Robin J Carey, Matthew Dillon and Alex Hornung.
 */

static int rand_thread_value;
static void NANOUP_EVENT(struct timespec *last, struct csprng_state *state);
static thread_t rand_td;

static int sysctl_kern_random(SYSCTL_HANDLER_ARGS);

static int rand_mode = 2;
static struct systimer systimer_rand;

static int sysctl_kern_rand_mode(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern, OID_AUTO, random, CTLFLAG_RD | CTLFLAG_ANYBODY, 0, 0,
		sysctl_kern_random, "I", "Acquire random data");
SYSCTL_PROC(_kern, OID_AUTO, rand_mode, CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
    sysctl_kern_rand_mode, "A", "RNG mode (csprng, ibaa or mixed)");


/*
 * Called twice.  Once very early on when ncpus is still 1 (before
 * kmalloc or much of anything else is available), and then again later
 * after SMP has been heated up.
 *
 * The early initialization is needed so various subsystems early in the
 * boot have some source of pseudo random bytes.
 */
void
rand_initialize(void)
{
	struct csprng_state *state;
	struct timespec	now;
	globaldata_t rgd;
	char buf[64];
	int i;
	int n;

	if (ncpus == 1) {
		csprng_pcpu = &csprng_boot;
	} else {
		csprng_pcpu = kmalloc(ncpus * sizeof(*csprng_pcpu),
				      M_NRANDOM, M_WAITOK | M_ZERO);
	}

	for (n = 0; n < ncpus; ++n) {
		state = &csprng_pcpu[n];
		rgd = globaldata_find(n);

		/* CSPRNG */
		csprng_init(state);

		/* Initialize IBAA. */
		IBAA_Init(&state->ibaa);

		/* Initialize L15. */
		nanouptime(&now);
		L15_Init(&state->l15,
		    (const LByteType *)&now.tv_nsec, sizeof(now.tv_nsec));

		for (i = 0; i < (SIZE / 2); ++i) {
			nanotime(&now);
			state->inject_counter[RAND_SRC_TIMING] = 0;
			add_buffer_randomness_state(state,
						(const uint8_t *)&now.tv_nsec,
						sizeof(now.tv_nsec),
						RAND_SRC_TIMING);

			nanouptime(&now);
			state->inject_counter[RAND_SRC_TIMING] = 0;
			add_buffer_randomness_state(state,
						(const uint8_t *)&now.tv_nsec,
						sizeof(now.tv_nsec),
						RAND_SRC_TIMING);
		}

		/*
		 * In the second call the globaldata structure has enough
		 * differentiation to give us decent initial divergence
		 * between cpus.  It isn't really all that random but it's
		 * better than nothing.
		 */
		state->inject_counter[RAND_SRC_THREAD2] = 0;
		add_buffer_randomness_state(state,
					    (void *)rgd,
					    sizeof(*rgd),
					    RAND_SRC_THREAD2);

		/*
		 * Warm up the generator to get rid of weak initial states.
		 */
		for (i = 0; i < 10; ++i)
			IBAA_Call(&state->ibaa);

		/*
		 * Chain to the next cpu to create as much divergence as
		 * possible.  Pull the chaining bytes from the current cpu's
		 * generator first so that an uninitialized buf is never
		 * injected.
		 */
		read_random(buf, sizeof(buf), 1);
		state->inject_counter[RAND_SRC_TIMING] = 0;
		add_buffer_randomness_state(state, buf, sizeof(buf),
					    RAND_SRC_TIMING);
	}
}

SYSINIT(rand1, SI_BOOT2_POST_SMP, SI_ORDER_SECOND, rand_initialize, 0);

/*
 * Keyboard events
 */
void
add_keyboard_randomness(u_char scancode)
{
	struct csprng_state *state;

	state = iterate_csprng_state(1);
	if (state) {
		spin_lock(&state->spin);
		L15_Vector(&state->l15,
			   (const LByteType *)&scancode, sizeof (scancode));
		++state->nrandevents;
		++state->nrandseed;
		spin_unlock(&state->spin);
		add_interrupt_randomness(0);
	}
}

/*
 * Interrupt events.  This is SMP safe and allowed to race.
 *
 * This adjusts rand_thread_value, which will be incorporated into the
 * next time-buffered seed.  It does not affect the seeding period per se.
 */
void
add_interrupt_randomness(int intr)
{
	if (tsc_present) {
		rand_thread_value = (rand_thread_value << 4) ^ 1 ^
				    ((int)rdtsc() % 151);
	}
	++rand_thread_value;				/* ~1 bit */
}
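
/*
 * Worked example of the mixing above: each call shifts the accumulator
 * left four bits, XORs in 1 and the TSC reduced modulo 151 (a prime),
 * then increments.  Only interrupt *timing* is harvested here; the
 * intr argument is currently unused, and the accumulated value is
 * folded into the next NANOUP_EVENT() seed rather than injected
 * directly.
 */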

/*
 * Add entropy to our rng.  Half the time we add the entropy to both
 * csprngs and the other half of the time we add the entropy to one
 * or the other (so both don't get generated from the same entropy).
 */
static int
add_buffer_randomness_state(struct csprng_state *state,
			    const char *buf, int bytes, int srcid)
{
	uint8_t ic;

	if (state) {
		spin_lock(&state->spin);
		ic = ++state->inject_counter[srcid & 255];
		if (ic & 1) {
			L15_Vector(&state->l15, (const LByteType *)buf, bytes);
			IBAA_Vector(&state->ibaa, buf, bytes);
			csprng_add_entropy(state, srcid & RAND_SRC_MASK,
					   (const uint8_t *)buf, bytes, 0);
		} else if (ic & 2) {
			L15_Vector(&state->l15, (const LByteType *)buf, bytes);
			IBAA_Vector(&state->ibaa, buf, bytes);
		} else {
			csprng_add_entropy(state, srcid & RAND_SRC_MASK,
					   (const uint8_t *)buf, bytes, 0);
		}
		++state->nrandevents;
		state->nrandseed += bytes;
		spin_unlock(&state->spin);
		wakeup(state);
	}

	return 0;
}
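
/*
 * Routing implied by the per-source inject_counter above (ic is the
 * post-increment value):
 *
 *	ic odd		-> L15 + IBAA + csprng	(half of all calls)
 *	ic % 4 == 2	-> L15 + IBAA only	(a quarter)
 *	ic % 4 == 0	-> csprng only		(a quarter)
 *
 * so the csprng and the L15/IBAA pair each also receive entropy the
 * other never sees.
 */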

/*
 * Add buffer randomness from miscellaneous sources.  Large amounts of
 * generic random data will be split across available cpus.
 */
int
add_buffer_randomness(const char *buf, int bytes)
{
	struct csprng_state *state;

	state = iterate_csprng_state(bytes);
	return add_buffer_randomness_state(state, buf, bytes, RAND_SRC_UNKNOWN);
}

int
add_buffer_randomness_src(const char *buf, int bytes, int srcid)
{
	struct csprng_state *state;
	int n;

	while (csprng_pcpu && bytes) {
		n = bytes;
		if (srcid & RAND_SRCF_PCPU) {
			state = &csprng_pcpu[mycpu->gd_cpuid];
		} else {
			state = iterate_csprng_state(bytes);
			if (n > 256)
				n = 256;
		}
		add_buffer_randomness_state(state, buf, n, srcid);
		bytes -= n;
		buf += n;
	}
	return 0;
}
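
/*
 * Example for a hypothetical driver pushing harvested bytes (the srcid
 * values are the ones already used in this file):
 *
 *	add_buffer_randomness_src(harvest_buf, harvest_len, RAND_SRC_INTR);
 *
 * Without RAND_SRCF_PCPU the data is dribbled across cpus in chunks of
 * at most 256 bytes; with it, everything lands on the calling cpu's
 * state (useful for inherently per-cpu sources such as RDRAND).
 */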

/*
 * Kqueue filter (always succeeds)
 */
int
random_filter_read(struct knote *kn, long hint)
{
	return (1);
}

/*
 * Heavy-weight random number generator.  May return fewer than the
 * requested number of bytes.
 *
 * Instead of stopping early, callers that must have a full buffer pass
 * a non-zero `unlimited' argument, which maps to CSPRNG_UNLIMITED below.
 */
u_int
read_random(void *buf, u_int nbytes, int unlimited)
{
	struct csprng_state *state;
	int i, j;

	if (csprng_pcpu == NULL) {
		kprintf("read_random: csprng not yet ready\n");
		return 0;
	}
	state = &csprng_pcpu[mycpu->gd_cpuid];

	spin_lock(&state->spin);
	if (rand_mode == 0) {
		/* Only use CSPRNG */
		i = csprng_get_random(state, buf, nbytes,
				      unlimited ? CSPRNG_UNLIMITED : 0);
	} else if (rand_mode == 1) {
		/* Only use IBAA */
		for (i = 0; i < nbytes; i++)
			((u_char *)buf)[i] = IBAA_Byte(&state->ibaa);
	} else {
		/* Mix both CSPRNG and IBAA */
		i = csprng_get_random(state, buf, nbytes,
				      unlimited ? CSPRNG_UNLIMITED : 0);
		for (j = 0; j < i; j++)
			((u_char *)buf)[j] ^= IBAA_Byte(&state->ibaa);
	}
	spin_unlock(&state->spin);
	add_interrupt_randomness(0);

	return (i > 0) ? i : 0;
}
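
/*
 * Typical in-kernel usage, as a sketch.  Callers that must have a full
 * buffer pass unlimited != 0; otherwise they have to handle short reads:
 *
 *	char key[32];
 *
 *	if (read_random(key, sizeof(key), 0) != sizeof(key))
 *		...	(short read; retry or pass unlimited == 1)
 */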

/*
 * Read random data via sysctl().
 */
static
int
sysctl_kern_random(SYSCTL_HANDLER_ARGS)
{
	char buf[256];
	size_t n;
	size_t r;
	int error = 0;

	n = req->oldlen;
	if (n > 1024 * 1024)
		n = 1024 * 1024;
	while (n > 0) {
		if ((r = n) > sizeof(buf))
			r = sizeof(buf);
		read_random(buf, r, 1);
		error = SYSCTL_OUT(req, buf, r);
		if (error)
			break;
		n -= r;
	}
	return(error);
}
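
/*
 * Userland can pull bytes through this handler without opening
 * /dev/urandom; a sketch using the standard sysctl(3) API:
 *
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *
 *	sysctlbyname("kern.random", buf, &len, NULL, 0);
 *
 * Requests are served in 256-byte chunks and capped at 1MB per call.
 */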

int
sys_getrandom(struct sysmsg *sysmsg, const struct getrandom_args *uap)
{
	char buf[256];
	ssize_t bytes;
	ssize_t r;
	ssize_t n;
	int error;
	int sigcnt;

	bytes = (ssize_t)uap->len;
	if (bytes < 0)
		return EINVAL;

	r = 0;
	error = 0;
	sigcnt = 0;

	while (r < bytes) {
		n = (ssize_t)sizeof(buf);
		if (n > bytes - r)
			n = bytes - r;
		read_random(buf, n, 1);
		error = copyout(buf, (char *)uap->buf + r, n);
		if (error)
			break;
		r += n;
		lwkt_user_yield();
		if (++sigcnt == 128) {
			sigcnt = 0;
			if (CURSIG_NOBLOCK(curthread->td_lwp) != 0) {
				error = EINTR;
				break;
			}
		}
	}
	if (error == 0)
		sysmsg->sysmsg_szresult = r;

	return error;
}
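
/*
 * NOTE: the loop above copies out in 256-byte chunks, yielding the cpu
 * between chunks and polling for pending signals every 128 chunks
 * (i.e. every 32KB), so a huge getrandom() request cannot wedge the
 * thread; an interrupted request fails with EINTR rather than
 * returning a partial count.
 */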

/*
 * Change the random mode via sysctl().
 */
static
const char *
rand_mode_to_str(int mode)
{
	switch (mode) {
	case 0:
		return "csprng";
	case 1:
		return "ibaa";
	case 2:
		return "mixed";
	default:
		return "unknown";
	}
}

static
int
sysctl_kern_rand_mode(SYSCTL_HANDLER_ARGS)
{
	char mode[32];
	int error;

	strncpy(mode, rand_mode_to_str(rand_mode), sizeof(mode) - 1);
	error = sysctl_handle_string(oidp, mode, sizeof(mode), req);
	if (error || req->newptr == NULL)
		return error;

	if ((strncmp(mode, "csprng", sizeof(mode))) == 0)
		rand_mode = 0;
	else if ((strncmp(mode, "ibaa", sizeof(mode))) == 0)
		rand_mode = 1;
	else if ((strncmp(mode, "mixed", sizeof(mode))) == 0)
		rand_mode = 2;
	else
		error = EINVAL;

	return error;
}
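
/*
 * Example usage from userland:
 *
 *	sysctl kern.rand_mode		(reports: csprng, ibaa or mixed)
 *	sysctl kern.rand_mode=ibaa	(switch read_random() to IBAA only)
 *
 * Unrecognized strings are rejected with EINVAL and leave the mode
 * unchanged.
 */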

/*
 * Random number generator helper thread.  This limits code overhead from
 * high frequency events by delaying the clearing of rand_thread_value.
 *
 * This is a time-buffered loop, with a randomizing delay.  Note that
 * interrupt entropy does not cause the thread to wake up any faster,
 * but does improve the quality of the entropy produced.
 *
 * In addition, we pull statistics from available cpus.
 */
static
void
rand_thread_loop(void *dummy)
{
	struct csprng_state *state;
	globaldata_t rgd;
	int64_t count;
	char buf[32];
	uint32_t wcpu;
	struct timespec	last;

	wcpu = 0;
	bzero(&last, sizeof(last));

	for (;;) {
		/*
		 * Generate entropy.
		 */
		wcpu = (wcpu + 1) % ncpus;
		state = &csprng_pcpu[wcpu];
		rgd = globaldata_find(wcpu);

		NANOUP_EVENT(&last, state);
		spin_lock(&state->spin);
		count = (uint8_t)L15_Byte(&state->l15);
		spin_unlock(&state->spin);

		/*
		 * Calculate 1/10 of a second to 2/10 of a second, fine-grained
		 * using a L15_Byte() feedback.
		 *
		 * Go faster in the first 120 seconds after boot.  This affects
		 * the time-after-next interrupt (pipeline delay).
		 */
		count = muldivu64(sys_cputimer->freq, count + 256, 256 * 10);
		if (time_uptime < 120)
			count = count / 10 + 1;
		systimer_rand.periodic = count;

		/*
		 * Force cpus to diverge.  Chained state and per-cpu state
		 * is thrown in.
		 */
		add_buffer_randomness_state(state,
					    buf, sizeof(buf),
					    RAND_SRC_THREAD1);
		add_buffer_randomness_state(state,
					    (void *)&rgd->gd_cnt,
					    sizeof(rgd->gd_cnt),
					    RAND_SRC_THREAD2);
		add_buffer_randomness_state(state,
					    (void *)&rgd->gd_vmtotal,
					    sizeof(rgd->gd_vmtotal),
					    RAND_SRC_THREAD3);

		read_random(buf, sizeof(buf), 1);

		tsleep(rand_td, 0, "rwait", 0);
	}
}
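
/*
 * Worked numbers for the period calculation above: L15_Byte() yields
 * count in [0, 255], so
 *
 *	periodic = freq * (count + 256) / 2560
 *
 * lands between freq/10 and roughly freq/5, i.e. a self-randomizing
 * 0.1 to 0.2 second interval.  For the first 120 seconds after boot the
 * count / 10 + 1 shortcut tightens this to roughly 10-20ms so the
 * per-cpu states season quickly.
 */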

/*
 * Systimer trigger - fine-grained random trigger
 */
static
void
rand_thread_wakeup(struct systimer *timer, int in_ipi, struct intrframe *frame)
{
	wakeup(rand_td);
}

static
void
rand_thread_init(void)
{
	systimer_init_periodic_nq(&systimer_rand, rand_thread_wakeup, NULL, 25);
	lwkt_create(rand_thread_loop, NULL, &rand_td, NULL, 0, 0, "random");
}

SYSINIT(rand2, SI_SUB_HELPER_THREADS, SI_ORDER_ANY, rand_thread_init, 0);

/*
 * Caller is time-buffered.  Incorporate any accumulated interrupt randomness
 * as well as the high frequency bits of the TSC.
 *
 * A delta nanoseconds value is used to remove absolute time from the generated
 * entropy.  Even though we are pushing 32 bits, this entropy is probably only
 * good for one or two bits without any interrupt sources, and possibly
 * 8 bits with.
 */
static void
NANOUP_EVENT(struct timespec *last, struct csprng_state *state)
{
	struct timespec		now;
	int			nsec;

	/*
	 * Delta nanoseconds since last event
	 */
	nanouptime(&now);
	nsec = now.tv_nsec - last->tv_nsec;
	*last = now;

	/*
	 * Interrupt randomness.
	 */
	nsec ^= rand_thread_value;

	/*
	 * The TSC, if present, generally has an even higher
	 * resolution.  Integrate a portion of it into our seed.
	 */
	if (tsc_present)
		nsec ^= (rdtsc() & 255) << 8;

	/*
	 * Ok.
	 */
	add_buffer_randomness_state(state,
				    (const uint8_t *)&nsec, sizeof(nsec),
				    RAND_SRC_INTR);
}
966