xref: /netbsd/sys/kern/kern_entropy.c (revision a681d7f6)
1 /*	$NetBSD: kern_entropy.c,v 1.62 2023/06/30 21:42:05 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2019 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Entropy subsystem
34  *
35  *	* Each CPU maintains a per-CPU entropy pool so that gathering
36  *	  entropy requires no interprocessor synchronization, except
37  *	  early at boot when we may be scrambling to gather entropy as
38  *	  soon as possible.
39  *
40  *	  - entropy_enter gathers entropy and never drops it on the
41  *	    floor, at the cost of sometimes having to do cryptography.
42  *
43  *	  - entropy_enter_intr gathers entropy or drops it on the
44  *	    floor, with low latency.  Work to stir the pool or kick the
45  *	    housekeeping thread is scheduled in soft interrupts.
46  *
47  *	* entropy_enter immediately enters into the global pool if it
48  *	  can transition to full entropy in one swell foop.  Otherwise,
49  *	  it defers to a housekeeping thread that consolidates entropy,
50  *	  but only when the CPUs collectively have full entropy, in
51  *	  order to mitigate iterative-guessing attacks.
52  *
53  *	* The entropy housekeeping thread continues to consolidate
54  *	  entropy even after we think we have full entropy, in case we
55  *	  are wrong, but is limited to one discretionary consolidation
56  *	  per minute, and only when new entropy is actually coming in,
57  *	  to limit performance impact.
58  *
59  *	* The entropy epoch is the number that changes when we
60  *	  transition from partial entropy to full entropy, so that
61  *	  users can easily determine when to reseed.  This also
62  *	  facilitates an operator explicitly causing everything to
63  *	  reseed by sysctl -w kern.entropy.consolidate=1.
64  *
65  *	* Entropy depletion is available for testing (or if you're into
66  *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
67  *	  the logic to support it is small, to minimize chance of bugs.
68  */
69 
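/*
 * Drivers feed samples into this subsystem through the rndsource(9)
 * API -- rnd_attach_source, rnd_add_data, rnd_add_uint32 -- rather
 * than by calling entropy_enter/entropy_enter_intr directly.  A rough
 * usage model follows (a sketch only; foo_rndsource, foo_attach,
 * foo_intr, and foo_read_sample are hypothetical names, and the claim
 * of 32 bits of entropy assumes a genuine hardware RNG):
 *
 *	static struct krndsource foo_rndsource;
 *
 *	void
 *	foo_attach(device_t self)
 *	{
 *		rnd_attach_source(&foo_rndsource, device_xname(self),
 *		    RND_TYPE_RNG, RND_FLAG_DEFAULT);
 *	}
 *
 *	void
 *	foo_intr(void *cookie)
 *	{
 *		uint32_t sample = foo_read_sample(cookie);
 *
 *		rnd_add_data(&foo_rndsource, &sample, sizeof sample, 32);
 *	}
 *
 * rnd_add_data routes into entropy_enter or entropy_enter_intr below,
 * depending on whether it is called from thread or interrupt context;
 * the last argument is a lower bound on the bits of entropy in the
 * process that generated the sample.
 */
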
70 #include <sys/cdefs.h>
71 __KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.62 2023/06/30 21:42:05 riastradh Exp $");
72 
73 #include <sys/param.h>
74 #include <sys/types.h>
75 #include <sys/atomic.h>
76 #include <sys/compat_stub.h>
77 #include <sys/condvar.h>
78 #include <sys/cpu.h>
79 #include <sys/entropy.h>
80 #include <sys/errno.h>
81 #include <sys/evcnt.h>
82 #include <sys/event.h>
83 #include <sys/file.h>
84 #include <sys/intr.h>
85 #include <sys/kauth.h>
86 #include <sys/kernel.h>
87 #include <sys/kmem.h>
88 #include <sys/kthread.h>
89 #include <sys/lwp.h>
90 #include <sys/module_hook.h>
91 #include <sys/mutex.h>
92 #include <sys/percpu.h>
93 #include <sys/poll.h>
94 #include <sys/proc.h>
95 #include <sys/queue.h>
96 #include <sys/reboot.h>
97 #include <sys/rnd.h>		/* legacy kernel API */
98 #include <sys/rndio.h>		/* userland ioctl interface */
99 #include <sys/rndsource.h>	/* kernel rndsource driver API */
100 #include <sys/select.h>
101 #include <sys/selinfo.h>
102 #include <sys/sha1.h>		/* for boot seed checksum */
103 #include <sys/stdint.h>
104 #include <sys/sysctl.h>
105 #include <sys/syslog.h>
106 #include <sys/systm.h>
107 #include <sys/time.h>
108 #include <sys/xcall.h>
109 
110 #include <lib/libkern/entpool.h>
111 
112 #include <machine/limits.h>
113 
114 #ifdef __HAVE_CPU_COUNTER
115 #include <machine/cpu_counter.h>
116 #endif
117 
118 #define	MINENTROPYBYTES	ENTROPY_CAPACITY
119 #define	MINENTROPYBITS	(MINENTROPYBYTES*NBBY)
120 #define	MINSAMPLES	(2*MINENTROPYBITS)
121 
122 /*
123  * struct entropy_cpu
124  *
125  *	Per-CPU entropy state.  The pool is allocated separately
126  *	because percpu(9) sometimes moves per-CPU objects around
127  *	without zeroing them, which would lead to unwanted copies of
128  *	sensitive secrets.  The evcnt is allocated separately because
129  *	evcnt(9) assumes it stays put in memory.
130  */
131 struct entropy_cpu {
132 	struct entropy_cpu_evcnt {
133 		struct evcnt		softint;
134 		struct evcnt		intrdrop;
135 		struct evcnt		intrtrunc;
136 	}			*ec_evcnt;
137 	struct entpool		*ec_pool;
138 	unsigned		ec_bitspending;
139 	unsigned		ec_samplespending;
140 	bool			ec_locked;
141 };
142 
143 /*
144  * struct entropy_cpu_lock
145  *
146  *	State for locking the per-CPU entropy state.
147  */
148 struct entropy_cpu_lock {
149 	int		ecl_s;
150 	uint64_t	ecl_ncsw;
151 };
152 
153 /*
154  * struct rndsource_cpu
155  *
156  *	Per-CPU rndsource state.
157  */
158 struct rndsource_cpu {
159 	unsigned		rc_entropybits;
160 	unsigned		rc_timesamples;
161 	unsigned		rc_datasamples;
162 	rnd_delta_t		rc_timedelta;
163 };
164 
165 /*
166  * entropy_global (a.k.a. E for short in this file)
167  *
168  *	Global entropy state.  Writes protected by the global lock.
169  *	Some fields, marked (A), can be read outside the lock, and are
170  *	maintained with atomic_load/store_relaxed.
171  */
172 struct {
173 	kmutex_t	lock;		/* covers all global state */
174 	struct entpool	pool;		/* global pool for extraction */
175 	unsigned	bitsneeded;	/* (A) needed globally */
176 	unsigned	bitspending;	/* pending in per-CPU pools */
177 	unsigned	samplesneeded;	/* (A) needed globally */
178 	unsigned	samplespending;	/* pending in per-CPU pools */
179 	unsigned	timestamp;	/* (A) time of last consolidation */
180 	unsigned	epoch;		/* (A) changes when needed -> 0 */
181 	kcondvar_t	cv;		/* notifies state changes */
182 	struct selinfo	selq;		/* notifies needed -> 0 */
183 	struct lwp	*sourcelock;	/* lock on list of sources */
184 	kcondvar_t	sourcelock_cv;	/* notifies sourcelock release */
185 	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
186 	enum entropy_stage {
187 		ENTROPY_COLD = 0, /* single-threaded */
188 		ENTROPY_WARM,	  /* multi-threaded at boot before CPUs */
189 		ENTROPY_HOT,	  /* multi-threaded multi-CPU */
190 	}		stage;
191 	bool		consolidate;	/* kick thread to consolidate */
192 	bool		seed_rndsource;	/* true if seed source is attached */
193 	bool		seeded;		/* true if seed file already loaded */
194 } entropy_global __cacheline_aligned = {
195 	/* Fields that must be initialized when the kernel is loaded.  */
196 	.bitsneeded = MINENTROPYBITS,
197 	.samplesneeded = MINSAMPLES,
198 	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
199 	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
200 	.stage = ENTROPY_COLD,
201 };
202 
203 #define	E	(&entropy_global)	/* declutter */
204 
205 /* Read-mostly globals */
206 static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
207 static void		*entropy_sih __read_mostly; /* softint handler */
208 static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */
209 
210 static struct krndsource seed_rndsource __read_mostly;
211 
212 /*
213  * Event counters
214  *
215  *	Must be careful with adding these because they can serve as
216  *	side channels.
217  */
218 static struct evcnt entropy_discretionary_evcnt =
219     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
220 EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
221 static struct evcnt entropy_immediate_evcnt =
222     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
223 EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
224 static struct evcnt entropy_partial_evcnt =
225     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
226 EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
227 static struct evcnt entropy_consolidate_evcnt =
228     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
229 EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
230 static struct evcnt entropy_extract_fail_evcnt =
231     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
232 EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
233 static struct evcnt entropy_request_evcnt =
234     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
235 EVCNT_ATTACH_STATIC(entropy_request_evcnt);
236 static struct evcnt entropy_deplete_evcnt =
237     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
238 EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
239 static struct evcnt entropy_notify_evcnt =
240     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
241 EVCNT_ATTACH_STATIC(entropy_notify_evcnt);
242 
243 /* Sysctl knobs */
244 static bool	entropy_collection = 1;
245 static bool	entropy_depletion = 0; /* Silly!  */
246 
247 static const struct sysctlnode	*entropy_sysctlroot;
248 static struct sysctllog		*entropy_sysctllog;
249 
250 /* Forward declarations */
251 static void	entropy_init_cpu(void *, void *, struct cpu_info *);
252 static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
253 static void	entropy_account_cpu(struct entropy_cpu *);
254 static void	entropy_enter(const void *, size_t, unsigned, bool);
255 static bool	entropy_enter_intr(const void *, size_t, unsigned, bool);
256 static void	entropy_softintr(void *);
257 static void	entropy_thread(void *);
258 static bool	entropy_pending(void);
259 static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
260 static void	entropy_do_consolidate(void);
261 static void	entropy_consolidate_xc(void *, void *);
262 static void	entropy_notify(void);
263 static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
264 static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
265 static void	filt_entropy_read_detach(struct knote *);
266 static int	filt_entropy_read_event(struct knote *, long);
267 static int	entropy_request(size_t, int);
268 static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
269 		    uint32_t, bool, uint32_t);
270 static unsigned	rndsource_entropybits(struct krndsource *);
271 static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
272 static void	rndsource_to_user(struct krndsource *, rndsource_t *);
273 static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
274 static void	rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);
275 
276 /*
277  * entropy_timer()
278  *
279  *	Cycle counter, time counter, or anything that changes a wee bit
280  *	unpredictably.
281  */
282 static inline uint32_t
283 entropy_timer(void)
284 {
285 	struct bintime bt;
286 	uint32_t v;
287 
288 	/* If we have a CPU cycle counter, use the low 32 bits.  */
289 #ifdef __HAVE_CPU_COUNTER
290 	if (__predict_true(cpu_hascounter()))
291 		return cpu_counter32();
292 #endif	/* __HAVE_CPU_COUNTER */
293 
294 	/* If we're cold, tough.  Can't binuptime while cold.  */
295 	if (__predict_false(cold))
296 		return 0;
297 
298 	/* Fold the 128 bits of binuptime into 32 bits.  */
299 	binuptime(&bt);
300 	v = bt.frac;
301 	v ^= bt.frac >> 32;
302 	v ^= bt.sec;
303 	v ^= bt.sec >> 32;
304 	return v;
305 }
306 
307 static void
308 attach_seed_rndsource(void)
309 {
310 
311 	/*
312 	 * First called no later than entropy_init, while we are still
313 	 * single-threaded, so no need for RUN_ONCE.
314 	 */
315 	if (E->stage >= ENTROPY_WARM || E->seed_rndsource)
316 		return;
317 	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
318 	    RND_FLAG_COLLECT_VALUE);
319 	E->seed_rndsource = true;
320 }
321 
322 /*
323  * entropy_init()
324  *
325  *	Initialize the entropy subsystem.  Panic on failure.
326  *
327  *	Requires percpu(9) and sysctl(9) to be initialized.
328  */
329 static void
330 entropy_init(void)
331 {
332 	uint32_t extra[2];
333 	struct krndsource *rs;
334 	unsigned i = 0;
335 
336 	KASSERT(E->stage == ENTROPY_COLD);
337 
338 	/* Grab some cycle counts early at boot.  */
339 	extra[i++] = entropy_timer();
340 
341 	/* Run the entropy pool cryptography self-test.  */
342 	if (entpool_selftest() == -1)
343 		panic("entropy pool crypto self-test failed");
344 
345 	/* Create the sysctl directory.  */
346 	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
347 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
348 	    SYSCTL_DESCR("Entropy (random number sources) options"),
349 	    NULL, 0, NULL, 0,
350 	    CTL_KERN, CTL_CREATE, CTL_EOL);
351 
352 	/* Create the sysctl knobs.  */
353 	/* XXX These shouldn't be writable at securelevel>0.  */
354 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
355 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
356 	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
357 	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
358 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
359 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
360 	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
361 	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
362 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
363 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
364 	    SYSCTL_DESCR("Trigger entropy consolidation now"),
365 	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
366 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
367 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
368 	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
369 	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
370 	/* XXX These should maybe not be readable at securelevel>0.  */
371 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
372 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
373 	    "needed",
374 	    SYSCTL_DESCR("Systemwide entropy deficit (bits of entropy)"),
375 	    NULL, 0, &E->bitsneeded, 0, CTL_CREATE, CTL_EOL);
376 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
377 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
378 	    "pending",
379 	    SYSCTL_DESCR("Number of bits of entropy pending on CPUs"),
380 	    NULL, 0, &E->bitspending, 0, CTL_CREATE, CTL_EOL);
381 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
382 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
383 	    "samplesneeded",
384 	    SYSCTL_DESCR("Systemwide entropy deficit (samples)"),
385 	    NULL, 0, &E->samplesneeded, 0, CTL_CREATE, CTL_EOL);
386 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
387 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
388 	    "samplespending",
389 	    SYSCTL_DESCR("Number of samples pending on CPUs"),
390 	    NULL, 0, &E->samplespending, 0, CTL_CREATE, CTL_EOL);
391 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
392 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
393 	    "epoch", SYSCTL_DESCR("Entropy epoch"),
394 	    NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);
395 
396 	/* Initialize the global state for multithreaded operation.  */
397 	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
398 	cv_init(&E->cv, "entropy");
399 	selinit(&E->selq);
400 	cv_init(&E->sourcelock_cv, "entsrclock");
401 
402 	/* Make sure the seed source is attached.  */
403 	attach_seed_rndsource();
404 
405 	/* Note if the bootloader didn't provide a seed.  */
406 	if (!E->seeded)
407 		aprint_debug("entropy: no seed from bootloader\n");
408 
409 	/* Allocate the per-CPU records for all early entropy sources.  */
410 	LIST_FOREACH(rs, &E->sources, list)
411 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
412 
413 	/* Allocate and initialize the per-CPU state.  */
414 	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
415 	    entropy_init_cpu, entropy_fini_cpu, NULL);
416 
417 	/* Enter the boot cycle count to get started.  */
418 	extra[i++] = entropy_timer();
419 	KASSERT(i == __arraycount(extra));
420 	entropy_enter(extra, sizeof extra, /*nbits*/0, /*count*/false);
421 	explicit_memset(extra, 0, sizeof extra);
422 
423 	/* We are now ready for multi-threaded operation.  */
424 	E->stage = ENTROPY_WARM;
425 }
426 
427 static void
428 entropy_init_late_cpu(void *a, void *b)
429 {
430 	int bound;
431 
432 	/*
433 	 * We're not necessarily in a softint lwp here (xc_broadcast
434 	 * triggers softint on other CPUs, but calls directly on this
435 	 * CPU), so explicitly bind to the current CPU to invoke the
436 	 * softintr -- this lets us have a simpler assertion in
437 	 * entropy_account_cpu.  Not necessary to avoid migration
438 	 * because xc_broadcast disables kpreemption anyway, but it
439 	 * doesn't hurt.
440 	 */
441 	bound = curlwp_bind();
442 	entropy_softintr(NULL);
443 	curlwp_bindx(bound);
444 }
445 
446 /*
447  * entropy_init_late()
448  *
449  *	Late initialization.  Panic on failure.
450  *
451  *	Requires CPUs to have been detected and LWPs to have started.
452  */
453 static void
454 entropy_init_late(void)
455 {
456 	void *sih;
457 	int error;
458 
459 	KASSERT(E->stage == ENTROPY_WARM);
460 
461 	/*
462 	 * Establish the softint at the highest softint priority level.
463 	 * Must happen after CPU detection.
464 	 */
465 	sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
466 	    &entropy_softintr, NULL);
467 	if (sih == NULL)
468 		panic("unable to establish entropy softint");
469 
470 	/*
471 	 * Create the entropy housekeeping thread.  Must happen after
472 	 * lwpinit.
473 	 */
474 	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
475 	    entropy_thread, NULL, &entropy_lwp, "entbutler");
476 	if (error)
477 		panic("unable to create entropy housekeeping thread: %d",
478 		    error);
479 
480 	/*
481 	 * Wait until the per-CPU initialization has hit all CPUs
482 	 * before proceeding to mark the entropy system hot and
483 	 * enabling use of the softint.
484 	 */
485 	xc_barrier(XC_HIGHPRI);
486 	E->stage = ENTROPY_HOT;
487 	atomic_store_relaxed(&entropy_sih, sih);
488 
489 	/*
490 	 * At this point, entering new samples from interrupt handlers
491 	 * will trigger the softint to process them.  But there may be
492 	 * some samples that were entered from interrupt handlers
493 	 * before the softint was available.  Make sure we process
494 	 * those samples on all CPUs by running the softint logic on
495 	 * all CPUs.
496 	 */
497 	xc_wait(xc_broadcast(XC_HIGHPRI, entropy_init_late_cpu, NULL, NULL));
498 }
499 
500 /*
501  * entropy_init_cpu(ptr, cookie, ci)
502  *
503  *	percpu(9) constructor for per-CPU entropy pool.
504  */
505 static void
506 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
507 {
508 	struct entropy_cpu *ec = ptr;
509 	const char *cpuname;
510 
511 	ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
512 	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
513 	ec->ec_bitspending = 0;
514 	ec->ec_samplespending = 0;
515 	ec->ec_locked = false;
516 
517 	/* XXX ci_cpuname may not be initialized early enough.  */
518 	cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname;
519 	evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL,
520 	    cpuname, "entropy softint");
521 	evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL,
522 	    cpuname, "entropy intrdrop");
523 	evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL,
524 	    cpuname, "entropy intrtrunc");
525 }
526 
527 /*
528  * entropy_fini_cpu(ptr, cookie, ci)
529  *
530  *	percpu(9) destructor for per-CPU entropy pool.
531  */
532 static void
533 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
534 {
535 	struct entropy_cpu *ec = ptr;
536 
537 	/*
538 	 * Zero any lingering data.  Disclosure of the per-CPU pool
539 	 * shouldn't retroactively affect the security of any keys
540 	 * generated, because entpool(9) erases whatever we have just
541 	 * drawn out of any pool, but better safe than sorry.
542 	 */
543 	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
544 
545 	evcnt_detach(&ec->ec_evcnt->intrtrunc);
546 	evcnt_detach(&ec->ec_evcnt->intrdrop);
547 	evcnt_detach(&ec->ec_evcnt->softint);
548 
549 	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
550 	kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt));
551 }
552 
553 /*
554  * ec = entropy_cpu_get(&lock)
555  * entropy_cpu_put(&lock, ec)
556  *
557  *	Lock and unlock the per-CPU entropy state.  This only prevents
558  *	access on the same CPU -- by hard interrupts, by soft
559  *	interrupts, or by other threads.
560  *
561  *	Blocks soft interrupts and preemption altogether; doesn't block
562  *	hard interrupts, but causes samples in hard interrupts to be
563  *	dropped.
564  */
565 static struct entropy_cpu *
566 entropy_cpu_get(struct entropy_cpu_lock *lock)
567 {
568 	struct entropy_cpu *ec;
569 
570 	ec = percpu_getref(entropy_percpu);
571 	lock->ecl_s = splsoftserial();
572 	KASSERT(!ec->ec_locked);
573 	ec->ec_locked = true;
574 	lock->ecl_ncsw = curlwp->l_ncsw;
575 	__insn_barrier();
576 
577 	return ec;
578 }
579 
580 static void
581 entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec)
582 {
583 
584 	KASSERT(ec == percpu_getptr_remote(entropy_percpu, curcpu()));
585 	KASSERT(ec->ec_locked);
586 
587 	__insn_barrier();
588 	KASSERT(lock->ecl_ncsw == curlwp->l_ncsw);
589 	ec->ec_locked = false;
590 	splx(lock->ecl_s);
591 	percpu_putref(entropy_percpu);
592 }
593 
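/*
 * Typical pattern for callers of these two (a sketch distilled from
 * entropy_enter and entropy_softintr below; `buf' and `len' stand for
 * whatever the caller wants to mix in):
 *
 *	struct entropy_cpu_lock lock;
 *	struct entropy_cpu *ec;
 *
 *	ec = entropy_cpu_get(&lock);
 *	entpool_enter(ec->ec_pool, buf, len);
 *	entropy_cpu_put(&lock, ec);
 *
 * Hard interrupts are not blocked while the state is held, so
 * entropy_enter_intr checks ec_locked and drops its sample rather
 * than spin.
 */
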
594 /*
595  * entropy_seed(seed)
596  *
597  *	Seed the entropy pool with seed.  Meant to be called as early
598  *	as possible by the bootloader; may be called before or after
599  *	entropy_init.  Must be called before system reaches userland.
600  *	Must be called in thread or soft interrupt context, not in hard
601  *	interrupt context.  Must be called at most once.
602  *
603  *	Overwrites the seed in place.  Caller may then free the memory.
604  */
605 static void
606 entropy_seed(rndsave_t *seed)
607 {
608 	SHA1_CTX ctx;
609 	uint8_t digest[SHA1_DIGEST_LENGTH];
610 	bool seeded;
611 
612 	/*
613 	 * Verify the checksum.  If the checksum fails, take the data
614 	 * but ignore the entropy estimate -- the file may have been
615 	 * incompletely written with garbage, which is harmless to add
616 	 * but may not be as unpredictable as alleged.
617 	 */
618 	SHA1Init(&ctx);
619 	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
620 	SHA1Update(&ctx, seed->data, sizeof(seed->data));
621 	SHA1Final(digest, &ctx);
622 	CTASSERT(sizeof(seed->digest) == sizeof(digest));
623 	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
624 		printf("entropy: invalid seed checksum\n");
625 		seed->entropy = 0;
626 	}
627 	explicit_memset(&ctx, 0, sizeof ctx);
628 	explicit_memset(digest, 0, sizeof digest);
629 
630 	/*
631 	 * If the entropy is insensibly large, try byte-swapping.
632 	 * Otherwise assume the file is corrupted and act as though it
633 	 * has zero entropy.
634 	 */
635 	if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
636 		seed->entropy = bswap32(seed->entropy);
637 		if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
638 			seed->entropy = 0;
639 	}
640 
641 	/* Make sure the seed source is attached.  */
642 	attach_seed_rndsource();
643 
644 	/* Test and set E->seeded.  */
645 	if (E->stage >= ENTROPY_WARM)
646 		mutex_enter(&E->lock);
647 	seeded = E->seeded;
648 	E->seeded = (seed->entropy > 0);
649 	if (E->stage >= ENTROPY_WARM)
650 		mutex_exit(&E->lock);
651 
652 	/*
653 	 * If we've been seeded, may be re-entering the same seed
654 	 * (e.g., bootloader vs module init, or something).  No harm in
655 	 * entering it twice, but it contributes no additional entropy.
656 	 */
657 	if (seeded) {
658 		printf("entropy: double-seeded by bootloader\n");
659 		seed->entropy = 0;
660 	} else {
661 		printf("entropy: entering seed from bootloader"
662 		    " with %u bits of entropy\n", (unsigned)seed->entropy);
663 	}
664 
665 	/* Enter it into the pool and promptly zero it.  */
666 	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
667 	    seed->entropy);
668 	explicit_memset(seed, 0, sizeof(*seed));
669 }
670 
671 /*
672  * entropy_bootrequest()
673  *
674  *	Request entropy from all sources at boot, once config is
675  *	complete and interrupts are running.
676  */
677 void
678 entropy_bootrequest(void)
679 {
680 	int error;
681 
682 	KASSERT(E->stage >= ENTROPY_WARM);
683 
684 	/*
685 	 * Request enough to satisfy the maximum entropy shortage.
686 	 * This is harmless overkill if the bootloader provided a seed.
687 	 */
688 	mutex_enter(&E->lock);
689 	error = entropy_request(MINENTROPYBYTES, ENTROPY_WAIT);
690 	KASSERT(error == 0);
691 	mutex_exit(&E->lock);
692 }
693 
694 /*
695  * entropy_epoch()
696  *
697  *	Returns the current entropy epoch.  If this changes, you should
698  *	reseed.  A value of -1 means the system has not yet reached
699  *	full entropy or been explicitly consolidated; the epoch never
700  *	reverts to -1.  It is never zero, so you can always use zero
701  *	as an uninitialized sentinel value meaning `reseed ASAP'.
702  *
703  *	Usage model:
704  *
705  *		struct foo {
706  *			struct crypto_prng prng;
707  *			unsigned epoch;
708  *		} *foo;
709  *
710  *		unsigned epoch = entropy_epoch();
711  *		if (__predict_false(epoch != foo->epoch)) {
712  *			uint8_t seed[32];
713  *			if (entropy_extract(seed, sizeof seed, 0) != 0)
714  *				warn("no entropy");
715  *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
716  *			foo->epoch = epoch;
717  *		}
718  */
719 unsigned
720 entropy_epoch(void)
721 {
722 
723 	/*
724 	 * Unsigned int, so no need for seqlock for an atomic read, but
725 	 * make sure we read it afresh each time.
726 	 */
727 	return atomic_load_relaxed(&E->epoch);
728 }
729 
730 /*
731  * entropy_ready()
732  *
733  *	True if the entropy pool has full entropy.
734  */
735 bool
736 entropy_ready(void)
737 {
738 
739 	return atomic_load_relaxed(&E->bitsneeded) == 0;
740 }
741 
742 /*
743  * entropy_account_cpu(ec)
744  *
745  *	Consider whether to consolidate entropy into the global pool
746  *	after we just added some into the current CPU's pending pool.
747  *
748  *	- If this CPU can provide enough entropy now, do so.
749  *
750  *	- If this and whatever else is available on other CPUs can
751  *	  provide enough entropy, kick the consolidation thread.
752  *
753  *	- Otherwise, do as little as possible, except maybe consolidate
754  *	  entropy at most once a minute.
755  *
756  *	Caller must be bound to a CPU and therefore have exclusive
757  *	access to ec.  Will acquire and release the global lock.
758  */
759 static void
760 entropy_account_cpu(struct entropy_cpu *ec)
761 {
762 	struct entropy_cpu_lock lock;
763 	struct entropy_cpu *ec0;
764 	unsigned bitsdiff, samplesdiff;
765 
766 	KASSERT(E->stage >= ENTROPY_WARM);
767 	KASSERT(curlwp->l_pflag & LP_BOUND);
768 
769 	/*
770 	 * If there's no entropy needed, and entropy has been
771 	 * consolidated in the last minute, do nothing.
772 	 */
773 	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0) &&
774 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
775 	    __predict_true((time_uptime - E->timestamp) <= 60))
776 		return;
777 
778 	/*
779 	 * Consider consolidation, under the global lock and with the
780 	 * per-CPU state locked.
781 	 */
782 	mutex_enter(&E->lock);
783 	ec0 = entropy_cpu_get(&lock);
784 	KASSERT(ec0 == ec);
785 
786 	if (ec->ec_bitspending == 0 && ec->ec_samplespending == 0) {
787 		/* Raced with consolidation xcall.  Nothing to do.  */
788 	} else if (E->bitsneeded != 0 && E->bitsneeded <= ec->ec_bitspending) {
789 		/*
790 		 * If we have not yet attained full entropy but we can
791 		 * now, do so.  This way we disseminate entropy
792 		 * promptly when it becomes available early at boot;
793 		 * otherwise we leave it to the entropy consolidation
794 		 * thread, which is rate-limited to mitigate side
795 		 * channels and abuse.
796 		 */
797 		uint8_t buf[ENTPOOL_CAPACITY];
798 
799 		/* Transfer from the local pool to the global pool.  */
800 		entpool_extract(ec->ec_pool, buf, sizeof buf);
801 		entpool_enter(&E->pool, buf, sizeof buf);
802 		atomic_store_relaxed(&ec->ec_bitspending, 0);
803 		atomic_store_relaxed(&ec->ec_samplespending, 0);
804 		atomic_store_relaxed(&E->bitsneeded, 0);
805 		atomic_store_relaxed(&E->samplesneeded, 0);
806 
807 		/* Notify waiters that we now have full entropy.  */
808 		entropy_notify();
809 		entropy_immediate_evcnt.ev_count++;
810 	} else {
811 		/* Determine how much we can add to the global pool.  */
812 		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
813 		    "E->bitspending=%u", E->bitspending);
814 		bitsdiff = MIN(ec->ec_bitspending,
815 		    MINENTROPYBITS - E->bitspending);
816 		KASSERTMSG(E->samplespending <= MINSAMPLES,
817 		    "E->samplespending=%u", E->samplespending);
818 		samplesdiff = MIN(ec->ec_samplespending,
819 		    MINSAMPLES - E->samplespending);
820 
821 		/*
822 		 * This should make a difference unless we are already
823 		 * saturated.
824 		 */
825 		KASSERTMSG((bitsdiff || samplesdiff ||
826 			E->bitspending == MINENTROPYBITS ||
827 			E->samplespending == MINSAMPLES),
828 		    "bitsdiff=%u E->bitspending=%u ec->ec_bitspending=%u"
829 		    " samplesdiff=%u E->samplespending=%u"
830 		    " ec->ec_samplespending=%u"
831 		    " minentropybits=%u minsamples=%u",
832 		    bitsdiff, E->bitspending, ec->ec_bitspending,
833 		    samplesdiff, E->samplespending, ec->ec_samplespending,
834 		    (unsigned)MINENTROPYBITS, (unsigned)MINSAMPLES);
835 
836 		/* Add to the global, subtract from the local.  */
837 		E->bitspending += bitsdiff;
838 		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
839 		    "E->bitspending=%u", E->bitspending);
840 		atomic_store_relaxed(&ec->ec_bitspending,
841 		    ec->ec_bitspending - bitsdiff);
842 
843 		E->samplespending += samplesdiff;
844 		KASSERTMSG(E->samplespending <= MINSAMPLES,
845 		    "E->samplespending=%u", E->samplespending);
846 		atomic_store_relaxed(&ec->ec_samplespending,
847 		    ec->ec_samplespending - samplesdiff);
848 
849 		/* One or the other must have gone up from zero.  */
850 		KASSERT(E->bitspending || E->samplespending);
851 
852 		if (E->bitsneeded <= E->bitspending ||
853 		    E->samplesneeded <= E->samplespending) {
854 			/*
855 			 * Enough bits or at least samples between all
856 			 * the per-CPU pools.  Leave a note for the
857 			 * housekeeping thread to consolidate entropy
858 			 * next time it wakes up -- and wake it up if
859 			 * this is the first time, to speed things up.
860 			 *
861 			 * If we don't need any entropy, this doesn't
862 			 * mean much, but it is the only time we ever
863 			 * gather additional entropy in case the
864 			 * accounting has been overly optimistic.  This
865 			 * happens at most once a minute, so there's
866 			 * negligible performance cost.
867 			 */
868 			E->consolidate = true;
869 			if (E->epoch == (unsigned)-1)
870 				cv_broadcast(&E->cv);
871 			if (E->bitsneeded == 0)
872 				entropy_discretionary_evcnt.ev_count++;
873 		} else {
874 			/* Can't get full entropy.  Keep gathering.  */
875 			entropy_partial_evcnt.ev_count++;
876 		}
877 	}
878 
879 	entropy_cpu_put(&lock, ec);
880 	mutex_exit(&E->lock);
881 }
882 
883 /*
884  * entropy_enter_early(buf, len, nbits)
885  *
886  *	Do entropy bookkeeping globally, before we have established
887  *	per-CPU pools.  Enter directly into the global pool in the hope
888  *	that we enter enough before the first entropy_extract to thwart
889  *	iterative-guessing attacks; entropy_extract will warn if not.
890  */
891 static void
892 entropy_enter_early(const void *buf, size_t len, unsigned nbits)
893 {
894 	bool notify = false;
895 
896 	KASSERT(E->stage == ENTROPY_COLD);
897 
898 	/* Enter it into the pool.  */
899 	entpool_enter(&E->pool, buf, len);
900 
901 	/*
902 	 * Decide whether to notify reseed -- we will do so if either:
903 	 * (a) we transition from partial entropy to full entropy, or
904 	 * (b) we get a batch of full entropy all at once.
905 	 */
906 	notify |= (E->bitsneeded && E->bitsneeded <= nbits);
907 	notify |= (nbits >= MINENTROPYBITS);
908 
909 	/*
910 	 * Subtract from the needed count and notify if appropriate.
911 	 * We don't count samples here because entropy_timer might
912 	 * still be returning zero at this point if there's no CPU
913 	 * cycle counter.
914 	 */
915 	E->bitsneeded -= MIN(E->bitsneeded, nbits);
916 	if (notify) {
917 		entropy_notify();
918 		entropy_immediate_evcnt.ev_count++;
919 	}
920 }
921 
922 /*
923  * entropy_enter(buf, len, nbits, count)
924  *
925  *	Enter len bytes of data from buf into the system's entropy
926  *	pool, stirring as necessary when the internal buffer fills up.
927  *	nbits is a lower bound on the number of bits of entropy in the
928  *	process that led to this sample.
929  */
930 static void
931 entropy_enter(const void *buf, size_t len, unsigned nbits, bool count)
932 {
933 	struct entropy_cpu_lock lock;
934 	struct entropy_cpu *ec;
935 	unsigned bitspending, samplespending;
936 	int bound;
937 
938 	KASSERTMSG(!cpu_intr_p(),
939 	    "use entropy_enter_intr from interrupt context");
940 	KASSERTMSG(howmany(nbits, NBBY) <= len,
941 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
942 
943 	/* If it's too early after boot, just use entropy_enter_early.  */
944 	if (__predict_false(E->stage == ENTROPY_COLD)) {
945 		entropy_enter_early(buf, len, nbits);
946 		return;
947 	}
948 
949 	/*
950 	 * Bind ourselves to the current CPU so we don't switch CPUs
951 	 * between entering data into the current CPU's pool (and
952 	 * updating the pending count) and transferring it to the
953 	 * global pool in entropy_account_cpu.
954 	 */
955 	bound = curlwp_bind();
956 
957 	/*
958 	 * With the per-CPU state locked, enter into the per-CPU pool
959 	 * and count up what we can add.
960 	 *
961 	 * We don't count samples while cold because entropy_timer
962 	 * might still be returning zero if there's no CPU cycle
963 	 * counter.
964 	 */
965 	ec = entropy_cpu_get(&lock);
966 	entpool_enter(ec->ec_pool, buf, len);
967 	bitspending = ec->ec_bitspending;
968 	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
969 	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
970 	samplespending = ec->ec_samplespending;
971 	if (__predict_true(count)) {
972 		samplespending += MIN(MINSAMPLES - samplespending, 1);
973 		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
974 	}
975 	entropy_cpu_put(&lock, ec);
976 
977 	/* Consolidate globally if appropriate based on what we added.  */
978 	if (bitspending > 0 || samplespending >= MINSAMPLES)
979 		entropy_account_cpu(ec);
980 
981 	curlwp_bindx(bound);
982 }
983 
984 /*
985  * entropy_enter_intr(buf, len, nbits, count)
986  *
987  *	Enter up to len bytes of data from buf into the system's
988  *	entropy pool without stirring.  nbits is a lower bound on the
989  *	number of bits of entropy in the process that led to this
990  *	sample.  If the sample could be entered completely, assume
991  *	nbits of entropy pending; otherwise assume none, since we don't
992  *	know whether some parts of the sample are constant, for
993  *	instance.  Schedule a softint to stir the entropy pool if
994  *	needed.  Return true if used fully, false if truncated at all.
995  *
996  *	Using this in thread context will work, but you might as well
997  *	use entropy_enter in that case.
998  */
999 static bool
1000 entropy_enter_intr(const void *buf, size_t len, unsigned nbits, bool count)
1001 {
1002 	struct entropy_cpu *ec;
1003 	bool fullyused = false;
1004 	uint32_t bitspending, samplespending;
1005 	void *sih;
1006 
1007 	KASSERT(cpu_intr_p());
1008 	KASSERTMSG(howmany(nbits, NBBY) <= len,
1009 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
1010 
1011 	/* If it's too early after boot, just use entropy_enter_early.  */
1012 	if (__predict_false(E->stage == ENTROPY_COLD)) {
1013 		entropy_enter_early(buf, len, nbits);
1014 		return true;
1015 	}
1016 
1017 	/*
1018 	 * Acquire the per-CPU state.  If someone is in the middle of
1019 	 * using it, drop the sample.  Otherwise, take the lock so that
1020 	 * higher-priority interrupts will drop their samples.
1021 	 */
1022 	ec = percpu_getref(entropy_percpu);
1023 	if (ec->ec_locked) {
1024 		ec->ec_evcnt->intrdrop.ev_count++;
1025 		goto out0;
1026 	}
1027 	ec->ec_locked = true;
1028 	__insn_barrier();
1029 
1030 	/*
1031 	 * Enter as much as we can into the per-CPU pool.  If it was
1032 	 * truncated, schedule a softint to stir the pool and stop.
1033 	 */
1034 	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
1035 		sih = atomic_load_relaxed(&entropy_sih);
1036 		if (__predict_true(sih != NULL))
1037 			softint_schedule(sih);
1038 		ec->ec_evcnt->intrtrunc.ev_count++;
1039 		goto out1;
1040 	}
1041 	fullyused = true;
1042 
1043 	/*
1044 	 * Count up what we can contribute.
1045 	 *
1046 	 * We don't count samples while cold because entropy_timer
1047 	 * might still be returning zero if there's no CPU cycle
1048 	 * counter.
1049 	 */
1050 	bitspending = ec->ec_bitspending;
1051 	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
1052 	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
1053 	if (__predict_true(count)) {
1054 		samplespending = ec->ec_samplespending;
1055 		samplespending += MIN(MINSAMPLES - samplespending, 1);
1056 		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
1057 	}
1058 
1059 	/* Schedule a softint if we added anything and it matters.  */
1060 	if (__predict_false(atomic_load_relaxed(&E->bitsneeded) ||
1061 		atomic_load_relaxed(&entropy_depletion)) &&
1062 	    (nbits != 0 || count)) {
1063 		sih = atomic_load_relaxed(&entropy_sih);
1064 		if (__predict_true(sih != NULL))
1065 			softint_schedule(sih);
1066 	}
1067 
1068 out1:	/* Release the per-CPU state.  */
1069 	KASSERT(ec->ec_locked);
1070 	__insn_barrier();
1071 	ec->ec_locked = false;
1072 out0:	percpu_putref(entropy_percpu);
1073 
1074 	return fullyused;
1075 }
1076 
1077 /*
1078  * entropy_softintr(cookie)
1079  *
1080  *	Soft interrupt handler for entering entropy.  Takes care of
1081  *	stirring the local CPU's entropy pool if it filled up during
1082  *	hard interrupts, and promptly crediting entropy from the local
1083  *	CPU's entropy pool to the global entropy pool if needed.
1084  */
1085 static void
1086 entropy_softintr(void *cookie)
1087 {
1088 	struct entropy_cpu_lock lock;
1089 	struct entropy_cpu *ec;
1090 	unsigned bitspending, samplespending;
1091 
1092 	/*
1093 	 * With the per-CPU state locked, stir the pool if necessary
1094 	 * and determine if there's any pending entropy on this CPU to
1095 	 * account globally.
1096 	 */
1097 	ec = entropy_cpu_get(&lock);
1098 	ec->ec_evcnt->softint.ev_count++;
1099 	entpool_stir(ec->ec_pool);
1100 	bitspending = ec->ec_bitspending;
1101 	samplespending = ec->ec_samplespending;
1102 	entropy_cpu_put(&lock, ec);
1103 
1104 	/* Consolidate globally if appropriate based on what we added.  */
1105 	if (bitspending > 0 || samplespending >= MINSAMPLES)
1106 		entropy_account_cpu(ec);
1107 }
1108 
1109 /*
1110  * entropy_thread(cookie)
1111  *
1112  *	Handle any asynchronous entropy housekeeping.
1113  */
1114 static void
1115 entropy_thread(void *cookie)
1116 {
1117 	bool consolidate;
1118 
1119 	for (;;) {
1120 		/*
1121 		 * Wait until there's full entropy somewhere among the
1122 		 * CPUs, as confirmed at most once per minute, or
1123 		 * someone wants to consolidate.
1124 		 */
1125 		if (entropy_pending()) {
1126 			consolidate = true;
1127 		} else {
1128 			mutex_enter(&E->lock);
1129 			if (!E->consolidate)
1130 				cv_timedwait(&E->cv, &E->lock, 60*hz);
1131 			consolidate = E->consolidate;
1132 			E->consolidate = false;
1133 			mutex_exit(&E->lock);
1134 		}
1135 
1136 		if (consolidate) {
1137 			/* Do it.  */
1138 			entropy_do_consolidate();
1139 
1140 			/* Mitigate abuse.  */
1141 			kpause("entropy", false, hz, NULL);
1142 		}
1143 	}
1144 }
1145 
1146 struct entropy_pending_count {
1147 	uint32_t bitspending;
1148 	uint32_t samplespending;
1149 };
1150 
1151 /*
1152  * entropy_pending()
1153  *
1154  *	True if enough bits or samples are pending on other CPUs to
1155  *	warrant consolidation.
1156  */
1157 static bool
1158 entropy_pending(void)
1159 {
1160 	struct entropy_pending_count count = { 0, 0 }, *C = &count;
1161 
1162 	percpu_foreach(entropy_percpu, &entropy_pending_cpu, C);
1163 	return C->bitspending >= MINENTROPYBITS ||
1164 	    C->samplespending >= MINSAMPLES;
1165 }
1166 
1167 static void
1168 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
1169 {
1170 	struct entropy_cpu *ec = ptr;
1171 	struct entropy_pending_count *C = cookie;
1172 	uint32_t cpu_bitspending;
1173 	uint32_t cpu_samplespending;
1174 
1175 	cpu_bitspending = atomic_load_relaxed(&ec->ec_bitspending);
1176 	cpu_samplespending = atomic_load_relaxed(&ec->ec_samplespending);
1177 	C->bitspending += MIN(MINENTROPYBITS - C->bitspending,
1178 	    cpu_bitspending);
1179 	C->samplespending += MIN(MINSAMPLES - C->samplespending,
1180 	    cpu_samplespending);
1181 }
1182 
1183 /*
1184  * entropy_do_consolidate()
1185  *
1186  *	Issue a cross-call to gather entropy on all CPUs and advance
1187  *	the entropy epoch.
1188  */
1189 static void
1190 entropy_do_consolidate(void)
1191 {
1192 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1193 	static struct timeval lasttime; /* serialized by E->lock */
1194 	struct entpool pool;
1195 	uint8_t buf[ENTPOOL_CAPACITY];
1196 	unsigned bitsdiff, samplesdiff;
1197 	uint64_t ticket;
1198 
1199 	/* Gather entropy on all CPUs into a temporary pool.  */
1200 	memset(&pool, 0, sizeof pool);
1201 	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
1202 	xc_wait(ticket);
1203 
1204 	/* Acquire the lock to notify waiters.  */
1205 	mutex_enter(&E->lock);
1206 
1207 	/* Count another consolidation.  */
1208 	entropy_consolidate_evcnt.ev_count++;
1209 
1210 	/* Note when we last consolidated, i.e. now.  */
1211 	E->timestamp = time_uptime;
1212 
1213 	/* Mix what we gathered into the global pool.  */
1214 	entpool_extract(&pool, buf, sizeof buf);
1215 	entpool_enter(&E->pool, buf, sizeof buf);
1216 	explicit_memset(&pool, 0, sizeof pool);
1217 
1218 	/* Count the entropy that was gathered.  */
1219 	bitsdiff = MIN(E->bitsneeded, E->bitspending);
1220 	atomic_store_relaxed(&E->bitsneeded, E->bitsneeded - bitsdiff);
1221 	E->bitspending -= bitsdiff;
1222 	if (__predict_false(E->bitsneeded > 0) && bitsdiff != 0) {
1223 		if ((boothowto & AB_DEBUG) != 0 &&
1224 		    ratecheck(&lasttime, &interval)) {
1225 			printf("WARNING:"
1226 			    " consolidating less than full entropy\n");
1227 		}
1228 	}
1229 
1230 	samplesdiff = MIN(E->samplesneeded, E->samplespending);
1231 	atomic_store_relaxed(&E->samplesneeded,
1232 	    E->samplesneeded - samplesdiff);
1233 	E->samplespending -= samplesdiff;
1234 
1235 	/* Advance the epoch and notify waiters.  */
1236 	entropy_notify();
1237 
1238 	/* Release the lock.  */
1239 	mutex_exit(&E->lock);
1240 }
1241 
1242 /*
1243  * entropy_consolidate_xc(vpool, arg2)
1244  *
1245  *	Extract output from the local CPU's input pool and enter it
1246  *	into a temporary pool passed as vpool.
1247  */
1248 static void
1249 entropy_consolidate_xc(void *vpool, void *arg2 __unused)
1250 {
1251 	struct entpool *pool = vpool;
1252 	struct entropy_cpu_lock lock;
1253 	struct entropy_cpu *ec;
1254 	uint8_t buf[ENTPOOL_CAPACITY];
1255 	uint32_t extra[7];
1256 	unsigned i = 0;
1257 
1258 	/* Grab CPU number and cycle counter to mix extra into the pool.  */
1259 	extra[i++] = cpu_number();
1260 	extra[i++] = entropy_timer();
1261 
1262 	/*
1263 	 * With the per-CPU state locked, extract from the per-CPU pool
1264 	 * and count it as no longer pending.
1265 	 */
1266 	ec = entropy_cpu_get(&lock);
1267 	extra[i++] = entropy_timer();
1268 	entpool_extract(ec->ec_pool, buf, sizeof buf);
1269 	atomic_store_relaxed(&ec->ec_bitspending, 0);
1270 	atomic_store_relaxed(&ec->ec_samplespending, 0);
1271 	extra[i++] = entropy_timer();
1272 	entropy_cpu_put(&lock, ec);
1273 	extra[i++] = entropy_timer();
1274 
1275 	/*
1276 	 * Copy over statistics, and enter the per-CPU extract and the
1277 	 * extra timing into the temporary pool, under the global lock.
1278 	 */
1279 	mutex_enter(&E->lock);
1280 	extra[i++] = entropy_timer();
1281 	entpool_enter(pool, buf, sizeof buf);
1282 	explicit_memset(buf, 0, sizeof buf);
1283 	extra[i++] = entropy_timer();
1284 	KASSERT(i == __arraycount(extra));
1285 	entpool_enter(pool, extra, sizeof extra);
1286 	explicit_memset(extra, 0, sizeof extra);
1287 	mutex_exit(&E->lock);
1288 }
1289 
1290 /*
1291  * entropy_notify()
1292  *
1293  *	Caller just contributed entropy to the global pool.  Advance
1294  *	the entropy epoch and notify waiters.
1295  *
1296  *	Caller must hold the global entropy lock.
1297  */
1298 static void
entropy_notify(void)1299 entropy_notify(void)
1300 {
1301 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1302 	static struct timeval lasttime; /* serialized by E->lock */
1303 	static bool ready = false, besteffort = false;
1304 	unsigned epoch;
1305 
1306 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1307 
1308 	/*
1309 	 * If this is the first time, print a message to the console
1310 	 * that we're ready so operators can compare it to the timing
1311 	 * of other events.
1312 	 *
1313 	 * If we didn't get full entropy from reliable sources, report
1314 	 * instead that we are running on fumes with best effort.  (If
1315 	 * we ever do get full entropy after that, print the ready
1316 	 * message once.)
1317 	 */
1318 	if (__predict_false(!ready)) {
1319 		if (E->bitsneeded == 0) {
1320 			printf("entropy: ready\n");
1321 			ready = true;
1322 		} else if (E->samplesneeded == 0 && !besteffort) {
1323 			printf("entropy: best effort\n");
1324 			besteffort = true;
1325 		}
1326 	}
1327 
1328 	/* Set the epoch; roll over from UINT_MAX-1 to 1.  */
1329 	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
1330 	    ratecheck(&lasttime, &interval)) {
1331 		epoch = E->epoch + 1;
1332 		if (epoch == 0 || epoch == (unsigned)-1)
1333 			epoch = 1;
1334 		atomic_store_relaxed(&E->epoch, epoch);
1335 	}
1336 	KASSERT(E->epoch != (unsigned)-1);
1337 
1338 	/* Notify waiters.  */
1339 	if (E->stage >= ENTROPY_WARM) {
1340 		cv_broadcast(&E->cv);
1341 		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
1342 	}
1343 
1344 	/* Count another notification.  */
1345 	entropy_notify_evcnt.ev_count++;
1346 }
1347 
1348 /*
1349  * entropy_consolidate()
1350  *
1351  *	Trigger entropy consolidation and wait for it to complete.
1352  *
1353  *	This should be used sparingly, not periodically -- requiring
1354  *	conscious intervention by the operator or a clear policy
1355  *	decision.  Otherwise, the kernel will automatically consolidate
1356  *	when enough entropy has been gathered into per-CPU pools to
1357  *	transition to full entropy.
1358  */
1359 void
1360 entropy_consolidate(void)
1361 {
1362 	uint64_t ticket;
1363 	int error;
1364 
1365 	KASSERT(E->stage == ENTROPY_HOT);
1366 
1367 	mutex_enter(&E->lock);
1368 	ticket = entropy_consolidate_evcnt.ev_count;
1369 	E->consolidate = true;
1370 	cv_broadcast(&E->cv);
1371 	while (ticket == entropy_consolidate_evcnt.ev_count) {
1372 		error = cv_wait_sig(&E->cv, &E->lock);
1373 		if (error)
1374 			break;
1375 	}
1376 	mutex_exit(&E->lock);
1377 }
1378 
1379 /*
1380  * sysctl -w kern.entropy.consolidate=1
1381  *
1382  *	Trigger entropy consolidation and wait for it to complete.
1383  *	Writable only by superuser.  This, writing to /dev/random, and
1384  *	ioctl(RNDADDDATA) are the only ways for the system to
1385  *	consolidate entropy if the operator knows something the kernel
1386  *	doesn't about how unpredictable the pending entropy pools are.
1387  */
1388 static int
1389 sysctl_entropy_consolidate(SYSCTLFN_ARGS)
1390 {
1391 	struct sysctlnode node = *rnode;
1392 	int arg = 0;
1393 	int error;
1394 
1395 	KASSERT(E->stage == ENTROPY_HOT);
1396 
1397 	node.sysctl_data = &arg;
1398 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1399 	if (error || newp == NULL)
1400 		return error;
1401 	if (arg)
1402 		entropy_consolidate();
1403 
1404 	return error;
1405 }
1406 
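/*
 * Operator-level example (these sysctl nodes are created in
 * entropy_init above):
 *
 *	# sysctl -w kern.entropy.consolidate=1
 *	# sysctl kern.entropy.epoch
 *
 * A successful consolidation advances kern.entropy.epoch, which
 * in-kernel consumers observe via entropy_epoch().
 */
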
1407 /*
1408  * sysctl -w kern.entropy.gather=1
1409  *
1410  *	Trigger gathering entropy from all on-demand sources, and wait
1411  *	for synchronous sources (but not asynchronous sources) to
1412  *	complete.  Writable only by superuser.
1413  */
1414 static int
1415 sysctl_entropy_gather(SYSCTLFN_ARGS)
1416 {
1417 	struct sysctlnode node = *rnode;
1418 	int arg = 0;
1419 	int error;
1420 
1421 	KASSERT(E->stage == ENTROPY_HOT);
1422 
1423 	node.sysctl_data = &arg;
1424 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1425 	if (error || newp == NULL)
1426 		return error;
1427 	if (arg) {
1428 		mutex_enter(&E->lock);
1429 		error = entropy_request(ENTROPY_CAPACITY,
1430 		    ENTROPY_WAIT|ENTROPY_SIG);
1431 		mutex_exit(&E->lock);
1432 	}
1433 
1434 	return 0;
1435 }
1436 
1437 /*
1438  * entropy_extract(buf, len, flags)
1439  *
1440  *	Extract len bytes from the global entropy pool into buf.
1441  *
1442  *	Caller MUST NOT expose these bytes directly -- must use them
1443  *	ONLY to seed a cryptographic pseudorandom number generator
1444  *	(`CPRNG'), a.k.a. deterministic random bit generator (`DRBG'),
1445  *	and then erase them.  entropy_extract does not, on its own,
1446  *	provide backtracking resistance -- it must be combined with a
1447  *	PRNG/DRBG that does.
1448  *
1449  *	You generally shouldn't use this directly -- use cprng(9)
1450  *	instead.
1451  *
1452  *	Flags may have:
1453  *
1454  *		ENTROPY_WAIT	Wait for entropy if not available yet.
1455  *		ENTROPY_SIG	Allow interruption by a signal during wait.
1456  *		ENTROPY_HARDFAIL Either fill the buffer with full entropy,
1457  *				or fail without filling it at all.
1458  *
1459  *	Return zero on success, or error on failure:
1460  *
1461  *		EWOULDBLOCK	No entropy and ENTROPY_WAIT not set.
1462  *		EINTR/ERESTART	No entropy, ENTROPY_SIG set, and interrupted.
1463  *
1464  *	If ENTROPY_WAIT is set, allowed only in thread context.  If
1465  *	ENTROPY_WAIT is not set, allowed also in softint context.
1466  *	Forbidden in hard interrupt context.
1467  */
1468 int
1469 entropy_extract(void *buf, size_t len, int flags)
1470 {
1471 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1472 	static struct timeval lasttime; /* serialized by E->lock */
1473 	bool printed = false;
1474 	int error;
1475 
1476 	if (ISSET(flags, ENTROPY_WAIT)) {
1477 		ASSERT_SLEEPABLE();
1478 		KASSERTMSG(E->stage >= ENTROPY_WARM,
1479 		    "can't wait for entropy until warm");
1480 	}
1481 
1482 	/* Refuse to operate in interrupt context.  */
1483 	KASSERT(!cpu_intr_p());
1484 
1485 	/* Acquire the global lock to get at the global pool.  */
1486 	if (E->stage >= ENTROPY_WARM)
1487 		mutex_enter(&E->lock);
1488 
1489 	/* Wait until there is enough entropy in the system.  */
1490 	error = 0;
1491 	if (E->bitsneeded > 0 && E->samplesneeded == 0) {
1492 		/*
1493 		 * We don't have full entropy from reliable sources,
1494 		 * but we gathered a plausible number of samples from
1495 		 * other sources such as timers.  Try asking for more
1496 		 * from any sources we can, but don't worry if it
1497 		 * fails -- best effort.
1498 		 */
1499 		(void)entropy_request(ENTROPY_CAPACITY, flags);
1500 	} else while (E->bitsneeded > 0 && E->samplesneeded > 0) {
1501 		/* Ask for more, synchronously if possible.  */
1502 		error = entropy_request(len, flags);
1503 		if (error)
1504 			break;
1505 
1506 		/* If we got enough, we're done.  */
1507 		if (E->bitsneeded == 0 || E->samplesneeded == 0) {
1508 			KASSERT(error == 0);
1509 			break;
1510 		}
1511 
1512 		/* If not waiting, stop here.  */
1513 		if (!ISSET(flags, ENTROPY_WAIT)) {
1514 			error = EWOULDBLOCK;
1515 			break;
1516 		}
1517 
1518 		/* Wait for some entropy to come in and try again.  */
1519 		KASSERT(E->stage >= ENTROPY_WARM);
1520 		if (!printed) {
1521 			printf("entropy: pid %d (%s) waiting for entropy(7)\n",
1522 			    curproc->p_pid, curproc->p_comm);
1523 			printed = true;
1524 		}
1525 
1526 		if (ISSET(flags, ENTROPY_SIG)) {
1527 			error = cv_timedwait_sig(&E->cv, &E->lock, hz);
1528 			if (error && error != EWOULDBLOCK)
1529 				break;
1530 		} else {
1531 			cv_timedwait(&E->cv, &E->lock, hz);
1532 		}
1533 	}
1534 
1535 	/*
1536 	 * Count failure -- but fill the buffer nevertheless, unless
1537 	 * the caller specified ENTROPY_HARDFAIL.
1538 	 */
1539 	if (error) {
1540 		if (ISSET(flags, ENTROPY_HARDFAIL))
1541 			goto out;
1542 		entropy_extract_fail_evcnt.ev_count++;
1543 	}
1544 
1545 	/*
1546 	 * Report a warning if we haven't yet reached full entropy.
1547 	 * This is the only case where we consider entropy to be
1548 	 * `depleted' without kern.entropy.depletion enabled -- when we
1549 	 * only have partial entropy, an adversary may be able to
1550 	 * narrow the state of the pool down to a small number of
1551 	 * possibilities; the output then enables them to confirm a
1552 	 * guess, reducing its entropy from the adversary's perspective
1553 	 * to zero.
1554 	 *
1555 	 * This should only happen if the operator has chosen to
1556 	 * consolidate, either through sysctl kern.entropy.consolidate
1557 	 * or by writing less than full entropy to /dev/random as root
1558 	 * (which /dev/random promises will immediately affect
1559 	 * subsequent output, for better or worse).
1560 	 */
1561 	if (E->bitsneeded > 0 && E->samplesneeded > 0) {
1562 		if (__predict_false(E->epoch == (unsigned)-1) &&
1563 		    ratecheck(&lasttime, &interval)) {
1564 			printf("WARNING:"
1565 			    " system needs entropy for security;"
1566 			    " see entropy(7)\n");
1567 		}
1568 		atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS);
1569 		atomic_store_relaxed(&E->samplesneeded, MINSAMPLES);
1570 	}
1571 
1572 	/* Extract data from the pool, and `deplete' if we're doing that.  */
1573 	entpool_extract(&E->pool, buf, len);
1574 	if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
1575 	    error == 0) {
1576 		unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
1577 		unsigned bitsneeded = E->bitsneeded;
1578 		unsigned samplesneeded = E->samplesneeded;
1579 
1580 		bitsneeded += MIN(MINENTROPYBITS - bitsneeded, cost);
1581 		samplesneeded += MIN(MINSAMPLES - samplesneeded, cost);
1582 
1583 		atomic_store_relaxed(&E->bitsneeded, bitsneeded);
1584 		atomic_store_relaxed(&E->samplesneeded, samplesneeded);
1585 		entropy_deplete_evcnt.ev_count++;
1586 	}
1587 
1588 out:	/* Release the global lock and return the error.  */
1589 	if (E->stage >= ENTROPY_WARM)
1590 		mutex_exit(&E->lock);
1591 	return error;
1592 }
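/*
 * Editor's illustrative sketch (not part of the original source): a
 * kernel consumer drawing key material through entropy_extract, using
 * the ENTROPY_WAIT, ENTROPY_SIG, and ENTROPY_HARDFAIL flags handled
 * above.  The entropy_extract(buf, len, flags) prototype is assumed to
 * come from <sys/entropy.h>; the consumer function and key size are
 * hypothetical.
 *
 *	#include <sys/entropy.h>
 *
 *	static int
 *	example_seed_key(uint8_t *key, size_t keylen)
 *	{
 *		int error;
 *
 *		// Sleep until full entropy, allow signals to interrupt
 *		// the wait, and refuse to return weak output on failure
 *		// rather than falling back to a best-effort fill.
 *		error = entropy_extract(key, keylen,
 *		    ENTROPY_WAIT|ENTROPY_SIG|ENTROPY_HARDFAIL);
 *		return error;	// e.g. 0 or EINTR
 *	}
 */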
1593 
1594 /*
1595  * entropy_poll(events)
1596  *
1597  *	Return the subset of events ready, and if it is not all of
1598  *	events, record curlwp as waiting for entropy.
1599  */
1600 int
1601 entropy_poll(int events)
1602 {
1603 	int revents = 0;
1604 
1605 	KASSERT(E->stage >= ENTROPY_WARM);
1606 
1607 	/* Always ready for writing.  */
1608 	revents |= events & (POLLOUT|POLLWRNORM);
1609 
1610 	/* Narrow it down to reads.  */
1611 	events &= POLLIN|POLLRDNORM;
1612 	if (events == 0)
1613 		return revents;
1614 
1615 	/*
1616 	 * If we have reached full entropy and we're not depleting
1617 	 * entropy, we are forever ready.
1618 	 */
1619 	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0 ||
1620 		atomic_load_relaxed(&E->samplesneeded) == 0) &&
1621 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)))
1622 		return revents | events;
1623 
1624 	/*
1625 	 * Otherwise, check whether we need entropy under the lock.  If
1626 	 * we don't, we're ready; if we do, add ourselves to the queue.
1627 	 */
1628 	mutex_enter(&E->lock);
1629 	if (E->bitsneeded == 0 || E->samplesneeded == 0)
1630 		revents |= events;
1631 	else
1632 		selrecord(curlwp, &E->selq);
1633 	mutex_exit(&E->lock);
1634 
1635 	return revents;
1636 }
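/*
 * Editor's illustrative sketch (not part of the original source): how
 * a character-device poll(9) entry point might delegate to
 * entropy_poll, in the style of the /dev/random pseudo-device.  The
 * device and function names are hypothetical.
 *
 *	static int
 *	example_rnd_poll(dev_t dev, int events, struct lwp *l)
 *	{
 *
 *		// entropy_poll records curlwp on the select queue if
 *		// the requested read events are not yet ready.
 *		return entropy_poll(events);
 *	}
 */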
1637 
1638 /*
1639  * filt_entropy_read_detach(kn)
1640  *
1641  *	struct filterops::f_detach callback for entropy read events:
1642  *	remove kn from the list of waiters.
1643  */
1644 static void
1645 filt_entropy_read_detach(struct knote *kn)
1646 {
1647 
1648 	KASSERT(E->stage >= ENTROPY_WARM);
1649 
1650 	mutex_enter(&E->lock);
1651 	selremove_knote(&E->selq, kn);
1652 	mutex_exit(&E->lock);
1653 }
1654 
1655 /*
1656  * filt_entropy_read_event(kn, hint)
1657  *
1658  *	struct filterops::f_event callback for entropy read events:
1659  *	poll for entropy.  Caller must hold the global entropy lock if
1660  *	hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT.
1661  */
1662 static int
1663 filt_entropy_read_event(struct knote *kn, long hint)
1664 {
1665 	int ret;
1666 
1667 	KASSERT(E->stage >= ENTROPY_WARM);
1668 
1669 	/* Acquire the lock, if caller is outside entropy subsystem.  */
1670 	if (hint == NOTE_SUBMIT)
1671 		KASSERT(mutex_owned(&E->lock));
1672 	else
1673 		mutex_enter(&E->lock);
1674 
1675 	/*
1676 	 * If we still need entropy, can't read anything; if not, can
1677 	 * read arbitrarily much.
1678 	 */
1679 	if (E->bitsneeded != 0 && E->samplesneeded != 0) {
1680 		ret = 0;
1681 	} else {
1682 		if (atomic_load_relaxed(&entropy_depletion))
1683 			kn->kn_data = ENTROPY_CAPACITY; /* bytes */
1684 		else
1685 			kn->kn_data = MIN(INT64_MAX, SSIZE_MAX);
1686 		ret = 1;
1687 	}
1688 
1689 	/* Release the lock, if caller is outside entropy subsystem.  */
1690 	if (hint == NOTE_SUBMIT)
1691 		KASSERT(mutex_owned(&E->lock));
1692 	else
1693 		mutex_exit(&E->lock);
1694 
1695 	return ret;
1696 }
1697 
1698 /* XXX Makes sense only for /dev/u?random.  */
1699 static const struct filterops entropy_read_filtops = {
1700 	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
1701 	.f_attach = NULL,
1702 	.f_detach = filt_entropy_read_detach,
1703 	.f_event = filt_entropy_read_event,
1704 };
1705 
1706 /*
1707  * entropy_kqfilter(kn)
1708  *
1709  *	Register kn to receive entropy event notifications.  May be
1710  *	EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
1711  */
1712 int
1713 entropy_kqfilter(struct knote *kn)
1714 {
1715 
1716 	KASSERT(E->stage >= ENTROPY_WARM);
1717 
1718 	switch (kn->kn_filter) {
1719 	case EVFILT_READ:
1720 		/* Enter into the global select queue.  */
1721 		mutex_enter(&E->lock);
1722 		kn->kn_fop = &entropy_read_filtops;
1723 		selrecord_knote(&E->selq, kn);
1724 		mutex_exit(&E->lock);
1725 		return 0;
1726 	case EVFILT_WRITE:
1727 		/* Can always dump entropy into the system.  */
1728 		kn->kn_fop = &seltrue_filtops;
1729 		return 0;
1730 	default:
1731 		return EINVAL;
1732 	}
1733 }
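/*
 * Editor's illustrative sketch (not part of the original source): a
 * userland program waiting for /dev/random to become readable via
 * kqueue, which exercises the EVFILT_READ path registered above.
 * Error handling is elided for brevity.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct kevent ev;
 *		int fd = open("/dev/random", O_RDONLY);
 *		int kq = kqueue();
 *
 *		EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *		(void)kevent(kq, &ev, 1, NULL, 0, NULL);   // register
 *		(void)kevent(kq, NULL, 0, &ev, 1, NULL);   // wait until readable
 *		close(kq);
 *		close(fd);
 *		return 0;
 *	}
 */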
1734 
1735 /*
1736  * rndsource_setcb(rs, get, getarg)
1737  *
1738  *	Set the request callback for the entropy source rs, if it can
1739  *	provide entropy on demand.  Must precede rnd_attach_source.
1740  */
1741 void
1742 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
1743     void *getarg)
1744 {
1745 
1746 	rs->get = get;
1747 	rs->getarg = getarg;
1748 }
1749 
1750 /*
1751  * rnd_attach_source(rs, name, type, flags)
1752  *
1753  *	Attach the entropy source rs.  Must be done after
1754  *	rndsource_setcb, if any, and before any calls to rnd_add_data.
1755  */
1756 void
1757 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
1758     uint32_t flags)
1759 {
1760 	uint32_t extra[4];
1761 	unsigned i = 0;
1762 
1763 	KASSERTMSG(name[0] != '\0', "rndsource must have nonempty name");
1764 
1765 	/* Grab cycle counter to mix extra into the pool.  */
1766 	extra[i++] = entropy_timer();
1767 
1768 	/*
1769 	 * Apply some standard flags:
1770 	 *
1771 	 * - We do not bother with network devices by default, for
1772 	 *   hysterical raisins (perhaps: because it is often the case
1773 	 *   that an adversary can influence network packet timings).
1774 	 */
1775 	switch (type) {
1776 	case RND_TYPE_NET:
1777 		flags |= RND_FLAG_NO_COLLECT;
1778 		break;
1779 	}
1780 
1781 	/* Sanity-check the callback if RND_FLAG_HASCB is set.  */
1782 	KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);
1783 
1784 	/* Initialize the random source.  */
1785 	memset(rs->name, 0, sizeof(rs->name)); /* paranoia */
1786 	strlcpy(rs->name, name, sizeof(rs->name));
1787 	memset(&rs->time_delta, 0, sizeof(rs->time_delta));
1788 	memset(&rs->value_delta, 0, sizeof(rs->value_delta));
1789 	rs->total = 0;
1790 	rs->type = type;
1791 	rs->flags = flags;
1792 	if (E->stage >= ENTROPY_WARM)
1793 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
1794 	extra[i++] = entropy_timer();
1795 
1796 	/* Wire it into the global list of random sources.  */
1797 	if (E->stage >= ENTROPY_WARM)
1798 		mutex_enter(&E->lock);
1799 	LIST_INSERT_HEAD(&E->sources, rs, list);
1800 	if (E->stage >= ENTROPY_WARM)
1801 		mutex_exit(&E->lock);
1802 	extra[i++] = entropy_timer();
1803 
1804 	/* Request that it provide entropy ASAP, if we can.  */
1805 	if (ISSET(flags, RND_FLAG_HASCB))
1806 		(*rs->get)(ENTROPY_CAPACITY, rs->getarg);
1807 	extra[i++] = entropy_timer();
1808 
1809 	/* Mix the extra into the pool.  */
1810 	KASSERT(i == __arraycount(extra));
1811 	entropy_enter(extra, sizeof extra, 0, /*count*/!cold);
1812 	explicit_memset(extra, 0, sizeof extra);
1813 }
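/*
 * Editor's illustrative sketch (not part of the original source): how
 * a hardware RNG driver might attach an entropy source with an
 * on-demand callback, following the rndsource_setcb-before-
 * rnd_attach_source ordering documented above.  The driver softc,
 * example_read_hwrng, and all "example_*" names are hypothetical;
 * RND_TYPE_RNG and the RND_FLAG_* values come from <sys/rndsource.h>.
 *
 *	#include <sys/param.h>
 *	#include <sys/rndsource.h>
 *
 *	static struct krndsource example_rndsource;
 *
 *	static void
 *	example_rng_get(size_t nbytes, void *arg)
 *	{
 *		struct example_softc *sc = arg;		// hypothetical softc
 *		uint32_t v;
 *
 *		while (nbytes) {
 *			v = example_read_hwrng(sc);	// hypothetical register read
 *			rnd_add_data(&example_rndsource, &v, sizeof v,
 *			    sizeof(v) * NBBY);
 *			nbytes -= MIN(nbytes, sizeof v);
 *		}
 *	}
 *
 *	static void
 *	example_attach(struct example_softc *sc)
 *	{
 *
 *		// The callback must be set before attaching.
 *		rndsource_setcb(&example_rndsource, example_rng_get, sc);
 *		rnd_attach_source(&example_rndsource, "examplerng",
 *		    RND_TYPE_RNG,
 *		    RND_FLAG_HASCB|RND_FLAG_COLLECT_VALUE|
 *		    RND_FLAG_COLLECT_TIME);
 *	}
 */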
1814 
1815 /*
1816  * rnd_detach_source(rs)
1817  *
1818  *	Detach the entropy source rs.  May sleep waiting for users to
1819  *	drain.  Further use is not allowed.
1820  */
1821 void
1822 rnd_detach_source(struct krndsource *rs)
1823 {
1824 
1825 	/*
1826 	 * If we're cold (shouldn't happen, but hey), just remove it
1827 	 * from the list -- there's nothing allocated.
1828 	 */
1829 	if (E->stage == ENTROPY_COLD) {
1830 		LIST_REMOVE(rs, list);
1831 		return;
1832 	}
1833 
1834 	/* We may have to wait for entropy_request.  */
1835 	ASSERT_SLEEPABLE();
1836 
1837 	/* Wait until the source list is not in use, and remove it.  */
1838 	mutex_enter(&E->lock);
1839 	while (E->sourcelock)
1840 		cv_wait(&E->sourcelock_cv, &E->lock);
1841 	LIST_REMOVE(rs, list);
1842 	mutex_exit(&E->lock);
1843 
1844 	/* Free the per-CPU data.  */
1845 	percpu_free(rs->state, sizeof(struct rndsource_cpu));
1846 }
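/*
 * Editor's illustrative sketch (not part of the original source):
 * detaching the hypothetical source from the attach sketch above on
 * driver unload.  Because rnd_detach_source may sleep waiting for a
 * concurrent entropy_request, it must be called from thread context.
 *
 *	static int
 *	example_detach(struct example_softc *sc, int flags)
 *	{
 *
 *		rnd_detach_source(&example_rndsource);
 *		return 0;
 *	}
 */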
1847 
1848 /*
1849  * rnd_lock_sources(flags)
1850  *
1851  *	Lock the list of entropy sources.  Caller must hold the global
1852  *	entropy lock.  If successful, no rndsource will go away until
1853  *	rnd_unlock_sources even while the caller releases the global
1854  *	entropy lock.
1855  *
1856  *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
1857  *	If flags & ENTROPY_SIG, allow interruption by signal.
1858  */
1859 static int __attribute__((warn_unused_result))
1860 rnd_lock_sources(int flags)
1861 {
1862 	int error;
1863 
1864 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1865 
1866 	while (E->sourcelock) {
1867 		KASSERT(E->stage >= ENTROPY_WARM);
1868 		if (!ISSET(flags, ENTROPY_WAIT))
1869 			return EWOULDBLOCK;
1870 		if (ISSET(flags, ENTROPY_SIG)) {
1871 			error = cv_wait_sig(&E->sourcelock_cv, &E->lock);
1872 			if (error)
1873 				return error;
1874 		} else {
1875 			cv_wait(&E->sourcelock_cv, &E->lock);
1876 		}
1877 	}
1878 
1879 	E->sourcelock = curlwp;
1880 	return 0;
1881 }
1882 
1883 /*
1884  * rnd_unlock_sources()
1885  *
1886  *	Unlock the list of sources after rnd_lock_sources.  Caller must
1887  *	hold the global entropy lock.
1888  */
1889 static void
1890 rnd_unlock_sources(void)
1891 {
1892 
1893 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1894 
1895 	KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
1896 	    curlwp, E->sourcelock);
1897 	E->sourcelock = NULL;
1898 	if (E->stage >= ENTROPY_WARM)
1899 		cv_signal(&E->sourcelock_cv);
1900 }
1901 
1902 /*
1903  * rnd_sources_locked()
1904  *
1905  *	True if we hold the list of rndsources locked, for diagnostic
1906  *	assertions.
1907  */
1908 static bool __diagused
1909 rnd_sources_locked(void)
1910 {
1911 
1912 	return E->sourcelock == curlwp;
1913 }
1914 
1915 /*
1916  * entropy_request(nbytes, flags)
1917  *
1918  *	Request nbytes bytes of entropy from all sources in the system.
1919  *	OK if we overdo it.  Caller must hold the global entropy lock;
1920  *	will release and re-acquire it.
1921  *
1922  *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
1923  *	If flags & ENTROPY_SIG, allow interruption by signal.
1924  */
1925 static int
1926 entropy_request(size_t nbytes, int flags)
1927 {
1928 	struct krndsource *rs;
1929 	int error;
1930 
1931 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1932 	if (flags & ENTROPY_WAIT)
1933 		ASSERT_SLEEPABLE();
1934 
1935 	/*
1936 	 * Lock the list of entropy sources to block rnd_detach_source
1937 	 * until we're done, and to serialize calls to the entropy
1938 	 * callbacks as guaranteed to drivers.
1939 	 */
1940 	error = rnd_lock_sources(flags);
1941 	if (error)
1942 		return error;
1943 	entropy_request_evcnt.ev_count++;
1944 
1945 	/* Clamp to the maximum reasonable request.  */
1946 	nbytes = MIN(nbytes, ENTROPY_CAPACITY);
1947 
1948 	/* Walk the list of sources.  */
1949 	LIST_FOREACH(rs, &E->sources, list) {
1950 		/* Skip sources without callbacks.  */
1951 		if (!ISSET(rs->flags, RND_FLAG_HASCB))
1952 			continue;
1953 
1954 		/*
1955 		 * Skip sources that are disabled altogether -- we
1956 		 * would just ignore their samples anyway.
1957 		 */
1958 		if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
1959 			continue;
1960 
1961 		/* Drop the lock while we call the callback.  */
1962 		if (E->stage >= ENTROPY_WARM)
1963 			mutex_exit(&E->lock);
1964 		(*rs->get)(nbytes, rs->getarg);
1965 		if (E->stage >= ENTROPY_WARM)
1966 			mutex_enter(&E->lock);
1967 	}
1968 
1969 	/* Request done; unlock the list of entropy sources.  */
1970 	rnd_unlock_sources();
1971 	return 0;
1972 }
1973 
1974 static inline uint32_t
1975 rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta)
1976 {
1977 	int32_t delta2, delta3;
1978 
1979 	/*
1980 	 * Calculate the second and third order differentials
1981 	 */
1982 	delta2 = d->dx - delta;
1983 	if (delta2 < 0)
1984 		delta2 = -delta2; /* XXX arithmetic overflow */
1985 
1986 	delta3 = d->d2x - delta2;
1987 	if (delta3 < 0)
1988 		delta3 = -delta3; /* XXX arithmetic overflow */
1989 
1990 	d->x = v;
1991 	d->dx = delta;
1992 	d->d2x = delta2;
1993 
1994 	/*
1995 	 * If any delta is 0, we got no entropy.  If all are non-zero, we
1996 	 * might have something.
1997 	 */
1998 	if (delta == 0 || delta2 == 0 || delta3 == 0)
1999 		return 0;
2000 
2001 	return 1;
2002 }
2003 
2004 static inline uint32_t
2005 rnd_dt_estimate(struct krndsource *rs, uint32_t t)
2006 {
2007 	int32_t delta;
2008 	uint32_t ret;
2009 	rnd_delta_t *d;
2010 	struct rndsource_cpu *rc;
2011 
2012 	rc = percpu_getref(rs->state);
2013 	d = &rc->rc_timedelta;
2014 
2015 	if (t < d->x) {
2016 		delta = UINT32_MAX - d->x + t;
2017 	} else {
2018 		delta = d->x - t;
2019 	}
2020 
2021 	if (delta < 0) {
2022 		delta = -delta;	/* XXX arithmetic overflow */
2023 	}
2024 
2025 	ret = rnd_delta_estimate(d, t, delta);
2026 
2027 	KASSERT(d->x == t);
2028 	KASSERT(d->dx == delta);
2029 	percpu_putref(rs->state);
2030 	return ret;
2031 }
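/*
 * Worked example (editor's addition, not part of the original source):
 * suppose a source's timer samples arrive at t = 100, 110, 125, 135.
 * After the sample at t = 125 the recorded state is dx = 15 (first-
 * order delta) and d2x = 5 (second-order delta).  The sample at
 * t = 135 then gives delta = 10, delta2 = |15 - 10| = 5, and
 * delta3 = |5 - 5| = 0.  Because one of the differentials is zero,
 * rnd_delta_estimate returns 0 and that sample is not counted, which
 * is the intended behaviour for overly regular timings.
 */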
2032 
2033 /*
2034  * rnd_add_uint32(rs, value)
2035  *
2036  *	Enter 32 bits of data from an entropy source into the pool.
2037  *
2038  *	If rs is NULL, may not be called from interrupt context.
2039  *
2040  *	If rs is non-NULL, may be called from any context.  May drop
2041  *	data if called from interrupt context.
2042  */
2043 void
2044 rnd_add_uint32(struct krndsource *rs, uint32_t value)
2045 {
2046 
2047 	rnd_add_data(rs, &value, sizeof value, 0);
2048 }
2049 
2050 void
2051 _rnd_add_uint32(struct krndsource *rs, uint32_t value)
2052 {
2053 
2054 	rnd_add_data(rs, &value, sizeof value, 0);
2055 }
2056 
2057 void
2058 _rnd_add_uint64(struct krndsource *rs, uint64_t value)
2059 {
2060 
2061 	rnd_add_data(rs, &value, sizeof value, 0);
2062 }
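/*
 * Editor's illustrative sketch (not part of the original source): a
 * device interrupt handler feeding a 32-bit status word into the pool
 * through the hypothetical source attached earlier.  rnd_add_uint32
 * claims no entropy itself (it passes entropybits = 0); only the
 * timing/value estimation configured on the source can count it.
 * example_read_status is hypothetical.
 *
 *	static int
 *	example_intr(void *arg)
 *	{
 *		struct example_softc *sc = arg;
 *		uint32_t status = example_read_status(sc);
 *
 *		// Legal from interrupt context because rs is non-NULL;
 *		// the sample may be dropped under load.
 *		rnd_add_uint32(&example_rndsource, status);
 *		return 1;
 *	}
 */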
2063 
2064 /*
2065  * rnd_add_data(rs, buf, len, entropybits)
2066  *
2067  *	Enter data from an entropy source into the pool, with a
2068  *	driver's estimate of how much entropy the physical source of
2069  *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
2070  *	estimate and treat it as zero.
2071  *
2072  *	If rs is NULL, may not be called from interrupt context.
2073  *
2074  *	If rs is non-NULL, may be called from any context.  May drop
2075  *	data if called from interrupt context.
2076  */
2077 void
2078 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
2079     uint32_t entropybits)
2080 {
2081 	uint32_t extra;
2082 	uint32_t flags;
2083 
2084 	KASSERTMSG(howmany(entropybits, NBBY) <= len,
2085 	    "%s: impossible entropy rate:"
2086 	    " %"PRIu32" bits in %"PRIu32"-byte string",
2087 	    rs ? rs->name : "(anonymous)", entropybits, len);
2088 
2089 	/* If there's no rndsource, just enter the data and time now.  */
2090 	if (rs == NULL) {
2091 		entropy_enter(buf, len, entropybits, /*count*/false);
2092 		extra = entropy_timer();
2093 		entropy_enter(&extra, sizeof extra, 0, /*count*/false);
2094 		explicit_memset(&extra, 0, sizeof extra);
2095 		return;
2096 	}
2097 
2098 	/*
2099 	 * Hold up the reset xcall before it zeroes the entropy counts
2100 	 * on this CPU or globally.  Otherwise, we might leave some
2101 	 * nonzero entropy attributed to an untrusted source in the
2102 	 * event of a race with a change to flags.
2103 	 */
2104 	kpreempt_disable();
2105 
2106 	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
2107 	flags = atomic_load_relaxed(&rs->flags);
2108 
2109 	/*
2110 	 * Skip if:
2111 	 * - we're not collecting entropy, or
2112 	 * - the operator doesn't want to collect entropy from this, or
2113 	 * - neither data nor timings are being collected from this.
2114 	 */
2115 	if (!atomic_load_relaxed(&entropy_collection) ||
2116 	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
2117 	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
2118 		goto out;
2119 
2120 	/* If asked, ignore the estimate.  */
2121 	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
2122 		entropybits = 0;
2123 
2124 	/* If we are collecting data, enter them.  */
2125 	if (ISSET(flags, RND_FLAG_COLLECT_VALUE)) {
2126 		rnd_add_data_1(rs, buf, len, entropybits, /*count*/false,
2127 		    RND_FLAG_COLLECT_VALUE);
2128 	}
2129 
2130 	/* If we are collecting timings, enter one.  */
2131 	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
2132 		bool count;
2133 
2134 		/* Sample a timer.  */
2135 		extra = entropy_timer();
2136 
2137 		/* If asked, do entropy estimation on the time.  */
2138 		if ((flags & (RND_FLAG_ESTIMATE_TIME|RND_FLAG_NO_ESTIMATE)) ==
2139 		    RND_FLAG_ESTIMATE_TIME && !cold)
2140 			count = rnd_dt_estimate(rs, extra);
2141 		else
2142 			count = false;
2143 
2144 		rnd_add_data_1(rs, &extra, sizeof extra, 0, count,
2145 		    RND_FLAG_COLLECT_TIME);
2146 	}
2147 
2148 out:	/* Allow concurrent changes to flags to finish.  */
2149 	kpreempt_enable();
2150 }
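/*
 * Editor's illustrative sketch (not part of the original source):
 * entering a buffer of hardware-generated bytes with a deliberately
 * conservative entropy claim.  The KASSERT above requires the claim,
 * in bits, to fit in the buffer, i.e. howmany(entropybits, NBBY) <=
 * len.  example_fill_from_hwrng is hypothetical; example_rndsource is
 * from the attach sketch earlier.
 *
 *	uint8_t sample[64];
 *
 *	example_fill_from_hwrng(sample, sizeof sample);
 *	// Claim only 256 of the 512 bits, e.g. if the generator is
 *	// believed to be only half-entropic.
 *	rnd_add_data(&example_rndsource, sample, sizeof sample, 256);
 */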
2151 
2152 static unsigned
2153 add_sat(unsigned a, unsigned b)
2154 {
2155 	unsigned c = a + b;
2156 
2157 	return (c < a ? UINT_MAX : c);
2158 }
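/*
 * Worked example (editor's addition, not part of the original source):
 * add_sat(UINT_MAX - 1, 5) would wrap to 3 under ordinary unsigned
 * addition; since the wrapped sum is smaller than the first operand,
 * the result saturates at UINT_MAX instead.
 */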
2159 
2160 /*
2161  * rnd_add_data_1(rs, buf, len, entropybits, count, flag)
2162  *
2163  *	Internal subroutine to call either entropy_enter_intr, if we're
2164  *	in interrupt context, or entropy_enter if not, and to count the
2165  *	entropy in an rndsource.
2166  */
2167 static void
2168 rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
2169     uint32_t entropybits, bool count, uint32_t flag)
2170 {
2171 	bool fullyused;
2172 
2173 	/*
2174 	 * If we're in interrupt context, use entropy_enter_intr and
2175 	 * take note of whether it consumed the full sample; if not,
2176 	 * use entropy_enter, which always consumes the full sample.
2177 	 */
2178 	if (curlwp && cpu_intr_p()) {
2179 		fullyused = entropy_enter_intr(buf, len, entropybits, count);
2180 	} else {
2181 		entropy_enter(buf, len, entropybits, count);
2182 		fullyused = true;
2183 	}
2184 
2185 	/*
2186 	 * If we used the full sample, note how many bits were
2187 	 * contributed from this source.
2188 	 */
2189 	if (fullyused) {
2190 		if (__predict_false(E->stage == ENTROPY_COLD)) {
2191 			rs->total = add_sat(rs->total, entropybits);
2192 			switch (flag) {
2193 			case RND_FLAG_COLLECT_TIME:
2194 				rs->time_delta.insamples =
2195 				    add_sat(rs->time_delta.insamples, 1);
2196 				break;
2197 			case RND_FLAG_COLLECT_VALUE:
2198 				rs->value_delta.insamples =
2199 				    add_sat(rs->value_delta.insamples, 1);
2200 				break;
2201 			}
2202 		} else {
2203 			struct rndsource_cpu *rc = percpu_getref(rs->state);
2204 
2205 			atomic_store_relaxed(&rc->rc_entropybits,
2206 			    add_sat(rc->rc_entropybits, entropybits));
2207 			switch (flag) {
2208 			case RND_FLAG_COLLECT_TIME:
2209 				atomic_store_relaxed(&rc->rc_timesamples,
2210 				    add_sat(rc->rc_timesamples, 1));
2211 				break;
2212 			case RND_FLAG_COLLECT_VALUE:
2213 				atomic_store_relaxed(&rc->rc_datasamples,
2214 				    add_sat(rc->rc_datasamples, 1));
2215 				break;
2216 			}
2217 			percpu_putref(rs->state);
2218 		}
2219 	}
2220 }
2221 
2222 /*
2223  * rnd_add_data_sync(rs, buf, len, entropybits)
2224  *
2225  *	Same as rnd_add_data.  Originally used in rndsource callbacks,
2226  *	to break an unnecessary cycle; no longer really needed.
2227  */
2228 void
2229 rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
2230     uint32_t entropybits)
2231 {
2232 
2233 	rnd_add_data(rs, buf, len, entropybits);
2234 }
2235 
2236 /*
2237  * rndsource_entropybits(rs)
2238  *
2239  *	Return approximately the number of bits of entropy that have
2240  *	been contributed via rs so far.  Approximate if other CPUs may
2241  *	be calling rnd_add_data concurrently.
2242  */
2243 static unsigned
2244 rndsource_entropybits(struct krndsource *rs)
2245 {
2246 	unsigned nbits = rs->total;
2247 
2248 	KASSERT(E->stage >= ENTROPY_WARM);
2249 	KASSERT(rnd_sources_locked());
2250 	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
2251 	return nbits;
2252 }
2253 
2254 static void
2255 rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
2256 {
2257 	struct rndsource_cpu *rc = ptr;
2258 	unsigned *nbitsp = cookie;
2259 	unsigned cpu_nbits;
2260 
2261 	cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits);
2262 	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
2263 }
2264 
2265 /*
2266  * rndsource_to_user(rs, urs)
2267  *
2268  *	Copy a description of rs out to urs for userland.
2269  */
2270 static void
2271 rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
2272 {
2273 
2274 	KASSERT(E->stage >= ENTROPY_WARM);
2275 	KASSERT(rnd_sources_locked());
2276 
2277 	/* Avoid kernel memory disclosure.  */
2278 	memset(urs, 0, sizeof(*urs));
2279 
2280 	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
2281 	strlcpy(urs->name, rs->name, sizeof(urs->name));
2282 	urs->total = rndsource_entropybits(rs);
2283 	urs->type = rs->type;
2284 	urs->flags = atomic_load_relaxed(&rs->flags);
2285 }
2286 
2287 /*
2288  * rndsource_to_user_est(rs, urse)
2289  *
2290  *	Copy a description of rs and estimation statistics out to urse
2291  *	for userland.
2292  */
2293 static void
2294 rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
2295 {
2296 
2297 	KASSERT(E->stage >= ENTROPY_WARM);
2298 	KASSERT(rnd_sources_locked());
2299 
2300 	/* Avoid kernel memory disclosure.  */
2301 	memset(urse, 0, sizeof(*urse));
2302 
2303 	/* Copy out the rndsource description.  */
2304 	rndsource_to_user(rs, &urse->rt);
2305 
2306 	/* Gather the statistics.  */
2307 	urse->dt_samples = rs->time_delta.insamples;
2308 	urse->dt_total = 0;
2309 	urse->dv_samples = rs->value_delta.insamples;
2310 	urse->dv_total = urse->rt.total;
2311 	percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse);
2312 }
2313 
2314 static void
2315 rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci)
2316 {
2317 	struct rndsource_cpu *rc = ptr;
2318 	rndsource_est_t *urse = cookie;
2319 
2320 	urse->dt_samples = add_sat(urse->dt_samples,
2321 	    atomic_load_relaxed(&rc->rc_timesamples));
2322 	urse->dv_samples = add_sat(urse->dv_samples,
2323 	    atomic_load_relaxed(&rc->rc_datasamples));
2324 }
2325 
2326 /*
2327  * entropy_reset_xc(arg1, arg2)
2328  *
2329  *	Reset the current CPU's pending entropy to zero.
2330  */
2331 static void
2332 entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
2333 {
2334 	uint32_t extra = entropy_timer();
2335 	struct entropy_cpu_lock lock;
2336 	struct entropy_cpu *ec;
2337 
2338 	/*
2339 	 * With the per-CPU state locked, zero the pending count and
2340 	 * enter a cycle count for fun.
2341 	 */
2342 	ec = entropy_cpu_get(&lock);
2343 	ec->ec_bitspending = 0;
2344 	ec->ec_samplespending = 0;
2345 	entpool_enter(ec->ec_pool, &extra, sizeof extra);
2346 	entropy_cpu_put(&lock, ec);
2347 }
2348 
2349 /*
2350  * entropy_ioctl(cmd, data)
2351  *
2352  *	Handle various /dev/random ioctl queries.
2353  */
2354 int
2355 entropy_ioctl(unsigned long cmd, void *data)
2356 {
2357 	struct krndsource *rs;
2358 	bool privileged = false;
2359 	int error;
2360 
2361 	KASSERT(E->stage >= ENTROPY_WARM);
2362 
2363 	/* Verify user's authorization to perform the ioctl.  */
2364 	switch (cmd) {
2365 	case RNDGETENTCNT:
2366 	case RNDGETPOOLSTAT:
2367 	case RNDGETSRCNUM:
2368 	case RNDGETSRCNAME:
2369 	case RNDGETESTNUM:
2370 	case RNDGETESTNAME:
2371 		error = kauth_authorize_device(kauth_cred_get(),
2372 		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
2373 		break;
2374 	case RNDCTL:
2375 		error = kauth_authorize_device(kauth_cred_get(),
2376 		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
2377 		break;
2378 	case RNDADDDATA:
2379 		error = kauth_authorize_device(kauth_cred_get(),
2380 		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
2381 		/* Ascertain whether the user's inputs should be counted.  */
2382 		if (kauth_authorize_device(kauth_cred_get(),
2383 			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
2384 			NULL, NULL, NULL, NULL) == 0)
2385 			privileged = true;
2386 		break;
2387 	default: {
2388 		/*
2389 		 * XXX Hack to avoid changing module ABI so this can be
2390 		 * pulled up.  Later, we can just remove the argument.
2391 		 */
2392 		static const struct fileops fops = {
2393 			.fo_ioctl = rnd_system_ioctl,
2394 		};
2395 		struct file f = {
2396 			.f_ops = &fops,
2397 		};
2398 		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
2399 		    enosys(), error);
2400 #if defined(_LP64)
2401 		if (error == ENOSYS)
2402 			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
2403 			    enosys(), error);
2404 #endif
2405 		if (error == ENOSYS)
2406 			error = ENOTTY;
2407 		break;
2408 	}
2409 	}
2410 
2411 	/* If anything went wrong with authorization, stop here.  */
2412 	if (error)
2413 		return error;
2414 
2415 	/* Dispatch on the command.  */
2416 	switch (cmd) {
2417 	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
2418 		uint32_t *countp = data;
2419 
2420 		mutex_enter(&E->lock);
2421 		*countp = MINENTROPYBITS - E->bitsneeded;
2422 		mutex_exit(&E->lock);
2423 
2424 		break;
2425 	}
2426 	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
2427 		rndpoolstat_t *pstat = data;
2428 
2429 		mutex_enter(&E->lock);
2430 
2431 		/* parameters */
2432 		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
2433 		pstat->threshold = MINENTROPYBITS/NBBY; /* bytes */
2434 		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */
2435 
2436 		/* state */
2437 		pstat->added = 0; /* XXX total entropy_enter count */
2438 		pstat->curentropy = MINENTROPYBITS - E->bitsneeded; /* bits */
2439 		pstat->removed = 0; /* XXX total entropy_extract count */
2440 		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
2441 
2442 		/*
2443 		 * This used to be bits of data fabricated in some
2444 		 * sense; we'll take it to mean number of samples,
2445 		 * excluding the bits of entropy from HWRNG or seed.
2446 		 */
2447 		pstat->generated = MINSAMPLES - E->samplesneeded;
2448 		pstat->generated -= MIN(pstat->generated, pstat->curentropy);
2449 
2450 		mutex_exit(&E->lock);
2451 		break;
2452 	}
2453 	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
2454 		rndstat_t *stat = data;
2455 		uint32_t start = 0, i = 0;
2456 
2457 		/* Skip if none requested; fail if too many requested.  */
2458 		if (stat->count == 0)
2459 			break;
2460 		if (stat->count > RND_MAXSTATCOUNT)
2461 			return EINVAL;
2462 
2463 		/*
2464 		 * Under the lock, find the first one, copy out as many
2465 		 * as requested, and report how many we copied out.
2466 		 */
2467 		mutex_enter(&E->lock);
2468 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2469 		if (error) {
2470 			mutex_exit(&E->lock);
2471 			return error;
2472 		}
2473 		LIST_FOREACH(rs, &E->sources, list) {
2474 			if (start++ == stat->start)
2475 				break;
2476 		}
2477 		while (i < stat->count && rs != NULL) {
2478 			mutex_exit(&E->lock);
2479 			rndsource_to_user(rs, &stat->source[i++]);
2480 			mutex_enter(&E->lock);
2481 			rs = LIST_NEXT(rs, list);
2482 		}
2483 		KASSERT(i <= stat->count);
2484 		stat->count = i;
2485 		rnd_unlock_sources();
2486 		mutex_exit(&E->lock);
2487 		break;
2488 	}
2489 	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
2490 		rndstat_est_t *estat = data;
2491 		uint32_t start = 0, i = 0;
2492 
2493 		/* Skip if none requested; fail if too many requested.  */
2494 		if (estat->count == 0)
2495 			break;
2496 		if (estat->count > RND_MAXSTATCOUNT)
2497 			return EINVAL;
2498 
2499 		/*
2500 		 * Under the lock, find the first one, copy out as many
2501 		 * as requested, and report how many we copied out.
2502 		 */
2503 		mutex_enter(&E->lock);
2504 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2505 		if (error) {
2506 			mutex_exit(&E->lock);
2507 			return error;
2508 		}
2509 		LIST_FOREACH(rs, &E->sources, list) {
2510 			if (start++ == estat->start)
2511 				break;
2512 		}
2513 		while (i < estat->count && rs != NULL) {
2514 			mutex_exit(&E->lock);
2515 			rndsource_to_user_est(rs, &estat->source[i++]);
2516 			mutex_enter(&E->lock);
2517 			rs = LIST_NEXT(rs, list);
2518 		}
2519 		KASSERT(i <= estat->count);
2520 		estat->count = i;
2521 		rnd_unlock_sources();
2522 		mutex_exit(&E->lock);
2523 		break;
2524 	}
2525 	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
2526 		rndstat_name_t *nstat = data;
2527 		const size_t n = sizeof(rs->name);
2528 
2529 		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));
2530 
2531 		/*
2532 		 * Under the lock, search by name.  If found, copy it
2533 		 * out; if not found, fail with ENOENT.
2534 		 */
2535 		mutex_enter(&E->lock);
2536 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2537 		if (error) {
2538 			mutex_exit(&E->lock);
2539 			return error;
2540 		}
2541 		LIST_FOREACH(rs, &E->sources, list) {
2542 			if (strncmp(rs->name, nstat->name, n) == 0)
2543 				break;
2544 		}
2545 		if (rs != NULL) {
2546 			mutex_exit(&E->lock);
2547 			rndsource_to_user(rs, &nstat->source);
2548 			mutex_enter(&E->lock);
2549 		} else {
2550 			error = ENOENT;
2551 		}
2552 		rnd_unlock_sources();
2553 		mutex_exit(&E->lock);
2554 		break;
2555 	}
2556 	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
2557 		rndstat_est_name_t *enstat = data;
2558 		const size_t n = sizeof(rs->name);
2559 
2560 		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));
2561 
2562 		/*
2563 		 * Under the lock, search by name.  If found, copy it
2564 		 * out; if not found, fail with ENOENT.
2565 		 */
2566 		mutex_enter(&E->lock);
2567 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2568 		if (error) {
2569 			mutex_exit(&E->lock);
2570 			return error;
2571 		}
2572 		LIST_FOREACH(rs, &E->sources, list) {
2573 			if (strncmp(rs->name, enstat->name, n) == 0)
2574 				break;
2575 		}
2576 		if (rs != NULL) {
2577 			mutex_exit(&E->lock);
2578 			rndsource_to_user_est(rs, &enstat->source);
2579 			mutex_enter(&E->lock);
2580 		} else {
2581 			error = ENOENT;
2582 		}
2583 		rnd_unlock_sources();
2584 		mutex_exit(&E->lock);
2585 		break;
2586 	}
2587 	case RNDCTL: {		/* Modify entropy source flags.  */
2588 		rndctl_t *rndctl = data;
2589 		const size_t n = sizeof(rs->name);
2590 		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
2591 		uint32_t flags;
2592 		bool reset = false, request = false;
2593 
2594 		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));
2595 
2596 		/* Whitelist the flags that the user can change.  */
2597 		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
2598 
2599 		/*
2600 		 * For each matching rndsource, either by type if
2601 		 * specified or by name if not, set the masked flags.
2602 		 */
2603 		mutex_enter(&E->lock);
2604 		LIST_FOREACH(rs, &E->sources, list) {
2605 			if (rndctl->type != 0xff) {
2606 				if (rs->type != rndctl->type)
2607 					continue;
2608 			} else if (rndctl->name[0] != '\0') {
2609 				if (strncmp(rs->name, rndctl->name, n) != 0)
2610 					continue;
2611 			}
2612 			flags = rs->flags & ~rndctl->mask;
2613 			flags |= rndctl->flags & rndctl->mask;
2614 			if ((rs->flags & resetflags) == 0 &&
2615 			    (flags & resetflags) != 0)
2616 				reset = true;
2617 			if ((rs->flags ^ flags) & resetflags)
2618 				request = true;
2619 			atomic_store_relaxed(&rs->flags, flags);
2620 		}
2621 		mutex_exit(&E->lock);
2622 
2623 		/*
2624 		 * If we disabled estimation or collection, nix all the
2625 		 * pending entropy and set needed to the maximum.
2626 		 */
2627 		if (reset) {
2628 			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
2629 			mutex_enter(&E->lock);
2630 			E->bitspending = 0;
2631 			E->samplespending = 0;
2632 			atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS);
2633 			atomic_store_relaxed(&E->samplesneeded, MINSAMPLES);
2634 			E->consolidate = false;
2635 			mutex_exit(&E->lock);
2636 		}
2637 
2638 		/*
2639 		 * If we changed any of the estimation or collection
2640 		 * flags, request new samples from everyone -- either
2641 		 * to make up for what we just lost, or to get new
2642 		 * samples from what we just added.
2643 		 *
2644 		 * Failing on signal, while waiting for another process
2645 		 * to finish requesting entropy, is OK here even though
2646 		 * we have committed side effects, because this ioctl
2647 		 * command is idempotent, so repeating it is safe.
2648 		 */
2649 		if (request) {
2650 			mutex_enter(&E->lock);
2651 			error = entropy_request(ENTROPY_CAPACITY,
2652 			    ENTROPY_WAIT|ENTROPY_SIG);
2653 			mutex_exit(&E->lock);
2654 		}
2655 		break;
2656 	}
2657 	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
2658 		rnddata_t *rdata = data;
2659 		unsigned entropybits = 0;
2660 
2661 		if (!atomic_load_relaxed(&entropy_collection))
2662 			break;	/* thanks but no thanks */
2663 		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
2664 			return EINVAL;
2665 
2666 		/*
2667 		 * This ioctl serves as the userland alternative to a
2668 		 * bootloader-provided seed -- typically furnished by
2669 		 * /etc/rc.d/random_seed.  We accept the user's entropy
2670 		 * claim only if
2671 		 *
2672 		 * (a) the user is privileged, and
2673 		 * (b) we have not entered a bootloader seed.
2674 		 *
2675 		 * under the assumption that the user may use this to
2676 		 * load a seed from disk that we have already loaded
2677 		 * from the bootloader, so we don't double-count it.
2678 		 */
2679 		if (privileged && rdata->entropy && rdata->len) {
2680 			mutex_enter(&E->lock);
2681 			if (!E->seeded) {
2682 				entropybits = MIN(rdata->entropy,
2683 				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
2684 				E->seeded = true;
2685 			}
2686 			mutex_exit(&E->lock);
2687 		}
2688 
2689 		/* Enter the data and consolidate entropy.  */
2690 		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
2691 		    entropybits);
2692 		entropy_consolidate();
2693 		break;
2694 	}
2695 	default:
2696 		error = ENOTTY;
2697 	}
2698 
2699 	/* Return any error that may have come up.  */
2700 	return error;
2701 }
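/*
 * Editor's illustrative sketch (not part of the original source): a
 * userland query of the current entropy count in bits through
 * RNDGETENTCNT, as handled above.  RNDGETENTCNT comes from
 * <sys/rndio.h>; error handling is elided.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/rndio.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint32_t bits;
 *		int fd = open("/dev/random", O_RDONLY);
 *
 *		if (ioctl(fd, RNDGETENTCNT, &bits) == 0)
 *			printf("%u bits of entropy\n", bits);
 *		close(fd);
 *		return 0;
 *	}
 */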
2702 
2703 /* Legacy entry points */
2704 
2705 void
2706 rnd_seed(void *seed, size_t len)
2707 {
2708 
2709 	if (len != sizeof(rndsave_t)) {
2710 		printf("entropy: invalid seed length: %zu,"
2711 		    " expected sizeof(rndsave_t) = %zu\n",
2712 		    len, sizeof(rndsave_t));
2713 		return;
2714 	}
2715 	entropy_seed(seed);
2716 }
2717 
2718 void
2719 rnd_init(void)
2720 {
2721 
2722 	entropy_init();
2723 }
2724 
2725 void
2726 rnd_init_softint(void)
2727 {
2728 
2729 	entropy_init_late();
2730 	entropy_bootrequest();
2731 }
2732 
2733 int
2734 rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
2735 {
2736 
2737 	return entropy_ioctl(cmd, data);
2738 }
2739