/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#define	CRYPTO_TIMING				/* enable timing support */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <ddb/ddb.h>

#include <vm/uma.h>
#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>			/* XXX for M_XDATA */

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

struct crypto_session {
	device_t parent;
	void *softc;
	uint32_t hid;
	uint32_t capabilities;
};

SDT_PROVIDER_DEFINE(opencrypto);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static	struct mtx crypto_drivers_mtx;		/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)
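
/*
 * Illustrative sketch (not code used by this file): a driver's attach
 * routine typically claims a slot and then registers each algorithm it
 * implements.  "foo" and struct foo_session are hypothetical names.
 *
 *	static int
 *	foo_attach(device_t dev)
 *	{
 *		int32_t cid;
 *
 *		cid = crypto_get_driverid(dev, sizeof(struct foo_session),
 *		    CRYPTOCAP_F_HARDWARE);
 *		if (cid < 0)
 *			return (ENXIO);
 *		crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
 *		crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
 *		return (0);
 *	}
 */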

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Fields not tagged are read-only.
 */
struct cryptocap {
	device_t	cc_dev;			/* (d) device/driver */
	u_int32_t	cc_sessions;		/* (d) # of sessions */
	u_int32_t	cc_koperations;		/* (d) # of asym operations */
	/*
	 * Largest possible operator length (in bits) for each type of
	 * encryption algorithm. XXX not used
	 */
	u_int16_t	cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
	u_int8_t	cc_alg[CRYPTO_ALGORITHM_MAX + 1];
	u_int8_t	cc_kalg[CRK_ALGORITHM_MAX + 1];

	int		cc_flags;		/* (d) flags */
#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;		/* (q) symmetric q blocked */
	int		cc_kqblocked;		/* (q) asymmetric q blocked */
	size_t		cc_session_size;
};
static	struct cryptocap *crypto_drivers = NULL;
static	int crypto_drivers_num = 0;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of block/unblock
 * operations.
 */
static	int crp_sleep = 0;
static	TAILQ_HEAD(cryptop_q, cryptop) crp_q;		/* request queues */
static	TAILQ_HEAD(,cryptkop) crp_kq;
static	struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

/*
 * Taskqueue used to dispatch the crypto requests
 * that have the CRYPTO_F_ASYNC flag.
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic.
 */
#define	CRYPTO_SEQ_GT(a,b)	((int)((a)-(b)) > 0)
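
/*
 * For example, with 32-bit sequence numbers the subtraction wraps:
 * CRYPTO_SEQ_GT(0x00000002, 0xfffffffe) computes (int)0x00000004 > 0
 * and correctly treats the post-wraparound value as "greater", while
 * CRYPTO_SEQ_GT(5, 7) computes (int)-2 > 0 and is false.
 */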

struct crypto_ret_worker {
	struct mtx crypto_ret_mtx;

	TAILQ_HEAD(,cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptkop) crp_ret_kq;	/* callback queue for asym jobs */

	u_int32_t reorder_ops;		/* total ordered sym jobs received */
	u_int32_t reorder_cur_seq;	/* current sym job dispatched */

	struct proc *cryptoretproc;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define FOREACH_CRYPTO_RETW(w) \
	for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define	CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define	CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)
#define	CRYPTO_RETW_EMPTY(w) \
	(TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))

static int crypto_workers_num = 0;
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
	   &crypto_workers_num, 0,
	   "Number of crypto workers used to dispatch crypto jobs");
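
/*
 * Being CTLFLAG_RDTUN, the worker count can only be set as a boot-time
 * tunable, e.g. in /boot/loader.conf:
 *
 *	kern.crypto_workers_num="2"
 *
 * Values outside [1, mp_ncpus] are replaced with mp_ncpus by
 * crypto_init() below.
 */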

static	uma_zone_t cryptop_zone;
static	uma_zone_t cryptodesc_zone;
static	uma_zone_t cryptoses_zone;

int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
	   &crypto_userasymcrypto, 0,
	   "Enable/disable user-mode access to asymmetric crypto support");
int	crypto_devallowsoft = 0;	/* only use hardware crypto */
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
	   &crypto_devallowsoft, 0,
	   "Enable/disable use of software crypto by /dev/crypto");

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static	void crypto_proc(void);
static	struct proc *cryptoproc;
static	void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int flags);
static	void crypto_remove(struct cryptocap *cap);
static	void crypto_task_invoke(void *ctx, int pending);
static	void crypto_batch_enqueue(struct cryptop *crp);

static	struct cryptostats cryptostats;
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif

/* Try to avoid directly exposing the key buffer as a symbol */
static struct keybuf *keybuf;

static struct keybuf empty_keybuf = {
        .kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
	caddr_t kmdp;

	kmdp = preload_search_by_type("elf kernel");

	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	keybuf = (struct keybuf *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_KEYBUF);

        if (keybuf == NULL)
                keybuf = &empty_keybuf;
}

/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf *
get_keybuf(void)
{
        return (keybuf);
}
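
/*
 * Illustrative consumer sketch (GELI does something similar): walk the
 * entries, use the keys of a known type, and scrub each key once it is
 * consumed.  The entry layout (kb_nents, kb_ents[], ke_type, ke_data)
 * and the KEYBUF_TYPE_* constants are assumed per <crypto/intake.h>;
 * use_key() is a hypothetical placeholder.
 *
 *	struct keybuf *kb = get_keybuf();
 *	unsigned int i;
 *
 *	for (i = 0; i < kb->kb_nents; i++) {
 *		if (kb->kb_ents[i].ke_type != KEYBUF_TYPE_GELI)
 *			continue;
 *		use_key(kb->kb_ents[i].ke_data);
 *		explicit_bzero(kb->kb_ents[i].ke_data,
 *		    sizeof(kb->kb_ents[i].ke_data));
 *		kb->kb_ents[i].ke_type = KEYBUF_TYPE_NONE;
 *	}
 */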

static int
crypto_init(void)
{
	struct crypto_ret_worker *ret_worker;
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
		MTX_DEF|MTX_QUIET);

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);
	mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cryptoses_zone = uma_zcreate("crypto_session",
	    sizeof(struct crypto_session), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	if (cryptodesc_zone == NULL || cryptop_zone == NULL ||
	    cryptoses_zone == NULL) {
		printf("crypto_init: cannot setup crypto zones\n");
		error = ENOMEM;
		goto bad;
	}

	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_num *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot setup crypto drivers\n");
		error = ENOMEM;
		goto bad;
	}

	if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
		crypto_workers_num = mp_ncpus;

	crypto_tq = taskqueue_create("crypto", M_WAITOK|M_ZERO,
				taskqueue_thread_enqueue, &crypto_tq);
	if (crypto_tq == NULL) {
		printf("crypto_init: cannot setup crypto taskqueue\n");
		error = ENOMEM;
		goto bad;
	}

	taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
		"crypto");

	error = kproc_create((void (*)(void *)) crypto_proc, NULL,
		    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d\n",
			error);
		goto bad;
	}

	crypto_ret_workers = malloc(crypto_workers_num * sizeof(struct crypto_ret_worker),
			M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
	if (crypto_ret_workers == NULL) {
		error = ENOMEM;
		printf("crypto_init: cannot allocate ret workers\n");
		goto bad;
	}

	FOREACH_CRYPTO_RETW(ret_worker) {
		TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_kq);

		ret_worker->reorder_ops = 0;
		ret_worker->reorder_cur_seq = 0;

		mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF);

		error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker,
				&ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker));
		if (error) {
			printf("crypto_init: cannot start cryptoret thread; error %d\n",
				error);
			goto bad;
		}
	}

	keybuf_init();

	return 0;
bad:
	crypto_destroy();
	return error;
}

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct proc **pp, void *q)
{
	struct proc *p;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(q);
		PROC_LOCK(p);		/* NB: ensure we don't miss wakeup */
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
		PROC_UNLOCK(p);
		CRYPTO_DRIVER_LOCK();
	}
}

static void
crypto_destroy(void)
{
	struct crypto_ret_worker *ret_worker;

	/*
	 * Terminate any crypto threads.
	 */
	if (crypto_tq != NULL)
		taskqueue_drain_all(crypto_tq);
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptoproc, &crp_q);
	FOREACH_CRYPTO_RETW(ret_worker)
		crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptoses_zone != NULL)
		uma_zdestroy(cryptoses_zone);
	if (cryptodesc_zone != NULL)
		uma_zdestroy(cryptodesc_zone);
	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	FOREACH_CRYPTO_RETW(ret_worker)
		mtx_destroy(&ret_worker->crypto_ret_mtx);
	free(crypto_ret_workers, M_CRYPTO_DATA);
	if (crypto_tq != NULL)
		taskqueue_free(crypto_tq);
	mtx_destroy(&crypto_drivers_mtx);
}

uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
	return (crypto_session->hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
	return (crypto_session->capabilities);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
	return (crypto_session->softc);
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Compare a driver's list of supported algorithms against another
 * list; return non-zero if all algorithms are supported.
 */
static int
driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
{
	const struct cryptoini *cr;

	/* See if all the algorithms are supported. */
	for (cr = cri; cr; cr = cr->cri_next)
		if (cap->cc_alg[cr->cri_alg] == 0)
			return 0;
	return 1;
}

/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 * The algorithm we use here is pretty stupid; just use the
 * first driver that supports all the algorithms we need. If there
 * are multiple drivers we choose the driver with the fewest active
 * sessions.  We prefer hardware-backed drivers to software ones.
 *
 * XXX We need more smarts here (in real life too, but that's
 * XXX another story altogether).
 */
static struct cryptocap *
crypto_select_driver(const struct cryptoini *cri, int flags)
{
	struct cryptocap *cap, *best;
	int match, hid;

	CRYPTO_DRIVER_ASSERT();

	/*
	 * Look first for hardware crypto devices if permitted.
	 */
	if (flags & CRYPTOCAP_F_HARDWARE)
		match = CRYPTOCAP_F_HARDWARE;
	else
		match = CRYPTOCAP_F_SOFTWARE;
	best = NULL;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = &crypto_drivers[hid];
		/*
		 * If it's not initialized, is in the process of
		 * going away, or is not appropriate (hardware
		 * or software based on match), then skip.
		 */
		if (cap->cc_dev == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
		    (cap->cc_flags & match) == 0)
			continue;

		/* verify all the algorithms are supported. */
		if (driver_suitable(cap, cri)) {
			if (best == NULL ||
			    cap->cc_sessions < best->cc_sessions)
				best = cap;
		}
	}
	if (best == NULL && match == CRYPTOCAP_F_HARDWARE &&
	    (flags & CRYPTOCAP_F_SOFTWARE)) {
		/* sort of an Algol 68-style for loop */
		match = CRYPTOCAP_F_SOFTWARE;
		goto again;
	}
	return best;
}

/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses, struct cryptoini *cri, int crid)
{
	crypto_session_t res;
	void *softc_mem;
	struct cryptocap *cap;
	u_int32_t hid;
	size_t softc_size;
	int err;

restart:
	res = NULL;
	softc_mem = NULL;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && !driver_suitable(cap, cri))
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(cri, crid);
		/*
		 * If NULL, then we cannot do everything in one session.
		 * XXX Fix this. We need to inject a "virtual" session
		 * XXX layer right about here.
		 */
	}
	if (cap == NULL) {
		CRYPTDEB("no driver");
		err = EOPNOTSUPP;
		goto out;
	}
	cap->cc_sessions++;
	softc_size = cap->cc_session_size;
	hid = cap - crypto_drivers;
	cap = NULL;
	CRYPTO_DRIVER_UNLOCK();

	softc_mem = malloc(softc_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO);
	res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO);
	res->softc = softc_mem;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0) {
		cap->cc_sessions--;
		crypto_remove(cap);
		cap = NULL;
	}
	if (cap == NULL) {
		free(softc_mem, M_CRYPTO_DATA);
		uma_zfree(cryptoses_zone, res);
		CRYPTO_DRIVER_UNLOCK();
		goto restart;
	}

	/* Call the driver initialization routine. */
	err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, cri);
	if (err != 0) {
		CRYPTDEB("dev newsession failed: %d", err);
		goto out;
	}

	res->capabilities = cap->cc_flags & 0xff000000;
	res->hid = hid;
	*cses = res;

out:
	CRYPTO_DRIVER_UNLOCK();
	if (err != 0) {
		free(softc_mem, M_CRYPTO_DATA);
		if (res != NULL)
			uma_zfree(cryptoses_zone, res);
	}
	return err;
}
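
/*
 * Illustrative sketch of a caller: chain two cryptoini structures
 * (cipher followed by MAC) and let the framework pick any capable
 * driver.  Key pointers and lengths are placeholders; cri_klen is
 * expressed in bits.
 *
 *	struct cryptoini crie, cria;
 *	crypto_session_t cses;
 *	int error;
 *
 *	bzero(&crie, sizeof(crie));
 *	bzero(&cria, sizeof(cria));
 *	crie.cri_alg = CRYPTO_AES_CBC;
 *	crie.cri_klen = 128;
 *	crie.cri_key = aes_key;
 *	crie.cri_next = &cria;
 *	cria.cri_alg = CRYPTO_SHA2_256_HMAC;
 *	cria.cri_klen = 256;
 *	cria.cri_key = hmac_key;
 *	error = crypto_newsession(&cses, &crie,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 */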

static void
crypto_remove(struct cryptocap *cap)
{

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
		bzero(cap, sizeof(*cap));
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
	struct cryptocap *cap;
	void *ses;
	size_t ses_size;
	u_int32_t hid;

	if (cses == NULL)
		return;

	CRYPTO_DRIVER_LOCK();

	hid = crypto_ses2hid(cses);
	KASSERT(hid < crypto_drivers_num,
	    ("bogus crypto_session %p hid %u", cses, hid));
	cap = &crypto_drivers[hid];

	ses = cses->softc;
	ses_size = cap->cc_session_size;

	if (cap->cc_sessions)
		cap->cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	CRYPTODEV_FREESESSION(cap->cc_dev, cses);

	explicit_bzero(ses, ses_size);
	free(ses, M_CRYPTO_DATA);
	uma_zfree(cryptoses_zone, cses);

	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		crypto_remove(cap);

	CRYPTO_DRIVER_UNLOCK();
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
	struct cryptocap *newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		printf("%s: no flags specified when registering driver\n",
		    device_get_nameunit(dev));
		return -1;
	}

	CRYPTO_DRIVER_LOCK();

	for (i = 0; i < crypto_drivers_num; i++) {
		if (crypto_drivers[i].cc_dev == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
			break;
		}
	}

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_dev = dev;
	crypto_drivers[i].cc_flags = flags;
	crypto_drivers[i].cc_session_size = sessionsize;
	if (bootverbose)
		printf("crypto: assign %s driver id %u, flags 0x%x\n",
		    device_get_nameunit(dev), i, flags);

	CRYPTO_DRIVER_UNLOCK();

	return i;
}

/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us a simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_num; i++) {
		device_t dev = crypto_drivers[i].cc_dev;
		if (dev == NULL ||
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;
		if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
		    strncmp(match, device_get_name(dev), len) == 0)
			break;
	}
	CRYPTO_DRIVER_UNLOCK();
	return i < crypto_drivers_num ? i : -1;
}
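
/*
 * For example, crypto_find_driver("aesni0") matches only that unit,
 * while crypto_find_driver("aesni") matches the first registered
 * instance of that driver name; -1 is returned if nothing matches.
 */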

/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap = crypto_checkdriver(hid);
	return cap != NULL ? cap->cc_dev : NULL;
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap = crypto_checkdriver(hid);
	return cap != NULL ? cap->cc_flags : 0;
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: %s registers key alg %u flags %u\n"
				, device_get_nameunit(cap->cc_dev)
				, kalg
				, flags
			);
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
				, device_get_nameunit(cap->cc_dev)
				, alg
				, flags
				, maxoplen
			);
		cap->cc_sessions = 0;		/* Unmark */
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

static void
driver_finis(struct cryptocap *cap)
{
	u_int32_t ses, kops;

	CRYPTO_DRIVER_ASSERT();

	ses = cap->cc_sessions;
	kops = cap->cc_koperations;
	bzero(cap, sizeof(*cap));
	if (ses != 0 || kops != 0) {
		/*
		 * If there are pending sessions,
		 * just mark as invalid.
		 */
		cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
		cap->cc_sessions = ses;
		cap->cc_koperations = kops;
	}
}

/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	struct cryptocap *cap;
	int i, err;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1)
			driver_finis(cap);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_DRIVER_UNLOCK();

	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		driver_finis(cap);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_DRIVER_UNLOCK();

	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (what & CRYPTO_ASYMQ)
			cap->cc_kqblocked = 0;
		if (crp_sleep)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return err;
}
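
/*
 * Illustrative driver-side sketch: a driver whose internal queue fills
 * returns ERESTART from its process method (so the framework marks it
 * blocked and requeues the op) and calls crypto_unblock() once it can
 * accept work again.  All "foo" names are hypothetical.
 *
 *	static int
 *	foo_process(device_t dev, struct cryptop *crp, int hint)
 *	{
 *		if (foo_queue_full(dev))
 *			return (ERESTART);
 *		...
 *	}
 *
 *	static void
 *	foo_intr(void *arg)
 *	{
 *		...
 *		crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
 *	}
 */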

/*
 * Add a crypto request to a queue, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	struct cryptocap *cap;
	u_int32_t hid;
	int result;

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		binuptime(&crp->crp_tstamp);
#endif

	crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num;

	if (CRYPTOP_ASYNC(crp)) {
		if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) {
			struct crypto_ret_worker *ret_worker;

			ret_worker = CRYPTO_RETW(crp->crp_retw_id);

			CRYPTO_RETW_LOCK(ret_worker);
			crp->crp_seq = ret_worker->reorder_ops++;
			CRYPTO_RETW_UNLOCK(ret_worker);
		}

		TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
		taskqueue_enqueue(crypto_tq, &crp->crp_task);
		return (0);
	}

	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		hid = crypto_ses2hid(crp->crp_session);

		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		/* The driver cannot disappear while there is an active session. */
		KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
		if (!cap->cc_qblocked) {
			result = crypto_invoke(cap, crp, 0);
			if (result != ERESTART)
				return (result);
			/*
			 * The driver ran out of resources, put the request on
			 * the queue.
			 */
		}
	}
	crypto_batch_enqueue(crp);
	return 0;
}

static void
crypto_batch_enqueue(struct cryptop *crp)
{

	CRYPTO_Q_LOCK();
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
	if (crp_sleep)
		wakeup_one(&crp_q);
	CRYPTO_Q_UNLOCK();
}

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	int error;

	cryptostats.cs_kops++;

	error = crypto_kinvoke(krp, krp->krp_crid);
	if (error == ERESTART) {
		CRYPTO_Q_LOCK();
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		if (crp_sleep)
			wakeup_one(&crp_q);
		CRYPTO_Q_UNLOCK();
		error = 0;
	}
	return error;
}

/*
 * Verify a driver is suitable for the specified operation.
 */
static __inline int
kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
{
	return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
}

/*
 * Select a driver for an asym operation.  The driver must
 * support the necessary algorithm.  The caller can constrain
 * which device is selected with the flags parameter.  The
 * algorithm we use here is pretty stupid; just use the first
 * driver that supports the algorithms we need. If there are
 * multiple suitable drivers we choose the driver with the
 * fewest active operations.  We prefer hardware-backed
 * drivers to software ones when either may be used.
 */
static struct cryptocap *
crypto_select_kdriver(const struct cryptkop *krp, int flags)
{
	struct cryptocap *cap, *best;
	int match, hid;

	CRYPTO_DRIVER_ASSERT();

	/*
	 * Look first for hardware crypto devices if permitted.
	 */
	if (flags & CRYPTOCAP_F_HARDWARE)
		match = CRYPTOCAP_F_HARDWARE;
	else
		match = CRYPTOCAP_F_SOFTWARE;
	best = NULL;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = &crypto_drivers[hid];
		/*
		 * If it's not initialized, is in the process of
		 * going away, or is not appropriate (hardware
		 * or software based on match), then skip.
		 */
		if (cap->cc_dev == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
		    (cap->cc_flags & match) == 0)
			continue;

		/* verify all the algorithms are supported. */
		if (kdriver_suitable(cap, krp)) {
			if (best == NULL ||
			    cap->cc_koperations < best->cc_koperations)
				best = cap;
		}
	}
	if (best != NULL)
		return best;
	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
		/* sort of an Algol 68-style for loop */
		match = CRYPTOCAP_F_SOFTWARE;
		goto again;
	}
	return best;
}

/*
 * Dispatch an asymmetric crypto request.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int crid)
{
	struct cryptocap *cap = NULL;
	int error;

	KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
	KASSERT(krp->krp_callback != NULL,
	    ("%s: krp->krp_callback == NULL", __func__));

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		cap = crypto_checkdriver(crid);
		if (cap != NULL) {
			/*
			 * Driver present, it must support the necessary
			 * algorithm and, if s/w drivers are excluded,
			 * it must be registered as hardware-backed.
			 */
			if (!kdriver_suitable(cap, krp) ||
			    (!crypto_devallowsoft &&
			     (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
				cap = NULL;
		}
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		if (!crypto_devallowsoft)	/* NB: disallow s/w drivers */
			crid &= ~CRYPTOCAP_F_SOFTWARE;
		cap = crypto_select_kdriver(krp, crid);
	}
	if (cap != NULL && !cap->cc_kqblocked) {
		krp->krp_hid = cap - crypto_drivers;
		cap->cc_koperations++;
		CRYPTO_DRIVER_UNLOCK();
		error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
		CRYPTO_DRIVER_LOCK();
		if (error == ERESTART) {
			cap->cc_koperations--;
			CRYPTO_DRIVER_UNLOCK();
			return (error);
		}
	} else {
		/*
		 * NB: cap is !NULL if device is blocked; in
		 *     that case return ERESTART so the operation
		 *     is resubmitted if possible.
		 */
		error = (cap == NULL) ? ENODEV : ERESTART;
	}
	CRYPTO_DRIVER_UNLOCK();

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
{
	struct bintime now, delta;
	struct timespec t;
	uint64_t u;

	binuptime(&now);
	u = now.frac;
	delta.frac = now.frac - bt->frac;
	delta.sec = now.sec - bt->sec;
	if (u < delta.frac)
		delta.sec--;
	bintime2timespec(&delta, &t);
	timespecadd(&ts->acc, &t, &ts->acc);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*bt = now;
}
#endif

static void
crypto_task_invoke(void *ctx, int pending)
{
	struct cryptocap *cap;
	struct cryptop *crp;
	int hid, result;

	crp = (struct cryptop *)ctx;

	hid = crypto_ses2hid(crp->crp_session);
	cap = crypto_checkdriver(hid);

	result = crypto_invoke(cap, crp, 0);
	if (result == ERESTART)
		crypto_batch_enqueue(crp);
}

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{

	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
	KASSERT(crp->crp_callback != NULL,
	    ("%s: crp->crp_callback == NULL", __func__));
	KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
		struct cryptodesc *crd;
		crypto_session_t nses;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * XXX: What if there are more already queued requests for this
		 *      session?
		 */
		crypto_freesession(crp->crp_session);

		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		/* XXX propagate flags from initial session? */
		if (crypto_newsession(&nses, &(crp->crp_desc->CRD_INI),
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
			crp->crp_session = nses;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;

#ifdef DIAGNOSTIC
	{
		struct cryptop *crp2;
		struct crypto_ret_worker *ret_worker;

		CRYPTO_Q_LOCK();
		TAILQ_FOREACH(crp2, &crp_q, crp_next) {
			KASSERT(crp2 != crp,
			    ("Freeing cryptop from the crypto queue (%p).",
			    crp));
		}
		CRYPTO_Q_UNLOCK();

		FOREACH_CRYPTO_RETW(ret_worker) {
			CRYPTO_RETW_LOCK(ret_worker);
			TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
				KASSERT(crp2 != crp,
				    ("Freeing cryptop from the return queue (%p).",
				    crp));
			}
			CRYPTO_RETW_UNLOCK(ret_worker);
		}
	}
#endif

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		uma_zfree(cryptodesc_zone, crd);
	}
	uma_zfree(cryptop_zone, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;

	crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
	if (crp != NULL) {
		while (num--) {
			crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
			if (crd == NULL) {
				crypto_freereq(crp);
				return NULL;
			}

			crd->crd_next = crp->crp_desc;
			crp->crp_desc = crd;
		}
	}
	return crp;
}
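
/*
 * Illustrative sketch of the request path: allocate a cryptop with one
 * descriptor, describe the work, and hand it to crypto_dispatch(); the
 * callback runs when the driver completes it.  The buffer, IV, length,
 * session, and callback variables are placeholders.
 *
 *	struct cryptop *crp;
 *	struct cryptodesc *crd;
 *
 *	crp = crypto_getreq(1);
 *	if (crp == NULL)
 *		return (ENOMEM);
 *	crd = crp->crp_desc;
 *	crd->crd_alg = CRYPTO_AES_CBC;
 *	crd->crd_skip = 0;
 *	crd->crd_len = len;
 *	crd->crd_flags = CRD_F_ENCRYPT | CRD_F_IV_EXPLICIT;
 *	bcopy(iv, crd->crd_iv, sizeof(crd->crd_iv));
 *	crp->crp_session = cses;
 *	crp->crp_ilen = len;
 *	crp->crp_buf = (caddr_t)buf;
 *	crp->crp_flags = CRYPTO_F_CBIFSYNC;
 *	crp->crp_callback = my_callback;
 *	return (crypto_dispatch(crp));
 */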

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
		("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if (!CRYPTOP_ASYNC_KEEPORDER(crp) &&
	    ((crp->crp_flags & CRYPTO_F_CBIMM) ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
	     (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct bintime t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		struct crypto_ret_worker *ret_worker;
		bool wake;

		ret_worker = CRYPTO_RETW(crp->crp_retw_id);
		wake = false;

		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETW_LOCK(ret_worker);
		if (CRYPTOP_ASYNC_KEEPORDER(crp)) {
			struct cryptop *tmp;

			TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q,
					cryptop_q, crp_next) {
				if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
					TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q,
							tmp, crp, crp_next);
					break;
				}
			}
			if (tmp == NULL) {
				TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q,
						crp, crp_next);
			}

			if (crp->crp_seq == ret_worker->reorder_cur_seq)
				wake = true;
		} else {
			if (CRYPTO_RETW_EMPTY(ret_worker))
				wake = true;

			TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next);
		}

		if (wake)
			wakeup_one(&ret_worker->crp_ret_q);	/* shared wait channel */
		CRYPTO_RETW_UNLOCK(ret_worker);
	}
}
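
/*
 * Illustrative callback sketch: crypto_invoke() above may complete a
 * request with crp_etype = EAGAIN after migrating its session, in
 * which case the consumer is expected to clear the error and done flag
 * and resubmit.  my_callback is a hypothetical consumer routine.
 *
 *	static int
 *	my_callback(struct cryptop *crp)
 *	{
 *		if (crp->crp_etype == EAGAIN) {
 *			crp->crp_etype = 0;
 *			crp->crp_flags &= ~CRYPTO_F_DONE;
 *			return (crypto_dispatch(crp));
 *		}
 *		...
 *		crypto_freereq(crp);
 *		return (0);
 *	}
 */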

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	struct crypto_ret_worker *ret_worker;
	struct cryptocap *cap;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	CRYPTO_DRIVER_LOCK();
	/* XXX: What if driver is loaded in the meantime? */
	if (krp->krp_hid < crypto_drivers_num) {
		cap = &crypto_drivers[krp->krp_hid];
		KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
		cap->cc_koperations--;
		if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_remove(cap);
	}
	CRYPTO_DRIVER_UNLOCK();

	ret_worker = CRYPTO_RETW(0);

	CRYPTO_RETW_LOCK(ret_worker);
	if (CRYPTO_RETW_EMPTY(ret_worker))
		wakeup_one(&ret_worker->crp_ret_q);		/* shared wait channel */
	TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next);
	CRYPTO_RETW_UNLOCK(ret_worker);
}

int
crypto_getfeat(int *featp)
{
	int hid, kalg, feat = 0;

	CRYPTO_DRIVER_LOCK();
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		const struct cryptocap *cap = &crypto_drivers[hid];

		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    !crypto_devallowsoft) {
			continue;
		}
		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
				feat |= 1 << kalg;
	}
	CRYPTO_DRIVER_UNLOCK();
	*featp = feat;
	return (0);
}
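
/*
 * The feature word maps asymmetric capability k to bit (1 << k), so a
 * driver registered for CRK_MOD_EXP shows up as CRF_MOD_EXP when
 * userland queries the CIOCASYMFEAT ioctl on /dev/crypto.
 */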

/*
 * Terminate a thread at module unload.  The process that
 * initiated this is waiting for us to signal that we're gone;
 * wake it up and exit.  We use the driver table lock to ensure
 * we don't do the wakeup before they're waiting.  There is no
 * race here because the waiter sleeps on the proc lock for the
 * thread so it gets notified at the right time because of an
 * extra wakeup that's done in exit1().
 */
static void
crypto_finis(void *chan)
{
	CRYPTO_DRIVER_LOCK();
	wakeup_one(chan);
	CRYPTO_DRIVER_UNLOCK();
	kproc_exit(0);
}

/*
 * Crypto thread, dispatches crypto requests.
 */
static void
crypto_proc(void)
{
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	u_int32_t hid;
	int result, hint;

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
	fpu_kern_thread(FPU_KERN_NORMAL);
#endif

	CRYPTO_Q_LOCK();
	for (;;) {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			hid = crypto_ses2hid(crp->crp_session);
			cap = crypto_checkdriver(hid);
			/*
			 * The driver cannot disappear while there is an
			 * active session.
			 */
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			if (cap == NULL || cap->cc_dev == NULL) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless of whether it's for the
					 * same driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (crypto_ses2hid(submit->crp_session) == hid)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
						break;
					/* keep scanning in case more are queued */
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			hid = crypto_ses2hid(submit->crp_session);
			cap = crypto_checkdriver(hid);
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			result = crypto_invoke(cap, submit, hint);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[crypto_ses2hid(submit->crp_session)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				cryptostats.cs_blocks++;
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_dev == NULL) {
				/*
				 * Operation needs to be migrated, invalidate
				 * the assigned device so it will reselect a
				 * new one below.  Propagate the original
				 * crid selection flags if supplied.
				 */
				krp->krp_hid = krp->krp_crid &
				    (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
				if (krp->krp_hid == 0)
					krp->krp_hid =
				    CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, krp->krp_hid);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
				cryptostats.cs_kblocks++;
			}
		}

		if (submit == NULL && krp == NULL) {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more ops to process.
			 * This happens either by submission or by a driver
			 * becoming unblocked and notifying us through
			 * crypto_unblock.  Note that when we wakeup we
			 * start processing each queue again from the
			 * front. It's not clear that it's important to
			 * preserve this ordering since ops may finish
			 * out of order if dispatched to different devices
			 * and some become blocked while others do not.
			 */
			crp_sleep = 1;
			msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
			crp_sleep = 0;
			if (cryptoproc == NULL)
				break;
			cryptostats.cs_intrs++;
		}
	}
	CRYPTO_Q_UNLOCK();

	crypto_finis(&crp_q);
}

/*
 * Crypto returns thread, does callbacks for processed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 */
static void
crypto_ret_proc(struct crypto_ret_worker *ret_worker)
{
	struct cryptop *crpt;
	struct cryptkop *krpt;

	CRYPTO_RETW_LOCK(ret_worker);
	for (;;) {
		/* Harvest return q's for completed ops */
		crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
		if (crpt != NULL) {
			if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
				TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next);
				ret_worker->reorder_cur_seq++;
			} else {
				crpt = NULL;
			}
		}

		if (crpt == NULL) {
			crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
			if (crpt != NULL)
				TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next);
		}

		krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq);
		if (krpt != NULL)
			TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next);

		if (crpt != NULL || krpt != NULL) {
			CRYPTO_RETW_UNLOCK(ret_worker);
			/*
			 * Run callbacks unlocked.
			 */
			if (crpt != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct bintime t = crpt->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crpt->crp_callback(crpt);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crpt->crp_callback(crpt);
			}
			if (krpt != NULL)
				krpt->krp_callback(krpt);
			CRYPTO_RETW_LOCK(ret_worker);
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more returns to process.
			 */
			msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT,
				"crypto_ret_wait", 0);
			if (ret_worker->cryptoretproc == NULL)
				break;
			cryptostats.cs_rets++;
		}
	}
	CRYPTO_RETW_UNLOCK(ret_worker);

	crypto_finis(&ret_worker->crp_ret_q);
}

#ifdef DDB
static void
db_show_drivers(void)
{
	int hid;

	db_printf("%12s %4s %4s %8s %2s %2s\n"
		, "Device"
		, "Ses"
		, "Kops"
		, "Flags"
		, "QB"
		, "KB"
	);
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		const struct cryptocap *cap = &crypto_drivers[hid];
		if (cap->cc_dev == NULL)
			continue;
		db_printf("%-12s %4u %4u %08x %2u %2u\n"
		    , device_get_nameunit(cap->cc_dev)
		    , cap->cc_sessions
		    , cap->cc_koperations
		    , cap->cc_flags
		    , cap->cc_qblocked
		    , cap->cc_kqblocked
		);
	}
}

DB_SHOW_COMMAND(crypto, db_show_crypto)
{
	struct cryptop *crp;
	struct crypto_ret_worker *ret_worker;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
	    "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
	    "Desc", "Callback");
	TAILQ_FOREACH(crp, &crp_q, crp_next) {
		db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
		    , (int) crypto_ses2hid(crp->crp_session)
		    , (int) crypto_ses2caps(crp->crp_session)
		    , crp->crp_ilen, crp->crp_olen
		    , crp->crp_etype
		    , crp->crp_flags
		    , crp->crp_desc
		    , crp->crp_callback
		);
	}
	FOREACH_CRYPTO_RETW(ret_worker) {
		db_printf("\n%8s %4s %4s %4s %8s\n",
		    "ret_worker", "HID", "Etype", "Flags", "Callback");
		if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
			TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
				db_printf("%8td %4u %4u %04x %8p\n"
				    , CRYPTO_RETW_ID(ret_worker)
				    , (int) crypto_ses2hid(crp->crp_session)
				    , crp->crp_etype
				    , crp->crp_flags
				    , crp->crp_callback
				);
			}
		}
	}
}

DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
{
	struct cryptkop *krp;
	struct crypto_ret_worker *ret_worker;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
	    "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
	TAILQ_FOREACH(krp, &crp_kq, krp_next) {
		db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
		    , krp->krp_op
		    , krp->krp_status
		    , krp->krp_iparams, krp->krp_oparams
		    , krp->krp_crid, krp->krp_hid
		    , krp->krp_callback
		);
	}

	ret_worker = CRYPTO_RETW(0);
	if (!TAILQ_EMPTY(&ret_worker->crp_ret_kq)) {
		db_printf("%4s %5s %8s %4s %8s\n",
		    "Op", "Status", "CRID", "HID", "Callback");
		TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) {
			db_printf("%4u %5u %08x %4u %8p\n"
			    , krp->krp_op
			    , krp->krp_status
			    , krp->krp_crid, krp->krp_hid
			    , krp->krp_callback
			);
		}
	}
}
#endif

int crypto_modevent(module_t mod, int type, void *unused);

/*
 * Initialization code, both for static and dynamic loading.
 * Note this is not invoked with the usual MODULE_DECLARE
 * mechanism but instead is listed as a dependency by the
 * cryptosoft driver.  This guarantees proper ordering of
 * calls on module load/unload.
 */
int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/* XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		return 0;
	}
	return error;
}
MODULE_VERSION(crypto, 1);
MODULE_DEPEND(crypto, zlib, 1, 1, 1);
1854