xref: /dragonfly/sys/opencrypto/crypto.c (revision f746689a)
1 /*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.7 2003/06/03 00:09:02 sam Exp $	*/
2 /*	$DragonFly: src/sys/opencrypto/crypto.c,v 1.14 2006/12/23 00:27:03 swildner Exp $	*/
3 /*	$OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $	*/
4 /*
5  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
6  *
7  * This code was written by Angelos D. Keromytis in Athens, Greece, in
8  * February 2000. Network Security Technologies Inc. (NSTI) kindly
9  * supported the development of this code.
10  *
11  * Copyright (c) 2000, 2001 Angelos D. Keromytis
12  *
13  * Permission to use, copy, and modify this software with or without fee
14  * is hereby granted, provided that this entire notice is included in
15  * all source code copies of any software which is or includes a copy or
16  * modification of this software.
17  *
18  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
19  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
20  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
21  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
22  * PURPOSE.
23  */
24 
25 #define CRYPTO_TIMING			/* enable cryptop timing stuff */
26 
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/eventhandler.h>
30 #include <sys/kernel.h>
31 #include <sys/kthread.h>
32 #include <sys/malloc.h>
33 #include <sys/proc.h>
34 #include <sys/sysctl.h>
35 #include <sys/interrupt.h>
36 #include <sys/thread2.h>
37 
38 #include <vm/vm_zone.h>
39 #include <opencrypto/cryptodev.h>
40 #include <opencrypto/xform.h>			/* XXX for M_XDATA */
41 
42 #define	SESID2HID(sid)	(((sid) >> 32) & 0xffffffff)
43 
/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static	struct cryptocap *crypto_drivers = NULL;	/* grown on demand */
static	int crypto_drivers_num = 0;		/* # slots in crypto_drivers */

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
static	TAILQ_HEAD(,cryptop) crp_q;		/* request queues */
static	TAILQ_HEAD(,cryptkop) crp_kq;

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
static	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queues */
static	TAILQ_HEAD(,cryptkop) crp_ret_kq;

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones.
 */
static	vm_zone_t cryptop_zone;
static	vm_zone_t cryptodesc_zone;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
	   &crypto_usercrypto, 0,
	   "Enable/disable user-mode access to crypto support");
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
	   &crypto_userasymcrypto, 0,
	   "Enable/disable user-mode access to asymmetric crypto support");
int	crypto_devallowsoft = 0;	/* only use hardware crypto for asym */
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
	   &crypto_devallowsoft, 0,
	   "Enable/disable use of software asym crypto support");

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
90 
/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  No critical
 * section or lock/interlock guarantees are made on entry.
 *
 * Requests are typically passed to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs in a critical section.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) who call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQ's as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at appropriate protection levels.
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks with no critical section or lock/interlock
 * guarantees.  This decoupling ensures the crypto driver interrupt service
 * routine is not delayed while the callback takes place and that callbacks
 * are delivered after a context switch (as opposed to a software interrupt
 * that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
static inthand2_t cryptointr;		/* swi handler: drains crp_q/crp_kq */
static	void cryptoret(void);		/* kernel thread for callbacks*/
static	struct thread *cryptothread;	/* cryptoret's thread handle */
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int hint);

/* Operation/error counters, exported via the sysctl below. */
static struct cryptostats cryptostats;
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;		/* runtime switch for timing stats */
SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif

static void *crypto_int_id;		/* cookie returned by register_swi() */
132 
/*
 * One-time subsystem initialization: create the op/descriptor zones,
 * the initial driver table, the four request/return queues, the
 * dispatch software interrupt and the callback kernel thread.
 *
 * Returns 0 or an errno.  On kthread failure, crypto_destroy() is
 * called to unwind; the zones themselves cannot be reclaimed (see the
 * XXX note in crypto_destroy).
 */
static int
crypto_init(void)
{
	int error;

	cryptop_zone = zinit("cryptop", sizeof (struct cryptop), 0, 0, 1);
	cryptodesc_zone = zinit("cryptodesc", sizeof (struct cryptodesc),
				0, 0, 1);
	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
		kprintf("crypto_init: cannot setup crypto zones\n");
		return ENOMEM;
	}

	/* Start small; crypto_get_driverid() doubles the table on demand. */
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = kmalloc(crypto_drivers_num *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		kprintf("crypto_init: cannot malloc driver table\n");
		return ENOMEM;
	}

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);

	TAILQ_INIT(&crp_ret_q);
	TAILQ_INIT(&crp_ret_kq);

	/* Software interrupt that dispatches queued ops (see cryptointr). */
	crypto_int_id = register_swi(SWI_CRYPTO, cryptointr, NULL,
					"swi_crypto", NULL);
	/* Kernel thread that delivers completion callbacks (cryptoret). */
	error = kthread_create((void (*)(void *)) cryptoret, NULL,
		    &cryptothread, "cryptoret");
	if (error) {
		kprintf("crypto_init: cannot start cryptoret thread; error %d",
			error);
		crypto_destroy();
	}
	return error;
}
171 
172 static void
173 crypto_destroy(void)
174 {
175 	/* XXX no wait to reclaim zones */
176 	if (crypto_drivers != NULL)
177 		kfree(crypto_drivers, M_CRYPTO_DATA);
178 	unregister_swi(crypto_int_id);
179 }
180 
181 /*
182  * Initialization code, both for static and dynamic loading.
183  */
184 static int
185 crypto_modevent(module_t mod, int type, void *unused)
186 {
187 	int error = EINVAL;
188 
189 	switch (type) {
190 	case MOD_LOAD:
191 		error = crypto_init();
192 		if (error == 0 && bootverbose)
193 			kprintf("crypto: <crypto core>\n");
194 		break;
195 	case MOD_UNLOAD:
196 		/*XXX disallow if active sessions */
197 		error = 0;
198 		crypto_destroy();
199 		break;
200 	}
201 	return error;
202 }
203 
/* Module-system glue; loaded very early (SI_SUB_DRIVERS, first order). */
static moduledata_t crypto_mod = {
	"crypto",
	crypto_modevent,
	0
};
MODULE_VERSION(crypto, 1);
DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
211 
/*
 * Create a new session.
 *
 * sid:  out parameter; on success holds (driver index << 32) | the
 *       driver-local session id.
 * cri:  linked list of algorithm descriptors the session must support.
 * hard: driver selection policy -- >0 hardware only, <0 software only,
 *       0 no preference.
 *
 * Returns 0 on success or an errno (EINVAL when no suitable driver
 * supports all requested algorithms).
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;

	crit_enter();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session layer right
			 * XXX about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;		/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
					crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				/* Compose sid: driver index in the high 32
				 * bits, driver-local id in the low 32. */
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			break;
		}
	}
done:
	crit_exit();
	return err;
}
285 
286 /*
287  * Delete an existing session (or a reserved session on an unregistered
288  * driver).
289  */
290 int
291 crypto_freesession(u_int64_t sid)
292 {
293 	u_int32_t hid;
294 	int err;
295 
296 	crit_enter();
297 
298 	if (crypto_drivers == NULL) {
299 		err = EINVAL;
300 		goto done;
301 	}
302 
303 	/* Determine two IDs. */
304 	hid = SESID2HID(sid);
305 
306 	if (hid >= crypto_drivers_num) {
307 		err = ENOENT;
308 		goto done;
309 	}
310 
311 	if (crypto_drivers[hid].cc_sessions)
312 		crypto_drivers[hid].cc_sessions--;
313 
314 	/* Call the driver cleanup routine, if available. */
315 	if (crypto_drivers[hid].cc_freesession)
316 		err = crypto_drivers[hid].cc_freesession(
317 				crypto_drivers[hid].cc_arg, sid);
318 	else
319 		err = 0;
320 
321 	/*
322 	 * If this was the last session of a driver marked as invalid,
323 	 * make the entry available for reuse.
324 	 */
325 	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
326 	    crypto_drivers[hid].cc_sessions == 0)
327 		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));
328 
329 done:
330 	crit_exit();
331 	return err;
332 }
333 
334 /*
335  * Return an unused driver id.  Used by drivers prior to registering
336  * support for the algorithms they handle.
337  */
338 int32_t
339 crypto_get_driverid(u_int32_t flags)
340 {
341 	struct cryptocap *newdrv;
342 	int i;
343 
344 	crit_enter();
345 	for (i = 0; i < crypto_drivers_num; i++)
346 		if (crypto_drivers[i].cc_process == NULL &&
347 		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
348 		    crypto_drivers[i].cc_sessions == 0)
349 			break;
350 
351 	/* Out of entries, allocate some more. */
352 	if (i == crypto_drivers_num) {
353 		/* Be careful about wrap-around. */
354 		if (2 * crypto_drivers_num <= crypto_drivers_num) {
355 			crit_exit();
356 			kprintf("crypto: driver count wraparound!\n");
357 			return -1;
358 		}
359 
360 		newdrv = kmalloc(2 * crypto_drivers_num *
361 		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
362 		if (newdrv == NULL) {
363 			crit_exit();
364 			kprintf("crypto: no space to expand driver table!\n");
365 			return -1;
366 		}
367 
368 		bcopy(crypto_drivers, newdrv,
369 		    crypto_drivers_num * sizeof(struct cryptocap));
370 
371 		crypto_drivers_num *= 2;
372 
373 		kfree(crypto_drivers, M_CRYPTO_DATA);
374 		crypto_drivers = newdrv;
375 	}
376 
377 	/* NB: state is zero'd on free */
378 	crypto_drivers[i].cc_sessions = 1;	/* Mark */
379 	crypto_drivers[i].cc_flags = flags;
380 	if (bootverbose)
381 		kprintf("crypto: assign driver %u, flags %u\n", i, flags);
382 
383 	crit_exit();
384 
385 	return i;
386 }
387 
388 static struct cryptocap *
389 crypto_checkdriver(u_int32_t hid)
390 {
391 	if (crypto_drivers == NULL)
392 		return NULL;
393 	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
394 }
395 
/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 *
 * driverid: slot obtained from crypto_get_driverid().
 * kalg:     CRK_* algorithm id.  (NB: CRK_ALGORITM_MIN below is the
 *           spelling used by the cryptodev header.)
 * kprocess/karg: driver entry point and opaque argument; only the
 *           first registration installs them.
 *
 * Returns 0 on success, EINVAL on a bad driver id or algorithm.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	crit_enter();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			kprintf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		/* First registration installs the driver hooks. */
		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	crit_exit();
	return err;
}
438 
439 /*
440  * Register support for a non-key-related algorithm.  This routine
441  * is called once for each such algorithm supported by a driver.
442  */
443 int
444 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
445     u_int32_t flags,
446     int (*newses)(void*, u_int32_t*, struct cryptoini*),
447     int (*freeses)(void*, u_int64_t),
448     int (*process)(void*, struct cryptop *, int),
449     void *arg)
450 {
451 	struct cryptocap *cap;
452 	int err;
453 
454 	crit_enter();
455 
456 	cap = crypto_checkdriver(driverid);
457 	/* NB: algorithms are in the range [1..max] */
458 	if (cap != NULL &&
459 	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
460 		/*
461 		 * XXX Do some performance testing to determine placing.
462 		 * XXX We probably need an auxiliary data structure that
463 		 * XXX describes relative performances.
464 		 */
465 
466 		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
467 		cap->cc_max_op_len[alg] = maxoplen;
468 		if (bootverbose)
469 			kprintf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
470 				, driverid
471 				, alg
472 				, flags
473 				, maxoplen
474 			);
475 
476 		if (cap->cc_process == NULL) {
477 			cap->cc_arg = arg;
478 			cap->cc_newsession = newses;
479 			cap->cc_process = process;
480 			cap->cc_freesession = freeses;
481 			cap->cc_sessions = 0;		/* Unmark */
482 		}
483 		err = 0;
484 	} else
485 		err = EINVAL;
486 
487 	crit_exit();
488 	return err;
489 }
490 
/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 *
 * Returns 0 on success, EINVAL when the driver id or algorithm is bad
 * or the algorithm was never registered.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err;
	u_int32_t ses;
	struct cryptocap *cap;

	crit_enter();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			/* Last algorithm gone: clear the slot, but
			 * preserve the session count across the bzero
			 * so in-flight sessions can still detect the
			 * cleanup state (see crypto_freesession). */
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	crit_exit();
	return err;
}
536 
537 /*
538  * Unregister all algorithms associated with a crypto driver.
539  * If there are pending sessions using it, leave enough information
540  * around so that subsequent calls using those sessions will
541  * correctly detect the driver has been unregistered and reroute
542  * requests.
543  */
544 int
545 crypto_unregister_all(u_int32_t driverid)
546 {
547 	int i, err;
548 	u_int32_t ses;
549 	struct cryptocap *cap;
550 
551 	crit_enter();
552 	cap = crypto_checkdriver(driverid);
553 	if (cap != NULL) {
554 		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
555 			cap->cc_alg[i] = 0;
556 			cap->cc_max_op_len[i] = 0;
557 		}
558 		ses = cap->cc_sessions;
559 		bzero(cap, sizeof(struct cryptocap));
560 		if (ses != 0) {
561 			/*
562 			 * If there are pending sessions, just mark as invalid.
563 			 */
564 			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
565 			cap->cc_sessions = ses;
566 		}
567 		err = 0;
568 	} else
569 		err = EINVAL;
570 
571 	crit_exit();
572 	return err;
573 }
574 
575 /*
576  * Clear blockage on a driver.  The what parameter indicates whether
577  * the driver is now ready for cryptop's and/or cryptokop's.
578  */
579 int
580 crypto_unblock(u_int32_t driverid, int what)
581 {
582 	struct cryptocap *cap;
583 	int needwakeup, err;
584 
585 	crit_enter();
586 	cap = crypto_checkdriver(driverid);
587 	if (cap != NULL) {
588 		needwakeup = 0;
589 		if (what & CRYPTO_SYMQ) {
590 			needwakeup |= cap->cc_qblocked;
591 			cap->cc_qblocked = 0;
592 		}
593 		if (what & CRYPTO_ASYMQ) {
594 			needwakeup |= cap->cc_kqblocked;
595 			cap->cc_kqblocked = 0;
596 		}
597 		if (needwakeup)
598 			setsoftcrypto();
599 		err = 0;
600 	} else
601 		err = EINVAL;
602 	crit_exit();
603 
604 	return err;
605 }
606 
/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 *
 * Without CRYPTO_F_BATCH the op is pushed at the driver immediately;
 * with it, the op is queued for the software interrupt to batch.
 * Returns 0 when the request was accepted (queued or invoked), or the
 * driver's error from crypto_invoke().
 */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid = SESID2HID(crp->crp_sid);
	int result;

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif
	crit_enter();
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		struct cryptocap *cap;
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		if (cap && !cap->cc_qblocked) {
			result = crypto_invoke(crp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the op on the queue.
				 */
				crypto_drivers[hid].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
				cryptostats.cs_blocks++;
				/* Queued: the caller sees success. */
				result = 0;
			}
		} else {
			/*
			 * The driver is blocked, just queue the op until
			 * it unblocks and the swi thread gets kicked.
			 */
			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
			result = 0;
		}
	} else {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		/* Kick the swi only on the empty->non-empty transition. */
		if (wasempty)
			setsoftcrypto();
		result = 0;
	}
	crit_exit();

	return result;
}
670 
671 /*
672  * Add an asymetric crypto request to a queue,
673  * to be processed by the kernel thread.
674  */
675 int
676 crypto_kdispatch(struct cryptkop *krp)
677 {
678 	struct cryptocap *cap;
679 	int result;
680 
681 	cryptostats.cs_kops++;
682 
683 	crit_enter();
684 	cap = crypto_checkdriver(krp->krp_hid);
685 	if (cap && !cap->cc_kqblocked) {
686 		result = crypto_kinvoke(krp, 0);
687 		if (result == ERESTART) {
688 			/*
689 			 * The driver ran out of resources, mark the
690 			 * driver ``blocked'' for cryptop's and put
691 			 * the op on the queue.
692 			 */
693 			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
694 			TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
695 			cryptostats.cs_kblocks++;
696 		}
697 	} else {
698 		/*
699 		 * The driver is blocked, just queue the op until
700 		 * it unblocks and the swi thread gets kicked.
701 		 */
702 		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
703 		result = 0;
704 	}
705 	crit_exit();
706 
707 	return result;
708 }
709 
/*
 * Dispatch an asymmetric crypto request to the appropriate crypto device.
 *
 * Picks the first driver supporting krp->krp_op (software drivers are
 * skipped unless crypto_devallowsoft).  Failures, including "no
 * device", are delivered via krp_status/crypto_kdone; apart from the
 * early sanity checks the function itself returns 0.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		kfree(krp, M_XDATA);		/* XXX allocated in cryptodev */
		return EINVAL;
	}

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    !crypto_devallowsoft)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
				crypto_drivers[hid].cc_karg, krp, hint);
	} else
		error = ENODEV;

	if (error) {
		/* Complete the op with the error; the callback still runs. */
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}
751 
#ifdef CRYPTO_TIMING
/*
 * Fold the elapsed time since *tv into the accumulator ts (sum,
 * min, max, count) and reset *tv to now so successive pipeline
 * stages can be timed back-to-back.
 */
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, delta;

	nanouptime(&now);
	delta.tv_sec = now.tv_sec - tv->tv_sec;
	delta.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (delta.tv_nsec < 0) {
		/* borrow a second */
		delta.tv_sec--;
		delta.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &delta);
	if (timespeccmp(&delta, &ts->min, <))
		ts->min = delta;
	if (timespeccmp(&delta, &ts->max, >))
		ts->max = delta;
	ts->count++;

	*tv = now;
}
#endif
775 
/*
 * Dispatch a crypto request to the appropriate crypto device.
 *
 * Returns the driver's return value (possibly ERESTART when the
 * driver is out of resources) or 0 with any error delivered through
 * crp_etype/crypto_done.  When the driver backing the session has
 * unregistered, the session is migrated to another driver and EAGAIN
 * is delivered so the caller resubmits the op.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		/* Driver awaiting cleanup: drop our session reference. */
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		/* Relink the cryptoini chain from the descriptor list. */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}
835 
836 /*
837  * Release a set of crypto descriptors.
838  */
839 void
840 crypto_freereq(struct cryptop *crp)
841 {
842 	struct cryptodesc *crd;
843 
844 	if (crp) {
845 		while ((crd = crp->crp_desc) != NULL) {
846 			crp->crp_desc = crd->crd_next;
847 			zfree(cryptodesc_zone, crd);
848 		}
849 		zfree(cryptop_zone, crp);
850 	}
851 }
852 
853 /*
854  * Acquire a set of crypto descriptors.  The descriptors are self contained
855  * so no special lock/interlock protection is necessary.
856  */
857 struct cryptop *
858 crypto_getreq(int num)
859 {
860 	struct cryptodesc *crd;
861 	struct cryptop *crp;
862 
863 	crp = zalloc(cryptop_zone);
864 	if (crp != NULL) {
865 		bzero(crp, sizeof (*crp));
866 		while (num--) {
867 			crd = zalloc(cryptodesc_zone);
868 			if (crd == NULL) {
869 				crypto_freereq(crp);
870 				crp = NULL;
871 				break;
872 			}
873 			bzero(crd, sizeof (*crd));
874 			crd->crd_next = crp->crp_desc;
875 			crp->crp_desc = crd;
876 		}
877 	}
878 	return crp;
879 }
880 
/*
 * Invoke the callback on behalf of the driver.
 *
 * Marks the op done, then either runs the callback inline
 * (CRYPTO_F_CBIMM) or queues it on crp_ret_q for the cryptoret
 * thread to deliver.
 */
void
crypto_done(struct cryptop *crp)
{
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
		("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		int wasempty;
		/*
		 * Normal case; queue the callback for the thread.
		 *
		 * The return queue is manipulated by the swi thread
		 * and, potentially, by crypto device drivers calling
		 * back to mark operations completed.  Thus we need
		 * to mask both while manipulating the return queue.
		 */
		crit_enter();
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		/* Wake cryptoret only on the empty->non-empty edge. */
		if (wasempty)
			wakeup_one(&crp_ret_q);
		crit_exit();
	}
}
934 
/*
 * Invoke the callback on behalf of the driver (key-op flavor).
 *
 * The op is queued on crp_ret_kq for the cryptoret thread.  NB: the
 * wakeup channel is &crp_ret_q -- cryptoret sleeps on that single
 * address for both return queues, so this is intentional.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	crit_enter();
	wasempty = TAILQ_EMPTY(&crp_ret_kq);
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	if (wasempty)
		wakeup_one(&crp_ret_q);
	crit_exit();
}
958 
959 int
960 crypto_getfeat(int *featp)
961 {
962 	int hid, kalg, feat = 0;
963 
964 	crit_enter();
965 	if (!crypto_userasymcrypto)
966 		goto out;
967 
968 	for (hid = 0; hid < crypto_drivers_num; hid++) {
969 		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
970 		    !crypto_devallowsoft) {
971 			continue;
972 		}
973 		if (crypto_drivers[hid].cc_kprocess == NULL)
974 			continue;
975 		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
976 			if ((crypto_drivers[hid].cc_kalg[kalg] &
977 			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
978 				feat |=  1 << kalg;
979 	}
980 out:
981 	crit_exit();
982 	*featp = feat;
983 	return (0);
984 }
985 
/*
 * Software interrupt thread to dispatch crypto requests.
 *
 * Repeatedly scans crp_q (and crp_kq) for an op whose driver is
 * unblocked, invokes it, and handles ERESTART by blocking the driver
 * and requeueing; loops until neither queue yields work.  Runs
 * entirely inside a critical section.
 */
static void
cryptointr(void *dummy, void *frame)
{
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	int result, hint;

	cryptostats.cs_intrs++;
	crit_enter();
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			u_int32_t hid = SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless whether its for the same
					 * driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (SESID2HID(submit->crp_sid) == hid)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
					/* Non-batch ops go out immediately. */
					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
						break;
					/* keep scanning for more are q'd */
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				cryptostats.cs_blocks++;
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
				cryptostats.cs_kblocks++;
			}
		}
	} while (submit != NULL || krp != NULL);
	crit_exit();
}
1089 
/*
 * Kernel thread to do callbacks.
 *
 * Drains crp_ret_q and crp_ret_kq, delivering one symmetric and one
 * key-op callback per iteration with the critical section dropped
 * (callbacks may block), then sleeps on &crp_ret_q until crypto_done
 * or crypto_kdone queues more work.  Never returns.
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;

	crit_enter();
	for (;;) {
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL)
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);

		if (crp != NULL || krp != NULL) {
			crit_exit();		/* lower ipl for callbacks */
			if (crp != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct timespec t = crp->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crp->crp_callback(crp);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crp->crp_callback(crp);
			}
			if (krp != NULL)
				krp->krp_callback(krp);
			crit_enter();
		} else {
			/* Both queues empty: wait for more completions. */
			(void) tsleep(&crp_ret_q, 0, "crypto_wait", 0);
			cryptostats.cs_rets++;
		}
	}
	/* CODE NOT REACHED (crit_exit() would go here otherwise) */
}
1136