xref: /freebsd/sys/opencrypto/cryptosoft.c (revision 5b9c547c)
1 /*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/
2 
3 /*-
4  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5  * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
6  *
7  * This code was written by Angelos D. Keromytis in Athens, Greece, in
8  * February 2000. Network Security Technologies Inc. (NSTI) kindly
9  * supported the development of this code.
10  *
11  * Copyright (c) 2000, 2001 Angelos D. Keromytis
12  * Copyright (c) 2014 The FreeBSD Foundation
13  * All rights reserved.
14  *
15  * Portions of this software were developed by John-Mark Gurney
16  * under sponsorship of the FreeBSD Foundation and
17  * Rubicon Communications, LLC (Netgate).
18  *
19  * Permission to use, copy, and modify this software with or without fee
20  * is hereby granted, provided that this entire notice is included in
21  * all source code copies of any software which is or includes a copy or
22  * modification of this software.
23  *
24  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
25  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
26  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
27  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
28  * PURPOSE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/sysctl.h>
40 #include <sys/errno.h>
41 #include <sys/random.h>
42 #include <sys/kernel.h>
43 #include <sys/uio.h>
44 #include <sys/lock.h>
45 #include <sys/rwlock.h>
46 #include <sys/endian.h>
47 #include <sys/limits.h>
48 
49 #include <crypto/blowfish/blowfish.h>
50 #include <crypto/sha1.h>
51 #include <opencrypto/rmd160.h>
52 #include <opencrypto/cast.h>
53 #include <opencrypto/skipjack.h>
54 #include <sys/md5.h>
55 
56 #include <opencrypto/cryptodev.h>
57 #include <opencrypto/cryptosoft.h>
58 #include <opencrypto/xform.h>
59 
60 #include <sys/kobj.h>
61 #include <sys/bus.h>
62 #include "cryptodev_if.h"
63 
/* Driver id handed back by crypto_get_driverid() at attach time. */
static	int32_t swcr_id;
/* Session table: one chain of swcr_data per slot; slot 0 is left empty. */
static	struct swcr_data **swcr_sessions = NULL;
/* Number of slots currently allocated in swcr_sessions. */
static	u_int32_t swcr_sesnum;
/* Protects swcr_sessions pointer, not data. */
static	struct rwlock swcr_sessions_lock;

/* Constant HMAC padding blocks, filled in by swcr_attach(). */
u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authenc(struct cryptop *crp);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);
static	int swcr_freesession_locked(device_t dev, u_int64_t tid);
79 
80 /*
81  * Apply a symmetric encryption/decryption algorithm.
82  */
/*
 * Apply a symmetric encryption/decryption algorithm.
 *
 * crd   - operation descriptor: algorithm, direction, key/IV flags and
 *	   the offset (crd_skip) and length (crd_len) of the payload.
 * sw    - per-session software state (key schedule, enc_xform ops).
 * buf   - the data carrier; interpreted per 'flags' as an mbuf chain
 *	   (CRYPTO_F_IMBUF), a struct uio (CRYPTO_F_IOV) or otherwise a
 *	   contiguous buffer.
 * flags - the request's crp_flags, used to interpret 'buf'.
 *
 * Returns 0 on success; EINVAL for unpadded data, a missing explicit
 * IV with AES-ICM, or an iovec overrun; or the error from setkey()
 * when CRD_F_KEY_EXPLICIT re-keys the session.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;

	error = 0;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* AES-ICM cannot recover its counter/IV from the buffer. */
	if (crd->crd_alg == CRYPTO_AES_ICM &&
	    (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}

	/* Re-key on the fly when the request carries its own key. */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;	/* NOTE(review): shadows the outer 'error' */

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		error = exf->setkey(&sw->sw_kschedule,
				crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	/*
	 * Normalize the three possible buffer types to a struct uio so
	 * the block walk below only has to deal with iovecs.
	 */
	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	if ((flags & CRYPTO_F_IMBUF) != 0) {
		crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
		    &iovalloc);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
	} else if ((flags & CRYPTO_F_IOV) != 0)
		uio = (struct uio *)buf;
	else {
		iov[0].iov_base = buf;
		iov[0].iov_len = crd->crd_skip + crd->crd_len;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	/* Locate the iovec holding the first payload byte. */
	count = crd->crd_skip;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crd->crd_len;	/* bytes still to process */

	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			/* Gather one block straddling iovec boundaries. */
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back decrypted block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		/*
		 * Warning: idat may point to garbage here, but
		 * we only use it in the while() loop, only if
		 * there are indeed enough data.
		 */
		idat = (char *)uio->uio_iov[ind].iov_base + k;

		/* Fast path: whole blocks within a single iovec. */
		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    idat);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    idat);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			idat += blks;
			count += blks;
			k += blks;
			i -= blks;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *      ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	/* Release the iovec array if crypto_mbuftoiov() allocated one. */
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}
317 
/*
 * Prepare the per-session authentication contexts for a key.
 *
 * For the HMAC algorithms this precomputes the inner (sw_ictx) and
 * outer (sw_octx) digest contexts from key^ipad / key^opad (RFC 2104);
 * 'key' is XOR-modified in place but restored to its original bytes
 * before returning.  For the KPDK algorithms the key is absorbed into
 * sw_ictx and a raw copy is stashed in sw_octx so it can be appended
 * again in swcr_authcompute().
 *
 * klen is in bits on entry.
 * NOTE(review): assumes klen/8 <= axf->blocksize — a longer key would
 * make the "axf->blocksize - klen" pad length negative; confirm the
 * callers enforce this.
 */
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;	/* bits -> bytes */

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		/* Inner context: H(key ^ ipad || ...) */
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		/* Flip ipad to opad in one pass. */
		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		/* Outer context: H(key ^ opad || ...) */
		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		/* Restore the caller's key bytes. */
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an md5 and a sha1 result
		 * just to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * adding the key to sw_ictx and abusing Final() to get the
		 * "keyfill" padding.
		 * In addition we abuse the sw_octx to save the key to have
		 * it to be able to append it at the end in swcr_authcompute().
		 */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}
378 
379 /*
380  * Compute keyed-hash authenticator.
381  */
/*
 * Compute keyed-hash authenticator.
 *
 * Hashes crd_len bytes starting at crd_skip of 'buf' and writes the
 * (possibly sw_mlen-truncated) authenticator at crd_inject.  Returns
 * 0 on success, EINVAL when the session lacks the needed contexts,
 * or the error from crypto_apply().
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	/* Rebuild the contexts if the request carries its own key. */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	/* Start from a copy of the precomputed inner context. */
	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* HMAC finalization: outer hash over the inner digest. */
		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
449 
/* Compile-time guarantees that an int-sized request fits GCM's limits. */
CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */
452 
453 /*
454  * Apply a combined encryption-authentication transformation
455  */
/*
 * Apply a combined encryption-authentication transformation
 * (AES-GCM / GMAC).
 *
 * The request must carry two descriptors: a cipher one
 * (CRYPTO_AES_NIST_GCM_16 or CRYPTO_AES_NIST_GMAC) and a MAC one
 * (CRYPTO_AES_{128,192,256}_NIST_GMAC) with matching key lengths.
 * On encryption the computed tag is written at crda->crd_inject; on
 * decryption the stored tag is compared timing-safely first, and the
 * payload is only decrypted and written back when it matches —
 * EBADMSG is returned otherwise.
 */
static int
swcr_authenc(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, len, iskip, oskip, r;

	/* NB: iskip and oskip are never set non-zero in this function. */
	ivlen = blksz = iskip = oskip = 0;

	/* Pair each descriptor with its session state. */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = 12;	/* 96-bit GCM nonce */
			break;
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	/* Both halves of the transform are mandatory. */
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	/* GCM cannot recover the nonce from the buffer. */
	if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
	    (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	if (crde->crd_klen != crda->crd_klen)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;

	for (i = iskip; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz - oskip);
		crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
		    blk + oskip);
		/* Zero-pad a short final AAD block. */
		bzero(blk + len + oskip, blksz - len - oskip);
		axf->Update(&ctx, blk, blksz);
		oskip = 0; /* reset initial output offset */
	}

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp->crp_flags, buf,
			    crde->crd_skip + i, len, blk);
		} else {
			/* Decrypt pass one: MAC over the ciphertext only. */
			axf->Update(&ctx, blk, len);
		}
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			/* length block */
			bzero(blk, blksz);
			blkp = (uint32_t *)blk + 1;
			*blkp = htobe32(aadlen * 8);
			blkp = (uint32_t *)blk + 3;
			*blkp = htobe32(crde->crd_len * 8);
			axf->Update(&ctx, blk, blksz);
			break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, uaalg);

		r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
		if (r == 0) {
			/* tag matches, decrypt data */
			for (i = 0; i < crde->crd_len; i += blksz) {
				len = MIN(crde->crd_len - i, blksz);
				if (len < blksz)
					bzero(blk, blksz);
				crypto_copydata(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
				if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
					exf->decrypt(swe->sw_kschedule, blk);
				}
				crypto_copyback(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
			}
		} else
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, aalg);
	}

	return (0);
}
624 
625 /*
626  * Apply a compression/decompression algorithm
627  */
628 static int
629 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
630     caddr_t buf, int flags)
631 {
632 	u_int8_t *data, *out;
633 	struct comp_algo *cxf;
634 	int adj;
635 	u_int32_t result;
636 
637 	cxf = sw->sw_cxf;
638 
639 	/* We must handle the whole buffer of data in one time
640 	 * then if there is not all the data in the mbuf, we must
641 	 * copy in a buffer.
642 	 */
643 
644 	data = malloc(crd->crd_len, M_CRYPTO_DATA,  M_NOWAIT);
645 	if (data == NULL)
646 		return (EINVAL);
647 	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
648 
649 	if (crd->crd_flags & CRD_F_COMP)
650 		result = cxf->compress(data, crd->crd_len, &out);
651 	else
652 		result = cxf->decompress(data, crd->crd_len, &out);
653 
654 	free(data, M_CRYPTO_DATA);
655 	if (result == 0)
656 		return EINVAL;
657 
658 	/* Copy back the (de)compressed data. m_copyback is
659 	 * extending the mbuf as necessary.
660 	 */
661 	sw->sw_size = result;
662 	/* Check the compressed size when doing compression */
663 	if (crd->crd_flags & CRD_F_COMP) {
664 		if (result >= crd->crd_len) {
665 			/* Compression was useless, we lost time */
666 			free(out, M_CRYPTO_DATA);
667 			return 0;
668 		}
669 	}
670 
671 	crypto_copyback(flags, buf, crd->crd_skip, result, out);
672 	if (result < crd->crd_len) {
673 		adj = result - crd->crd_len;
674 		if (flags & CRYPTO_F_IMBUF) {
675 			adj = result - crd->crd_len;
676 			m_adj((struct mbuf *)buf, adj);
677 		} else if (flags & CRYPTO_F_IOV) {
678 			struct uio *uio = (struct uio *)buf;
679 			int ind;
680 
681 			adj = crd->crd_len - result;
682 			ind = uio->uio_iovcnt - 1;
683 
684 			while (adj > 0 && ind >= 0) {
685 				if (adj < uio->uio_iov[ind].iov_len) {
686 					uio->uio_iov[ind].iov_len -= adj;
687 					break;
688 				}
689 
690 				adj -= uio->uio_iov[ind].iov_len;
691 				uio->uio_iov[ind].iov_len = 0;
692 				ind--;
693 				uio->uio_iovcnt--;
694 			}
695 		}
696 	}
697 	free(out, M_CRYPTO_DATA);
698 	return 0;
699 }
700 
701 /*
702  * Generate a new software session.
703  */
/*
 * Generate a new software session.
 *
 * Walks the cryptoini chain, allocating one swcr_data per requested
 * algorithm (chained via sw_next) and initializing its transform
 * state: key schedule, HMAC contexts, etc.  On success *sid receives
 * the slot index into swcr_sessions[].
 *
 * Returns 0, EINVAL for bad arguments or unknown algorithms, ENOBUFS
 * on allocation failure, or a setkey() error.
 *
 * Locking: the table is (re)allocated under the write lock, which is
 * then downgraded to read for filling in the slot — per the note on
 * swcr_sessions_lock, the lock only guards the table pointer, not
 * the per-slot chains.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Find a free slot, growing the table if there is none. */
	rw_wlock(&swcr_sessions_lock);
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			rw_wunlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	rw_downgrade(&swcr_sessions_lock);
	swd = &swcr_sessions[i];
	*sid = i;

	/* One swcr_data per algorithm in the cryptoini chain. */
	while (cri) {
		*swd = malloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_ICM:
			txf = &enc_xform_aes_icm;
			goto enccommon;
		case CRYPTO_AES_NIST_GCM_16:
			txf = &enc_xform_aes_nist_gcm;
			goto enccommon;
		case CRYPTO_AES_NIST_GMAC:
			/* GMAC is keyed per request; no setkey() here. */
			txf = &enc_xform_aes_nist_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession_locked(dev, i);
					rw_runlock(&swcr_sessions_lock);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			/* HMAC: inner and outer precomputed contexts. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			/* KPDK: sw_octx holds the raw key, not a context. */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif

		case CRYPTO_AES_128_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_256;
		auth4common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			/*
			 * NOTE(review): cri_key is used unchecked here,
			 * unlike the other cases — confirm callers always
			 * supply a key for GMAC.
			 */
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	rw_runlock(&swcr_sessions_lock);
	return 0;
}
959 
/*
 * crypto(9) freesession entry point: grab the sessions lock (read
 * mode suffices — it only guards the swcr_sessions pointer) and
 * tear down the session identified by tid.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	int error;

	rw_rlock(&swcr_sessions_lock);
	error = swcr_freesession_locked(dev, tid);
	rw_runlock(&swcr_sessions_lock);
	return error;
}
970 
971 /*
972  * Free a session.
973  */
974 static int
975 swcr_freesession_locked(device_t dev, u_int64_t tid)
976 {
977 	struct swcr_data *swd;
978 	struct enc_xform *txf;
979 	struct auth_hash *axf;
980 	struct comp_algo *cxf;
981 	u_int32_t sid = CRYPTO_SESID2LID(tid);
982 
983 	if (sid > swcr_sesnum || swcr_sessions == NULL ||
984 	    swcr_sessions[sid] == NULL)
985 		return EINVAL;
986 
987 	/* Silently accept and return */
988 	if (sid == 0)
989 		return 0;
990 
991 	while ((swd = swcr_sessions[sid]) != NULL) {
992 		swcr_sessions[sid] = swd->sw_next;
993 
994 		switch (swd->sw_alg) {
995 		case CRYPTO_DES_CBC:
996 		case CRYPTO_3DES_CBC:
997 		case CRYPTO_BLF_CBC:
998 		case CRYPTO_CAST_CBC:
999 		case CRYPTO_SKIPJACK_CBC:
1000 		case CRYPTO_RIJNDAEL128_CBC:
1001 		case CRYPTO_AES_XTS:
1002 		case CRYPTO_AES_ICM:
1003 		case CRYPTO_AES_NIST_GCM_16:
1004 		case CRYPTO_AES_NIST_GMAC:
1005 		case CRYPTO_CAMELLIA_CBC:
1006 		case CRYPTO_NULL_CBC:
1007 			txf = swd->sw_exf;
1008 
1009 			if (swd->sw_kschedule)
1010 				txf->zerokey(&(swd->sw_kschedule));
1011 			break;
1012 
1013 		case CRYPTO_MD5_HMAC:
1014 		case CRYPTO_SHA1_HMAC:
1015 		case CRYPTO_SHA2_256_HMAC:
1016 		case CRYPTO_SHA2_384_HMAC:
1017 		case CRYPTO_SHA2_512_HMAC:
1018 		case CRYPTO_RIPEMD160_HMAC:
1019 		case CRYPTO_NULL_HMAC:
1020 			axf = swd->sw_axf;
1021 
1022 			if (swd->sw_ictx) {
1023 				bzero(swd->sw_ictx, axf->ctxsize);
1024 				free(swd->sw_ictx, M_CRYPTO_DATA);
1025 			}
1026 			if (swd->sw_octx) {
1027 				bzero(swd->sw_octx, axf->ctxsize);
1028 				free(swd->sw_octx, M_CRYPTO_DATA);
1029 			}
1030 			break;
1031 
1032 		case CRYPTO_MD5_KPDK:
1033 		case CRYPTO_SHA1_KPDK:
1034 			axf = swd->sw_axf;
1035 
1036 			if (swd->sw_ictx) {
1037 				bzero(swd->sw_ictx, axf->ctxsize);
1038 				free(swd->sw_ictx, M_CRYPTO_DATA);
1039 			}
1040 			if (swd->sw_octx) {
1041 				bzero(swd->sw_octx, swd->sw_klen);
1042 				free(swd->sw_octx, M_CRYPTO_DATA);
1043 			}
1044 			break;
1045 
1046 		case CRYPTO_MD5:
1047 		case CRYPTO_SHA1:
1048 			axf = swd->sw_axf;
1049 
1050 			if (swd->sw_ictx)
1051 				free(swd->sw_ictx, M_CRYPTO_DATA);
1052 			break;
1053 
1054 		case CRYPTO_DEFLATE_COMP:
1055 			cxf = swd->sw_cxf;
1056 			break;
1057 		}
1058 
1059 		free(swd, M_CRYPTO_DATA);
1060 	}
1061 	return 0;
1062 }
1063 
1064 /*
1065  * Process a software request.
1066  */
/*
 * Process a software request.
 *
 * Dispatches each descriptor of the request's chain to the matching
 * transform routine for the session.  Any per-descriptor failure is
 * recorded in crp->crp_etype; crypto_done() is always called before
 * returning 0 to the framework.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	/* Validate the session id under the table lock. */
	lid = CRYPTO_SESID2LID(crp->crp_sid);
	rw_rlock(&swcr_sessions_lock);
	if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 ||
	    swcr_sessions[lid] == NULL) {
		rw_runlock(&swcr_sessions_lock);
		crp->crp_etype = ENOENT;
		goto done;
	}
	rw_runlock(&swcr_sessions_lock);

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		rw_rlock(&swcr_sessions_lock);
		if (swcr_sessions == NULL) {
			rw_runlock(&swcr_sessions_lock);
			crp->crp_etype = ENOENT;
			goto done;
		}
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		rw_runlock(&swcr_sessions_lock);

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			/* Null cipher: nothing to transform. */
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			/* swcr_authenc() consumes the whole chain. */
			crp->crp_etype = swcr_authenc(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}
1182 
1183 static void
1184 swcr_identify(driver_t *drv, device_t parent)
1185 {
1186 	/* NB: order 10 is so we get attached after h/w devices */
1187 	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
1188 	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
1189 		panic("cryptosoft: could not attach");
1190 }
1191 
1192 static int
1193 swcr_probe(device_t dev)
1194 {
1195 	device_set_desc(dev, "software crypto");
1196 	return (BUS_PROBE_NOWILDCARD);
1197 }
1198 
1199 static int
1200 swcr_attach(device_t dev)
1201 {
1202 	rw_init(&swcr_sessions_lock, "swcr_sessions_lock");
1203 	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
1204 	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
1205 
1206 	swcr_id = crypto_get_driverid(dev,
1207 			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
1208 	if (swcr_id < 0) {
1209 		device_printf(dev, "cannot initialize!");
1210 		return ENOMEM;
1211 	}
1212 #define	REGISTER(alg) \
1213 	crypto_register(swcr_id, alg, 0,0)
1214 	REGISTER(CRYPTO_DES_CBC);
1215 	REGISTER(CRYPTO_3DES_CBC);
1216 	REGISTER(CRYPTO_BLF_CBC);
1217 	REGISTER(CRYPTO_CAST_CBC);
1218 	REGISTER(CRYPTO_SKIPJACK_CBC);
1219 	REGISTER(CRYPTO_NULL_CBC);
1220 	REGISTER(CRYPTO_MD5_HMAC);
1221 	REGISTER(CRYPTO_SHA1_HMAC);
1222 	REGISTER(CRYPTO_SHA2_256_HMAC);
1223 	REGISTER(CRYPTO_SHA2_384_HMAC);
1224 	REGISTER(CRYPTO_SHA2_512_HMAC);
1225 	REGISTER(CRYPTO_RIPEMD160_HMAC);
1226 	REGISTER(CRYPTO_NULL_HMAC);
1227 	REGISTER(CRYPTO_MD5_KPDK);
1228 	REGISTER(CRYPTO_SHA1_KPDK);
1229 	REGISTER(CRYPTO_MD5);
1230 	REGISTER(CRYPTO_SHA1);
1231 	REGISTER(CRYPTO_RIJNDAEL128_CBC);
1232 	REGISTER(CRYPTO_AES_XTS);
1233 	REGISTER(CRYPTO_AES_ICM);
1234 	REGISTER(CRYPTO_AES_NIST_GCM_16);
1235 	REGISTER(CRYPTO_AES_NIST_GMAC);
1236 	REGISTER(CRYPTO_AES_128_NIST_GMAC);
1237 	REGISTER(CRYPTO_AES_192_NIST_GMAC);
1238 	REGISTER(CRYPTO_AES_256_NIST_GMAC);
1239  	REGISTER(CRYPTO_CAMELLIA_CBC);
1240 	REGISTER(CRYPTO_DEFLATE_COMP);
1241 #undef REGISTER
1242 
1243 	return 0;
1244 }
1245 
1246 static int
1247 swcr_detach(device_t dev)
1248 {
1249 	crypto_unregister_all(swcr_id);
1250 	rw_wlock(&swcr_sessions_lock);
1251 	free(swcr_sessions, M_CRYPTO_DATA);
1252 	swcr_sessions = NULL;
1253 	rw_wunlock(&swcr_sessions_lock);
1254 	rw_destroy(&swcr_sessions_lock);
1255 	return 0;
1256 }
1257 
/* Newbus method table for the cryptosoft pseudo device. */
static device_method_t swcr_methods[] = {
	/* Standard device interface. */
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	/* Crypto framework entry points. */
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};
1270 
/* Driver declaration; instances carry no per-device state. */
static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;
1277 
1278 /*
1279  * NB: We explicitly reference the crypto module so we
1280  * get the necessary ordering when built as a loadable
1281  * module.  This is required because we bundle the crypto
1282  * module code together with the cryptosoft driver (otherwise
1283  * normal module dependencies would handle things).
1284  */
1285 extern int crypto_modevent(struct module *, int, void *);
1286 /* XXX where to attach */
1287 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0);
1288 MODULE_VERSION(cryptosoft, 1);
1289 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1290