xref: /dragonfly/sys/opencrypto/cryptosoft.c (revision 16fb0422)
1 /*-
2  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
3  * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
4  *
5  * This code was written by Angelos D. Keromytis in Athens, Greece, in
6  * February 2000. Network Security Technologies Inc. (NSTI) kindly
7  * supported the development of this code.
8  *
9  * Copyright (c) 2000, 2001 Angelos D. Keromytis
10  *
11  * SMP modifications by Matthew Dillon for the DragonFlyBSD Project
12  *
13  * Permission to use, copy, and modify this software with or without fee
14  * is hereby granted, provided that this entire notice is included in
15  * all source code copies of any software which is or includes a copy or
16  * modification of this software.
17  *
18  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
19  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
20  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
21  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
22  * PURPOSE.
23  *
24  * $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $
25  * $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/malloc.h>
31 #include <sys/mbuf.h>
32 #include <sys/module.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35 #include <sys/endian.h>
36 #include <sys/random.h>
37 #include <sys/kernel.h>
38 #include <sys/uio.h>
39 #include <sys/spinlock2.h>
40 
41 #include <crypto/blowfish/blowfish.h>
42 #include <crypto/sha1.h>
43 #include <opencrypto/rmd160.h>
44 #include <opencrypto/cast.h>
45 #include <opencrypto/skipjack.h>
46 #include <sys/md5.h>
47 
48 #include <opencrypto/cryptodev.h>
49 #include <opencrypto/cryptosoft.h>
50 #include <opencrypto/xform.h>
51 
52 #include <sys/kobj.h>
53 #include <sys/bus.h>
54 #include "cryptodev_if.h"
55 
56 static	int32_t swcr_id;
57 static	struct swcr_data **swcr_sessions = NULL;
58 static	u_int32_t swcr_sesnum;
59 static	u_int32_t swcr_minsesnum = 1;
60 
61 static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin);
62 
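/*
 * Constant pad blocks, filled with HMAC_IPAD_VAL / HMAC_OPAD_VAL in
 * swcr_attach().  swcr_authprepare() hashes the tail of the appropriate
 * buffer after the (XOR'ed) key in order to pad short HMAC keys out to
 * the hash block size.
 */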
63 u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
64 u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
65 
66 static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
67 static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
68 static	int swcr_combined(struct cryptop *);
69 static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
70 static	int swcr_freesession(device_t dev, u_int64_t tid);
71 static	int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);
72 
73 /*
74  * Apply a symmetric encryption/decryption algorithm.
75  */
76 static int
77 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
78     int flags)
79 {
80 	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
81 	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
82 	u_int8_t *kschedule;
83 	u_int8_t *okschedule;
84 	struct enc_xform *exf;
85 	int i, k, j, blks, ivlen;
86 	int error;
87 	int explicit_kschedule;
88 
89 	exf = sw->sw_exf;
90 	blks = exf->blocksize;
91 	ivlen = exf->ivsize;
92 
93 	/* Reject data that is not a multiple of the cipher block size */
94 	if (crd->crd_len % blks)
95 		return EINVAL;
96 
97 	/* Initialize the IV */
98 	if (crd->crd_flags & CRD_F_ENCRYPT) {
99 		/* IV explicitly provided ? */
100 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
101 			bcopy(crd->crd_iv, iv, ivlen);
102 		else
103 			karc4rand(iv, ivlen);
104 
105 		/* Do we need to write the IV */
106 		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
107 			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);
108 
109 	} else {	/* Decryption */
110 			/* IV explicitly provided ? */
111 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
112 			bcopy(crd->crd_iv, iv, ivlen);
113 		else {
114 			/* Get IV off buf */
115 			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
116 		}
117 	}
118 
119 	ivp = iv;
120 
121 	/*
122 	 * The semantics are seriously broken because the session key
123 	 * storage was never designed for concurrent ops.
124 	 */
125 	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
126 		kschedule = NULL;
127 		explicit_kschedule = 1;
128 		error = exf->setkey(&kschedule,
129 				    crd->crd_key, crd->crd_klen / 8);
130 		if (error)
131 			goto done;
132 	} else {
133 		spin_lock(&swcr_spin);
134 		kschedule = sw->sw_kschedule;
135 		++sw->sw_kschedule_refs;
136 		spin_unlock(&swcr_spin);
137 		explicit_kschedule = 0;
138 	}
139 
140 	/*
141 	 * xforms that provide a reinit method perform all IV
142 	 * handling themselves.
143 	 */
144 	if (exf->reinit)
145 		exf->reinit(kschedule, iv);
146 
147 	if (flags & CRYPTO_F_IMBUF) {
148 		struct mbuf *m = (struct mbuf *) buf;
149 
150 		/* Find beginning of data */
151 		m = m_getptr(m, crd->crd_skip, &k);
152 		if (m == NULL) {
153 			error = EINVAL;
154 			goto done;
155 		}
156 
157 		i = crd->crd_len;
158 
159 		while (i > 0) {
160 			/*
161 			 * If there's insufficient data at the end of
162 			 * an mbuf, we have to do some copying.
163 			 */
164 			if (m->m_len < k + blks && m->m_len != k) {
165 				m_copydata(m, k, blks, blk);
166 
167 				/* Actual encryption/decryption */
168 				if (exf->reinit) {
169 					if (crd->crd_flags & CRD_F_ENCRYPT) {
170 						exf->encrypt(kschedule,
171 						    blk, iv);
172 					} else {
173 						exf->decrypt(kschedule,
174 						    blk, iv);
175 					}
176 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
177 					/* XOR with previous block */
178 					for (j = 0; j < blks; j++)
179 						blk[j] ^= ivp[j];
180 
181 					exf->encrypt(kschedule, blk, iv);
182 
183 					/*
184 					 * Keep encrypted block for XOR'ing
185 					 * with next block
186 					 */
187 					bcopy(blk, iv, blks);
188 					ivp = iv;
189 				} else {	/* decrypt */
190 					/*
191 					 * Keep encrypted block for XOR'ing
192 					 * with next block
193 					 */
194 					nivp = (ivp == iv) ? iv2 : iv;
195 					bcopy(blk, nivp, blks);
196 
197 					exf->decrypt(kschedule, blk, iv);
198 
199 					/* XOR with previous block */
200 					for (j = 0; j < blks; j++)
201 						blk[j] ^= ivp[j];
202 
203 					ivp = nivp;
204 				}
205 
206 				/* Copy back the processed block */
207 				m_copyback(m, k, blks, blk);
208 
209 				/* Advance pointer */
210 				m = m_getptr(m, k + blks, &k);
211 				if (m == NULL) {
212 					error = EINVAL;
213 					goto done;
214 				}
215 
216 				i -= blks;
217 
218 				/* Could be done... */
219 				if (i == 0)
220 					break;
221 			}
222 
223 			/* Skip possibly empty mbufs */
224 			if (k == m->m_len) {
225 				for (m = m->m_next; m && m->m_len == 0;
226 				    m = m->m_next)
227 					;
228 				k = 0;
229 			}
230 
231 			/* Sanity check */
232 			if (m == NULL) {
233 				error = EINVAL;
234 				goto done;
235 			}
236 
237 			/*
238 			 * Warning: idat may point to garbage here, but
239 			 * it is only used in the while() loop below, and only
240 			 * when there is enough data for at least one block.
241 			 */
242 			idat = mtod(m, unsigned char *) + k;
243 
244 			while (m->m_len >= k + blks && i > 0) {
245 				if (exf->reinit) {
246 					if (crd->crd_flags & CRD_F_ENCRYPT) {
247 						exf->encrypt(kschedule,
248 						    idat, iv);
249 					} else {
250 						exf->decrypt(kschedule,
251 						    idat, iv);
252 					}
253 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
254 					/* XOR with previous block/IV */
255 					for (j = 0; j < blks; j++)
256 						idat[j] ^= ivp[j];
257 
258 					exf->encrypt(kschedule, idat, iv);
259 					ivp = idat;
260 				} else {	/* decrypt */
261 					/*
262 					 * Keep encrypted block to be used
263 					 * in next block's processing.
264 					 */
265 					nivp = (ivp == iv) ? iv2 : iv;
266 					bcopy(idat, nivp, blks);
267 
268 					exf->decrypt(kschedule, idat, iv);
269 
270 					/* XOR with previous block/IV */
271 					for (j = 0; j < blks; j++)
272 						idat[j] ^= ivp[j];
273 
274 					ivp = nivp;
275 				}
276 
277 				idat += blks;
278 				k += blks;
279 				i -= blks;
280 			}
281 		}
282 		error = 0;	/* Done with mbuf encryption/decryption */
283 	} else if (flags & CRYPTO_F_IOV) {
284 		struct uio *uio = (struct uio *) buf;
285 		struct iovec *iov;
286 
287 		/* Find beginning of data */
288 		iov = cuio_getptr(uio, crd->crd_skip, &k);
289 		if (iov == NULL) {
290 			error = EINVAL;
291 			goto done;
292 		}
293 
294 		i = crd->crd_len;
295 
296 		while (i > 0) {
297 			/*
298 			 * If there's insufficient data at the end of
299 			 * an iovec, we have to do some copying.
300 			 */
301 			if (iov->iov_len < k + blks && iov->iov_len != k) {
302 				cuio_copydata(uio, k, blks, blk);
303 
304 				/* Actual encryption/decryption */
305 				if (exf->reinit) {
306 					if (crd->crd_flags & CRD_F_ENCRYPT) {
307 						exf->encrypt(kschedule,
308 						    blk, iv);
309 					} else {
310 						exf->decrypt(kschedule,
311 						    blk, iv);
312 					}
313 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
314 					/* XOR with previous block */
315 					for (j = 0; j < blks; j++)
316 						blk[j] ^= ivp[j];
317 
318 					exf->encrypt(kschedule, blk, iv);
319 
320 					/*
321 					 * Keep encrypted block for XOR'ing
322 					 * with next block
323 					 */
324 					bcopy(blk, iv, blks);
325 					ivp = iv;
326 				} else {	/* decrypt */
327 					/*
328 					 * Keep encrypted block for XOR'ing
329 					 * with next block
330 					 */
331 					nivp = (ivp == iv) ? iv2 : iv;
332 					bcopy(blk, nivp, blks);
333 
334 					exf->decrypt(kschedule, blk, iv);
335 
336 					/* XOR with previous block */
337 					for (j = 0; j < blks; j++)
338 						blk[j] ^= ivp[j];
339 
340 					ivp = nivp;
341 				}
342 
343 				/* Copy back the processed block */
344 				cuio_copyback(uio, k, blks, blk);
345 
346 				/* Advance pointer */
347 				iov = cuio_getptr(uio, k + blks, &k);
348 				if (iov == NULL) {
349 					error = EINVAL;
350 					goto done;
351 				}
352 
353 				i -= blks;
354 
355 				/* Could be done... */
356 				if (i == 0)
357 					break;
358 			}
359 
360 			/*
361 			 * Warning: idat may point to garbage here, but
362 			 * it is only used in the while() loop below, and only
363 			 * when there is enough data for at least one block.
364 			 */
365 			idat = (char *)iov->iov_base + k;
366 
367 			while (iov->iov_len >= k + blks && i > 0) {
368 				if (exf->reinit) {
369 					if (crd->crd_flags & CRD_F_ENCRYPT) {
370 						exf->encrypt(kschedule,
371 						    idat, iv);
372 					} else {
373 						exf->decrypt(kschedule,
374 						    idat, iv);
375 					}
376 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
377 					/* XOR with previous block/IV */
378 					for (j = 0; j < blks; j++)
379 						idat[j] ^= ivp[j];
380 
381 					exf->encrypt(kschedule, idat, iv);
382 					ivp = idat;
383 				} else {	/* decrypt */
384 					/*
385 					 * Keep encrypted block to be used
386 					 * in next block's processing.
387 					 */
388 					nivp = (ivp == iv) ? iv2 : iv;
389 					bcopy(idat, nivp, blks);
390 
391 					exf->decrypt(kschedule, idat, iv);
392 
393 					/* XOR with previous block/IV */
394 					for (j = 0; j < blks; j++)
395 						idat[j] ^= ivp[j];
396 
397 					ivp = nivp;
398 				}
399 
400 				idat += blks;
401 				k += blks;
402 				i -= blks;
403 			}
404 			if (k == iov->iov_len) {
405 				iov++;
406 				k = 0;
407 			}
408 		}
409 		error = 0;	/* Done with iovec encryption/decryption */
410 	} else {
411 		/*
412 		 * contiguous buffer
413 		 */
414 		if (exf->reinit) {
415 			for (i = crd->crd_skip;
416 			    i < crd->crd_skip + crd->crd_len; i += blks) {
417 				if (crd->crd_flags & CRD_F_ENCRYPT) {
418 					exf->encrypt(kschedule, buf + i, iv);
419 				} else {
420 					exf->decrypt(kschedule, buf + i, iv);
421 				}
422 			}
423 		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
424 			for (i = crd->crd_skip;
425 			    i < crd->crd_skip + crd->crd_len; i += blks) {
426 				/* XOR with the IV/previous block, as appropriate. */
427 				if (i == crd->crd_skip)
428 					for (k = 0; k < blks; k++)
429 						buf[i + k] ^= ivp[k];
430 				else
431 					for (k = 0; k < blks; k++)
432 						buf[i + k] ^= buf[i + k - blks];
433 				exf->encrypt(kschedule, buf + i, iv);
434 			}
435 		} else {		/* Decrypt */
436 			/*
437 			 * Start at the end, so we don't need to keep the
438 			 * encrypted block as the IV for the next block.
439 			 */
440 			for (i = crd->crd_skip + crd->crd_len - blks;
441 			    i >= crd->crd_skip; i -= blks) {
442 				exf->decrypt(kschedule, buf + i, iv);
443 
444 				/* XOR with the IV/previous block, as appropriate */
445 				if (i == crd->crd_skip)
446 					for (k = 0; k < blks; k++)
447 						buf[i + k] ^= ivp[k];
448 				else
449 					for (k = 0; k < blks; k++)
450 						buf[i + k] ^= buf[i + k - blks];
451 			}
452 		}
453 		error = 0; /* Done w/contiguous buffer encrypt/decrypt */
454 	}
455 done:
456 	/*
457 	 * Cleanup - explicitly replace the session key if requested
458 	 *	     (horrible semantics for concurrent operation)
459 	 */
460 	if (explicit_kschedule) {
461 		spin_lock(&swcr_spin);
462 		if (sw->sw_kschedule && sw->sw_kschedule_refs == 0) {
463 			okschedule = sw->sw_kschedule;
464 			sw->sw_kschedule = kschedule;
465 		} else {
466 			okschedule = NULL;
467 		}
468 		spin_unlock(&swcr_spin);
469 		if (okschedule)
470 			exf->zerokey(&okschedule);
471 	} else {
472 		spin_lock(&swcr_spin);
473 		--sw->sw_kschedule_refs;
474 		spin_unlock(&swcr_spin);
475 	}
476 	return error;
477 }
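/*
 * The CBC chaining above is interleaved with the mbuf/iovec walking and
 * the ivp/nivp pointer juggling, which obscures the underlying recurrence.
 * The following stand-alone sketch (guarded out of the build, with
 * hypothetical single-block callbacks enc()/dec() standing in for
 * exf->encrypt()/exf->decrypt()) shows the same chaining that the
 * non-reinit paths of swcr_encdec() perform on a contiguous buffer.
 */
#if 0	/* illustrative sketch only, not part of the driver */
static void
cbc_sketch(int encrypt, u_char *buf, int nblocks, int blks, u_char *iv,
	   void (*enc)(u_char *), void (*dec)(u_char *))
{
	u_char prev[EALG_MAX_BLOCK_LEN];	/* previous ciphertext block */
	u_char tmp[EALG_MAX_BLOCK_LEN];
	int i, j;

	bcopy(iv, prev, blks);
	for (i = 0; i < nblocks * blks; i += blks) {
		if (encrypt) {
			/* C[i] = E(P[i] ^ C[i-1]) */
			for (j = 0; j < blks; j++)
				buf[i + j] ^= prev[j];
			enc(buf + i);
			bcopy(buf + i, prev, blks);
		} else {
			/* P[i] = D(C[i]) ^ C[i-1]; save C[i] first */
			bcopy(buf + i, tmp, blks);
			dec(buf + i);
			for (j = 0; j < blks; j++)
				buf[i + j] ^= prev[j];
			bcopy(tmp, prev, blks);
		}
	}
}
#endif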
478 
479 static void
480 swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
481     int klen)
482 {
483 	int k;
484 
485 	klen /= 8;
486 
487 	switch (axf->type) {
488 	case CRYPTO_MD5_HMAC:
489 	case CRYPTO_SHA1_HMAC:
490 	case CRYPTO_SHA2_256_HMAC:
491 	case CRYPTO_SHA2_384_HMAC:
492 	case CRYPTO_SHA2_512_HMAC:
493 	case CRYPTO_NULL_HMAC:
494 	case CRYPTO_RIPEMD160_HMAC:
495 		for (k = 0; k < klen; k++)
496 			key[k] ^= HMAC_IPAD_VAL;
497 
498 		axf->Init(sw->sw_ictx);
499 		axf->Update(sw->sw_ictx, key, klen);
500 		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);
501 
502 		for (k = 0; k < klen; k++)
503 			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
504 
505 		axf->Init(sw->sw_octx);
506 		axf->Update(sw->sw_octx, key, klen);
507 		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);
508 
509 		for (k = 0; k < klen; k++)
510 			key[k] ^= HMAC_OPAD_VAL;
511 		break;
512 	case CRYPTO_MD5_KPDK:
513 	case CRYPTO_SHA1_KPDK:
514 	{
515 		/* A buffer large enough for either an MD5 or a SHA1 result. */
516 		u_char buf[SHA1_RESULTLEN];
517 
518 		sw->sw_klen = klen;
519 		bcopy(key, sw->sw_octx, klen);
520 		axf->Init(sw->sw_ictx);
521 		axf->Update(sw->sw_ictx, key, klen);
522 		axf->Final(buf, sw->sw_ictx);
523 		break;
524 	}
525 	default:
526 		kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
527 		    "doesn't use keys.\n", __func__, axf->type);
528 	}
529 }
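/*
 * Note on the precomputation above: this is the standard RFC 2104 HMAC
 * construction.  sw_ictx is left holding the hash state after absorbing
 * (key ^ ipad) padded to the hash block size, and sw_octx the state after
 * (key ^ opad).  swcr_authcompute() later clones sw_ictx, appends the
 * message and finalizes, then clones sw_octx and hashes the inner digest:
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * The key is XOR'ed back to its original value before returning, so the
 * caller's copy is left unmodified.
 */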
530 
531 /*
532  * Compute keyed-hash authenticator.
533  */
534 static int
535 swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
536     int flags)
537 {
538 	unsigned char aalg[HASH_MAX_LEN];
539 	struct auth_hash *axf;
540 	union authctx ctx;
541 	int err;
542 
543 	if (sw->sw_ictx == NULL)
544 		return EINVAL;
545 
546 	axf = sw->sw_axf;
547 
548 	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
549 		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);
550 
551 	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
552 
553 	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
554 	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
555 	if (err)
556 		return err;
557 
558 	switch (sw->sw_alg) {
559 	case CRYPTO_MD5_HMAC:
560 	case CRYPTO_SHA1_HMAC:
561 	case CRYPTO_SHA2_256_HMAC:
562 	case CRYPTO_SHA2_384_HMAC:
563 	case CRYPTO_SHA2_512_HMAC:
564 	case CRYPTO_RIPEMD160_HMAC:
565 		if (sw->sw_octx == NULL)
566 			return EINVAL;
567 
568 		axf->Final(aalg, &ctx);
569 		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
570 		axf->Update(&ctx, aalg, axf->hashsize);
571 		axf->Final(aalg, &ctx);
572 		break;
573 
574 	case CRYPTO_MD5_KPDK:
575 	case CRYPTO_SHA1_KPDK:
576 		if (sw->sw_octx == NULL)
577 			return EINVAL;
578 
579 		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
580 		axf->Final(aalg, &ctx);
581 		break;
582 
583 	case CRYPTO_NULL_HMAC:
584 		axf->Final(aalg, &ctx);
585 		break;
586 	}
587 
588 	/* Inject the authentication data */
589 	crypto_copyback(flags, buf, crd->crd_inject,
590 	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
591 	return 0;
592 }
593 
594 /*
595  * Apply a combined encryption-authentication transformation
596  */
597 static int
598 swcr_combined(struct cryptop *crp)
599 {
600 	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
601 	u_char *blk = (u_char *)blkbuf;
602 	u_char aalg[HASH_MAX_LEN];
603 	u_char iv[EALG_MAX_BLOCK_LEN];
604 	uint8_t *kschedule;
605 	union authctx ctx;
606 	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
607 	struct swcr_data *sw, *swa, *swe;
608 	struct auth_hash *axf = NULL;
609 	struct enc_xform *exf = NULL;
610 	struct mbuf *m = NULL;
611 	struct uio *uio = NULL;
612 	caddr_t buf = (caddr_t)crp->crp_buf;
613 	uint32_t *blkp;
614 	int i, blksz, ivlen, outtype, len;
615 
616 	blksz = 0;
617 	ivlen = 0;
618 
619 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
620 		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
621 		     sw && sw->sw_alg != crd->crd_alg;
622 		     sw = sw->sw_next)
623 			;
624 		if (sw == NULL)
625 			return (EINVAL);
626 
627 		switch (sw->sw_alg) {
628 		case CRYPTO_AES_GCM_16:
629 		case CRYPTO_AES_GMAC:
630 			swe = sw;
631 			crde = crd;
632 			exf = swe->sw_exf;
633 			ivlen = exf->ivsize;
634 			break;
635 		case CRYPTO_AES_128_GMAC:
636 		case CRYPTO_AES_192_GMAC:
637 		case CRYPTO_AES_256_GMAC:
638 			swa = sw;
639 			crda = crd;
640 			axf = swa->sw_axf;
641 			if (swa->sw_ictx == NULL)
642 				return (EINVAL);
643 			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
644 			blksz = axf->blocksize;
645 			break;
646 		default:
647 			return (EINVAL);
648 		}
649 	}
650 	if (crde == NULL || crda == NULL)
651 		return (EINVAL);
652 
653 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
654 		outtype = CRYPTO_BUF_MBUF;
655 		m = (struct mbuf *)buf;
656 	} else {
657 		outtype = CRYPTO_BUF_IOV;
658 		uio = (struct uio *)buf;
659 	}
660 
661 	/* Initialize the IV */
662 	if (crde->crd_flags & CRD_F_ENCRYPT) {
663 		/* IV explicitly provided ? */
664 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
665 			bcopy(crde->crd_iv, iv, ivlen);
666 		else
667 			karc4rand(iv, ivlen);
668 
669 		/* Do we need to write the IV */
670 		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
671 			crypto_copyback(crde->crd_flags, buf, crde->crd_inject,
672 			    ivlen, iv);
673 
674 	} else {	/* Decryption */
675 			/* IV explicitly provided ? */
676 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
677 			bcopy(crde->crd_iv, iv, ivlen);
678 		else
679 			/* Get IV off buf */
680 			crypto_copydata(crde->crd_flags, buf, crde->crd_inject,
681 			    ivlen, iv);
682 	}
683 
684 	/* Supply MAC with IV */
685 	if (axf->Reinit)
686 		axf->Reinit(&ctx, iv, ivlen);
687 
688 	/* Supply MAC with AAD */
689 	for (i = 0; i < crda->crd_len; i += blksz) {
690 		len = MIN(crda->crd_len - i, blksz);
691 		crypto_copydata(crde->crd_flags, buf, crda->crd_skip + i, len,
692 		    blk);
693 		axf->Update(&ctx, blk, len);
694 	}
695 
696 	spin_lock(&swcr_spin);
697 	kschedule = sw->sw_kschedule;
698 	++sw->sw_kschedule_refs;
699 	spin_unlock(&swcr_spin);
700 
701 	if (exf->reinit)
702 		exf->reinit(kschedule, iv);
703 
704 	/* Do encryption/decryption with MAC */
705 	for (i = 0; i < crde->crd_len; i += blksz) {
706 		len = MIN(crde->crd_len - i, blksz);
707 		if (len < blksz)
708 			bzero(blk, blksz);
709 		crypto_copydata(crde->crd_flags, buf, crde->crd_skip + i, len,
710 		    blk);
711 		if (crde->crd_flags & CRD_F_ENCRYPT) {
712 			exf->encrypt(kschedule, blk, iv);
713 			axf->Update(&ctx, blk, len);
714 		} else {
715 			axf->Update(&ctx, blk, len);
716 			exf->decrypt(kschedule, blk, iv);
717 		}
718 		crypto_copyback(crde->crd_flags, buf, crde->crd_skip + i, len,
719 		    blk);
720 	}
721 
722 	/* Do any required special finalization */
723 	switch (crda->crd_alg) {
724 		case CRYPTO_AES_128_GMAC:
725 		case CRYPTO_AES_192_GMAC:
726 		case CRYPTO_AES_256_GMAC:
727 			/* length block */
728 			bzero(blk, blksz);
729 			blkp = (uint32_t *)blk + 1;
730 			*blkp = htobe32(crda->crd_len * 8);
731 			blkp = (uint32_t *)blk + 3;
732 			*blkp = htobe32(crde->crd_len * 8);
733 			axf->Update(&ctx, blk, blksz);
734 			break;
735 	}
736 
737 	/* Finalize MAC */
738 	axf->Final(aalg, &ctx);
739 
740 	/* Inject the authentication data */
741 	crypto_copyback(crda->crd_flags, crp->crp_buf, crda->crd_inject,
742 	    axf->blocksize, aalg);
743 
744 	spin_lock(&swcr_spin);
745 	--sw->sw_kschedule_refs;
746 	spin_unlock(&swcr_spin);
747 
748 	return (0);
749 }
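/*
 * Note on the GMAC finalization above: the "length block" is the 16-byte
 * block GCM appends to the GHASH input, holding len(AAD) and
 * len(ciphertext), each expressed in bits as a 64-bit big-endian value.
 * The code writes only the low 32 bits of each length (at byte offsets
 * 4-7 and 12-15 of the zeroed block), which is equivalent as long as
 * both lengths stay below 2^32 bits.
 */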
750 
751 /*
752  * Apply a compression/decompression algorithm
753  */
754 static int
755 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
756 	     caddr_t buf, int flags)
757 {
758 	u_int8_t *data, *out;
759 	struct comp_algo *cxf;
760 	int adj;
761 	u_int32_t result;
762 
763 	cxf = sw->sw_cxf;
764 
765 	/*
766 	 * The (de)compressor must see the whole buffer at once, so if
767 	 * the data is not contiguous in the mbuf chain it is first
768 	 * copied into a temporary buffer.
769 	 */
770 	data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_INTWAIT);
771 	if (data == NULL)
772 		return (EINVAL);
773 	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
774 
775 	if (crd->crd_flags & CRD_F_COMP)
776 		result = cxf->compress(data, crd->crd_len, &out);
777 	else
778 		result = cxf->decompress(data, crd->crd_len, &out);
779 
780 	kfree(data, M_CRYPTO_DATA);
781 	if (result == 0)
782 		return EINVAL;
783 
784 	/* Copy back the (de)compressed data.  m_copyback will extend
785 	 * the mbuf as necessary.
786 	 */
787 	sw->sw_size = result;
788 	/* Check the compressed size when doing compression */
789 	if (crd->crd_flags & CRD_F_COMP) {
790 		if (result >= crd->crd_len) {
791 			/* Compression did not shrink the data; leave the original */
792 			kfree(out, M_CRYPTO_DATA);
793 			return 0;
794 		}
795 	}
796 
797 	crypto_copyback(flags, buf, crd->crd_skip, result, out);
798 	if (result < crd->crd_len) {
799 		adj = result - crd->crd_len;
800 		if (flags & CRYPTO_F_IMBUF) {
801 			/* adj is negative; m_adj() trims from the mbuf tail */
802 			m_adj((struct mbuf *)buf, adj);
803 		} else if (flags & CRYPTO_F_IOV) {
804 			struct uio *uio = (struct uio *)buf;
805 			int ind;
806 
807 			adj = crd->crd_len - result;
808 			ind = uio->uio_iovcnt - 1;
809 
810 			while (adj > 0 && ind >= 0) {
811 				if (adj < uio->uio_iov[ind].iov_len) {
812 					uio->uio_iov[ind].iov_len -= adj;
813 					break;
814 				}
815 
816 				adj -= uio->uio_iov[ind].iov_len;
817 				uio->uio_iov[ind].iov_len = 0;
818 				ind--;
819 				uio->uio_iovcnt--;
820 			}
821 		}
822 	}
823 	kfree(out, M_CRYPTO_DATA);
824 	return 0;
825 }
826 
827 /*
828  * Generate a new software session.
829  */
830 static int
831 swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
832 {
833 	struct swcr_data *swd_base;
834 	struct swcr_data **swd;
835 	struct swcr_data **oswd;
836 	struct auth_hash *axf;
837 	struct enc_xform *txf;
838 	struct comp_algo *cxf;
839 	u_int32_t i;
840 	u_int32_t n;
841 	int error;
842 
843 	if (sid == NULL || cri == NULL)
844 		return EINVAL;
845 
846 	swd_base = NULL;
847 	swd = &swd_base;
848 
849 	while (cri) {
850 		*swd = kmalloc(sizeof(struct swcr_data),
851 			       M_CRYPTO_DATA, M_WAITOK | M_ZERO);
852 
853 		switch (cri->cri_alg) {
854 		case CRYPTO_DES_CBC:
855 			txf = &enc_xform_des;
856 			goto enccommon;
857 		case CRYPTO_3DES_CBC:
858 			txf = &enc_xform_3des;
859 			goto enccommon;
860 		case CRYPTO_BLF_CBC:
861 			txf = &enc_xform_blf;
862 			goto enccommon;
863 		case CRYPTO_CAST_CBC:
864 			txf = &enc_xform_cast5;
865 			goto enccommon;
866 		case CRYPTO_SKIPJACK_CBC:
867 			txf = &enc_xform_skipjack;
868 			goto enccommon;
869 		case CRYPTO_RIJNDAEL128_CBC:
870 			txf = &enc_xform_rijndael128;
871 			goto enccommon;
872 		case CRYPTO_AES_XTS:
873 			txf = &enc_xform_aes_xts;
874 			goto enccommon;
875 		case CRYPTO_AES_CTR:
876 			txf = &enc_xform_aes_ctr;
877 			goto enccommon;
878 		case CRYPTO_AES_GCM_16:
879 			txf = &enc_xform_aes_gcm;
880 			goto enccommon;
881 		case CRYPTO_AES_GMAC:
882 			txf = &enc_xform_aes_gmac;
883 			(*swd)->sw_exf = txf;
884 			break;
885 		case CRYPTO_CAMELLIA_CBC:
886 			txf = &enc_xform_camellia;
887 			goto enccommon;
888 		case CRYPTO_TWOFISH_CBC:
889 			txf = &enc_xform_twofish;
890 			goto enccommon;
891 		case CRYPTO_SERPENT_CBC:
892 			txf = &enc_xform_serpent;
893 			goto enccommon;
894 		case CRYPTO_TWOFISH_XTS:
895 			txf = &enc_xform_twofish_xts;
896 			goto enccommon;
897 		case CRYPTO_SERPENT_XTS:
898 			txf = &enc_xform_serpent_xts;
899 			goto enccommon;
900 		case CRYPTO_NULL_CBC:
901 			txf = &enc_xform_null;
902 			goto enccommon;
903 		enccommon:
904 			if (cri->cri_key != NULL) {
905 				error = txf->setkey(&((*swd)->sw_kschedule),
906 						    cri->cri_key,
907 						    cri->cri_klen / 8);
908 				if (error) {
909 					swcr_freesession_slot(&swd_base, 0);
910 					return error;
911 				}
912 			}
913 			(*swd)->sw_exf = txf;
914 			break;
915 
916 		case CRYPTO_MD5_HMAC:
917 			axf = &auth_hash_hmac_md5;
918 			goto authcommon;
919 		case CRYPTO_SHA1_HMAC:
920 			axf = &auth_hash_hmac_sha1;
921 			goto authcommon;
922 		case CRYPTO_SHA2_256_HMAC:
923 			axf = &auth_hash_hmac_sha2_256;
924 			goto authcommon;
925 		case CRYPTO_SHA2_384_HMAC:
926 			axf = &auth_hash_hmac_sha2_384;
927 			goto authcommon;
928 		case CRYPTO_SHA2_512_HMAC:
929 			axf = &auth_hash_hmac_sha2_512;
930 			goto authcommon;
931 		case CRYPTO_NULL_HMAC:
932 			axf = &auth_hash_null;
933 			goto authcommon;
934 		case CRYPTO_RIPEMD160_HMAC:
935 			axf = &auth_hash_hmac_ripemd_160;
936 		authcommon:
937 			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
938 						  M_WAITOK);
939 			if ((*swd)->sw_ictx == NULL) {
940 				swcr_freesession_slot(&swd_base, 0);
941 				return ENOBUFS;
942 			}
943 
944 			(*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
945 						  M_WAITOK);
946 			if ((*swd)->sw_octx == NULL) {
947 				swcr_freesession_slot(&swd_base, 0);
948 				return ENOBUFS;
949 			}
950 
951 			if (cri->cri_key != NULL) {
952 				swcr_authprepare(axf, *swd, cri->cri_key,
953 				    cri->cri_klen);
954 			}
955 
956 			(*swd)->sw_mlen = cri->cri_mlen;
957 			(*swd)->sw_axf = axf;
958 			break;
959 
960 		case CRYPTO_MD5_KPDK:
961 			axf = &auth_hash_key_md5;
962 			goto auth2common;
963 
964 		case CRYPTO_SHA1_KPDK:
965 			axf = &auth_hash_key_sha1;
966 		auth2common:
967 			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
968 						  M_WAITOK);
969 			if ((*swd)->sw_ictx == NULL) {
970 				swcr_freesession_slot(&swd_base, 0);
971 				return ENOBUFS;
972 			}
973 
974 			(*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
975 						  M_CRYPTO_DATA, M_WAITOK);
976 			if ((*swd)->sw_octx == NULL) {
977 				swcr_freesession_slot(&swd_base, 0);
978 				return ENOBUFS;
979 			}
980 
981 			/* Store the key so we can "append" it to the payload */
982 			if (cri->cri_key != NULL) {
983 				swcr_authprepare(axf, *swd, cri->cri_key,
984 				    cri->cri_klen);
985 			}
986 
987 			(*swd)->sw_mlen = cri->cri_mlen;
988 			(*swd)->sw_axf = axf;
989 			break;
990 #ifdef notdef
991 		case CRYPTO_MD5:
992 			axf = &auth_hash_md5;
993 			goto auth3common;
994 
995 		case CRYPTO_SHA1:
996 			axf = &auth_hash_sha1;
997 		auth3common:
998 			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
999 						  M_WAITOK);
1000 			if ((*swd)->sw_ictx == NULL) {
1001 				swcr_freesession_slot(&swd_base, 0);
1002 				return ENOBUFS;
1003 			}
1004 
1005 			axf->Init((*swd)->sw_ictx);
1006 			(*swd)->sw_mlen = cri->cri_mlen;
1007 			(*swd)->sw_axf = axf;
1008 			break;
1009 #endif
1010 		case CRYPTO_AES_128_GMAC:
1011 			axf = &auth_hash_gmac_aes_128;
1012 			goto auth4common;
1013 
1014 		case CRYPTO_AES_192_GMAC:
1015 			axf = &auth_hash_gmac_aes_192;
1016 			goto auth4common;
1017 
1018 		case CRYPTO_AES_256_GMAC:
1019 			axf = &auth_hash_gmac_aes_256;
1020 		auth4common:
1021 			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
1022 			    M_NOWAIT);
1023 			if ((*swd)->sw_ictx == NULL) {
1024 				swcr_freesession_slot(&swd_base, 0);
1025 				return ENOBUFS;
1026 			}
1027 
1028 			axf->Init((*swd)->sw_ictx);
1029 			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
1030 			    cri->cri_klen / 8);
1031 			(*swd)->sw_axf = axf;
1032 			break;
1033 
1034 		case CRYPTO_DEFLATE_COMP:
1035 			cxf = &comp_algo_deflate;
1036 			(*swd)->sw_cxf = cxf;
1037 			break;
1038 		default:
1039 			swcr_freesession_slot(&swd_base, 0);
1040 			return EINVAL;
1041 		}
1042 
1043 		(*swd)->sw_alg = cri->cri_alg;
1044 		cri = cri->cri_next;
1045 		swd = &((*swd)->sw_next);
1046 	}
1047 
1048 	for (;;) {
1049 		/*
1050 		 * Atomically allocate a session
1051 		 */
1052 		spin_lock(&swcr_spin);
1053 		for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
1054 			if (swcr_sessions[i] == NULL)
1055 				break;
1056 		}
1057 		if (i < swcr_sesnum) {
1058 			swcr_sessions[i] = swd_base;
1059 			swcr_minsesnum = i + 1;
1060 			spin_unlock(&swcr_spin);
1061 			break;
1062 		}
1063 		n = swcr_sesnum;
1064 		spin_unlock(&swcr_spin);
1065 
1066 		/*
1067 		 * A larger array is required; allocate a new one and swap
1068 		 * it in, checking for SMP races against other resizers.
1069 		 */
1070 		if (n < CRYPTO_SW_SESSIONS)
1071 			n = CRYPTO_SW_SESSIONS;
1072 		else
1073 			n = n * 3 / 2;
1074 		swd = kmalloc(n * sizeof(struct swcr_data *),
1075 			      M_CRYPTO_DATA, M_WAITOK | M_ZERO);
1076 
1077 		spin_lock(&swcr_spin);
1078 		if (swcr_sesnum >= n) {
1079 			spin_unlock(&swcr_spin);
1080 			kfree(swd, M_CRYPTO_DATA);
1081 		} else if (swcr_sesnum) {
1082 			bcopy(swcr_sessions, swd,
1083 			      swcr_sesnum * sizeof(struct swcr_data *));
1084 			oswd = swcr_sessions;
1085 			swcr_sessions = swd;
1086 			swcr_sesnum = n;
1087 			spin_unlock(&swcr_spin);
1088 			kfree(oswd, M_CRYPTO_DATA);
1089 		} else {
1090 			swcr_sessions = swd;
1091 			swcr_sesnum = n;
1092 			spin_unlock(&swcr_spin);
1093 		}
1094 	}
1095 
1096 	*sid = i;
1097 	return 0;
1098 }
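/*
 * For reference, a minimal sketch of how an opencrypto consumer might
 * describe a combined AES-CBC + HMAC-SHA1 session that ends up here via
 * the framework's crypto_newsession().  Key pointers, key lengths and the
 * final driver-selection argument are placeholders, and error handling is
 * omitted; this is not part of the driver.
 */
#if 0	/* illustrative sketch only */
static int
example_session(u_int64_t *sidp, caddr_t ekey, caddr_t akey)
{
	struct cryptoini crie, cria;

	bzero(&crie, sizeof(crie));
	bzero(&cria, sizeof(cria));

	crie.cri_alg = CRYPTO_RIJNDAEL128_CBC;
	crie.cri_klen = 128;			/* key length in bits */
	crie.cri_key = ekey;
	crie.cri_next = &cria;			/* chain the auth descriptor */

	cria.cri_alg = CRYPTO_SHA1_HMAC;
	cria.cri_klen = 160;			/* key length in bits */
	cria.cri_key = akey;

	return (crypto_newsession(sidp, &crie, 0));
}
#endif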
1099 
1100 /*
1101  * Free a session.
1102  */
1103 static int
1104 swcr_freesession(device_t dev, u_int64_t tid)
1105 {
1106 	u_int32_t sid = CRYPTO_SESID2LID(tid);
1107 
1108 	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
1109 	    swcr_sessions[sid] == NULL) {
1110 		return EINVAL;
1111 	}
1112 
1113 	/* Silently accept and return */
1114 	if (sid == 0)
1115 		return 0;
1116 
1117 	return(swcr_freesession_slot(&swcr_sessions[sid], sid));
1118 }
1119 
1120 static
1121 int
1122 swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
1123 {
1124 	struct enc_xform *txf;
1125 	struct auth_hash *axf;
1126 	struct comp_algo *cxf;
1127 	struct swcr_data *swd;
1128 	struct swcr_data *swnext;
1129 
1130 	/*
1131 	 * Protect session detachment with the spinlock.
1132 	 */
1133 	spin_lock(&swcr_spin);
1134 	swnext = *swdp;
1135 	*swdp = NULL;
1136 	if (sid && swcr_minsesnum > sid)
1137 		swcr_minsesnum = sid;
1138 	spin_unlock(&swcr_spin);
1139 
1140 	/*
1141 	 * Clean up at our leisure.
1142 	 */
1143 	while ((swd = swnext) != NULL) {
1144 		swnext = swd->sw_next;
1145 
1146 		swd->sw_next = NULL;
1147 
1148 		switch (swd->sw_alg) {
1149 		case CRYPTO_DES_CBC:
1150 		case CRYPTO_3DES_CBC:
1151 		case CRYPTO_BLF_CBC:
1152 		case CRYPTO_CAST_CBC:
1153 		case CRYPTO_SKIPJACK_CBC:
1154 		case CRYPTO_RIJNDAEL128_CBC:
1155 		case CRYPTO_AES_XTS:
1156 		case CRYPTO_AES_CTR:
1157 		case CRYPTO_AES_GCM_16:
1158 		case CRYPTO_AES_GMAC:
1159 		case CRYPTO_CAMELLIA_CBC:
1160 		case CRYPTO_TWOFISH_CBC:
1161 		case CRYPTO_SERPENT_CBC:
1162 		case CRYPTO_TWOFISH_XTS:
1163 		case CRYPTO_SERPENT_XTS:
1164 		case CRYPTO_NULL_CBC:
1165 			txf = swd->sw_exf;
1166 
1167 			if (swd->sw_kschedule)
1168 				txf->zerokey(&(swd->sw_kschedule));
1169 			break;
1170 
1171 		case CRYPTO_MD5_HMAC:
1172 		case CRYPTO_SHA1_HMAC:
1173 		case CRYPTO_SHA2_256_HMAC:
1174 		case CRYPTO_SHA2_384_HMAC:
1175 		case CRYPTO_SHA2_512_HMAC:
1176 		case CRYPTO_RIPEMD160_HMAC:
1177 		case CRYPTO_NULL_HMAC:
1178 			axf = swd->sw_axf;
1179 
1180 			if (swd->sw_ictx) {
1181 				bzero(swd->sw_ictx, axf->ctxsize);
1182 				kfree(swd->sw_ictx, M_CRYPTO_DATA);
1183 			}
1184 			if (swd->sw_octx) {
1185 				bzero(swd->sw_octx, axf->ctxsize);
1186 				kfree(swd->sw_octx, M_CRYPTO_DATA);
1187 			}
1188 			break;
1189 
1190 		case CRYPTO_MD5_KPDK:
1191 		case CRYPTO_SHA1_KPDK:
1192 			axf = swd->sw_axf;
1193 
1194 			if (swd->sw_ictx) {
1195 				bzero(swd->sw_ictx, axf->ctxsize);
1196 				kfree(swd->sw_ictx, M_CRYPTO_DATA);
1197 			}
1198 			if (swd->sw_octx) {
1199 				bzero(swd->sw_octx, swd->sw_klen);
1200 				kfree(swd->sw_octx, M_CRYPTO_DATA);
1201 			}
1202 			break;
1203 
1204 		case CRYPTO_AES_128_GMAC:
1205 		case CRYPTO_AES_192_GMAC:
1206 		case CRYPTO_AES_256_GMAC:
1207 		case CRYPTO_MD5:
1208 		case CRYPTO_SHA1:
1209 			axf = swd->sw_axf;
1210 
1211 			if (swd->sw_ictx) {
1212 				bzero(swd->sw_ictx, axf->ctxsize);
1213 				kfree(swd->sw_ictx, M_CRYPTO_DATA);
1214 			}
1215 			break;
1216 
1217 		case CRYPTO_DEFLATE_COMP:
1218 			cxf = swd->sw_cxf;
1219 			break;
1220 		}
1221 
1223 		kfree(swd, M_CRYPTO_DATA);
1224 	}
1225 	return 0;
1226 }
1227 
1228 /*
1229  * Process a software request.
1230  */
1231 static int
1232 swcr_process(device_t dev, struct cryptop *crp, int hint)
1233 {
1234 	struct cryptodesc *crd;
1235 	struct swcr_data *sw;
1236 	u_int32_t lid;
1237 
1238 	/* Sanity check */
1239 	if (crp == NULL)
1240 		return EINVAL;
1241 
1242 	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
1243 		crp->crp_etype = EINVAL;
1244 		goto done;
1245 	}
1246 
1247 	lid = crp->crp_sid & 0xffffffff;
1248 	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
1249 		crp->crp_etype = ENOENT;
1250 		goto done;
1251 	}
1252 
1253 	/* Go through crypto descriptors, processing as we go */
1254 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1255 		/*
1256 		 * Find the crypto context.
1257 		 *
1258 		 * XXX Note that the logic here prevents us from having
1259 		 * XXX the same algorithm multiple times in a session
1260 		 * XXX (or rather, we can but it won't give us the right
1261 		 * XXX results). To do that, we'd need some way of differentiating
1262 		 * XXX between the various instances of an algorithm (so we can
1263 		 * XXX locate the correct crypto context).
1264 		 */
1265 		for (sw = swcr_sessions[lid];
1266 		    sw && sw->sw_alg != crd->crd_alg;
1267 		    sw = sw->sw_next)
1268 			;
1269 
1270 		/* No such context ? */
1271 		if (sw == NULL) {
1272 			crp->crp_etype = EINVAL;
1273 			goto done;
1274 		}
1275 		switch (sw->sw_alg) {
1276 		case CRYPTO_DES_CBC:
1277 		case CRYPTO_3DES_CBC:
1278 		case CRYPTO_BLF_CBC:
1279 		case CRYPTO_CAST_CBC:
1280 		case CRYPTO_SKIPJACK_CBC:
1281 		case CRYPTO_RIJNDAEL128_CBC:
1282 		case CRYPTO_AES_XTS:
1283 		case CRYPTO_AES_CTR:
1284 		case CRYPTO_CAMELLIA_CBC:
1285 		case CRYPTO_TWOFISH_CBC:
1286 		case CRYPTO_SERPENT_CBC:
1287 		case CRYPTO_TWOFISH_XTS:
1288 		case CRYPTO_SERPENT_XTS:
1289 			if ((crp->crp_etype = swcr_encdec(crd, sw,
1290 			    crp->crp_buf, crp->crp_flags)) != 0)
1291 				goto done;
1292 			break;
1293 		case CRYPTO_NULL_CBC:
1294 			crp->crp_etype = 0;
1295 			break;
1296 		case CRYPTO_MD5_HMAC:
1297 		case CRYPTO_SHA1_HMAC:
1298 		case CRYPTO_SHA2_256_HMAC:
1299 		case CRYPTO_SHA2_384_HMAC:
1300 		case CRYPTO_SHA2_512_HMAC:
1301 		case CRYPTO_RIPEMD160_HMAC:
1302 		case CRYPTO_NULL_HMAC:
1303 		case CRYPTO_MD5_KPDK:
1304 		case CRYPTO_SHA1_KPDK:
1305 		case CRYPTO_MD5:
1306 		case CRYPTO_SHA1:
1307 			if ((crp->crp_etype = swcr_authcompute(crd, sw,
1308 			    crp->crp_buf, crp->crp_flags)) != 0)
1309 				goto done;
1310 			break;
1311 
1312 		case CRYPTO_AES_GCM_16:
1313 		case CRYPTO_AES_GMAC:
1314 		case CRYPTO_AES_128_GMAC:
1315 		case CRYPTO_AES_192_GMAC:
1316 		case CRYPTO_AES_256_GMAC:
1317 			crp->crp_etype = swcr_combined(crp);
1318 			goto done;
1319 
1320 		case CRYPTO_DEFLATE_COMP:
1321 			if ((crp->crp_etype = swcr_compdec(crd, sw,
1322 			    crp->crp_buf, crp->crp_flags)) != 0)
1323 				goto done;
1324 			else
1325 				crp->crp_olen = (int)sw->sw_size;
1326 			break;
1327 
1328 		default:
1329 			/* Unknown/unsupported algorithm */
1330 			crp->crp_etype = EINVAL;
1331 			goto done;
1332 		}
1333 	}
1334 
1335 done:
1336 	crypto_done(crp);
1337 	lwkt_yield();
1338 	return 0;
1339 }
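/*
 * For reference, a minimal sketch of the consumer side of swcr_process():
 * build a request with a single encryption descriptor over a contiguous
 * buffer and hand it to the framework, which routes it to the selected
 * driver's process method.  The callback, the buffer layout and the
 * crd_inject offset are placeholders; this is not part of the driver.
 */
#if 0	/* illustrative sketch only */
static int
example_encrypt(u_int64_t sid, caddr_t buf, int len,
		int (*done)(struct cryptop *))
{
	struct cryptop *crp;
	struct cryptodesc *crd;

	crp = crypto_getreq(1);			/* one descriptor */
	if (crp == NULL)
		return (ENOMEM);

	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_flags = 0;			/* plain contiguous buffer */
	crp->crp_buf = buf;
	crp->crp_callback = done;

	crd = crp->crp_desc;
	crd->crd_skip = 0;
	crd->crd_len = len;
	crd->crd_inject = 0;			/* where the IV would live */
	crd->crd_flags = CRD_F_ENCRYPT;
	crd->crd_alg = CRYPTO_RIJNDAEL128_CBC;

	return (crypto_dispatch(crp));
}
#endif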
1340 
1341 static void
1342 swcr_identify(driver_t *drv, device_t parent)
1343 {
1344 	/* NB: order 10 is so we get attached after h/w devices */
1345 	/* XXX: not certain this BUS_ADD_CHILD usage is correct */
1346 	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
1347 	    BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0)
1348 		panic("cryptosoft: could not attach");
1349 }
1350 
1351 static int
1352 swcr_probe(device_t dev)
1353 {
1354 	device_set_desc(dev, "software crypto");
1355 	return (0);
1356 }
1357 
1358 static int
1359 swcr_attach(device_t dev)
1360 {
1361 	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
1362 	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
1363 
1364 	swcr_id = crypto_get_driverid(dev, CRYPTOCAP_F_SOFTWARE |
1365 					   CRYPTOCAP_F_SYNC |
1366 					   CRYPTOCAP_F_SMP);
1367 	if (swcr_id < 0) {
1368 		device_printf(dev, "cannot initialize!\n");
1369 		return ENOMEM;
1370 	}
1371 #define	REGISTER(alg) \
1372 	crypto_register(swcr_id, alg, 0, 0)
1373 	REGISTER(CRYPTO_DES_CBC);
1374 	REGISTER(CRYPTO_3DES_CBC);
1375 	REGISTER(CRYPTO_BLF_CBC);
1376 	REGISTER(CRYPTO_CAST_CBC);
1377 	REGISTER(CRYPTO_SKIPJACK_CBC);
1378 	REGISTER(CRYPTO_NULL_CBC);
1379 	REGISTER(CRYPTO_MD5_HMAC);
1380 	REGISTER(CRYPTO_SHA1_HMAC);
1381 	REGISTER(CRYPTO_SHA2_256_HMAC);
1382 	REGISTER(CRYPTO_SHA2_384_HMAC);
1383 	REGISTER(CRYPTO_SHA2_512_HMAC);
1384 	REGISTER(CRYPTO_RIPEMD160_HMAC);
1385 	REGISTER(CRYPTO_NULL_HMAC);
1386 	REGISTER(CRYPTO_MD5_KPDK);
1387 	REGISTER(CRYPTO_SHA1_KPDK);
1388 	REGISTER(CRYPTO_MD5);
1389 	REGISTER(CRYPTO_SHA1);
1390 	REGISTER(CRYPTO_RIJNDAEL128_CBC);
1391 	REGISTER(CRYPTO_AES_XTS);
1392 	REGISTER(CRYPTO_AES_CTR);
1393 	REGISTER(CRYPTO_AES_GCM_16);
1394 	REGISTER(CRYPTO_AES_GMAC);
1395 	REGISTER(CRYPTO_AES_128_GMAC);
1396 	REGISTER(CRYPTO_AES_192_GMAC);
1397 	REGISTER(CRYPTO_AES_256_GMAC);
1398 	REGISTER(CRYPTO_CAMELLIA_CBC);
1399 	REGISTER(CRYPTO_TWOFISH_CBC);
1400 	REGISTER(CRYPTO_SERPENT_CBC);
1401 	REGISTER(CRYPTO_TWOFISH_XTS);
1402 	REGISTER(CRYPTO_SERPENT_XTS);
1403 	REGISTER(CRYPTO_DEFLATE_COMP);
1404 #undef REGISTER
1405 
1406 	return 0;
1407 }
1408 
1409 static int
1410 swcr_detach(device_t dev)
1411 {
1412 	crypto_unregister_all(swcr_id);
1413 	if (swcr_sessions != NULL)
1414 		kfree(swcr_sessions, M_CRYPTO_DATA);
1415 	return 0;
1416 }
1417 
1418 static device_method_t swcr_methods[] = {
1419 	DEVMETHOD(device_identify,	swcr_identify),
1420 	DEVMETHOD(device_probe,		swcr_probe),
1421 	DEVMETHOD(device_attach,	swcr_attach),
1422 	DEVMETHOD(device_detach,	swcr_detach),
1423 
1424 	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
1425 	DEVMETHOD(cryptodev_freesession, swcr_freesession),
1426 	DEVMETHOD(cryptodev_process,	swcr_process),
1427 
1428 	{0, 0},
1429 };
1430 
1431 static driver_t swcr_driver = {
1432 	"cryptosoft",
1433 	swcr_methods,
1434 	0,		/* NB: no softc */
1435 };
1436 static devclass_t swcr_devclass;
1437 
1438 /*
1439  * NB: We explicitly reference the crypto module so we
1440  * get the necessary ordering when built as a loadable
1441  * module.  This is required because we bundle the crypto
1442  * module code together with the cryptosoft driver (otherwise
1443  * normal module dependencies would handle things).
1444  */
1445 extern int crypto_modevent(struct module *, int, void *);
1446 /* XXX where to attach */
1447 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, NULL);
1448 MODULE_VERSION(cryptosoft, 1);
1449 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1450