/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * SMP modifications by Matthew Dillon for the DragonFlyBSD Project
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 *
 * $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $
 * $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/spinlock2.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static	int32_t swcr_id;
static	struct swcr_data **swcr_sessions = NULL;
static	u_int32_t swcr_sesnum;
static	u_int32_t swcr_minsesnum = 1;

static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin);

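/*
 * Pad buffers for HMAC key preparation.  These are filled with
 * HMAC_IPAD_VAL and HMAC_OPAD_VAL at attach time and are used by
 * swcr_authprepare() to pad keys shorter than the hash block size
 * out to a full block.
 */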
u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);
static	int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
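/*
 * For xforms without a reinit method the chaining below is plain CBC.
 * As a sketch of the per-block math:
 *
 *	encrypt: C[i] = E_k(P[i] ^ C[i-1]),	C[-1] = IV
 *	decrypt: P[i] = D_k(C[i]) ^ C[i-1],	C[-1] = IV
 *
 * Encryption therefore XORs before calling exf->encrypt(), while
 * decryption XORs after exf->decrypt().  Because the work is done in
 * place, the decrypt path must save the ciphertext block (via piv/iv)
 * before it is overwritten so it can serve as the next block's C[i-1].
 */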
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	u_int8_t *kschedule;
	u_int8_t *okschedule;
	struct enc_xform *exf;
	int i, k, j, blks, ivlen;
	int error;
	int explicit_kschedule;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			karc4rand(iv, ivlen);

		/* Do we need to write the IV ? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}

	ivp = iv;

	/*
	 * The semantics are seriously broken because the session key
	 * storage was never designed for concurrent ops.
	 */
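	/*
	 * With an explicit key we build a private schedule here and, in
	 * the cleanup at done:, install it as the session schedule only
	 * if no other operation holds a reference.  Otherwise the
	 * session schedule is used under a reference count so that a
	 * concurrent rekey cannot free it out from under us.
	 */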
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		kschedule = NULL;
		explicit_kschedule = 1;
		error = exf->setkey(&kschedule,
				    crd->crd_key, crd->crd_klen / 8);
		if (error)
			goto done;
	} else {
		spin_lock(&swcr_spin);
		kschedule = sw->sw_kschedule;
		++sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
		explicit_kschedule = 0;
	}

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(kschedule, iv);

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL) {
			error = EINVAL;
			goto done;
		}

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    blk, iv);
					} else {
						exf->decrypt(kschedule,
						    blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL) {
					error = EINVAL;
					goto done;
				}

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, and only
			 * if there is indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    idat, iv);
					} else {
						exf->decrypt(kschedule,
						    idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
		error = 0;	/* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL) {
			error = EINVAL;
			goto done;
		}

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    blk, iv);
					} else {
						exf->decrypt(kschedule,
						    blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL) {
					error = EINVAL;
					goto done;
				}

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, and only
			 * if there is indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    idat, iv);
					} else {
						exf->decrypt(kschedule,
						    idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}
		error = 0;	/* Done with iovec encryption/decryption */
	} else {
		/*
		 * contiguous buffer
		 */
		if (exf->reinit) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(kschedule, buf + i, iv);
				} else {
					exf->decrypt(kschedule, buf + i, iv);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(kschedule, buf + i, iv);
			}
		} else {		/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(kschedule, buf + i, iv);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}
		error = 0; /* Done w/contiguous buffer encrypt/decrypt */
	}
done:
	/*
	 * Cleanup - explicitly replace the session key if requested
	 *	     (horrible semantics for concurrent operation)
	 */
	if (explicit_kschedule) {
		spin_lock(&swcr_spin);
		if (kschedule && sw->sw_kschedule &&
		    sw->sw_kschedule_refs == 0) {
			okschedule = sw->sw_kschedule;
			sw->sw_kschedule = kschedule;
		} else {
			/* Cannot install; dispose of the one-shot schedule */
			okschedule = kschedule;
		}
		spin_unlock(&swcr_spin);
		if (okschedule)
			exf->zerokey(&okschedule);
	} else {
		spin_lock(&swcr_spin);
		--sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
	}
	return error;
}

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

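	/*
	 * For the HMAC cases below, HMAC(k, m) is defined as
	 * H((k ^ opad) || H((k ^ ipad) || m)).  sw_ictx is primed with
	 * the (k ^ ipad) block and sw_octx with the (k ^ opad) block,
	 * so each request only has to continue the inner hash over the
	 * payload.  Note that the second XOR loop applies
	 * (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL), which converts the already
	 * ipad-XORed key directly into the opad-XORed key; the final
	 * loop restores the caller's key.
	 */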
506 	case CRYPTO_MD5_HMAC:
507 	case CRYPTO_SHA1_HMAC:
508 	case CRYPTO_SHA2_256_HMAC:
509 	case CRYPTO_SHA2_384_HMAC:
510 	case CRYPTO_SHA2_512_HMAC:
511 	case CRYPTO_NULL_HMAC:
512 	case CRYPTO_RIPEMD160_HMAC:
513 		for (k = 0; k < klen; k++)
514 			key[k] ^= HMAC_IPAD_VAL;
515 
516 		axf->Init(sw->sw_ictx);
517 		axf->Update(sw->sw_ictx, key, klen);
518 		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);
519 
520 		for (k = 0; k < klen; k++)
521 			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
522 
523 		axf->Init(sw->sw_octx);
524 		axf->Update(sw->sw_octx, key, klen);
525 		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);
526 
527 		for (k = 0; k < klen; k++)
528 			key[k] ^= HMAC_OPAD_VAL;
529 		break;
530 	case CRYPTO_MD5_KPDK:
531 	case CRYPTO_SHA1_KPDK:
532 	{
533 		/* We need a buffer that can hold an md5 and a sha1 result. */
534 		u_char buf[SHA1_RESULTLEN];
535 
536 		sw->sw_klen = klen;
537 		bcopy(key, sw->sw_octx, klen);
538 		axf->Init(sw->sw_ictx);
539 		axf->Update(sw->sw_ictx, key, klen);
540 		axf->Final(buf, sw->sw_ictx);
541 		break;
542 	}
543 	default:
544 		kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
545 		    "doesn't use keys.\n", __func__, axf->type);
546 	}
547 }
548 
549 /*
550  * Compute keyed-hash authenticator.
551  */
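/*
 * For the HMAC algorithms this resumes the precomputed inner context
 * (sw_ictx) over the payload, finalizes it, then folds the inner
 * digest into a copy of the precomputed outer context (sw_octx).  The
 * KPDK variants instead append the stored key to the payload before
 * finalizing.
 */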
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}

/*
 * Apply a compression/decompression algorithm.
 */
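/*
 * When the output ends up shorter than the input, the covered region
 * is shrunk in place: mbuf chains are trimmed from the tail with
 * m_adj() (note the negative count), while uio-backed buffers are
 * shrunk by walking the iovec array backwards, truncating or dropping
 * iovecs until the surplus is consumed.
 */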
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
	     caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * The (de)compressor needs to see the whole input at once, so
	 * copy it out of the (possibly fragmented) buffer into a
	 * contiguous scratch buffer first.
	 */
	data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_INTWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	kfree(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  m_copyback will extend
	 * the mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			kfree(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		if (flags & CRYPTO_F_IMBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	kfree(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data *swd_base;
	struct swcr_data **swd;
	struct swcr_data **oswd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	u_int32_t n;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	swd_base = NULL;
	swd = &swd_base;

	while (cri) {
		*swd = kmalloc(sizeof(struct swcr_data),
			       M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
						    cri->cri_key,
						    cri->cri_klen / 8);
				if (error) {
					swcr_freesession_slot(&swd_base, 0);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			(*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			(*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
						  M_CRYPTO_DATA, M_WAITOK);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_slot(&swd_base, 0);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}

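	/*
	 * Slot allocation: the scan and installation happen under
	 * swcr_spin.  If the table is full it is grown outside the
	 * lock (to CRYPTO_SW_SESSIONS initially, then by 3/2) and
	 * swapped in after rechecking that another thread did not
	 * already grow it.
	 */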
	for (;;) {
		/*
		 * Atomically allocate a session
		 */
		spin_lock(&swcr_spin);
		for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
			if (swcr_sessions[i] == NULL)
				break;
		}
		if (i < swcr_sesnum) {
			swcr_sessions[i] = swd_base;
			swcr_minsesnum = i + 1;
			spin_unlock(&swcr_spin);
			break;
		}
		n = swcr_sesnum;
		spin_unlock(&swcr_spin);

		/*
		 * A larger allocation is required, reallocate the array
		 * and replace, checking for SMP races.
		 */
		if (n < CRYPTO_SW_SESSIONS)
			n = CRYPTO_SW_SESSIONS;
		else
			n = n * 3 / 2;
		swd = kmalloc(n * sizeof(struct swcr_data *),
			      M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		spin_lock(&swcr_spin);
		if (swcr_sesnum >= n) {
			spin_unlock(&swcr_spin);
			kfree(swd, M_CRYPTO_DATA);
		} else if (swcr_sesnum) {
			bcopy(swcr_sessions, swd,
			      swcr_sesnum * sizeof(struct swcr_data *));
			oswd = swcr_sessions;
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
			kfree(oswd, M_CRYPTO_DATA);
		} else {
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
		}
	}

	*sid = i;
	return 0;
}

/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL) {
		return EINVAL;
	}

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	return (swcr_freesession_slot(&swcr_sessions[sid], sid));
}

static int
swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
{
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	struct swcr_data *swd;
	struct swcr_data *swnext;

	/*
	 * Protect session detachment with the spinlock.
	 */
	spin_lock(&swcr_spin);
	swnext = *swdp;
	*swdp = NULL;
	if (sid && swcr_minsesnum > sid)
		swcr_minsesnum = sid;
	spin_unlock(&swcr_spin);

	/*
	 * Clean up at our leisure.
	 */
	while ((swd = swnext) != NULL) {
		swnext = swd->sw_next;

		swd->sw_next = NULL;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		kfree(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	lwkt_yield();
	return 0;
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	/* XXX: wouldn't bet on this BUS_ADD_CHILD being correct */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (0);
}

static int
swcr_attach(device_t dev)
{
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev, CRYPTOCAP_F_SOFTWARE |
					   CRYPTOCAP_F_SYNC |
					   CRYPTOCAP_F_SMP);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_AES_XTS);
	REGISTER(CRYPTO_AES_CTR);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		kfree(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1236