xref: /dragonfly/sys/opencrypto/cryptosoft.c (revision dcd37f7d)
1 /*	$FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $	*/
2 /*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/
3 
4 /*-
5  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
6  * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
7  *
8  * This code was written by Angelos D. Keromytis in Athens, Greece, in
9  * February 2000. Network Security Technologies Inc. (NSTI) kindly
10  * supported the development of this code.
11  *
12  * Copyright (c) 2000, 2001 Angelos D. Keromytis
13  *
14  * Permission to use, copy, and modify this software with or without fee
15  * is hereby granted, provided that this entire notice is included in
16  * all source code copies of any software which is or includes a copy or
17  * modification of this software.
18  *
19  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23  * PURPOSE.
24  */
25 
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/malloc.h>
29 #include <sys/mbuf.h>
30 #include <sys/module.h>
31 #include <sys/sysctl.h>
32 #include <sys/errno.h>
33 #include <sys/random.h>
34 #include <sys/kernel.h>
35 #include <sys/uio.h>
36 
37 #include <crypto/blowfish/blowfish.h>
38 #include <crypto/sha1.h>
39 #include <opencrypto/rmd160.h>
40 #include <opencrypto/cast.h>
41 #include <opencrypto/skipjack.h>
42 #include <sys/md5.h>
43 
44 #include <opencrypto/cryptodev.h>
45 #include <opencrypto/cryptosoft.h>
46 #include <opencrypto/xform.h>
47 
48 #include <sys/kobj.h>
49 #include <sys/bus.h>
50 #include "cryptodev_if.h"
51 
/* Driver id assigned by crypto_get_driverid() at attach time. */
static	int32_t swcr_id;
/* Session table: slot 0 is never used; grown by doubling in swcr_newsession(). */
static	struct swcr_data **swcr_sessions = NULL;
/* Number of slots allocated in swcr_sessions. */
static	u_int32_t swcr_sesnum;

/* Precomputed HMAC pad bytes (filled with IPAD/OPAD values at attach). */
u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);
63 
/*
 * Apply a symmetric encryption/decryption algorithm (CBC chaining).
 *
 * "buf" is interpreted according to "flags": an mbuf chain when
 * CRYPTO_F_IMBUF is set, a struct uio when CRYPTO_F_IOV is set,
 * and a contiguous kernel buffer otherwise.  crd->crd_len must be
 * a multiple of the cipher block size.  Returns 0 on success,
 * EINVAL on a malformed request, or the setkey error code.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else
			karc4rand(iv, blks);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, blks, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
		}
	}

	/* Re-key on the fly when the caller supplied a per-request key. */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));
		error = exf->setkey(&sw->sw_kschedule,
				crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	/*
	 * ivp always points at the current chaining block: initially the
	 * IV, thereafter the previous ciphertext block (kept in iv/piv,
	 * or pointed at in place for the fast in-buffer paths).
	 */
	ivp = iv;

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block.  piv is used so we
					 * don't clobber iv while ivp still
					 * points into it.
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			/* Fast path: whole blocks transformed in place. */
			while (m->m_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			/* Fast path: whole blocks transformed in place. */
			while (iov->iov_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}

		return 0; /* Done with iovec encryption/decryption */
	} else {	/* contiguous buffer */
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {		/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0; /* Done with contiguous buffer encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}
398 
/*
 * Prepare the per-session hash contexts for a (re)keyed MAC.
 *
 * For HMAC algorithms this precomputes the inner (sw_ictx) and outer
 * (sw_octx) contexts over key^ipad and key^opad; the key buffer is
 * XOR-modified in place and restored before returning.  klen is in
 * bits on entry.
 *
 * NOTE(review): the HMAC path assumes klen <= axf->blocksize; a longer
 * key would make "axf->blocksize - klen" negative — callers apparently
 * guarantee this, confirm before reusing.
 */
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;	/* bits -> bytes */

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		/* key ^= ipad, then pad with precomputed ipad bytes. */
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		/* Flip ipad to opad in one pass: x^i^(i^o) == x^o. */
		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		/* Undo the opad XOR so the caller's key is unmodified. */
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/* We need a buffer that can hold an md5 and a sha1 result. */
		u_char buf[SHA1_RESULTLEN];

		/* Key is kept in sw_octx so it can be appended at compute time. */
		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		/* NOTE(review): Final result is discarded; presumably only the
		 * padding side effect on sw_ictx matters here — verify. */
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}
450 
/*
 * Compute keyed-hash authenticator.
 *
 * Runs the configured hash over crd->crd_len bytes starting at
 * crd->crd_skip and writes the (possibly truncated, per sw_mlen)
 * digest into the buffer at crd->crd_inject.  Returns 0 on success
 * or EINVAL if the session has no hash context.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	/* Per-request key: rebuild the precomputed contexts first. */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	/* Work on a copy so the precomputed inner context stays reusable. */
	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* HMAC outer pass: H(key^opad || inner digest). */
		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* KPDK: append the raw key (stored in sw_octx) and finalize. */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
513 
514 /*
515  * Apply a compression/decompression algorithm
516  */
517 static int
518 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
519     caddr_t buf, int flags)
520 {
521 	u_int8_t *data, *out;
522 	struct comp_algo *cxf;
523 	int adj;
524 	u_int32_t result;
525 
526 	cxf = sw->sw_cxf;
527 
528 	/* We must handle the whole buffer of data in one time
529 	 * then if there is not all the data in the mbuf, we must
530 	 * copy in a buffer.
531 	 */
532 
533 	data = kmalloc(crd->crd_len, M_CRYPTO_DATA,  M_NOWAIT);
534 	if (data == NULL)
535 		return (EINVAL);
536 	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
537 
538 	if (crd->crd_flags & CRD_F_COMP)
539 		result = cxf->compress(data, crd->crd_len, &out);
540 	else
541 		result = cxf->decompress(data, crd->crd_len, &out);
542 
543 	kfree(data, M_CRYPTO_DATA);
544 	if (result == 0)
545 		return EINVAL;
546 
547 	/* Copy back the (de)compressed data. m_copyback is
548 	 * extending the mbuf as necessary.
549 	 */
550 	sw->sw_size = result;
551 	/* Check the compressed size when doing compression */
552 	if (crd->crd_flags & CRD_F_COMP) {
553 		if (result > crd->crd_len) {
554 			/* Compression was useless, we lost time */
555 			kfree(out, M_CRYPTO_DATA);
556 			return 0;
557 		}
558 	}
559 
560 	crypto_copyback(flags, buf, crd->crd_skip, result, out);
561 	if (result < crd->crd_len) {
562 		adj = result - crd->crd_len;
563 		if (flags & CRYPTO_F_IMBUF) {
564 			adj = result - crd->crd_len;
565 			m_adj((struct mbuf *)buf, adj);
566 		} else if (flags & CRYPTO_F_IOV) {
567 			struct uio *uio = (struct uio *)buf;
568 			int ind;
569 
570 			adj = crd->crd_len - result;
571 			ind = uio->uio_iovcnt - 1;
572 
573 			while (adj > 0 && ind >= 0) {
574 				if (adj < uio->uio_iov[ind].iov_len) {
575 					uio->uio_iov[ind].iov_len -= adj;
576 					break;
577 				}
578 
579 				adj -= uio->uio_iov[ind].iov_len;
580 				uio->uio_iov[ind].iov_len = 0;
581 				ind--;
582 				uio->uio_iovcnt--;
583 			}
584 		}
585 	}
586 	kfree(out, M_CRYPTO_DATA);
587 	return 0;
588 }
589 
/*
 * Generate a new software session.
 *
 * Walks the cryptoini chain, allocating one swcr_data per transform
 * and linking them via sw_next.  The session id (*sid) is an index
 * into the global swcr_sessions table, which is grown by doubling on
 * demand; slot 0 is never handed out.  Returns 0, EINVAL or ENOBUFS.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Find the first free slot (>= 1), if any. */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	/* Table missing or full: (re)allocate at double the size. */
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			kfree(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/* One swcr_data per requested transform, chained via sw_next. */
	while (cri) {
		*swd = kmalloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			/* Tear down whatever was built so far. */
			swcr_freesession(dev, i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			/* Key may also arrive per-request (CRD_F_KEY_EXPLICIT). */
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession(dev, i);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			/* Inner and outer HMAC contexts, precomputed below. */
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			/* For KPDK sw_octx holds the raw key, not a context. */
			(*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(dev, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
798 
799 /*
800  * Free a session.
801  */
802 static int
803 swcr_freesession(device_t dev, u_int64_t tid)
804 {
805 	struct swcr_data *swd;
806 	struct enc_xform *txf;
807 	struct auth_hash *axf;
808 	struct comp_algo *cxf;
809 	u_int32_t sid = CRYPTO_SESID2LID(tid);
810 
811 	if (sid > swcr_sesnum || swcr_sessions == NULL ||
812 	    swcr_sessions[sid] == NULL)
813 		return EINVAL;
814 
815 	/* Silently accept and return */
816 	if (sid == 0)
817 		return 0;
818 
819 	while ((swd = swcr_sessions[sid]) != NULL) {
820 		swcr_sessions[sid] = swd->sw_next;
821 
822 		switch (swd->sw_alg) {
823 		case CRYPTO_DES_CBC:
824 		case CRYPTO_3DES_CBC:
825 		case CRYPTO_BLF_CBC:
826 		case CRYPTO_CAST_CBC:
827 		case CRYPTO_SKIPJACK_CBC:
828 		case CRYPTO_RIJNDAEL128_CBC:
829 		case CRYPTO_CAMELLIA_CBC:
830 		case CRYPTO_NULL_CBC:
831 			txf = swd->sw_exf;
832 
833 			if (swd->sw_kschedule)
834 				txf->zerokey(&(swd->sw_kschedule));
835 			break;
836 
837 		case CRYPTO_MD5_HMAC:
838 		case CRYPTO_SHA1_HMAC:
839 		case CRYPTO_SHA2_256_HMAC:
840 		case CRYPTO_SHA2_384_HMAC:
841 		case CRYPTO_SHA2_512_HMAC:
842 		case CRYPTO_RIPEMD160_HMAC:
843 		case CRYPTO_NULL_HMAC:
844 			axf = swd->sw_axf;
845 
846 			if (swd->sw_ictx) {
847 				bzero(swd->sw_ictx, axf->ctxsize);
848 				kfree(swd->sw_ictx, M_CRYPTO_DATA);
849 			}
850 			if (swd->sw_octx) {
851 				bzero(swd->sw_octx, axf->ctxsize);
852 				kfree(swd->sw_octx, M_CRYPTO_DATA);
853 			}
854 			break;
855 
856 		case CRYPTO_MD5_KPDK:
857 		case CRYPTO_SHA1_KPDK:
858 			axf = swd->sw_axf;
859 
860 			if (swd->sw_ictx) {
861 				bzero(swd->sw_ictx, axf->ctxsize);
862 				kfree(swd->sw_ictx, M_CRYPTO_DATA);
863 			}
864 			if (swd->sw_octx) {
865 				bzero(swd->sw_octx, swd->sw_klen);
866 				kfree(swd->sw_octx, M_CRYPTO_DATA);
867 			}
868 			break;
869 
870 		case CRYPTO_MD5:
871 		case CRYPTO_SHA1:
872 			axf = swd->sw_axf;
873 
874 			if (swd->sw_ictx)
875 				kfree(swd->sw_ictx, M_CRYPTO_DATA);
876 			break;
877 
878 		case CRYPTO_DEFLATE_COMP:
879 			cxf = swd->sw_cxf;
880 			break;
881 		}
882 
883 		//FREE(swd, M_CRYPTO_DATA);
884 		kfree(swd, M_CRYPTO_DATA);
885 	}
886 	return 0;
887 }
888 
/*
 * Process a software request.
 *
 * For each descriptor on the request we locate the matching
 * per-session transform state (by algorithm) and run the
 * encrypt/auth/compress worker.  Per-descriptor errors are reported
 * via crp->crp_etype; crypto_done() is always called.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			/* Null cipher: nothing to do. */
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				/* Report the (de)compressed output length. */
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}
986 
987 static void
988 swcr_identify(driver_t *drv, device_t parent)
989 {
990 	/* NB: order 10 is so we get attached after h/w devices */
991 	/* XXX: wouldn't bet about this BUS_ADD_CHILD correctness */
992 	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
993 	    BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0)
994 		panic("cryptosoft: could not attach");
995 }
996 
997 static int
998 swcr_probe(device_t dev)
999 {
1000 	device_set_desc(dev, "software crypto");
1001 	return (0);
1002 }
1003 
1004 static int
1005 swcr_attach(device_t dev)
1006 {
1007 	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
1008 	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
1009 
1010 	swcr_id = crypto_get_driverid(dev,
1011 			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
1012 	if (swcr_id < 0) {
1013 		device_printf(dev, "cannot initialize!");
1014 		return ENOMEM;
1015 	}
1016 #define	REGISTER(alg) \
1017 	crypto_register(swcr_id, alg, 0,0)
1018 	REGISTER(CRYPTO_DES_CBC);
1019 	REGISTER(CRYPTO_3DES_CBC);
1020 	REGISTER(CRYPTO_BLF_CBC);
1021 	REGISTER(CRYPTO_CAST_CBC);
1022 	REGISTER(CRYPTO_SKIPJACK_CBC);
1023 	REGISTER(CRYPTO_NULL_CBC);
1024 	REGISTER(CRYPTO_MD5_HMAC);
1025 	REGISTER(CRYPTO_SHA1_HMAC);
1026 	REGISTER(CRYPTO_SHA2_256_HMAC);
1027 	REGISTER(CRYPTO_SHA2_384_HMAC);
1028 	REGISTER(CRYPTO_SHA2_512_HMAC);
1029 	REGISTER(CRYPTO_RIPEMD160_HMAC);
1030 	REGISTER(CRYPTO_NULL_HMAC);
1031 	REGISTER(CRYPTO_MD5_KPDK);
1032 	REGISTER(CRYPTO_SHA1_KPDK);
1033 	REGISTER(CRYPTO_MD5);
1034 	REGISTER(CRYPTO_SHA1);
1035 	REGISTER(CRYPTO_RIJNDAEL128_CBC);
1036 	REGISTER(CRYPTO_CAMELLIA_CBC);
1037 	REGISTER(CRYPTO_DEFLATE_COMP);
1038 #undef REGISTER
1039 
1040 	return 0;
1041 }
1042 
1043 static int
1044 swcr_detach(device_t dev)
1045 {
1046 	crypto_unregister_all(swcr_id);
1047 	if (swcr_sessions != NULL)
1048 		kfree(swcr_sessions, M_CRYPTO_DATA);
1049 	return 0;
1050 }
1051 
/* newbus glue: device life-cycle plus the crypto driver entry points. */
static device_method_t swcr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	/* crypto(9) driver interface */
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1084