/*	$OpenBSD: cryptosoft.c,v 1.51 2008/06/09 16:07:00 djm Exp $	*/

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <dev/rndvar.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/rmd160.h>
#include <crypto/cast.h>
#include <crypto/skipjack.h>
#include <crypto/blf.h>
#include <crypto/cryptodev.h>
#include <crypto/cryptosoft.h>
#include <crypto/xform.h>

const u_int8_t hmac_ipad_buffer[64] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};

const u_int8_t hmac_opad_buffer[64] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};
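
/*
 * HMAC background: for a hash H with a 64-byte block size,
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), where ipad is 0x36
 * repeated and opad is 0x5C repeated.  swcr_newsession() below
 * precomputes the two partially keyed hash states (sw_ictx and sw_octx)
 * so that swcr_authcompute() only has to hash the payload and then the
 * inner digest for each request.
 */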

struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;
int32_t swcr_id = -1;
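
/*
 * Session bookkeeping for the software driver: swcr_sessions is an array
 * of per-session descriptor lists indexed by session id (slot 0 is never
 * used), swcr_sesnum is the current size of that array, and swcr_id is
 * the driver id handed back by crypto_get_driverid() in swcr_init().
 */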

#define COPYBACK(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
	: cuio_copyback((struct uio *)a,b,c,d)
#define COPYDATA(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
	: cuio_copydata((struct uio *)a,b,c,d)
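
/*
 * COPYBACK/COPYDATA dispatch on the buffer type: a request carries either
 * an mbuf chain (CRYPTO_BUF_MBUF) or a struct uio with an iovec array, and
 * these macros pick m_copyback()/m_copydata() or the cuio_*() equivalents
 * accordingly.  For example, COPYBACK(outtype, buf, crd->crd_inject,
 * ivlen, iv) below writes the IV into the data stream at the injection
 * offset, whichever representation the caller handed us.
 */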

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
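/*
 * The walk below is done block by block: runs of blocks that are
 * contiguous in memory are transformed in place (the "idat" fast path),
 * while a block that straddles an mbuf or iovec boundary is first copied
 * into blk[], transformed, and copied back.  For CBC-style transforms the
 * previous ciphertext block is tracked through ivp (with piv as a spare
 * copy so that in-place decryption does not clobber the block it still
 * needs); transforms that supply a reinit() hook, such as the CTR and XTS
 * xforms, keep their own IV/counter state instead.
 */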
int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int outtype)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks, ind, count, ivlen;
	struct mbuf *m = NULL;
	struct uio *uio = NULL;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	if (outtype == CRYPTO_BUF_MBUF)
		m = (struct mbuf *) buf;
	else
		uio = (struct uio *) buf;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4random_buf(iv, ivlen);

		/* Do we need to write the IV? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
		}

	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
	}
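
	/*
	 * At this point iv[] holds the working IV: for encryption it is
	 * either the caller-supplied IV (CRD_F_IV_EXPLICIT) or a freshly
	 * generated random one, and it has been written into the buffer
	 * at crd_inject unless the caller said it is already there
	 * (CRD_F_IV_PRESENT); for decryption it came from crd_iv or was
	 * read out of the buffer at the same offset.
	 */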

	ivp = iv;

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(sw->sw_kschedule, iv);

	if (outtype == CRYPTO_BUF_MBUF) {
		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;
			/*
			 * Warning: idat may point to garbage here, but we
			 * only dereference it inside the while() loop below,
			 * and only when there is enough data in this mbuf.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
	} else {
		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but we
			 * only dereference it inside the while() loop below,
			 * and only when there is enough data in this iovec.
			 */
			idat = (char *)uio->uio_iov[ind].iov_base + k;

			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}
		}
	}

	return 0; /* Done with encryption/decryption */
}

/*
 * Compute keyed-hash authenticator.
 */
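/*
 * For the HMAC algorithms this runs the inner hash over the payload
 * starting from the precomputed keyed state (sw_ictx), then hashes the
 * inner digest with the precomputed outer state (sw_octx).  The KPDK
 * variants instead append the stored key to the hashed payload, and the
 * plain MD5/SHA1 cases just finalize the digest.  The (possibly
 * truncated) result is written at crd_inject for mbufs, or into
 * crp->crp_mac for iovec requests.
 */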
int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	if (outtype == CRYPTO_BUF_MBUF)
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
		    (caddr_t) &ctx);
	else
		err = cuio_apply((struct uio *) buf, crd->crd_skip,
		    crd->crd_len,
		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
		    (caddr_t) &ctx);

	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5:
	case CRYPTO_SHA1:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crd->crd_inject, axf->authsize, aalg);
	else
		bcopy(aalg, crp->crp_mac, axf->authsize);

	return 0;
}

/*
 * Apply a compression/decompression algorithm.
 */
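/*
 * The compression xform works on one contiguous region, so the input is
 * first copied out of the mbuf chain or iovec into a temporary buffer;
 * the xform allocates the output buffer ("out") itself and returns the
 * result length.  Compression that does not shrink the data is simply
 * discarded.  Otherwise the result is copied back over the original
 * region (growing the mbuf chain if decompression expanded the data)
 * and, if it came out shorter, the tail of the buffer is trimmed.
 */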
int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int outtype)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must hand the whole region to the (de)compression routine at
	 * once, so copy it out of the mbuf chain or iovec into a
	 * contiguous temporary buffer.
	 */

	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return EINVAL;
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;
	/*
	 * Copy back the (de)compressed data; m_copyback will extend the
	 * mbuf chain as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result > crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		if (outtype == CRYPTO_BUF_MBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
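/*
 * Sessions live in the swcr_sessions array, indexed by the session id
 * that is returned through *sid; slot 0 is reserved and the array is
 * doubled in size whenever it fills up.  Each entry is a linked list of
 * struct swcr_data, one per cryptoini in the caller's chain, so a single
 * session can combine, say, a cipher and an authenticator.
 */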
int
swcr_newsession(u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int k;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	}

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
		    M_NOWAIT | M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_NULL:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (txf->setkey(&((*swd)->sw_kschedule), cri->cri_key,
			    cri->cri_klen / 8) < 0) {
				swcr_freesession(i);
				return EINVAL;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256_96;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384_96;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512_96;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}
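
			/*
			 * Precompute the two keyed hash states: XOR the key
			 * with the ipad byte, hash it (padding the rest of
			 * the block with ipad bytes) into sw_ictx, then
			 * flip the key to the opad pattern and do the same
			 * into sw_octx.  The final XOR with HMAC_OPAD_VAL
			 * restores the caller's key.
			 */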

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

/*
 * Free a session.
 */
int
swcr_freesession(u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_XTS:
		case CRYPTO_NULL:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
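/*
 * This is the driver's entry point for crypto_dispatch(): walk the
 * request's descriptor chain, look up the matching swcr_data in the
 * session's list by algorithm, and hand each descriptor to the cipher,
 * authentication or compression routine above.  Any error is reported
 * through crp->crp_etype, and crypto_done() is always called before
 * returning so the framework can run the request's callback.
 */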
int
swcr_process(struct cryptop *crp)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		type = CRYPTO_BUF_MBUF;
	else
		type = CRYPTO_BUF_IOV;

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_NULL:
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_XTS:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

/*
 * Initialize the driver, called from the kernel main().
 */
void
swcr_init(void)
{
	int algs[CRYPTO_ALGORITHM_MAX + 1];
	int flags = CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_ENCRYPT_MAC |
	    CRYPTOCAP_F_MAC_ENCRYPT;

	swcr_id = crypto_get_driverid(flags);
	if (swcr_id < 0) {
		/* This should never happen */
		panic("Software crypto device cannot initialize!");
	}

	bzero(algs, sizeof(algs));

	algs[CRYPTO_DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_BLF_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_CAST_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SKIPJACK_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_MD5_KPDK] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1_KPDK] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_MD5] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_RIJNDAEL128_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_CTR] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_XTS] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_DEFLATE_COMP] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_NULL] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;

	crypto_register(swcr_id, algs, swcr_newsession,
	    swcr_freesession, swcr_process);
}
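
/*
 * Illustrative sketch (not part of the driver): this is roughly how a
 * kernel consumer reaches this code through the crypto framework.  The
 * entry points crypto_newsession(), crypto_getreq() and crypto_dispatch()
 * are real framework calls; the particular algorithm, flags and field
 * values below are only an example.
 *
 *	struct cryptoini cri;
 *	struct cryptop *crp;
 *	u_int64_t sid;
 *
 *	bzero(&cri, sizeof(cri));
 *	cri.cri_alg = CRYPTO_SHA1_HMAC;
 *	cri.cri_klen = 160;			// key length in bits
 *	cri.cri_key = key;
 *	crypto_newsession(&sid, &cri, 0);	// ends up in swcr_newsession()
 *
 *	crp = crypto_getreq(1);			// one descriptor
 *	crp->crp_sid = sid;
 *	crp->crp_ilen = m->m_pkthdr.len;
 *	crp->crp_flags = CRYPTO_F_IMBUF;
 *	crp->crp_buf = (caddr_t)m;
 *	crp->crp_callback = mycallback;
 *	crp->crp_desc->crd_alg = CRYPTO_SHA1_HMAC;
 *	crp->crp_desc->crd_skip = 0;
 *	crp->crp_desc->crd_len = m->m_pkthdr.len - 12;
 *	crp->crp_desc->crd_inject = m->m_pkthdr.len - 12; // 96-bit MAC goes here
 *	crypto_dispatch(crp);			// ends up in swcr_process()
 */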