xref: /openbsd/sys/crypto/cryptosoft.c (revision 8932bfb7)
1 /*	$OpenBSD: cryptosoft.c,v 1.63 2011/01/11 23:00:21 markus Exp $	*/
2 
3 /*
4  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5  *
6  * This code was written by Angelos D. Keromytis in Athens, Greece, in
7  * February 2000. Network Security Technologies Inc. (NSTI) kindly
8  * supported the development of this code.
9  *
10  * Copyright (c) 2000, 2001 Angelos D. Keromytis
11  *
12  * Permission to use, copy, and modify this software with or without fee
13  * is hereby granted, provided that this entire notice is included in
14  * all source code copies of any software which is or includes a copy or
15  * modification of this software.
16  *
17  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
18  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
19  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
20  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
21  * PURPOSE.
22  */
23 
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/malloc.h>
27 #include <sys/mbuf.h>
28 #include <sys/errno.h>
29 #include <dev/rndvar.h>
30 #include <crypto/md5.h>
31 #include <crypto/sha1.h>
32 #include <crypto/rmd160.h>
33 #include <crypto/cast.h>
34 #include <crypto/blf.h>
35 #include <crypto/cryptodev.h>
36 #include <crypto/cryptosoft.h>
37 #include <crypto/xform.h>
38 
/*
 * HMAC inner-pad constant: one maximum-size hash block filled with the
 * 0x36 "ipad" byte defined by RFC 2104.  Used below in swcr_newsession()
 * to prime the inner hash context for the part of the block not covered
 * by the (already ipad-XOR'ed) key.
 */
39 const u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN] = {
40 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
41 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
42 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
43 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
44 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
45 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
46 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
47 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
48 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
49 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
50 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
51 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
52 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
53 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
54 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
55 	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
56 };
57 
/*
 * HMAC outer-pad constant: one maximum-size hash block filled with the
 * 0x5C "opad" byte defined by RFC 2104, used to prime the outer hash
 * context in swcr_newsession().
 */
58 const u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN] = {
59 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
60 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
61 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
62 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
63 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
64 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
65 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
66 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
67 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
68 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
69 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
70 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
71 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
72 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
73 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
74 	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
75 };
76 
77 
/* Session table, indexed by session id; slot 0 is deliberately unused. */
78 struct swcr_data **swcr_sessions = NULL;
/* Number of slots currently allocated in swcr_sessions. */
79 u_int32_t swcr_sesnum = 0;
/* Driver id from crypto_get_driverid(); -1 until swcr_init() runs. */
80 int32_t swcr_id = -1;
81 
/*
 * COPYBACK(x, a, b, c, d): write c bytes from d into buffer a at offset
 * b, where a is either an mbuf chain or a uio as selected by the
 * buffer-type code x (CRYPTO_BUF_MBUF vs. anything else).
 */
82 #define COPYBACK(x, a, b, c, d) \
83 	do { \
84 		if ((x) == CRYPTO_BUF_MBUF) \
85 			m_copyback((struct mbuf *)a,b,c,d,M_NOWAIT); \
86 		else \
87 			cuio_copyback((struct uio *)a,b,c,d); \
88 	} while (0)
/*
 * COPYDATA(x, a, b, c, d): read c bytes at offset b of buffer a into d;
 * same buffer-type dispatch as COPYBACK.
 */
89 #define COPYDATA(x, a, b, c, d) \
90 	do { \
91 		if ((x) == CRYPTO_BUF_MBUF) \
92 			m_copydata((struct mbuf *)a,b,c,d); \
93 		else \
94 			cuio_copydata((struct uio *)a,b,c,d); \
95 	} while (0)
96 
97 /*
98  * Apply a symmetric encryption/decryption algorithm.
99  */
/*
 * swcr_encdec() - run the session's symmetric cipher over crd_len bytes
 * of buf, in place.
 *
 * crd     - descriptor: offsets (crd_skip, crd_len, crd_inject) and
 *           flags (encrypt/decrypt, explicit IV, IV already present).
 * sw      - software session state: key schedule and enc_xform ops.
 * buf     - struct mbuf * or struct uio *, as selected by outtype.
 * outtype - CRYPTO_BUF_MBUF or CRYPTO_BUF_IOV.
 *
 * Transforms that supply a reinit hook (e.g. counter modes) do their
 * own IV handling; otherwise CBC chaining is performed here.  Returns
 * 0 on success, EINVAL if crd_len is not a multiple of the cipher
 * block size or the buffer is shorter than the descriptor claims.
 */
100 int
101 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
102     int outtype)
103 {
104 	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
105 	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
106 	struct enc_xform *exf;
107 	int i, k, j, blks, ind, count, ivlen;
108 	struct mbuf *m = NULL;
109 	struct uio *uio = NULL;
110 
111 	exf = sw->sw_exf;
112 	blks = exf->blocksize;
113 	ivlen = exf->ivsize;
114 
115 	/* Check for non-padded data */
116 	if (crd->crd_len % blks)
117 		return EINVAL;
118 
119 	if (outtype == CRYPTO_BUF_MBUF)
120 		m = (struct mbuf *) buf;
121 	else
122 		uio = (struct uio *) buf;
123 
124 	/* Initialize the IV */
125 	if (crd->crd_flags & CRD_F_ENCRYPT) {
126 		/* IV explicitly provided ? */
127 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
128 			bcopy(crd->crd_iv, iv, ivlen);
129 		else
130 			arc4random_buf(iv, ivlen);
131 
132 		/* Do we need to write the IV */
133 		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
134 			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
135 
136 	} else {	/* Decryption */
137 			/* IV explicitly provided ? */
138 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
139 			bcopy(crd->crd_iv, iv, ivlen);
140 		else {
141 			/* Get IV off buf */
142 			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
143 		}
144 	}
145 
	/*
	 * ivp always points at the block to chain with next.  On the
	 * decrypt side, iv and iv2 are used as a ping-pong pair (via
	 * nivp) so the previous ciphertext block survives the in-place
	 * decryption of the current one.
	 */
146 	ivp = iv;
147 
148 	/*
149 	 * xforms that provide a reinit method perform all IV
150 	 * handling themselves.
151 	 */
152 	if (exf->reinit)
153 		exf->reinit(sw->sw_kschedule, iv);
154 
155 	if (outtype == CRYPTO_BUF_MBUF) {
156 		/* Find beginning of data */
157 		m = m_getptr(m, crd->crd_skip, &k);
158 		if (m == NULL)
159 			return EINVAL;
160 
161 		i = crd->crd_len;
162 
163 		while (i > 0) {
164 			/*
165 			 * If there's insufficient data at the end of
166 			 * an mbuf, we have to do some copying.
167 			 */
168 			if (m->m_len < k + blks && m->m_len != k) {
169 				m_copydata(m, k, blks, blk);
170 
171 				/* Actual encryption/decryption */
172 				if (exf->reinit) {
173 					if (crd->crd_flags & CRD_F_ENCRYPT) {
174 						exf->encrypt(sw->sw_kschedule,
175 						    blk);
176 					} else {
177 						exf->decrypt(sw->sw_kschedule,
178 						    blk);
179 					}
180 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
181 					/* XOR with previous block */
182 					for (j = 0; j < blks; j++)
183 						blk[j] ^= ivp[j];
184 
185 					exf->encrypt(sw->sw_kschedule, blk);
186 
187 					/*
188 					 * Keep encrypted block for XOR'ing
189 					 * with next block
190 					 */
191 					bcopy(blk, iv, blks);
192 					ivp = iv;
193 				} else {	/* decrypt */
194 					/*
195 					 * Keep encrypted block for XOR'ing
196 					 * with next block
197 					 */
198 					nivp = (ivp == iv) ? iv2 : iv;
199 					bcopy(blk, nivp, blks);
200 
201 					exf->decrypt(sw->sw_kschedule, blk);
202 
203 					/* XOR with previous block */
204 					for (j = 0; j < blks; j++)
205 						blk[j] ^= ivp[j];
206 					ivp = nivp;
207 				}
208 
209 				/* Copy back decrypted block */
210 				m_copyback(m, k, blks, blk, M_NOWAIT);
211 
212 				/* Advance pointer */
213 				m = m_getptr(m, k + blks, &k);
214 				if (m == NULL)
215 					return EINVAL;
216 
217 				i -= blks;
218 
219 				/* Could be done... */
220 				if (i == 0)
221 					break;
222 			}
223 
224 			/* Skip possibly empty mbufs */
225 			if (k == m->m_len) {
226 				for (m = m->m_next; m && m->m_len == 0;
227 				    m = m->m_next)
228 					;
229 				k = 0;
230 			}
231 
232 			/* Sanity check */
233 			if (m == NULL)
234 				return EINVAL;
235 
236 			/*
237 			 * Warning: idat may point to garbage here, but
238 			 * we only use it in the while() loop, only if
239 			 * there are indeed enough data.
240 			 */
241 			idat = mtod(m, unsigned char *) + k;
242 
243 			while (m->m_len >= k + blks && i > 0) {
244 				if (exf->reinit) {
245 					if (crd->crd_flags & CRD_F_ENCRYPT) {
246 						exf->encrypt(sw->sw_kschedule,
247 						    idat);
248 					} else {
249 						exf->decrypt(sw->sw_kschedule,
250 						    idat);
251 					}
252 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
253 					/* XOR with previous block/IV */
254 					for (j = 0; j < blks; j++)
255 						idat[j] ^= ivp[j];
256 
257 					exf->encrypt(sw->sw_kschedule, idat);
258 					ivp = idat;
259 				} else {	/* decrypt */
260 					/*
261 					 * Keep encrypted block to be used
262 					 * in next block's processing.
263 					 */
264 					nivp = (ivp == iv) ? iv2 : iv;
265 					bcopy(idat, nivp, blks);
266 
267 					exf->decrypt(sw->sw_kschedule, idat);
268 
269 					/* XOR with previous block/IV */
270 					for (j = 0; j < blks; j++)
271 						idat[j] ^= ivp[j];
272 					ivp = nivp;
273 				}
274 
275 				idat += blks;
276 				k += blks;
277 				i -= blks;
278 			}
279 		}
280 	} else {
281 		/* Find beginning of data */
282 		count = crd->crd_skip;
283 		ind = cuio_getptr(uio, count, &k);
284 		if (ind == -1)
285 			return EINVAL;
286 
287 		i = crd->crd_len;
288 
289 		while (i > 0) {
290 			/*
291 			 * If there's insufficient data at the end,
292 			 * we have to do some copying.
293 			 */
294 			if (uio->uio_iov[ind].iov_len < k + blks &&
295 			    uio->uio_iov[ind].iov_len != k) {
296 				cuio_copydata(uio, count, blks, blk);
297 
298 				/* Actual encryption/decryption */
299 				if (exf->reinit) {
300 					if (crd->crd_flags & CRD_F_ENCRYPT) {
301 						exf->encrypt(sw->sw_kschedule,
302 						    blk);
303 					} else {
304 						exf->decrypt(sw->sw_kschedule,
305 						    blk);
306 					}
307 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
308 					/* XOR with previous block */
309 					for (j = 0; j < blks; j++)
310 						blk[j] ^= ivp[j];
311 
312 					exf->encrypt(sw->sw_kschedule, blk);
313 
314 					/*
315 					 * Keep encrypted block for XOR'ing
316 					 * with next block
317 					 */
318 					bcopy(blk, iv, blks);
319 					ivp = iv;
320 				} else {	/* decrypt */
321 					/*
322 					 * Keep encrypted block for XOR'ing
323 					 * with next block
324 					 */
325 					nivp = (ivp == iv) ? iv2 : iv;
326 					bcopy(blk, nivp, blks);
327 
328 					exf->decrypt(sw->sw_kschedule, blk);
329 
330 					/* XOR with previous block */
331 					for (j = 0; j < blks; j++)
332 						blk[j] ^= ivp[j];
333 					ivp = nivp;
334 				}
335 
336 				/* Copy back decrypted block */
337 				cuio_copyback(uio, count, blks, blk);
338 
339 				count += blks;
340 
341 				/* Advance pointer */
342 				ind = cuio_getptr(uio, count, &k);
343 				if (ind == -1)
344 					return (EINVAL);
345 
346 				i -= blks;
347 
348 				/* Could be done... */
349 				if (i == 0)
350 					break;
351 			}
352 
353 			/*
354 			 * Warning: idat may point to garbage here, but
355 			 * we only use it in the while() loop, only if
356 			 * there are indeed enough data.
357 			 */
358 			idat = (char *)uio->uio_iov[ind].iov_base + k;
359 
360 			while (uio->uio_iov[ind].iov_len >= k + blks &&
361 			    i > 0) {
362 				if (exf->reinit) {
363 					if (crd->crd_flags & CRD_F_ENCRYPT) {
364 						exf->encrypt(sw->sw_kschedule,
365 						    idat);
366 					} else {
367 						exf->decrypt(sw->sw_kschedule,
368 						    idat);
369 					}
370 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
371 					/* XOR with previous block/IV */
372 					for (j = 0; j < blks; j++)
373 						idat[j] ^= ivp[j];
374 
375 					exf->encrypt(sw->sw_kschedule, idat);
376 					ivp = idat;
377 				} else {	/* decrypt */
378 					/*
379 					 * Keep encrypted block to be used
380 					 * in next block's processing.
381 					 */
382 					nivp = (ivp == iv) ? iv2 : iv;
383 					bcopy(idat, nivp, blks);
384 
385 					exf->decrypt(sw->sw_kschedule, idat);
386 
387 					/* XOR with previous block/IV */
388 					for (j = 0; j < blks; j++)
389 						idat[j] ^= ivp[j];
390 					ivp = nivp;
391 				}
392 
393 				idat += blks;
394 				count += blks;
395 				k += blks;
396 				i -= blks;
397 			}
398 
399 			/*
400 			 * Advance to the next iov if the end of the current iov
401 			 * is aligned with the end of a cipher block.
402 			 * Note that the code is equivalent to calling:
403 			 *	ind = cuio_getptr(uio, count, &k);
404 			 */
405 			if (i > 0 && k == uio->uio_iov[ind].iov_len) {
406 				k = 0;
407 				ind++;
408 				if (ind >= uio->uio_iovcnt)
409 					return (EINVAL);
410 			}
411 		}
412 	}
413 
414 	return 0; /* Done with encryption/decryption */
415 }
416 
417 /*
418  * Compute keyed-hash authenticator.
419  */
/*
 * swcr_authcompute() - compute the (possibly keyed) hash over crd_len
 * bytes of buf and inject the result.
 *
 * crp     - the overall request; crp_mac receives the digest for
 *           iovec buffers.
 * crd     - descriptor: region (crd_skip/crd_len) and injection
 *           offset (crd_inject).
 * sw      - session state: precomputed inner/outer contexts and the
 *           auth_hash ops table.
 * buf     - struct mbuf * or struct uio *, as selected by outtype.
 * outtype - CRYPTO_BUF_MBUF or CRYPTO_BUF_IOV; mbufs get the digest
 *           written in-band at crd_inject, iovecs get it in crp_mac.
 *
 * Returns 0 on success, EINVAL on missing contexts, or the error from
 * walking the buffer.
 */
420 int
421 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
422     struct swcr_data *sw, caddr_t buf, int outtype)
423 {
424 	unsigned char aalg[AALG_MAX_RESULT_LEN];
425 	struct auth_hash *axf;
426 	union authctx ctx;
427 	int err;
428 
429 	if (sw->sw_ictx == 0)
430 		return EINVAL;
431 
432 	axf = sw->sw_axf;
433 
	/* Start from the precomputed (key-primed) inner context. */
434 	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
435 
436 	if (outtype == CRYPTO_BUF_MBUF)
437 		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
438 		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
439 		    (caddr_t) &ctx);
440 	else
441 		err = cuio_apply((struct uio *) buf, crd->crd_skip,
442 		    crd->crd_len,
443 		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
444 		    (caddr_t) &ctx);
445 
446 	if (err)
447 		return err;
448 
449 	switch (sw->sw_alg) {
450 	case CRYPTO_MD5_HMAC:
451 	case CRYPTO_SHA1_HMAC:
452 	case CRYPTO_RIPEMD160_HMAC:
453 	case CRYPTO_SHA2_256_HMAC:
454 	case CRYPTO_SHA2_384_HMAC:
455 	case CRYPTO_SHA2_512_HMAC:
456 		if (sw->sw_octx == NULL)
457 			return EINVAL;
458 
		/* HMAC: outer pass = H(okey-pad || inner digest). */
459 		axf->Final(aalg, &ctx);
460 		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
461 		axf->Update(&ctx, aalg, axf->hashsize);
462 		axf->Final(aalg, &ctx);
463 		break;
464 
465 	case CRYPTO_MD5_KPDK:
466 	case CRYPTO_SHA1_KPDK:
467 		if (sw->sw_octx == NULL)
468 			return EINVAL;
469 
		/* KPDK: append the stored raw key to the data. */
470 		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
471 		axf->Final(aalg, &ctx);
472 		break;
473 
474 	case CRYPTO_MD5:
475 	case CRYPTO_SHA1:
476 		axf->Final(aalg, &ctx);
477 		break;
478 	}
479 
	/*
	 * NOTE(review): aalg and ctx hold key-derived material and are
	 * left on the stack without being wiped — confirm whether that
	 * is acceptable here.
	 */
480 	/* Inject the authentication data */
481 	if (outtype == CRYPTO_BUF_MBUF)
482 		COPYBACK(outtype, buf, crd->crd_inject, axf->authsize, aalg);
483 	else
484 		bcopy(aalg, crp->crp_mac, axf->authsize);
485 
486 	return 0;
487 }
488 
489 /*
490  * Apply a combined encryption-authentication transformation
491  */
/*
 * swcr_combined() - process an AEAD-style request (AES-GCM / AES-GMAC)
 * that pairs one cipher descriptor (CRYPTO_AES_GCM_16 / CRYPTO_AES_GMAC)
 * with one authentication descriptor (CRYPTO_AES_{128,192,256}_GMAC)
 * from the same session.  Encrypt-then-MAC on the encrypt path,
 * MAC-then-decrypt on the decrypt path.
 *
 * Returns 0 on success, EINVAL if either descriptor is missing or the
 * session lacks the matching state.
 */
492 int
493 swcr_combined(struct cryptop *crp)
494 {
495 	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
496 	u_char *blk = (u_char *)blkbuf;
497 	u_char aalg[AALG_MAX_RESULT_LEN];
498 	u_char iv[EALG_MAX_BLOCK_LEN];
499 	union authctx ctx;
500 	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
501 	struct swcr_data *sw, *swa, *swe;
502 	struct auth_hash *axf = NULL;
503 	struct enc_xform *exf = NULL;
504 	struct mbuf *m = NULL;
505 	struct uio *uio = NULL;
506 	caddr_t buf = (caddr_t)crp->crp_buf;
507 	uint32_t *blkp;
508 	int i, blksz, ivlen, outtype, len;
509 
	/*
	 * Pair up each descriptor with its per-algorithm session state:
	 * crde/swe for the cipher, crda/swa for the authenticator.
	 */
510 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
511 		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
512 		     sw && sw->sw_alg != crd->crd_alg;
513 		     sw = sw->sw_next)
514 			;
515 		if (sw == NULL)
516 			return (EINVAL);
517 
518 		switch (sw->sw_alg) {
519 		case CRYPTO_AES_GCM_16:
520 		case CRYPTO_AES_GMAC:
521 			swe = sw;
522 			crde = crd;
523 			exf = swe->sw_exf;
524 			ivlen = exf->ivsize;
525 			break;
526 		case CRYPTO_AES_128_GMAC:
527 		case CRYPTO_AES_192_GMAC:
528 		case CRYPTO_AES_256_GMAC:
529 			swa = sw;
530 			crda = crd;
531 			axf = swa->sw_axf;
532 			if (swa->sw_ictx == 0)
533 				return (EINVAL);
534 			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
535 			blksz = axf->blocksize;
536 			break;
537 		default:
538 			return (EINVAL);
539 		}
540 	}
	/* Both halves are required; this also guarantees blksz/ivlen are set. */
541 	if (crde == NULL || crda == NULL)
542 		return (EINVAL);
543 
544 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
545 		outtype = CRYPTO_BUF_MBUF;
546 		m = (struct mbuf *)buf;
547 	} else {
548 		outtype = CRYPTO_BUF_IOV;
549 		uio = (struct uio *)buf;
550 	}
551 
552 	/* Initialize the IV */
553 	if (crde->crd_flags & CRD_F_ENCRYPT) {
554 		/* IV explicitly provided ? */
555 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
556 			bcopy(crde->crd_iv, iv, ivlen);
557 		else
558 			arc4random_buf(iv, ivlen);
559 
560 		/* Do we need to write the IV */
561 		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
562 			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);
563 
564 	} else {	/* Decryption */
565 			/* IV explicitly provided ? */
566 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
567 			bcopy(crde->crd_iv, iv, ivlen);
568 		else {
569 			/* Get IV off buf */
570 			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
571 		}
572 	}
573 
574 	/* Supply MAC with IV */
575 	if (axf->Reinit)
576 		axf->Reinit(&ctx, iv, ivlen);
577 
578 	/* Supply MAC with AAD */
579 	for (i = 0; i < crda->crd_len; i += blksz) {
580 		len = MIN(crda->crd_len - i, blksz);
581 		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk);
582 		axf->Update(&ctx, blk, len);
583 	}
584 
585 	if (exf->reinit)
586 		exf->reinit(swe->sw_kschedule, iv);
587 
588 	/* Do encryption/decryption with MAC */
589 	for (i = 0; i < crde->crd_len; i += blksz) {
590 		len = MIN(crde->crd_len - i, blksz);
		/* Zero-pad a short tail so the cipher sees a full block. */
591 		if (len < blksz)
592 			bzero(blk, blksz);
593 		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
594 		if (crde->crd_flags & CRD_F_ENCRYPT) {
595 			exf->encrypt(swe->sw_kschedule, blk);
596 			axf->Update(&ctx, blk, len);
597 		} else {
598 			axf->Update(&ctx, blk, len);
599 			exf->decrypt(swe->sw_kschedule, blk);
600 		}
601 		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
602 	}
603 
604 	/* Do any required special finalization */
605 	switch (crda->crd_alg) {
606 		case CRYPTO_AES_128_GMAC:
607 		case CRYPTO_AES_192_GMAC:
608 		case CRYPTO_AES_256_GMAC:
			/*
			 * GHASH length block: 64-bit big-endian bit
			 * counts of the AAD and of the cipher payload;
			 * only the low 32 bits of each are written, the
			 * high words stay zero from the bzero above.
			 */
609 			/* length block */
610 			bzero(blk, blksz);
611 			blkp = (uint32_t *)blk + 1;
612 			*blkp = htobe32(crda->crd_len * 8);
613 			blkp = (uint32_t *)blk + 3;
614 			*blkp = htobe32(crde->crd_len * 8);
615 			axf->Update(&ctx, blk, blksz);
616 			break;
617 	}
618 
619 	/* Finalize MAC */
620 	axf->Final(aalg, &ctx);
621 
622 	/* Inject the authentication data */
623 	if (outtype == CRYPTO_BUF_MBUF)
624 		COPYBACK(outtype, buf, crda->crd_inject, axf->authsize, aalg);
625 	else
626 		bcopy(aalg, crp->crp_mac, axf->authsize);
627 
628 	return (0);
629 }
630 
631 /*
632  * Apply a compression/decompression algorithm
633  */
634 int
635 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
636     caddr_t buf, int outtype)
637 {
638 	u_int8_t *data, *out;
639 	struct comp_algo *cxf;
640 	int adj;
641 	u_int32_t result;
642 
643 	cxf = sw->sw_cxf;
644 
645 	/* We must handle the whole buffer of data in one time
646 	 * then if there is not all the data in the mbuf, we must
647 	 * copy in a buffer.
648 	 */
649 
650 	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
651 	if (data == NULL)
652 		return (EINVAL);
653 	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
654 
655 	if (crd->crd_flags & CRD_F_COMP)
656 		result = cxf->compress(data, crd->crd_len, &out);
657 	else
658 		result = cxf->decompress(data, crd->crd_len, &out);
659 
660 	free(data, M_CRYPTO_DATA);
661 	if (result == 0)
662 		return EINVAL;
663 
664 	/* Copy back the (de)compressed data. m_copyback is
665 	 * extending the mbuf as necessary.
666 	 */
667 	sw->sw_size = result;
668 	/* Check the compressed size when doing compression */
669 	if (crd->crd_flags & CRD_F_COMP) {
670 		if (result > crd->crd_len) {
671 			/* Compression was useless, we lost time */
672 			free(out, M_CRYPTO_DATA);
673 			return 0;
674 		}
675 	}
676 
677 	COPYBACK(outtype, buf, crd->crd_skip, result, out);
678 	if (result < crd->crd_len) {
679 		adj = result - crd->crd_len;
680 		if (outtype == CRYPTO_BUF_MBUF) {
681 			adj = result - crd->crd_len;
682 			m_adj((struct mbuf *)buf, adj);
683 		} else {
684 			struct uio *uio = (struct uio *)buf;
685 			int ind;
686 
687 			adj = crd->crd_len - result;
688 			ind = uio->uio_iovcnt - 1;
689 
690 			while (adj > 0 && ind >= 0) {
691 				if (adj < uio->uio_iov[ind].iov_len) {
692 					uio->uio_iov[ind].iov_len -= adj;
693 					break;
694 				}
695 
696 				adj -= uio->uio_iov[ind].iov_len;
697 				uio->uio_iov[ind].iov_len = 0;
698 				ind--;
699 				uio->uio_iovcnt--;
700 			}
701 		}
702 	}
703 	free(out, M_CRYPTO_DATA);
704 	return 0;
705 }
706 
707 /*
708  * Generate a new software session.
709  */
710 int
711 swcr_newsession(u_int32_t *sid, struct cryptoini *cri)
712 {
713 	struct swcr_data **swd;
714 	struct auth_hash *axf;
715 	struct enc_xform *txf;
716 	struct comp_algo *cxf;
717 	u_int32_t i;
718 	int k;
719 
720 	if (sid == NULL || cri == NULL)
721 		return EINVAL;
722 
723 	if (swcr_sessions) {
724 		for (i = 1; i < swcr_sesnum; i++)
725 			if (swcr_sessions[i] == NULL)
726 				break;
727 	}
728 
729 	if (swcr_sessions == NULL || i == swcr_sesnum) {
730 		if (swcr_sessions == NULL) {
731 			i = 1; /* We leave swcr_sessions[0] empty */
732 			swcr_sesnum = CRYPTO_SW_SESSIONS;
733 		} else
734 			swcr_sesnum *= 2;
735 
736 		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
737 		    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
738 		if (swd == NULL) {
739 			/* Reset session number */
740 			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
741 				swcr_sesnum = 0;
742 			else
743 				swcr_sesnum /= 2;
744 			return ENOBUFS;
745 		}
746 
747 		/* Copy existing sessions */
748 		if (swcr_sessions) {
749 			bcopy(swcr_sessions, swd,
750 			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
751 			free(swcr_sessions, M_CRYPTO_DATA);
752 		}
753 
754 		swcr_sessions = swd;
755 	}
756 
757 	swd = &swcr_sessions[i];
758 	*sid = i;
759 
760 	while (cri) {
761 		*swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
762 		    M_NOWAIT | M_ZERO);
763 		if (*swd == NULL) {
764 			swcr_freesession(i);
765 			return ENOBUFS;
766 		}
767 
768 		switch (cri->cri_alg) {
769 		case CRYPTO_DES_CBC:
770 			txf = &enc_xform_des;
771 			goto enccommon;
772 		case CRYPTO_3DES_CBC:
773 			txf = &enc_xform_3des;
774 			goto enccommon;
775 		case CRYPTO_BLF_CBC:
776 			txf = &enc_xform_blf;
777 			goto enccommon;
778 		case CRYPTO_CAST_CBC:
779 			txf = &enc_xform_cast5;
780 			goto enccommon;
781 		case CRYPTO_RIJNDAEL128_CBC:
782 			txf = &enc_xform_rijndael128;
783 			goto enccommon;
784 		case CRYPTO_AES_CTR:
785 			txf = &enc_xform_aes_ctr;
786 			goto enccommon;
787 		case CRYPTO_AES_XTS:
788 			txf = &enc_xform_aes_xts;
789 			goto enccommon;
790 		case CRYPTO_AES_GCM_16:
791 			txf = &enc_xform_aes_gcm;
792 			goto enccommon;
793 		case CRYPTO_AES_GMAC:
794 			txf = &enc_xform_aes_gmac;
795 			(*swd)->sw_exf = txf;
796 			break;
797 		case CRYPTO_NULL:
798 			txf = &enc_xform_null;
799 			goto enccommon;
800 		enccommon:
801 			if (txf->setkey(&((*swd)->sw_kschedule), cri->cri_key,
802 			    cri->cri_klen / 8) < 0) {
803 				swcr_freesession(i);
804 				return EINVAL;
805 			}
806 			(*swd)->sw_exf = txf;
807 			break;
808 
809 		case CRYPTO_MD5_HMAC:
810 			axf = &auth_hash_hmac_md5_96;
811 			goto authcommon;
812 		case CRYPTO_SHA1_HMAC:
813 			axf = &auth_hash_hmac_sha1_96;
814 			goto authcommon;
815 		case CRYPTO_RIPEMD160_HMAC:
816 			axf = &auth_hash_hmac_ripemd_160_96;
817 			goto authcommon;
818 		case CRYPTO_SHA2_256_HMAC:
819 			axf = &auth_hash_hmac_sha2_256_128;
820 			goto authcommon;
821 		case CRYPTO_SHA2_384_HMAC:
822 			axf = &auth_hash_hmac_sha2_384_192;
823 			goto authcommon;
824 		case CRYPTO_SHA2_512_HMAC:
825 			axf = &auth_hash_hmac_sha2_512_256;
826 		authcommon:
827 			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
828 			    M_NOWAIT);
829 			if ((*swd)->sw_ictx == NULL) {
830 				swcr_freesession(i);
831 				return ENOBUFS;
832 			}
833 
834 			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
835 			    M_NOWAIT);
836 			if ((*swd)->sw_octx == NULL) {
837 				swcr_freesession(i);
838 				return ENOBUFS;
839 			}
840 
841 			for (k = 0; k < cri->cri_klen / 8; k++)
842 				cri->cri_key[k] ^= HMAC_IPAD_VAL;
843 
844 			axf->Init((*swd)->sw_ictx);
845 			axf->Update((*swd)->sw_ictx, cri->cri_key,
846 			    cri->cri_klen / 8);
847 			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
848 			    axf->blocksize - (cri->cri_klen / 8));
849 
850 			for (k = 0; k < cri->cri_klen / 8; k++)
851 				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
852 
853 			axf->Init((*swd)->sw_octx);
854 			axf->Update((*swd)->sw_octx, cri->cri_key,
855 			    cri->cri_klen / 8);
856 			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
857 			    axf->blocksize - (cri->cri_klen / 8));
858 
859 			for (k = 0; k < cri->cri_klen / 8; k++)
860 				cri->cri_key[k] ^= HMAC_OPAD_VAL;
861 			(*swd)->sw_axf = axf;
862 			break;
863 
864 		case CRYPTO_MD5_KPDK:
865 			axf = &auth_hash_key_md5;
866 			goto auth2common;
867 
868 		case CRYPTO_SHA1_KPDK:
869 			axf = &auth_hash_key_sha1;
870 		auth2common:
871 			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
872 			    M_NOWAIT);
873 			if ((*swd)->sw_ictx == NULL) {
874 				swcr_freesession(i);
875 				return ENOBUFS;
876 			}
877 
878 			/* Store the key so we can "append" it to the payload */
879 			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
880 			    M_NOWAIT);
881 			if ((*swd)->sw_octx == NULL) {
882 				swcr_freesession(i);
883 				return ENOBUFS;
884 			}
885 
886 			(*swd)->sw_klen = cri->cri_klen / 8;
887 			bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
888 			axf->Init((*swd)->sw_ictx);
889 			axf->Update((*swd)->sw_ictx, cri->cri_key,
890 			    cri->cri_klen / 8);
891 			axf->Final(NULL, (*swd)->sw_ictx);
892 			(*swd)->sw_axf = axf;
893 			break;
894 
895 		case CRYPTO_MD5:
896 			axf = &auth_hash_md5;
897 			goto auth3common;
898 
899 		case CRYPTO_SHA1:
900 			axf = &auth_hash_sha1;
901 		auth3common:
902 			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
903 			    M_NOWAIT);
904 			if ((*swd)->sw_ictx == NULL) {
905 				swcr_freesession(i);
906 				return ENOBUFS;
907 			}
908 
909 			axf->Init((*swd)->sw_ictx);
910 			(*swd)->sw_axf = axf;
911 			break;
912 
913 		case CRYPTO_AES_128_GMAC:
914 			axf = &auth_hash_gmac_aes_128;
915 			goto auth4common;
916 
917 		case CRYPTO_AES_192_GMAC:
918 			axf = &auth_hash_gmac_aes_192;
919 			goto auth4common;
920 
921 		case CRYPTO_AES_256_GMAC:
922 			axf = &auth_hash_gmac_aes_256;
923 		auth4common:
924 			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
925 			    M_NOWAIT);
926 			if ((*swd)->sw_ictx == NULL) {
927 				swcr_freesession(i);
928 				return ENOBUFS;
929 			}
930 			axf->Init((*swd)->sw_ictx);
931 			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
932 			    cri->cri_klen / 8);
933 			(*swd)->sw_axf = axf;
934 			break;
935 
936 		case CRYPTO_DEFLATE_COMP:
937 			cxf = &comp_algo_deflate;
938 			(*swd)->sw_cxf = cxf;
939 			break;
940 		default:
941 			swcr_freesession(i);
942 			return EINVAL;
943 		}
944 
945 		(*swd)->sw_alg = cri->cri_alg;
946 		cri = cri->cri_next;
947 		swd = &((*swd)->sw_next);
948 	}
949 	return 0;
950 }
951 
952 /*
953  * Free a session.
954  */
955 int
956 swcr_freesession(u_int64_t tid)
957 {
958 	struct swcr_data *swd;
959 	struct enc_xform *txf;
960 	struct auth_hash *axf;
961 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
962 
963 	if (sid > swcr_sesnum || swcr_sessions == NULL ||
964 	    swcr_sessions[sid] == NULL)
965 		return EINVAL;
966 
967 	/* Silently accept and return */
968 	if (sid == 0)
969 		return 0;
970 
971 	while ((swd = swcr_sessions[sid]) != NULL) {
972 		swcr_sessions[sid] = swd->sw_next;
973 
974 		switch (swd->sw_alg) {
975 		case CRYPTO_DES_CBC:
976 		case CRYPTO_3DES_CBC:
977 		case CRYPTO_BLF_CBC:
978 		case CRYPTO_CAST_CBC:
979 		case CRYPTO_RIJNDAEL128_CBC:
980 		case CRYPTO_AES_CTR:
981 		case CRYPTO_AES_XTS:
982 		case CRYPTO_AES_GCM_16:
983 		case CRYPTO_AES_GMAC:
984 		case CRYPTO_NULL:
985 			txf = swd->sw_exf;
986 
987 			if (swd->sw_kschedule)
988 				txf->zerokey(&(swd->sw_kschedule));
989 			break;
990 
991 		case CRYPTO_MD5_HMAC:
992 		case CRYPTO_SHA1_HMAC:
993 		case CRYPTO_RIPEMD160_HMAC:
994 		case CRYPTO_SHA2_256_HMAC:
995 		case CRYPTO_SHA2_384_HMAC:
996 		case CRYPTO_SHA2_512_HMAC:
997 			axf = swd->sw_axf;
998 
999 			if (swd->sw_ictx) {
1000 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1001 				free(swd->sw_ictx, M_CRYPTO_DATA);
1002 			}
1003 			if (swd->sw_octx) {
1004 				explicit_bzero(swd->sw_octx, axf->ctxsize);
1005 				free(swd->sw_octx, M_CRYPTO_DATA);
1006 			}
1007 			break;
1008 
1009 		case CRYPTO_MD5_KPDK:
1010 		case CRYPTO_SHA1_KPDK:
1011 			axf = swd->sw_axf;
1012 
1013 			if (swd->sw_ictx) {
1014 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1015 				free(swd->sw_ictx, M_CRYPTO_DATA);
1016 			}
1017 			if (swd->sw_octx) {
1018 				explicit_bzero(swd->sw_octx, swd->sw_klen);
1019 				free(swd->sw_octx, M_CRYPTO_DATA);
1020 			}
1021 			break;
1022 
1023 		case CRYPTO_AES_128_GMAC:
1024 		case CRYPTO_AES_192_GMAC:
1025 		case CRYPTO_AES_256_GMAC:
1026 		case CRYPTO_MD5:
1027 		case CRYPTO_SHA1:
1028 			axf = swd->sw_axf;
1029 
1030 			if (swd->sw_ictx) {
1031 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1032 				free(swd->sw_ictx, M_CRYPTO_DATA);
1033 			}
1034 			break;
1035 		}
1036 
1037 		free(swd, M_CRYPTO_DATA);
1038 	}
1039 	return 0;
1040 }
1041 
1042 /*
1043  * Process a software request.
1044  */
1045 int
1046 swcr_process(struct cryptop *crp)
1047 {
1048 	struct cryptodesc *crd;
1049 	struct swcr_data *sw;
1050 	u_int32_t lid;
1051 	int type;
1052 
1053 	/* Sanity check */
1054 	if (crp == NULL)
1055 		return EINVAL;
1056 
1057 	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
1058 		crp->crp_etype = EINVAL;
1059 		goto done;
1060 	}
1061 
1062 	lid = crp->crp_sid & 0xffffffff;
1063 	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
1064 		crp->crp_etype = ENOENT;
1065 		goto done;
1066 	}
1067 
1068 	if (crp->crp_flags & CRYPTO_F_IMBUF)
1069 		type = CRYPTO_BUF_MBUF;
1070 	else
1071 		type = CRYPTO_BUF_IOV;
1072 
1073 	/* Go through crypto descriptors, processing as we go */
1074 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1075 		/*
1076 		 * Find the crypto context.
1077 		 *
1078 		 * XXX Note that the logic here prevents us from having
1079 		 * XXX the same algorithm multiple times in a session
1080 		 * XXX (or rather, we can but it won't give us the right
1081 		 * XXX results). To do that, we'd need some way of differentiating
1082 		 * XXX between the various instances of an algorithm (so we can
1083 		 * XXX locate the correct crypto context).
1084 		 */
1085 		for (sw = swcr_sessions[lid];
1086 		    sw && sw->sw_alg != crd->crd_alg;
1087 		    sw = sw->sw_next)
1088 			;
1089 
1090 		/* No such context ? */
1091 		if (sw == NULL) {
1092 			crp->crp_etype = EINVAL;
1093 			goto done;
1094 		}
1095 
1096 		switch (sw->sw_alg) {
1097 		case CRYPTO_NULL:
1098 			break;
1099 		case CRYPTO_DES_CBC:
1100 		case CRYPTO_3DES_CBC:
1101 		case CRYPTO_BLF_CBC:
1102 		case CRYPTO_CAST_CBC:
1103 		case CRYPTO_RIJNDAEL128_CBC:
1104 		case CRYPTO_AES_CTR:
1105 		case CRYPTO_AES_XTS:
1106 			if ((crp->crp_etype = swcr_encdec(crd, sw,
1107 			    crp->crp_buf, type)) != 0)
1108 				goto done;
1109 			break;
1110 		case CRYPTO_MD5_HMAC:
1111 		case CRYPTO_SHA1_HMAC:
1112 		case CRYPTO_RIPEMD160_HMAC:
1113 		case CRYPTO_SHA2_256_HMAC:
1114 		case CRYPTO_SHA2_384_HMAC:
1115 		case CRYPTO_SHA2_512_HMAC:
1116 		case CRYPTO_MD5_KPDK:
1117 		case CRYPTO_SHA1_KPDK:
1118 		case CRYPTO_MD5:
1119 		case CRYPTO_SHA1:
1120 			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
1121 			    crp->crp_buf, type)) != 0)
1122 				goto done;
1123 			break;
1124 
1125 		case CRYPTO_AES_GCM_16:
1126 		case CRYPTO_AES_GMAC:
1127 		case CRYPTO_AES_128_GMAC:
1128 		case CRYPTO_AES_192_GMAC:
1129 		case CRYPTO_AES_256_GMAC:
1130 			crp->crp_etype = swcr_combined(crp);
1131 			goto done;
1132 
1133 		case CRYPTO_DEFLATE_COMP:
1134 			if ((crp->crp_etype = swcr_compdec(crd, sw,
1135 			    crp->crp_buf, type)) != 0)
1136 				goto done;
1137 			else
1138 				crp->crp_olen = (int)sw->sw_size;
1139 			break;
1140 
1141 		default:
1142 			/* Unknown/unsupported algorithm */
1143 			crp->crp_etype = EINVAL;
1144 			goto done;
1145 		}
1146 	}
1147 
1148 done:
1149 	crypto_done(crp);
1150 	return 0;
1151 }
1152 
1153 /*
1154  * Initialize the driver, called from the kernel main().
1155  */
1156 void
1157 swcr_init(void)
1158 {
1159 	int algs[CRYPTO_ALGORITHM_MAX + 1];
1160 	int flags = CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_ENCRYPT_MAC |
1161 	    CRYPTOCAP_F_MAC_ENCRYPT;
1162 
1163 	swcr_id = crypto_get_driverid(flags);
1164 	if (swcr_id < 0) {
1165 		/* This should never happen */
1166 		panic("Software crypto device cannot initialize!");
1167 	}
1168 
1169 	bzero(algs, sizeof(algs));
1170 
1171 	algs[CRYPTO_DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1172 	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1173 	algs[CRYPTO_BLF_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1174 	algs[CRYPTO_CAST_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1175 	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1176 	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1177 	algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1178 	algs[CRYPTO_MD5_KPDK] = CRYPTO_ALG_FLAG_SUPPORTED;
1179 	algs[CRYPTO_SHA1_KPDK] = CRYPTO_ALG_FLAG_SUPPORTED;
1180 	algs[CRYPTO_MD5] = CRYPTO_ALG_FLAG_SUPPORTED;
1181 	algs[CRYPTO_SHA1] = CRYPTO_ALG_FLAG_SUPPORTED;
1182 	algs[CRYPTO_RIJNDAEL128_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1183 	algs[CRYPTO_AES_CTR] = CRYPTO_ALG_FLAG_SUPPORTED;
1184 	algs[CRYPTO_AES_XTS] = CRYPTO_ALG_FLAG_SUPPORTED;
1185 	algs[CRYPTO_AES_GCM_16] = CRYPTO_ALG_FLAG_SUPPORTED;
1186 	algs[CRYPTO_AES_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1187 	algs[CRYPTO_DEFLATE_COMP] = CRYPTO_ALG_FLAG_SUPPORTED;
1188 	algs[CRYPTO_NULL] = CRYPTO_ALG_FLAG_SUPPORTED;
1189 	algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1190 	algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1191 	algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1192 	algs[CRYPTO_AES_128_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1193 	algs[CRYPTO_AES_192_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1194 	algs[CRYPTO_AES_256_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1195 
1196 	crypto_register(swcr_id, algs, swcr_newsession,
1197 	    swcr_freesession, swcr_process);
1198 }
1199