xref: /openbsd/sys/crypto/cryptosoft.c (revision 91f110e0)
1 /*	$OpenBSD: cryptosoft.c,v 1.69 2013/08/25 14:26:56 jsing Exp $	*/
2 
3 /*
4  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5  *
6  * This code was written by Angelos D. Keromytis in Athens, Greece, in
7  * February 2000. Network Security Technologies Inc. (NSTI) kindly
8  * supported the development of this code.
9  *
10  * Copyright (c) 2000, 2001 Angelos D. Keromytis
11  *
12  * Permission to use, copy, and modify this software with or without fee
13  * is hereby granted, provided that this entire notice is included in
14  * all source code copies of any software which is or includes a copy or
15  * modification of this software.
16  *
17  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
18  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
19  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
20  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
21  * PURPOSE.
22  */
23 
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/malloc.h>
27 #include <sys/mbuf.h>
28 #include <sys/errno.h>
29 #include <dev/rndvar.h>
30 #include <crypto/md5.h>
31 #include <crypto/sha1.h>
32 #include <crypto/rmd160.h>
33 #include <crypto/cast.h>
34 #include <crypto/blf.h>
35 #include <crypto/cryptodev.h>
36 #include <crypto/cryptosoft.h>
37 #include <crypto/xform.h>
38 
/*
 * HMAC inner-pad constant (the 0x36 "ipad" byte, RFC 2104), one full
 * maximum-size hash block.  swcr_newsession() hashes the tail of this
 * buffer to pad a short key up to the hash block size when priming the
 * inner HMAC context.
 */
const u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};
57 
/*
 * HMAC outer-pad constant (the 0x5C "opad" byte, RFC 2104), one full
 * maximum-size hash block; used like hmac_ipad_buffer above, but for
 * the outer HMAC context.
 */
const u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};
76 
77 
/* Session table; slot 0 is intentionally left empty (see swcr_newsession). */
struct swcr_data **swcr_sessions = NULL;
/* Number of slots currently allocated in swcr_sessions. */
u_int32_t swcr_sesnum = 0;
/* Driver id from crypto_get_driverid(); -1 until swcr_init() runs. */
int32_t swcr_id = -1;

/*
 * Copy data out to / in from the request buffer, dispatching on the
 * buffer kind: (x) is CRYPTO_BUF_MBUF for an mbuf chain, anything else
 * means a struct uio.  (a) = buffer, (b) = offset, (c) = length,
 * (d) = caller's data pointer.
 */
#define COPYBACK(x, a, b, c, d) \
	do { \
		if ((x) == CRYPTO_BUF_MBUF) \
			m_copyback((struct mbuf *)a,b,c,d,M_NOWAIT); \
		else \
			cuio_copyback((struct uio *)a,b,c,d); \
	} while (0)
#define COPYDATA(x, a, b, c, d) \
	do { \
		if ((x) == CRYPTO_BUF_MBUF) \
			m_copydata((struct mbuf *)a,b,c,d); \
		else \
			cuio_copydata((struct uio *)a,b,c,d); \
	} while (0)
96 
/*
 * Apply a symmetric encryption/decryption algorithm.
 *
 * crd     - descriptor: algorithm, direction, skip/len region, IV flags.
 * sw      - session state carrying the enc_xform and key schedule.
 * buf     - data buffer: an mbuf chain or a uio, selected by outtype.
 * outtype - CRYPTO_BUF_MBUF or CRYPTO_BUF_IOV.
 *
 * Processing is done in place, one cipher block at a time.  Xforms with
 * a reinit hook (e.g. counter-mode ciphers) do their own IV/chaining;
 * otherwise CBC chaining is performed here.  Returns 0 on success or
 * EINVAL if the length is not block-aligned or the buffer runs short.
 */
int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int outtype)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks, ind, count, ivlen;
	struct mbuf *m = NULL;
	struct uio *uio = NULL;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	if (outtype == CRYPTO_BUF_MBUF)
		m = (struct mbuf *) buf;
	else
		uio = (struct uio *) buf;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4random_buf(iv, ivlen);	/* fresh random IV */

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
	}

	ivp = iv;

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(sw->sw_kschedule, iv);

	if (outtype == CRYPTO_BUF_MBUF) {
		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;	/* bytes left to process */

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 * (Slow path: the block straddles an mbuf
			 * boundary, so bounce it through blk[].)
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block.  iv/iv2 are used
					 * alternately so the ciphertext just
					 * consumed survives one more round
					 * while it is decrypted in place.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk, M_NOWAIT);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			/* Fast path: whole blocks, in place in this mbuf. */
			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
	} else {
		/* uio path: same structure as the mbuf path above. */
		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				cuio_copydata(uio, count, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, count, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)uio->uio_iov[ind].iov_base + k;

			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}

			/*
			 * Advance to the next iov if the end of the current iov
			 * is aligned with the end of a cipher block.
			 * Note that the code is equivalent to calling:
			 *	ind = cuio_getptr(uio, count, &k);
			 */
			if (i > 0 && k == uio->uio_iov[ind].iov_len) {
				k = 0;
				ind++;
				if (ind >= uio->uio_iovcnt)
					return (EINVAL);
			}
		}
	}

	return 0; /* Done with encryption/decryption */
}
416 
417 /*
418  * Compute keyed-hash authenticator.
419  */
420 int
421 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
422     struct swcr_data *sw, caddr_t buf, int outtype)
423 {
424 	unsigned char aalg[AALG_MAX_RESULT_LEN];
425 	struct auth_hash *axf;
426 	union authctx ctx;
427 	int err;
428 
429 	if (sw->sw_ictx == 0)
430 		return EINVAL;
431 
432 	axf = sw->sw_axf;
433 
434 	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
435 
436 	if (outtype == CRYPTO_BUF_MBUF)
437 		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
438 		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
439 		    (caddr_t) &ctx);
440 	else
441 		err = cuio_apply((struct uio *) buf, crd->crd_skip,
442 		    crd->crd_len,
443 		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
444 		    (caddr_t) &ctx);
445 
446 	if (err)
447 		return err;
448 
449 	if (crd->crd_flags & CRD_F_ESN)
450 		axf->Update(&ctx, crd->crd_esn, 4);
451 
452 	switch (sw->sw_alg) {
453 	case CRYPTO_MD5_HMAC:
454 	case CRYPTO_SHA1_HMAC:
455 	case CRYPTO_RIPEMD160_HMAC:
456 	case CRYPTO_SHA2_256_HMAC:
457 	case CRYPTO_SHA2_384_HMAC:
458 	case CRYPTO_SHA2_512_HMAC:
459 		if (sw->sw_octx == NULL)
460 			return EINVAL;
461 
462 		axf->Final(aalg, &ctx);
463 		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
464 		axf->Update(&ctx, aalg, axf->hashsize);
465 		axf->Final(aalg, &ctx);
466 		break;
467 
468 	case CRYPTO_MD5_KPDK:
469 	case CRYPTO_SHA1_KPDK:
470 		if (sw->sw_octx == NULL)
471 			return EINVAL;
472 
473 		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
474 		axf->Final(aalg, &ctx);
475 		break;
476 
477 	case CRYPTO_MD5:
478 	case CRYPTO_SHA1:
479 		axf->Final(aalg, &ctx);
480 		break;
481 	}
482 
483 	/* Inject the authentication data */
484 	if (outtype == CRYPTO_BUF_MBUF)
485 		COPYBACK(outtype, buf, crd->crd_inject, axf->authsize, aalg);
486 	else
487 		bcopy(aalg, crp->crp_mac, axf->authsize);
488 
489 	return 0;
490 }
491 
/*
 * Apply a combined encryption-authentication transformation
 * (AES-GCM / AES-GMAC).  The session must supply two linked
 * descriptors: a cipher half (crde, CRYPTO_AES_GCM_16 or
 * CRYPTO_AES_GMAC) and a MAC half (crda, CRYPTO_AES_*_GMAC).
 * Returns 0 on success, EINVAL if either half is missing or an
 * unexpected algorithm appears in the chain.
 */
int
swcr_authenc(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;	/* 32-bit aligned scratch block */
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	struct mbuf *m = NULL;
	struct uio *uio = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, outtype, len, iskip, oskip;

	ivlen = blksz = iskip = oskip = 0;

	/* Sort the descriptors into cipher (crde) and MAC (crda) halves. */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		outtype = CRYPTO_BUF_MBUF;
		m = (struct mbuf *)buf;
	} else {
		outtype = CRYPTO_BUF_IOV;
		uio = (struct uio *)buf;
	}

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4random_buf(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;
	/*
	 * Section 5 of RFC 4106 specifies that AAD construction consists of
	 * {SPI, ESN, SN} whereas the real packet contains only {SPI, SN}.
	 * Unfortunately it doesn't follow a good example set in the Section
	 * 3.3.2.1 of RFC 4303 where upper part of the ESN, located in the
	 * external (to the packet) memory buffer, is processed by the hash
	 * function in the end thus allowing to retain simple programming
	 * interfaces and avoid kludges like the one below.
	 */
	if (crda->crd_flags & CRD_F_ESN) {
		aadlen += 4;
		/* SPI */
		COPYDATA(outtype, buf, crda->crd_skip, 4, blk);
		iskip = 4; /* loop below will start with an offset of 4 */
		/* ESN */
		bcopy(crda->crd_esn, blk + 4, 4);
		oskip = iskip + 4; /* offset output buffer blk by 8 */
	}
	/* Hash the AAD one MAC block at a time, zero-padding the tail. */
	for (i = iskip; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz - oskip);
		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk + oskip);
		bzero(blk + len + oskip, blksz - len - oskip);
		axf->Update(&ctx, blk, blksz);
		oskip = 0; /* reset initial output offset */
	}

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/*
	 * Do encryption/decryption with MAC: when encrypting, MAC the
	 * ciphertext just produced; when decrypting, MAC the ciphertext
	 * before it is decrypted in place.
	 */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);	/* pad the final partial block */
		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(swe->sw_kschedule, blk);
		}
		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			/*
			 * length block: two big-endian 64-bit bit-counts,
			 * AAD length in the first half, ciphertext length
			 * in the second (low 32 bits written here).
			 */
			bzero(blk, blksz);
			blkp = (uint32_t *)blk + 1;
			*blkp = htobe32(aadlen * 8);
			blkp = (uint32_t *)blk + 3;
			*blkp = htobe32(crde->crd_len * 8);
			axf->Update(&ctx, blk, blksz);
			break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crda->crd_inject, axf->authsize, aalg);
	else
		bcopy(aalg, crp->crp_mac, axf->authsize);

	return (0);
}
656 
657 /*
658  * Apply a compression/decompression algorithm
659  */
660 int
661 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
662     caddr_t buf, int outtype)
663 {
664 	u_int8_t *data, *out;
665 	struct comp_algo *cxf;
666 	int adj;
667 	u_int32_t result;
668 
669 	cxf = sw->sw_cxf;
670 
671 	/* We must handle the whole buffer of data in one time
672 	 * then if there is not all the data in the mbuf, we must
673 	 * copy in a buffer.
674 	 */
675 
676 	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
677 	if (data == NULL)
678 		return (EINVAL);
679 	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
680 
681 	if (crd->crd_flags & CRD_F_COMP)
682 		result = cxf->compress(data, crd->crd_len, &out);
683 	else
684 		result = cxf->decompress(data, crd->crd_len, &out);
685 
686 	free(data, M_CRYPTO_DATA);
687 	if (result == 0)
688 		return EINVAL;
689 
690 	/* Copy back the (de)compressed data. m_copyback is
691 	 * extending the mbuf as necessary.
692 	 */
693 	sw->sw_size = result;
694 	/* Check the compressed size when doing compression */
695 	if (crd->crd_flags & CRD_F_COMP) {
696 		if (result > crd->crd_len) {
697 			/* Compression was useless, we lost time */
698 			free(out, M_CRYPTO_DATA);
699 			return 0;
700 		}
701 	}
702 
703 	COPYBACK(outtype, buf, crd->crd_skip, result, out);
704 	if (result < crd->crd_len) {
705 		adj = result - crd->crd_len;
706 		if (outtype == CRYPTO_BUF_MBUF) {
707 			adj = result - crd->crd_len;
708 			m_adj((struct mbuf *)buf, adj);
709 		} else {
710 			struct uio *uio = (struct uio *)buf;
711 			int ind;
712 
713 			adj = crd->crd_len - result;
714 			ind = uio->uio_iovcnt - 1;
715 
716 			while (adj > 0 && ind >= 0) {
717 				if (adj < uio->uio_iov[ind].iov_len) {
718 					uio->uio_iov[ind].iov_len -= adj;
719 					break;
720 				}
721 
722 				adj -= uio->uio_iov[ind].iov_len;
723 				uio->uio_iov[ind].iov_len = 0;
724 				ind--;
725 				uio->uio_iovcnt--;
726 			}
727 		}
728 	}
729 	free(out, M_CRYPTO_DATA);
730 	return 0;
731 }
732 
/*
 * Generate a new software session.
 *
 * Builds a chain of swcr_data records, one per cryptoini descriptor,
 * stores the chain in a free slot of swcr_sessions[] (growing the
 * table by doubling when full; slot 0 is never used) and returns the
 * slot index through *sid.  Returns 0 on success, EINVAL on bad
 * arguments or an unsupported algorithm, ENOBUFS on allocation
 * failure; on any error the partially-built session is freed.
 */
int
swcr_newsession(u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int k;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Look for a free slot (slot 0 is reserved). */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	}

	/* No table yet, or no free slot: (re)allocate, doubling the size. */
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/* One swcr_data per descriptor in the cri chain. */
	while (cri) {
		*swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
		    M_NOWAIT | M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			/* GMAC: no key schedule here; key goes to the MAC. */
			txf = &enc_xform_aes_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_NULL:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			/* Common cipher setup: allocate and key the schedule. */
			if (txf->ctxsize > 0) {
				(*swd)->sw_kschedule = malloc(txf->ctxsize,
				    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
				if ((*swd)->sw_kschedule == NULL) {
					swcr_freesession(i);
					return EINVAL;
				}
			}
			if (txf->setkey((*swd)->sw_kschedule, cri->cri_key,
			    cri->cri_klen / 8) < 0) {
				swcr_freesession(i);
				return EINVAL;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256_128;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384_192;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512_256;
		authcommon:
			/*
			 * HMAC setup: precompute the inner (key^ipad) and
			 * outer (key^opad) hash states, restoring the
			 * caller's key afterwards.
			 */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			/* key ^= ipad */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    axf->blocksize - (cri->cri_klen / 8));

			/* key is now key^ipad; this turns it into key^opad */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    axf->blocksize - (cri->cri_klen / 8));

			/* undo the opad XOR, restoring the original key */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			/* KPDK: hash the key up front, keep a copy to append. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			/* Plain digest: just an initialized context. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_128_GMAC:
			axf = &auth_hash_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_GMAC:
			axf = &auth_hash_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_GMAC:
			axf = &auth_hash_gmac_aes_256;
		auth4common:
			/* GMAC: the AES key is installed into the hash state. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		case CRYPTO_ESN:
			/* nothing to do */
			break;
		default:
			swcr_freesession(i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
988 
989 /*
990  * Free a session.
991  */
992 int
993 swcr_freesession(u_int64_t tid)
994 {
995 	struct swcr_data *swd;
996 	struct enc_xform *txf;
997 	struct auth_hash *axf;
998 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
999 
1000 	if (sid > swcr_sesnum || swcr_sessions == NULL ||
1001 	    swcr_sessions[sid] == NULL)
1002 		return EINVAL;
1003 
1004 	/* Silently accept and return */
1005 	if (sid == 0)
1006 		return 0;
1007 
1008 	while ((swd = swcr_sessions[sid]) != NULL) {
1009 		swcr_sessions[sid] = swd->sw_next;
1010 
1011 		switch (swd->sw_alg) {
1012 		case CRYPTO_DES_CBC:
1013 		case CRYPTO_3DES_CBC:
1014 		case CRYPTO_BLF_CBC:
1015 		case CRYPTO_CAST_CBC:
1016 		case CRYPTO_RIJNDAEL128_CBC:
1017 		case CRYPTO_AES_CTR:
1018 		case CRYPTO_AES_XTS:
1019 		case CRYPTO_AES_GCM_16:
1020 		case CRYPTO_AES_GMAC:
1021 		case CRYPTO_NULL:
1022 			txf = swd->sw_exf;
1023 
1024 			if (swd->sw_kschedule) {
1025 				explicit_bzero(swd->sw_kschedule, txf->ctxsize);
1026 				free(swd->sw_kschedule, M_CRYPTO_DATA);
1027 			}
1028 			break;
1029 
1030 		case CRYPTO_MD5_HMAC:
1031 		case CRYPTO_SHA1_HMAC:
1032 		case CRYPTO_RIPEMD160_HMAC:
1033 		case CRYPTO_SHA2_256_HMAC:
1034 		case CRYPTO_SHA2_384_HMAC:
1035 		case CRYPTO_SHA2_512_HMAC:
1036 			axf = swd->sw_axf;
1037 
1038 			if (swd->sw_ictx) {
1039 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1040 				free(swd->sw_ictx, M_CRYPTO_DATA);
1041 			}
1042 			if (swd->sw_octx) {
1043 				explicit_bzero(swd->sw_octx, axf->ctxsize);
1044 				free(swd->sw_octx, M_CRYPTO_DATA);
1045 			}
1046 			break;
1047 
1048 		case CRYPTO_MD5_KPDK:
1049 		case CRYPTO_SHA1_KPDK:
1050 			axf = swd->sw_axf;
1051 
1052 			if (swd->sw_ictx) {
1053 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1054 				free(swd->sw_ictx, M_CRYPTO_DATA);
1055 			}
1056 			if (swd->sw_octx) {
1057 				explicit_bzero(swd->sw_octx, swd->sw_klen);
1058 				free(swd->sw_octx, M_CRYPTO_DATA);
1059 			}
1060 			break;
1061 
1062 		case CRYPTO_AES_128_GMAC:
1063 		case CRYPTO_AES_192_GMAC:
1064 		case CRYPTO_AES_256_GMAC:
1065 		case CRYPTO_MD5:
1066 		case CRYPTO_SHA1:
1067 			axf = swd->sw_axf;
1068 
1069 			if (swd->sw_ictx) {
1070 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1071 				free(swd->sw_ictx, M_CRYPTO_DATA);
1072 			}
1073 			break;
1074 		}
1075 
1076 		free(swd, M_CRYPTO_DATA);
1077 	}
1078 	return 0;
1079 }
1080 
1081 /*
1082  * Process a software request.
1083  */
1084 int
1085 swcr_process(struct cryptop *crp)
1086 {
1087 	struct cryptodesc *crd;
1088 	struct swcr_data *sw;
1089 	u_int32_t lid;
1090 	int type;
1091 
1092 	/* Sanity check */
1093 	if (crp == NULL)
1094 		return EINVAL;
1095 
1096 	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
1097 		crp->crp_etype = EINVAL;
1098 		goto done;
1099 	}
1100 
1101 	lid = crp->crp_sid & 0xffffffff;
1102 	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
1103 		crp->crp_etype = ENOENT;
1104 		goto done;
1105 	}
1106 
1107 	if (crp->crp_flags & CRYPTO_F_IMBUF)
1108 		type = CRYPTO_BUF_MBUF;
1109 	else
1110 		type = CRYPTO_BUF_IOV;
1111 
1112 	/* Go through crypto descriptors, processing as we go */
1113 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1114 		/*
1115 		 * Find the crypto context.
1116 		 *
1117 		 * XXX Note that the logic here prevents us from having
1118 		 * XXX the same algorithm multiple times in a session
1119 		 * XXX (or rather, we can but it won't give us the right
1120 		 * XXX results). To do that, we'd need some way of differentiating
1121 		 * XXX between the various instances of an algorithm (so we can
1122 		 * XXX locate the correct crypto context).
1123 		 */
1124 		for (sw = swcr_sessions[lid];
1125 		    sw && sw->sw_alg != crd->crd_alg;
1126 		    sw = sw->sw_next)
1127 			;
1128 
1129 		/* No such context ? */
1130 		if (sw == NULL) {
1131 			crp->crp_etype = EINVAL;
1132 			goto done;
1133 		}
1134 
1135 		switch (sw->sw_alg) {
1136 		case CRYPTO_NULL:
1137 			break;
1138 		case CRYPTO_DES_CBC:
1139 		case CRYPTO_3DES_CBC:
1140 		case CRYPTO_BLF_CBC:
1141 		case CRYPTO_CAST_CBC:
1142 		case CRYPTO_RIJNDAEL128_CBC:
1143 		case CRYPTO_AES_CTR:
1144 		case CRYPTO_AES_XTS:
1145 			if ((crp->crp_etype = swcr_encdec(crd, sw,
1146 			    crp->crp_buf, type)) != 0)
1147 				goto done;
1148 			break;
1149 		case CRYPTO_MD5_HMAC:
1150 		case CRYPTO_SHA1_HMAC:
1151 		case CRYPTO_RIPEMD160_HMAC:
1152 		case CRYPTO_SHA2_256_HMAC:
1153 		case CRYPTO_SHA2_384_HMAC:
1154 		case CRYPTO_SHA2_512_HMAC:
1155 		case CRYPTO_MD5_KPDK:
1156 		case CRYPTO_SHA1_KPDK:
1157 		case CRYPTO_MD5:
1158 		case CRYPTO_SHA1:
1159 			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
1160 			    crp->crp_buf, type)) != 0)
1161 				goto done;
1162 			break;
1163 
1164 		case CRYPTO_AES_GCM_16:
1165 		case CRYPTO_AES_GMAC:
1166 		case CRYPTO_AES_128_GMAC:
1167 		case CRYPTO_AES_192_GMAC:
1168 		case CRYPTO_AES_256_GMAC:
1169 			crp->crp_etype = swcr_authenc(crp);
1170 			goto done;
1171 
1172 		case CRYPTO_DEFLATE_COMP:
1173 			if ((crp->crp_etype = swcr_compdec(crd, sw,
1174 			    crp->crp_buf, type)) != 0)
1175 				goto done;
1176 			else
1177 				crp->crp_olen = (int)sw->sw_size;
1178 			break;
1179 
1180 		default:
1181 			/* Unknown/unsupported algorithm */
1182 			crp->crp_etype = EINVAL;
1183 			goto done;
1184 		}
1185 	}
1186 
1187 done:
1188 	crypto_done(crp);
1189 	return 0;
1190 }
1191 
1192 /*
1193  * Initialize the driver, called from the kernel main().
1194  */
1195 void
1196 swcr_init(void)
1197 {
1198 	int algs[CRYPTO_ALGORITHM_MAX + 1];
1199 	int flags = CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_ENCRYPT_MAC |
1200 	    CRYPTOCAP_F_MAC_ENCRYPT;
1201 
1202 	swcr_id = crypto_get_driverid(flags);
1203 	if (swcr_id < 0) {
1204 		/* This should never happen */
1205 		panic("Software crypto device cannot initialize!");
1206 	}
1207 
1208 	bzero(algs, sizeof(algs));
1209 
1210 	algs[CRYPTO_DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1211 	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1212 	algs[CRYPTO_BLF_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1213 	algs[CRYPTO_CAST_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1214 	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1215 	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1216 	algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1217 	algs[CRYPTO_MD5_KPDK] = CRYPTO_ALG_FLAG_SUPPORTED;
1218 	algs[CRYPTO_SHA1_KPDK] = CRYPTO_ALG_FLAG_SUPPORTED;
1219 	algs[CRYPTO_MD5] = CRYPTO_ALG_FLAG_SUPPORTED;
1220 	algs[CRYPTO_SHA1] = CRYPTO_ALG_FLAG_SUPPORTED;
1221 	algs[CRYPTO_RIJNDAEL128_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1222 	algs[CRYPTO_AES_CTR] = CRYPTO_ALG_FLAG_SUPPORTED;
1223 	algs[CRYPTO_AES_XTS] = CRYPTO_ALG_FLAG_SUPPORTED;
1224 	algs[CRYPTO_AES_GCM_16] = CRYPTO_ALG_FLAG_SUPPORTED;
1225 	algs[CRYPTO_AES_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1226 	algs[CRYPTO_DEFLATE_COMP] = CRYPTO_ALG_FLAG_SUPPORTED;
1227 	algs[CRYPTO_NULL] = CRYPTO_ALG_FLAG_SUPPORTED;
1228 	algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1229 	algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1230 	algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1231 	algs[CRYPTO_AES_128_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1232 	algs[CRYPTO_AES_192_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1233 	algs[CRYPTO_AES_256_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1234 	algs[CRYPTO_ESN] = CRYPTO_ALG_FLAG_SUPPORTED;
1235 
1236 	crypto_register(swcr_id, algs, swcr_newsession,
1237 	    swcr_freesession, swcr_process);
1238 }
1239