/*	$OpenBSD: cryptosoft.c,v 1.91 2021/10/24 10:26:22 patrick Exp $	*/

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/rmd160.h>
#include <crypto/cast.h>
#include <crypto/cryptodev.h>
#include <crypto/cryptosoft.h>
#include <crypto/xform.h>

const u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};

const u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};
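
/*
 * For reference, the two pad buffers above feed the standard HMAC
 * construction (RFC 2104):
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * where the key K is zero-padded to the hash's block size before the
 * XOR.  swcr_newsession() below precomputes the two partially-hashed
 * contexts so that per-packet authentication only has to continue them.
 */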


struct swcr_list *swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;
int32_t swcr_id = -1;

#define COPYBACK(x, a, b, c, d) \
	do { \
		if ((x) == CRYPTO_BUF_MBUF) \
			m_copyback((struct mbuf *)a,b,c,d,M_NOWAIT); \
		else \
			cuio_copyback((struct uio *)a,b,c,d); \
	} while (0)
#define COPYDATA(x, a, b, c, d) \
	do { \
		if ((x) == CRYPTO_BUF_MBUF) \
			m_copydata((struct mbuf *)a,b,c,d); \
		else \
			cuio_copydata((struct uio *)a,b,c,d); \
	} while (0)
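
/*
 * COPYBACK/COPYDATA hide whether a request operates on an mbuf chain or
 * on a uio iovec list, so the transforms below are written once for
 * both buffer types.
 */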

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int outtype)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct enc_xform *exf;
	int i, k, j, blks, ind, count, ivlen;
	struct mbuf *m = NULL;
	struct uio *uio = NULL;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;
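	/*
	 * Note that the transforms here never pad: the caller (e.g. the
	 * ESP layer) is expected to have padded crd_len up to a whole
	 * number of cipher blocks already.
	 */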

	if (outtype == CRYPTO_BUF_MBUF)
		m = (struct mbuf *) buf;
	else
		uio = (struct uio *) buf;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4random_buf(iv, ivlen);

		/* Do we need to write the IV? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
	}

	ivp = iv;

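	/*
	 * For CBC decryption in place, two IV buffers (iv and iv2) are
	 * alternated via ivp/nivp: the ciphertext block just consumed
	 * must survive as the XOR input for the next block even though
	 * the buffer it came from is overwritten with plaintext.
	 */
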
	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(sw->sw_kschedule, iv);

	if (outtype == CRYPTO_BUF_MBUF) {
		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk, M_NOWAIT);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;
			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it inside the while() loop below,
			 * and only when there is indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
	} else {
		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				cuio_copydata(uio, count, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, count, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}
			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it inside the while() loop below,
			 * and only when there is indeed enough data.
			 */
			idat = (char *)uio->uio_iov[ind].iov_base + k;

			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}

			/*
			 * Advance to the next iov if the end of the current iov
			 * is aligned with the end of a cipher block.
			 * Note that the code is equivalent to calling:
			 *	ind = cuio_getptr(uio, count, &k);
			 */
			if (i > 0 && k == uio->uio_iov[ind].iov_len) {
				k = 0;
				ind++;
				if (ind >= uio->uio_iovcnt)
					return (EINVAL);
			}
		}
	}

	return 0; /* Done with encryption/decryption */
}

/*
 * Compute keyed-hash authenticator.
 */
int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	const struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	if (outtype == CRYPTO_BUF_MBUF)
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
		    (caddr_t) &ctx);
	else
		err = cuio_apply((struct uio *) buf, crd->crd_skip,
		    crd->crd_len,
		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
		    (caddr_t) &ctx);

	if (err)
		return err;

	if (crd->crd_flags & CRD_F_ESN)
		axf->Update(&ctx, crd->crd_esn, 4);

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;
	}
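	/*
	 * sw_ictx/sw_octx hold the key-dependent inner and outer HMAC
	 * states precomputed at session setup, so finalizing costs only
	 * one extra short hash over the inner digest.
	 */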

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crd->crd_inject, axf->authsize, aalg);
	else
		bcopy(aalg, crp->crp_mac, axf->authsize);

	return 0;
}

/*
 * Apply a combined encryption-authentication transformation
 */
int
swcr_authenc(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_list *session;
	struct swcr_data *sw, *swa, *swe = NULL;
	const struct auth_hash *axf = NULL;
	const struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, outtype, len, iskip, oskip;

	ivlen = blksz = iskip = oskip = 0;

	session = &swcr_sessions[crp->crp_sid & 0xffffffff];
	for (i = 0; i < crp->crp_ndesc; i++) {
		crd = &crp->crp_desc[i];
		SLIST_FOREACH(sw, session, sw_next) {
			if (sw->sw_alg == crd->crd_alg)
				break;
		}
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_CHACHA20_POLY1305:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
		case CRYPTO_CHACHA20_POLY1305_MAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == NULL)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		outtype = CRYPTO_BUF_MBUF;
	} else {
		outtype = CRYPTO_BUF_IOV;
	}

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4random_buf(iv, ivlen);

		/* Do we need to write the IV? */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;
	/*
	 * Section 5 of RFC 4106 specifies that the AAD is constructed as
	 * {SPI, ESN, SN}, whereas the actual packet carries only {SPI, SN}.
	 * Unfortunately, it does not follow the good example set in Section
	 * 3.3.2.1 of RFC 4303, where the upper part of the ESN, kept in a
	 * memory buffer external to the packet, is fed to the hash function
	 * at the very end; that would have preserved a simple programming
	 * interface and avoided kludges like the one below.
	 */
	if (crda->crd_flags & CRD_F_ESN) {
		aadlen += 4;
		/* SPI */
		COPYDATA(outtype, buf, crda->crd_skip, 4, blk);
		iskip = 4; /* loop below will start with an offset of 4 */
		/* ESN */
		bcopy(crda->crd_esn, blk + 4, 4);
		oskip = iskip + 4; /* offset output buffer blk by 8 */
	}
	for (i = iskip; i < crda->crd_len; i += axf->hashsize) {
		len = MIN(crda->crd_len - i, axf->hashsize - oskip);
		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk + oskip);
		bzero(blk + len + oskip, axf->hashsize - len - oskip);
		axf->Update(&ctx, blk, axf->hashsize);
		oskip = 0; /* reset initial output offset */
	}
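
	/*
	 * With CRD_F_ESN set, the MAC therefore sees the AAD rebuilt as
	 * (byte offsets, for the usual 8-byte {SPI, SN} ESP header):
	 *
	 *	blk[0..3]   SPI			(copied from the packet)
	 *	blk[4..7]   ESN high-order bits	(from crda->crd_esn)
	 *	blk[8..]    SN and any remaining AAD from the packet
	 */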

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(swe->sw_kschedule, blk);
		}
		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
	}
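
	/*
	 * Note the ordering above: the MAC is always computed over the
	 * ciphertext (after encrypting, or before decrypting in place),
	 * which is what AES-GCM and ChaCha20-Poly1305 require.
	 */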

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/* length block */
		bzero(blk, axf->hashsize);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(aadlen * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, axf->hashsize);
		break;
	case CRYPTO_CHACHA20_POLY1305_MAC:
		/* length block */
		bzero(blk, axf->hashsize);
		blkp = (uint32_t *)blk;
		*blkp = htole32(aadlen);
		blkp = (uint32_t *)blk + 2;
		*blkp = htole32(crde->crd_len);
		axf->Update(&ctx, blk, axf->hashsize);
		break;
	}
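
	/*
	 * Both length blocks are 16 bytes holding two 64-bit lengths;
	 * since the 32-bit values above cannot overflow, writing only
	 * the low word of each is equivalent.  GCM (GHASH) wants
	 * big-endian *bit* counts:
	 *
	 *	be64(aadlen * 8) || be64(crd_len * 8)
	 *
	 * while Poly1305 (RFC 8439) wants little-endian *byte* counts:
	 *
	 *	le64(aadlen) || le64(crd_len)
	 */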

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crda->crd_inject, axf->authsize, aalg);
	else
		bcopy(aalg, crp->crp_mac, axf->authsize);

	return (0);
}

/*
 * Apply a compression/decompression algorithm
 */
int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int outtype)
{
	u_int8_t *data, *out;
	const struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;
	/*
	 * We must process the whole buffer of data at once, so if it is
	 * not contiguous in the mbuf, we copy it out into a flat buffer.
	 */

	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA, crd->crd_len);
	if (result == 0)
		return EINVAL;
	/*
	 * Copy back the (de)compressed data; m_copyback will extend the
	 * mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result > crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA, result);
			return 0;
		}
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (outtype == CRYPTO_BUF_MBUF) {
			m_adj((struct mbuf *)buf, adj);
		} else {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	free(out, M_CRYPTO_DATA, result);
	return 0;
}

/*
 * Generate a new software session.
 */
int
swcr_newsession(u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_list *session;
	struct swcr_data *swd, *prev;
	const struct auth_hash *axf;
	const struct enc_xform *txf;
	const struct comp_algo *cxf;
	u_int32_t i;
	int k;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions != NULL) {
		for (i = 1; i < swcr_sesnum; i++)
			if (SLIST_EMPTY(&swcr_sessions[i]))
				break;
	}

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		session = mallocarray(swcr_sesnum, sizeof(struct swcr_list),
		    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
		if (session == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, session,
			    (swcr_sesnum / 2) * sizeof(struct swcr_list));
			free(swcr_sessions, M_CRYPTO_DATA,
			    (swcr_sesnum / 2) * sizeof(struct swcr_list));
		}

		swcr_sessions = session;
	}

	session = &swcr_sessions[i];
	*sid = i;
	prev = NULL;

	while (cri) {
		swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
		    M_NOWAIT | M_ZERO);
		if (swd == NULL) {
			swcr_freesession(i);
			return ENOBUFS;
		}
		if (prev == NULL)
			SLIST_INSERT_HEAD(session, swd, sw_next);
		else
			SLIST_INSERT_AFTER(prev, swd, sw_next);

		switch (cri->cri_alg) {
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_AES_CBC:
			txf = &enc_xform_aes;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &enc_xform_aes_gmac;
			swd->sw_exf = txf;
			break;
		case CRYPTO_CHACHA20_POLY1305:
			txf = &enc_xform_chacha20_poly1305;
			goto enccommon;
		case CRYPTO_NULL:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (txf->ctxsize > 0) {
				swd->sw_kschedule = malloc(txf->ctxsize,
				    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
				if (swd->sw_kschedule == NULL) {
					swcr_freesession(i);
					return EINVAL;
				}
			}
			if (txf->setkey(swd->sw_kschedule, cri->cri_key,
			    cri->cri_klen / 8) < 0) {
				swcr_freesession(i);
				return EINVAL;
			}
			swd->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256_128;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384_192;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512_256;
			goto authcommon;
		authcommon:
			swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swd->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			swd->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swd->sw_octx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init(swd->sw_ictx);
			axf->Update(swd->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update(swd->sw_ictx, hmac_ipad_buffer,
			    axf->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init(swd->sw_octx);
			axf->Update(swd->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update(swd->sw_octx, hmac_opad_buffer,
			    axf->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
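
			/*
			 * The three XOR passes leave cri_key as we found
			 * it: K^ipad, then ^(ipad^opad) giving K^opad,
			 * then ^opad restoring K.
			 */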
			swd->sw_axf = axf;
			break;

		case CRYPTO_AES_128_GMAC:
			axf = &auth_hash_gmac_aes_128;
			goto authenccommon;
		case CRYPTO_AES_192_GMAC:
			axf = &auth_hash_gmac_aes_192;
			goto authenccommon;
		case CRYPTO_AES_256_GMAC:
			axf = &auth_hash_gmac_aes_256;
			goto authenccommon;
		case CRYPTO_CHACHA20_POLY1305_MAC:
			axf = &auth_hash_chacha20_poly1305;
			goto authenccommon;
		authenccommon:
			swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swd->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}
			axf->Init(swd->sw_ictx);
			axf->Setkey(swd->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			swd->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			swd->sw_cxf = cxf;
			break;
		case CRYPTO_ESN:
			/* nothing to do */
			break;
		default:
			swcr_freesession(i);
			return EINVAL;
		}

		swd->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		prev = swd;
	}
	return 0;
}
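
/*
 * A minimal usage sketch (hypothetical, not part of this driver): a
 * caller such as the IPsec stack chains one cryptoini per transform and
 * lets the crypto framework route the request here, e.g. for ESP with
 * AES-CBC and HMAC-SHA1:
 *
 *	struct cryptoini crie, cria;
 *	u_int64_t sid;
 *
 *	memset(&crie, 0, sizeof(crie));
 *	crie.cri_alg = CRYPTO_AES_CBC;
 *	crie.cri_klen = 128;			// key length in bits
 *	crie.cri_key = enc_key;			// hypothetical key buffer
 *	crie.cri_next = &cria;
 *
 *	memset(&cria, 0, sizeof(cria));
 *	cria.cri_alg = CRYPTO_SHA1_HMAC;
 *	cria.cri_klen = 160;
 *	cria.cri_key = auth_key;		// hypothetical key buffer
 *
 *	error = crypto_newsession(&sid, &crie, 0);
 */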

/*
 * Free a session.
 */
int
swcr_freesession(u_int64_t tid)
{
	struct swcr_list *session;
	struct swcr_data *swd;
	const struct enc_xform *txf;
	const struct auth_hash *axf;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    SLIST_EMPTY(&swcr_sessions[sid]))
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	session = &swcr_sessions[sid];
	while (!SLIST_EMPTY(session)) {
		swd = SLIST_FIRST(session);
		SLIST_REMOVE_HEAD(session, sw_next);

		switch (swd->sw_alg) {
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_NULL:
			txf = swd->sw_exf;

			if (swd->sw_kschedule) {
				explicit_bzero(swd->sw_kschedule, txf->ctxsize);
				free(swd->sw_kschedule, M_CRYPTO_DATA,
				    txf->ctxsize);
			}
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA, axf->ctxsize);
			}
			if (swd->sw_octx) {
				explicit_bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA, axf->ctxsize);
			}
			break;

		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
		case CRYPTO_CHACHA20_POLY1305_MAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA, axf->ctxsize);
			}
			break;
		}

		free(swd, M_CRYPTO_DATA, sizeof(*swd));
	}
	return 0;
}

/*
 * Process a software request.
 */
int
swcr_process(struct cryptop *crp)
{
	struct cryptodesc *crd;
	struct swcr_list *session;
	struct swcr_data *sw;
	u_int32_t lid;
	int err = 0;
	int type;
	int i;

	KASSERT(crp->crp_ndesc >= 1);

	if (crp->crp_buf == NULL) {
		err = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 ||
	    SLIST_EMPTY(&swcr_sessions[lid])) {
		err = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		type = CRYPTO_BUF_MBUF;
	else
		type = CRYPTO_BUF_IOV;

	/* Go through crypto descriptors, processing as we go */
	session = &swcr_sessions[lid];
	for (i = 0; i < crp->crp_ndesc; i++) {
		crd = &crp->crp_desc[i];
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		SLIST_FOREACH(sw, session, sw_next) {
			if (sw->sw_alg == crd->crd_alg)
				break;
		}

		/* No such context? */
		if (sw == NULL) {
			err = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_NULL:
			break;
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_XTS:
			if ((err = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			if ((err = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_CHACHA20_POLY1305_MAC:
			err = swcr_authenc(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((err = swcr_compdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			err = EINVAL;
			goto done;
		}
	}

done:
	return err;
}

/*
 * Initialize the driver, called from the kernel main().
 */
void
swcr_init(void)
{
	int algs[CRYPTO_ALGORITHM_MAX + 1];
	int flags = CRYPTOCAP_F_SOFTWARE;

	swcr_id = crypto_get_driverid(flags);
	if (swcr_id < 0) {
		/* This should never happen */
		panic("Software crypto device cannot initialize!");
	}

	bzero(algs, sizeof(algs));

	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_BLF_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_CAST_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_CTR] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_XTS] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_GCM_16] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_DEFLATE_COMP] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_NULL] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_128_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_192_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_256_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_CHACHA20_POLY1305] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_CHACHA20_POLY1305_MAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_ESN] = CRYPTO_ALG_FLAG_SUPPORTED;

	crypto_register(swcr_id, algs, swcr_newsession,
	    swcr_freesession, swcr_process);
}