/*	$OpenBSD: aesni.c,v 1.53 2021/10/24 10:26:22 patrick Exp $	*/
/*-
 * Copyright (c) 2003 Jason Wright
 * Copyright (c) 2003, 2004 Theo de Raadt
 * Copyright (c) 2010, Thordur I. Bjornsson
 * Copyright (c) 2010, Mike Belopuhov
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/smr.h>

#include <crypto/cryptodev.h>
#include <crypto/aes.h>
#include <crypto/gmac.h>
#include <crypto/xform.h>
#include <crypto/cryptosoft.h>

#include <machine/fpu.h>

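/*
 * One expanded AES key schedule.  The ekey/dkey/klen layout must match
 * the start of struct aesni_session below: the XTS session setup reuses
 * aesni_set_key() on these contexts via a cast.
 */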
struct aesni_aes_ctx {
	uint32_t		 aes_ekey[4 * (AES_MAXROUNDS + 1)];
	uint32_t		 aes_dkey[4 * (AES_MAXROUNDS + 1)];
	uint32_t		 aes_klen;
	uint32_t		 aes_pad[3];
};

struct aesni_xts_ctx {
	struct aesni_aes_ctx	 xts_keys[2];
};

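/*
 * Per-session state.  Sessions live on an SMR list so that
 * aesni_process() can look them up without taking a lock.
 */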
struct aesni_session {
	uint32_t		 ses_ekey[4 * (AES_MAXROUNDS + 1)];
	uint32_t		 ses_dkey[4 * (AES_MAXROUNDS + 1)];
	uint32_t		 ses_klen;
	uint8_t			 ses_nonce[AESCTR_NONCESIZE];
	int			 ses_sid;
	GHASH_CTX		*ses_ghash;
	struct aesni_xts_ctx	*ses_xts;
	struct swcr_data	*ses_swd;
	SMR_LIST_ENTRY(aesni_session)
				 ses_entries;
	uint8_t			*ses_buf;
	size_t			 ses_buflen;
	struct smr_entry	 ses_smr;
};

struct aesni_softc {
	int32_t			 sc_cid;
	uint32_t		 sc_sid;
	struct mutex		 sc_mtx;
	SMR_LIST_HEAD(, aesni_session)
				 sc_sessions;
} *aesni_sc;

struct pool aesnipl;

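/* Statistics: bumped once per cipher invocation in aesni_encdec(). */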
uint32_t aesni_ops;

/* assembler-assisted key setup */
extern void aesni_set_key(struct aesni_session *ses, uint8_t *key, size_t len);

/* aes encryption/decryption */
extern void aesni_enc(struct aesni_session *ses, uint8_t *dst, uint8_t *src);
extern void aesni_dec(struct aesni_session *ses, uint8_t *dst, uint8_t *src);

/* assembler-assisted CBC mode */
extern void aesni_cbc_enc(struct aesni_session *ses, uint8_t *dst,
    uint8_t *src, size_t len, uint8_t *iv);
extern void aesni_cbc_dec(struct aesni_session *ses, uint8_t *dst,
    uint8_t *src, size_t len, uint8_t *iv);

/* assembler-assisted CTR mode */
extern void aesni_ctr_enc(struct aesni_session *ses, uint8_t *dst,
    uint8_t *src, size_t len, uint8_t *icb);

/* assembler-assisted XTS mode */
extern void aesni_xts_enc(struct aesni_xts_ctx *xts, uint8_t *dst,
    uint8_t *src, size_t len, uint8_t *tweak);
extern void aesni_xts_dec(struct aesni_xts_ctx *xts, uint8_t *dst,
    uint8_t *src, size_t len, uint8_t *tweak);

/* assembler-assisted GMAC */
extern void aesni_gmac_update(GHASH_CTX *ghash, uint8_t *src, size_t len);
extern void aesni_gmac_final(struct aesni_session *ses, uint8_t *tag,
    uint8_t *icb, uint8_t *hashstate);

void	aesni_setup(void);
int	aesni_newsession(u_int32_t *, struct cryptoini *);
int	aesni_freesession(u_int64_t);
int	aesni_process(struct cryptop *);

struct aesni_session *
	aesni_get(uint32_t);
void	aesni_free(struct aesni_session *);
void	aesni_free_smr(void *);

int	aesni_swauth(struct cryptop *, struct cryptodesc *, struct swcr_data *,
	    caddr_t);

int	aesni_encdec(struct cryptop *, struct cryptodesc *,
	    struct cryptodesc *, struct aesni_session *);

void	pclmul_setup(void);
void	ghash_update_pclmul(GHASH_CTX *, uint8_t *, size_t);

void
aesni_setup(void)
{
	int algs[CRYPTO_ALGORITHM_MAX + 1];

	aesni_sc = malloc(sizeof(*aesni_sc), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (aesni_sc == NULL)
		return;

	bzero(algs, sizeof(algs));

	/* Encryption algorithms. */
	algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_CTR] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_GCM_16] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_XTS] = CRYPTO_ALG_FLAG_SUPPORTED;

	/* Authenticated encryption algorithms. */
	algs[CRYPTO_AES_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_128_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_192_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_AES_256_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;

	/* HMACs needed for IPsec; these use software crypto. */
	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;

	/* IPsec Extended Sequence Numbers. */
	algs[CRYPTO_ESN] = CRYPTO_ALG_FLAG_SUPPORTED;

	aesni_sc->sc_cid = crypto_get_driverid(CRYPTOCAP_F_MPSAFE);
	if (aesni_sc->sc_cid < 0) {
		free(aesni_sc, M_DEVBUF, sizeof(*aesni_sc));
		aesni_sc = NULL;
		return;
	}

	pool_init(&aesnipl, sizeof(struct aesni_session), 16, IPL_VM, 0,
	    "aesni", NULL);
	pool_setlowat(&aesnipl, 2);

	mtx_init(&aesni_sc->sc_mtx, IPL_VM);

	crypto_register(aesni_sc->sc_cid, algs, aesni_newsession,
	    aesni_freesession, aesni_process);
}

int
aesni_newsession(u_int32_t *sidp, struct cryptoini *cri)
{
	struct aesni_session *ses = NULL;
	struct aesni_aes_ctx *aes1, *aes2;
	struct cryptoini *c;
	const struct auth_hash *axf;
	struct swcr_data *swd;
	int i;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	ses = pool_get(&aesnipl, PR_NOWAIT | PR_ZERO);
	if (!ses)
		return (ENOMEM);
	smr_init(&ses->ses_smr);

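	/*
	 * Preallocate a page-sized contiguous staging buffer;
	 * aesni_encdec() grows it on demand for larger requests.
	 */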
	ses->ses_buf = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (ses->ses_buf != NULL)
		ses->ses_buflen = PAGE_SIZE;

	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_AES_CBC:
			ses->ses_klen = c->cri_klen / 8;
			fpu_kernel_enter();
			aesni_set_key(ses, c->cri_key, ses->ses_klen);
			fpu_kernel_exit();
			break;

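		/*
		 * For the CTR-based modes, cri_key holds the AES key
		 * followed by the implicit AESCTR_NONCESIZE-byte nonce.
		 */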
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			ses->ses_klen = c->cri_klen / 8 - AESCTR_NONCESIZE;
			memcpy(ses->ses_nonce, c->cri_key + ses->ses_klen,
			    AESCTR_NONCESIZE);
			fpu_kernel_enter();
			aesni_set_key(ses, c->cri_key, ses->ses_klen);
			fpu_kernel_exit();
			break;

		case CRYPTO_AES_XTS:
			ses->ses_xts = malloc(sizeof(struct aesni_xts_ctx),
			    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
			if (ses->ses_xts == NULL) {
				aesni_free(ses);
				return (ENOMEM);
			}

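			/*
			 * XTS takes two AES keys back to back; each half
			 * is cri_klen/2 bits, i.e. cri_klen/16 bytes.
			 */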
			ses->ses_klen = c->cri_klen / 16;
			aes1 = &ses->ses_xts->xts_keys[0];
			aes1->aes_klen = ses->ses_klen;
			aes2 = &ses->ses_xts->xts_keys[1];
			aes2->aes_klen = ses->ses_klen;

			fpu_kernel_enter();
			aesni_set_key((struct aesni_session *)aes1,
			    c->cri_key, aes1->aes_klen);
			aesni_set_key((struct aesni_session *)aes2,
			    c->cri_key + ses->ses_klen, aes2->aes_klen);
			fpu_kernel_exit();
			break;

		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			ses->ses_ghash = malloc(sizeof(GHASH_CTX),
			    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
			if (ses->ses_ghash == NULL) {
				aesni_free(ses);
				return (ENOMEM);
			}

			/*
			 * Prepare the hash subkey: H = AES_K(0^128).
			 * The context was zero-filled on allocation.
			 */
			fpu_kernel_enter();
			aesni_enc(ses, ses->ses_ghash->H, ses->ses_ghash->H);
			fpu_kernel_exit();
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256_128;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384_192;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512_256;
		authcommon:
			swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
			    M_NOWAIT|M_ZERO);
			if (swd == NULL) {
				aesni_free(ses);
				return (ENOMEM);
			}
			ses->ses_swd = swd;

			swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swd->sw_ictx == NULL) {
				aesni_free(ses);
				return (ENOMEM);
			}

			swd->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swd->sw_octx == NULL) {
				aesni_free(ses);
				return (ENOMEM);
			}

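			/*
			 * Precompute the inner and outer HMAC contexts:
			 * hash (key ^ ipad), then (key ^ opad).  The last
			 * XOR below restores the caller's key bytes.
			 */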
			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= HMAC_IPAD_VAL;

			axf->Init(swd->sw_ictx);
			axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
			axf->Update(swd->sw_ictx, hmac_ipad_buffer,
			    axf->blocksize - (c->cri_klen / 8));

			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= (HMAC_IPAD_VAL ^
				    HMAC_OPAD_VAL);

			axf->Init(swd->sw_octx);
			axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
			axf->Update(swd->sw_octx, hmac_opad_buffer,
			    axf->blocksize - (c->cri_klen / 8));

			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= HMAC_OPAD_VAL;

			swd->sw_axf = axf;
			swd->sw_alg = c->cri_alg;

			break;

		case CRYPTO_ESN:
			/* nothing to do */
			break;

		default:
			aesni_free(ses);
			return (EINVAL);
		}
	}

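	/* Publish the session; readers walk the list under SMR. */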
	mtx_enter(&aesni_sc->sc_mtx);
	ses->ses_sid = ++aesni_sc->sc_sid;
	SMR_LIST_INSERT_HEAD_LOCKED(&aesni_sc->sc_sessions, ses, ses_entries);
	mtx_leave(&aesni_sc->sc_mtx);

	*sidp = ses->ses_sid;
	return (0);
}

int
aesni_freesession(u_int64_t tid)
{
	struct aesni_session *ses;
	u_int32_t sid = (u_int32_t)tid;

	mtx_enter(&aesni_sc->sc_mtx);
	SMR_LIST_FOREACH_LOCKED(ses, &aesni_sc->sc_sessions, ses_entries) {
		if (ses->ses_sid == sid) {
			SMR_LIST_REMOVE_LOCKED(ses, ses_entries);
			break;
		}
	}
	mtx_leave(&aesni_sc->sc_mtx);

	if (ses == NULL)
		return (EINVAL);

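	/*
	 * Defer the actual teardown until every SMR read section that
	 * might still hold a reference to this session has drained.
	 */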
	smr_call(&ses->ses_smr, aesni_free_smr, ses);

	return (0);
}

void
aesni_free(struct aesni_session *ses)
{
	struct swcr_data *swd;
	const struct auth_hash *axf;

	if (ses->ses_ghash) {
		explicit_bzero(ses->ses_ghash, sizeof(GHASH_CTX));
		free(ses->ses_ghash, M_CRYPTO_DATA, sizeof(GHASH_CTX));
	}

	if (ses->ses_xts) {
		explicit_bzero(ses->ses_xts, sizeof(struct aesni_xts_ctx));
		free(ses->ses_xts, M_CRYPTO_DATA, sizeof(struct aesni_xts_ctx));
	}

	if (ses->ses_swd) {
		swd = ses->ses_swd;
		axf = swd->sw_axf;

		if (swd->sw_ictx) {
			explicit_bzero(swd->sw_ictx, axf->ctxsize);
			free(swd->sw_ictx, M_CRYPTO_DATA, axf->ctxsize);
		}
		if (swd->sw_octx) {
			explicit_bzero(swd->sw_octx, axf->ctxsize);
			free(swd->sw_octx, M_CRYPTO_DATA, axf->ctxsize);
		}
		free(swd, M_CRYPTO_DATA, sizeof(*swd));
	}

	if (ses->ses_buf) {
		explicit_bzero(ses->ses_buf, ses->ses_buflen);
		free(ses->ses_buf, M_DEVBUF, ses->ses_buflen);
	}

	explicit_bzero(ses, sizeof(*ses));
	pool_put(&aesnipl, ses);
}

void
aesni_free_smr(void *arg)
{
	struct aesni_session *ses = arg;

	aesni_free(ses);
}

struct aesni_session *
aesni_get(uint32_t sid)
{
	struct aesni_session *ses = NULL;

	SMR_ASSERT_CRITICAL();
	SMR_LIST_FOREACH(ses, &aesni_sc->sc_sessions, ses_entries) {
		if (ses->ses_sid == sid)
			break;
	}
	return (ses);
}

int
aesni_swauth(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf)
{
	int type;

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		type = CRYPTO_BUF_MBUF;
	else
		type = CRYPTO_BUF_IOV;

	return (swcr_authcompute(crp, crd, sw, buf, type));
}

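/*
 * Gather the request into the session's contiguous staging buffer,
 * run the AES-NI routines on it with the FPU enabled, and scatter
 * the result (and, for GCM/GMAC, the tag) back into the request.
 */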
int
aesni_encdec(struct cryptop *crp, struct cryptodesc *crd,
    struct cryptodesc *crda, struct aesni_session *ses)
{
	int aadlen, err, ivlen, iskip, oskip, rlen;
	uint8_t iv[EALG_MAX_BLOCK_LEN];
	uint8_t icb[AESCTR_BLOCKSIZE];
	uint8_t tag[GMAC_DIGEST_LEN];
	uint8_t *buf = ses->ses_buf;
	uint32_t *dw;

	aadlen = rlen = err = iskip = oskip = 0;

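	/*
	 * Grow the staging buffer if this request does not fit; round
	 * up so the block-padded GCM/GMAC updates stay in bounds.
	 */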
	if (crd->crd_len > ses->ses_buflen) {
		if (buf != NULL) {
			explicit_bzero(buf, ses->ses_buflen);
			free(buf, M_DEVBUF, ses->ses_buflen);
		}

		ses->ses_buflen = 0;
		rlen = roundup(crd->crd_len, EALG_MAX_BLOCK_LEN);
		ses->ses_buf = buf = malloc(rlen, M_DEVBUF, M_NOWAIT |
		    M_ZERO);
		if (buf == NULL)
			return (ENOMEM);
		ses->ses_buflen = rlen;
	}

	/* CBC uses 16 IV bytes, CTR/XTS only 8. */
	ivlen = (crd->crd_alg == CRYPTO_AES_CBC) ? 16 : 8;

	/* Initialize the IV. */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else
			arc4random_buf(iv, ivlen);

		/* Do we need to write the IV? */
		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
			if (crp->crp_flags & CRYPTO_F_IMBUF) {
				if (m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, ivlen, iv, M_NOWAIT)) {
					err = ENOMEM;
					goto out;
				}
			} else
				cuio_copyback((struct uio *)crp->crp_buf,
				    crd->crd_inject, ivlen, iv);
		}
	} else {
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else {
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, ivlen, iv);
			else
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_inject, ivlen, iv);
		}
	}

	if (crda) {
		/* Supply GMAC with AAD */
		aadlen = crda->crd_len;
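		/*
		 * With ESN, the authenticated data is SPI || ESN ||
		 * payload: splice the 32 high ESN bits in right after
		 * the SPI and shift the rest of the AAD behind them.
		 */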
		if (crda->crd_flags & CRD_F_ESN) {
			aadlen += 4;
			/* SPI */
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crda->crd_skip, 4, buf);
			else
				cuio_copydata((struct uio *)crp->crp_buf,
				    crda->crd_skip, 4, buf);
			iskip = 4;	/* additional input offset */
			/* ESN */
			memcpy(buf + 4, crda->crd_esn, 4);
			oskip = iskip + 4;	/* offset output buffer by 8 */
		}
		rlen = roundup(aadlen, GMAC_BLOCK_LEN);
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copydata((struct mbuf *)crp->crp_buf,
			    crda->crd_skip + iskip, crda->crd_len - iskip,
			    buf + oskip);
		else
			cuio_copydata((struct uio *)crp->crp_buf,
			    crda->crd_skip + iskip, crda->crd_len - iskip,
			    buf + oskip);
		fpu_kernel_enter();
		aesni_gmac_update(ses->ses_ghash, buf, rlen);
		fpu_kernel_exit();
		bzero(buf, aadlen);
	}

	/* Copy data to be processed to the buffer */
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		m_copydata((struct mbuf *)crp->crp_buf, crd->crd_skip,
		    crd->crd_len, buf);
	else
		cuio_copydata((struct uio *)crp->crp_buf, crd->crd_skip,
		    crd->crd_len, buf);

	if (crd->crd_alg == CRYPTO_AES_CTR ||
	    crd->crd_alg == CRYPTO_AES_GCM_16 ||
	    crd->crd_alg == CRYPTO_AES_GMAC) {
		bzero(icb, AESCTR_BLOCKSIZE);
		memcpy(icb, ses->ses_nonce, AESCTR_NONCESIZE);
		memcpy(icb + AESCTR_NONCESIZE, iv, AESCTR_IVSIZE);
		/* rlen is for gcm and gmac only */
		rlen = roundup(crd->crd_len, AESCTR_BLOCKSIZE);
	}

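	/*
	 * The AES-NI and PCLMUL routines use the SSE register state,
	 * so every call is bracketed by fpu_kernel_enter()/exit().
	 */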
	/* Apply cipher */
	fpu_kernel_enter();
	switch (crd->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crd->crd_flags & CRD_F_ENCRYPT)
			aesni_cbc_enc(ses, buf, buf, crd->crd_len, iv);
		else
			aesni_cbc_dec(ses, buf, buf, crd->crd_len, iv);
		break;
	case CRYPTO_AES_CTR:
		aesni_ctr_enc(ses, buf, buf, crd->crd_len, icb);
		break;
	case CRYPTO_AES_GCM_16:
		icb[AESCTR_BLOCKSIZE - 1] = 1;
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			/* encrypt padded data */
			aesni_ctr_enc(ses, buf, buf, rlen, icb);
			/* zero out padding bytes */
			bzero(buf + crd->crd_len, rlen - crd->crd_len);
			/* hash encrypted data padded with zeroes */
			aesni_gmac_update(ses->ses_ghash, buf, rlen);
		} else {
			aesni_gmac_update(ses->ses_ghash, buf, rlen);
			aesni_ctr_enc(ses, buf, buf, rlen, icb);
		}
		goto gcmcommon;
	case CRYPTO_AES_GMAC:
		icb[AESCTR_BLOCKSIZE - 1] = 1;
		aesni_gmac_update(ses->ses_ghash, buf, rlen);
	gcmcommon:
		/*
		 * GHASH lengths block: two 64-bit big-endian bit counts,
		 * AAD first, then ciphertext.  Only the low 32 bits of
		 * each can be non-zero, hence the word-offset stores.
		 */
		bzero(tag, GMAC_BLOCK_LEN);
		dw = (uint32_t *)tag + 1;
		*dw = htobe32(aadlen * 8);
		dw = (uint32_t *)tag + 3;
		*dw = htobe32(crd->crd_len * 8);
		aesni_gmac_update(ses->ses_ghash, tag, GMAC_BLOCK_LEN);
		/* finalization */
		aesni_gmac_final(ses, tag, icb, ses->ses_ghash->S);
		break;
	case CRYPTO_AES_XTS:
		if (crd->crd_flags & CRD_F_ENCRYPT)
			aesni_xts_enc(ses->ses_xts, buf, buf, crd->crd_len, iv);
		else
			aesni_xts_dec(ses->ses_xts, buf, buf, crd->crd_len, iv);
		break;
	}
	fpu_kernel_exit();

	aesni_ops++;

	/* Copy back the result */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (m_copyback((struct mbuf *)crp->crp_buf, crd->crd_skip,
		    crd->crd_len, buf, M_NOWAIT)) {
			err = ENOMEM;
			goto out;
		}
	} else
		cuio_copyback((struct uio *)crp->crp_buf, crd->crd_skip,
		    crd->crd_len, buf);

	/* Copy back the authentication tag */
	if (crda) {
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (m_copyback((struct mbuf *)crp->crp_buf,
			    crda->crd_inject, GMAC_DIGEST_LEN, tag,
			    M_NOWAIT)) {
				err = ENOMEM;
				goto out;
			}
		} else
			memcpy(crp->crp_mac, tag, GMAC_DIGEST_LEN);

		/* Reset the per-packet GHASH state */
		bzero(ses->ses_ghash->S, GMAC_BLOCK_LEN);
		bzero(ses->ses_ghash->Z, GMAC_BLOCK_LEN);
	}

out:
	explicit_bzero(buf, roundup(crd->crd_len, EALG_MAX_BLOCK_LEN));
	return (err);
}

int
aesni_process(struct cryptop *crp)
{
	struct aesni_session *ses;
	struct cryptodesc *crd, *crda, *crde;
	int err = 0;
	int i;

	KASSERT(crp->crp_ndesc >= 1);

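	/*
	 * Look the session up inside an SMR read section; it cannot be
	 * freed out from under us until smr_read_leave().
	 */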
	smr_read_enter();
	ses = aesni_get(crp->crp_sid & 0xffffffff);
	if (!ses) {
		err = EINVAL;
		goto out;
	}

	crda = crde = NULL;
	for (i = 0; i < crp->crp_ndesc; i++) {
		crd = &crp->crp_desc[i];
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_XTS:
			err = aesni_encdec(crp, crd, NULL, ses);
			if (err != 0)
				goto out;
			break;

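		/*
		 * GCM/GMAC needs both the cipher (crde) and the auth
		 * (crda) descriptor; wait until both have been seen,
		 * whichever order they arrive in.
		 */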
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			crde = crd;
			if (!crda)
				continue;
			goto gcmcommon;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			crda = crd;
			if (!crde)
				continue;
		gcmcommon:
			err = aesni_encdec(crp, crde, crda, ses);
			if (err != 0)
				goto out;
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			err = aesni_swauth(crp, crd, ses->ses_swd,
			    crp->crp_buf);
			if (err != 0)
				goto out;
			break;

		default:
			err = EINVAL;
			goto out;
		}
	}

out:
	smr_read_leave();
	return (err);
}

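/*
 * Route the generic GMAC code through the PCLMUL-accelerated GHASH:
 * ghash_update is the function pointer it uses for block updates.
 */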
void
pclmul_setup(void)
{
	ghash_update = ghash_update_pclmul;
}

void
ghash_update_pclmul(GHASH_CTX *ghash, uint8_t *src, size_t len)
{
	fpu_kernel_enter();
	aesni_gmac_update(ghash, src, len);
	fpu_kernel_exit();
}