/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 * Largely borrowed from ccr(4), Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <dev/pci/pcivar.h>

#include <dev/random/randomdev.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "ccp.h"
#include "ccp_hardware.h"

MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto");

/*
 * We need a global softc available for the garbage random_source API,
 * which lacks any context pointer.  It's also handy for debugging.
 */
struct ccp_softc *g_ccp_softc;

bool g_debug_print = false;
SYSCTL_BOOL(_hw_ccp, OID_AUTO, debug, CTLFLAG_RWTUN, &g_debug_print, 0,
    "Set to enable debugging log messages");

static struct pciid {
	uint32_t devid;
	const char *desc;
} ccp_ids[] = {
	{ 0x14561022, "AMD CCP-5a" },
	{ 0x14681022, "AMD CCP-5b" },
};

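/*
 * random_ccp plugs the device's TRNG into random(4); it is registered in
 * ccp_attach() below only when the hardware advertises VERSION_CAP_TRNG.
 */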
static struct random_source random_ccp = {
	.rs_ident = "AMD CCP TRNG",
	.rs_source = RANDOM_PURE_CCP,
	.rs_read = random_ccp_read,
};

/*
 * ccp_populate_sglist() generates a scatter/gather list that covers the entire
 * crypto operation buffer.
 */
static int
ccp_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
	int error;

	sglist_reset(sg);
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		error = sglist_append_mbuf(sg, crp->crp_mbuf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		error = sglist_append_uio(sg, crp->crp_uio);
	else
		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
	return (error);
}

/*
 * Handle a GCM request with an empty payload by performing the
 * operation in software.  Derived from swcr_authenc().
 */
static void
ccp_gcm_soft(struct ccp_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	struct aes_gmac_ctx gmac_ctx;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int i, len;

	/*
	 * This assumes a 12-byte IV from the crp.  See the longer comment
	 * in ccp_gcm() (ccp_hardware.c) for more details.
	 */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			arc4rand(iv, 12, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	}
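	/*
	 * With a 96-bit IV, GCM forms the initial counter block as
	 * IV || 0x00000001 (NIST SP 800-38D).
	 */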
	*(uint32_t *)&iv[12] = htobe32(1);

	/* Initialize the MAC. */
	AES_GMAC_Init(&gmac_ctx);
	AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
	AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
		len = imin(crda->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	}

	/*
	 * Length block: the 64-bit bit-lengths of the AAD and the
	 * ciphertext, big-endian.  The payload is empty here, so only the
	 * low word of the AAD length is non-zero.
	 */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
	AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	AES_GMAC_Final(digest, &gmac_ctx);

	if (crde->crd_flags & CRD_F_ENCRYPT) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest), digest);
		crp->crp_etype = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest2), digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0)
			crp->crp_etype = 0;
		else
			crp->crp_etype = EBADMSG;
	}
	crypto_done(crp);
}

static int
ccp_probe(device_t dev)
{
	struct pciid *ip;
	uint32_t id;

	id = pci_get_devid(dev);
	for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) {
		if (id == ip->devid) {
			device_set_desc(dev, ip->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static void
ccp_initialize_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		qp->cq_softc = sc;
		qp->cq_qindex = i;
		mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF);
		/* XXX - arbitrarily chosen sizes */
		qp->cq_sg_crp = sglist_alloc(32, M_WAITOK);
		/* Two more SGEs than sg_crp to accommodate ipad. */
		qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK);
		qp->cq_sg_dst = sglist_alloc(2, M_WAITOK);
	}
}

static void
ccp_free_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		mtx_destroy(&qp->cq_lock);
		sglist_free(qp->cq_sg_crp);
		sglist_free(qp->cq_sg_ulptx);
		sglist_free(qp->cq_sg_dst);
	}
}

static int
ccp_attach(device_t dev)
{
	struct ccp_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}

	error = ccp_hw_attach(dev);
	if (error != 0)
		return (error);

	mtx_init(&sc->lock, "ccp", NULL, MTX_DEF);

	ccp_initialize_queues(sc);

	if (g_ccp_softc == NULL) {
		g_ccp_softc = sc;
		if ((sc->hw_features & VERSION_CAP_TRNG) != 0)
			random_source_register(&random_ccp);
	}

	if ((sc->hw_features & VERSION_CAP_AES) != 0) {
		crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
	}
	if ((sc->hw_features & VERSION_CAP_SHA) != 0) {
		crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_384_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_512_HMAC, 0, 0);
	}

	return (0);
}

static int
ccp_detach(device_t dev)
{
	struct ccp_softc *sc;
	int i;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	for (i = 0; i < sc->nsessions; i++) {
		if (sc->sessions[i].active || sc->sessions[i].pending != 0) {
			mtx_unlock(&sc->lock);
			return (EBUSY);
		}
	}
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);
	if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0)
		random_source_deregister(&random_ccp);

	ccp_hw_detach(dev);
	ccp_free_queues(sc);

	if (g_ccp_softc == sc)
		g_ccp_softc = NULL;

	free(sc->sessions, M_CCP);
	mtx_destroy(&sc->lock);
	return (0);
}

static void
ccp_init_hmac_digest(struct ccp_session *s, int cri_alg, char *key,
    int klen)
{
	union authctx auth_ctx;
	struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	axf = s->hmac.auth_hash;
	klen /= 8;
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		explicit_bzero(&auth_ctx, sizeof(auth_ctx));
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

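	/* Form the pads per RFC 2104: ipad = K ^ 0x36..., opad = K ^ 0x5c... */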
	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}
}

static int
ccp_aes_check_keylen(int alg, int klen)
{

	switch (klen) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static void
ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen)
{
	unsigned kbits;

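	/*
	 * XTS keys are twice as long: half for the data key and half for
	 * the tweak key.  Classify the cipher by the single-key width.
	 */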
	if (alg == CRYPTO_AES_XTS)
		kbits = klen / 2;
	else
		kbits = klen;

	switch (kbits) {
	case 128:
		s->blkcipher.cipher_type = CCP_AES_TYPE_128;
		break;
	case 192:
		s->blkcipher.cipher_type = CCP_AES_TYPE_192;
		break;
	case 256:
		s->blkcipher.cipher_type = CCP_AES_TYPE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen / 8;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
}

static int
ccp_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct ccp_softc *sc;
	struct ccp_session *s;
	struct auth_hash *auth_hash;
	struct cryptoini *c, *hash, *cipher;
	enum ccp_aes_mode cipher_mode;
	unsigned auth_mode, iv_len;
	unsigned partial_digest_len;
	unsigned q;
	int error, i, sess;
	bool gcm_hash;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	gcm_hash = false;
	cipher = NULL;
	hash = NULL;
	auth_hash = NULL;
	/* XXX reconcile auth_mode with use by ccp_sha */
	auth_mode = 0;
	cipher_mode = CCP_AES_MODE_ECB;
	iv_len = 0;
	partial_digest_len = 0;
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			if (hash)
				return (EINVAL);
			hash = c;
			switch (c->cri_alg) {
			case CRYPTO_SHA1_HMAC:
				auth_hash = &auth_hash_hmac_sha1;
				auth_mode = SHA1;
				partial_digest_len = SHA1_HASH_LEN;
				break;
			case CRYPTO_SHA2_256_HMAC:
				auth_hash = &auth_hash_hmac_sha2_256;
				auth_mode = SHA2_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_384_HMAC:
				auth_hash = &auth_hash_hmac_sha2_384;
				auth_mode = SHA2_384;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_SHA2_512_HMAC:
				auth_hash = &auth_hash_hmac_sha2_512;
				auth_mode = SHA2_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_AES_128_NIST_GMAC:
			case CRYPTO_AES_192_NIST_GMAC:
			case CRYPTO_AES_256_NIST_GMAC:
				gcm_hash = true;
#if 0
				auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
#endif
				break;
			}
			break;
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_XTS:
			if (cipher)
				return (EINVAL);
			cipher = c;
			switch (c->cri_alg) {
			case CRYPTO_AES_CBC:
				cipher_mode = CCP_AES_MODE_CBC;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_ICM:
				cipher_mode = CCP_AES_MODE_CTR;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_NIST_GCM_16:
				cipher_mode = CCP_AES_MODE_GCTR;
				iv_len = AES_GCM_IV_LEN;
				break;
			case CRYPTO_AES_XTS:
				cipher_mode = CCP_AES_MODE_XTS;
				iv_len = AES_BLOCK_LEN;
				break;
			}
			if (c->cri_key != NULL) {
				error = ccp_aes_check_keylen(c->cri_alg,
				    c->cri_klen);
				if (error != 0)
					return (error);
			}
			break;
		default:
			return (EINVAL);
		}
	}
	if (gcm_hash != (cipher_mode == CCP_AES_MODE_GCTR))
		return (EINVAL);
	if (hash == NULL && cipher == NULL)
		return (EINVAL);
	if (hash != NULL && hash->cri_key == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	sess = -1;
	for (i = 0; i < sc->nsessions; i++) {
		if (!sc->sessions[i].active && sc->sessions[i].pending == 0) {
			sess = i;
			break;
		}
	}
	if (sess == -1) {
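		/*
		 * No free slot: grow the session array by one, copying the
		 * old entries over while the softc lock is held.
		 */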
		s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCP,
		    M_NOWAIT | M_ZERO);
		if (s == NULL) {
			mtx_unlock(&sc->lock);
			return (ENOMEM);
		}
		if (sc->sessions != NULL)
			memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions);
		sess = sc->nsessions;
		free(sc->sessions, M_CCP);
		sc->sessions = s;
		sc->nsessions++;
	}

	s = &sc->sessions[sess];

	/* Just grab the first usable queue for now. */
	for (q = 0; q < nitems(sc->queues); q++)
		if ((sc->valid_queues & (1 << q)) != 0)
			break;
	if (q == nitems(sc->queues)) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	s->queue = q;

	if (gcm_hash)
		s->mode = GCM;
	else if (hash != NULL && cipher != NULL)
		s->mode = AUTHENC;
	else if (hash != NULL)
		s->mode = HMAC;
	else {
		MPASS(cipher != NULL);
		s->mode = BLKCIPHER;
	}
	if (gcm_hash) {
		if (hash->cri_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = hash->cri_mlen;
	} else if (hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.partial_digest_len = partial_digest_len;
		if (hash->cri_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = hash->cri_mlen;
		ccp_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
		    hash->cri_klen);
	}
	if (cipher != NULL) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = iv_len;
		if (cipher->cri_key != NULL)
			ccp_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
			    cipher->cri_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	*sidp = sess;
	return (0);
}

static int
ccp_freesession(device_t dev, uint64_t tid)
{
	struct ccp_softc *sc;
	uint32_t sid;
	int error;

	sc = device_get_softc(dev);
	sid = CRYPTO_SESID2LID(tid);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active)
		error = EINVAL;
	else {
		if (sc->sessions[sid].pending != 0)
			device_printf(dev,
			    "session %d freed with %d pending requests\n", sid,
			    sc->sessions[sid].pending);
		sc->sessions[sid].active = false;
		error = 0;
	}
	mtx_unlock(&sc->lock);
	return (error);
}

static int
ccp_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ccp_softc *sc;
	struct ccp_queue *qp;
	struct ccp_session *s;
	struct cryptodesc *crd, *crda, *crde;
	uint32_t sid;
	int error;
	bool qpheld;

	qpheld = false;
	qp = NULL;
	if (crp == NULL)
		return (EINVAL);

	crd = crp->crp_desc;
	sid = CRYPTO_SESID2LID(crp->crp_sid);
	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active) {
		mtx_unlock(&sc->lock);
		error = EINVAL;
		goto out;
	}

	s = &sc->sessions[sid];
	qp = &sc->queues[s->queue];
	mtx_unlock(&sc->lock);
	error = ccp_queue_acquire_reserve(qp, 1 /* placeholder */, M_NOWAIT);
	if (error != 0)
		goto out;
	qpheld = true;

	error = ccp_populate_sglist(qp->cq_sg_crp, crp);
	if (error != 0)
		goto out;

	switch (s->mode) {
	case HMAC:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
			ccp_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		error = ccp_hmac(qp, s, crp);
		break;
	case BLKCIPHER:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crd->crd_alg,
			    crd->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		}
		error = ccp_blkcipher(qp, s, crp);
		break;
	case AUTHENC:
		error = 0;
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			/* Only encrypt-then-authenticate supported. */
			crde = crd;
			crda = crd->crd_next;
			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
				error = EINVAL;
				break;
			}
			s->cipher_first = true;
			break;
		default:
			crda = crd;
			crde = crd->crd_next;
			if (crde->crd_flags & CRD_F_ENCRYPT) {
				error = EINVAL;
				break;
			}
			s->cipher_first = false;
			break;
		}
		if (error != 0)
			break;
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccp_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
			    crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		error = ccp_authenc(qp, s, crp, crda, crde);
		break;
	case GCM:
		error = 0;
		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
			crde = crd;
			crda = crd->crd_next;
			s->cipher_first = true;
		} else {
			crda = crd;
			crde = crd->crd_next;
			s->cipher_first = false;
		}
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
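		/*
		 * An empty payload is handled in software; ccp_gcm_soft()
		 * completes the request itself, so drop the queue lock and
		 * return without the usual abort/release path.
		 */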
		if (crde->crd_len == 0) {
			mtx_unlock(&qp->cq_lock);
			ccp_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		error = ccp_gcm(qp, s, crp, crda, crde);
		break;
	}

	if (error == 0)
		s->pending++;

out:
	if (qpheld) {
		if (error != 0) {
			/*
			 * Squash EAGAIN so callers don't uselessly and
			 * expensively retry if the ring was full.
			 */
			if (error == EAGAIN)
				error = ENOMEM;
			ccp_queue_abort(qp);
		} else
			ccp_queue_release(qp);
	}

	if (error != 0) {
		DPRINTF(dev, "%s: early error:%d\n", __func__, error);
		crp->crp_etype = error;
		crypto_done(crp);
	}
	return (0);
}

static device_method_t ccp_methods[] = {
	DEVMETHOD(device_probe,		ccp_probe),
	DEVMETHOD(device_attach,	ccp_attach),
	DEVMETHOD(device_detach,	ccp_detach),

	DEVMETHOD(cryptodev_newsession,	ccp_newsession),
	DEVMETHOD(cryptodev_freesession, ccp_freesession),
	DEVMETHOD(cryptodev_process,	ccp_process),

	DEVMETHOD_END
};

static driver_t ccp_driver = {
	"ccp",
	ccp_methods,
	sizeof(struct ccp_softc)
};

static devclass_t ccp_devclass;
DRIVER_MODULE(ccp, pci, ccp_driver, ccp_devclass, NULL, NULL);
MODULE_VERSION(ccp, 1);
MODULE_DEPEND(ccp, crypto, 1, 1, 1);
MODULE_DEPEND(ccp, random_device, 1, 1, 1);
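/* Each ccp_ids entry matches on a single 32-bit vendor/device word ("W32"). */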
MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids, sizeof(ccp_ids[0]),
    nitems(ccp_ids));

static int
ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags)
{
	struct ccp_softc *sc;

	mtx_assert(&qp->cq_lock, MA_OWNED);
	sc = qp->cq_softc;

	if (n < 1 || n >= (1 << sc->ring_size_order))
		return (EINVAL);

	while (true) {
		if (ccp_queue_get_ring_space(qp) >= n)
			return (0);
		if ((mflags & M_WAITOK) == 0)
			return (EAGAIN);
		qp->cq_waiting = true;
		msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0);
	}
}

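/*
 * Queue transaction protocol, as used by ccp_process() above.  A minimal
 * sketch (error handling elided):
 *
 *	error = ccp_queue_acquire_reserve(qp, n, M_NOWAIT);
 *	if (error != 0)
 *		return (error);
 *	... fill descriptor ring entries ...
 *	ccp_queue_release(qp);
 *
 * On failure after acquiring, call ccp_queue_abort() instead to discard the
 * staged descriptors.  Both release and abort drop cq_lock.
 */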
int
ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
{
	int error;

	mtx_lock(&qp->cq_lock);
	qp->cq_acq_tail = qp->cq_tail;
	error = ccp_queue_reserve_space(qp, n, mflags);
	if (error != 0)
		mtx_unlock(&qp->cq_lock);
	return (error);
}

void
ccp_queue_release(struct ccp_queue *qp)
{

	mtx_assert(&qp->cq_lock, MA_OWNED);
	if (qp->cq_tail != qp->cq_acq_tail) {
		wmb();
		ccp_queue_write_tail(qp);
	}
	mtx_unlock(&qp->cq_lock);
}

void
ccp_queue_abort(struct ccp_queue *qp)
{
	unsigned i;

	mtx_assert(&qp->cq_lock, MA_OWNED);

	/* Wipe out any descriptors associated with this aborted txn. */
	for (i = qp->cq_acq_tail; i != qp->cq_tail;
	    i = (i + 1) % (1 << qp->cq_softc->ring_size_order)) {
		memset(&qp->desc_ring[i], 0, sizeof(qp->desc_ring[i]));
	}
	qp->cq_tail = qp->cq_acq_tail;

	mtx_unlock(&qp->cq_lock);
}

#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
static void
db_show_ccp_sc(struct ccp_softc *sc)
{

	db_printf("ccp softc at %p\n", sc);
	db_printf(" cid: %d\n", (int)sc->cid);
	db_printf(" nsessions: %d\n", sc->nsessions);

	db_printf(" lock: ");
	db_show_lock(&sc->lock);

	db_printf(" detaching: %d\n", (int)sc->detaching);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);

	db_printf(" hw_version: %d\n", (int)sc->hw_version);
	db_printf(" hw_features: %b\n", (int)sc->hw_features,
	    "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA"
	    "\11SHA\0103DES\07AES");

	db_printf(" hw status:\n");
	db_ccp_show_hw(sc);
}

static void
db_show_ccp_qp(struct ccp_queue *qp)
{

	db_printf(" lock: ");
	db_show_lock(&qp->cq_lock);

	db_printf(" cq_qindex: %u\n", qp->cq_qindex);
	db_printf(" cq_softc: %p\n", qp->cq_softc);

	db_printf(" head: %u\n", qp->cq_head);
	db_printf(" tail: %u\n", qp->cq_tail);
	db_printf(" acq_tail: %u\n", qp->cq_acq_tail);
	db_printf(" desc_ring: %p\n", qp->desc_ring);
	db_printf(" completions_ring: %p\n", qp->completions_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)qp->desc_ring_bus_addr);

	db_printf(" hw status:\n");
	db_ccp_show_queue_hw(qp);
}

DB_SHOW_COMMAND(ccp, db_show_ccp)
{
	struct ccp_softc *sc;
	unsigned unit, qindex;

	if (!have_addr)
		goto usage;

	unit = (unsigned)addr;

	sc = devclass_get_softc(ccp_devclass, unit);
	if (sc == NULL) {
		db_printf("No such device ccp%u\n", unit);
		goto usage;
	}

	if (count == -1) {
		db_show_ccp_sc(sc);
		return;
	}

	qindex = (unsigned)count;
	if (qindex >= nitems(sc->queues)) {
		db_printf("No such queue %u\n", qindex);
		goto usage;
	}
	db_show_ccp_qp(&sc->queues[qindex]);
	return;

usage:
	db_printf("usage: show ccp <unit>[,<qindex>]\n");
	return;
}
#endif /* DDB */