/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 * Largely borrowed from ccr(4), Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <dev/pci/pcivar.h>

#include <dev/random/randomdev.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "ccp.h"
#include "ccp_hardware.h"

MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto");

/*
 * Need a global softc available for garbage random_source API, which lacks any
 * context pointer.  It's also handy for debugging.
 */
struct ccp_softc *g_ccp_softc;

bool g_debug_print = false;
SYSCTL_BOOL(_hw_ccp, OID_AUTO, debug, CTLFLAG_RWTUN, &g_debug_print, 0,
    "Set to enable debugging log messages");

static struct pciid {
	uint32_t devid;
	const char *desc;
} ccp_ids[] = {
	{ 0x14561022, "AMD CCP-5a" },
	{ 0x14681022, "AMD CCP-5b" },
};

static struct random_source random_ccp = {
	.rs_ident = "AMD CCP TRNG",
	.rs_source = RANDOM_PURE_CCP,
	.rs_read = random_ccp_read,
};

/*
 * ccp_populate_sglist() generates a scatter/gather list that covers the entire
 * crypto operation buffer.
 */
static int
ccp_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
	int error;

	sglist_reset(sg);
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		error = sglist_append_mbuf(sg, crp->crp_mbuf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		error = sglist_append_uio(sg, crp->crp_uio);
	else
		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
	return (error);
}

/*
 * Handle a GCM request with an empty payload by performing the
 * operation in software.  Derived from swcr_authenc().
 */
static void
ccp_gcm_soft(struct ccp_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	struct aes_gmac_ctx gmac_ctx;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int i, len;

	/*
	 * This assumes a 12-byte IV from the crp.  See longer comment
	 * above in ccp_gcm() for more details.
	 */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			arc4rand(iv, 12, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	}
	*(uint32_t *)&iv[12] = htobe32(1);

	/* Initialize the MAC. */
	AES_GMAC_Init(&gmac_ctx);
	AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
	AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
		len = imin(crda->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	}

	/* Length block. */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
	AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	AES_GMAC_Final(digest, &gmac_ctx);

	if (crde->crd_flags & CRD_F_ENCRYPT) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest), digest);
		crp->crp_etype = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest2), digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0)
			crp->crp_etype = 0;
		else
			crp->crp_etype = EBADMSG;
	}
	crypto_done(crp);
}

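/*
 * Match the PCI device ID against the table of supported CCP devices and
 * set the device description on a hit.
 */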
static int
ccp_probe(device_t dev)
{
	struct pciid *ip;
	uint32_t id;

	id = pci_get_devid(dev);
	for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) {
		if (id == ip->devid) {
			device_set_desc(dev, ip->desc);
			return (0);
		}
	}
	return (ENXIO);
}

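/*
 * Initialize the per-queue state: back-pointer to the softc, queue index,
 * queue lock, and the scatter/gather lists used to describe operation
 * buffers.
 */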
static void
ccp_initialize_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		qp->cq_softc = sc;
		qp->cq_qindex = i;
		mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF);
		/* XXX - arbitrarily chosen sizes */
		qp->cq_sg_crp = sglist_alloc(32, M_WAITOK);
		/* Two more SGEs than sg_crp to accommodate ipad. */
		qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK);
		qp->cq_sg_dst = sglist_alloc(2, M_WAITOK);
	}
}

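/* Tear down the per-queue locks and scatter/gather lists. */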
static void
ccp_free_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		mtx_destroy(&qp->cq_lock);
		sglist_free(qp->cq_sg_crp);
		sglist_free(qp->cq_sg_ulptx);
		sglist_free(qp->cq_sg_dst);
	}
}

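/*
 * Attach: register with the crypto framework, attach the hardware,
 * initialize the queues, register the TRNG as an entropy source for the
 * first instance, and advertise the supported algorithms.
 */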
static int
ccp_attach(device_t dev)
{
	struct ccp_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	sc->cid = crypto_get_driverid(dev, sizeof(struct ccp_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}

	error = ccp_hw_attach(dev);
	if (error != 0)
		return (error);

	mtx_init(&sc->lock, "ccp", NULL, MTX_DEF);

	ccp_initialize_queues(sc);

	if (g_ccp_softc == NULL) {
		g_ccp_softc = sc;
		if ((sc->hw_features & VERSION_CAP_TRNG) != 0)
			random_source_register(&random_ccp);
	}

	if ((sc->hw_features & VERSION_CAP_AES) != 0) {
		crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
	}
	if ((sc->hw_features & VERSION_CAP_SHA) != 0) {
		crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_384_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_512_HMAC, 0, 0);
	}

	return (0);
}

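/*
 * Detach: mark the softc as detaching, unregister from the crypto and
 * random frameworks, then release hardware and per-queue resources.
 */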
static int
ccp_detach(device_t dev)
{
	struct ccp_softc *sc;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);
	if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0)
		random_source_deregister(&random_ccp);

	ccp_hw_detach(dev);
	ccp_free_queues(sc);

	if (g_ccp_softc == sc)
		g_ccp_softc = NULL;

	mtx_destroy(&sc->lock);
	return (0);
}

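/*
 * Precompute the HMAC ipad and opad blocks for a session from the given
 * key ("klen" is in bits).
 */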
static void
ccp_init_hmac_digest(struct ccp_session *s, int cri_alg, char *key,
    int klen)
{
	union authctx auth_ctx;
	struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	axf = s->hmac.auth_hash;
	klen /= 8;
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		explicit_bzero(&auth_ctx, sizeof(auth_ctx));
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}
}

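/*
 * Validate an AES key length ("klen" is in bits) for the given algorithm;
 * AES-XTS uses a double-length key, so only 256- and 512-bit keys are
 * accepted for it.
 */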
static int
ccp_aes_check_keylen(int alg, int klen)
{

	switch (klen) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

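/*
 * Record the AES key and derive the hardware cipher type from its size;
 * for XTS the supplied key is two concatenated AES keys, so only half of
 * it counts toward the cipher type.
 */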
static void
ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen)
{
	unsigned kbits;

	if (alg == CRYPTO_AES_XTS)
		kbits = klen / 2;
	else
		kbits = klen;

	switch (kbits) {
	case 128:
		s->blkcipher.cipher_type = CCP_AES_TYPE_128;
		break;
	case 192:
		s->blkcipher.cipher_type = CCP_AES_TYPE_192;
		break;
	case 256:
		s->blkcipher.cipher_type = CCP_AES_TYPE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen / 8;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
}

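/*
 * Create a session: parse the cryptoini list, pick a hardware queue,
 * classify the request as GCM, AUTHENC, HMAC, or BLKCIPHER, and store the
 * key material and per-mode parameters.
 */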
static int
ccp_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
	struct ccp_softc *sc;
	struct ccp_session *s;
	struct auth_hash *auth_hash;
	struct cryptoini *c, *hash, *cipher;
	enum ccp_aes_mode cipher_mode;
	unsigned auth_mode, iv_len;
	unsigned partial_digest_len;
	unsigned q;
	int error;
	bool gcm_hash;

	if (cri == NULL)
		return (EINVAL);

	s = crypto_get_driver_session(cses);

	gcm_hash = false;
	cipher = NULL;
	hash = NULL;
	auth_hash = NULL;
	/* XXX reconcile auth_mode with use by ccp_sha */
	auth_mode = 0;
	cipher_mode = CCP_AES_MODE_ECB;
	iv_len = 0;
	partial_digest_len = 0;
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			if (hash)
				return (EINVAL);
			hash = c;
			switch (c->cri_alg) {
			case CRYPTO_SHA1_HMAC:
				auth_hash = &auth_hash_hmac_sha1;
				auth_mode = SHA1;
				partial_digest_len = SHA1_HASH_LEN;
				break;
			case CRYPTO_SHA2_256_HMAC:
				auth_hash = &auth_hash_hmac_sha2_256;
				auth_mode = SHA2_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_384_HMAC:
				auth_hash = &auth_hash_hmac_sha2_384;
				auth_mode = SHA2_384;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_SHA2_512_HMAC:
				auth_hash = &auth_hash_hmac_sha2_512;
				auth_mode = SHA2_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_AES_128_NIST_GMAC:
			case CRYPTO_AES_192_NIST_GMAC:
			case CRYPTO_AES_256_NIST_GMAC:
				gcm_hash = true;
#if 0
				auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
#endif
				break;
			}
			break;
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_XTS:
			if (cipher)
				return (EINVAL);
			cipher = c;
			switch (c->cri_alg) {
			case CRYPTO_AES_CBC:
				cipher_mode = CCP_AES_MODE_CBC;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_ICM:
				cipher_mode = CCP_AES_MODE_CTR;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_NIST_GCM_16:
				cipher_mode = CCP_AES_MODE_GCTR;
				iv_len = AES_GCM_IV_LEN;
				break;
			case CRYPTO_AES_XTS:
				cipher_mode = CCP_AES_MODE_XTS;
				iv_len = AES_BLOCK_LEN;
				break;
			}
			if (c->cri_key != NULL) {
				error = ccp_aes_check_keylen(c->cri_alg,
				    c->cri_klen);
				if (error != 0)
					return (error);
			}
			break;
		default:
			return (EINVAL);
		}
	}
	if (gcm_hash != (cipher_mode == CCP_AES_MODE_GCTR))
		return (EINVAL);
	if (hash == NULL && cipher == NULL)
		return (EINVAL);
	if (hash != NULL && hash->cri_key == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}

	/* Just grab the first usable queue for now. */
	for (q = 0; q < nitems(sc->queues); q++)
		if ((sc->valid_queues & (1 << q)) != 0)
			break;
	if (q == nitems(sc->queues)) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	s->queue = q;

	if (gcm_hash)
		s->mode = GCM;
	else if (hash != NULL && cipher != NULL)
		s->mode = AUTHENC;
	else if (hash != NULL)
		s->mode = HMAC;
	else {
		MPASS(cipher != NULL);
		s->mode = BLKCIPHER;
	}
	if (gcm_hash) {
		if (hash->cri_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = hash->cri_mlen;
	} else if (hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.partial_digest_len = partial_digest_len;
		if (hash->cri_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = hash->cri_mlen;
		ccp_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
		    hash->cri_klen);
	}
	if (cipher != NULL) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = iv_len;
		if (cipher->cri_key != NULL)
			ccp_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
			    cipher->cri_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	return (0);
}

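/* Mark a session inactive; warn if requests are still outstanding. */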
static void
ccp_freesession(device_t dev, crypto_session_t cses)
{
	struct ccp_session *s;

	s = crypto_get_driver_session(cses);

	if (s->pending != 0)
		device_printf(dev,
		    "session %p freed with %d pending requests\n", s,
		    s->pending);
	s->active = false;
}

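/*
 * Dispatch one crypto request: reserve space on the session's hardware
 * queue, build the source scatter/gather list, and hand the request to the
 * mode-specific handler.  Errors complete the request immediately with
 * crp_etype set.
 */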
static int
ccp_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ccp_softc *sc;
	struct ccp_queue *qp;
	struct ccp_session *s;
	struct cryptodesc *crd, *crda, *crde;
	int error;
	bool qpheld;

	qpheld = false;
	qp = NULL;
	if (crp == NULL)
		return (EINVAL);

	crd = crp->crp_desc;
	s = crypto_get_driver_session(crp->crp_session);
	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	qp = &sc->queues[s->queue];
	mtx_unlock(&sc->lock);
	error = ccp_queue_acquire_reserve(qp, 1 /* placeholder */, M_NOWAIT);
	if (error != 0)
		goto out;
	qpheld = true;

	error = ccp_populate_sglist(qp->cq_sg_crp, crp);
	if (error != 0)
		goto out;

	switch (s->mode) {
	case HMAC:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
			ccp_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		error = ccp_hmac(qp, s, crp);
		break;
	case BLKCIPHER:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crd->crd_alg,
			    crd->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		}
		error = ccp_blkcipher(qp, s, crp);
		break;
	case AUTHENC:
		error = 0;
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			/* Only encrypt-then-authenticate supported. */
			crde = crd;
			crda = crd->crd_next;
			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
				error = EINVAL;
				break;
			}
			s->cipher_first = true;
			break;
		default:
			crda = crd;
			crde = crd->crd_next;
			if (crde->crd_flags & CRD_F_ENCRYPT) {
				error = EINVAL;
				break;
			}
			s->cipher_first = false;
			break;
		}
		if (error != 0)
			break;
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccp_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
			    crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		error = ccp_authenc(qp, s, crp, crda, crde);
		break;
	case GCM:
		error = 0;
		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
			crde = crd;
			crda = crd->crd_next;
			s->cipher_first = true;
		} else {
			crda = crd;
			crde = crd->crd_next;
			s->cipher_first = false;
		}
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		if (crde->crd_len == 0) {
			mtx_unlock(&qp->cq_lock);
			ccp_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		error = ccp_gcm(qp, s, crp, crda, crde);
		break;
	}

	if (error == 0)
		s->pending++;

out:
	if (qpheld) {
		if (error != 0) {
			/*
			 * Squash EAGAIN so callers don't uselessly and
			 * expensively retry if the ring was full.
			 */
			if (error == EAGAIN)
				error = ENOMEM;
			ccp_queue_abort(qp);
		} else
			ccp_queue_release(qp);
	}

	if (error != 0) {
		DPRINTF(dev, "%s: early error:%d\n", __func__, error);
		crp->crp_etype = error;
		crypto_done(crp);
	}
	return (0);
}

static device_method_t ccp_methods[] = {
	DEVMETHOD(device_probe,		ccp_probe),
	DEVMETHOD(device_attach,	ccp_attach),
	DEVMETHOD(device_detach,	ccp_detach),

	DEVMETHOD(cryptodev_newsession,	ccp_newsession),
	DEVMETHOD(cryptodev_freesession, ccp_freesession),
	DEVMETHOD(cryptodev_process,	ccp_process),

	DEVMETHOD_END
};

static driver_t ccp_driver = {
	"ccp",
	ccp_methods,
	sizeof(struct ccp_softc)
};

static devclass_t ccp_devclass;
DRIVER_MODULE(ccp, pci, ccp_driver, ccp_devclass, NULL, NULL);
MODULE_VERSION(ccp, 1);
MODULE_DEPEND(ccp, crypto, 1, 1, 1);
MODULE_DEPEND(ccp, random_device, 1, 1, 1);
#if 0	/* There are enough known issues that we shouldn't load automatically */
MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids, sizeof(ccp_ids[0]),
    nitems(ccp_ids));
#endif

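/*
 * Wait (M_WAITOK) or fail (M_NOWAIT) until at least "n" descriptor slots
 * are free on the queue's ring.  Called with the queue lock held.
 */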
static int
ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags)
{
	struct ccp_softc *sc;

	mtx_assert(&qp->cq_lock, MA_OWNED);
	sc = qp->cq_softc;

	if (n < 1 || n >= (1 << sc->ring_size_order))
		return (EINVAL);

	while (true) {
		if (ccp_queue_get_ring_space(qp) >= n)
			return (0);
		if ((mflags & M_WAITOK) == 0)
			return (EAGAIN);
		qp->cq_waiting = true;
		msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0);
	}
}

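/*
 * Lock the queue and reserve space for "n" descriptors, remembering the
 * current tail so a failed transaction can be rolled back.  The queue lock
 * is held on successful return.
 */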
int
ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
{
	int error;

	mtx_lock(&qp->cq_lock);
	qp->cq_acq_tail = qp->cq_tail;
	error = ccp_queue_reserve_space(qp, n, mflags);
	if (error != 0)
		mtx_unlock(&qp->cq_lock);
	return (error);
}

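/*
 * Commit any descriptors appended since the queue was acquired by
 * publishing the new tail via ccp_queue_write_tail(), then drop the queue
 * lock.
 */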
void
ccp_queue_release(struct ccp_queue *qp)
{

	mtx_assert(&qp->cq_lock, MA_OWNED);
	if (qp->cq_tail != qp->cq_acq_tail) {
		wmb();
		ccp_queue_write_tail(qp);
	}
	mtx_unlock(&qp->cq_lock);
}

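/*
 * Discard the descriptors appended since the queue was acquired, rewinding
 * the tail to its reserved position, then drop the queue lock.
 */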
void
ccp_queue_abort(struct ccp_queue *qp)
{
	unsigned i;

	mtx_assert(&qp->cq_lock, MA_OWNED);

	/* Wipe out any descriptors associated with this aborted txn. */
	for (i = qp->cq_acq_tail; i != qp->cq_tail;
	    i = (i + 1) % (1 << qp->cq_softc->ring_size_order)) {
		memset(&qp->desc_ring[i], 0, sizeof(qp->desc_ring[i]));
	}
	qp->cq_tail = qp->cq_acq_tail;

	mtx_unlock(&qp->cq_lock);
}

#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
static void
db_show_ccp_sc(struct ccp_softc *sc)
{

	db_printf("ccp softc at %p\n", sc);
	db_printf(" cid: %d\n", (int)sc->cid);

	db_printf(" lock: ");
	db_show_lock(&sc->lock);

	db_printf(" detaching: %d\n", (int)sc->detaching);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);

	db_printf(" hw_version: %d\n", (int)sc->hw_version);
	db_printf(" hw_features: %b\n", (int)sc->hw_features,
	    "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA"
	    "\11SHA\0103DES\07AES");

	db_printf(" hw status:\n");
	db_ccp_show_hw(sc);
}

static void
db_show_ccp_qp(struct ccp_queue *qp)
{

	db_printf(" lock: ");
	db_show_lock(&qp->cq_lock);

	db_printf(" cq_qindex: %u\n", qp->cq_qindex);
	db_printf(" cq_softc: %p\n", qp->cq_softc);

	db_printf(" head: %u\n", qp->cq_head);
	db_printf(" tail: %u\n", qp->cq_tail);
	db_printf(" acq_tail: %u\n", qp->cq_acq_tail);
	db_printf(" desc_ring: %p\n", qp->desc_ring);
	db_printf(" completions_ring: %p\n", qp->completions_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)qp->desc_ring_bus_addr);

	db_printf(" hw status:\n");
	db_ccp_show_queue_hw(qp);
}

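/* DDB "show ccp <unit>[,<qindex>]" command. */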
DB_SHOW_COMMAND(ccp, db_show_ccp)
{
	struct ccp_softc *sc;
	unsigned unit, qindex;

	if (!have_addr)
		goto usage;

	unit = (unsigned)addr;

	sc = devclass_get_softc(ccp_devclass, unit);
	if (sc == NULL) {
		db_printf("No such device ccp%u\n", unit);
		goto usage;
	}

	if (count == -1) {
		db_show_ccp_sc(sc);
		return;
	}

	qindex = (unsigned)count;
	if (qindex >= nitems(sc->queues)) {
		db_printf("No such queue %u\n", qindex);
		goto usage;
	}
	db_show_ccp_qp(&sc->queues[qindex]);
	return;

usage:
	db_printf("usage: show ccp <unit>[,<qindex>]\n");
	return;
}
#endif /* DDB */