/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Rubicon Communications, LLC (Netgate)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>

#include <crypto/rijndael/rijndael.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "cryptodev_if.h"

#include "safexcel_reg.h"
#include "safexcel_var.h"

static MALLOC_DEFINE(M_SAFEXCEL, "safexcel_req", "safexcel request buffers");

/*
 * We only support the EIP97 for now.
 */
static struct ofw_compat_data safexcel_compat[] = {
	{ "inside-secure,safexcel-eip97ies",	(uintptr_t)97 },
	{ "inside-secure,safexcel-eip97",	(uintptr_t)97 },
	{ NULL,					0 }
};

const struct safexcel_reg_offsets eip97_regs_offset = {
	.hia_aic	= SAFEXCEL_EIP97_HIA_AIC_BASE,
	.hia_aic_g	= SAFEXCEL_EIP97_HIA_AIC_G_BASE,
	.hia_aic_r	= SAFEXCEL_EIP97_HIA_AIC_R_BASE,
	.hia_aic_xdr	= SAFEXCEL_EIP97_HIA_AIC_xDR_BASE,
	.hia_dfe	= SAFEXCEL_EIP97_HIA_DFE_BASE,
	.hia_dfe_thr	= SAFEXCEL_EIP97_HIA_DFE_THR_BASE,
	.hia_dse	= SAFEXCEL_EIP97_HIA_DSE_BASE,
	.hia_dse_thr	= SAFEXCEL_EIP97_HIA_DSE_THR_BASE,
	.hia_gen_cfg	= SAFEXCEL_EIP97_HIA_GEN_CFG_BASE,
	.pe		= SAFEXCEL_EIP97_PE_BASE,
};

const struct safexcel_reg_offsets eip197_regs_offset = {
	.hia_aic	= SAFEXCEL_EIP197_HIA_AIC_BASE,
	.hia_aic_g	= SAFEXCEL_EIP197_HIA_AIC_G_BASE,
	.hia_aic_r	= SAFEXCEL_EIP197_HIA_AIC_R_BASE,
	.hia_aic_xdr	= SAFEXCEL_EIP197_HIA_AIC_xDR_BASE,
	.hia_dfe	= SAFEXCEL_EIP197_HIA_DFE_BASE,
	.hia_dfe_thr	= SAFEXCEL_EIP197_HIA_DFE_THR_BASE,
	.hia_dse	= SAFEXCEL_EIP197_HIA_DSE_BASE,
	.hia_dse_thr	= SAFEXCEL_EIP197_HIA_DSE_THR_BASE,
	.hia_gen_cfg	= SAFEXCEL_EIP197_HIA_GEN_CFG_BASE,
	.pe		= SAFEXCEL_EIP197_PE_BASE,
};

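/*
 * Pop the next command descriptor from the command descriptor ring: return
 * the entry at the current read index and advance the index.  Returns NULL
 * if the ring is empty.
 */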
static struct safexcel_cmd_descr *
safexcel_cmd_descr_next(struct safexcel_cmd_descr_ring *ring)
{
	struct safexcel_cmd_descr *cdesc;

	if (ring->write == ring->read)
		return (NULL);
	cdesc = &ring->desc[ring->read];
	ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
	return (cdesc);
}

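/*
 * The same, for the result descriptor ring.
 */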
static struct safexcel_res_descr *
safexcel_res_descr_next(struct safexcel_res_descr_ring *ring)
{
	struct safexcel_res_descr *rdesc;

	if (ring->write == ring->read)
		return (NULL);
	rdesc = &ring->desc[ring->read];
	ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
	return (rdesc);
}

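/*
 * Take a request from the ring's free list, or return NULL if none are
 * available.
 */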
static struct safexcel_request *
safexcel_alloc_request(struct safexcel_softc *sc, struct safexcel_ring *ring)
{
	struct safexcel_request *req;

	mtx_assert(&ring->mtx, MA_OWNED);

	if ((req = STAILQ_FIRST(&ring->free_requests)) != NULL)
		STAILQ_REMOVE_HEAD(&ring->free_requests, link);
	return (req);
}

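/*
 * Return a request to the ring's free list, first unloading its DMA map and
 * scrubbing any key material and IV state left in the context record.
 */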
static void
safexcel_free_request(struct safexcel_ring *ring, struct safexcel_request *req)
{
	struct safexcel_context_record *ctx;

	mtx_assert(&ring->mtx, MA_OWNED);

	if (req->dmap_loaded) {
		bus_dmamap_unload(ring->data_dtag, req->dmap);
		req->dmap_loaded = false;
	}
	ctx = (struct safexcel_context_record *)req->ctx.vaddr;
	explicit_bzero(ctx->data, sizeof(ctx->data));
	explicit_bzero(req->iv, sizeof(req->iv));
	STAILQ_INSERT_TAIL(&ring->free_requests, req, link);
}

static void
safexcel_enqueue_request(struct safexcel_softc *sc, struct safexcel_ring *ring,
    struct safexcel_request *req)
{
	mtx_assert(&ring->mtx, MA_OWNED);

	STAILQ_INSERT_TAIL(&ring->ready_requests, req, link);
}

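/*
 * Process completions on a result descriptor ring: for each request completed
 * by the engine, reclaim its command and result descriptors, record any error
 * reported in the result data, and pass the request back to the crypto
 * framework.
 */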
static void
safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx)
{
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_res_descr *rdesc;
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	uint32_t error, i, ncdescs, nrdescs, nreqs;

	ring = &sc->sc_ring[ringidx];

	mtx_lock(&ring->mtx);
	nreqs = SAFEXCEL_READ(sc,
	    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT);
	nreqs >>= SAFEXCEL_xDR_PROC_xD_PKT_OFFSET;
	nreqs &= SAFEXCEL_xDR_PROC_xD_PKT_MASK;
	if (nreqs == 0) {
		SAFEXCEL_DPRINTF(sc, 1,
		    "zero pending requests on ring %d\n", ringidx);
		goto out;
	}

	bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ncdescs = nrdescs = 0;
	for (i = 0; i < nreqs; i++) {
		req = STAILQ_FIRST(&ring->queued_requests);
		KASSERT(req != NULL, ("%s: expected %d pending requests",
		    __func__, nreqs));
		STAILQ_REMOVE_HEAD(&ring->queued_requests, link);
		mtx_unlock(&ring->mtx);

		bus_dmamap_sync(req->ctx.tag, req->ctx.map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(ring->data_dtag, req->dmap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		ncdescs += req->cdescs;
		while (req->cdescs-- > 0) {
			cdesc = safexcel_cmd_descr_next(&ring->cdr);
			KASSERT(cdesc != NULL,
			    ("%s: missing control descriptor", __func__));
			if (req->cdescs == 0)
				KASSERT(cdesc->last_seg,
				    ("%s: chain is not terminated", __func__));
		}
		nrdescs += req->rdescs;
		while (req->rdescs-- > 0) {
			rdesc = safexcel_res_descr_next(&ring->rdr);
			error = rdesc->result_data.error_code;
			if (error != 0) {
				if (error == SAFEXCEL_RESULT_ERR_AUTH_FAILED &&
				    req->crp->crp_etype == 0) {
					req->crp->crp_etype = EBADMSG;
				} else {
					SAFEXCEL_DPRINTF(sc, 1,
					    "error code %#x\n", error);
					req->crp->crp_etype = EIO;
				}
			}
		}

		crypto_done(req->crp);
		mtx_lock(&ring->mtx);
		safexcel_free_request(ring, req);
	}

	if (nreqs != 0) {
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_xD_PKT(nreqs) |
		    (sc->sc_config.rd_offset * nrdescs * sizeof(uint32_t)));
	}
out:
	if (!STAILQ_EMPTY(&ring->queued_requests)) {
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
		    SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | 1);
	}
	mtx_unlock(&ring->mtx);
}

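/*
 * Per-ring interrupt handler: acknowledge CDR and RDR interrupts, process
 * completed requests if the RDR raised a non-error interrupt, and unblock the
 * crypto framework if request submission was previously blocked on a full
 * ring.
 */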
static void
safexcel_ring_intr(void *arg)
{
	struct safexcel_softc *sc;
	struct safexcel_intr_handle *ih;
	uint32_t status, stat;
	int ring;
	bool blocked, rdrpending;

	ih = arg;
	sc = ih->sc;
	ring = ih->ring;

	status = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
	    SAFEXCEL_HIA_AIC_R_ENABLED_STAT(ring));
	/* CDR interrupts */
	if (status & SAFEXCEL_CDR_IRQ(ring)) {
		stat = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
		    stat & SAFEXCEL_CDR_INTR_MASK);
	}
	/* RDR interrupts */
	rdrpending = false;
	if (status & SAFEXCEL_RDR_IRQ(ring)) {
		stat = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
		if ((stat & SAFEXCEL_xDR_ERR) == 0)
			rdrpending = true;
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
		    stat & SAFEXCEL_RDR_INTR_MASK);
	}
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ACK(ring),
	    status);

	if (rdrpending)
		safexcel_rdr_intr(sc, ring);

	mtx_lock(&sc->sc_mtx);
	blocked = sc->sc_blocked;
	sc->sc_blocked = 0;
	mtx_unlock(&sc->sc_mtx);

	if (blocked)
		crypto_unblock(sc->sc_cid, blocked);
}

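/*
 * Probe the hardware configuration: count the usable ring AICs, verify that
 * the engine supports 64-bit addressing and imposes no alignment constraints,
 * and derive the ring count, descriptor sizes and offsets from the HIA
 * options register.
 */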
static int
safexcel_configure(struct safexcel_softc *sc)
{
	uint32_t i, mask, pemask, reg;
	device_t dev;

	if (sc->sc_type == 197) {
		sc->sc_offsets = eip197_regs_offset;
		pemask = SAFEXCEL_N_PES_MASK;
	} else {
		sc->sc_offsets = eip97_regs_offset;
		pemask = EIP97_N_PES_MASK;
	}

	dev = sc->sc_dev;

	/* Scan for valid ring interrupt controllers. */
	for (i = 0; i < SAFEXCEL_MAX_RING_AIC; i++) {
		reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
		    SAFEXCEL_HIA_AIC_R_VERSION(i));
		if (SAFEXCEL_REG_LO16(reg) != EIP201_VERSION_LE)
			break;
	}
	sc->sc_config.aic_rings = i;
	if (sc->sc_config.aic_rings == 0)
		return (-1);

	reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_OPTIONS);
	/* Check for 64-bit addressing. */
	if ((reg & SAFEXCEL_OPT_ADDR_64) == 0)
		return (-1);
	/* Check alignment constraints (which we do not support). */
	if (((reg & SAFEXCEL_OPT_TGT_ALIGN_MASK) >>
	    SAFEXCEL_OPT_TGT_ALIGN_OFFSET) != 0)
		return (-1);

	sc->sc_config.hdw =
	    (reg & SAFEXCEL_xDR_HDW_MASK) >> SAFEXCEL_xDR_HDW_OFFSET;
	mask = (1 << sc->sc_config.hdw) - 1;

	sc->sc_config.rings = reg & SAFEXCEL_N_RINGS_MASK;
	/* Limit the number of rings to the number of ring AICs. */
	sc->sc_config.rings = MIN(sc->sc_config.rings, sc->sc_config.aic_rings);

	sc->sc_config.pes = (reg & pemask) >> SAFEXCEL_N_PES_OFFSET;

	sc->sc_config.cd_size =
	    sizeof(struct safexcel_cmd_descr) / sizeof(uint32_t);
	sc->sc_config.cd_offset = (sc->sc_config.cd_size + mask) & ~mask;

	sc->sc_config.rd_size =
	    sizeof(struct safexcel_res_descr) / sizeof(uint32_t);
	sc->sc_config.rd_offset = (sc->sc_config.rd_size + mask) & ~mask;

	sc->sc_config.atok_offset =
	    (SAFEXCEL_MAX_ATOKENS * sizeof(struct safexcel_instr) + mask) &
	    ~mask;

	return (0);
}

static void
safexcel_init_hia_bus_access(struct safexcel_softc *sc)
{
	uint32_t version, val;

	/* Determine endianness and configure byte swap. */
	version = SAFEXCEL_READ(sc,
	    SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_VERSION);
	val = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
	if (SAFEXCEL_REG_HI16(version) == SAFEXCEL_HIA_VERSION_BE) {
		val = val ^ (SAFEXCEL_MST_CTRL_NO_BYTE_SWAP >> 24);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL,
		    val);
	}

	/* Configure wr/rd cache values. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_GEN_CFG(sc) + SAFEXCEL_HIA_MST_CTRL,
	    SAFEXCEL_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	    SAFEXCEL_MST_CTRL_WD_CACHE(WR_CACHE_4BITS));
}

static void
safexcel_disable_global_interrupts(struct safexcel_softc *sc)
{
	/* Disable and clear pending interrupts. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ENABLE_CTRL, 0);
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
	    SAFEXCEL_AIC_G_ACK_ALL_MASK);
}

/*
 * Configure the data fetch engine.  This component parses command descriptors
 * and sets up DMA transfers from host memory to the corresponding processing
 * engine.
 */
static void
safexcel_configure_dfe_engine(struct safexcel_softc *sc, int pe)
{
	/* Reset all DFE threads. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_RESET_PE);

	/* Deassert the DFE reset. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 0);

	/* DMA transfer size to use. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE(sc) + SAFEXCEL_HIA_DFE_CFG(pe),
	    SAFEXCEL_HIA_DFE_CFG_DIS_DEBUG |
	    SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(9) |
	    SAFEXCEL_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_CTRL_SIZE(7) |
	    SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS) |
	    SAFEXCEL_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS));

	/* Configure the PE DMA transfer thresholds. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_DBUF_THRES(pe),
	    SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
	    SAFEXCEL_PE_IN_xBUF_THRES_MAX(9));
	SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_TBUF_THRES(pe),
	    SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
	    SAFEXCEL_PE_IN_xBUF_THRES_MAX(7));
}

/*
 * Configure the data store engine.  This component parses result descriptors
 * and sets up DMA transfers from the processing engine to host memory.
 */
static int
safexcel_configure_dse(struct safexcel_softc *sc, int pe)
{
	uint32_t val;
	int count;

	/* Disable and reset all DSE threads. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_RESET_PE);

	/* Wait up to one second for the threads to go idle. */
	for (count = 0;;) {
		val = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_STAT(pe));
		if ((val & SAFEXCEL_DSE_THR_RDR_ID_MASK) ==
		    SAFEXCEL_DSE_THR_RDR_ID_MASK)
			break;
		if (count++ > 10000) {
			device_printf(sc->sc_dev, "DSE reset timeout\n");
			return (-1);
		}
		DELAY(100);
	}

	/* Exit the reset state. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 0);

	/* DMA transfer size to use. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE(sc) + SAFEXCEL_HIA_DSE_CFG(pe),
	    SAFEXCEL_HIA_DSE_CFG_DIS_DEBUG |
	    SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(8) |
	    SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS) |
	    SAFEXCEL_HIA_DSE_CFG_ALLWAYS_BUFFERABLE);

	/* Configure the processing engine thresholds. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_PE(sc) + SAFEXCEL_PE_OUT_DBUF_THRES(pe),
	    SAFEXCEL_PE_OUT_DBUF_THRES_MIN(7) |
	    SAFEXCEL_PE_OUT_DBUF_THRES_MAX(8));

	return (0);
}

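/*
 * Bring the command and result descriptor rings to a known state: clear ring
 * interrupts, disable external triggering, reset the prepared and processed
 * counters and pointers, and program the ring sizes.
 */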
static void
safexcel_hw_prepare_rings(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		/*
		 * Command descriptors.
		 */

		/* Clear interrupts for this ring. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
		    SAFEXCEL_HIA_AIC_R_ENABLE_CLR_ALL_MASK);

		/* Disable external triggering. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
		    SAFEXCEL_RING_SIZE * sc->sc_config.cd_offset *
		    sizeof(uint32_t));

		/*
		 * Result descriptors.
		 */

		/* Disable external triggering. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		/* Ring size. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
		    SAFEXCEL_RING_SIZE * sc->sc_config.rd_offset *
		    sizeof(uint32_t));
	}
}

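/*
 * Program each ring's base address, descriptor size and fetch parameters, and
 * enable the result descriptor ring interrupts.
 */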
static void
safexcel_hw_setup_rings(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	uint32_t cd_size_rnd, mask, rd_size_rnd, val;
	int i;

	mask = (1 << sc->sc_config.hdw) - 1;
	cd_size_rnd = (sc->sc_config.cd_size + mask) >> sc->sc_config.hdw;
	val = (sizeof(struct safexcel_res_descr) -
	    sizeof(struct safexcel_res_data)) / sizeof(uint32_t);
	rd_size_rnd = (val + mask) >> sc->sc_config.hdw;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		/*
		 * Command descriptors.
		 */

		/* Ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
		    SAFEXCEL_ADDR_LO(ring->cdr.dma.paddr));
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
		    SAFEXCEL_ADDR_HI(ring->cdr.dma.paddr));

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
		    SAFEXCEL_xDR_DESC_MODE_64BIT | SAFEXCEL_CDR_DESC_MODE_ADCP |
		    (sc->sc_config.cd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
		    sc->sc_config.cd_size);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
		    ((SAFEXCEL_FETCH_COUNT * (cd_size_rnd << sc->sc_config.hdw)) <<
		      SAFEXCEL_xDR_xD_FETCH_THRESH) |
		    (SAFEXCEL_FETCH_COUNT * sc->sc_config.cd_offset));

		/* Configure DMA tx control. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
		    SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS));

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_CDR_INTR_MASK);

		/*
		 * Result descriptors.
		 */

		/* Ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
		    SAFEXCEL_ADDR_LO(ring->rdr.dma.paddr));
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
		    SAFEXCEL_ADDR_HI(ring->rdr.dma.paddr));

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
		    SAFEXCEL_xDR_DESC_MODE_64BIT |
		    (sc->sc_config.rd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
		    sc->sc_config.rd_size);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
		    ((SAFEXCEL_FETCH_COUNT * (rd_size_rnd << sc->sc_config.hdw)) <<
		    SAFEXCEL_xDR_xD_FETCH_THRESH) |
		    (SAFEXCEL_FETCH_COUNT * sc->sc_config.rd_offset));

		/* Configure DMA tx control. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
		    SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_WR_RES_BUF | SAFEXCEL_HIA_xDR_WR_CTRL_BUF);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_RDR_INTR_MASK);

		/* Enable ring interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CTRL(i),
		    SAFEXCEL_RDR_IRQ(i));
	}
}

/* Reset the command and result descriptor rings. */
static void
safexcel_hw_reset_rings(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		/*
		 * Result descriptor ring operations.
		 */

		/* Reset ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_RDR_INTR_MASK);

		/* Disable ring interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
		    SAFEXCEL_RDR_IRQ(i));

		/*
		 * Command descriptor ring operations.
		 */

		/* Reset ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_CDR_INTR_MASK);
	}
}

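/*
 * Attach all descriptor rings to the processing engine's data fetch and data
 * store threads and enable them.
 */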
static void
safexcel_enable_pe_engine(struct safexcel_softc *sc, int pe)
{
	int i, ring_mask;

	for (ring_mask = 0, i = 0; i < sc->sc_config.rings; i++) {
		ring_mask <<= 1;
		ring_mask |= 1;
	}

	/* Enable command descriptor rings. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);

	/* Enable result descriptor rings. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);

	/* Clear any HIA interrupt. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
	    SAFEXCEL_AIC_G_ACK_HIA_MASK);
}

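/*
 * Move requests from the ready list to the queued list, bounded by the
 * maximum batch size, and hand the corresponding command and result
 * descriptors to the engine by advancing the prepared-descriptor counts.
 * If the ring was idle, also arm the packet-mode threshold interrupt so
 * that we are notified when the batch completes.
 */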
static void
safexcel_execute(struct safexcel_softc *sc, struct safexcel_ring *ring,
    struct safexcel_request *req)
{
	uint32_t ncdescs, nrdescs, nreqs;
	int ringidx;
	bool busy;

	mtx_assert(&ring->mtx, MA_OWNED);

	ringidx = req->sess->ringidx;
	if (STAILQ_EMPTY(&ring->ready_requests))
		return;
	busy = !STAILQ_EMPTY(&ring->queued_requests);
	ncdescs = nrdescs = nreqs = 0;
	while ((req = STAILQ_FIRST(&ring->ready_requests)) != NULL &&
	    req->cdescs + ncdescs <= SAFEXCEL_MAX_BATCH_SIZE &&
	    req->rdescs + nrdescs <= SAFEXCEL_MAX_BATCH_SIZE) {
		STAILQ_REMOVE_HEAD(&ring->ready_requests, link);
		STAILQ_INSERT_TAIL(&ring->queued_requests, req, link);
		ncdescs += req->cdescs;
		nrdescs += req->rdescs;
		nreqs++;
	}

	if (!busy) {
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
		    SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | nreqs);
	}
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
	    nrdescs * sc->sc_config.rd_offset * sizeof(uint32_t));
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_CDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
	    ncdescs * sc->sc_config.cd_offset * sizeof(uint32_t));
}

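/*
 * Initialize the software state of each ring: the lock, the request lists,
 * the ring indices, and the per-descriptor pointers into the additional
 * token memory.
 */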
static void
safexcel_init_rings(struct safexcel_softc *sc)
{
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_ring *ring;
	char buf[32];
	uint64_t atok;
	int i, j;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		snprintf(buf, sizeof(buf), "safexcel_ring%d", i);
		mtx_init(&ring->mtx, buf, NULL, MTX_DEF);
		STAILQ_INIT(&ring->free_requests);
		STAILQ_INIT(&ring->ready_requests);
		STAILQ_INIT(&ring->queued_requests);

		ring->cdr.read = ring->cdr.write = 0;
		ring->rdr.read = ring->rdr.write = 0;
		for (j = 0; j < SAFEXCEL_RING_SIZE; j++) {
			cdesc = &ring->cdr.desc[j];
			atok = ring->dma_atok.paddr +
			    sc->sc_config.atok_offset * j;
			cdesc->atok_lo = SAFEXCEL_ADDR_LO(atok);
			cdesc->atok_hi = SAFEXCEL_ADDR_HI(atok);
		}
	}
}

static void
safexcel_dma_alloc_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct safexcel_dma_mem *sdm;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
	sdm = arg;
	sdm->paddr = segs->ds_addr;
}

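/*
 * Allocate a block of DMA-able memory described by a single segment, and
 * record its bus address in the descriptor.
 */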
static int
safexcel_dma_alloc_mem(struct safexcel_softc *sc, struct safexcel_dma_mem *sdm,
    bus_size_t size)
{
	int error;

	KASSERT(sdm->vaddr == NULL,
	    ("%s: DMA memory descriptor in use.", __func__));

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size, 1,			/* maxsize, nsegments */
	    size, BUS_DMA_COHERENT,	/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sdm->tag);			/* dmat */
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate busdma tag, error %d\n", error);
		goto err1;
	}

	error = bus_dmamem_alloc(sdm->tag, (void **)&sdm->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sdm->map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA safe memory, error %d\n", error);
		goto err2;
	}

	error = bus_dmamap_load(sdm->tag, sdm->map, sdm->vaddr, size,
	    safexcel_dma_alloc_mem_cb, sdm, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot get address of the DMA memory, error %d\n", error);
		goto err3;
	}

	return (0);
err3:
	bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
err2:
	bus_dma_tag_destroy(sdm->tag);
err1:
	sdm->vaddr = NULL;

	return (error);
}

static void
safexcel_dma_free_mem(struct safexcel_dma_mem *sdm)
{
	bus_dmamap_unload(sdm->tag, sdm->map);
	bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
	bus_dma_tag_destroy(sdm->tag);
}

static void
safexcel_dma_free_rings(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];
		safexcel_dma_free_mem(&ring->cdr.dma);
		safexcel_dma_free_mem(&ring->dma_atok);
		safexcel_dma_free_mem(&ring->rdr.dma);
		bus_dma_tag_destroy(ring->data_dtag);
		mtx_destroy(&ring->mtx);
	}
}

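/*
 * For each ring, create the DMA tag used to map request data and allocate
 * DMA memory for the command and result descriptors and the additional
 * token memory.
 */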
static int
safexcel_dma_init(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	bus_size_t size;
	int error, i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		error = bus_dma_tag_create(
		    bus_get_dma_tag(sc->sc_dev),/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filtfunc, filtfuncarg */
		    SAFEXCEL_MAX_REQUEST_SIZE,	/* maxsize */
		    SAFEXCEL_MAX_FRAGMENTS,	/* nsegments */
		    SAFEXCEL_MAX_REQUEST_SIZE,	/* maxsegsz */
		    BUS_DMA_COHERENT,		/* flags */
		    NULL, NULL,			/* lockfunc, lockfuncarg */
		    &ring->data_dtag);		/* dmat */
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "bus_dma_tag_create main failed; error %d\n", error);
			return (error);
		}

		size = sizeof(uint32_t) * sc->sc_config.cd_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->cdr.dma, size);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to allocate CDR DMA memory, error %d\n",
			    error);
			goto err;
		}
		ring->cdr.desc =
		    (struct safexcel_cmd_descr *)ring->cdr.dma.vaddr;

		/* Allocate additional CDR token memory. */
		size = (bus_size_t)sc->sc_config.atok_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->dma_atok, size);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to allocate atoken DMA memory, error %d\n",
			    error);
			goto err;
		}

		size = sizeof(uint32_t) * sc->sc_config.rd_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->rdr.dma, size);
		if (error) {
			device_printf(sc->sc_dev,
			    "failed to allocate RDR DMA memory, error %d\n",
			    error);
			goto err;
		}
		ring->rdr.desc =
		    (struct safexcel_res_descr *)ring->rdr.dma.vaddr;
	}

	return (0);
err:
	safexcel_dma_free_rings(sc);
	return (error);
}

static void
safexcel_deinit_hw(struct safexcel_softc *sc)
{
	safexcel_hw_reset_rings(sc);
	safexcel_dma_free_rings(sc);
}

static int
safexcel_init_hw(struct safexcel_softc *sc)
{
	int pe;

	/* 23.3.7 Initialization */
	if (safexcel_configure(sc) != 0)
		return (EINVAL);

	if (safexcel_dma_init(sc) != 0)
		return (ENOMEM);

	safexcel_init_rings(sc);

	safexcel_init_hia_bus_access(sc);

	/* 23.3.7.2 Disable EIP-97 global Interrupts */
	safexcel_disable_global_interrupts(sc);

	for (pe = 0; pe < sc->sc_config.pes; pe++) {
		/* 23.3.7.3 Configure Data Fetch Engine */
		safexcel_configure_dfe_engine(sc, pe);

		/* 23.3.7.4 Configure Data Store Engine */
		if (safexcel_configure_dse(sc, pe)) {
			safexcel_deinit_hw(sc);
			return (-1);
		}

		/* 23.3.7.5 1. Protocol enables */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION_EN(pe),
		    0xffffffff);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION2_EN(pe),
		    0xffffffff);
	}

	safexcel_hw_prepare_rings(sc);

	/* 23.3.7.5 Configure the Processing Engine(s). */
	for (pe = 0; pe < sc->sc_config.pes; pe++)
		safexcel_enable_pe_engine(sc, pe);

	safexcel_hw_setup_rings(sc);

	return (0);
}

static int
safexcel_setup_dev_interrupts(struct safexcel_softc *sc)
{
	int i, j;

	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) {
		sc->sc_ih[i].sc = sc;
		sc->sc_ih[i].ring = i;

		if (bus_setup_intr(sc->sc_dev, sc->sc_intr[i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, safexcel_ring_intr,
		    &sc->sc_ih[i], &sc->sc_ih[i].handle)) {
			device_printf(sc->sc_dev,
			    "couldn't setup interrupt %d\n", i);
			goto err;
		}
	}

	return (0);

err:
	for (j = 0; j < i; j++)
		bus_teardown_intr(sc->sc_dev, sc->sc_intr[j],
		    sc->sc_ih[j].handle);

	return (ENXIO);
}

static void
safexcel_teardown_dev_interrupts(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) {
		if (sc->sc_intr[i] != NULL)
			bus_teardown_intr(sc->sc_dev, sc->sc_intr[i],
			    sc->sc_ih[i].handle);
	}
}

static int
safexcel_alloc_dev_resources(struct safexcel_softc *sc)
{
	char name[16];
	device_t dev;
	phandle_t node;
	int error, i, rid;

	dev = sc->sc_dev;
	node = ofw_bus_get_node(dev);

	rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't allocate memory resources\n");
		return (ENXIO);
	}

	for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) {
		(void)snprintf(name, sizeof(name), "ring%d", i);
		error = ofw_bus_find_string_index(node, "interrupt-names", name,
		    &rid);
		if (error != 0)
			break;

		sc->sc_intr[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_ACTIVE | RF_SHAREABLE);
		if (sc->sc_intr[i] == NULL) {
			error = ENXIO;
			goto out;
		}
	}
	if (i == 0) {
		device_printf(dev, "couldn't allocate interrupt resources\n");
		error = ENXIO;
		goto out;
	}

	mtx_init(&sc->sc_mtx, "safexcel softc", NULL, MTX_DEF);

	return (0);

out:
	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_res),
	    sc->sc_res);
	return (error);
}

static void
safexcel_free_dev_resources(struct safexcel_softc *sc)
{
	int i;

	mtx_destroy(&sc->sc_mtx);

	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
		bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
	if (sc->sc_res != NULL)
		bus_release_resource(sc->sc_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_res), sc->sc_res);
}

static int
safexcel_probe(device_t dev)
{
	struct safexcel_softc *sc;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->sc_type = ofw_bus_search_compatible(dev, safexcel_compat)->ocd_data;
	if (sc->sc_type == 0)
		return (ENXIO);

	device_set_desc(dev, "SafeXcel EIP-97 crypto accelerator");

	return (BUS_PROBE_DEFAULT);
}

static int
safexcel_attach(device_t dev)
{
	struct sysctl_ctx_list *sctx;
	struct safexcel_softc *sc;
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	int i, j, ringidx;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_blocked = 0;
	sc->sc_cid = -1;

	if (safexcel_alloc_dev_resources(sc))
		goto err;

	if (safexcel_setup_dev_interrupts(sc))
		goto err1;

	if (safexcel_init_hw(sc))
		goto err2;

	for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
		ring = &sc->sc_ring[ringidx];

		ring->cmd_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
		ring->res_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);

		ring->requests = mallocarray(SAFEXCEL_REQUESTS_PER_RING,
		    sizeof(struct safexcel_request), M_SAFEXCEL,
		    M_WAITOK | M_ZERO);

		for (i = 0; i < SAFEXCEL_REQUESTS_PER_RING; i++) {
			req = &ring->requests[i];
			req->sc = sc;
			if (bus_dmamap_create(ring->data_dtag,
			    BUS_DMA_COHERENT, &req->dmap) != 0) {
				for (j = 0; j < i; j++)
					bus_dmamap_destroy(ring->data_dtag,
					    ring->requests[j].dmap);
				goto err2;
			}
			if (safexcel_dma_alloc_mem(sc, &req->ctx,
			    sizeof(struct safexcel_context_record)) != 0) {
				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ring->data_dtag,
					    ring->requests[j].dmap);
					safexcel_dma_free_mem(
					    &ring->requests[j].ctx);
				}
				goto err2;
			}
			STAILQ_INSERT_TAIL(&ring->free_requests, req, link);
		}
	}

	sctx = device_get_sysctl_ctx(dev);
	SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, 0,
	    "Debug message verbosity");

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safexcel_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0)
		goto err2;

	return (0);

err2:
	safexcel_teardown_dev_interrupts(sc);
err1:
	safexcel_free_dev_resources(sc);
err:
	return (ENXIO);
}

static int
safexcel_detach(device_t dev)
{
	struct safexcel_ring *ring;
	struct safexcel_softc *sc;
	int i, ringidx;

	sc = device_get_softc(dev);

	if (sc->sc_cid >= 0)
		crypto_unregister_all(sc->sc_cid);
	for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
		ring = &sc->sc_ring[ringidx];
		for (i = 0; i < SAFEXCEL_REQUESTS_PER_RING; i++) {
			bus_dmamap_destroy(ring->data_dtag,
			    ring->requests[i].dmap);
			safexcel_dma_free_mem(&ring->requests[i].ctx);
		}
		free(ring->requests, M_SAFEXCEL);
		sglist_free(ring->cmd_data);
		sglist_free(ring->res_data);
	}
	safexcel_deinit_hw(sc);
	safexcel_teardown_dev_interrupts(sc);
	safexcel_free_dev_resources(sc);

	return (0);
}

/*
 * Populate the request's context record with pre-computed key material.
 */
static int
safexcel_set_context(struct safexcel_request *req)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp;
	struct safexcel_context_record *ctx;
	struct safexcel_session *sess;
	uint8_t *data;
	int off;

	crp = req->crp;
	csp = crypto_get_params(crp->crp_session);
	sess = req->sess;

	ctx = (struct safexcel_context_record *)req->ctx.vaddr;
	data = (uint8_t *)ctx->data;
	if (csp->csp_cipher_alg != 0) {
		if (crp->crp_cipher_key != NULL)
			memcpy(data, crp->crp_cipher_key, sess->klen);
		else
			memcpy(data, csp->csp_cipher_key, sess->klen);
		off = sess->klen;
	} else if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) {
		if (crp->crp_auth_key != NULL)
			memcpy(data, crp->crp_auth_key, sess->klen);
		else
			memcpy(data, csp->csp_auth_key, sess->klen);
		off = sess->klen;
	} else {
		off = 0;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_NIST_GCM_16:
		memcpy(data + off, sess->ghash_key, GMAC_BLOCK_LEN);
		off += GMAC_BLOCK_LEN;
		break;
	case CRYPTO_AES_CCM_16:
		memcpy(data + off, sess->xcbc_key,
		    AES_BLOCK_LEN * 2 + sess->klen);
		off += AES_BLOCK_LEN * 2 + sess->klen;
		break;
	case CRYPTO_AES_XTS:
		memcpy(data + off, sess->tweak_key, sess->klen);
		off += sess->klen;
		break;
	}

	switch (csp->csp_auth_alg) {
	case CRYPTO_AES_NIST_GMAC:
		memcpy(data + off, sess->ghash_key, GMAC_BLOCK_LEN);
		off += GMAC_BLOCK_LEN;
		break;
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		memcpy(data + off, sess->hmac_ipad, sess->statelen);
		off += sess->statelen;
		memcpy(data + off, sess->hmac_opad, sess->statelen);
		off += sess->statelen;
		break;
	}

	return (off);
}

/*
 * Populate fields in the first command descriptor of the chain used to encode
 * the specified request.  These fields indicate the algorithms used, the size
 * of the key material stored in the associated context record, the primitive
 * operations to be performed on input data, and the location of the IV if any.
 */
static void
safexcel_set_command(struct safexcel_request *req,
    struct safexcel_cmd_descr *cdesc)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp;
	struct safexcel_session *sess;
	uint32_t ctrl0, ctrl1, ctxr_len;
	int alg;

	crp = req->crp;
	csp = crypto_get_params(crp->crp_session);
	sess = req->sess;

	ctrl0 = sess->alg | sess->digest | sess->hash;
	ctrl1 = sess->mode;

	ctxr_len = safexcel_set_context(req) / sizeof(uint32_t);
	ctrl0 |= SAFEXCEL_CONTROL0_SIZE(ctxr_len);

	alg = csp->csp_cipher_alg;
	if (alg == 0)
		alg = csp->csp_auth_alg;

	switch (alg) {
	case CRYPTO_AES_CCM_16:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_ENCRYPT_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_DECRYPT_HASH_IN |
			    SAFEXCEL_CONTROL0_KEY_EN;
		}
		ctrl1 |= SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
		    SAFEXCEL_CONTROL1_IV2 | SAFEXCEL_CONTROL1_IV3;
		break;
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
	case CRYPTO_AES_XTS:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN;
			if (csp->csp_auth_alg != 0)
				ctrl0 |=
				    SAFEXCEL_CONTROL0_TYPE_ENCRYPT_HASH_OUT;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
			    SAFEXCEL_CONTROL0_KEY_EN;
			if (csp->csp_auth_alg != 0)
				ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
		}
		break;
	case CRYPTO_AES_NIST_GCM_16:
	case CRYPTO_AES_NIST_GMAC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op) ||
		    csp->csp_auth_alg != 0) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN |
			    SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
			    SAFEXCEL_CONTROL0_KEY_EN |
			    SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
		}
		if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) {
			ctrl1 |= SAFEXCEL_CONTROL1_COUNTER_MODE |
			    SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
			    SAFEXCEL_CONTROL1_IV2;
		}
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		ctrl0 |= SAFEXCEL_CONTROL0_RESTART_HASH;
		/* FALLTHROUGH */
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
		break;
	}

	cdesc->control_data.control0 = ctrl0;
	cdesc->control_data.control1 = ctrl1;
}

/*
 * Construct a no-op instruction, used to pad input tokens.
 */
static void
safexcel_instr_nop(struct safexcel_instr **instrp)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = (1 << 2);
	instr->status = 0;
	instr->instructions = 0;

	*instrp = instr + 1;
}

/*
 * Insert the digest of the input payload.  This is typically the last
 * instruction of a sequence.
 */
static void
safexcel_instr_insert_digest(struct safexcel_instr **instrp, int len)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = len;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
	    SAFEXCEL_INSTR_INSERT_HASH_DIGEST;

	*instrp = instr + 1;
}

/*
 * Retrieve and verify a digest.
 */
static void
safexcel_instr_retrieve_digest(struct safexcel_instr **instrp, int len)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_RETRIEVE;
	instr->length = len;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
	instr++;

	instr->opcode = SAFEXCEL_INSTR_OPCODE_VERIFY_FIELDS;
	instr->length = len | SAFEXCEL_INSTR_VERIFY_HASH;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_VERIFY_PADDING;

	*instrp = instr + 1;
}

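/*
 * Emit instructions to remove an AES block's worth of result data from the
 * data stream and re-insert it into both the crypto engine and the output
 * stream.  The GCM, GMAC and CCM handlers use this when constructing the
 * authentication tag.
 */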
static void
safexcel_instr_temp_aes_block(struct safexcel_instr **instrp)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT_REMOVE_RESULT;
	instr->length = 0;
	instr->status = 0;
	instr->instructions = AES_BLOCK_LEN;
	instr++;

	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = AES_BLOCK_LEN;
	instr->status = 0;
	instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
	    SAFEXCEL_INSTR_DEST_CRYPTO;

	*instrp = instr + 1;
}

/*
 * Handle a request for an unauthenticated block cipher.
 */
static void
safexcel_instr_cipher(struct safexcel_request *req,
    struct safexcel_instr *instr, struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;

	crp = req->crp;

	/* Insert the payload. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_payload_length;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_PACKET |
	    SAFEXCEL_INSTR_STATUS_LAST_HASH;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_OUTPUT;

	cdesc->additional_cdata_size = 1;
}

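/*
 * Handle an encrypt-then-authenticate (ETA) request: hash the AAD, pass the
 * payload through both the cipher and the hash engine, then insert or verify
 * the digest depending on the direction of the operation.
 */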
static void
safexcel_instr_eta(struct safexcel_request *req, struct safexcel_instr *instr,
    struct safexcel_cmd_descr *cdesc)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp;
	struct safexcel_instr *start;

	crp = req->crp;
	csp = crypto_get_params(crp->crp_session);
	start = instr;

	/* Insert the AAD. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_aad_length;
	instr->status = crp->crp_payload_length == 0 ?
	    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	/* Encrypt any data left in the request. */
	if (crp->crp_payload_length > 0) {
		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
		instr->length = crp->crp_payload_length;
		instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
		instr->instructions = SAFEXCEL_INSTR_INS_LAST |
		    SAFEXCEL_INSTR_DEST_CRYPTO |
		    SAFEXCEL_INSTR_DEST_HASH |
		    SAFEXCEL_INSTR_DEST_OUTPUT;
		instr++;
	}

	/*
	 * Compute the digest, or extract it and place it in the output stream.
	 */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
	else
		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
	cdesc->additional_cdata_size = instr - start;
}

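/*
 * Handle a plain or HMAC-authenticated digest request.
 */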
static void
safexcel_instr_sha_hash(struct safexcel_request *req,
    struct safexcel_instr *instr)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	crp = req->crp;
	start = instr;

	/* Pass the input data to the hash engine. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_payload_length;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
	instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	/* Insert the hash result into the output stream. */
	safexcel_instr_insert_digest(&instr, req->sess->digestlen);

	/* Pad the rest of the inline instruction space. */
	while (instr != start + SAFEXCEL_MAX_ITOKENS)
		safexcel_instr_nop(&instr);
}

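/*
 * Handle an AES-CCM request.  The counter block A0 is embedded in the command
 * descriptor's token, while B0 and the encoded AAD length are inserted
 * directly into the input stream.
 */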
static void
safexcel_instr_ccm(struct safexcel_request *req, struct safexcel_instr *instr,
    struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;
	struct safexcel_instr *start;
	uint8_t *a0, *b0, *alenp, L;
	int aalign, blen;

	crp = req->crp;
	start = instr;

	/*
	 * Construct two blocks, A0 and B0, used in encryption and
	 * authentication, respectively.  A0 is embedded in the token
	 * descriptor, and B0 is inserted directly into the data stream using
	 * instructions below.
	 *
	 * OCF seems to assume a 12-byte IV, fixing L (the payload length size)
	 * at 3 bytes due to the layout of B0.  This is fine since the driver
	 * limits the maximum request size to 65535 bytes anyway.
	 */
	blen = AES_BLOCK_LEN;
	L = 3;

	a0 = (uint8_t *)&cdesc->control_data.token[0];
	memset(a0, 0, blen);
	a0[0] = L - 1;
	memcpy(&a0[1], req->iv, AES_CCM_IV_LEN);

	/*
	 * Insert B0 and the AAD length into the input stream.
	 */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = blen + (crp->crp_aad_length > 0 ? 2 : 0);
	instr->status = 0;
	instr->instructions = SAFEXCEL_INSTR_DEST_HASH |
	    SAFEXCEL_INSTR_INSERT_IMMEDIATE;
	instr++;

	b0 = (uint8_t *)instr;
	memset(b0, 0, blen);
	b0[0] =
	    (L - 1) | /* payload length size */
	    ((CCM_CBC_MAX_DIGEST_LEN - 2) / 2) << 3 /* digest length */ |
	    (crp->crp_aad_length > 0 ? 1 : 0) << 6 /* AAD present bit */;
	memcpy(&b0[1], req->iv, AES_CCM_IV_LEN);
	b0[14] = crp->crp_payload_length >> 8;
	b0[15] = crp->crp_payload_length & 0xff;
	instr += blen / sizeof(*instr);

	/* Insert the AAD length and data into the input stream. */
	if (crp->crp_aad_length > 0) {
		alenp = (uint8_t *)instr;
		alenp[0] = crp->crp_aad_length >> 8;
		alenp[1] = crp->crp_aad_length & 0xff;
		alenp[2] = 0;
		alenp[3] = 0;
		instr++;

		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
		instr->length = crp->crp_aad_length;
		instr->status = 0;
		instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
		instr++;

		/* Insert zero padding. */
		aalign = (crp->crp_aad_length + 2) & (blen - 1);
		instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
		instr->length = aalign == 0 ? 0 : blen - aalign;
		instr->status = crp->crp_payload_length == 0 ?
		    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
		instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
		instr++;
	}

	safexcel_instr_temp_aes_block(&instr);

	/* Insert the cipher payload into the input stream. */
	if (crp->crp_payload_length > 0) {
		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
		instr->length = crp->crp_payload_length;
		instr->status = (crp->crp_payload_length & (blen - 1)) == 0 ?
		    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
		instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
		    SAFEXCEL_INSTR_DEST_CRYPTO |
		    SAFEXCEL_INSTR_DEST_HASH |
		    SAFEXCEL_INSTR_INS_LAST;
		instr++;

		/* Insert zero padding. */
		if (crp->crp_payload_length & (blen - 1)) {
			instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
			instr->length = blen -
			    (crp->crp_payload_length & (blen - 1));
			instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
			instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
			instr++;
		}
	}

	/*
	 * Compute the digest, or extract it and place it in the output stream.
	 */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
	else
		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);

	cdesc->additional_cdata_size = instr - start;
}

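/*
 * Handle an AES-GCM request.  The initial counter block is constructed in the
 * command descriptor's token from the 12-byte IV.
 */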
static void
safexcel_instr_gcm(struct safexcel_request *req, struct safexcel_instr *instr,
    struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
	cdesc->control_data.token[3] = htobe32(1);

	crp = req->crp;
	start = instr;

	/* Insert the AAD into the input stream. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_aad_length;
	instr->status = crp->crp_payload_length == 0 ?
	    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	safexcel_instr_temp_aes_block(&instr);

	/* Insert the cipher payload into the input stream. */
	if (crp->crp_payload_length > 0) {
		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
		instr->length = crp->crp_payload_length;
		instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
		instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
		    SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_HASH |
		    SAFEXCEL_INSTR_INS_LAST;
		instr++;
	}

	/*
	 * Compute the digest, or extract it and place it in the output stream.
	 */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
	else
		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);

	cdesc->additional_cdata_size = instr - start;
}

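/*
 * Handle an AES-GMAC request, which authenticates the payload without
 * encrypting it.
 */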
1754 static void
1755 safexcel_instr_gmac(struct safexcel_request *req, struct safexcel_instr *instr,
1756     struct safexcel_cmd_descr *cdesc)
1757 {
1758 	struct cryptop *crp;
1759 	struct safexcel_instr *start;
1760 
1761 	memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
1762 	cdesc->control_data.token[3] = htobe32(1);
1763 
1764 	crp = req->crp;
1765 	start = instr;
1766 
1767 	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1768 	instr->length = crp->crp_payload_length;
1769 	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1770 	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1771 	    SAFEXCEL_INSTR_DEST_HASH;
1772 	instr++;
1773 
1774 	safexcel_instr_temp_aes_block(&instr);
1775 
1776 	safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1777 
1778 	cdesc->additional_cdata_size = instr - start;
1779 }
1780 
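/*
 * Populate the token with the per-request IV and instruction sequence,
 * dispatching to the builder for the session's cipher and auth algorithms.
 */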
1781 static void
1782 safexcel_set_token(struct safexcel_request *req)
1783 {
1784 	const struct crypto_session_params *csp;
1785 	struct safexcel_cmd_descr *cdesc;
1786 	struct safexcel_instr *instr;
1787 	struct safexcel_softc *sc;
1788 	int ringidx;
1789 
1790 	csp = crypto_get_params(req->crp->crp_session);
1791 	cdesc = req->cdesc;
1792 	sc = req->sc;
1793 	ringidx = req->sess->ringidx;
1794 
1795 	safexcel_set_command(req, cdesc);
1796 
1797 	/*
1798 	 * For keyless hash operations, the token instructions can be embedded
1799 	 * in the token itself.  Otherwise we use an additional token descriptor
1800 	 * and the embedded instruction space is used to store the IV.
1801 	 */
1802 	if (csp->csp_cipher_alg == 0 &&
1803 	    csp->csp_auth_alg != CRYPTO_AES_NIST_GMAC) {
1804 		instr = (void *)cdesc->control_data.token;
1805 	} else {
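		/*
		 * Each command descriptor owns a fixed-size slot in the
		 * additional-token DMA buffer; locate this request's slot
		 * from the descriptor's position in the command ring.
		 */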
1806 		instr = (void *)(sc->sc_ring[ringidx].dma_atok.vaddr +
1807 		    sc->sc_config.atok_offset *
1808 		    (cdesc - sc->sc_ring[ringidx].cdr.desc));
1809 		cdesc->control_data.options |= SAFEXCEL_OPTION_4_TOKEN_IV_CMD;
1810 	}
1811 
1812 	switch (csp->csp_cipher_alg) {
1813 	case CRYPTO_AES_NIST_GCM_16:
1814 		safexcel_instr_gcm(req, instr, cdesc);
1815 		break;
1816 	case CRYPTO_AES_CCM_16:
1817 		safexcel_instr_ccm(req, instr, cdesc);
1818 		break;
1819 	case CRYPTO_AES_XTS:
1820 		memcpy(cdesc->control_data.token, req->iv, AES_XTS_IV_LEN);
1821 		memset(cdesc->control_data.token +
1822 		    AES_XTS_IV_LEN / sizeof(uint32_t), 0, AES_XTS_IV_LEN);
1823 
1824 		safexcel_instr_cipher(req, instr, cdesc);
1825 		break;
1826 	case CRYPTO_AES_CBC:
1827 	case CRYPTO_AES_ICM:
1828 		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_LEN);
1829 		if (csp->csp_auth_alg != 0)
1830 			safexcel_instr_eta(req, instr, cdesc);
1831 		else
1832 			safexcel_instr_cipher(req, instr, cdesc);
1833 		break;
1834 	default:
1835 		switch (csp->csp_auth_alg) {
1836 		case CRYPTO_SHA1:
1837 		case CRYPTO_SHA1_HMAC:
1838 		case CRYPTO_SHA2_224:
1839 		case CRYPTO_SHA2_224_HMAC:
1840 		case CRYPTO_SHA2_256:
1841 		case CRYPTO_SHA2_256_HMAC:
1842 		case CRYPTO_SHA2_384:
1843 		case CRYPTO_SHA2_384_HMAC:
1844 		case CRYPTO_SHA2_512:
1845 		case CRYPTO_SHA2_512_HMAC:
1846 			safexcel_instr_sha_hash(req, instr);
1847 			break;
1848 		case CRYPTO_AES_NIST_GMAC:
1849 			safexcel_instr_gmac(req, instr, cdesc);
1850 			break;
1851 		default:
1852 			panic("unhandled auth request %d", csp->csp_auth_alg);
1853 		}
1854 		break;
1855 	}
1856 }
1857 
1858 static struct safexcel_res_descr *
1859 safexcel_res_descr_add(struct safexcel_ring *ring, bool first, bool last,
1860     bus_addr_t data, uint32_t len)
1861 {
1862 	struct safexcel_res_descr *rdesc;
1863 	struct safexcel_res_descr_ring *rring;
1864 
1865 	mtx_assert(&ring->mtx, MA_OWNED);
1866 
1867 	rring = &ring->rdr;
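	/*
	 * One slot is always left unused so that a full ring can be
	 * distinguished from an empty one.
	 */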
1868 	if ((rring->write + 1) % SAFEXCEL_RING_SIZE == rring->read)
1869 		return (NULL);
1870 
1871 	rdesc = &rring->desc[rring->write];
1872 	rring->write = (rring->write + 1) % SAFEXCEL_RING_SIZE;
1873 
1874 	rdesc->particle_size = len;
1875 	rdesc->rsvd0 = 0;
1876 	rdesc->descriptor_overflow = 0;
1877 	rdesc->buffer_overflow = 0;
1878 	rdesc->last_seg = last;
1879 	rdesc->first_seg = first;
1880 	rdesc->result_size =
1881 	    sizeof(struct safexcel_res_data) / sizeof(uint32_t);
1882 	rdesc->rsvd1 = 0;
1883 	rdesc->data_lo = SAFEXCEL_ADDR_LO(data);
1884 	rdesc->data_hi = SAFEXCEL_ADDR_HI(data);
1885 
1886 	if (first) {
1887 		rdesc->result_data.packet_length = 0;
1888 		rdesc->result_data.error_code = 0;
1889 	}
1890 
1891 	return (rdesc);
1892 }
1893 
1894 static struct safexcel_cmd_descr *
1895 safexcel_cmd_descr_add(struct safexcel_ring *ring, bool first, bool last,
1896     bus_addr_t data, uint32_t seglen, uint32_t reqlen, bus_addr_t context)
1897 {
1898 	struct safexcel_cmd_descr *cdesc;
1899 	struct safexcel_cmd_descr_ring *cring;
1900 
1901 	KASSERT(reqlen <= SAFEXCEL_MAX_REQUEST_SIZE,
1902 	    ("%s: request length %u too long", __func__, reqlen));
1903 	mtx_assert(&ring->mtx, MA_OWNED);
1904 
1905 	cring = &ring->cdr;
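	/* The same one-slot-open convention as the result ring applies. */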
1906 	if ((cring->write + 1) % SAFEXCEL_RING_SIZE == cring->read)
1907 		return (NULL);
1908 
1909 	cdesc = &cring->desc[cring->write];
1910 	cring->write = (cring->write + 1) % SAFEXCEL_RING_SIZE;
1911 
1912 	cdesc->particle_size = seglen;
1913 	cdesc->rsvd0 = 0;
1914 	cdesc->last_seg = last;
1915 	cdesc->first_seg = first;
1916 	cdesc->additional_cdata_size = 0;
1917 	cdesc->rsvd1 = 0;
1918 	cdesc->data_lo = SAFEXCEL_ADDR_LO(data);
1919 	cdesc->data_hi = SAFEXCEL_ADDR_HI(data);
1920 	if (first) {
1921 		cdesc->control_data.packet_length = reqlen;
1922 		cdesc->control_data.options = SAFEXCEL_OPTION_IP |
1923 		    SAFEXCEL_OPTION_CP | SAFEXCEL_OPTION_CTX_CTRL_IN_CMD |
1924 		    SAFEXCEL_OPTION_RC_AUTO;
1925 		cdesc->control_data.type = SAFEXCEL_TOKEN_TYPE_BYPASS;
1926 		cdesc->control_data.context_lo = SAFEXCEL_ADDR_LO(context) |
1927 		    SAFEXCEL_CONTEXT_SMALL;
1928 		cdesc->control_data.context_hi = SAFEXCEL_ADDR_HI(context);
1929 	}
1930 
1931 	return (cdesc);
1932 }
1933 
1934 static void
1935 safexcel_cmd_descr_rollback(struct safexcel_ring *ring, int count)
1936 {
1937 	struct safexcel_cmd_descr_ring *cring;
1938 
1939 	mtx_assert(&ring->mtx, MA_OWNED);
1940 
1941 	cring = &ring->cdr;
1942 	cring->write -= count;
1943 	if (cring->write < 0)
1944 		cring->write += SAFEXCEL_RING_SIZE;
1945 }
1946 
1947 static void
1948 safexcel_res_descr_rollback(struct safexcel_ring *ring, int count)
1949 {
1950 	struct safexcel_res_descr_ring *rring;
1951 
1952 	mtx_assert(&ring->mtx, MA_OWNED);
1953 
1954 	rring = &ring->rdr;
1955 	rring->write -= count;
1956 	if (rring->write < 0)
1957 		rring->write += SAFEXCEL_RING_SIZE;
1958 }
1959 
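/*
 * Append the subrange [start, start + len) of a bus_dma segment array to the
 * scatter/gather list sg, walking segments until the range is covered.
 */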
1960 static void
1961 safexcel_append_segs(bus_dma_segment_t *segs, int nseg, struct sglist *sg,
1962     int start, int len)
1963 {
1964 	bus_dma_segment_t *seg;
1965 	size_t seglen;
1966 	int error, i;
1967 
1968 	for (i = 0; i < nseg && len > 0; i++) {
1969 		seg = &segs[i];
1970 
1971 		if (seg->ds_len <= start) {
1972 			start -= seg->ds_len;
1973 			continue;
1974 		}
1975 
1976 		seglen = MIN(len, seg->ds_len - start);
1977 		error = sglist_append_phys(sg, seg->ds_addr + start, seglen);
1978 		if (error != 0)
1979 			panic("%s: ran out of segments: %d", __func__, error);
1980 		len -= seglen;
1981 		start = 0;
1982 	}
1983 }
1984 
1985 static void
1986 safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg,
1987     int error)
1988 {
1989 	const struct crypto_session_params *csp;
1990 	struct cryptop *crp;
1991 	struct safexcel_cmd_descr *cdesc;
1992 	struct safexcel_request *req;
1993 	struct safexcel_ring *ring;
1994 	struct safexcel_session *sess;
1995 	struct sglist *sg;
1996 	size_t inlen;
1997 	int i;
1998 	bool first, last;
1999 
2000 	req = arg;
2001 	if (error != 0) {
2002 		req->error = error;
2003 		return;
2004 	}
2005 
2006 	crp = req->crp;
2007 	csp = crypto_get_params(crp->crp_session);
2008 	sess = req->sess;
2009 	ring = &req->sc->sc_ring[sess->ringidx];
2010 
2011 	mtx_assert(&ring->mtx, MA_OWNED);
2012 
2013 	/*
2014 	 * Set up descriptors for input and output data.
2015 	 *
2016 	 * The processing engine programs require that any AAD comes first,
2017 	 * followed by the cipher plaintext, followed by the digest.  Some
2018 	 * consumers place the digest first in the input buffer, in which case
2019 	 * we have to create an extra descriptor.
2020 	 *
2021 	 * As an optimization, unmodified data is not passed to the output
2022 	 * stream.
2023 	 */
2024 	sglist_reset(ring->cmd_data);
2025 	sglist_reset(ring->res_data);
2026 	if (crp->crp_aad_length != 0) {
2027 		safexcel_append_segs(segs, nseg, ring->cmd_data,
2028 		    crp->crp_aad_start, crp->crp_aad_length);
2029 	}
2030 	safexcel_append_segs(segs, nseg, ring->cmd_data,
2031 	    crp->crp_payload_start, crp->crp_payload_length);
2032 	if (csp->csp_cipher_alg != 0) {
2033 		safexcel_append_segs(segs, nseg, ring->res_data,
2034 		    crp->crp_payload_start, crp->crp_payload_length);
2035 	}
2036 	if (sess->digestlen > 0) {
2037 		if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
2038 			safexcel_append_segs(segs, nseg, ring->cmd_data,
2039 			    crp->crp_digest_start, sess->digestlen);
2040 		} else {
2041 			safexcel_append_segs(segs, nseg, ring->res_data,
2042 			    crp->crp_digest_start, sess->digestlen);
2043 		}
2044 	}
2045 
2046 	sg = ring->cmd_data;
2047 	if (sg->sg_nseg == 0) {
2048 		/*
		 * Fake a segment for the command descriptor if the input has
		 * length zero.  The EIP97 apparently does not handle
		 * zero-length packets properly: subsequent requests return
		 * bogus errors.  Work around this by pointing a dummy
		 * one-byte segment at the context descriptor.
2054 		 */
2055 		(void)sglist_append_phys(sg, req->ctx.paddr, 1);
2056 	}
2057 	for (i = 0, inlen = 0; i < sg->sg_nseg; i++)
2058 		inlen += sg->sg_segs[i].ss_len;
2059 	for (i = 0; i < sg->sg_nseg; i++) {
2060 		first = i == 0;
2061 		last = i == sg->sg_nseg - 1;
2062 
2063 		cdesc = safexcel_cmd_descr_add(ring, first, last,
2064 		    sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len,
2065 		    (uint32_t)inlen, req->ctx.paddr);
2066 		if (cdesc == NULL) {
2067 			safexcel_cmd_descr_rollback(ring, i);
2068 			req->error = EAGAIN;
2069 			return;
2070 		}
2071 		if (i == 0)
2072 			req->cdesc = cdesc;
2073 	}
2074 	req->cdescs = sg->sg_nseg;
2075 
2076 	sg = ring->res_data;
2077 	if (sg->sg_nseg == 0) {
2078 		/*
2079 		 * We need a result descriptor even if the output stream will be
2080 		 * empty, for example when verifying an AAD digest.
2081 		 */
2082 		sg->sg_segs[0].ss_paddr = 0;
2083 		sg->sg_segs[0].ss_len = 0;
2084 		sg->sg_nseg = 1;
2085 	}
2086 	for (i = 0; i < sg->sg_nseg; i++) {
2087 		first = i == 0;
2088 		last = i == sg->sg_nseg - 1;
2089 
2090 		if (safexcel_res_descr_add(ring, first, last,
2091 		    sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len) == NULL) {
2092 			safexcel_cmd_descr_rollback(ring,
2093 			    ring->cmd_data->sg_nseg);
2094 			safexcel_res_descr_rollback(ring, i);
2095 			req->error = EAGAIN;
2096 			return;
2097 		}
2098 	}
2099 	req->rdescs = sg->sg_nseg;
2100 }
2101 
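/*
 * Load the crypto request's buffers for DMA.  The callback builds the
 * command and result descriptor chains under the ring lock.
 */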
2102 static int
2103 safexcel_create_chain(struct safexcel_ring *ring, struct safexcel_request *req)
2104 {
2105 	int error;
2106 
2107 	req->error = 0;
2108 	req->cdescs = req->rdescs = 0;
2109 
2110 	error = bus_dmamap_load_crp(ring->data_dtag, req->dmap, req->crp,
2111 	    safexcel_create_chain_cb, req, BUS_DMA_NOWAIT);
2112 	if (error == 0)
2113 		req->dmap_loaded = true;
2114 
2115 	if (req->error != 0)
2116 		error = req->error;
2117 
2118 	return (error);
2119 }
2120 
2121 static bool
2122 safexcel_probe_cipher(const struct crypto_session_params *csp)
2123 {
2124 	switch (csp->csp_cipher_alg) {
2125 	case CRYPTO_AES_CBC:
2126 	case CRYPTO_AES_ICM:
2127 		if (csp->csp_ivlen != AES_BLOCK_LEN)
2128 			return (false);
2129 		break;
2130 	case CRYPTO_AES_XTS:
2131 		if (csp->csp_ivlen != AES_XTS_IV_LEN)
2132 			return (false);
2133 		break;
2134 	default:
2135 		return (false);
2136 	}
2137 
2138 	return (true);
2139 }
2140 
2141 /*
2142  * Determine whether the driver can implement a session with the requested
2143  * parameters.
2144  */
2145 static int
2146 safexcel_probesession(device_t dev, const struct crypto_session_params *csp)
2147 {
2148 	switch (csp->csp_mode) {
2149 	case CSP_MODE_CIPHER:
2150 		if (!safexcel_probe_cipher(csp))
2151 			return (EINVAL);
2152 		break;
2153 	case CSP_MODE_DIGEST:
2154 		switch (csp->csp_auth_alg) {
2155 		case CRYPTO_AES_NIST_GMAC:
2156 			if (csp->csp_ivlen != AES_GCM_IV_LEN)
2157 				return (EINVAL);
2158 			break;
2159 		case CRYPTO_SHA1:
2160 		case CRYPTO_SHA1_HMAC:
2161 		case CRYPTO_SHA2_224:
2162 		case CRYPTO_SHA2_224_HMAC:
2163 		case CRYPTO_SHA2_256:
2164 		case CRYPTO_SHA2_256_HMAC:
2165 		case CRYPTO_SHA2_384:
2166 		case CRYPTO_SHA2_384_HMAC:
2167 		case CRYPTO_SHA2_512:
2168 		case CRYPTO_SHA2_512_HMAC:
2169 			break;
2170 		default:
2171 			return (EINVAL);
2172 		}
2173 		break;
2174 	case CSP_MODE_AEAD:
2175 		switch (csp->csp_cipher_alg) {
2176 		case CRYPTO_AES_NIST_GCM_16:
2177 			if (csp->csp_ivlen != AES_GCM_IV_LEN)
2178 				return (EINVAL);
2179 			break;
2180 		case CRYPTO_AES_CCM_16:
2181 			if (csp->csp_ivlen != AES_CCM_IV_LEN)
2182 				return (EINVAL);
2183 			break;
2184 		default:
2185 			return (EINVAL);
2186 		}
2187 		break;
2188 	case CSP_MODE_ETA:
2189 		if (!safexcel_probe_cipher(csp))
2190 			return (EINVAL);
2191 		switch (csp->csp_cipher_alg) {
2192 		case CRYPTO_AES_CBC:
2193 		case CRYPTO_AES_ICM:
			/*
			 * The EIP-97 does not support combining AES-XTS
			 * with hash operations, so only AES-CBC and AES-ICM
			 * may be paired with an HMAC.
			 */
2198 			if (csp->csp_auth_alg != CRYPTO_SHA1_HMAC &&
2199 			    csp->csp_auth_alg != CRYPTO_SHA2_224_HMAC &&
2200 			    csp->csp_auth_alg != CRYPTO_SHA2_256_HMAC &&
2201 			    csp->csp_auth_alg != CRYPTO_SHA2_384_HMAC &&
2202 			    csp->csp_auth_alg != CRYPTO_SHA2_512_HMAC)
2203 				return (EINVAL);
2204 			break;
2205 		default:
2206 			return (EINVAL);
2207 		}
2208 		break;
2209 	default:
2210 		return (EINVAL);
2211 	}
2212 
2213 	return (CRYPTODEV_PROBE_HARDWARE);
2214 }
2215 
2216 /*
2217  * Pre-compute the hash key used in GHASH, which is a block of zeroes encrypted
2218  * using the cipher key.
2219  */
2220 static void
2221 safexcel_setkey_ghash(struct safexcel_session *sess, const uint8_t *key,
2222     int klen)
2223 {
2224 	uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
2225 	uint8_t zeros[AES_BLOCK_LEN];
2226 	int i, rounds;
2227 
2228 	memset(zeros, 0, sizeof(zeros));
2229 
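	/* H = E_K(0^128), stored as big-endian words for the GHASH unit. */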
2230 	rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
2231 	rijndaelEncrypt(ks, rounds, zeros, (uint8_t *)sess->ghash_key);
2232 	for (i = 0; i < GMAC_BLOCK_LEN / sizeof(uint32_t); i++)
2233 		sess->ghash_key[i] = htobe32(sess->ghash_key[i]);
2234 
2235 	explicit_bzero(ks, sizeof(ks));
2236 }
2237 
2238 /*
2239  * Pre-compute the combined CBC-MAC key, which consists of three keys K1, K2, K3
2240  * in the hardware implementation.  K1 is the cipher key and comes last in the
2241  * buffer since K2 and K3 have a fixed size of AES_BLOCK_LEN.  For now XCBC-MAC
2242  * is not implemented so K2 and K3 are fixed.
2243  */
2244 static void
2245 safexcel_setkey_xcbcmac(struct safexcel_session *sess, const uint8_t *key,
2246     int klen)
2247 {
2248 	int i, off;
2249 
2250 	memset(sess->xcbc_key, 0, sizeof(sess->xcbc_key));
2251 	off = 2 * AES_BLOCK_LEN / sizeof(uint32_t);
2252 	for (i = 0; i < klen / sizeof(uint32_t); i++, key += 4)
2253 		sess->xcbc_key[i + off] = htobe32(le32dec(key));
2254 }
2255 
2256 static void
2257 safexcel_setkey_hmac_digest(struct auth_hash *ahash, union authctx *ctx,
2258     char *buf)
2259 {
2260 	int hashwords, i;
2261 
2262 	switch (ahash->type) {
2263 	case CRYPTO_SHA1_HMAC:
2264 		hashwords = ahash->hashsize / sizeof(uint32_t);
2265 		for (i = 0; i < hashwords; i++)
2266 			((uint32_t *)buf)[i] = htobe32(ctx->sha1ctx.h.b32[i]);
2267 		break;
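	/*
	 * SHA-224 and SHA-384 are truncated variants of SHA-256 and SHA-512,
	 * so the full-width intermediate state of the larger hash is what
	 * gets loaded into the engine.
	 */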
2268 	case CRYPTO_SHA2_224_HMAC:
2269 		hashwords = auth_hash_hmac_sha2_256.hashsize / sizeof(uint32_t);
2270 		for (i = 0; i < hashwords; i++)
2271 			((uint32_t *)buf)[i] = htobe32(ctx->sha224ctx.state[i]);
2272 		break;
2273 	case CRYPTO_SHA2_256_HMAC:
2274 		hashwords = ahash->hashsize / sizeof(uint32_t);
2275 		for (i = 0; i < hashwords; i++)
2276 			((uint32_t *)buf)[i] = htobe32(ctx->sha256ctx.state[i]);
2277 		break;
2278 	case CRYPTO_SHA2_384_HMAC:
2279 		hashwords = auth_hash_hmac_sha2_512.hashsize / sizeof(uint64_t);
2280 		for (i = 0; i < hashwords; i++)
2281 			((uint64_t *)buf)[i] = htobe64(ctx->sha384ctx.state[i]);
2282 		break;
2283 	case CRYPTO_SHA2_512_HMAC:
2284 		hashwords = ahash->hashsize / sizeof(uint64_t);
2285 		for (i = 0; i < hashwords; i++)
2286 			((uint64_t *)buf)[i] = htobe64(ctx->sha512ctx.state[i]);
2287 		break;
2288 	}
2289 }
2290 
2291 /*
2292  * Pre-compute the inner and outer digests used in the HMAC algorithm.
2293  */
2294 static void
2295 safexcel_setkey_hmac(const struct crypto_session_params *csp,
2296     struct safexcel_session *sess, const uint8_t *key, int klen)
2297 {
2298 	union authctx ctx;
2299 	struct auth_hash *ahash;
2300 
2301 	ahash = crypto_auth_hash(csp);
2302 	hmac_init_ipad(ahash, key, klen, &ctx);
2303 	safexcel_setkey_hmac_digest(ahash, &ctx, sess->hmac_ipad);
2304 	hmac_init_opad(ahash, key, klen, &ctx);
2305 	safexcel_setkey_hmac_digest(ahash, &ctx, sess->hmac_opad);
2306 	explicit_bzero(&ctx, ahash->ctxsize);
2307 }
2308 
2309 static void
2310 safexcel_setkey_xts(struct safexcel_session *sess, const uint8_t *key, int klen)
2311 {
2312 	memcpy(sess->tweak_key, key + klen / 2, klen / 2);
2313 }
2314 
2315 static void
2316 safexcel_setkey(struct safexcel_session *sess,
2317     const struct crypto_session_params *csp, struct cryptop *crp)
2318 {
2319 	const uint8_t *akey, *ckey;
2320 	int aklen, cklen;
2321 
2322 	aklen = csp->csp_auth_klen;
2323 	cklen = csp->csp_cipher_klen;
2324 	akey = ckey = NULL;
2325 	if (crp != NULL) {
2326 		akey = crp->crp_auth_key;
2327 		ckey = crp->crp_cipher_key;
2328 	}
2329 	if (akey == NULL)
2330 		akey = csp->csp_auth_key;
2331 	if (ckey == NULL)
2332 		ckey = csp->csp_cipher_key;
2333 
2334 	sess->klen = cklen;
2335 	switch (csp->csp_cipher_alg) {
2336 	case CRYPTO_AES_NIST_GCM_16:
2337 		safexcel_setkey_ghash(sess, ckey, cklen);
2338 		break;
2339 	case CRYPTO_AES_CCM_16:
2340 		safexcel_setkey_xcbcmac(sess, ckey, cklen);
2341 		break;
2342 	case CRYPTO_AES_XTS:
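		/*
		 * An XTS key is two equal-length AES keys concatenated: the
		 * data key followed by the tweak key.  Only half of the
		 * total length counts towards the AES key size.
		 */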
2343 		safexcel_setkey_xts(sess, ckey, cklen);
2344 		sess->klen /= 2;
2345 		break;
2346 	}
2347 
2348 	switch (csp->csp_auth_alg) {
2349 	case CRYPTO_SHA1_HMAC:
2350 	case CRYPTO_SHA2_224_HMAC:
2351 	case CRYPTO_SHA2_256_HMAC:
2352 	case CRYPTO_SHA2_384_HMAC:
2353 	case CRYPTO_SHA2_512_HMAC:
2354 		safexcel_setkey_hmac(csp, sess, akey, aklen);
2355 		break;
2356 	case CRYPTO_AES_NIST_GMAC:
2357 		sess->klen = aklen;
2358 		safexcel_setkey_ghash(sess, akey, aklen);
2359 		break;
2360 	}
2361 }
2362 
2363 static uint32_t
2364 safexcel_aes_algid(int keylen)
2365 {
2366 	switch (keylen) {
2367 	case 16:
2368 		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES128);
2369 	case 24:
2370 		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES192);
2371 	case 32:
2372 		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES256);
2373 	default:
2374 		panic("invalid AES key length %d", keylen);
2375 	}
2376 }
2377 
2378 static uint32_t
2379 safexcel_aes_ccm_hashid(int keylen)
2380 {
2381 	switch (keylen) {
2382 	case 16:
2383 		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC128);
2384 	case 24:
2385 		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC192);
2386 	case 32:
2387 		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC256);
2388 	default:
2389 		panic("invalid AES key length %d", keylen);
2390 	}
2391 }
2392 
2393 static uint32_t
2394 safexcel_sha_hashid(int alg)
2395 {
2396 	switch (alg) {
2397 	case CRYPTO_SHA1:
2398 	case CRYPTO_SHA1_HMAC:
2399 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA1);
2400 	case CRYPTO_SHA2_224:
2401 	case CRYPTO_SHA2_224_HMAC:
2402 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA224);
2403 	case CRYPTO_SHA2_256:
2404 	case CRYPTO_SHA2_256_HMAC:
2405 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA256);
2406 	case CRYPTO_SHA2_384:
2407 	case CRYPTO_SHA2_384_HMAC:
2408 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA384);
2409 	case CRYPTO_SHA2_512:
2410 	case CRYPTO_SHA2_512_HMAC:
2411 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA512);
2412 	default:
2413 		__assert_unreachable();
2414 	}
2415 }
2416 
2417 static int
2418 safexcel_sha_hashlen(int alg)
2419 {
2420 	switch (alg) {
2421 	case CRYPTO_SHA1:
2422 	case CRYPTO_SHA1_HMAC:
2423 		return (SHA1_HASH_LEN);
2424 	case CRYPTO_SHA2_224:
2425 	case CRYPTO_SHA2_224_HMAC:
2426 		return (SHA2_224_HASH_LEN);
2427 	case CRYPTO_SHA2_256:
2428 	case CRYPTO_SHA2_256_HMAC:
2429 		return (SHA2_256_HASH_LEN);
2430 	case CRYPTO_SHA2_384:
2431 	case CRYPTO_SHA2_384_HMAC:
2432 		return (SHA2_384_HASH_LEN);
2433 	case CRYPTO_SHA2_512:
2434 	case CRYPTO_SHA2_512_HMAC:
2435 		return (SHA2_512_HASH_LEN);
2436 	default:
2437 		__assert_unreachable();
2438 	}
2439 }
2440 
2441 static int
2442 safexcel_sha_statelen(int alg)
2443 {
2444 	switch (alg) {
2445 	case CRYPTO_SHA1:
2446 	case CRYPTO_SHA1_HMAC:
2447 		return (SHA1_HASH_LEN);
2448 	case CRYPTO_SHA2_224:
2449 	case CRYPTO_SHA2_224_HMAC:
2450 	case CRYPTO_SHA2_256:
2451 	case CRYPTO_SHA2_256_HMAC:
2452 		return (SHA2_256_HASH_LEN);
2453 	case CRYPTO_SHA2_384:
2454 	case CRYPTO_SHA2_384_HMAC:
2455 	case CRYPTO_SHA2_512:
2456 	case CRYPTO_SHA2_512_HMAC:
2457 		return (SHA2_512_HASH_LEN);
2458 	default:
2459 		__assert_unreachable();
2460 	}
2461 }
2462 
2463 static int
2464 safexcel_newsession(device_t dev, crypto_session_t cses,
2465     const struct crypto_session_params *csp)
2466 {
2467 	struct safexcel_session *sess;
2468 	struct safexcel_softc *sc;
2469 
2470 	sc = device_get_softc(dev);
2471 	sess = crypto_get_driver_session(cses);
2472 
2473 	switch (csp->csp_auth_alg) {
2474 	case CRYPTO_SHA1:
2475 	case CRYPTO_SHA2_224:
2476 	case CRYPTO_SHA2_256:
2477 	case CRYPTO_SHA2_384:
2478 	case CRYPTO_SHA2_512:
2479 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_PRECOMPUTED;
2480 		sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
2481 		sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
2482 		sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
2483 		break;
2484 	case CRYPTO_SHA1_HMAC:
2485 	case CRYPTO_SHA2_224_HMAC:
2486 	case CRYPTO_SHA2_256_HMAC:
2487 	case CRYPTO_SHA2_384_HMAC:
2488 	case CRYPTO_SHA2_512_HMAC:
2489 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_HMAC;
2490 		sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
2491 		sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
2492 		sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
2493 		break;
2494 	case CRYPTO_AES_NIST_GMAC:
2495 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
2496 		sess->digestlen = GMAC_DIGEST_LEN;
2497 		sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
2498 		sess->alg = safexcel_aes_algid(csp->csp_auth_klen);
2499 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
2500 		break;
2501 	}
2502 
2503 	switch (csp->csp_cipher_alg) {
2504 	case CRYPTO_AES_NIST_GCM_16:
2505 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
2506 		sess->digestlen = GMAC_DIGEST_LEN;
2507 		sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
2508 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2509 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
2510 		break;
2511 	case CRYPTO_AES_CCM_16:
2512 		sess->hash = safexcel_aes_ccm_hashid(csp->csp_cipher_klen);
2513 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_CCM;
2514 		sess->digestlen = CCM_CBC_MAX_DIGEST_LEN;
2515 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2516 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CCM;
2517 		break;
2518 	case CRYPTO_AES_CBC:
2519 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2520 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CBC;
2521 		break;
2522 	case CRYPTO_AES_ICM:
2523 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2524 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CTR;
2525 		break;
2526 	case CRYPTO_AES_XTS:
2527 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen / 2);
2528 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_XTS;
2529 		break;
2530 	}
2531 
2532 	if (csp->csp_auth_mlen != 0)
2533 		sess->digestlen = csp->csp_auth_mlen;
2534 
2535 	safexcel_setkey(sess, csp, NULL);
2536 
2537 	/* Bind each session to a fixed ring to minimize lock contention. */
2538 	sess->ringidx = atomic_fetchadd_int(&sc->sc_ringidx, 1);
2539 	sess->ringidx %= sc->sc_config.rings;
2540 
2541 	return (0);
2542 }
2543 
2544 static int
2545 safexcel_process(device_t dev, struct cryptop *crp, int hint)
2546 {
2547 	const struct crypto_session_params *csp;
2548 	struct safexcel_request *req;
2549 	struct safexcel_ring *ring;
2550 	struct safexcel_session *sess;
2551 	struct safexcel_softc *sc;
2552 	int error;
2553 
2554 	sc = device_get_softc(dev);
2555 	sess = crypto_get_driver_session(crp->crp_session);
2556 	csp = crypto_get_params(crp->crp_session);
2557 
2558 	if (__predict_false(crypto_buffer_len(&crp->crp_buf) >
2559 	    SAFEXCEL_MAX_REQUEST_SIZE)) {
2560 		crp->crp_etype = E2BIG;
2561 		crypto_done(crp);
2562 		return (0);
2563 	}
2564 
2565 	if (crp->crp_cipher_key != NULL || crp->crp_auth_key != NULL)
2566 		safexcel_setkey(sess, csp, crp);
2567 
2568 	ring = &sc->sc_ring[sess->ringidx];
2569 	mtx_lock(&ring->mtx);
2570 	req = safexcel_alloc_request(sc, ring);
	if (__predict_false(req == NULL)) {
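		/*
		 * No free request structures are available; mark the
		 * symmetric queue blocked so that the framework holds
		 * requests until the driver unblocks it.
		 */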
2572 		mtx_lock(&sc->sc_mtx);
2573 		mtx_unlock(&ring->mtx);
2574 		sc->sc_blocked = CRYPTO_SYMQ;
2575 		mtx_unlock(&sc->sc_mtx);
2576 		return (ERESTART);
2577 	}
2578 
2579 	req->crp = crp;
2580 	req->sess = sess;
2581 
2582 	crypto_read_iv(crp, req->iv);
2583 
2584 	error = safexcel_create_chain(ring, req);
2585 	if (__predict_false(error != 0)) {
2586 		safexcel_free_request(ring, req);
2587 		mtx_unlock(&ring->mtx);
2588 		crp->crp_etype = error;
2589 		crypto_done(crp);
2590 		return (0);
2591 	}
2592 
2593 	safexcel_set_token(req);
2594 
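	/*
	 * Transfer ownership of the request's DMA buffers to the device
	 * before it is told to start processing.
	 */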
2595 	bus_dmamap_sync(ring->data_dtag, req->dmap,
2596 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2597 	bus_dmamap_sync(req->ctx.tag, req->ctx.map,
2598 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2599 	bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
2600 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2601 	bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
2602 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2603 	bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
2604 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2605 
2606 	safexcel_enqueue_request(sc, ring, req);
2607 
2608 	if ((hint & CRYPTO_HINT_MORE) == 0)
2609 		safexcel_execute(sc, ring, req);
2610 	mtx_unlock(&ring->mtx);
2611 
2612 	return (0);
2613 }
2614 
2615 static device_method_t safexcel_methods[] = {
2616 	/* Device interface */
2617 	DEVMETHOD(device_probe,		safexcel_probe),
2618 	DEVMETHOD(device_attach,	safexcel_attach),
2619 	DEVMETHOD(device_detach,	safexcel_detach),
2620 
2621 	/* Cryptodev interface */
2622 	DEVMETHOD(cryptodev_probesession, safexcel_probesession),
2623 	DEVMETHOD(cryptodev_newsession,	safexcel_newsession),
2624 	DEVMETHOD(cryptodev_process,	safexcel_process),
2625 
2626 	DEVMETHOD_END
2627 };
2628 
2629 static devclass_t safexcel_devclass;
2630 
2631 static driver_t safexcel_driver = {
2632 	.name 		= "safexcel",
2633 	.methods 	= safexcel_methods,
2634 	.size		= sizeof(struct safexcel_softc),
2635 };
2636 
2637 DRIVER_MODULE(safexcel, simplebus, safexcel_driver, safexcel_devclass, 0, 0);
2638 MODULE_VERSION(safexcel, 1);
2639 MODULE_DEPEND(safexcel, crypto, 1, 1, 1);
2640