xref: /freebsd/sys/dev/sec/sec.c (revision 5b9c547c)
1 /*-
2  * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
17  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
18  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
19  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
21  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
22  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25 
26 /*
27  * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and
28  * 3.0 are supported.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/bus.h>
37 #include <sys/endian.h>
38 #include <sys/kernel.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/module.h>
43 #include <sys/mutex.h>
44 #include <sys/random.h>
45 #include <sys/rman.h>
46 
47 #include <machine/bus.h>
48 #include <machine/resource.h>
49 
50 #include <opencrypto/cryptodev.h>
51 #include "cryptodev_if.h"
52 
53 #include <dev/ofw/ofw_bus_subr.h>
54 #include <dev/sec/sec.h>
55 
56 static int	sec_probe(device_t dev);
57 static int	sec_attach(device_t dev);
58 static int	sec_detach(device_t dev);
59 static int	sec_suspend(device_t dev);
60 static int	sec_resume(device_t dev);
61 static int	sec_shutdown(device_t dev);
62 static void	sec_primary_intr(void *arg);
63 static void	sec_secondary_intr(void *arg);
64 static int	sec_setup_intr(struct sec_softc *sc, struct resource **ires,
65     void **ihand, int *irid, driver_intr_t handler, const char *iname);
66 static void	sec_release_intr(struct sec_softc *sc, struct resource *ires,
67     void *ihand, int irid, const char *iname);
68 static int	sec_controller_reset(struct sec_softc *sc);
69 static int	sec_channel_reset(struct sec_softc *sc, int channel, int full);
70 static int	sec_init(struct sec_softc *sc);
71 static int	sec_alloc_dma_mem(struct sec_softc *sc,
72     struct sec_dma_mem *dma_mem, bus_size_t size);
73 static int	sec_desc_map_dma(struct sec_softc *sc,
74     struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type,
75     struct sec_desc_map_info *sdmi);
76 static void	sec_free_dma_mem(struct sec_dma_mem *dma_mem);
77 static void	sec_enqueue(struct sec_softc *sc);
78 static int	sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
79     int channel);
80 static int	sec_eu_channel(struct sec_softc *sc, int eu);
81 static int	sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
82     u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype);
83 static int	sec_make_pointer_direct(struct sec_softc *sc,
84     struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
85 static int	sec_alloc_session(struct sec_softc *sc);
86 static int	sec_newsession(device_t dev, u_int32_t *sidp,
87     struct cryptoini *cri);
88 static int	sec_freesession(device_t dev, uint64_t tid);
89 static int	sec_process(device_t dev, struct cryptop *crp, int hint);
90 static int	sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
91     struct cryptoini **mac);
92 static int	sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
93     struct cryptodesc **mac);
94 static int	sec_build_common_ns_desc(struct sec_softc *sc,
95     struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
96     struct cryptodesc *enc, int buftype);
97 static int	sec_build_common_s_desc(struct sec_softc *sc,
98     struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
99     struct cryptodesc *enc, struct cryptodesc *mac, int buftype);
100 
101 static struct sec_session *sec_get_session(struct sec_softc *sc, u_int sid);
102 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
103 
104 /* AESU */
105 static int	sec_aesu_newsession(struct sec_softc *sc,
106     struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
107 static int	sec_aesu_make_desc(struct sec_softc *sc,
108     struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
109     int buftype);
110 
111 /* DEU */
112 static int	sec_deu_newsession(struct sec_softc *sc,
113     struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
114 static int	sec_deu_make_desc(struct sec_softc *sc,
115     struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
116     int buftype);
117 
118 /* MDEU */
119 static int	sec_mdeu_can_handle(u_int alg);
120 static int	sec_mdeu_config(struct cryptodesc *crd,
121     u_int *eu, u_int *mode, u_int *hashlen);
122 static int	sec_mdeu_newsession(struct sec_softc *sc,
123     struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
124 static int	sec_mdeu_make_desc(struct sec_softc *sc,
125     struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
126     int buftype);
127 
128 static device_method_t sec_methods[] = {
129 	/* Device interface */
130 	DEVMETHOD(device_probe,		sec_probe),
131 	DEVMETHOD(device_attach,	sec_attach),
132 	DEVMETHOD(device_detach,	sec_detach),
133 
134 	DEVMETHOD(device_suspend,	sec_suspend),
135 	DEVMETHOD(device_resume,	sec_resume),
136 	DEVMETHOD(device_shutdown,	sec_shutdown),
137 
138 	/* Crypto methods */
139 	DEVMETHOD(cryptodev_newsession,	sec_newsession),
140 	DEVMETHOD(cryptodev_freesession,sec_freesession),
141 	DEVMETHOD(cryptodev_process,	sec_process),
142 
143 	DEVMETHOD_END
144 };
145 static driver_t sec_driver = {
146 	"sec",
147 	sec_methods,
148 	sizeof(struct sec_softc),
149 };
150 
151 static devclass_t sec_devclass;
152 DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
153 MODULE_DEPEND(sec, crypto, 1, 1, 1);
154 
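/* Per-EU session setup and descriptor build handlers, tried in order by sec_newsession() */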
155 static struct sec_eu_methods sec_eus[] = {
156 	{
157 		sec_aesu_newsession,
158 		sec_aesu_make_desc,
159 	},
160 	{
161 		sec_deu_newsession,
162 		sec_deu_make_desc,
163 	},
164 	{
165 		sec_mdeu_newsession,
166 		sec_mdeu_make_desc,
167 	},
168 	{ NULL, NULL }
169 };
170 
171 static inline void
172 sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
173 {
174 
175 	/* Sync only if dma memory is valid */
176 	if (dma_mem->dma_vaddr != NULL)
177 		bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
178 }
179 
180 static inline void
181 sec_free_session(struct sec_softc *sc, struct sec_session *ses)
182 {
183 
184 	SEC_LOCK(sc, sessions);
185 	ses->ss_used = 0;
186 	SEC_UNLOCK(sc, sessions);
187 }
188 
189 static inline void *
190 sec_get_pointer_data(struct sec_desc *desc, u_int n)
191 {
192 
193 	return (desc->sd_ptr_dmem[n].dma_vaddr);
194 }
195 
196 static int
197 sec_probe(device_t dev)
198 {
199 	struct sec_softc *sc;
200 	uint64_t id;
201 
202 	if (!ofw_bus_status_okay(dev))
203 		return (ENXIO);
204 
205 	if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
206 		return (ENXIO);
207 
208 	sc = device_get_softc(dev);
209 
210 	sc->sc_rrid = 0;
211 	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
212 	    RF_ACTIVE);
213 
214 	if (sc->sc_rres == NULL)
215 		return (ENXIO);
216 
217 	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
218 	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
219 
220 	id = SEC_READ(sc, SEC_ID);
221 
222 	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
223 
224 	switch (id) {
225 	case SEC_20_ID:
226 		device_set_desc(dev, "Freescale Security Engine 2.0");
227 		sc->sc_version = 2;
228 		break;
229 	case SEC_30_ID:
230 		device_set_desc(dev, "Freescale Security Engine 3.0");
231 		sc->sc_version = 3;
232 		break;
233 	case SEC_31_ID:
234 		device_set_desc(dev, "Freescale Security Engine 3.1");
235 		sc->sc_version = 3;
236 		break;
237 	default:
238 		device_printf(dev, "unknown SEC ID 0x%016llx!\n", id);
239 		return (ENXIO);
240 	}
241 
242 	return (0);
243 }
244 
245 static int
246 sec_attach(device_t dev)
247 {
248 	struct sec_softc *sc;
249 	struct sec_hw_lt *lt;
250 	int error = 0;
251 	int i;
252 
253 	sc = device_get_softc(dev);
254 	sc->sc_dev = dev;
255 	sc->sc_blocked = 0;
256 	sc->sc_shutdown = 0;
257 
258 	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
259 	if (sc->sc_cid < 0) {
260 		device_printf(dev, "could not get crypto driver ID!\n");
261 		return (ENXIO);
262 	}
263 
264 	/* Init locks */
265 	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
266 	    "SEC Controller lock", MTX_DEF);
267 	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
268 	    "SEC Descriptors lock", MTX_DEF);
269 	mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
270 	    "SEC Sessions lock", MTX_DEF);
271 
272 	/* Allocate I/O memory for SEC registers */
273 	sc->sc_rrid = 0;
274 	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
275 	    RF_ACTIVE);
276 
277 	if (sc->sc_rres == NULL) {
278 		device_printf(dev, "could not allocate I/O memory!\n");
279 		goto fail1;
280 	}
281 
282 	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
283 	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
284 
285 	/* Setup interrupts */
286 	sc->sc_pri_irid = 0;
287 	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
288 	    &sc->sc_pri_irid, sec_primary_intr, "primary");
289 
290 	if (error)
291 		goto fail2;
292 
293 
294 	if (sc->sc_version == 3) {
295 		sc->sc_sec_irid = 1;
296 		error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
297 		    &sc->sc_sec_irid, sec_secondary_intr, "secondary");
298 
299 		if (error)
300 			goto fail3;
301 	}
302 
303 	/* Alloc DMA memory for descriptors and link tables */
304 	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
305 	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));
306 
307 	if (error)
308 		goto fail4;
309 
310 	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
311 	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));
312 
313 	if (error)
314 		goto fail5;
315 
316 	/* Fill in descriptors and link tables */
317 	for (i = 0; i < SEC_DESCRIPTORS; i++) {
318 		sc->sc_desc[i].sd_desc =
319 		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
320 		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
321 		    (i * sizeof(struct sec_hw_desc));
322 	}
323 
324 	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
325 		sc->sc_lt[i].sl_lt =
326 		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
327 		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
328 		    (i * sizeof(struct sec_hw_lt));
329 	}
330 
331 	/* The last link table entry points back to the first one, closing the ring */
332 	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
333 	lt->shl_length = 0;
334 	lt->shl_r = 0;
335 	lt->shl_n = 1;
336 	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;
337 
338 	/* Init descriptor and link table queue pointers */
339 	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
340 	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
341 	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
342 	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
343 	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
344 	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
345 	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
346 	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);
347 
348 	/* Create masks for fast checks */
349 	sc->sc_int_error_mask = 0;
350 	for (i = 0; i < SEC_CHANNELS; i++)
351 		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));
352 
353 	switch (sc->sc_version) {
354 	case 2:
355 		sc->sc_channel_idle_mask =
356 		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
357 		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
358 		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
359 		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
360 		break;
361 	case 3:
362 		sc->sc_channel_idle_mask =
363 		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
364 		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
365 		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
366 		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
367 		break;
368 	}
369 
370 	/* Init hardware */
371 	error = sec_init(sc);
372 
373 	if (error)
374 		goto fail6;
375 
376 	/* Register in OCF (AESU) */
377 	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
378 
379 	/* Register in OCF (DEU) */
380 	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
381 	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
382 
383 	/* Register in OCF (MDEU) */
384 	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
385 	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
386 	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
387 	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
388 	crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
389 	if (sc->sc_version >= 3) {
390 		crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0);
391 		crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0);
392 	}
393 
394 	return (0);
395 
396 fail6:
397 	sec_free_dma_mem(&(sc->sc_lt_dmem));
398 fail5:
399 	sec_free_dma_mem(&(sc->sc_desc_dmem));
400 fail4:
401 	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
402 	    sc->sc_sec_irid, "secondary");
403 fail3:
404 	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
405 	    sc->sc_pri_irid, "primary");
406 fail2:
407 	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
408 fail1:
409 	mtx_destroy(&sc->sc_controller_lock);
410 	mtx_destroy(&sc->sc_descriptors_lock);
411 	mtx_destroy(&sc->sc_sessions_lock);
412 
413 	return (ENXIO);
414 }
415 
416 static int
417 sec_detach(device_t dev)
418 {
419 	struct sec_softc *sc = device_get_softc(dev);
420 	int i, error, timeout = SEC_TIMEOUT;
421 
422 	/* Prepare driver to shutdown */
423 	SEC_LOCK(sc, descriptors);
424 	sc->sc_shutdown = 1;
425 	SEC_UNLOCK(sc, descriptors);
426 
427 	/* Wait until all queued processing finishes */
428 	while (1) {
429 		SEC_LOCK(sc, descriptors);
430 		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
431 		SEC_UNLOCK(sc, descriptors);
432 
433 		if (i == 0)
434 			break;
435 
436 		if (timeout < 0) {
437 			device_printf(dev, "queue flush timeout!\n");
438 
439 			/* DMA can still be active - stop it */
440 			for (i = 0; i < SEC_CHANNELS; i++)
441 				sec_channel_reset(sc, i, 1);
442 
443 			break;
444 		}
445 
446 		timeout -= 1000;
447 		DELAY(1000);
448 	}
449 
450 	/* Disable interrupts */
451 	SEC_WRITE(sc, SEC_IER, 0);
452 
453 	/* Unregister from OCF */
454 	crypto_unregister_all(sc->sc_cid);
455 
456 	/* Free DMA memory */
457 	for (i = 0; i < SEC_DESCRIPTORS; i++)
458 		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));
459 
460 	sec_free_dma_mem(&(sc->sc_lt_dmem));
461 	sec_free_dma_mem(&(sc->sc_desc_dmem));
462 
463 	/* Release interrupts */
464 	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
465 	    sc->sc_pri_irid, "primary");
466 	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
467 	    sc->sc_sec_irid, "secondary");
468 
469 	/* Release memory */
470 	if (sc->sc_rres) {
471 		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
472 		    sc->sc_rres);
473 		if (error)
474 			device_printf(dev, "bus_release_resource() failed for"
475 			    " I/O memory, error %d\n", error);
476 
477 		sc->sc_rres = NULL;
478 	}
479 
480 	mtx_destroy(&sc->sc_controller_lock);
481 	mtx_destroy(&sc->sc_descriptors_lock);
482 	mtx_destroy(&sc->sc_sessions_lock);
483 
484 	return (0);
485 }
486 
487 static int
488 sec_suspend(device_t dev)
489 {
490 
491 	return (0);
492 }
493 
494 static int
495 sec_resume(device_t dev)
496 {
497 
498 	return (0);
499 }
500 
501 static int
502 sec_shutdown(device_t dev)
503 {
504 
505 	return (0);
506 }
507 
508 static int
509 sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
510     int *irid, driver_intr_t handler, const char *iname)
511 {
512 	int error;
513 
514 	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
515 	    RF_ACTIVE);
516 
517 	if ((*ires) == NULL) {
518 		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
519 		return (ENXIO);
520 	}
521 
522 	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
523 	    NULL, handler, sc, ihand);
524 
525 	if (error) {
526 		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
527 		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
528 			device_printf(sc->sc_dev, "could not release %s IRQ\n",
529 			    iname);
530 
531 		(*ires) = NULL;
532 		return (error);
533 	}
534 
535 	return (0);
536 }
537 
538 static void
539 sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
540     int irid, const char *iname)
541 {
542 	int error;
543 
544 	if (ires == NULL)
545 		return;
546 
547 	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
548 	if (error)
549 		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
550 		    " IRQ, error %d\n", iname, error);
551 
552 	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
553 	if (error)
554 		device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
555 		    " IRQ, error %d\n", iname, error);
556 }
557 
558 static void
559 sec_primary_intr(void *arg)
560 {
561 	struct sec_softc *sc = arg;
562 	struct sec_desc *desc;
563 	uint64_t isr;
564 	int i, wakeup = 0;
565 
566 	SEC_LOCK(sc, controller);
567 
568 	/* Check for errors */
569 	isr = SEC_READ(sc, SEC_ISR);
570 	if (isr & sc->sc_int_error_mask) {
571 		/* Check each channel for error */
572 		for (i = 0; i < SEC_CHANNELS; i++) {
573 			if ((isr & SEC_INT_CH_ERR(i)) == 0)
574 				continue;
575 
576 			device_printf(sc->sc_dev,
577 			    "I/O error on channel %i!\n", i);
578 
579 			/* Find and mark problematic descriptor */
580 			desc = sec_find_desc(sc, SEC_READ(sc,
581 			    SEC_CHAN_CDPR(i)));
582 
583 			if (desc != NULL)
584 				desc->sd_error = EIO;
585 
586 			/* Do partial channel reset */
587 			sec_channel_reset(sc, i, 0);
588 		}
589 	}
590 
591 	/* ACK interrupt */
592 	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);
593 
594 	SEC_UNLOCK(sc, controller);
595 	SEC_LOCK(sc, descriptors);
596 
597 	/* Handle processed descriptors */
598 	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
599 
600 	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
601 		desc = SEC_GET_QUEUED_DESC(sc);
602 
603 		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
604 			SEC_PUT_BACK_QUEUED_DESC(sc);
605 			break;
606 		}
607 
608 		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
609 		    BUS_DMASYNC_PREWRITE);
610 
611 		desc->sd_crp->crp_etype = desc->sd_error;
612 		crypto_done(desc->sd_crp);
613 
614 		SEC_DESC_FREE_POINTERS(desc);
615 		SEC_DESC_FREE_LT(sc, desc);
616 		SEC_DESC_QUEUED2FREE(sc);
617 	}
618 
619 	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
620 
621 	if (!sc->sc_shutdown) {
622 		wakeup = sc->sc_blocked;
623 		sc->sc_blocked = 0;
624 	}
625 
626 	SEC_UNLOCK(sc, descriptors);
627 
628 	/* Enqueue ready descriptors in hardware */
629 	sec_enqueue(sc);
630 
631 	if (wakeup)
632 		crypto_unblock(sc->sc_cid, wakeup);
633 }
634 
635 static void
636 sec_secondary_intr(void *arg)
637 {
638 	struct sec_softc *sc = arg;
639 
640 	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
641 	sec_primary_intr(arg);
642 }
643 
644 static int
645 sec_controller_reset(struct sec_softc *sc)
646 {
647 	int timeout = SEC_TIMEOUT;
648 
649 	/* Reset Controller */
650 	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
651 
652 	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
653 		DELAY(1000);
654 		timeout -= 1000;
655 
656 		if (timeout < 0) {
657 			device_printf(sc->sc_dev, "timeout while waiting for "
658 			    "device reset!\n");
659 			return (ETIMEDOUT);
660 		}
661 	}
662 
663 	return (0);
664 }
665 
666 static int
667 sec_channel_reset(struct sec_softc *sc, int channel, int full)
668 {
669 	int timeout = SEC_TIMEOUT;
670 	uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
671 	uint64_t reg;
672 
673 	/* Reset Channel */
674 	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
675 	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
676 
677 	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
678 		DELAY(1000);
679 		timeout -= 1000;
680 
681 		if (timeout < 0) {
682 			device_printf(sc->sc_dev, "timeout while waiting for "
683 			    "channel reset!\n");
684 			return (ETIMEDOUT);
685 		}
686 	}
687 
688 	if (full) {
689 		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
690 
691 		switch(sc->sc_version) {
692 		case 2:
693 			reg |= SEC_CHAN_CCR_CDWE;
694 			break;
695 		case 3:
696 			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
697 			break;
698 		}
699 
700 		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
701 	}
702 
703 	return (0);
704 }
705 
706 static int
707 sec_init(struct sec_softc *sc)
708 {
709 	uint64_t reg;
710 	int error, i;
711 
712 	/* Reset controller twice to clear all pending interrupts */
713 	error = sec_controller_reset(sc);
714 	if (error)
715 		return (error);
716 
717 	error = sec_controller_reset(sc);
718 	if (error)
719 		return (error);
720 
721 	/* Reset channels */
722 	for (i = 0; i < SEC_CHANNELS; i++) {
723 		error = sec_channel_reset(sc, i, 1);
724 		if (error)
725 			return (error);
726 	}
727 
728 	/* Enable Interrupts */
729 	reg = SEC_INT_ITO;
730 	for (i = 0; i < SEC_CHANNELS; i++)
731 		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
732 
733 	SEC_WRITE(sc, SEC_IER, reg);
734 
735 	return (error);
736 }
737 
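/* bus_dmamap_load() callback: record the bus address of the single DMA segment */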
738 static void
739 sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
740 {
741 	struct sec_dma_mem *dma_mem = arg;
742 
743 	if (error)
744 		return;
745 
746 	KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
747 	dma_mem->dma_paddr = segs->ds_addr;
748 }
749 
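/* DMA load callback: build one link table entry per segment of the mapped data */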
750 static void
751 sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
752     int error)
753 {
754 	struct sec_desc_map_info *sdmi = arg;
755 	struct sec_softc *sc = sdmi->sdmi_sc;
756 	struct sec_lt *lt = NULL;
757 	bus_addr_t addr;
758 	bus_size_t size;
759 	int i;
760 
761 	SEC_LOCK_ASSERT(sc, descriptors);
762 
763 	if (error)
764 		return;
765 
766 	for (i = 0; i < nseg; i++) {
767 		addr = segs[i].ds_addr;
768 		size = segs[i].ds_len;
769 
770 		/* Skip requested offset */
771 		if (sdmi->sdmi_offset >= size) {
772 			sdmi->sdmi_offset -= size;
773 			continue;
774 		}
775 
776 		addr += sdmi->sdmi_offset;
777 		size -= sdmi->sdmi_offset;
778 		sdmi->sdmi_offset = 0;
779 
780 		/* Do not link more than requested */
781 		if (sdmi->sdmi_size < size)
782 			size = sdmi->sdmi_size;
783 
784 		lt = SEC_ALLOC_LT_ENTRY(sc);
785 		lt->sl_lt->shl_length = size;
786 		lt->sl_lt->shl_r = 0;
787 		lt->sl_lt->shl_n = 0;
788 		lt->sl_lt->shl_ptr = addr;
789 
790 		if (sdmi->sdmi_lt_first == NULL)
791 			sdmi->sdmi_lt_first = lt;
792 
793 		sdmi->sdmi_lt_used += 1;
794 
795 		if ((sdmi->sdmi_size -= size) == 0)
796 			break;
797 	}
798 
799 	sdmi->sdmi_lt_last = lt;
800 }
801 
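/* Variant of the above for bus_dmamap_load_mbuf()/load_uio(), which pass an extra size argument */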
802 static void
803 sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
804     bus_size_t size, int error)
805 {
806 
807 	sec_dma_map_desc_cb(arg, segs, nseg, error);
808 }
809 
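/* Allocate and map a contiguous block of DMA-safe memory */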
810 static int
811 sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
812     bus_size_t size)
813 {
814 	int error;
815 
816 	if (dma_mem->dma_vaddr != NULL)
817 		return (EBUSY);
818 
819 	error = bus_dma_tag_create(NULL,	/* parent */
820 		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
821 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
822 		BUS_SPACE_MAXADDR,		/* highaddr */
823 		NULL, NULL,			/* filtfunc, filtfuncarg */
824 		size, 1,			/* maxsize, nsegments */
825 		size, 0,			/* maxsegsz, flags */
826 		NULL, NULL,			/* lockfunc, lockfuncarg */
827 		&(dma_mem->dma_tag));		/* dmat */
828 
829 	if (error) {
830 		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
831 		    " %i!\n", error);
832 		goto err1;
833 	}
834 
835 	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
836 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));
837 
838 	if (error) {
839 		device_printf(sc->sc_dev, "failed to allocate DMA safe"
840 		    " memory, error %i!\n", error);
841 		goto err2;
842 	}
843 
844 	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
845 		    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
846 		    BUS_DMA_NOWAIT);
847 
848 	if (error) {
849 		device_printf(sc->sc_dev, "cannot get address of the DMA"
850 		    " memory, error %i\n", error);
851 		goto err3;
852 	}
853 
854 	dma_mem->dma_is_map = 0;
855 	return (0);
856 
857 err3:
858 	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
859 err2:
860 	bus_dma_tag_destroy(dma_mem->dma_tag);
861 err1:
862 	dma_mem->dma_vaddr = NULL;
863 	return(error);
864 }
865 
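/* Map client data (flat buffer, uio or mbuf chain) for DMA and build its link table entries */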
866 static int
867 sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem,
868     bus_size_t size, int type, struct sec_desc_map_info *sdmi)
869 {
870 	int error;
871 
872 	if (dma_mem->dma_vaddr != NULL)
873 		return (EBUSY);
874 
875 	switch (type) {
876 	case SEC_MEMORY:
877 		break;
878 	case SEC_UIO:
879 		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
880 		break;
881 	case SEC_MBUF:
882 		size = m_length((struct mbuf*)mem, NULL);
883 		break;
884 	default:
885 		return (EINVAL);
886 	}
887 
888 	error = bus_dma_tag_create(NULL,	/* parent */
889 		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
890 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
891 		BUS_SPACE_MAXADDR,		/* highaddr */
892 		NULL, NULL,			/* filtfunc, filtfuncarg */
893 		size,				/* maxsize */
894 		SEC_FREE_LT_CNT(sc),		/* nsegments */
895 		SEC_MAX_DMA_BLOCK_SIZE, 0,	/* maxsegsz, flags */
896 		NULL, NULL,			/* lockfunc, lockfuncarg */
897 		&(dma_mem->dma_tag));		/* dmat */
898 
899 	if (error) {
900 		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
901 		    " %i!\n", error);
902 		dma_mem->dma_vaddr = NULL;
903 		return (error);
904 	}
905 
906 	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
907 
908 	if (error) {
909 		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
910 		    "\n", error);
911 		bus_dma_tag_destroy(dma_mem->dma_tag);
912 		return (error);
913 	}
914 
915 	switch (type) {
916 	case SEC_MEMORY:
917 		error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
918 		    mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
919 		break;
920 	case SEC_UIO:
921 		error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map,
922 		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
923 		break;
924 	case SEC_MBUF:
925 		error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map,
926 		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
927 		break;
928 	}
929 
930 	if (error) {
931 		device_printf(sc->sc_dev, "cannot get address of the DMA"
932 		    " memory, error %i!\n", error);
933 		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
934 		bus_dma_tag_destroy(dma_mem->dma_tag);
935 		return (error);
936 	}
937 
938 	dma_mem->dma_is_map = 1;
939 	dma_mem->dma_vaddr = mem;
940 
941 	return (0);
942 }
943 
944 static void
945 sec_free_dma_mem(struct sec_dma_mem *dma_mem)
946 {
947 
948 	/* Check for double free */
949 	if (dma_mem->dma_vaddr == NULL)
950 		return;
951 
952 	bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
953 
954 	if (dma_mem->dma_is_map)
955 		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
956 	else
957 		bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
958 		    dma_mem->dma_map);
959 
960 	bus_dma_tag_destroy(dma_mem->dma_tag);
961 	dma_mem->dma_vaddr = NULL;
962 }
963 
964 static int
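/* Return the channel currently assigned to the given EU, or -1 if the EU is not in use */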
965 sec_eu_channel(struct sec_softc *sc, int eu)
966 {
967 	uint64_t reg;
968 	int channel = 0;
969 
970 	SEC_LOCK_ASSERT(sc, controller);
971 
972 	reg = SEC_READ(sc, SEC_EUASR);
973 
974 	switch (eu) {
975 	case SEC_EU_AFEU:
976 		channel = SEC_EUASR_AFEU(reg);
977 		break;
978 	case SEC_EU_DEU:
979 		channel = SEC_EUASR_DEU(reg);
980 		break;
981 	case SEC_EU_MDEU_A:
982 	case SEC_EU_MDEU_B:
983 		channel = SEC_EUASR_MDEU(reg);
984 		break;
985 	case SEC_EU_RNGU:
986 		channel = SEC_EUASR_RNGU(reg);
987 		break;
988 	case SEC_EU_PKEU:
989 		channel = SEC_EUASR_PKEU(reg);
990 		break;
991 	case SEC_EU_AESU:
992 		channel = SEC_EUASR_AESU(reg);
993 		break;
994 	case SEC_EU_KEU:
995 		channel = SEC_EUASR_KEU(reg);
996 		break;
997 	case SEC_EU_CRCU:
998 		channel = SEC_EUASR_CRCU(reg);
999 		break;
1000 	}
1001 
1002 	return (channel - 1);
1003 }
1004 
1005 static int
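/* Push a descriptor into a channel fetch FIFO; return the channel used or -1 if none is available */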
1006 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
1007 {
1008 	u_int fflvl = SEC_MAX_FIFO_LEVEL;
1009 	uint64_t reg;
1010 	int i;
1011 
1012 	SEC_LOCK_ASSERT(sc, controller);
1013 
1014 	/* Find a free channel if we have not been given one */
1015 	if (channel < 0) {
1016 		for (i = 0; i < SEC_CHANNELS; i++) {
1017 			reg = SEC_READ(sc, SEC_CHAN_CSR(i));
1018 
1019 			if ((reg & sc->sc_channel_idle_mask) == 0) {
1020 				channel = i;
1021 				break;
1022 			}
1023 		}
1024 	}
1025 
1026 	/* There is no free channel */
1027 	if (channel < 0)
1028 		return (-1);
1029 
1030 	/* Check FIFO level on selected channel */
1031 	reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1032 
1033 	switch(sc->sc_version) {
1034 	case 2:
1035 		fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
1036 		break;
1037 	case 3:
1038 		fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1039 		break;
1040 	}
1041 
1042 	if (fflvl >= SEC_MAX_FIFO_LEVEL)
1043 		return (-1);
1044 
1045 	/* Enqueue descriptor in channel */
1046 	SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1047 
1048 	return (channel);
1049 }
1050 
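/* Move ready descriptors into hardware channels, preferring channels already bound to the required EUs */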
1051 static void
1052 sec_enqueue(struct sec_softc *sc)
1053 {
1054 	struct sec_desc *desc;
1055 	int ch0, ch1;
1056 
1057 	SEC_LOCK(sc, descriptors);
1058 	SEC_LOCK(sc, controller);
1059 
1060 	while (SEC_READY_DESC_CNT(sc) > 0) {
1061 		desc = SEC_GET_READY_DESC(sc);
1062 
1063 		ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
1064 		ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);
1065 
1066 		/*
1067 		 * Both EUs are in use by the same channel.
1068 		 * Enqueue the descriptor in the channel used by the busy EUs.
1069 		 */
1070 		if (ch0 >= 0 && ch0 == ch1) {
1071 			if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
1072 				SEC_DESC_READY2QUEUED(sc);
1073 				continue;
1074 			}
1075 		}
1076 
1077 		/*
1078 		 * Only one EU is free.
1079 		 * Enqueue the descriptor in the channel used by the busy EU.
1080 		 */
1081 		if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
1082 			if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
1083 			    >= 0) {
1084 				SEC_DESC_READY2QUEUED(sc);
1085 				continue;
1086 			}
1087 		}
1088 
1089 		/*
1090 		 * Both EUs are free.
1091 		 * Enqueue the descriptor in the first free channel.
1092 		 */
1093 		if (ch0 < 0 && ch1 < 0) {
1094 			if (sec_enqueue_desc(sc, desc, -1) >= 0) {
1095 				SEC_DESC_READY2QUEUED(sc);
1096 				continue;
1097 			}
1098 		}
1099 
1100 		/* The current descriptor cannot be queued at the moment */
1101 		SEC_PUT_BACK_READY_DESC(sc);
1102 		break;
1103 	}
1104 
1105 	SEC_UNLOCK(sc, controller);
1106 	SEC_UNLOCK(sc, descriptors);
1107 }
1108 
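/* Find the software descriptor matching a hardware descriptor physical address */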
1109 static struct sec_desc *
1110 sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1111 {
1112 	struct sec_desc *desc = NULL;
1113 	int i;
1114 
1115 	SEC_LOCK_ASSERT(sc, descriptors);
1116 
1117 	for (i = 0; i < SEC_DESCRIPTORS; i++) {
1118 		if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1119 			desc = &(sc->sc_desc[i]);
1120 			break;
1121 		}
1122 	}
1123 
1124 	return (desc);
1125 }
1126 
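/* Fill descriptor pointer n with a single contiguous buffer (no link table) */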
1127 static int
1128 sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1129     bus_addr_t data, bus_size_t dsize)
1130 {
1131 	struct sec_hw_desc_ptr *ptr;
1132 
1133 	SEC_LOCK_ASSERT(sc, descriptors);
1134 
1135 	ptr = &(desc->sd_desc->shd_pointer[n]);
1136 	ptr->shdp_length = dsize;
1137 	ptr->shdp_extent = 0;
1138 	ptr->shdp_j = 0;
1139 	ptr->shdp_ptr = data;
1140 
1141 	return (0);
1142 }
1143 
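/* Fill descriptor pointer n with a link table describing possibly scattered data */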
1144 static int
1145 sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1146     u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype)
1147 {
1148 	struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1149 	struct sec_hw_desc_ptr *ptr;
1150 	int error;
1151 
1152 	SEC_LOCK_ASSERT(sc, descriptors);
1153 
1154 	/* For flat memory map only requested region */
1155 	/* For flat memory, map only the requested region */
1156 		 data = (uint8_t*)(data) + doffset;
1157 		 sdmi.sdmi_offset = 0;
1158 	}
1159 
1160 	error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize,
1161 	    dtype, &sdmi);
1162 
1163 	if (error)
1164 		return (error);
1165 
1166 	sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1167 	desc->sd_lt_used += sdmi.sdmi_lt_used;
1168 
1169 	ptr = &(desc->sd_desc->shd_pointer[n]);
1170 	ptr->shdp_length = dsize;
1171 	ptr->shdp_extent = 0;
1172 	ptr->shdp_j = 1;
1173 	ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
1174 
1175 	return (0);
1176 }
1177 
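/* Split session parameters into cipher and MAC parts */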
1178 static int
1179 sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
1180     struct cryptoini **mac)
1181 {
1182 	struct cryptoini *e, *m;
1183 
1184 	e = cri;
1185 	m = cri->cri_next;
1186 
1187 	/* We can handle only two operations */
1188 	if (m && m->cri_next)
1189 		return (EINVAL);
1190 
1191 	if (sec_mdeu_can_handle(e->cri_alg)) {
1192 		cri = m;
1193 		m = e;
1194 		e = cri;
1195 	}
1196 
1197 	if (m && !sec_mdeu_can_handle(m->cri_alg))
1198 		return (EINVAL);
1199 
1200 	*enc = e;
1201 	*mac = m;
1202 
1203 	return (0);
1204 }
1205 
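/* Split request descriptors into cipher and MAC parts */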
1206 static int
1207 sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
1208     struct cryptodesc **mac)
1209 {
1210 	struct cryptodesc *e, *m, *t;
1211 
1212 	e = crp->crp_desc;
1213 	m = e->crd_next;
1214 
1215 	/* We can handle only two operations */
1216 	if (m && m->crd_next)
1217 		return (EINVAL);
1218 
1219 	if (sec_mdeu_can_handle(e->crd_alg)) {
1220 		t = m;
1221 		m = e;
1222 		e = t;
1223 	}
1224 
1225 	if (m && !sec_mdeu_can_handle(m->crd_alg))
1226 		return (EINVAL);
1227 
1228 	*enc = e;
1229 	*mac = m;
1230 
1231 	return (0);
1232 }
1233 
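/* Allocate a free session slot; return its index or -1 if all are in use */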
1234 static int
1235 sec_alloc_session(struct sec_softc *sc)
1236 {
1237 	struct sec_session *ses = NULL;
1238 	int sid = -1;
1239 	u_int i;
1240 
1241 	SEC_LOCK(sc, sessions);
1242 
1243 	for (i = 0; i < SEC_MAX_SESSIONS; i++) {
1244 		if (sc->sc_sessions[i].ss_used == 0) {
1245 			ses = &(sc->sc_sessions[i]);
1246 			ses->ss_used = 1;
1247 			ses->ss_ivlen = 0;
1248 			ses->ss_klen = 0;
1249 			ses->ss_mklen = 0;
1250 			sid = i;
1251 			break;
1252 		}
1253 	}
1254 
1255 	SEC_UNLOCK(sc, sessions);
1256 
1257 	return (sid);
1258 }
1259 
1260 static struct sec_session *
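/* Return the session for the given ID, or NULL if it is out of range or unused */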
1261 sec_get_session(struct sec_softc *sc, u_int sid)
1262 {
1263 	struct sec_session *ses;
1264 
1265 	if (sid >= SEC_MAX_SESSIONS)
1266 		return (NULL);
1267 
1268 	SEC_LOCK(sc, sessions);
1269 
1270 	ses = &(sc->sc_sessions[sid]);
1271 
1272 	if (ses->ss_used == 0)
1273 		ses = NULL;
1274 
1275 	SEC_UNLOCK(sc, sessions);
1276 
1277 	return (ses);
1278 }
1279 
1280 static int
1281 sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
1282 {
1283 	struct sec_softc *sc = device_get_softc(dev);
1284 	struct sec_eu_methods *eu = sec_eus;
1285 	struct cryptoini *enc = NULL;
1286 	struct cryptoini *mac = NULL;
1287 	struct sec_session *ses;
1288 	int error = -1;
1289 	int sid;
1290 
1291 	error = sec_split_cri(cri, &enc, &mac);
1292 	if (error)
1293 		return (error);
1294 
1295 	/* Check key lengths */
1296 	if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN)
1297 		return (E2BIG);
1298 
1299 	if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN)
1300 		return (E2BIG);
1301 
1302 	/* Only SEC 3.0 supports digests larger than 256 bits */
1303 	if (sc->sc_version < 3 && mac && mac->cri_klen > 256)
1304 		return (E2BIG);
1305 
1306 	sid = sec_alloc_session(sc);
1307 	if (sid < 0)
1308 		return (ENOMEM);
1309 
1310 	ses = sec_get_session(sc, sid);
1311 
1312 	/* Find EU for this session */
1313 	while (eu->sem_make_desc != NULL) {
1314 		error = eu->sem_newsession(sc, ses, enc, mac);
1315 		if (error >= 0)
1316 			break;
1317 
1318 		eu++;
1319 	}
1320 
1321 	/* If not found, return EINVAL */
1322 	if (error < 0) {
1323 		sec_free_session(sc, ses);
1324 		return (EINVAL);
1325 	}
1326 
1327 	/* Save cipher key */
1328 	if (enc && enc->cri_key) {
1329 		ses->ss_klen = enc->cri_klen / 8;
1330 		memcpy(ses->ss_key, enc->cri_key, ses->ss_klen);
1331 	}
1332 
1333 	/* Save digest key */
1334 	if (mac && mac->cri_key) {
1335 		ses->ss_mklen = mac->cri_klen / 8;
1336 		memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen);
1337 	}
1338 
1339 	ses->ss_eu = eu;
1340 	*sidp = sid;
1341 
1342 	return (0);
1343 }
1344 
1345 static int
1346 sec_freesession(device_t dev, uint64_t tid)
1347 {
1348 	struct sec_softc *sc = device_get_softc(dev);
1349 	struct sec_session *ses;
1350 	int error = 0;
1351 
1352 	ses = sec_get_session(sc, CRYPTO_SESID2LID(tid));
1353 	if (ses == NULL)
1354 		return (EINVAL);
1355 
1356 	sec_free_session(sc, ses);
1357 
1358 	return (error);
1359 }
1360 
1361 static int
1362 sec_process(device_t dev, struct cryptop *crp, int hint)
1363 {
1364 	struct sec_softc *sc = device_get_softc(dev);
1365 	struct sec_desc *desc = NULL;
1366 	struct cryptodesc *mac, *enc;
1367 	struct sec_session *ses;
1368 	int buftype, error = 0;
1369 
1370 	/* Check Session ID */
1371 	ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
1372 	if (ses == NULL) {
1373 		crp->crp_etype = EINVAL;
1374 		crypto_done(crp);
1375 		return (0);
1376 	}
1377 
1378 	/* Check for input length */
1379 	if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
1380 		crp->crp_etype = E2BIG;
1381 		crypto_done(crp);
1382 		return (0);
1383 	}
1384 
1385 	/* Get descriptors */
1386 	if (sec_split_crp(crp, &enc, &mac)) {
1387 		crp->crp_etype = EINVAL;
1388 		crypto_done(crp);
1389 		return (0);
1390 	}
1391 
1392 	SEC_LOCK(sc, descriptors);
1393 	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1394 
1395 	/* Block driver if there are no free descriptors or we are going down */
1396 	if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
1397 		sc->sc_blocked |= CRYPTO_SYMQ;
1398 		SEC_UNLOCK(sc, descriptors);
1399 		return (ERESTART);
1400 	}
1401 
1402 	/* Prepare descriptor */
1403 	desc = SEC_GET_FREE_DESC(sc);
1404 	desc->sd_lt_used = 0;
1405 	desc->sd_error = 0;
1406 	desc->sd_crp = crp;
1407 
1408 	if (crp->crp_flags & CRYPTO_F_IOV)
1409 		buftype = SEC_UIO;
1410 	else if (crp->crp_flags & CRYPTO_F_IMBUF)
1411 		buftype = SEC_MBUF;
1412 	else
1413 		buftype = SEC_MEMORY;
1414 
1415 	if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
1416 		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1417 			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1418 			    ses->ss_ivlen);
1419 		else
1420 			arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0);
1421 
1422 		if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
1423 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1424 			    enc->crd_inject, ses->ss_ivlen,
1425 			    desc->sd_desc->shd_iv);
1426 	} else if (enc) {
1427 		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1428 			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1429 			    ses->ss_ivlen);
1430 		else
1431 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1432 			    enc->crd_inject, ses->ss_ivlen,
1433 			    desc->sd_desc->shd_iv);
1434 	}
1435 
1436 	if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
1437 		if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1438 			ses->ss_klen = enc->crd_klen / 8;
1439 			memcpy(ses->ss_key, enc->crd_key, ses->ss_klen);
1440 		} else
1441 			error = E2BIG;
1442 	}
1443 
1444 	if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
1445 		if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1446 			ses->ss_mklen = mac->crd_klen / 8;
1447 			memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen);
1448 		} else
1449 			error = E2BIG;
1450 	}
1451 
1452 	if (!error) {
1453 		memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen);
1454 		memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen);
1455 
1456 		error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype);
1457 	}
1458 
1459 	if (error) {
1460 		SEC_DESC_FREE_POINTERS(desc);
1461 		SEC_DESC_PUT_BACK_LT(sc, desc);
1462 		SEC_PUT_BACK_FREE_DESC(sc);
1463 		SEC_UNLOCK(sc, descriptors);
1464 		crp->crp_etype = error;
1465 		crypto_done(crp);
1466 		return (0);
1467 	}
1468 
1469 	/*
1470 	 * Skip DONE interrupt if this is not last request in burst, but only
1471 	 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE
1472 	 * signaling on each descriptor.
1473 	 */
1474 	if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
1475 		desc->sd_desc->shd_dn = 0;
1476 	else
1477 		desc->sd_desc->shd_dn = 1;
1478 
1479 	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1480 	SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
1481 	    BUS_DMASYNC_POSTWRITE);
1482 	SEC_DESC_FREE2READY(sc);
1483 	SEC_UNLOCK(sc, descriptors);
1484 
1485 	/* Enqueue ready descriptors in hardware */
1486 	sec_enqueue(sc);
1487 
1488 	return (0);
1489 }
1490 
1491 static int
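/* Build a common non-snooping descriptor (cipher only, no HMAC) */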
1492 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1493     struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1494     int buftype)
1495 {
1496 	struct sec_hw_desc *hd = desc->sd_desc;
1497 	int error;
1498 
1499 	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1500 	hd->shd_eu_sel1 = SEC_EU_NONE;
1501 	hd->shd_mode1 = 0;
1502 
1503 	/* Pointer 0: NULL */
1504 	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1505 	if (error)
1506 		return (error);
1507 
1508 	/* Pointer 1: IV IN */
1509 	error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1510 	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1511 	if (error)
1512 		return (error);
1513 
1514 	/* Pointer 2: Cipher Key */
1515 	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1516 	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1517  	if (error)
1518 		return (error);
1519 
1520 	/* Pointer 3: Data IN */
1521 	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
1522 	    enc->crd_len, buftype);
1523 	if (error)
1524 		return (error);
1525 
1526 	/* Pointer 4: Data OUT */
1527 	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1528 	    enc->crd_len, buftype);
1529 	if (error)
1530 		return (error);
1531 
1532 	/* Pointer 5: IV OUT (Not used: NULL) */
1533 	error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1534 	if (error)
1535 		return (error);
1536 
1537 	/* Pointer 6: NULL */
1538 	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1539 
1540 	return (error);
1541 }
1542 
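/* Build an HMAC-snooping descriptor (cipher and HMAC in a single pass) */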
1543 static int
1544 sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1545     struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1546     struct cryptodesc *mac, int buftype)
1547 {
1548 	struct sec_hw_desc *hd = desc->sd_desc;
1549 	u_int eu, mode, hashlen;
1550 	int error;
1551 
1552 	if (mac->crd_len < enc->crd_len)
1553 		return (EINVAL);
1554 
1555 	if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len)
1556 		return (EINVAL);
1557 
1558 	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1559 	if (error)
1560 		return (error);
1561 
1562 	hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1563 	hd->shd_eu_sel1 = eu;
1564 	hd->shd_mode1 = mode;
1565 
1566 	/* Pointer 0: HMAC Key */
1567 	error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1568 	    offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen);
1569 	if (error)
1570 		return (error);
1571 
1572 	/* Pointer 1: HMAC-Only Data IN */
1573 	error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip,
1574 	    mac->crd_len - enc->crd_len, buftype);
1575 	if (error)
1576 		return (error);
1577 
1578 	/* Pointer 2: Cipher Key */
1579 	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1580 	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1581  	if (error)
1582 		return (error);
1583 
1584 	/* Pointer 3: IV IN */
1585 	error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1586 	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1587 	if (error)
1588 		return (error);
1589 
1590 	/* Pointer 4: Data IN */
1591 	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1592 	    enc->crd_len, buftype);
1593 	if (error)
1594 		return (error);
1595 
1596 	/* Pointer 5: Data OUT */
1597 	error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip,
1598 	    enc->crd_len, buftype);
1599 	if (error)
1600 		return (error);
1601 
1602 	/* Pointer 6: HMAC OUT */
1603 	error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject,
1604 	    hashlen, buftype);
1605 
1606 	return (error);
1607 }
1608 
1609 /* AESU */
1610 
1611 static int
1612 sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses,
1613     struct cryptoini *enc, struct cryptoini *mac)
1614 {
1615 
1616 	if (enc == NULL)
1617 		return (-1);
1618 
1619 	if (enc->cri_alg != CRYPTO_AES_CBC)
1620 		return (-1);
1621 
1622 	ses->ss_ivlen = AES_BLOCK_LEN;
1623 
1624 	return (0);
1625 }
1626 
1627 static int
1628 sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1629     struct sec_desc *desc, struct cryptop *crp, int buftype)
1630 {
1631 	struct sec_hw_desc *hd = desc->sd_desc;
1632 	struct cryptodesc *enc, *mac;
1633 	int error;
1634 
1635 	error = sec_split_crp(crp, &enc, &mac);
1636 	if (error)
1637 		return (error);
1638 
1639 	if (!enc)
1640 		return (EINVAL);
1641 
1642 	hd->shd_eu_sel0 = SEC_EU_AESU;
1643 	hd->shd_mode0 = SEC_AESU_MODE_CBC;
1644 
1645 	if (enc->crd_alg != CRYPTO_AES_CBC)
1646 		return (EINVAL);
1647 
1648 	if (enc->crd_flags & CRD_F_ENCRYPT) {
1649 		hd->shd_mode0 |= SEC_AESU_MODE_ED;
1650 		hd->shd_dir = 0;
1651 	} else
1652 		hd->shd_dir = 1;
1653 
1654 	if (mac)
1655 		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1656 		    buftype);
1657 	else
1658 		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1659 		    buftype);
1660 
1661 	return (error);
1662 }
1663 
1664 /* DEU */
1665 
1666 static int
1667 sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses,
1668     struct cryptoini *enc, struct cryptoini *mac)
1669 {
1670 
1671 	if (enc == NULL)
1672 		return (-1);
1673 
1674 	switch (enc->cri_alg) {
1675 	case CRYPTO_DES_CBC:
1676 	case CRYPTO_3DES_CBC:
1677 		break;
1678 	default:
1679 		return (-1);
1680 	}
1681 
1682 	ses->ss_ivlen = DES_BLOCK_LEN;
1683 
1684 	return (0);
1685 }
1686 
1687 static int
1688 sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1689     struct sec_desc *desc, struct cryptop *crp, int buftype)
1690 {
1691 	struct sec_hw_desc *hd = desc->sd_desc;
1692 	struct cryptodesc *enc, *mac;
1693 	int error;
1694 
1695 	error = sec_split_crp(crp, &enc, &mac);
1696 	if (error)
1697 		return (error);
1698 
1699 	if (!enc)
1700 		return (EINVAL);
1701 
1702 	hd->shd_eu_sel0 = SEC_EU_DEU;
1703 	hd->shd_mode0 = SEC_DEU_MODE_CBC;
1704 
1705 	switch (enc->crd_alg) {
1706 	case CRYPTO_3DES_CBC:
1707 		hd->shd_mode0 |= SEC_DEU_MODE_TS;
1708 		break;
1709 	case CRYPTO_DES_CBC:
1710 		break;
1711 	default:
1712 		return (EINVAL);
1713 	}
1714 
1715 	if (enc->crd_flags & CRD_F_ENCRYPT) {
1716 		hd->shd_mode0 |= SEC_DEU_MODE_ED;
1717 		hd->shd_dir = 0;
1718 	} else
1719 		hd->shd_dir = 1;
1720 
1721 	if (mac)
1722 		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1723 		    buftype);
1724 	else
1725 		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1726 		    buftype);
1727 
1728 	return (error);
1729 }
1730 
1731 /* MDEU */
1732 
1733 static int
1734 sec_mdeu_can_handle(u_int alg)
1735 {
1736 	switch (alg) {
1737 	case CRYPTO_MD5:
1738 	case CRYPTO_SHA1:
1739 	case CRYPTO_MD5_HMAC:
1740 	case CRYPTO_SHA1_HMAC:
1741 	case CRYPTO_SHA2_256_HMAC:
1742 	case CRYPTO_SHA2_384_HMAC:
1743 	case CRYPTO_SHA2_512_HMAC:
1744 		return (1);
1745 	default:
1746 		return (0);
1747 	}
1748 }
1749 
1750 static int
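/* Translate a hash algorithm into MDEU unit selection, mode bits, and hash length */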
1751 sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen)
1752 {
1753 
1754 	*mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1755 	*eu = SEC_EU_NONE;
1756 
1757 	switch (crd->crd_alg) {
1758 	case CRYPTO_MD5_HMAC:
1759 		*mode |= SEC_MDEU_MODE_HMAC;
1760 		/* FALLTHROUGH */
1761 	case CRYPTO_MD5:
1762 		*eu = SEC_EU_MDEU_A;
1763 		*mode |= SEC_MDEU_MODE_MD5;
1764 		*hashlen = MD5_HASH_LEN;
1765 		break;
1766 	case CRYPTO_SHA1_HMAC:
1767 		*mode |= SEC_MDEU_MODE_HMAC;
1768 		/* FALLTHROUGH */
1769 	case CRYPTO_SHA1:
1770 		*eu = SEC_EU_MDEU_A;
1771 		*mode |= SEC_MDEU_MODE_SHA1;
1772 		*hashlen = SHA1_HASH_LEN;
1773 		break;
1774 	case CRYPTO_SHA2_256_HMAC:
1775 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1776 		*eu = SEC_EU_MDEU_A;
1777 		break;
1778 	case CRYPTO_SHA2_384_HMAC:
1779 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1780 		*eu = SEC_EU_MDEU_B;
1781 		break;
1782 	case CRYPTO_SHA2_512_HMAC:
1783 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1784 		*eu = SEC_EU_MDEU_B;
1785 		break;
1786 	default:
1787 		return (EINVAL);
1788 	}
1789 
1790 	if (*mode & SEC_MDEU_MODE_HMAC)
1791 		*hashlen = SEC_HMAC_HASH_LEN;
1792 
1793 	return (0);
1794 }
1795 
1796 static int
1797 sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses,
1798     struct cryptoini *enc, struct cryptoini *mac)
1799 {
1800 
1801 	if (mac && sec_mdeu_can_handle(mac->cri_alg))
1802 		return (0);
1803 
1804 	return (-1);
1805 }
1806 
1807 static int
1808 sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1809     struct sec_desc *desc, struct cryptop *crp, int buftype)
1810 {
1811 	struct cryptodesc *enc, *mac;
1812 	struct sec_hw_desc *hd = desc->sd_desc;
1813 	u_int eu, mode, hashlen;
1814 	int error;
1815 
1816 	error = sec_split_crp(crp, &enc, &mac);
1817 	if (error)
1818 		return (error);
1819 
1820 	if (enc)
1821 		return (EINVAL);
1822 
1823 	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1824 	if (error)
1825 		return (error);
1826 
1827 	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1828 	hd->shd_eu_sel0 = eu;
1829 	hd->shd_mode0 = mode;
1830 	hd->shd_eu_sel1 = SEC_EU_NONE;
1831 	hd->shd_mode1 = 0;
1832 
1833 	/* Pointer 0: NULL */
1834 	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1835 	if (error)
1836 		return (error);
1837 
1838 	/* Pointer 1: Context In (Not used: NULL) */
1839 	error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1840 	if (error)
1841 		return (error);
1842 
1843 	/* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1844 	if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1845 		error = sec_make_pointer_direct(sc, desc, 2,
1846 		    desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1847 		    shd_mkey), ses->ss_mklen);
1848 	else
1849 		error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1850 
1851 	if (error)
1852 		return (error);
1853 
1854 	/* Pointer 3: Input Data */
1855 	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip,
1856 	    mac->crd_len, buftype);
1857 	if (error)
1858 		return (error);
1859 
1860 	/* Pointer 4: NULL */
1861 	error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1862 	if (error)
1863 		return (error);
1864 
1865 	/* Pointer 5: Hash out */
1866 	error = sec_make_pointer(sc, desc, 5, crp->crp_buf,
1867 	    mac->crd_inject, hashlen, buftype);
1868 	if (error)
1869 		return (error);
1870 
1871 	/* Pointer 6: NULL */
1872 	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1873 
1874 	return (error);
1875 }
1876