1 /*-
2  * Copyright (C) 2009-2011 Semihalf.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * CESA SRAM Memory Map:
29  *
30  * +------------------------+ <= sc->sc_sram_base + CESA_SRAM_SIZE
31  * |                        |
32  * |          DATA          |
33  * |                        |
34  * +------------------------+ <= sc->sc_sram_base + CESA_DATA(0)
35  * |  struct cesa_sa_data   |
36  * +------------------------+
37  * |  struct cesa_sa_hdesc  |
38  * +------------------------+ <= sc->sc_sram_base
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/bus.h>
47 #include <sys/endian.h>
48 #include <sys/kernel.h>
49 #include <sys/lock.h>
50 #include <sys/mbuf.h>
51 #include <sys/module.h>
52 #include <sys/mutex.h>
53 #include <sys/rman.h>
54 
55 #include <machine/bus.h>
56 #include <machine/intr.h>
57 #include <machine/resource.h>
58 
59 #include <dev/fdt/fdt_common.h>
60 #include <dev/ofw/ofw_bus.h>
61 #include <dev/ofw/ofw_bus_subr.h>
62 
63 #include <sys/md5.h>
64 #include <crypto/sha1.h>
65 #include <crypto/rijndael/rijndael.h>
66 #include <opencrypto/cryptodev.h>
67 #include "cryptodev_if.h"
68 
69 #include <arm/mv/mvreg.h>
70 #include <arm/mv/mvwin.h>
71 #include <arm/mv/mvvar.h>
72 #include "cesa.h"
73 
74 static int	cesa_probe(device_t);
75 static int	cesa_attach(device_t);
76 static int	cesa_detach(device_t);
77 static void	cesa_intr(void *);
78 static int	cesa_newsession(device_t, u_int32_t *, struct cryptoini *);
79 static int	cesa_freesession(device_t, u_int64_t);
80 static int	cesa_process(device_t, struct cryptop *, int);
81 static int	decode_win_cesa_setup(struct cesa_softc *sc);
82 
83 static struct resource_spec cesa_res_spec[] = {
84 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
85 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
86 	{ -1, 0 }
87 };
88 
89 static device_method_t cesa_methods[] = {
90 	/* Device interface */
91 	DEVMETHOD(device_probe,		cesa_probe),
92 	DEVMETHOD(device_attach,	cesa_attach),
93 	DEVMETHOD(device_detach,	cesa_detach),
94 
95 	/* Crypto device methods */
96 	DEVMETHOD(cryptodev_newsession,	cesa_newsession),
97 	DEVMETHOD(cryptodev_freesession,cesa_freesession),
98 	DEVMETHOD(cryptodev_process,	cesa_process),
99 
100 	DEVMETHOD_END
101 };
102 
103 static driver_t cesa_driver = {
104 	"cesa",
105 	cesa_methods,
106 	sizeof (struct cesa_softc)
107 };
108 static devclass_t cesa_devclass;
109 
110 DRIVER_MODULE(cesa, simplebus, cesa_driver, cesa_devclass, 0, 0);
111 MODULE_DEPEND(cesa, crypto, 1, 1, 1);
112 
113 static void
114 cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd)
115 {
116 #ifdef DEBUG
117 	device_t dev;
118 
119 	dev = sc->sc_dev;
120 	device_printf(dev, "CESA SA Hardware Descriptor:\n");
121 	device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config);
122 	device_printf(dev, "\t\te_src:  0x%08X\n", cshd->cshd_enc_src);
123 	device_printf(dev, "\t\te_dst:  0x%08X\n", cshd->cshd_enc_dst);
124 	device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen);
125 	device_printf(dev, "\t\te_key:  0x%08X\n", cshd->cshd_enc_key);
126 	device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv);
127 	device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf);
128 	device_printf(dev, "\t\tm_src:  0x%08X\n", cshd->cshd_mac_src);
129 	device_printf(dev, "\t\tm_dst:  0x%08X\n", cshd->cshd_mac_dst);
130 	device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen);
131 	device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen);
132 	device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in);
133 	device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out);
134 #endif
135 }
136 
137 static void
138 cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
139 {
140 	struct cesa_dma_mem *cdm;
141 
142 	if (error)
143 		return;
144 
145 	KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1."));
146 	cdm = arg;
147 	cdm->cdm_paddr = segs->ds_addr;
148 }
149 
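/*
 * Allocate a contiguous, DMA-safe memory block described by 'cdm': create a
 * busdma tag, allocate zeroed memory and load the map to learn its physical
 * address. On failure all partially allocated resources are released.
 */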
150 static int
151 cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm,
152     bus_size_t size)
153 {
154 	int error;
155 
156 	KASSERT(cdm->cdm_vaddr == NULL,
157 	    ("%s(): DMA memory descriptor in use.", __func__));
158 
159 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
160 	    PAGE_SIZE, 0,			/* alignment, boundary */
161 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
162 	    BUS_SPACE_MAXADDR,			/* highaddr */
163 	    NULL, NULL,				/* filtfunc, filtfuncarg */
164 	    size, 1,				/* maxsize, nsegments */
165 	    size, 0,				/* maxsegsz, flags */
166 	    NULL, NULL,				/* lockfunc, lockfuncarg */
167 	    &cdm->cdm_tag);			/* dmat */
168 	if (error) {
169 		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
170 		    " %i!\n", error);
171 
172 		goto err1;
173 	}
174 
175 	error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr,
176 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map);
177 	if (error) {
178 		device_printf(sc->sc_dev, "failed to allocate DMA safe"
179 		    " memory, error %i!\n", error);
180 
181 		goto err2;
182 	}
183 
184 	error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr,
185 	    size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT);
186 	if (error) {
187 		device_printf(sc->sc_dev, "cannot get address of the DMA"
188 		    " memory, error %i\n", error);
189 
190 		goto err3;
191 	}
192 
193 	return (0);
194 err3:
195 	bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
196 err2:
197 	bus_dma_tag_destroy(cdm->cdm_tag);
198 err1:
199 	cdm->cdm_vaddr = NULL;
200 	return (error);
201 }
202 
203 static void
204 cesa_free_dma_mem(struct cesa_dma_mem *cdm)
205 {
206 
207 	bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map);
208 	bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
209 	bus_dma_tag_destroy(cdm->cdm_tag);
210 	cdm->cdm_vaddr = NULL;
211 }
212 
213 static void
214 cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op)
215 {
216 
217 	/* Sync only if DMA memory is valid */
218 	if (cdm->cdm_vaddr != NULL)
219 		bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op);
220 }
221 
222 static void
223 cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op)
224 {
225 
226 	cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op);
227 	cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op);
228 	cesa_sync_dma_mem(&sc->sc_requests_cdm, op);
229 }
230 
231 static struct cesa_session *
232 cesa_alloc_session(struct cesa_softc *sc)
233 {
234 	struct cesa_session *cs;
235 
236 	CESA_GENERIC_ALLOC_LOCKED(sc, cs, sessions);
237 
238 	return (cs);
239 }
240 
241 static struct cesa_session *
242 cesa_get_session(struct cesa_softc *sc, uint32_t sid)
243 {
244 
245 	if (sid >= CESA_SESSIONS)
246 		return (NULL);
247 
248 	return (&sc->sc_sessions[sid]);
249 }
250 
251 static void
252 cesa_free_session(struct cesa_softc *sc, struct cesa_session *cs)
253 {
254 
255 	CESA_GENERIC_FREE_LOCKED(sc, cs, sessions);
256 }
257 
258 static struct cesa_request *
259 cesa_alloc_request(struct cesa_softc *sc)
260 {
261 	struct cesa_request *cr;
262 
263 	CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests);
264 	if (!cr)
265 		return (NULL);
266 
267 	STAILQ_INIT(&cr->cr_tdesc);
268 	STAILQ_INIT(&cr->cr_sdesc);
269 
270 	return (cr);
271 }
272 
273 static void
274 cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr)
275 {
276 
277 	/* Free TDMA descriptors assigned to this request */
278 	CESA_LOCK(sc, tdesc);
279 	STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc);
280 	CESA_UNLOCK(sc, tdesc);
281 
282 	/* Free SA descriptors assigned to this request */
283 	CESA_LOCK(sc, sdesc);
284 	STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc);
285 	CESA_UNLOCK(sc, sdesc);
286 
287 	/* Unload DMA memory associated with the request */
288 	if (cr->cr_dmap_loaded) {
289 		bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap);
290 		cr->cr_dmap_loaded = 0;
291 	}
292 
293 	CESA_GENERIC_FREE_LOCKED(sc, cr, requests);
294 }
295 
296 static void
297 cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr)
298 {
299 
300 	CESA_LOCK(sc, requests);
301 	STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq);
302 	CESA_UNLOCK(sc, requests);
303 }
304 
305 static struct cesa_tdma_desc *
306 cesa_alloc_tdesc(struct cesa_softc *sc)
307 {
308 	struct cesa_tdma_desc *ctd;
309 
310 	CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc);
311 
312 	if (!ctd)
313 		device_printf(sc->sc_dev, "TDMA descriptor pool exhausted. "
314 		    "Consider increasing CESA_TDMA_DESCRIPTORS.\n");
315 
316 	return (ctd);
317 }
318 
319 static struct cesa_sa_desc *
320 cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr)
321 {
322 	struct cesa_sa_desc *csd;
323 
324 	CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc);
325 	if (!csd) {
326 		device_printf(sc->sc_dev, "SA descriptor pool exhausted. "
327 		    "Consider increasing CESA_SA_DESCRIPTORS.\n");
328 		return (NULL);
329 	}
330 
331 	STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq);
332 
333 	/* Fill in the SA descriptor with default values */
334 	csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key);
335 	csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv);
336 	csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv);
337 	csd->csd_cshd->cshd_enc_src = 0;
338 	csd->csd_cshd->cshd_enc_dst = 0;
339 	csd->csd_cshd->cshd_enc_dlen = 0;
340 	csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash);
341 	csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in);
342 	csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out);
343 	csd->csd_cshd->cshd_mac_src = 0;
344 	csd->csd_cshd->cshd_mac_dlen = 0;
345 
346 	return (csd);
347 }
348 
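/*
 * Allocate a TDMA descriptor describing a copy of 'size' bytes from 'src' to
 * 'dst'. A zero-sized descriptor is treated as a special control entry and is
 * not marked as owned by the hardware.
 */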
349 static struct cesa_tdma_desc *
350 cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src,
351     bus_size_t size)
352 {
353 	struct cesa_tdma_desc *ctd;
354 
355 	ctd = cesa_alloc_tdesc(sc);
356 	if (!ctd)
357 		return (NULL);
358 
359 	ctd->ctd_cthd->cthd_dst = dst;
360 	ctd->ctd_cthd->cthd_src = src;
361 	ctd->ctd_cthd->cthd_byte_count = size;
362 
363 	/* Handle special control packet */
364 	if (size != 0)
365 		ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED;
366 	else
367 		ctd->ctd_cthd->cthd_flags = 0;
368 
369 	return (ctd);
370 }
371 
372 static struct cesa_tdma_desc *
373 cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
374 {
375 
376 	return (cesa_tdma_copy(sc, sc->sc_sram_base +
377 	    sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr,
378 	    sizeof(struct cesa_sa_data)));
379 }
380 
381 static struct cesa_tdma_desc *
382 cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
383 {
384 
385 	return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base +
386 	    sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data)));
387 }
388 
389 static struct cesa_tdma_desc *
390 cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd)
391 {
392 
393 	return (cesa_tdma_copy(sc, sc->sc_sram_base, csd->csd_cshd_paddr,
394 	    sizeof(struct cesa_sa_hdesc)));
395 }
396 
397 static void
398 cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd)
399 {
400 	struct cesa_tdma_desc *ctd_prev;
401 
402 	if (!STAILQ_EMPTY(&cr->cr_tdesc)) {
403 		ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq);
404 		ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr;
405 	}
406 
407 	ctd->ctd_cthd->cthd_next = 0;
408 	STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq);
409 }
410 
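/*
 * Append a complete packet to the request's TDMA chain: first the copy of the
 * SA descriptor into SRAM, then the copy-in descriptors, a zero-sized control
 * descriptor and finally the copy-out descriptors.
 */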
411 static int
412 cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr,
413     struct cesa_packet *cp, struct cesa_sa_desc *csd)
414 {
415 	struct cesa_tdma_desc *ctd, *tmp;
416 
417 	/* Copy SA descriptor for this packet */
418 	ctd = cesa_tdma_copy_sdesc(sc, csd);
419 	if (!ctd)
420 		return (ENOMEM);
421 
422 	cesa_append_tdesc(cr, ctd);
423 
424 	/* Copy data to be processed */
425 	STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp)
426 		cesa_append_tdesc(cr, ctd);
427 	STAILQ_INIT(&cp->cp_copyin);
428 
429 	/* Insert control descriptor */
430 	ctd = cesa_tdma_copy(sc, 0, 0, 0);
431 	if (!ctd)
432 		return (ENOMEM);
433 
434 	cesa_append_tdesc(cr, ctd);
435 
436 	/* Copy back results */
437 	STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp)
438 		cesa_append_tdesc(cr, ctd);
439 	STAILQ_INIT(&cp->cp_copyout);
440 
441 	return (0);
442 }
443 
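/*
 * Precompute the HMAC inner and outer digest state from the MAC key using the
 * standard ipad/opad construction. The intermediate states are stored
 * big-endian in the session, ready to be loaded into the engine.
 */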
444 static int
445 cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen)
446 {
447 	uint8_t ipad[CESA_MAX_HMAC_BLOCK_LEN];
448 	uint8_t opad[CESA_MAX_HMAC_BLOCK_LEN];
449 	SHA1_CTX sha1ctx;
450 	MD5_CTX md5ctx;
451 	uint32_t *hout;
452 	uint32_t *hin;
453 	int i;
454 
455 	memset(ipad, HMAC_IPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN);
456 	memset(opad, HMAC_OPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN);
457 	for (i = 0; i < mklen; i++) {
458 		ipad[i] ^= mkey[i];
459 		opad[i] ^= mkey[i];
460 	}
461 
462 	hin = (uint32_t *)cs->cs_hiv_in;
463 	hout = (uint32_t *)cs->cs_hiv_out;
464 
465 	switch (alg) {
466 	case CRYPTO_MD5_HMAC:
467 		MD5Init(&md5ctx);
468 		MD5Update(&md5ctx, ipad, MD5_HMAC_BLOCK_LEN);
469 		memcpy(hin, md5ctx.state, sizeof(md5ctx.state));
470 		MD5Init(&md5ctx);
471 		MD5Update(&md5ctx, opad, MD5_HMAC_BLOCK_LEN);
472 		memcpy(hout, md5ctx.state, sizeof(md5ctx.state));
473 		break;
474 	case CRYPTO_SHA1_HMAC:
475 		SHA1Init(&sha1ctx);
476 		SHA1Update(&sha1ctx, ipad, SHA1_HMAC_BLOCK_LEN);
477 		memcpy(hin, sha1ctx.h.b32, sizeof(sha1ctx.h.b32));
478 		SHA1Init(&sha1ctx);
479 		SHA1Update(&sha1ctx, opad, SHA1_HMAC_BLOCK_LEN);
480 		memcpy(hout, sha1ctx.h.b32, sizeof(sha1ctx.h.b32));
481 		break;
482 	default:
483 		return (EINVAL);
484 	}
485 
486 	for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) {
487 		hin[i] = htobe32(hin[i]);
488 		hout[i] = htobe32(hout[i]);
489 	}
490 
491 	return (0);
492 }
493 
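/*
 * Expand the AES key and store the last round keys as the decryption key
 * expected by the engine. Also update the session config with the key length.
 */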
494 static int
495 cesa_prep_aes_key(struct cesa_session *cs)
496 {
497 	uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
498 	uint32_t *dkey;
499 	int i;
500 
501 	rijndaelKeySetupEnc(ek, cs->cs_key, cs->cs_klen * 8);
502 
503 	cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK;
504 	dkey = (uint32_t *)cs->cs_aes_dkey;
505 
506 	switch (cs->cs_klen) {
507 	case 16:
508 		cs->cs_config |= CESA_CSH_AES_KLEN_128;
509 		for (i = 0; i < 4; i++)
510 			*dkey++ = htobe32(ek[4 * 10 + i]);
511 		break;
512 	case 24:
513 		cs->cs_config |= CESA_CSH_AES_KLEN_192;
514 		for (i = 0; i < 4; i++)
515 			*dkey++ = htobe32(ek[4 * 12 + i]);
516 		for (i = 0; i < 2; i++)
517 			*dkey++ = htobe32(ek[4 * 11 + 2 + i]);
518 		break;
519 	case 32:
520 		cs->cs_config |= CESA_CSH_AES_KLEN_256;
521 		for (i = 0; i < 4; i++)
522 			*dkey++ = htobe32(ek[4 * 14 + i]);
523 		for (i = 0; i < 4; i++)
524 			*dkey++ = htobe32(ek[4 * 13 + i]);
525 		break;
526 	default:
527 		return (EINVAL);
528 	}
529 
530 	return (0);
531 }
532 
533 static int
534 cesa_is_hash(int alg)
535 {
536 
537 	switch (alg) {
538 	case CRYPTO_MD5:
539 	case CRYPTO_MD5_HMAC:
540 	case CRYPTO_SHA1:
541 	case CRYPTO_SHA1_HMAC:
542 		return (1);
543 	default:
544 		return (0);
545 	}
546 }
547 
548 static void
549 cesa_start_packet(struct cesa_packet *cp, unsigned int size)
550 {
551 
552 	cp->cp_size = size;
553 	cp->cp_offset = 0;
554 	STAILQ_INIT(&cp->cp_copyin);
555 	STAILQ_INIT(&cp->cp_copyout);
556 }
557 
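/*
 * Add copy-in and copy-out TDMA descriptors moving as much of the DMA segment
 * as fits into the remaining space of the current packet's SRAM data area.
 * Returns the number of bytes consumed, or -ENOMEM on descriptor shortage.
 */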
558 static int
559 cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp,
560     bus_dma_segment_t *seg)
561 {
562 	struct cesa_tdma_desc *ctd;
563 	unsigned int bsize;
564 
565 	/* Calculate size of block copy */
566 	bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset);
567 
568 	if (bsize > 0) {
569 		ctd = cesa_tdma_copy(sc, sc->sc_sram_base +
570 		    CESA_DATA(cp->cp_offset), seg->ds_addr, bsize);
571 		if (!ctd)
572 			return (-ENOMEM);
573 
574 		STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq);
575 
576 		ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base +
577 		    CESA_DATA(cp->cp_offset), bsize);
578 		if (!ctd)
579 			return (-ENOMEM);
580 
581 		STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq);
582 
583 		seg->ds_len -= bsize;
584 		seg->ds_addr += bsize;
585 		cp->cp_offset += bsize;
586 	}
587 
588 	return (bsize);
589 }
590 
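/*
 * Busdma callback which splits the loaded request buffer into packets not
 * exceeding CESA_MAX_PACKET_SIZE and creates the TDMA and SA descriptors for
 * each of them. If the encryption and MAC regions are not suitably aligned,
 * the request is processed in two separate passes (MAC and ENC).
 */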
591 static void
592 cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
593 {
594 	unsigned int mpsize, fragmented;
595 	unsigned int mlen, mskip, tmlen;
596 	struct cesa_chain_info *cci;
597 	unsigned int elen, eskip;
598 	unsigned int skip, len;
599 	struct cesa_sa_desc *csd;
600 	struct cesa_request *cr;
601 	struct cesa_softc *sc;
602 	struct cesa_packet cp;
603 	bus_dma_segment_t seg;
604 	uint32_t config;
605 	int size;
606 
607 	cci = arg;
608 	sc = cci->cci_sc;
609 	cr = cci->cci_cr;
610 
611 	if (error) {
612 		cci->cci_error = error;
613 		return;
614 	}
615 
616 	elen = cci->cci_enc ? cci->cci_enc->crd_len : 0;
617 	eskip = cci->cci_enc ? cci->cci_enc->crd_skip : 0;
618 	mlen = cci->cci_mac ? cci->cci_mac->crd_len : 0;
619 	mskip = cci->cci_mac ? cci->cci_mac->crd_skip : 0;
620 
621 	if (elen && mlen &&
622 	    ((eskip > mskip && ((eskip - mskip) & (cr->cr_cs->cs_ivlen - 1))) ||
623 	    (mskip > eskip && ((mskip - eskip) & (cr->cr_cs->cs_mblen - 1))) ||
624 	    (eskip > (mskip + mlen)) || (mskip > (eskip + elen)))) {
625 		/*
626 		 * Data alignment in the request does not meet CESA requirements
627 		 * for combined encryption/decryption and hashing. We have to
628 		 * split the request into separate operations and process them
629 		 * one by one.
630 		 */
631 		config = cci->cci_config;
632 		if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) {
633 			config &= ~CESA_CSHD_OP_MASK;
634 
635 			cci->cci_config = config | CESA_CSHD_MAC;
636 			cci->cci_enc = NULL;
637 			cci->cci_mac = cr->cr_mac;
638 			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
639 
640 			cci->cci_config = config | CESA_CSHD_ENC;
641 			cci->cci_enc = cr->cr_enc;
642 			cci->cci_mac = NULL;
643 			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
644 		} else {
645 			config &= ~CESA_CSHD_OP_MASK;
646 
647 			cci->cci_config = config | CESA_CSHD_ENC;
648 			cci->cci_enc = cr->cr_enc;
649 			cci->cci_mac = NULL;
650 			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
651 
652 			cci->cci_config = config | CESA_CSHD_MAC;
653 			cci->cci_enc = NULL;
654 			cci->cci_mac = cr->cr_mac;
655 			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
656 		}
657 
658 		return;
659 	}
660 
661 	tmlen = mlen;
662 	fragmented = 0;
663 	mpsize = CESA_MAX_PACKET_SIZE;
664 	mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1));
665 
666 	if (elen && mlen) {
667 		skip = MIN(eskip, mskip);
668 		len = MAX(elen + eskip, mlen + mskip) - skip;
669 	} else if (elen) {
670 		skip = eskip;
671 		len = elen;
672 	} else {
673 		skip = mskip;
674 		len = mlen;
675 	}
676 
677 	/* Start first packet in chain */
678 	cesa_start_packet(&cp, MIN(mpsize, len));
679 
680 	while (nseg-- && len > 0) {
681 		seg = *(segs++);
682 
683 		/*
684 		 * Skip data in buffer on which neither ENC nor MAC operation
685 		 * is requested.
686 		 */
687 		if (skip > 0) {
688 			size = MIN(skip, seg.ds_len);
689 			skip -= size;
690 
691 			seg.ds_addr += size;
692 			seg.ds_len -= size;
693 
694 			if (eskip > 0)
695 				eskip -= size;
696 
697 			if (mskip > 0)
698 				mskip -= size;
699 
700 			if (seg.ds_len == 0)
701 				continue;
702 		}
703 
704 		while (1) {
705 			/*
706 			 * Fill in current packet with data. Break if there is
707 			 * no more data in current DMA segment or an error
708 			 * occurred.
709 			 */
710 			size = cesa_fill_packet(sc, &cp, &seg);
711 			if (size <= 0) {
712 				error = -size;
713 				break;
714 			}
715 
716 			len -= size;
717 
718 			/* If packet is full, append it to the chain */
719 			if (cp.cp_size == cp.cp_offset) {
720 				csd = cesa_alloc_sdesc(sc, cr);
721 				if (!csd) {
722 					error = ENOMEM;
723 					break;
724 				}
725 
726 				/* Create SA descriptor for this packet */
727 				csd->csd_cshd->cshd_config = cci->cci_config;
728 				csd->csd_cshd->cshd_mac_total_dlen = tmlen;
729 
730 				/*
731 				 * Enable fragmentation if request will not fit
732 				 * into one packet.
733 				 */
734 				if (len > 0) {
735 					if (!fragmented) {
736 						fragmented = 1;
737 						csd->csd_cshd->cshd_config |=
738 						    CESA_CSHD_FRAG_FIRST;
739 					} else
740 						csd->csd_cshd->cshd_config |=
741 						    CESA_CSHD_FRAG_MIDDLE;
742 				} else if (fragmented)
743 					csd->csd_cshd->cshd_config |=
744 					    CESA_CSHD_FRAG_LAST;
745 
746 				if (eskip < cp.cp_size && elen > 0) {
747 					csd->csd_cshd->cshd_enc_src =
748 					    CESA_DATA(eskip);
749 					csd->csd_cshd->cshd_enc_dst =
750 					    CESA_DATA(eskip);
751 					csd->csd_cshd->cshd_enc_dlen =
752 					    MIN(elen, cp.cp_size - eskip);
753 				}
754 
755 				if (mskip < cp.cp_size && mlen > 0) {
756 					csd->csd_cshd->cshd_mac_src =
757 					    CESA_DATA(mskip);
758 					csd->csd_cshd->cshd_mac_dlen =
759 					    MIN(mlen, cp.cp_size - mskip);
760 				}
761 
762 				elen -= csd->csd_cshd->cshd_enc_dlen;
763 				eskip -= MIN(eskip, cp.cp_size);
764 				mlen -= csd->csd_cshd->cshd_mac_dlen;
765 				mskip -= MIN(mskip, cp.cp_size);
766 
767 				cesa_dump_cshd(sc, csd->csd_cshd);
768 
769 				/* Append packet to the request */
770 				error = cesa_append_packet(sc, cr, &cp, csd);
771 				if (error)
772 					break;
773 
774 				/* Start a new packet, as current is full */
775 				cesa_start_packet(&cp, MIN(mpsize, len));
776 			}
777 		}
778 
779 		if (error)
780 			break;
781 	}
782 
783 	if (error) {
784 		/*
785 		 * Move all allocated resources to the request. They will be
786 		 * freed later.
787 		 */
788 		STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin);
789 		STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout);
790 		cci->cci_error = error;
791 	}
792 }
793 
794 static void
795 cesa_create_chain_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
796     bus_size_t size, int error)
797 {
798 
799 	cesa_create_chain_cb(arg, segs, nseg, error);
800 }
801 
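/*
 * Build the complete TDMA chain for a request: copy-in of the SA data (keys
 * and IVs), the per-packet descriptors created while loading the data buffer,
 * and a final copy-out of the SA data (hash and IV results).
 */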
802 static int
803 cesa_create_chain(struct cesa_softc *sc, struct cesa_request *cr)
804 {
805 	struct cesa_chain_info cci;
806 	struct cesa_tdma_desc *ctd;
807 	uint32_t config;
808 	int error;
809 
810 	error = 0;
811 	CESA_LOCK_ASSERT(sc, sessions);
812 
813 	/* Create request metadata */
814 	if (cr->cr_enc) {
815 		if (cr->cr_enc->crd_alg == CRYPTO_AES_CBC &&
816 		    (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
817 			memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey,
818 			    cr->cr_cs->cs_klen);
819 		else
820 			memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key,
821 			    cr->cr_cs->cs_klen);
822 	}
823 
824 	if (cr->cr_mac) {
825 		memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in,
826 		    CESA_MAX_HASH_LEN);
827 		memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out,
828 		    CESA_MAX_HASH_LEN);
829 	}
830 
831 	ctd = cesa_tdma_copyin_sa_data(sc, cr);
832 	if (!ctd)
833 		return (ENOMEM);
834 
835 	cesa_append_tdesc(cr, ctd);
836 
837 	/* Prepare SA configuration */
838 	config = cr->cr_cs->cs_config;
839 
840 	if (cr->cr_enc && (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
841 		config |= CESA_CSHD_DECRYPT;
842 	if (cr->cr_enc && !cr->cr_mac)
843 		config |= CESA_CSHD_ENC;
844 	if (!cr->cr_enc && cr->cr_mac)
845 		config |= CESA_CSHD_MAC;
846 	if (cr->cr_enc && cr->cr_mac)
847 		config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC :
848 		    CESA_CSHD_ENC_AND_MAC;
849 
850 	/* Create data packets */
851 	cci.cci_sc = sc;
852 	cci.cci_cr = cr;
853 	cci.cci_enc = cr->cr_enc;
854 	cci.cci_mac = cr->cr_mac;
855 	cci.cci_config = config;
856 	cci.cci_error = 0;
857 
858 	if (cr->cr_crp->crp_flags & CRYPTO_F_IOV)
859 		error = bus_dmamap_load_uio(sc->sc_data_dtag,
860 		    cr->cr_dmap, (struct uio *)cr->cr_crp->crp_buf,
861 		    cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
862 	else if (cr->cr_crp->crp_flags & CRYPTO_F_IMBUF)
863 		error = bus_dmamap_load_mbuf(sc->sc_data_dtag,
864 		    cr->cr_dmap, (struct mbuf *)cr->cr_crp->crp_buf,
865 		    cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
866 	else
867 		error = bus_dmamap_load(sc->sc_data_dtag,
868 		    cr->cr_dmap, cr->cr_crp->crp_buf,
869 		    cr->cr_crp->crp_ilen, cesa_create_chain_cb, &cci,
870 		    BUS_DMA_NOWAIT);
871 
872 	if (!error)
873 		cr->cr_dmap_loaded = 1;
874 
875 	if (cci.cci_error)
876 		error = cci.cci_error;
877 
878 	if (error)
879 		return (error);
880 
881 	/* Read back request metadata */
882 	ctd = cesa_tdma_copyout_sa_data(sc, cr);
883 	if (!ctd)
884 		return (ENOMEM);
885 
886 	cesa_append_tdesc(cr, ctd);
887 
888 	return (0);
889 }
890 
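/*
 * Move all ready requests to the queued list, link their TDMA chains into one
 * and start the engine, unless it is already processing a previous chain.
 */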
891 static void
892 cesa_execute(struct cesa_softc *sc)
893 {
894 	struct cesa_tdma_desc *prev_ctd, *ctd;
895 	struct cesa_request *prev_cr, *cr;
896 
897 	CESA_LOCK(sc, requests);
898 
899 	/*
900 	 * If ready list is empty, there is nothing to execute. If queued list
901 	 * is not empty, the hardware is busy and we cannot start another
902 	 * execution.
903 	 */
904 	if (STAILQ_EMPTY(&sc->sc_ready_requests) ||
905 	    !STAILQ_EMPTY(&sc->sc_queued_requests)) {
906 		CESA_UNLOCK(sc, requests);
907 		return;
908 	}
909 
910 	/* Move all ready requests to queued list */
911 	STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests);
912 	STAILQ_INIT(&sc->sc_ready_requests);
913 
914 	/* Create one execution chain from all requests on the list */
915 	if (STAILQ_FIRST(&sc->sc_queued_requests) !=
916 	    STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) {
917 		prev_cr = NULL;
918 		cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD |
919 		    BUS_DMASYNC_POSTWRITE);
920 
921 		STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) {
922 			if (prev_cr) {
923 				ctd = STAILQ_FIRST(&cr->cr_tdesc);
924 				prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc,
925 				    cesa_tdma_desc, ctd_stq);
926 
927 				prev_ctd->ctd_cthd->cthd_next =
928 				    ctd->ctd_cthd_paddr;
929 			}
930 
931 			prev_cr = cr;
932 		}
933 
934 		cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD |
935 		    BUS_DMASYNC_PREWRITE);
936 	}
937 
938 	/* Start chain execution in hardware */
939 	cr = STAILQ_FIRST(&sc->sc_queued_requests);
940 	ctd = STAILQ_FIRST(&cr->cr_tdesc);
941 
942 	CESA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr);
943 	CESA_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE);
944 
945 	CESA_UNLOCK(sc, requests);
946 }
947 
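/*
 * Resolve the "sram-handle" property of the device node to find the SRAM node
 * and read the physical base address of the CESA SRAM from its "reg" property.
 */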
948 static int
949 cesa_setup_sram(struct cesa_softc *sc)
950 {
951 	phandle_t sram_node;
952 	ihandle_t sram_ihandle;
953 	pcell_t sram_handle, sram_reg;
954 
955 	if (OF_getprop(ofw_bus_get_node(sc->sc_dev), "sram-handle",
956 	    (void *)&sram_handle, sizeof(sram_handle)) <= 0)
957 		return (ENXIO);
958 
959 	sram_ihandle = (ihandle_t)sram_handle;
960 	sram_ihandle = fdt32_to_cpu(sram_ihandle);
961 	sram_node = OF_instance_to_package(sram_ihandle);
962 
963 	if (OF_getprop(sram_node, "reg", (void *)&sram_reg,
964 	    sizeof(sram_reg)) <= 0)
965 		return (ENXIO);
966 
967 	sc->sc_sram_base = fdt32_to_cpu(sram_reg);
968 
969 	return (0);
970 }
971 
972 static int
973 cesa_probe(device_t dev)
974 {
975 
976 	if (!ofw_bus_status_okay(dev))
977 		return (ENXIO);
978 
979 	if (!ofw_bus_is_compatible(dev, "mrvl,cesa"))
980 		return (ENXIO);
981 
982 	device_set_desc(dev, "Marvell Cryptographic Engine and Security "
983 	    "Accelerator");
984 
985 	return (BUS_PROBE_DEFAULT);
986 }
987 
988 static int
989 cesa_attach(device_t dev)
990 {
991 	struct cesa_softc *sc;
992 	uint32_t d, r;
993 	int error;
994 	int i;
995 
996 	sc = device_get_softc(dev);
997 	sc->sc_blocked = 0;
998 	sc->sc_error = 0;
999 	sc->sc_dev = dev;
1000 
1001 	/* Check if CESA peripheral device has power turned on */
1002 #if defined(SOC_MV_KIRKWOOD)
1003 	if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) == CPU_PM_CTRL_CRYPTO) {
1004 		device_printf(dev, "not powered on\n");
1005 		return (ENXIO);
1006 	}
1007 #else
1008 	if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) != CPU_PM_CTRL_CRYPTO) {
1009 		device_printf(dev, "not powered on\n");
1010 		return (ENXIO);
1011 	}
1012 #endif
1013 	soc_id(&d, &r);
1014 
1015 	switch (d) {
1016 	case MV_DEV_88F6281:
1017 	case MV_DEV_88F6282:
1018 		sc->sc_tperr = 0;
1019 		break;
1020 	case MV_DEV_MV78100:
1021 	case MV_DEV_MV78100_Z0:
1022 		sc->sc_tperr = CESA_ICR_TPERR;
1023 		break;
1024 	default:
1025 		return (ENXIO);
1026 	}
1027 
1028 	/* Initialize mutexes */
1029 	mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev),
1030 	    "CESA Shared Data", MTX_DEF);
1031 	mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev),
1032 	    "CESA TDMA Descriptors Pool", MTX_DEF);
1033 	mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev),
1034 	    "CESA SA Descriptors Pool", MTX_DEF);
1035 	mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev),
1036 	    "CESA Requests Pool", MTX_DEF);
1037 	mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
1038 	    "CESA Sessions Pool", MTX_DEF);
1039 
1040 	/* Allocate I/O and IRQ resources */
1041 	error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res);
1042 	if (error) {
1043 		device_printf(dev, "could not allocate resources\n");
1044 		goto err0;
1045 	}
1046 
1047 	sc->sc_bsh = rman_get_bushandle(*(sc->sc_res));
1048 	sc->sc_bst = rman_get_bustag(*(sc->sc_res));
1049 
1050 	/* Setup CESA decoding windows */
1051 	error = decode_win_cesa_setup(sc);
1052 	if (error) {
1053 		device_printf(dev, "could not setup decoding windows\n");
1054 		goto err1;
1055 	}
1056 
1057 	/* Acquire SRAM base address */
1058 	error = cesa_setup_sram(sc);
1059 	if (error) {
1060 		device_printf(dev, "could not setup SRAM\n");
1061 		goto err1;
1062 	}
1063 
1064 	/* Setup interrupt handler */
1065 	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
1066 	    NULL, cesa_intr, sc, &(sc->sc_icookie));
1067 	if (error) {
1068 		device_printf(dev, "could not setup engine completion irq\n");
1069 		goto err1;
1070 	}
1071 
1072 	/* Create DMA tag for processed data */
1073 	error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
1074 	    1, 0,				/* alignment, boundary */
1075 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
1076 	    BUS_SPACE_MAXADDR,			/* highaddr */
1077 	    NULL, NULL,				/* filtfunc, filtfuncarg */
1078 	    CESA_MAX_REQUEST_SIZE,		/* maxsize */
1079 	    CESA_MAX_FRAGMENTS,			/* nsegments */
1080 	    CESA_MAX_REQUEST_SIZE, 0,		/* maxsegsz, flags */
1081 	    NULL, NULL,				/* lockfunc, lockfuncarg */
1082 	    &sc->sc_data_dtag);			/* dmat */
1083 	if (error)
1084 		goto err2;
1085 
1086 	/* Initialize data structures: TDMA Descriptors Pool */
1087 	error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm,
1088 	    CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc));
1089 	if (error)
1090 		goto err3;
1091 
1092 	STAILQ_INIT(&sc->sc_free_tdesc);
1093 	for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) {
1094 		sc->sc_tdesc[i].ctd_cthd =
1095 		    (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i;
1096 		sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr +
1097 		    (i * sizeof(struct cesa_tdma_hdesc));
1098 		STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i],
1099 		    ctd_stq);
1100 	}
1101 
1102 	/* Initialize data structures: SA Descriptors Pool */
1103 	error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm,
1104 	    CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc));
1105 	if (error)
1106 		goto err4;
1107 
1108 	STAILQ_INIT(&sc->sc_free_sdesc);
1109 	for (i = 0; i < CESA_SA_DESCRIPTORS; i++) {
1110 		sc->sc_sdesc[i].csd_cshd =
1111 		    (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i;
1112 		sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr +
1113 		    (i * sizeof(struct cesa_sa_hdesc));
1114 		STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i],
1115 		    csd_stq);
1116 	}
1117 
1118 	/* Initialize data structures: Requests Pool */
1119 	error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm,
1120 	    CESA_REQUESTS * sizeof(struct cesa_sa_data));
1121 	if (error)
1122 		goto err5;
1123 
1124 	STAILQ_INIT(&sc->sc_free_requests);
1125 	STAILQ_INIT(&sc->sc_ready_requests);
1126 	STAILQ_INIT(&sc->sc_queued_requests);
1127 	for (i = 0; i < CESA_REQUESTS; i++) {
1128 		sc->sc_requests[i].cr_csd =
1129 		    (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i;
1130 		sc->sc_requests[i].cr_csd_paddr =
1131 		    sc->sc_requests_cdm.cdm_paddr +
1132 		    (i * sizeof(struct cesa_sa_data));
1133 
1134 		/* Preallocate DMA maps */
1135 		error = bus_dmamap_create(sc->sc_data_dtag, 0,
1136 		    &sc->sc_requests[i].cr_dmap);
1137 		if (error && i > 0) {
1138 			i--;
1139 			do {
1140 				bus_dmamap_destroy(sc->sc_data_dtag,
1141 				    sc->sc_requests[i].cr_dmap);
1142 			} while (i--);
1143 
1144 			goto err6;
1145 		}
1146 
1147 		STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i],
1148 		    cr_stq);
1149 	}
1150 
1151 	/* Initialize data structures: Sessions Pool */
1152 	STAILQ_INIT(&sc->sc_free_sessions);
1153 	for (i = 0; i < CESA_SESSIONS; i++) {
1154 		sc->sc_sessions[i].cs_sid = i;
1155 		STAILQ_INSERT_TAIL(&sc->sc_free_sessions, &sc->sc_sessions[i],
1156 		    cs_stq);
1157 	}
1158 
1159 	/*
1160 	 * Initialize TDMA:
1161 	 * - Burst limit: 128 bytes,
1162 	 * - Outstanding reads enabled,
1163 	 * - No byte-swap.
1164 	 */
1165 	CESA_WRITE(sc, CESA_TDMA_CR, CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 |
1166 	    CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE);
1167 
1168 	/*
1169 	 * Initialize SA:
1170 	 * - SA descriptor is present at beginning of CESA SRAM,
1171 	 * - Multi-packet chain mode,
1172 	 * - Cooperation with TDMA enabled.
1173 	 */
1174 	CESA_WRITE(sc, CESA_SA_DPR, 0);
1175 	CESA_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA |
1176 	    CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE);
1177 
1178 	/* Unmask interrupts */
1179 	CESA_WRITE(sc, CESA_ICR, 0);
1180 	CESA_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr);
1181 	CESA_WRITE(sc, CESA_TDMA_ECR, 0);
1182 	CESA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS |
1183 	    CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT |
1184 	    CESA_TDMA_EMR_DATA_ERROR);
1185 
1186 	/* Register in OCF */
1187 	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
1188 	if (sc->sc_cid < 0) {
1189 		device_printf(dev, "could not get crypto driver id\n");
1190 		goto err7;
1191 	}
1192 
1193 	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
1194 	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
1195 	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
1196 	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
1197 	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
1198 	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
1199 	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
1200 
1201 	return (0);
1202 err7:
1203 	for (i = 0; i < CESA_REQUESTS; i++)
1204 		bus_dmamap_destroy(sc->sc_data_dtag,
1205 		    sc->sc_requests[i].cr_dmap);
1206 err6:
1207 	cesa_free_dma_mem(&sc->sc_requests_cdm);
1208 err5:
1209 	cesa_free_dma_mem(&sc->sc_sdesc_cdm);
1210 err4:
1211 	cesa_free_dma_mem(&sc->sc_tdesc_cdm);
1212 err3:
1213 	bus_dma_tag_destroy(sc->sc_data_dtag);
1214 err2:
1215 	bus_teardown_intr(dev, sc->sc_res[1], sc->sc_icookie);
1216 err1:
1217 	bus_release_resources(dev, cesa_res_spec, sc->sc_res);
1218 err0:
1219 	mtx_destroy(&sc->sc_sessions_lock);
1220 	mtx_destroy(&sc->sc_requests_lock);
1221 	mtx_destroy(&sc->sc_sdesc_lock);
1222 	mtx_destroy(&sc->sc_tdesc_lock);
1223 	mtx_destroy(&sc->sc_sc_lock);
1224 	return (ENXIO);
1225 }
1226 
1227 static int
1228 cesa_detach(device_t dev)
1229 {
1230 	struct cesa_softc *sc;
1231 	int i;
1232 
1233 	sc = device_get_softc(dev);
1234 	/* TODO: Wait for completion of queued requests before shutdown. */
1235 	/* TODO: Wait for queued requests completion before shutdown. */
1236 
1237 	/* Mask interrupts */
1238 	CESA_WRITE(sc, CESA_ICM, 0);
1239 	CESA_WRITE(sc, CESA_TDMA_EMR, 0);
1240 
1241 	/* Unregister from OCF */
1242 	crypto_unregister_all(sc->sc_cid);
1243 
1244 	/* Free DMA Maps */
1245 	for (i = 0; i < CESA_REQUESTS; i++)
1246 		bus_dmamap_destroy(sc->sc_data_dtag,
1247 		    sc->sc_requests[i].cr_dmap);
1248 
1249 	/* Free DMA Memory */
1250 	cesa_free_dma_mem(&sc->sc_requests_cdm);
1251 	cesa_free_dma_mem(&sc->sc_sdesc_cdm);
1252 	cesa_free_dma_mem(&sc->sc_tdesc_cdm);
1253 
1254 	/* Free DMA Tag */
1255 	bus_dma_tag_destroy(sc->sc_data_dtag);
1256 
1257 	/* Stop interrupt */
1258 	bus_teardown_intr(dev, sc->sc_res[1], sc->sc_icookie);
1259 
1260 	/* Release I/O and IRQ resources */
1261 	bus_release_resources(dev, cesa_res_spec, sc->sc_res);
1262 
1263 	/* Destroy mutexes */
1264 	mtx_destroy(&sc->sc_sessions_lock);
1265 	mtx_destroy(&sc->sc_requests_lock);
1266 	mtx_destroy(&sc->sc_sdesc_lock);
1267 	mtx_destroy(&sc->sc_tdesc_lock);
1268 	mtx_destroy(&sc->sc_sc_lock);
1269 
1270 	return (0);
1271 }
1272 
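/*
 * Interrupt handler: acknowledge the interrupt, report TDMA/CESA errors,
 * complete all finished requests (copying computed MACs back to the caller),
 * kick off the next batch of ready requests and unblock the crypto queue if
 * the driver previously ran out of resources.
 */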
1273 static void
1274 cesa_intr(void *arg)
1275 {
1276 	STAILQ_HEAD(, cesa_request) requests;
1277 	struct cesa_request *cr, *tmp;
1278 	struct cesa_softc *sc;
1279 	uint32_t ecr, icr;
1280 	int blocked;
1281 
1282 	sc = arg;
1283 
1284 	/* Ack interrupt */
1285 	ecr = CESA_READ(sc, CESA_TDMA_ECR);
1286 	CESA_WRITE(sc, CESA_TDMA_ECR, 0);
1287 	icr = CESA_READ(sc, CESA_ICR);
1288 	CESA_WRITE(sc, CESA_ICR, 0);
1289 
1290 	/* Check for TDMA errors */
1291 	if (ecr & CESA_TDMA_ECR_MISS) {
1292 		device_printf(sc->sc_dev, "TDMA Miss error detected!\n");
1293 		sc->sc_error = EIO;
1294 	}
1295 
1296 	if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) {
1297 		device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n");
1298 		sc->sc_error = EIO;
1299 	}
1300 
1301 	if (ecr & CESA_TDMA_ECR_BOTH_HIT) {
1302 		device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n");
1303 		sc->sc_error = EIO;
1304 	}
1305 
1306 	if (ecr & CESA_TDMA_ECR_DATA_ERROR) {
1307 		device_printf(sc->sc_dev, "TDMA Data error detected!\n");
1308 		sc->sc_error = EIO;
1309 	}
1310 
1311 	/* Check for CESA errors */
1312 	if (icr & sc->sc_tperr) {
1313 		device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n");
1314 		sc->sc_error = EIO;
1315 	}
1316 
1317 	/* If there is nothing more to do, return */
1318 	if ((icr & CESA_ICR_ACCTDMA) == 0)
1319 		return;
1320 
1321 	/* Get all finished requests */
1322 	CESA_LOCK(sc, requests);
1323 	STAILQ_INIT(&requests);
1324 	STAILQ_CONCAT(&requests, &sc->sc_queued_requests);
1325 	STAILQ_INIT(&sc->sc_queued_requests);
1326 	CESA_UNLOCK(sc, requests);
1327 
1328 	/* Execute all ready requests */
1329 	cesa_execute(sc);
1330 
1331 	/* Process completed requests */
1332 	cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD |
1333 	    BUS_DMASYNC_POSTWRITE);
1334 
1335 	STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) {
1336 		bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap,
1337 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1338 
1339 		cr->cr_crp->crp_etype = sc->sc_error;
1340 		if (cr->cr_mac)
1341 			crypto_copyback(cr->cr_crp->crp_flags,
1342 			    cr->cr_crp->crp_buf, cr->cr_mac->crd_inject,
1343 			    cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash);
1344 
1345 		crypto_done(cr->cr_crp);
1346 		cesa_free_request(sc, cr);
1347 	}
1348 
1349 	cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD |
1350 	    BUS_DMASYNC_PREWRITE);
1351 
1352 	sc->sc_error = 0;
1353 
1354 	/* Unblock driver if it ran out of resources */
1355 	CESA_LOCK(sc, sc);
1356 	blocked = sc->sc_blocked;
1357 	sc->sc_blocked = 0;
1358 	CESA_UNLOCK(sc, sc);
1359 
1360 	if (blocked)
1361 		crypto_unblock(sc->sc_cid, blocked);
1362 }
1363 
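/*
 * OCF session setup: parse the cryptoini chain (at most one cipher and one
 * hash), translate it into the engine configuration and store the cipher and
 * MAC keys in the session.
 */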
1364 static int
1365 cesa_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
1366 {
1367 	struct cesa_session *cs;
1368 	struct cesa_softc *sc;
1369 	struct cryptoini *enc;
1370 	struct cryptoini *mac;
1371 	int error;
1372 
1373 	sc = device_get_softc(dev);
1374 	enc = NULL;
1375 	mac = NULL;
1376 	error = 0;
1377 
1378 	/* Check and parse input */
1379 	if (cesa_is_hash(cri->cri_alg))
1380 		mac = cri;
1381 	else
1382 		enc = cri;
1383 
1384 	cri = cri->cri_next;
1385 
1386 	if (cri) {
1387 		if (!enc && !cesa_is_hash(cri->cri_alg))
1388 			enc = cri;
1389 
1390 		if (!mac && cesa_is_hash(cri->cri_alg))
1391 			mac = cri;
1392 
1393 		if (cri->cri_next || !(enc && mac))
1394 			return (EINVAL);
1395 	}
1396 
1397 	if ((enc && (enc->cri_klen / 8) > CESA_MAX_KEY_LEN) ||
1398 	    (mac && (mac->cri_klen / 8) > CESA_MAX_MKEY_LEN))
1399 		return (E2BIG);
1400 
1401 	/* Allocate session */
1402 	cs = cesa_alloc_session(sc);
1403 	if (!cs)
1404 		return (ENOMEM);
1405 
1406 	/* Prepare CESA configuration */
1407 	cs->cs_config = 0;
1408 	cs->cs_ivlen = 1;
1409 	cs->cs_mblen = 1;
1410 
1411 	if (enc) {
1412 		switch (enc->cri_alg) {
1413 		case CRYPTO_AES_CBC:
1414 			cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC;
1415 			cs->cs_ivlen = AES_BLOCK_LEN;
1416 			break;
1417 		case CRYPTO_DES_CBC:
1418 			cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC;
1419 			cs->cs_ivlen = DES_BLOCK_LEN;
1420 			break;
1421 		case CRYPTO_3DES_CBC:
1422 			cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE |
1423 			    CESA_CSHD_CBC;
1424 			cs->cs_ivlen = DES3_BLOCK_LEN;
1425 			break;
1426 		default:
1427 			error = EINVAL;
1428 			break;
1429 		}
1430 	}
1431 
1432 	if (!error && mac) {
1433 		switch (mac->cri_alg) {
1434 		case CRYPTO_MD5:
1435 			cs->cs_config |= CESA_CSHD_MD5;
1436 			cs->cs_mblen = 1;
1437 			cs->cs_hlen = MD5_HASH_LEN;
1438 			break;
1439 		case CRYPTO_MD5_HMAC:
1440 			cs->cs_config |= CESA_CSHD_MD5_HMAC;
1441 			cs->cs_mblen = MD5_HMAC_BLOCK_LEN;
1442 			cs->cs_hlen = CESA_HMAC_HASH_LENGTH;
1443 			break;
1444 		case CRYPTO_SHA1:
1445 			cs->cs_config |= CESA_CSHD_SHA1;
1446 			cs->cs_mblen = 1;
1447 			cs->cs_hlen = SHA1_HASH_LEN;
1448 			break;
1449 		case CRYPTO_SHA1_HMAC:
1450 			cs->cs_config |= CESA_CSHD_SHA1_HMAC;
1451 			cs->cs_mblen = SHA1_HMAC_BLOCK_LEN;
1452 			cs->cs_hlen = CESA_HMAC_HASH_LENGTH;
1453 			break;
1454 		default:
1455 			error = EINVAL;
1456 			break;
1457 		}
1458 	}
1459 
1460 	/* Save cipher key */
1461 	if (!error && enc && enc->cri_key) {
1462 		cs->cs_klen = enc->cri_klen / 8;
1463 		memcpy(cs->cs_key, enc->cri_key, cs->cs_klen);
1464 		if (enc->cri_alg == CRYPTO_AES_CBC)
1465 			error = cesa_prep_aes_key(cs);
1466 	}
1467 
1468 	/* Save digest key */
1469 	if (!error && mac && mac->cri_key)
1470 		error = cesa_set_mkey(cs, mac->cri_alg, mac->cri_key,
1471 		    mac->cri_klen / 8);
1472 
1473 	if (error) {
1474 		cesa_free_session(sc, cs);
1475 		return (EINVAL);
1476 	}
1477 
1478 	*sidp = cs->cs_sid;
1479 
1480 	return (0);
1481 }
1482 
1483 static int
1484 cesa_freesession(device_t dev, uint64_t tid)
1485 {
1486 	struct cesa_session *cs;
1487 	struct cesa_softc *sc;
1488 
1489 	sc = device_get_softc(dev);
1490 	cs = cesa_get_session(sc, CRYPTO_SESID2LID(tid));
1491 	if (!cs)
1492 		return (EINVAL);
1493 
1494 	/* Free session */
1495 	cesa_free_session(sc, cs);
1496 
1497 	return (0);
1498 }
1499 
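/*
 * OCF processing entry point: validate the request, allocate a request
 * descriptor (blocking the queue with ERESTART when none are free), prepare
 * IVs and keys, build the descriptor chain and enqueue it for execution.
 */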
1500 static int
1501 cesa_process(device_t dev, struct cryptop *crp, int hint)
1502 {
1503 	struct cesa_request *cr;
1504 	struct cesa_session *cs;
1505 	struct cryptodesc *crd;
1506 	struct cryptodesc *enc;
1507 	struct cryptodesc *mac;
1508 	struct cesa_softc *sc;
1509 	int error;
1510 
1511 	sc = device_get_softc(dev);
1512 	crd = crp->crp_desc;
1513 	enc = NULL;
1514 	mac = NULL;
1515 	error = 0;
1516 
1517 	/* Check session ID */
1518 	cs = cesa_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
1519 	if (!cs) {
1520 		crp->crp_etype = EINVAL;
1521 		crypto_done(crp);
1522 		return (0);
1523 	}
1524 
1525 	/* Check and parse input */
1526 	if (crp->crp_ilen > CESA_MAX_REQUEST_SIZE) {
1527 		crp->crp_etype = E2BIG;
1528 		crypto_done(crp);
1529 		return (0);
1530 	}
1531 
1532 	if (cesa_is_hash(crd->crd_alg))
1533 		mac = crd;
1534 	else
1535 		enc = crd;
1536 
1537 	crd = crd->crd_next;
1538 
1539 	if (crd) {
1540 		if (!enc && !cesa_is_hash(crd->crd_alg))
1541 			enc = crd;
1542 
1543 		if (!mac && cesa_is_hash(crd->crd_alg))
1544 			mac = crd;
1545 
1546 		if (crd->crd_next || !(enc && mac)) {
1547 			crp->crp_etype = EINVAL;
1548 			crypto_done(crp);
1549 			return (0);
1550 		}
1551 	}
1552 
1553 	/*
1554 	 * Get a request descriptor. Block the driver if there are no free
1555 	 * descriptors in the pool.
1556 	 */
1557 	cr = cesa_alloc_request(sc);
1558 	if (!cr) {
1559 		CESA_LOCK(sc, sc);
1560 		sc->sc_blocked = CRYPTO_SYMQ;
1561 		CESA_UNLOCK(sc, sc);
1562 		return (ERESTART);
1563 	}
1564 
1565 	/* Prepare request */
1566 	cr->cr_crp = crp;
1567 	cr->cr_enc = enc;
1568 	cr->cr_mac = mac;
1569 	cr->cr_cs = cs;
1570 
1571 	CESA_LOCK(sc, sessions);
1572 	cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1573 
1574 	if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
1575 		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1576 			memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
1577 		else
1578 			arc4rand(cr->cr_csd->csd_iv, cs->cs_ivlen, 0);
1579 
1580 		if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
1581 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1582 			    enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
1583 	} else if (enc) {
1584 		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1585 			memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
1586 		else
1587 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1588 			    enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
1589 	}
1590 
1591 	if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
1592 		if ((enc->crd_klen / 8) <= CESA_MAX_KEY_LEN) {
1593 			cs->cs_klen = enc->crd_klen / 8;
1594 			memcpy(cs->cs_key, enc->crd_key, cs->cs_klen);
1595 			if (enc->crd_alg == CRYPTO_AES_CBC)
1596 				error = cesa_prep_aes_key(cs);
1597 		} else
1598 			error = E2BIG;
1599 	}
1600 
1601 	if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
1602 		if ((mac->crd_klen / 8) <= CESA_MAX_MKEY_LEN)
1603 			error = cesa_set_mkey(cs, mac->crd_alg, mac->crd_key,
1604 			    mac->crd_klen / 8);
1605 		else
1606 			error = E2BIG;
1607 	}
1608 
1609 	/* Convert request to chain of TDMA and SA descriptors */
1610 	if (!error)
1611 		error = cesa_create_chain(sc, cr);
1612 
1613 	cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1614 	CESA_UNLOCK(sc, sessions);
1615 
1616 	if (error) {
1617 		cesa_free_request(sc, cr);
1618 		crp->crp_etype = error;
1619 		crypto_done(crp);
1620 		return (0);
1621 	}
1622 
1623 	bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD |
1624 	    BUS_DMASYNC_PREWRITE);
1625 
1626 	/* Enqueue request for execution */
1627 	cesa_enqueue_request(sc, cr);
1628 
1629 	/* Start execution unless more requests are expected to follow */
1630 	if ((hint & CRYPTO_HINT_MORE) == 0)
1631 		cesa_execute(sc);
1632 
1633 	return (0);
1634 }
1635 
1636 /*
1637  * Set CESA TDMA decode windows.
1638  */
1639 static int
1640 decode_win_cesa_setup(struct cesa_softc *sc)
1641 {
1642 	struct mem_region availmem_regions[FDT_MEM_REGIONS];
1643 	int availmem_regions_sz;
1644 	uint32_t br, cr, i;
1645 
1646 	/* Grab physical memory regions information from DTS */
1647 	if (fdt_get_mem_regions(availmem_regions, &availmem_regions_sz,
1648 	    NULL) != 0)
1649 		return (ENXIO);
1650 
1651 	if (availmem_regions_sz > MV_WIN_CESA_MAX) {
1652 		device_printf(sc->sc_dev, "Too many memory regions, cannot "
1653 		    "set CESA windows to cover whole DRAM\n");
1654 		return (ENXIO);
1655 	}
1656 
1657 	/* Disable and clear all CESA windows */
1658 	for (i = 0; i < MV_WIN_CESA_MAX; i++) {
1659 		CESA_WRITE(sc, MV_WIN_CESA_BASE(i), 0);
1660 		CESA_WRITE(sc, MV_WIN_CESA_CTRL(i), 0);
1661 	}
1662 
1663 	/* Fill CESA TDMA decoding windows with information acquired from DTS */
1664 	for (i = 0; i < availmem_regions_sz; i++) {
1665 		br = availmem_regions[i].mr_start;
1666 		cr = availmem_regions[i].mr_size;
1667 
1668 		/* Don't add entries smaller than 64KB */
1669 		if (cr & 0xffff0000) {
1670 			cr = (((cr - 1) & 0xffff0000) |
1671 			    (MV_WIN_DDR_ATTR(i) << MV_WIN_CPU_ATTR_SHIFT) |
1672 			    (MV_WIN_DDR_TARGET << MV_WIN_CPU_TARGET_SHIFT) |
1673 			    MV_WIN_CPU_ENABLE_BIT);
1674 			CESA_WRITE(sc, MV_WIN_CESA_BASE(i), br);
1675 			CESA_WRITE(sc, MV_WIN_CESA_CTRL(i), cr);
1676 		}
1677 	}
1678 
1679 	return (0);
1680 }
1681 
1682