xref: /netbsd/sys/dev/marvell/gtidmac.c (revision 1ea4aa55)
1 /*	$NetBSD: gtidmac.c,v 1.19 2023/06/19 08:40:30 msaitoh Exp $	*/
2 /*
3  * Copyright (c) 2008, 2012, 2016 KIYOHARA Takashi
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.19 2023/06/19 08:40:30 msaitoh Exp $");
30 
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/device.h>
34 #include <sys/errno.h>
35 #include <sys/endian.h>
36 #include <sys/kmem.h>
37 
38 #include <uvm/uvm_param.h>	/* For PAGE_SIZE */
39 
40 #include <dev/dmover/dmovervar.h>
41 
42 #include <dev/marvell/gtidmacreg.h>
43 #include <dev/marvell/gtidmacvar.h>
44 #include <dev/marvell/marvellreg.h>
45 #include <dev/marvell/marvellvar.h>
46 
47 #include <prop/proplib.h>
48 
49 #include "locators.h"
50 
51 #ifdef GTIDMAC_DEBUG
52 #define DPRINTF(x)	if (gtidmac_debug) printf x
53 int gtidmac_debug = 0;
54 #else
55 #define DPRINTF(x)
56 #endif
57 
58 #define GTIDMAC_NDESC		64
59 #define GTIDMAC_MAXCHAN		8
60 #define MVXORE_NDESC		128
61 #define MVXORE_MAXCHAN		2
62 
63 #define GTIDMAC_NSEGS		((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
64 #define MVXORE_NSEGS		((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
65 
66 
67 struct gtidmac_softc;
68 
69 struct gtidmac_function {
70 	int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
71 	void (*chan_free)(void *, int);
72 	int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
73 			 bus_size_t);
74 	void (*dma_start)(void *, int,
75 			  void (*dma_done_cb)(void *, int, bus_dmamap_t *,
76 						      bus_dmamap_t *, int));
77 	uint32_t (*dma_finish)(void *, int, int);
78 };
79 
80 struct gtidmac_dma_desc {
81 	int dd_index;
82 	union {
83 		struct gtidmac_desc *idmac_vaddr;
84 		struct mvxore_desc *xore_vaddr;
85 	} dd_vaddr;
86 #define dd_idmac_vaddr	dd_vaddr.idmac_vaddr
87 #define dd_xore_vaddr	dd_vaddr.xore_vaddr
88 	paddr_t dd_paddr;
89 	SLIST_ENTRY(gtidmac_dma_desc) dd_next;
90 };
91 
92 struct gtidmac_softc {
93 	device_t sc_dev;
94 
95 	bus_space_tag_t sc_iot;
96 	bus_space_handle_t sc_ioh;
97 
98 	bus_dma_tag_t sc_dmat;
99 	struct gtidmac_dma_desc *sc_dd_buffer;
100 	bus_dma_segment_t sc_pattern_segment;
101 	struct {
102 		u_char pbuf[16];	/* 16byte/pattern */
103 	} *sc_pbuf;			/*   x256 pattern */
104 
105 	int sc_gtidmac_nchan;
106 	struct gtidmac_desc *sc_dbuf;
107 	bus_dmamap_t sc_dmap;
108 	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
109 	struct {
110 		bus_dmamap_t chan_in;		/* In dmamap */
111 		bus_dmamap_t chan_out;		/* Out dmamap */
112 		uint64_t chan_totalcnt;		/* total transferred bytes */
113 		int chan_ddidx;
114 		void *chan_running;		/* opaque object data */
115 		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
116 				      bus_dmamap_t *, int);
117 	} sc_cdesc[GTIDMAC_MAXCHAN];
118 	struct gtidmac_intr_arg {
119 		struct gtidmac_softc *ia_sc;
120 		uint32_t ia_cause;
121 		uint32_t ia_mask;
122 		uint32_t ia_eaddr;
123 		uint32_t ia_eselect;
124 	} sc_intrarg[GTIDMAC_NINTRRUPT];
125 
126 	int sc_mvxore_nchan;
127 	struct mvxore_desc *sc_dbuf_xore;
128 	bus_dmamap_t sc_dmap_xore;
129 	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
130 	struct {
131 		bus_dmamap_t chan_in[MVXORE_NSRC];	/* In dmamap */
132 		bus_dmamap_t chan_out;			/* Out dmamap */
133 		uint64_t chan_totalcnt;			/* total transferred */
134 		int chan_ddidx;
135 		void *chan_running;			/* opaque object data */
136 		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
137 				      bus_dmamap_t *, int);
138 	} sc_cdesc_xore[MVXORE_MAXCHAN];
139 
140 	struct dmover_backend sc_dmb;
141 	struct dmover_backend sc_dmb_xore;
142 	int sc_dmb_busy;
143 };
144 struct gtidmac_softc *gtidmac_softc = NULL;
145 
146 static int gtidmac_match(device_t, struct cfdata *, void *);
147 static void gtidmac_attach(device_t, device_t, void *);
148 
149 static int gtidmac_intr(void *);
150 static int mvxore_port0_intr(void *);
151 static int mvxore_port1_intr(void *);
152 static int mvxore_intr(struct gtidmac_softc *, int);
153 
154 static void gtidmac_process(struct dmover_backend *);
155 static void gtidmac_dmover_run(struct dmover_backend *);
156 static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
157 				int);
158 static __inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
159 				dmover_buffer_type, dmover_buffer *, int);
160 static __inline void gtidmac_dmmap_unload(struct gtidmac_softc *, bus_dmamap_t, int);
161 
162 static uint32_t gtidmac_finish(void *, int, int);
163 static uint32_t mvxore_finish(void *, int, int);
164 
165 static void gtidmac_wininit(struct gtidmac_softc *, enum marvell_tags *);
166 static void mvxore_wininit(struct gtidmac_softc *, enum marvell_tags *);
167 
168 static int gtidmac_buffer_setup(struct gtidmac_softc *);
169 static int mvxore_buffer_setup(struct gtidmac_softc *);
170 
171 #ifdef GTIDMAC_DEBUG
172 static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
173 static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
174 				   struct gtidmac_dma_desc *, uint32_t, int);
175 static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
176 static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
177 				  struct gtidmac_dma_desc *, uint32_t, int);
178 #endif
179 
180 
181 static struct gtidmac_function gtidmac_functions = {
182 	.chan_alloc = gtidmac_chan_alloc,
183 	.chan_free = gtidmac_chan_free,
184 	.dma_setup = gtidmac_setup,
185 	.dma_start = gtidmac_start,
186 	.dma_finish = gtidmac_finish,
187 };
188 
189 static struct gtidmac_function mvxore_functions = {
190 	.chan_alloc = mvxore_chan_alloc,
191 	.chan_free = mvxore_chan_free,
192 	.dma_setup = mvxore_setup,
193 	.dma_start = mvxore_start,
194 	.dma_finish = mvxore_finish,
195 };
196 
197 static const struct dmover_algdesc gtidmac_algdescs[] = {
198 	{
199 		.dad_name = DMOVER_FUNC_ZERO,
200 		.dad_data = &gtidmac_functions,
201 		.dad_ninputs = 0
202 	},
203 	{
204 		.dad_name = DMOVER_FUNC_FILL8,
205 		.dad_data = &gtidmac_functions,
206 		.dad_ninputs = 0
207 	},
208 	{
209 		.dad_name = DMOVER_FUNC_COPY,
210 		.dad_data = &gtidmac_functions,
211 		.dad_ninputs = 1
212 	},
213 };
214 
215 static const struct dmover_algdesc mvxore_algdescs[] = {
216 #if 0
217 	/*
218 	 * These operations have many restrictions on the XOR engine; use
219 	 * the IDMAC for them instead.
220 	 */
221 	{
222 		.dad_name = DMOVER_FUNC_ZERO,
223 		.dad_data = &mvxore_functions,
224 		.dad_ninputs = 0
225 	},
226 	{
227 		.dad_name = DMOVER_FUNC_FILL8,
228 		.dad_data = &mvxore_functions,
229 		.dad_ninputs = 0
230 	},
231 #endif
232 	{
233 		.dad_name = DMOVER_FUNC_COPY,
234 		.dad_data = &mvxore_functions,
235 		.dad_ninputs = 1
236 	},
237 	{
238 		.dad_name = DMOVER_FUNC_ISCSI_CRC32C,
239 		.dad_data = &mvxore_functions,
240 		.dad_ninputs = 1
241 	},
242 	{
243 		.dad_name = DMOVER_FUNC_XOR2,
244 		.dad_data = &mvxore_functions,
245 		.dad_ninputs = 2
246 	},
247 	{
248 		.dad_name = DMOVER_FUNC_XOR3,
249 		.dad_data = &mvxore_functions,
250 		.dad_ninputs = 3
251 	},
252 	{
253 		.dad_name = DMOVER_FUNC_XOR4,
254 		.dad_data = &mvxore_functions,
255 		.dad_ninputs = 4
256 	},
257 	{
258 		.dad_name = DMOVER_FUNC_XOR5,
259 		.dad_data = &mvxore_functions,
260 		.dad_ninputs = 5
261 	},
262 	{
263 		.dad_name = DMOVER_FUNC_XOR6,
264 		.dad_data = &mvxore_functions,
265 		.dad_ninputs = 6
266 	},
267 	{
268 		.dad_name = DMOVER_FUNC_XOR7,
269 		.dad_data = &mvxore_functions,
270 		.dad_ninputs = 7
271 	},
272 	{
273 		.dad_name = DMOVER_FUNC_XOR8,
274 		.dad_data = &mvxore_functions,
275 		.dad_ninputs = 8
276 	},
277 };
278 
279 static int orion_88f5182_xore_irqs[] = { 30, 31 };
280 static int kirkwood_xore_irqs[] = { 5, 6, 7, 8 };
281 static int dove_xore_irqs[] = { 39, 40, 42, 43 };
282 static int armadaxp_xore_irqs0[] = { 51, 52 };
283 static int armadaxp_xore_irqs1[] = { 94, 95 };
284 
285 static struct {
286 	int model;
287 	int idmac_nchan;
288 	int idmac_irq;
289 	int xore_nchan;
290 	int *xore_irqs;
291 } channels[] = {
292 	/*
293 	 * Marvell System Controllers:
294 	 * Irqs must be passed in attach_args.
295 	 */
296 	{ MARVELL_DISCOVERY,		8, -1, 0, NULL },
297 	{ MARVELL_DISCOVERY_II,		8, -1, 0, NULL },
298 	{ MARVELL_DISCOVERY_III,	8, -1, 0, NULL },
299 #if 0
300 	{ MARVELL_DISCOVERY_LT,		4, -1, 2, NULL },
301 	{ MARVELL_DISCOVERY_V,		4, -1, 2, NULL },
302 	{ MARVELL_DISCOVERY_VI,		4, -1, 2, NULL },		????
303 #endif
304 
305 	/*
306 	 * Marvell System on Chips:
307 	 * No irqs are needed in attach_args; we always connect to the
308 	 * interrupt pin statically.
309 	 */
310 	{ MARVELL_ORION_1_88F1181,	4, 24, 0, NULL },
311 	{ MARVELL_ORION_2_88F1281,	4, 24, 0, NULL },
312 	{ MARVELL_ORION_1_88F5082,	4, 24, 0, NULL },
313 	{ MARVELL_ORION_1_88F5180N,	4, 24, 0, NULL },
314 	{ MARVELL_ORION_1_88F5181,	4, 24, 0, NULL },
315 	{ MARVELL_ORION_1_88F5182,	4, 24, 2, orion_88f5182_xore_irqs },
316 	{ MARVELL_ORION_2_88F5281,	4, 24, 0, NULL },
317 	{ MARVELL_ORION_1_88W8660,	4, 24, 0, NULL },
318 	{ MARVELL_KIRKWOOD_88F6180,	0, -1, 4, kirkwood_xore_irqs },
319 	{ MARVELL_KIRKWOOD_88F6192,	0, -1, 4, kirkwood_xore_irqs },
320 	{ MARVELL_KIRKWOOD_88F6281,	0, -1, 4, kirkwood_xore_irqs },
321 	{ MARVELL_KIRKWOOD_88F6282,	0, -1, 4, kirkwood_xore_irqs },
322 	{ MARVELL_DOVE_88AP510,		0, -1, 4, dove_xore_irqs },
323 	{ MARVELL_ARMADAXP_MV78130,	4, 33, 2, armadaxp_xore_irqs0 },
324 	{ MARVELL_ARMADAXP_MV78130,	0, -1, 2, armadaxp_xore_irqs1 },
325 	{ MARVELL_ARMADAXP_MV78160,	4, 33, 2, armadaxp_xore_irqs0 },
326 	{ MARVELL_ARMADAXP_MV78160,	0, -1, 2, armadaxp_xore_irqs1 },
327 	{ MARVELL_ARMADAXP_MV78230,	4, 33, 2, armadaxp_xore_irqs0 },
328 	{ MARVELL_ARMADAXP_MV78230,	0, -1, 2, armadaxp_xore_irqs1 },
329 	{ MARVELL_ARMADAXP_MV78260,	4, 33, 2, armadaxp_xore_irqs0 },
330 	{ MARVELL_ARMADAXP_MV78260,	0, -1, 2, armadaxp_xore_irqs1 },
331 	{ MARVELL_ARMADAXP_MV78460,	4, 33, 2, armadaxp_xore_irqs0 },
332 	{ MARVELL_ARMADAXP_MV78460,	0, -1, 2, armadaxp_xore_irqs1 },
333 };
334 
335 struct gtidmac_winacctbl *gtidmac_winacctbl;
336 struct gtidmac_winacctbl *mvxore_winacctbl;
337 
338 CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
339     gtidmac_match, gtidmac_attach, NULL, NULL);
340 CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
341     gtidmac_match, gtidmac_attach, NULL, NULL);
342 
343 
344 /* ARGSUSED */
345 static int
346 gtidmac_match(device_t parent, struct cfdata *match, void *aux)
347 {
348 	struct marvell_attach_args *mva = aux;
349 	int unit, i;
350 
351 	if (strcmp(mva->mva_name, match->cf_name) != 0)
352 		return 0;
353 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
354 		return 0;
355 	unit = 0;
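	/*
	 * A model may appear in channels[] more than once (e.g. the Armada XP
	 * entries); the Nth matching entry corresponds to unit N.
	 */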
356 	for (i = 0; i < __arraycount(channels); i++)
357 		if (mva->mva_model == channels[i].model) {
358 			if (mva->mva_unit == unit) {
359 				mva->mva_size = GTIDMAC_SIZE;
360 				return 1;
361 			}
362 			unit++;
363 		}
364 	return 0;
365 }
366 
367 /* ARGSUSED */
368 static void
369 gtidmac_attach(device_t parent, device_t self, void *aux)
370 {
371 	struct gtidmac_softc *sc = device_private(self);
372 	struct marvell_attach_args *mva = aux;
373 	prop_dictionary_t dict = device_properties(self);
374 	uint32_t idmac_irq, xore_irq, *xore_irqs, dmb_speed;
375 	int unit, idmac_nchan, xore_nchan, nsegs, i, j, n;
376 
377 	unit = 0;
378 	for (i = 0; i < __arraycount(channels); i++)
379 		if (mva->mva_model == channels[i].model) {
380 			if (mva->mva_unit == unit)
381 				break;
382 			unit++;
383 		}
384 	idmac_nchan = channels[i].idmac_nchan;
385 	idmac_irq = channels[i].idmac_irq;
386 	if (idmac_nchan != 0) {
387 		if (idmac_irq == -1)
388 			idmac_irq = mva->mva_irq;
389 		if (idmac_irq == -1)
390 			/* Discovery */
391 			if (!prop_dictionary_get_uint32(dict,
392 			    "idmac-irq", &idmac_irq)) {
393 				aprint_error(": no idmac-irq property\n");
394 				return;
395 			}
396 	}
397 	xore_nchan = channels[i].xore_nchan;
398 	xore_irqs = channels[i].xore_irqs;
399 	xore_irq = MVA_IRQ_DEFAULT;
400 	if (xore_nchan != 0) {
401 		if (xore_irqs == NULL)
402 			xore_irq = mva->mva_irq;
403 		if (xore_irqs == NULL && xore_irq == MVA_IRQ_DEFAULT)
404 			/* Discovery LT/V/VI */
405 			if (!prop_dictionary_get_uint32(dict,
406 			    "xore-irq", &xore_irq)) {
407 				aprint_error(": no xore-irq property\n");
408 				return;
409 			}
410 	}
411 
412 	aprint_naive("\n");
413 	aprint_normal(": Marvell IDMA Controller%s\n",
414 	    xore_nchan ? "/XOR Engine" : "");
415 	if (idmac_nchan > 0)
416 		aprint_normal_dev(self,
417 		    "IDMA Controller %d channels, intr %d...%d\n",
418 		    idmac_nchan, idmac_irq, idmac_irq + GTIDMAC_NINTRRUPT - 1);
419 	if (xore_nchan > 0) {
420 		aprint_normal_dev(self, "XOR Engine %d channels", xore_nchan);
421 		if (xore_irqs == NULL)
422 			aprint_normal(", intr %d...%d\n",
423 			    xore_irq, xore_irq + xore_nchan - 1);
424 		else {
425 			aprint_normal(", intr %d", xore_irqs[0]);
426 			for (i = 1; i < xore_nchan; i++)
427 				aprint_normal(", %d", xore_irqs[i]);
428 			aprint_normal("\n");
429 		}
430 	}
431 
432 	sc->sc_dev = self;
433 	sc->sc_iot = mva->mva_iot;
434 
435 	/* Map I/O registers */
436 	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
437 	    mva->mva_size, &sc->sc_ioh)) {
438 		aprint_error_dev(self, "can't map registers\n");
439 		return;
440 	}
441 
442 	/*
443 	 * Initialise DMA descriptors and associated metadata
444 	 */
445 	sc->sc_dmat = mva->mva_dmat;
446 	n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
447 	sc->sc_dd_buffer =
448 	    kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);
449 
450 	/* pattern buffer */
451 	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
452 	    &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
453 		aprint_error_dev(self,
454 		    "bus_dmamem_alloc failed: pattern buffer\n");
455 		goto fail2;
456 	}
457 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
458 	    (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
459 		aprint_error_dev(self,
460 		    "bus_dmamem_map failed: pattern buffer\n");
461 		goto fail3;
462 	}
463 	for (i = 0; i < 0x100; i++)
464 		for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
465 			sc->sc_pbuf[i].pbuf[j] = i;
466 
467 	if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
468 		aprint_error_dev(self, "no dmb_speed property\n");
469 		dmb_speed = 10;	/* Probably faster than swdmover. */
470 	}
471 
472 	/* IDMAC DMA descriptor buffer */
473 	sc->sc_gtidmac_nchan = idmac_nchan;
474 	if (sc->sc_gtidmac_nchan > 0) {
475 		if (gtidmac_buffer_setup(sc) != 0)
476 			goto fail4;
477 
478 		if (mva->mva_model != MARVELL_DISCOVERY)
479 			gtidmac_wininit(sc, mva->mva_tags);
480 
481 		/* Setup interrupt */
482 		for (i = 0; i < GTIDMAC_NINTRRUPT; i++) {
483 			j = i * idmac_nchan / GTIDMAC_NINTRRUPT;
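			/* j: first channel serviced by interrupt i. */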
484 
485 			sc->sc_intrarg[i].ia_sc = sc;
486 			sc->sc_intrarg[i].ia_cause = GTIDMAC_ICR(j);
487 			sc->sc_intrarg[i].ia_eaddr = GTIDMAC_EAR(j);
488 			sc->sc_intrarg[i].ia_eselect = GTIDMAC_ESR(j);
489 			marvell_intr_establish(idmac_irq + i, IPL_BIO,
490 			    gtidmac_intr, &sc->sc_intrarg[i]);
491 		}
492 
493 		/* Register us with dmover. */
494 		sc->sc_dmb.dmb_name = device_xname(self);
495 		sc->sc_dmb.dmb_speed = dmb_speed;
496 		sc->sc_dmb.dmb_cookie = sc;
497 		sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
498 		sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
499 		sc->sc_dmb.dmb_process = gtidmac_process;
500 		dmover_backend_register(&sc->sc_dmb);
501 		sc->sc_dmb_busy = 0;
502 	}
503 
504 	/* XORE DMA descriptor buffer */
505 	sc->sc_mvxore_nchan = xore_nchan;
506 	if (sc->sc_mvxore_nchan > 0) {
507 		if (mvxore_buffer_setup(sc) != 0)
508 			goto fail5;
509 
510 		/* Setup interrupt */
511 		for (i = 0; i < sc->sc_mvxore_nchan; i++)
512 			marvell_intr_establish(
513 			    xore_irqs != NULL ? xore_irqs[i] : xore_irq + i,
514 			    IPL_BIO,
515 			    (i & 0x2) ? mvxore_port1_intr : mvxore_port0_intr,
516 			    sc);
517 
518 		mvxore_wininit(sc, mva->mva_tags);
519 
520 		/* Register us with dmover. */
521 		sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
522 		sc->sc_dmb_xore.dmb_speed = dmb_speed;
523 		sc->sc_dmb_xore.dmb_cookie = sc;
524 		sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
525 		sc->sc_dmb_xore.dmb_nalgdescs = __arraycount(mvxore_algdescs);
526 		sc->sc_dmb_xore.dmb_process = gtidmac_process;
527 		dmover_backend_register(&sc->sc_dmb_xore);
528 	}
529 
530 	gtidmac_softc = sc;
531 
532 	return;
533 
534 fail5:
535 	for (i = sc->sc_gtidmac_nchan - 1; i >= 0; i--) {
536 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
537 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
538 	}
539 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
540 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
541 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
542 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * sc->sc_gtidmac_nchan);
543 	bus_dmamem_free(sc->sc_dmat,
544 	    sc->sc_dmap->dm_segs, sc->sc_dmap->dm_nsegs);
545 fail4:
546 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
547 fail3:
548 	bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
549 fail2:
550 	kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
551 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
552 	return;
553 }
554 
555 
556 static int
557 gtidmac_intr(void *arg)
558 {
559 	struct gtidmac_intr_arg *ia = arg;
560 	struct gtidmac_softc *sc = ia->ia_sc;
561 	uint32_t cause;
562 	int handled = 0, chan, error;
563 
564 	cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
565 	DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
566 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);
567 
568 	chan = 0;
569 	while (cause) {
570 		error = 0;
571 		if (cause & GTIDMAC_I_ADDRMISS) {
572 			aprint_error_dev(sc->sc_dev, "Address Miss");
573 			error = EINVAL;
574 		}
575 		if (cause & GTIDMAC_I_ACCPROT) {
576 			aprint_error_dev(sc->sc_dev,
577 			    "Access Protect Violation");
578 			error = EACCES;
579 		}
580 		if (cause & GTIDMAC_I_WRPROT) {
581 			aprint_error_dev(sc->sc_dev, "Write Protect");
582 			error = EACCES;
583 		}
584 		if (cause & GTIDMAC_I_OWN) {
585 			aprint_error_dev(sc->sc_dev, "Ownership Violation");
586 			error = EINVAL;
587 		}
588 
589 #define GTIDMAC_I_ERROR		  \
590 	   (GTIDMAC_I_ADDRMISS	| \
591 	    GTIDMAC_I_ACCPROT	| \
592 	    GTIDMAC_I_WRPROT	| \
593 	    GTIDMAC_I_OWN)
594 		if (cause & GTIDMAC_I_ERROR) {
595 			uint32_t sel;
596 			int select;
597 
598 			sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
599 			    ia->ia_eselect) & GTIDMAC_ESR_SEL;
600 			select = sel - chan * GTIDMAC_I_BITS;
601 			if (select >= 0 && select < GTIDMAC_I_BITS) {
602 				uint32_t ear;
603 
604 				ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
605 				    ia->ia_eaddr);
606 				aprint_error(": Error Address 0x%x\n", ear);
607 			} else
608 				aprint_error(": lost Error Address\n");
609 		}
610 
611 		if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
612 			sc->sc_cdesc[chan].chan_dma_done(
613 			    sc->sc_cdesc[chan].chan_running, chan,
614 			    &sc->sc_cdesc[chan].chan_in,
615 			    &sc->sc_cdesc[chan].chan_out, error);
616 			handled++;
617 		}
618 
619 		cause >>= GTIDMAC_I_BITS;
620 	}
621 	DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));
622 
623 	return handled;
624 }
625 
626 static int
627 mvxore_port0_intr(void *arg)
628 {
629 	struct gtidmac_softc *sc = arg;
630 
631 	return mvxore_intr(sc, 0);
632 }
633 
634 static int
635 mvxore_port1_intr(void *arg)
636 {
637 	struct gtidmac_softc *sc = arg;
638 
639 	return mvxore_intr(sc, 1);
640 }
641 
642 static int
643 mvxore_intr(struct gtidmac_softc *sc, int port)
644 {
645 	uint32_t cause;
646 	int handled = 0, chan, error;
647 
648 	cause =
649 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR(sc, port));
650 	DPRINTF(("XORE port %d intr: cause=0x%x\n", port, cause));
651 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
652 	    MVXORE_XEICR(sc, port), ~cause);
653 
654 	chan = 0;
655 	while (cause) {
656 		error = 0;
657 		if (cause & MVXORE_I_ADDRDECODE) {
658 			aprint_error_dev(sc->sc_dev, "Failed address decoding");
659 			error = EINVAL;
660 		}
661 		if (cause & MVXORE_I_ACCPROT) {
662 			aprint_error_dev(sc->sc_dev,
663 			    "Access Protect Violation");
664 			error = EACCES;
665 		}
666 		if (cause & MVXORE_I_WRPROT) {
667 			aprint_error_dev(sc->sc_dev, "Write Protect");
668 			error = EACCES;
669 		}
670 		if (cause & MVXORE_I_OWN) {
671 			aprint_error_dev(sc->sc_dev, "Ownership Violation");
672 			error = EINVAL;
673 		}
674 		if (cause & MVXORE_I_INTPARITY) {
675 			aprint_error_dev(sc->sc_dev, "Parity Error");
676 			error = EIO;
677 		}
678 		if (cause & MVXORE_I_XBAR) {
679 			aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
680 			error = EINVAL;
681 		}
682 
683 #define MVXORE_I_ERROR		  \
684 	   (MVXORE_I_ADDRDECODE	| \
685 	    MVXORE_I_ACCPROT	| \
686 	    MVXORE_I_WRPROT	| \
687 	    MVXORE_I_OWN	| \
688 	    MVXORE_I_INTPARITY	| \
689 	    MVXORE_I_XBAR)
690 		if (cause & MVXORE_I_ERROR) {
691 			uint32_t type;
692 			int event;
693 
694 			type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
695 			    MVXORE_XEECR(sc, port));
696 			type &= MVXORE_XEECR_ERRORTYPE_MASK;
697 			event = type - chan * MVXORE_I_BITS;
698 			if (event >= 0 && event < MVXORE_I_BITS) {
699 				uint32_t xeear;
700 
701 				xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
702 				    MVXORE_XEEAR(sc, port));
703 				aprint_error(": Error Address 0x%x\n", xeear);
704 			} else
705 				aprint_error(": lost Error Address\n");
706 		}
707 
708 		if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
709 			sc->sc_cdesc_xore[chan].chan_dma_done(
710 			    sc->sc_cdesc_xore[chan].chan_running, chan,
711 			    sc->sc_cdesc_xore[chan].chan_in,
712 			    &sc->sc_cdesc_xore[chan].chan_out, error);
713 			handled++;
714 		}
715 
716 		cause >>= MVXORE_I_BITS;
717 	}
718 	DPRINTF(("XORE port %d intr: %shandled\n",
719 	    port, handled ? "" : "not "));
720 
721 	return handled;
722 }
723 
724 
725 /*
726  * dmover(9) backend function.
727  */
728 static void
729 gtidmac_process(struct dmover_backend *dmb)
730 {
731 	struct gtidmac_softc *sc = dmb->dmb_cookie;
732 	int s;
733 
734 	/* If the backend is currently idle, go process the queue. */
735 	s = splbio();
736 	if (!sc->sc_dmb_busy)
737 		gtidmac_dmover_run(dmb);
738 	splx(s);
739 }
740 
741 static void
742 gtidmac_dmover_run(struct dmover_backend *dmb)
743 {
744 	struct gtidmac_softc *sc = dmb->dmb_cookie;
745 	struct dmover_request *dreq;
746 	const struct dmover_algdesc *algdesc;
747 	struct gtidmac_function *df;
748 	bus_dmamap_t *dmamap_in, *dmamap_out;
749 	int chan, ninputs, error, i;
750 
751 	sc->sc_dmb_busy = 1;
752 
753 	for (;;) {
754 		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
755 		if (dreq == NULL)
756 			break;
757 		algdesc = dreq->dreq_assignment->das_algdesc;
758 		df = algdesc->dad_data;
759 		chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
760 		if (chan == -1)
761 			return;
762 
763 		dmover_backend_remque(dmb, dreq);
764 		dreq->dreq_flags |= DMOVER_REQ_RUNNING;
765 
766 		/* XXXUNLOCK */
767 
768 		error = 0;
769 
770 		/* Load in/out buffers of dmover to bus_dmamap. */
771 		ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
772 		if (ninputs == 0) {
773 			int pno = 0;
774 
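			/*
			 * ZERO uses pattern 0; FILL8 uses the fill byte to
			 * select one of the 256 prebuilt 16-byte patterns.
			 */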
775 			if (algdesc->dad_name == DMOVER_FUNC_FILL8)
776 				pno = dreq->dreq_immediate[0];
777 
778 			i = 0;
779 			error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
780 			    &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
781 			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
782 			if (error == 0) {
783 				bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
784 				    sizeof(sc->sc_pbuf[pno]),
785 				    BUS_DMASYNC_PREWRITE);
786 
787 				/*
788 				 * gtidmac_dmmap_unload() will be called if
789 				 * an error occurs later.
790 				 */
791 				i = 1;
792 			}
793 		} else
794 			for (i = 0; i < ninputs; i++) {
795 				error = gtidmac_dmmap_load(sc,
796 				    *(dmamap_in + i), dreq->dreq_inbuf_type,
797 				    &dreq->dreq_inbuf[i], 0/*write*/);
798 				if (error != 0)
799 					break;
800 			}
801 		if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
802 			if (error == 0)
803 				error = gtidmac_dmmap_load(sc, *dmamap_out,
804 				    dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
805 				    1/*read*/);
806 
807 			if (error == 0) {
808 				/*
809 				 * The outbuf size is always taken to be the
810 				 * DMA transfer size of the dmover request.
811 				 */
812 				error = (*df->dma_setup)(sc, chan, ninputs,
813 				    dmamap_in, dmamap_out,
814 				    (*dmamap_out)->dm_mapsize);
815 				if (error != 0)
816 					gtidmac_dmmap_unload(sc, *dmamap_out,
817 					    1);
818 			}
819 		} else
820 			if (error == 0)
821 				error = (*df->dma_setup)(sc, chan, ninputs,
822 				    dmamap_in, dmamap_out,
823 				    (*dmamap_in)->dm_mapsize);
824 
825 		/* XXXLOCK */
826 
827 		if (error != 0) {
828 			for (; i-- > 0;)
829 				gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
830 			(*df->chan_free)(sc, chan);
831 
832 			dreq->dreq_flags |= DMOVER_REQ_ERROR;
833 			dreq->dreq_error = error;
834 			/* XXXUNLOCK */
835 			dmover_done(dreq);
836 			/* XXXLOCK */
837 			continue;
838 		}
839 
840 		(*df->dma_start)(sc, chan, gtidmac_dmover_done);
841 		break;
842 	}
843 
844 	/* All done */
845 	sc->sc_dmb_busy = 0;
846 }
847 
848 static void
849 gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
850 		    bus_dmamap_t *dmamap_out, int error)
851 {
852 	struct gtidmac_softc *sc;
853 	struct dmover_request *dreq = object;
854 	struct dmover_backend *dmb;
855 	struct gtidmac_function *df;
856 	uint32_t result;
857 	int ninputs, i;
858 
859 	KASSERT(dreq != NULL);
860 
861 	dmb = dreq->dreq_assignment->das_backend;
862 	df = dreq->dreq_assignment->das_algdesc->dad_data;
863 	ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
864 	sc = dmb->dmb_cookie;
865 
866 	result = (*df->dma_finish)(sc, chan, error);
867 	for (i = 0; i < ninputs; i++)
868 		gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
869 	if (dreq->dreq_assignment->das_algdesc->dad_name ==
870 	    DMOVER_FUNC_ISCSI_CRC32C)
871 		memcpy(dreq->dreq_immediate, &result, sizeof(result));
872 	else
873 		gtidmac_dmmap_unload(sc, *dmamap_out, 1);
874 
875 	(*df->chan_free)(sc, chan);
876 
877 	if (error) {
878 		dreq->dreq_error = error;
879 		dreq->dreq_flags |= DMOVER_REQ_ERROR;
880 	}
881 
882 	dmover_done(dreq);
883 
884 	/*
885 	 * See if we can start some more dmover(9) requests.
886 	 *
887 	 * Note: We're already at splbio() here.
888 	 */
889 	if (!sc->sc_dmb_busy)
890 		gtidmac_dmover_run(dmb);
891 }
892 
893 static __inline int
894 gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
895 		   dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
896 		   int read)
897 {
898 	int error, flags;
899 
900 	flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
901 	    (read ? BUS_DMA_READ : BUS_DMA_WRITE);
902 
903 	switch (dmbuf_type) {
904 	case DMOVER_BUF_LINEAR:
905 		error = bus_dmamap_load(sc->sc_dmat, dmamap,
906 		    dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
907 		    NULL, flags);
908 		break;
909 
910 	case DMOVER_BUF_UIO:
911 		if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
912 		    (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
913 			return (EINVAL);
914 
915 		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
916 		    dmbuf->dmbuf_uio, flags);
917 		break;
918 
919 	default:
920 		error = EINVAL;
921 	}
922 
923 	if (error == 0)
924 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
925 		    read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
926 
927 	return error;
928 }
929 
930 static __inline void
931 gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
932 {
933 
934 	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
935 	    read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
936 
937 	bus_dmamap_unload(sc->sc_dmat, dmamap);
938 }
939 
940 
941 /*
942  * IDMAC functions
943  */
944 int
945 gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
946 		   bus_dmamap_t **dmamap_out, void *object)
947 {
948 	struct gtidmac_softc *sc = tag;
949 	int chan;
950 
951 /* maybe need lock */
952 
953 	for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
954 		if (sc->sc_cdesc[chan].chan_running == NULL)
955 			break;
956 	if (chan >= sc->sc_gtidmac_nchan)
957 		return -1;
958 
959 
960 	sc->sc_cdesc[chan].chan_running = object;
961 
962 /* unlock */
963 
964 	*dmamap_in = &sc->sc_cdesc[chan].chan_in;
965 	*dmamap_out = &sc->sc_cdesc[chan].chan_out;
966 
967 	return chan;
968 }
969 
970 void
971 gtidmac_chan_free(void *tag, int chan)
972 {
973 	struct gtidmac_softc *sc = tag;
974 
975 /* maybe need lock */
976 
977 	sc->sc_cdesc[chan].chan_running = NULL;
978 
979 /* unlock */
980 }
981 
982 /* ARGSUSED */
983 int
984 gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
985 	      bus_dmamap_t *dmamap_out, bus_size_t size)
986 {
987 	struct gtidmac_softc *sc = tag;
988 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
989 	struct gtidmac_desc *desc;
990 	uint32_t ccl, bcnt, ires, ores;
991 	int n = 0, iidx, oidx;
992 
993 	KASSERT(ninputs == 0 || ninputs == 1);
994 
995 	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
996 #ifdef DIAGNOSTIC
997 	if (ccl & GTIDMAC_CCLR_CHANACT)
998 		panic("gtidmac_setup: chan%d already active", chan);
999 #endif
1000 
1001 	/* We always use chained mode, max (16M - 1) bytes per descriptor */
1002 	ccl = (GTIDMAC_CCLR_DESCMODE_16M				|
1003 #ifdef GTIDMAC_DEBUG
1004 	    GTIDMAC_CCLR_CDEN						|
1005 #endif
1006 	    GTIDMAC_CCLR_TRANSFERMODE_B /* Transfer Mode: Block */	|
1007 	    GTIDMAC_CCLR_INTMODE_NULL   /* Intr Mode: Next Desc NULL */	|
1008 	    GTIDMAC_CCLR_CHAINMODE_C    /* Chain Mode: Chained */);
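	/*
	 * A map smaller than the transfer length means a fixed address:
	 * set the hold bit and limit the burst to the map size (the source
	 * case is the dmover fill/zero pattern buffer).
	 */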
1009 	if (size != (*dmamap_in)->dm_mapsize) {
1010 		ccl |= GTIDMAC_CCLR_SRCHOLD;
1011 		if ((*dmamap_in)->dm_mapsize == 8)
1012 			ccl |= GTIDMAC_CCLR_SBL_8B;
1013 		else if ((*dmamap_in)->dm_mapsize == 16)
1014 			ccl |= GTIDMAC_CCLR_SBL_16B;
1015 		else if ((*dmamap_in)->dm_mapsize == 32)
1016 			ccl |= GTIDMAC_CCLR_SBL_32B;
1017 		else if ((*dmamap_in)->dm_mapsize == 64)
1018 			ccl |= GTIDMAC_CCLR_SBL_64B;
1019 		else if ((*dmamap_in)->dm_mapsize == 128)
1020 			ccl |= GTIDMAC_CCLR_SBL_128B;
1021 		else
1022 			panic("gtidmac_setup: chan%d source:"
1023 			    " unsupported hold size", chan);
1024 	} else
1025 		ccl |= GTIDMAC_CCLR_SBL_128B;
1026 	if (size != (*dmamap_out)->dm_mapsize) {
1027 		ccl |= GTIDMAC_CCLR_DESTHOLD;
1028 		if ((*dmamap_out)->dm_mapsize == 8)
1029 			ccl |= GTIDMAC_CCLR_DBL_8B;
1030 		else if ((*dmamap_out)->dm_mapsize == 16)
1031 			ccl |= GTIDMAC_CCLR_DBL_16B;
1032 		else if ((*dmamap_out)->dm_mapsize == 32)
1033 			ccl |= GTIDMAC_CCLR_DBL_32B;
1034 		else if ((*dmamap_out)->dm_mapsize == 64)
1035 			ccl |= GTIDMAC_CCLR_DBL_64B;
1036 		else if ((*dmamap_out)->dm_mapsize == 128)
1037 			ccl |= GTIDMAC_CCLR_DBL_128B;
1038 		else
1039 			panic("gtidmac_setup: chan%d destination:"
1040 			    " unsupported hold size", chan);
1041 	} else
1042 		ccl |= GTIDMAC_CCLR_DBL_128B;
1043 
1044 	fstdd = SLIST_FIRST(&sc->sc_dlist);
1045 	if (fstdd == NULL) {
1046 		aprint_error_dev(sc->sc_dev, "no descriptor\n");
1047 		return ENOMEM;
1048 	}
1049 	SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1050 	sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;
1051 
1052 	dd = fstdd;
1053 	ires = ores = 0;
1054 	iidx = oidx = 0;
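	/*
	 * Walk the in/out segment lists, emitting one chained descriptor per
	 * run bounded by the shorter remaining segment.
	 */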
1055 	while (1 /*CONSTCOND*/) {
1056 		if (ccl & GTIDMAC_CCLR_SRCHOLD) {
1057 			if (ccl & GTIDMAC_CCLR_DESTHOLD)
1058 				bcnt = size;	/* src/dst hold */
1059 			else
1060 				bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
1061 		} else if (ccl & GTIDMAC_CCLR_DESTHOLD)
1062 			bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
1063 		else
1064 			bcnt = uimin((*dmamap_in)->dm_segs[iidx].ds_len - ires,
1065 			    (*dmamap_out)->dm_segs[oidx].ds_len - ores);
1066 
1067 		desc = dd->dd_idmac_vaddr;
1068 		desc->bc.mode16m.bcnt =
1069 		    bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
1070 		desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
1071 		desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1072 
1073 		n += bcnt;
1074 		if (n >= size)
1075 			break;
1076 		if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
1077 			ires += bcnt;
1078 			if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
1079 				ires = 0;
1080 				iidx++;
1081 				KASSERT(iidx < (*dmamap_in)->dm_nsegs);
1082 			}
1083 		}
1084 		if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
1085 			ores += bcnt;
1086 			if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1087 				ores = 0;
1088 				oidx++;
1089 				KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1090 			}
1091 		}
1092 
1093 		nxtdd = SLIST_FIRST(&sc->sc_dlist);
1094 		if (nxtdd == NULL) {
1095 			aprint_error_dev(sc->sc_dev, "no descriptor\n");
1096 			return ENOMEM;
1097 		}
1098 		SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1099 
1100 		desc->nextdp = (uint32_t)nxtdd->dd_paddr;
1101 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1102 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1103 #ifdef GTIDMAC_DEBUG
1104 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1105 #else
1106 		    BUS_DMASYNC_PREWRITE);
1107 #endif
1108 
1109 		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1110 		dd = nxtdd;
1111 	}
1112 	desc->nextdp = (uint32_t)NULL;
1113 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
1114 #ifdef GTIDMAC_DEBUG
1115 	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1116 #else
1117 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
1118 #endif
1119 
1120 	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
1121 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
1122 	    fstdd->dd_paddr);
1123 
1124 #if BYTE_ORDER == LITTLE_ENDIAN
1125 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1126 	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
1127 #else
1128 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1129 	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
1130 #endif
1131 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);
1132 
1133 #ifdef GTIDMAC_DEBUG
1134 	gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
1135 #endif
1136 
1137 	sc->sc_cdesc[chan].chan_totalcnt += size;
1138 
1139 	return 0;
1140 }
1141 
1142 void
1143 gtidmac_start(void *tag, int chan,
1144 	      void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1145 				  int))
1146 {
1147 	struct gtidmac_softc *sc = tag;
1148 	uint32_t ccl;
1149 
1150 	DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));
1151 
1152 #ifdef GTIDMAC_DEBUG
1153 	gtidmac_dump_idmacreg(sc, chan);
1154 #endif
1155 
1156 	sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;
1157 
1158 	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1159 	/* Start and 'Fetch Next Descriptor' */
1160 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
1161 	    ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
1162 }
1163 
1164 static uint32_t
1165 gtidmac_finish(void *tag, int chan, int error)
1166 {
1167 	struct gtidmac_softc *sc = tag;
1168 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1169 	struct gtidmac_desc *desc;
1170 
1171 	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];
1172 
1173 #ifdef GTIDMAC_DEBUG
1174 	if (error || gtidmac_debug > 1) {
1175 		uint32_t ccl;
1176 
1177 		gtidmac_dump_idmacreg(sc, chan);
1178 		ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1179 		    GTIDMAC_CCLR(chan));
1180 		gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
1181 	}
1182 #endif
1183 
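	/*
	 * Return the descriptor chain (terminated by a NULL nextdp) to the
	 * free list.
	 */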
1184 	dd = fstdd;
1185 	do {
1186 		desc = dd->dd_idmac_vaddr;
1187 
1188 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1189 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1190 #ifdef GTIDMAC_DEBUG
1191 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1192 #else
1193 		    BUS_DMASYNC_POSTWRITE);
1194 #endif
1195 
1196 		nxtdd = SLIST_NEXT(dd, dd_next);
1197 		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1198 		dd = nxtdd;
1199 	} while (desc->nextdp);
1200 
1201 	return 0;
1202 }
1203 
1204 /*
1205  * XORE functions
1206  */
1207 int
1208 mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
1209 		  bus_dmamap_t **dmamap_out, void *object)
1210 {
1211 	struct gtidmac_softc *sc = tag;
1212 	int chan;
1213 
1214 /* maybe need lock */
1215 
1216 	for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
1217 		if (sc->sc_cdesc_xore[chan].chan_running == NULL)
1218 			break;
1219 	if (chan >= sc->sc_mvxore_nchan)
1220 		return -1;
1221 
1222 
1223 	sc->sc_cdesc_xore[chan].chan_running = object;
1224 
1225 /* unlock */
1226 
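	/* chan_in is an array of MVXORE_NSRC maps; hand back its first element. */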
1227 	*dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
1228 	*dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;
1229 
1230 	return chan;
1231 }
1232 
1233 void
1234 mvxore_chan_free(void *tag, int chan)
1235 {
1236 	struct gtidmac_softc *sc = tag;
1237 
1238 /* maybe need lock */
1239 
1240 	sc->sc_cdesc_xore[chan].chan_running = NULL;
1241 
1242 /* unlock */
1243 }
1244 
1245 /* ARGSUSED */
1246 int
1247 mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
1248 	     bus_dmamap_t *dmamap_out, bus_size_t size)
1249 {
1250 	struct gtidmac_softc *sc = tag;
1251 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1252 	struct mvxore_desc *desc;
1253 	uint32_t xexc, bcnt, cmd, lastcmd;
1254 	int n = 0, i;
1255 	uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
1256 	int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;
1257 
1258 #ifdef DIAGNOSTIC
1259 	uint32_t xexact =
1260 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1261 
1262 	if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
1263 	    MVXORE_XEXACTR_XESTATUS_ACT)
1264 		panic("mvxore_setup: chan%d already active."
1265 		    " mvxore does not support hot insertion", chan);
1266 #endif
1267 
1268 	xexc =
1269 	    (MVXORE_XEXCR_REGACCPROTECT	|
1270 	     MVXORE_XEXCR_DBL_128B	|
1271 	     MVXORE_XEXCR_SBL_128B);
1272 	cmd = lastcmd = 0;
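	/*
	 * Choose the operating mode from the request shape: >1 input -> XOR,
	 * 1 input -> DMA copy (or CRC32 when there is no output map),
	 * 0 inputs -> ECC (MEMINIT is compiled out below).
	 */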
1273 	if (ninputs > 1) {
1274 		xexc |= MVXORE_XEXCR_OM_XOR;
1275 		lastcmd = cmd = (1 << ninputs) - 1;
1276 	} else if (ninputs == 1) {
1277 		if ((*dmamap_out)->dm_nsegs == 0) {
1278 			xexc |= MVXORE_XEXCR_OM_CRC32;
1279 			lastcmd = MVXORE_DESC_CMD_CRCLAST;
1280 		} else
1281 			xexc |= MVXORE_XEXCR_OM_DMA;
1282 	} else if (ninputs == 0) {
1283 		if ((*dmamap_out)->dm_nsegs != 1) {
1284 			aprint_error_dev(sc->sc_dev,
1285 			    "XORE does not support %d DMA segments\n",
1286 			    (*dmamap_out)->dm_nsegs);
1287 			return EINVAL;
1288 		}
1289 
1290 		if ((*dmamap_in)->dm_mapsize == 0) {
1291 			xexc |= MVXORE_XEXCR_OM_ECC;
1292 
1293 			/* XXXXX: Maybe need to set Timer Mode registers? */
1294 
1295 #if 0
1296 		} else if ((*dmamap_in)->dm_mapsize == 8 ||
1297 		    (*dmamap_in)->dm_mapsize == 16) { /* in case dmover */
1298 			uint64_t pattern;
1299 
1300 			/* XXXX: Get pattern data */
1301 
1302 			KASSERT((*dmamap_in)->dm_mapsize == 8 ||
1303 			    (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
1304 						~PAGE_MASK) == sc->sc_pbuf);
1305 			pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;
1306 
1307 			/* XXXXX: XORE has a IVR.  We should get this first. */
1308 			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
1309 			    pattern);
1310 			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
1311 			    pattern >> 32);
1312 
1313 			xexc |= MVXORE_XEXCR_OM_MEMINIT;
1314 #endif
1315 		} else {
1316 			aprint_error_dev(sc->sc_dev,
1317 			    "XORE does not support DMA mapsize %zd\n",
1318 			    (*dmamap_in)->dm_mapsize);
1319 			return EINVAL;
1320 		}
1321 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1322 		    MVXORE_XEXDPR(sc, chan), (*dmamap_out)->dm_segs[0].ds_addr);
1323 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1324 		    MVXORE_XEXBSR(sc, chan), (*dmamap_out)->dm_mapsize);
1325 
1326 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1327 		    MVXORE_XEXCR(sc, chan), xexc);
1328 		sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1329 
1330 		return 0;
1331 	}
1332 
1333 	/* Make descriptor for DMA/CRC32/XOR */
1334 
1335 	fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
1336 	if (fstdd == NULL) {
1337 		aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1338 		return ENOMEM;
1339 	}
1340 	SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
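	/*
	 * XORE dma_descs follow all IDMAC dma_descs in sc_dd_buffer, hence
	 * the index offset.
	 */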
1341 	sc->sc_cdesc_xore[chan].chan_ddidx =
1342 	    fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;
1343 
1344 	dd = fstdd;
1345 	while (1 /*CONSTCOND*/) {
1346 		desc = dd->dd_xore_vaddr;
1347 		desc->stat = MVXORE_DESC_STAT_OWN;
1348 		desc->cmd = cmd;
1349 		if ((*dmamap_out)->dm_nsegs != 0) {
1350 			desc->dstaddr =
1351 			    (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1352 			bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
1353 		} else {
1354 			desc->dstaddr = 0;
1355 			bcnt = MVXORE_MAXXFER;	/* XXXXX */
1356 		}
1357 		for (i = 0; i < ninputs; i++) {
1358 			desc->srcaddr[i] =
1359 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
1360 			bcnt = uimin(bcnt,
1361 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
1362 		}
1363 		desc->bcnt = bcnt;
1364 
1365 		n += bcnt;
1366 		if (n >= size)
1367 			break;
1368 		ores += bcnt;
1369 		if ((*dmamap_out)->dm_nsegs != 0 &&
1370 		    ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1371 			ores = 0;
1372 			oidx++;
1373 			KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1374 		}
1375 		for (i = 0; i < ninputs; i++) {
1376 			ires[i] += bcnt;
1377 			if (ires[i] >=
1378 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
1379 				ires[i] = 0;
1380 				iidx[i]++;
1381 				KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
1382 			}
1383 		}
1384 
1385 		nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
1386 		if (nxtdd == NULL) {
1387 			aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1388 			return ENOMEM;
1389 		}
1390 		SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1391 
1392 		desc->nextda = (uint32_t)nxtdd->dd_paddr;
1393 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1394 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1395 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1396 
1397 		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1398 		dd = nxtdd;
1399 	}
1400 	desc->cmd = lastcmd;
1401 	desc->nextda = (uint32_t)NULL;
1402 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1403 	    dd->dd_index * sizeof(*desc), sizeof(*desc),
1404 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1405 
1406 	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
1407 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(sc, chan),
1408 	    fstdd->dd_paddr);
1409 
1410 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan), xexc);
1411 
1412 #ifdef GTIDMAC_DEBUG
1413 	gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/);
1414 #endif
1415 
1416 	sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1417 
1418 	return 0;
1419 }
1420 
1421 void
1422 mvxore_start(void *tag, int chan,
1423 	     void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1424 				 int))
1425 {
1426 	struct gtidmac_softc *sc = tag;
1427 	uint32_t xexact;
1428 
1429 	DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan));
1430 
1431 #ifdef GTIDMAC_DEBUG
1432 	gtidmac_dump_xorereg(sc, chan);
1433 #endif
1434 
1435 	sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb;
1436 
1437 	xexact =
1438 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1439 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan),
1440 	    xexact | MVXORE_XEXACTR_XESTART);
1441 }
1442 
1443 static uint32_t
1444 mvxore_finish(void *tag, int chan, int error)
1445 {
1446 	struct gtidmac_softc *sc = tag;
1447 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1448 	struct mvxore_desc *desc;
1449 	uint32_t xexc;
1450 
1451 #ifdef GTIDMAC_DEBUG
1452 	if (error || gtidmac_debug > 1)
1453 		gtidmac_dump_xorereg(sc, chan);
1454 #endif
1455 
1456 	xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1457 	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC ||
1458 	    (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT)
1459 		return 0;
1460 
1461 	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx];
1462 
1463 #ifdef GTIDMAC_DEBUG
1464 	if (error || gtidmac_debug > 1)
1465 		gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/);
1466 #endif
1467 
1468 	dd = fstdd;
1469 	do {
1470 		desc = dd->dd_xore_vaddr;
1471 
1472 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1473 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1474 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1475 
1476 		nxtdd = SLIST_NEXT(dd, dd_next);
1477 		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1478 		dd = nxtdd;
1479 	} while (desc->nextda);
1480 
1481 	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32)
1482 		return desc->result;
1483 	return 0;
1484 }
1485 
1486 static void
1487 gtidmac_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags)
1488 {
1489 	device_t pdev = device_parent(sc->sc_dev);
1490 	uint64_t base;
1491 	uint32_t size, cxap, en, winacc;
1492 	int window, target, attr, rv, i, j;
1493 
1494 	en = 0xff;
1495 	cxap = 0;
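	/*
	 * Program one address-decode window per tag.  In GTIDMAC_BAER a set
	 * bit keeps a window disabled, so start from 0xff and clear the bit
	 * of each window that gets programmed.
	 */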
1496 	for (window = 0, i = 0;
1497 	    tags[i] != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW; i++) {
1498 		rv = marvell_winparams_by_tag(pdev, tags[i],
1499 		    &target, &attr, &base, &size);
1500 		if (rv != 0 || size == 0)
1501 			continue;
1502 
1503 		if (base > 0xffffffffULL) {
1504 			if (window >= GTIDMAC_NREMAP) {
1505 				aprint_error_dev(sc->sc_dev,
1506 				    "can't remap window %d\n", window);
1507 				continue;
1508 			}
1509 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1510 			    GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff);
1511 		}
1512 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window),
1513 		    GTIDMAC_BARX_TARGET(target)	|
1514 		    GTIDMAC_BARX_ATTR(attr)	|
1515 		    GTIDMAC_BARX_BASE(base));
1516 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window),
1517 		    GTIDMAC_SRX_SIZE(size));
1518 		en &= ~GTIDMAC_BAER_EN(window);
1519 
1520 		winacc = GTIDMAC_CXAPR_WINACC_FA;
1521 		if (gtidmac_winacctbl != NULL)
1522 			for (j = 0;
1523 			    gtidmac_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
1524 			    j++) {
1525 				if (gtidmac_winacctbl[j].tag != tags[i])
1526 					continue;
1527 
1528 				switch (gtidmac_winacctbl[j].winacc) {
1529 				case GTIDMAC_WINACC_NOACCESSALLOWED:
1530 					winacc = GTIDMAC_CXAPR_WINACC_NOAA;
1531 					break;
1532 				case GTIDMAC_WINACC_READONLY:
1533 					winacc = GTIDMAC_CXAPR_WINACC_RO;
1534 					break;
1535 				case GTIDMAC_WINACC_FULLACCESS:
1536 				default: /* XXXX: default is full access */
1537 					break;
1538 				}
1539 				break;
1540 			}
1541 		cxap |= GTIDMAC_CXAPR_WINACC(window, winacc);
1542 
1543 		window++;
1544 	}
1545 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en);
1546 
1547 	for (i = 0; i < GTIDMAC_NACCPROT; i++)
1548 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i),
1549 		    cxap);
1550 }
1551 
1552 static void
1553 mvxore_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags)
1554 {
1555 	device_t pdev = device_parent(sc->sc_dev);
1556 	uint64_t base;
1557 	uint32_t target, attr, size, xexwc, winacc;
1558 	int window, rv, i, j, p;
1559 
1560 	xexwc = 0;
1561 	for (window = 0, i = 0;
1562 	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW; i++) {
1563 		rv = marvell_winparams_by_tag(pdev, tags[i],
1564 		    &target, &attr, &base, &size);
1565 		if (rv != 0 || size == 0)
1566 			continue;
1567 
1568 		if (base > 0xffffffffULL) {
1569 			if (window >= MVXORE_NREMAP) {
1570 				aprint_error_dev(sc->sc_dev,
1571 				    "can't remap window %d\n", window);
1572 				continue;
1573 			}
1574 			for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++)
1575 				bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1576 				    MVXORE_XEHARRX(sc, p, window),
1577 				    (base >> 32) & 0xffffffff);
1578 		}
1579 
1580 		for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++) {
1581 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1582 			    MVXORE_XEBARX(sc, p, window),
1583 			    MVXORE_XEBARX_TARGET(target) |
1584 			    MVXORE_XEBARX_ATTR(attr) |
1585 			    MVXORE_XEBARX_BASE(base));
1586 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1587 			    MVXORE_XESMRX(sc, p, window),
1588 			    MVXORE_XESMRX_SIZE(size));
1589 		}
1590 
1591 		winacc = MVXORE_XEXWCR_WINACC_FA;
1592 		if (mvxore_winacctbl != NULL)
1593 			for (j = 0;
1594 			    mvxore_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
1595 			    j++) {
1596 				if (mvxore_winacctbl[j].tag != tags[i])
1597 					continue;
1598 
1599 				switch (mvxore_winacctbl[j].winacc) {
1600 				case GTIDMAC_WINACC_NOACCESSALLOWED:
1601 					winacc = MVXORE_XEXWCR_WINACC_NOAA;
1602 					break;
1603 				case GTIDMAC_WINACC_READONLY:
1604 					winacc = MVXORE_XEXWCR_WINACC_RO;
1605 					break;
1606 				case GTIDMAC_WINACC_FULLACCESS:
1607 				default: /* XXXX: default is full access */
1608 					break;
1609 				}
1610 				break;
1611 			}
1612 		xexwc |= (MVXORE_XEXWCR_WINEN(window) |
1613 		    MVXORE_XEXWCR_WINACC(window, winacc));
1614 		window++;
1615 	}
1616 
1617 	for (i = 0; i < sc->sc_mvxore_nchan; i++) {
1618 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(sc, i),
1619 		    xexwc);
1620 
1621 		/* XXXXX: reset... */
1622 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(sc, 0),
1623 		    0);
1624 	}
1625 }
1626 
1627 static int
1628 gtidmac_buffer_setup(struct gtidmac_softc *sc)
1629 {
1630 	bus_dma_segment_t segs;
1631 	struct gtidmac_dma_desc *dd;
1632 	uint32_t mask;
1633 	int nchan, nsegs, i;
1634 
1635 	nchan = sc->sc_gtidmac_nchan;
1636 
1637 	if (bus_dmamem_alloc(sc->sc_dmat,
1638 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1639 	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1640 		aprint_error_dev(sc->sc_dev,
1641 		    "bus_dmamem_alloc failed: descriptor buffer\n");
1642 		goto fail0;
1643 	}
1644 	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1645 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1646 	    (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) {
1647 		aprint_error_dev(sc->sc_dev,
1648 		    "bus_dmamem_map failed: descriptor buffer\n");
1649 		goto fail1;
1650 	}
1651 	if (bus_dmamap_create(sc->sc_dmat,
1652 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1,
1653 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 0,
1654 	    BUS_DMA_NOWAIT, &sc->sc_dmap)) {
1655 		aprint_error_dev(sc->sc_dev,
1656 		    "bus_dmamap_create failed: descriptor buffer\n");
1657 		goto fail2;
1658 	}
1659 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf,
1660 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1661 	    NULL, BUS_DMA_NOWAIT)) {
1662 		aprint_error_dev(sc->sc_dev,
1663 		    "bus_dmamap_load failed: descriptor buffer\n");
1664 		goto fail3;
1665 	}
1666 	SLIST_INIT(&sc->sc_dlist);
1667 	for (i = 0; i < GTIDMAC_NDESC * nchan; i++) {
1668 		dd = &sc->sc_dd_buffer[i];
1669 		dd->dd_index = i;
1670 		dd->dd_idmac_vaddr = &sc->sc_dbuf[i];
1671 		dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr +
1672 		    (sizeof(struct gtidmac_desc) * i);
1673 		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1674 	}
1675 
1676 	/* Initialize IDMAC DMA channels */
1677 	mask = 0;
1678 	for (i = 0; i < nchan; i++) {
1679 		if (i > 0 && ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) {
1680 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1681 			    GTIDMAC_IMR(i - 1), mask);
1682 			mask = 0;
1683 		}
1684 
1685 		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1686 		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1687 		    &sc->sc_cdesc[i].chan_in)) {
1688 			aprint_error_dev(sc->sc_dev,
1689 			    "bus_dmamap_create failed: chan%d in\n", i);
1690 			goto fail4;
1691 		}
1692 		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1693 		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1694 		    &sc->sc_cdesc[i].chan_out)) {
1695 			aprint_error_dev(sc->sc_dev,
1696 			    "bus_dmamap_create failed: chan%d out\n", i);
1697 			bus_dmamap_destroy(sc->sc_dmat,
1698 			    sc->sc_cdesc[i].chan_in);
1699 			goto fail4;
1700 		}
1701 		sc->sc_cdesc[i].chan_totalcnt = 0;
1702 		sc->sc_cdesc[i].chan_running = NULL;
1703 
1704 		/* Ignore bit overflow; the mask is 32-bit. */
1705 		mask |= GTIDMAC_I(i,
1706 		    GTIDMAC_I_COMP	|
1707 		    GTIDMAC_I_ADDRMISS	|
1708 		    GTIDMAC_I_ACCPROT	|
1709 		    GTIDMAC_I_WRPROT	|
1710 		    GTIDMAC_I_OWN);
1711 
1712 		/* 8bits/channel * 4channels => 32bit */
1713 		if ((i & 0x3) == 0x3) {
1714 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1715 			    GTIDMAC_IMR(i), mask);
1716 			mask = 0;
1717 		}
1718 	}
1719 
1720 	return 0;
1721 
1722 fail4:
1723 	for (; i-- > 0;) {
1724 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
1725 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
1726 	}
1727 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1728 fail3:
1729 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
1730 fail2:
1731 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
1732 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan);
1733 fail1:
1734 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
1735 fail0:
1736 	return -1;
1737 }
1738 
1739 static int
1740 mvxore_buffer_setup(struct gtidmac_softc *sc)
1741 {
1742 	bus_dma_segment_t segs;
1743 	struct gtidmac_dma_desc *dd;
1744 	uint32_t mask;
1745 	int nchan, nsegs, i, j;
1746 
1747 	nchan = sc->sc_mvxore_nchan;
1748 
1749 	if (bus_dmamem_alloc(sc->sc_dmat,
1750 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1751 	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1752 		aprint_error_dev(sc->sc_dev,
1753 		    "bus_dmamem_alloc failed: xore descriptor buffer\n");
1754 		goto fail0;
1755 	}
1756 	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1757 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1758 	    (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) {
1759 		aprint_error_dev(sc->sc_dev,
1760 		    "bus_dmamem_map failed: xore descriptor buffer\n");
1761 		goto fail1;
1762 	}
1763 	if (bus_dmamap_create(sc->sc_dmat,
1764 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1,
1765 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 0,
1766 	    BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) {
1767 		aprint_error_dev(sc->sc_dev,
1768 		    "bus_dmamap_create failed: xore descriptor buffer\n");
1769 		goto fail2;
1770 	}
1771 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore,
1772 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1773 	    NULL, BUS_DMA_NOWAIT)) {
1774 		aprint_error_dev(sc->sc_dev,
1775 		    "bus_dmamap_load failed: xore descriptor buffer\n");
1776 		goto fail3;
1777 	}
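	/*
	 * The XORE software descriptors share sc_dd_buffer with the IDMAC
	 * ones and start right after them; chain them onto their own free
	 * list with their bus addresses filled in.
	 */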
1778 	SLIST_INIT(&sc->sc_dlist_xore);
1779 	for (i = 0; i < MVXORE_NDESC * nchan; i++) {
1780 		dd =
1781 		    &sc->sc_dd_buffer[i + GTIDMAC_NDESC * sc->sc_gtidmac_nchan];
1782 		dd->dd_index = i;
1783 		dd->dd_xore_vaddr = &sc->sc_dbuf_xore[i];
1784 		dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr +
1785 		    (sizeof(struct mvxore_desc) * i);
1786 		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1787 	}
1788 
1789 	/* Initialize XORE DMA channels */
1790 	mask = 0;
1791 	for (i = 0; i < nchan; i++) {
1792 		for (j = 0; j < MVXORE_NSRC; j++) {
1793 			if (bus_dmamap_create(sc->sc_dmat,
1794 			    MVXORE_MAXXFER, MVXORE_NSEGS,
1795 			    MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT,
1796 			    &sc->sc_cdesc_xore[i].chan_in[j])) {
1797 				aprint_error_dev(sc->sc_dev,
1798 				    "bus_dmamap_create failed:"
1799 				    " xore chan%d in[%d]\n", i, j);
1800 				goto fail4;
1801 			}
1802 		}
1803 		if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER,
1804 		    MVXORE_NSEGS, MVXORE_MAXXFER, 0,
1805 		    BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[i].chan_out)) {
1806 			aprint_error_dev(sc->sc_dev,
1807 			    "bus_dmamap_create failed: xore chan%d out\n", i);
1808 			goto fail5;
1809 		}
1810 		sc->sc_cdesc_xore[i].chan_totalcnt = 0;
1811 		sc->sc_cdesc_xore[i].chan_running = NULL;
1812 
1813 		mask |= MVXORE_I(i,
1814 		    MVXORE_I_EOC	|
1815 		    MVXORE_I_ADDRDECODE	|
1816 		    MVXORE_I_ACCPROT	|
1817 		    MVXORE_I_WRPROT	|
1818 		    MVXORE_I_OWN	|
1819 		    MVXORE_I_INTPARITY	|
1820 		    MVXORE_I_XBAR);
1821 
1822 		/* 16 bits/channel * 2 channels => 32 bits */
1823 		if (i & 0x1) {
1824 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1825 			    MVXORE_XEIMR(sc, i >> 1), mask);
1826 			mask = 0;
1827 		}
1828 	}
1829 
1830 	return 0;
1831 
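	/*
	 * Error cleanup: reached only via goto fail4/fail5.  The labels sit
	 * inside this loop so that each pass tears down one channel's output
	 * map and source maps.
	 */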
1832 	for (; i-- > 0;) {
1833 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[i].chan_out);
1834 
1835 fail5:
1836 		j = MVXORE_NSRC;
1837 fail4:
1838 		for (; j-- > 0;)
1839 			bus_dmamap_destroy(sc->sc_dmat,
1840 			    sc->sc_cdesc_xore[i].chan_in[j]);
1841 	}
1842 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore);
1843 fail3:
1844 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore);
1845 fail2:
1846 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore,
1847 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan);
1848 fail1:
1849 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
1850 fail0:
1851 	return -1;
1852 }
1853 
1854 #ifdef GTIDMAC_DEBUG
1855 static void
1856 gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan)
1857 {
1858 	uint32_t val;
1859 	char buf[256];
1860 
1861 	printf("IDMAC Registers\n");
1862 
1863 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan));
1864 	snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val);
1865 	printf("  Byte Count                 : %s\n", buf);
1866 	printf("    ByteCnt                  :   0x%06x\n",
1867 	    val & GTIDMAC_CIDMABCR_BYTECNT_MASK);
1868 	printf("  Source Address             : 0x%08x\n",
1869 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan)));
1870 	printf("  Destination Address        : 0x%08x\n",
1871 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan)));
1872 	printf("  Next Descriptor Pointer    : 0x%08x\n",
1873 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan)));
1874 	printf("  Current Descriptor Pointer : 0x%08x\n",
1875 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan)));
1876 
1877 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1878 	snprintb(buf, sizeof(buf),
1879 	    "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0"
1880 	    "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0",
1881 	    val);
1882 	printf("  Channel Control (Low)      : %s\n", buf);
1883 	printf("    SrcBurstLimit            : %s Bytes\n",
1884 	  (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" :
1885 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" :
1886 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" :
1887 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" :
1888 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" :
1889 	    "unknown");
1890 	printf("    DstBurstLimit            : %s Bytes\n",
1891 	  (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" :
1892 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" :
1893 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" :
1894 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" :
1895 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" :
1896 	    "unknown");
1897 	printf("    ChainMode                : %sChained\n",
1898 	    val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : "");
1899 	printf("    TransferMode             : %s\n",
1900 	    val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand");
1901 	printf("    DescMode                 : %s\n",
1902 	    val & GTIDMAC_CCLR_DESCMODE_16M ? "16M" : "64k");
1903 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan));
1904 	snprintb(buf, sizeof(buf),
1905 	    "\177\020b\001DescByteSwap\0b\000Endianness\0", val);
1906 	printf("  Channel Control (High)     : %s\n", buf);
1907 }
1908 
1909 static void
1910 gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
1911 		       uint32_t mode, int post)
1912 {
1913 	struct gtidmac_desc *desc;
1914 	int i;
1915 	char buf[256];
1916 
1917 	printf("IDMAC Descriptor\n");
1918 
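	/*
	 * Follow the chain through the software descriptor list.  When
	 * dumping after a transfer (post != 0), sync each descriptor for the
	 * CPU before reading it; otherwise give it back to the device with a
	 * PRE sync once it has been printed.
	 */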
1919 	i = 0;
1920 	while (1 /*CONSTCOND*/) {
1921 		if (post)
1922 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1923 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
1924 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1925 
1926 		desc = dd->dd_idmac_vaddr;
1927 
1928 		printf("%d (0x%lx)\n", i, dd->dd_paddr);
1929 		if (mode & GTIDMAC_CCLR_DESCMODE_16M) {
1930 			snprintb(buf, sizeof(buf),
1931 			    "\177\020b\037Own\0b\036BCLeft\0",
1932 			    desc->bc.mode16m.bcnt);
1933 			printf("  Byte Count              : %s\n", buf);
1934 			printf("    ByteCount             :   0x%06x\n",
1935 			    desc->bc.mode16m.bcnt &
1936 			    GTIDMAC_CIDMABCR_BYTECNT_MASK);
1937 		} else {
1938 			printf("  Byte Count              :     0x%04x\n",
1939 			    desc->bc.mode64k.bcnt);
1940 			printf("  Remaining Byte Count    :     0x%04x\n",
1941 			    desc->bc.mode64k.rbc);
1942 		}
1943 		printf("  Source Address          : 0x%08x\n", desc->srcaddr);
1944 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
1945 		printf("  Next Descriptor Pointer : 0x%08x\n", desc->nextdp);
1946 
1947 		if (desc->nextdp == 0)
1948 			break;
1949 
1950 		if (!post)
1951 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1952 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
1953 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1954 
1955 		i++;
1956 		dd = SLIST_NEXT(dd, dd_next);
1957 	}
1958 	if (!post)
1959 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1960 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1961 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1962 }
1963 
1964 static void
1965 gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan)
1966 {
1967 	uint32_t val, opmode;
1968 	char buf[64];
1969 
1970 	printf("XORE Registers\n");
1971 
1972 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1973 	snprintb(buf, sizeof(buf),
1974 	    "\177\020"
1975 	    "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0",
1976 	    val);
1977 	printf(" Configuration    : %s\n", buf);
1978 	opmode = val & MVXORE_XEXCR_OM_MASK;
1979 	printf("    OperationMode : %s operation\n",
1980 	  opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" :
1981 	  opmode == MVXORE_XEXCR_OM_CRC32 ? "CRC-32 calculate" :
1982 	  opmode == MVXORE_XEXCR_OM_DMA ? "DMA" :
1983 	  opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" :
1984 	  opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" :
1985 	  "unknown");
1986 	printf("    SrcBurstLimit : %s Bytes\n",
1987 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1988 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1989 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1990 	    "unknown");
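	/* XXX Decoded with the SrcBurstLimit mask/values (same as above). */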
1991 	printf("    DstBurstLimit : %s Bytes\n",
1992 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1993 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1994 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1995 	    "unknown");
1996 	val =
1997 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1998 	printf("  Activation      : 0x%08x\n", val);
1999 	val &= MVXORE_XEXACTR_XESTATUS_MASK;
2000 	printf("    XEstatus      : %s\n",
2001 	    val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" :
2002 	    val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" :
2003 	    val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???");
2004 
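	/* Descriptor pointers only apply to descriptor-driven modes. */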
2005 	if (opmode == MVXORE_XEXCR_OM_XOR ||
2006 	    opmode == MVXORE_XEXCR_OM_CRC32 ||
2007 	    opmode == MVXORE_XEXCR_OM_DMA) {
2008 		printf("  NextDescPtr     : 0x%08x\n",
2009 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2010 		    MVXORE_XEXNDPR(sc, chan)));
2011 		printf("  CurrentDescPtr  : 0x%08x\n",
2012 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2013 		    MVXORE_XEXCDPR(chan)));
2014 	}
2015 	printf("  ByteCnt         : 0x%08x\n",
2016 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(chan)));
2017 
2018 	if (opmode == MVXORE_XEXCR_OM_ECC ||
2019 	    opmode == MVXORE_XEXCR_OM_MEMINIT) {
2020 		printf("  DstPtr          : 0x%08x\n",
2021 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2022 		    MVXORE_XEXDPR(sc, chan)));
2023 		printf("  BlockSize       : 0x%08x\n",
2024 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2025 		    MVXORE_XEXBSR(sc, chan)));
2026 
2027 		if (opmode == MVXORE_XEXCR_OM_ECC) {
2028 			val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2029 			    MVXORE_XETMCR);
2030 			if (val & MVXORE_XETMCR_TIMEREN) {
2031 				val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT;
2032 				val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK;
2033 				printf("  SectionSizeCtrl : 0x%08x\n", 1 << val);
2034 				printf("  TimerInitVal    : 0x%08x\n",
2035 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2036 				    MVXORE_XETMIVR));
2037 				printf("  TimerCrntVal    : 0x%08x\n",
2038 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2039 				    MVXORE_XETMCVR));
2040 			}
2041 		} else	/* MVXORE_XEXCR_OM_MEMINIT */
2042 			printf("  InitVal         : 0x%08x%08x\n",
2043 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2044 			    MVXORE_XEIVRH),
2045 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2046 			    MVXORE_XEIVRL));
2047 	}
2048 }
2049 
2050 static void
2051 gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
2052 		      uint32_t mode, int post)
2053 {
2054 	struct mvxore_desc *desc;
2055 	int i, j;
2056 	char buf[256];
2057 
2058 	printf("XORE Descriptor\n");
2059 
2060 	mode &= MVXORE_XEXCR_OM_MASK;
2061 
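	/* Same walk as the IDMAC dump: sync, print, then follow the chain. */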
2062 	i = 0;
2063 	while (1 /*CONSTCOND*/) {
2064 		if (post)
2065 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2066 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
2067 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2068 
2069 		desc = dd->dd_xore_vaddr;
2070 
2071 		printf("%d (0x%lx)\n", i, dd->dd_paddr);
2072 
2073 		snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0",
2074 		    desc->stat);
2075 		printf("  Status                  : %s\n", buf);
2076 		if (desc->cmd & MVXORE_DESC_CMD_CRCLAST && post)
2077 			printf("  CRC-32 Result           : 0x%08x\n",
2078 			    desc->result);
2079 		snprintb(buf, sizeof(buf),
2080 		    "\177\020b\037EODIntEn\0b\036CRCLast\0"
2081 		    "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0"
2082 		    "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0",
2083 		    desc->cmd);
2084 		printf("  Command                 : %s\n", buf);
2085 		printf("  Next Descriptor Address : 0x%08x\n", desc->nextda);
2086 		printf("  Byte Count              :   0x%06x\n", desc->bcnt);
2087 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
2088 		if (mode == MVXORE_XEXCR_OM_XOR) {
2089 			for (j = 0; j < MVXORE_NSRC; j++)
2090 				if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j))
2091 					printf("  Source Address#%d        :"
2092 					    " 0x%08x\n", j, desc->srcaddr[j]);
2093 		} else
2094 			printf("  Source Address          : 0x%08x\n",
2095 			    desc->srcaddr[0]);
2096 
2097 		if (desc->nextda == 0)
2098 			break;
2099 
2100 		if (!post)
2101 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2102 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
2103 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2104 
2105 		i++;
2106 		dd = SLIST_NEXT(dd, dd_next);
2107 	}
2108 	if (!post)
2109 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2110 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
2111 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2112 }
2113 #endif
2114