xref: /netbsd/sys/arch/next68k/dev/nextdma.c (revision c4a72b64)
1 /*	$NetBSD: nextdma.c,v 1.34 2002/10/02 04:22:53 thorpej Exp $	*/
2 /*
3  * Copyright (c) 1998 Darrin B. Jewell
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Darrin B. Jewell
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/mbuf.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/ioctl.h>
40 #include <sys/errno.h>
41 
42 #define _M68K_BUS_DMA_PRIVATE
43 #include <machine/autoconf.h>
44 #include <machine/cpu.h>
45 #include <machine/intr.h>
46 
47 #include <m68k/cacheops.h>
48 
49 #include <next68k/next68k/isr.h>
50 #include <next68k/next68k/nextrom.h>
51 
52 #include <next68k/dev/intiovar.h>
53 
54 #include "nextdmareg.h"
55 #include "nextdmavar.h"
56 
57 #include "esp.h"
58 #include "xe.h"
59 
60 #if DEBUG
61 #define ND_DEBUG
62 #endif
63 
64 extern int turbo;
65 
/*
 * Debug scaffolding.
 *
 * NOTE(review): "panic" is deliberately overridden here to trap into
 * the debugger (trap #15) and then printf the message, instead of
 * calling the kernel's real panic().  It is an object-like macro that
 * expands to two statements, so every call site must sit inside braces
 * -- all current uses in this file do.  It cannot be wrapped in
 * do { } while (0) without converting it to a variadic macro, so it is
 * left untouched; confirm before using it in unbraced contexts.
 */
#define panic		__asm __volatile("trap  #15"); printf

#define NEXTDMA_DEBUG nextdma_debug
/* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
#if defined(ND_DEBUG)
int nextdma_debug = 0;
/* Wrapped in do { } while (0) so DPRINTF() is a single statement and
 * is safe in unbraced if/else bodies; the previous bare-if form with a
 * trailing semicolon was vulnerable to the dangling-else problem. */
#define DPRINTF(x) do { if (NEXTDMA_DEBUG) printf x; } while (0)
int ndtraceshow = 0;
char ndtrace[8192+100];		/* in-memory event trace buffer */
char *ndtracep = ndtrace;	/* next free slot in ndtrace[] */
/* Record trace events while room remains; "10 &&" is the author's
 * quick on/off switch, kept as-is.  Same do/while single-statement
 * wrapping as DPRINTF. */
#define NDTRACEIF(x) do { if (10 && /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && */ ndtracep < (ndtrace + 8192)) {x;} } while (0)
#else
#define DPRINTF(x) do { } while (0)
#define NDTRACEIF(x) do { } while (0)
#endif
#define PRINTF(x) printf x
82 
83 #if defined(ND_DEBUG)
84 int nextdma_debug_enetr_idx = 0;
85 unsigned int nextdma_debug_enetr_state[100] = { 0 };
86 int nextdma_debug_scsi_idx = 0;
87 unsigned int nextdma_debug_scsi_state[100] = { 0 };
88 
89 void nextdma_debug_initstate(struct nextdma_softc *);
90 void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
91 void nextdma_debug_scsi_dumpstate(void);
92 void nextdma_debug_enetr_dumpstate(void);
93 #endif
94 
95 
96 int	nextdma_match		__P((struct device *, struct cfdata *, void *));
97 void	nextdma_attach		__P((struct device *, struct device *, void *));
98 
99 void nextdmamap_sync		__P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
100 				     bus_size_t, int));
101 int nextdma_continue		__P((struct nextdma_softc *));
102 void nextdma_rotate		__P((struct nextdma_softc *));
103 
104 void nextdma_setup_cont_regs	__P((struct nextdma_softc *));
105 void nextdma_setup_curr_regs	__P((struct nextdma_softc *));
106 
107 #if NESP > 0
108 static int nextdma_esp_intr	__P((void *));
109 #endif
110 #if NXE > 0
111 static int nextdma_enet_intr	__P((void *));
112 #endif
/* Register accessors; both expect a variable "nsc" in the caller's scope. */
#define nd_bsr4(reg) bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
#define nd_bsw4(reg,val) bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))

CFATTACH_DECL(nextdma, sizeof(struct nextdma_softc),
    nextdma_match, nextdma_attach, NULL, NULL);

/* Static description of each DMA channel this driver can attach:
 * name, CSR base address, register window size, interrupt source,
 * and the interrupt handler to hook up. */
static struct nextdma_channel nextdma_channel[] = {
#if NESP > 0
	{ "scsi", NEXT_P_SCSI_CSR, DD_SIZE, NEXT_I_SCSI_DMA, &nextdma_esp_intr },
#endif
#if NXE > 0
	{ "enetx", NEXT_P_ENETX_CSR, DD_SIZE, NEXT_I_ENETX_DMA, &nextdma_enet_intr },
	{ "enetr", NEXT_P_ENETR_CSR, DD_SIZE, NEXT_I_ENETR_DMA, &nextdma_enet_intr },
#endif
};
static int nnextdma_channels = (sizeof(nextdma_channel)/sizeof(nextdma_channel[0]));

/* Number of channels claimed so far; also indexes nextdma_channel[]
 * during match/attach. */
static int attached = 0;
132 
133 struct nextdma_softc *
134 nextdma_findchannel(name)
135 	char *name;
136 {
137 	struct device *dev = alldevs.tqh_first;
138 
139 	while (dev != NULL) {
140 		if (!strncmp(dev->dv_xname, "nextdma", 7)) {
141 			struct nextdma_softc *nsc = (struct nextdma_softc *)dev;
142 			if (!strcmp (nsc->sc_chan->nd_name, name))
143 				return (nsc);
144 		}
145 		dev = dev->dv_list.tqe_next;
146 	}
147 	return (NULL);
148 }
149 
150 int
151 nextdma_match(parent, match, aux)
152 	struct device *parent;
153 	struct cfdata *match;
154 	void *aux;
155 {
156 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
157 
158 	if (attached >= nnextdma_channels)
159 		return (0);
160 
161 	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;
162 
163 	return (1);
164 }
165 
/*
 * Autoconf attach: bind this instance to the next unclaimed entry of
 * nextdma_channel[], map its register window, reset the engine, and
 * hook up and enable the channel's interrupt.
 */
void
nextdma_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct nextdma_softc *nsc = (struct nextdma_softc *)self;
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	/* Paranoia: match() should already have rejected this case. */
	if (attached >= nnextdma_channels)
		return;

	nsc->sc_chan = &nextdma_channel[attached];

	nsc->sc_dmat = ia->ia_dmat;
	nsc->sc_bst = ia->ia_bst;

	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
			  nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
		panic("%s: can't map DMA registers for channel %s",
		      nsc->sc_dev.dv_xname, nsc->sc_chan->nd_name);
	}

	/* Quiesce the engine before exposing it to interrupts. */
	nextdma_init (nsc);

	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
			NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
	INTR_ENABLE(nsc->sc_chan->nd_intr);

	printf (": channel %d (%s)\n", attached,
		nsc->sc_chan->nd_name);
	attached++;

	return;
}
200 
/*
 * Put the DMA engine into a known idle state: clear the software
 * transfer bookkeeping, pulse RESET in the CSR, and rewrite both the
 * current and continue register sets (with NULL maps this loads
 * benign/poison values).
 */
void
nextdma_init(nsc)
	struct nextdma_softc *nsc;
{
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	/* Forget any in-flight maps; the hardware is about to be reset. */
	nsc->sc_stat.nd_map = NULL;
	nsc->sc_stat.nd_idx = 0;
	nsc->sc_stat.nd_map_cont = NULL;
	nsc->sc_stat.nd_idx_cont = 0;
	nsc->sc_stat.nd_exception = 0;

	/* Pulse reset, then drop all CSR control bits. */
	nd_bsw4 (DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
	nd_bsw4 (DD_CSR, 0);

#if 01
	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);
#endif

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = nd_bsr4 (DD_CSR);

#if 1
		/* mourning (a 25Mhz 68040 mono slab) appears to set BUSEXC
		 * milo (a 25Mhz 68040 mono cube) didn't have this problem
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		/* Any of those bits still set means the reset didn't take. */
		if (state) {
			nextdma_print(nsc);
			panic("DMA did not reset");
		}
	}
#endif
}
252 
/*
 * Abort whatever the channel is doing: reset the hardware, then run
 * the completion callback for a still-queued continue map and the
 * shutdown callback so the owning driver can reclaim its buffers.
 * The whole sequence runs at spldma() to keep the interrupt handler
 * out of the state.
 */
void
nextdma_reset(nsc)
	struct nextdma_softc *nsc;
{
	int s;
	struct nextdma_status *stat = &nsc->sc_stat;

	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 1) nextdma_print(nsc);
#endif

	nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
	if ((stat->nd_map) || (stat->nd_map_cont)) {
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: resetting with non null continue map\n"));
			/* Hand the unfinished continue map back to the driver. */
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		/* The current map only gets the shutdown notification,
		 * not a completed callback. */
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		stat->nd_map = 0;
		stat->nd_idx = 0;
	}

	splx(s);
}
287 
288 /****************************************************************/
289 
290 
/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 *
 * The continue map/segment is promoted to current; when the continue
 * map has no further segments (or is absent), the driver's continue
 * callback is asked for a fresh map.
 */
void
nextdma_rotate(nsc)
	struct nextdma_softc *nsc;
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'r');
	DPRINTF(("DMA nextdma_rotate()\n"));

	/* Rotate the continue map into the current map */
	stat->nd_map = stat->nd_map_cont;
	stat->nd_idx = stat->nd_idx_cont;

	/* Advance to the next segment of the continue map; if it is
	 * exhausted, ask the driver for a new one (which starts with
	 * zero bytes transferred). */
	if ((!stat->nd_map_cont) ||
	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
		if (nsc->sc_conf.nd_continue_cb) {
			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
				(nsc->sc_conf.nd_cb_arg);
			if (stat->nd_map_cont) {
				stat->nd_map_cont->dm_xfer_len = 0;
			}
		} else {
			stat->nd_map_cont = 0;
		}
		stat->nd_idx_cont = 0;
	}

#if defined(DIAGNOSTIC) && 0
	/* Disabled check that each new segment obeys the engine's
	 * alignment constraints. */
	if (stat->nd_map_cont) {
		if (!DMA_BEGINALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
			nextdma_print(nsc);
			panic("DMA request unaligned at start");
		}
		if (!DMA_ENDALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
			nextdma_print(nsc);
			panic("DMA request unaligned at end");
		}
	}
#endif

}
336 
/*
 * Program the "current" register set (NEXT/LIMIT and their saved
 * shadows) from the current map segment.  With no current map, poison
 * values are written instead (zeros on turbo hardware).
 */
void
nextdma_setup_curr_regs(nsc)
	struct nextdma_softc *nsc;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'C');
	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));

	if (stat->nd_map) {
		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_limit += 15;
		}
	} else {
		dd_next = turbo ? 0 : 0xdeadbeef;
		dd_limit = turbo ? 0 : 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	/* Log the segment length into the trace buffer. */
	NDTRACEIF (if (stat->nd_map) {
		sprintf (ndtracep, "%ld", stat->nd_map->dm_segs[stat->nd_idx].ds_len);
		ndtracep += strlen (ndtracep);
	});

	/* Non-turbo ethernet transmit loads NEXT through the INITBUF
	 * alias; everyone else uses DD_NEXT directly. */
	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
		nd_bsw4 (DD_NEXT_INITBUF, dd_next);
	} else {
		nd_bsw4 (DD_NEXT, dd_next);
	}
	nd_bsw4 (DD_LIMIT, dd_limit);
	if (!turbo) nd_bsw4 (DD_SAVED_NEXT, dd_saved_next);
	if (!turbo) nd_bsw4 (DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	/* NOTE(review): the readback checks both DD_NEXT_INITBUF and
	 * DD_NEXT no matter which one was written above -- presumably
	 * the two addresses alias the same register; confirm against
	 * the channel register map before changing this. */
	if ((nd_bsr4 (DD_NEXT_INITBUF) != dd_next)
	    || (nd_bsr4 (DD_NEXT) != dd_next)
	    || (nd_bsr4 (DD_LIMIT) != dd_limit)
	    || (!turbo && (nd_bsr4 (DD_SAVED_NEXT) != dd_saved_next))
	    || (!turbo && (nd_bsr4 (DD_SAVED_LIMIT) != dd_saved_limit))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to current regs");
	}
#endif
}
393 
/*
 * Program the "continue" register set (START/STOP and their saved
 * shadows) from the continue map segment, so the engine can chain
 * into it (SUPDATE) when the current segment completes.
 */
void
nextdma_setup_cont_regs(nsc)
	struct nextdma_softc *nsc;
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'c');
	DPRINTF(("DMA nextdma_setup_regs()\n"));

	if (stat->nd_map_cont) {
		dd_start = stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
		dd_stop  = (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_stop += 15;
		}
	} else {
		/* No continue map: poison values, except turbo START
		 * which mirrors the current DD_NEXT. */
		dd_start = turbo ? nd_bsr4 (DD_NEXT) : 0xdeadbee0;
		dd_stop = turbo ? 0 : 0xdeadbee0;
	}

	dd_saved_start = dd_start;
	dd_saved_stop  = dd_stop;

	/* Log the segment length into the trace buffer. */
	NDTRACEIF (if (stat->nd_map_cont) {
		sprintf (ndtracep, "%ld", stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		ndtracep += strlen (ndtracep);
	});

	nd_bsw4 (DD_START, dd_start);
	nd_bsw4 (DD_STOP, dd_stop);
	if (!turbo) nd_bsw4 (DD_SAVED_START, dd_saved_start);
	if (!turbo) nd_bsw4 (DD_SAVED_STOP, dd_saved_stop);
	/* Turbo ethernet receive also wants dd_start in the register at
	 * DD_STOP - 0x40; purpose unknown, kept as-is. */
	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
		nd_bsw4 (DD_STOP - 0x40, dd_start);

#ifdef DIAGNOSTIC
	/* Readback check; DD_STOP is only verified when nonzero. */
	if ((nd_bsr4 (DD_START) != dd_start)
	    || (dd_stop && (nd_bsr4 (DD_STOP) != dd_stop))
	    || (!turbo && (nd_bsr4 (DD_SAVED_START) != dd_saved_start))
	    || (!turbo && (nd_bsr4 (DD_SAVED_STOP) != dd_saved_stop))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to continue regs");
	}
#endif
}
447 
448 /****************************************************************/
449 
#if NESP > 0
/*
 * Interrupt handler for the SCSI DMA channel: confirm the interrupt
 * is actually pending, then forward it to the esp driver's DMA
 * handler with the driver's callback argument.
 */
static int
nextdma_esp_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	int esp_dma_int __P((void *)); /* XXX */

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	return esp_dma_int (nsc->sc_conf.nd_cb_arg);

}
#endif
471 
#if NXE > 0
/*
 * Interrupt handler for the ethernet DMA channels.  Works out how far
 * the engine actually got (via the saved limit register), credits the
 * bytes to the current map, rotates the map chain, and either
 * restarts the engine or shuts the channel down.
 */
static int
nextdma_enet_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	unsigned int state;
	bus_addr_t onext;
	bus_addr_t olimit;
	bus_addr_t slimit;
	int result;
	struct nextdma_status *stat = &nsc->sc_stat;

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	NDTRACEIF (*ndtracep++ = 'D');
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA missing current map in interrupt!");
	}
#endif

	state = nd_bsr4 (DD_CSR);

#if defined(ND_DEBUG)
	nextdma_debug_savestate(nsc, state);
#endif

#ifdef DIAGNOSTIC
	if (/* (state & DMACSR_READ) || */ !(state & DMACSR_COMPLETE)) {
		char sbuf[256];
		nextdma_print(nsc);
		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
		printf("DMA: state 0x%s\n",sbuf);
		panic("DMA complete not set in interrupt");
	}
#endif

	DPRINTF(("DMA: finishing xfer\n"));

	/* Bounds of the segment the engine was working on. */
	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;

	/* Encode the situation as a bitmask in "result":
	 *   0x01 ENABLE still set   0x02 SUPDATE set
	 *   0x04 shutdown expected (no continue map)   0x08 BUSEXC set
	 * The switch below picks which register holds the address the
	 * transfer actually stopped at ("slimit"). */
	result = 0;
	if (state & DMACSR_ENABLE) {
		/* enable bit was set */
		result |= 0x01;
	}
	if (state & DMACSR_SUPDATE) {
		/* supdate bit was set */
		result |= 0x02;
	}
	if (stat->nd_map_cont == NULL) {
		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
		/* Expecting a shutdown, didn't SETSUPDATE last turn */
		result |= 0x04;
	}
	if (state & DMACSR_BUSEXC) {
		/* bus exception bit was set */
		result |= 0x08;
	}
	switch (result) {
	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
		if (turbo) {
			/* Turbo has no usable SAVED_LIMIT; read the
			 * magic location instead. */
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_NEXT);
		break;
	case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_LIMIT);
		break;
	default:
#ifdef DIAGNOSTIC
	{
		char sbuf[256];
		printf("DMA: please send this output to port-next68k-maintainer@netbsd.org:\n");
		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: condition 0x%02x not yet documented to occur",result);
	}
#endif
	/* Non-DIAGNOSTIC fallback: assume the whole segment moved. */
	slimit = olimit;
	break;
	}

	/* Undo the transmit-channel address magic applied when the
	 * limit was programmed in nextdma_setup_curr_regs(). */
	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
		slimit -= 15;
	}

#ifdef DIAGNOSTIC
	if ((state & DMACSR_READ))
		DPRINTF (("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n", onext, slimit, olimit,
			  (state & DMACSR_READ) ? "read" : "write"));
	/* The stop address must lie inside the segment. */
	if ((slimit < onext) || (slimit > olimit)) {
		char sbuf[256];
		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer",slimit);
	}
#endif

#ifdef DIAGNOSTIC
	/* A short transfer is only legal on the map's last segment. */
	if ((state & DMACSR_ENABLE) && ((stat->nd_idx+1) != stat->nd_map->dm_nsegs)) {
		if (slimit != olimit) {
			char sbuf[256];
			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
			printf("DMA: state 0x%s\n",sbuf);
			nextdma_print(nsc);
			panic("DMA: short limit register (0x%08lx) w/o finishing map.",slimit);
		}
	}
#endif

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	/* Credit the bytes actually moved to this map. */
	stat->nd_map->dm_xfer_len += slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
		if (nsc->sc_conf.nd_completed_cb)
			(*nsc->sc_conf.nd_completed_cb)
				(stat->nd_map, nsc->sc_conf.nd_cb_arg);
	} else {
		KASSERT(stat->nd_map == stat->nd_map_cont);
		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
	}
	stat->nd_map = 0;
	stat->nd_idx = 0;

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG) {
		char sbuf[256];
		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
		printf("CLNDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
	}
#endif
	if (state & DMACSR_ENABLE) {
		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

		/* Engine still running: queue up the next segment and
		 * restart in the same direction. */
		nextdma_rotate(nsc);
		nextdma_setup_cont_regs(nsc);

		if (state & DMACSR_READ) {
			dmadir = DMACSR_SETREAD;
		} else {
			dmadir = DMACSR_SETWRITE;
		}

		/* Only request chaining (SETSUPDATE) when more data
		 * is actually queued. */
		if (stat->nd_map_cont == NULL) {
			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
			NDTRACEIF (*ndtracep++ = 'g');
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
			NDTRACEIF (*ndtracep++ = 'G');
		}
	} else {
		DPRINTF(("DMA: a shutdown occurred\n"));
		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

		/* Cleanup more incomplete transfers */
		/* cleanup continue map */
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: shutting down with non null continue map\n"));
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
	}

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	return(1);
}
#endif
704 
705 /*
706  * Check to see if dma has finished for a channel */
707 int
708 nextdma_finished(nsc)
709 	struct nextdma_softc *nsc;
710 {
711 	int r;
712 	int s;
713 	struct nextdma_status *stat = &nsc->sc_stat;
714 
715 	s = spldma();
716 	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);
717 	splx(s);
718 
719 	return(r);
720 }
721 
/*
 * Kick off a new transfer in the given direction (DMACSR_SETREAD or
 * DMACSR_SETWRITE).  Both the current and the continue map are
 * preloaded via nextdma_rotate() before the engine is enabled; the
 * channel must be idle (nextdma_finished()) on entry.
 */
void
nextdma_start(nsc, dmadir)
	struct nextdma_softc *nsc;
	u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'n');
#ifdef DIAGNOSTIC
	if (!nextdma_finished(nsc)) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		panic("DMA trying to start before previous finished on intr(0x%s)", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA start (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null map");
	}
	if (stat->nd_map_cont) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null continue map");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE");
	}
#endif

#if defined(ND_DEBUG)
	nextdma_debug_initstate(nsc);
#endif

	/* preload both the current and the continue maps */
	nextdma_rotate(nsc);

#ifdef DIAGNOSTIC
	if (!stat->nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* Second rotate: the map fetched above becomes current, and a
	 * further continue map is fetched if the driver has more data. */
	nextdma_rotate(nsc);

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
		       (dmadir == DMACSR_SETREAD ? "read" : "write"), stat->nd_map->dm_nsegs, sbuf);
	}
#endif

	/* Reset/initialize the engine, then load both register sets. */
	nd_bsw4 (DD_CSR, (turbo ? DMACSR_INITBUFTURBO : DMACSR_INITBUF) |
		 DMACSR_RESET | dmadir);
	nd_bsw4 (DD_CSR, 0);

	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	/* Enable; request chaining (SETSUPDATE) only when a continue
	 * map is queued. */
	if (stat->nd_map_cont == NULL) {
		nd_bsw4 (DD_CSR, DMACSR_SETENABLE | dmadir);
	} else {
		nd_bsw4 (DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}
811 
/* This routine is used for debugging */
/*
 * Dump the complete software and hardware state of a channel to the
 * console: interrupt status/mask, both dmamaps with all their
 * segments, and every DMA register.  Called from panic paths.
 */
void
nextdma_print(nsc)
	struct nextdma_softc *nsc;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];
	struct nextdma_status *stat = &nsc->sc_stat;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = nd_bsr4 (DD_CSR);
	dd_next         = nd_bsr4 (DD_NEXT);
	dd_next_initbuf = nd_bsr4 (DD_NEXT_INITBUF);
	dd_limit        = nd_bsr4 (DD_LIMIT);
	dd_start        = nd_bsr4 (DD_START);
	dd_stop         = nd_bsr4 (DD_STOP);
	dd_saved_next   = nd_bsr4 (DD_SAVED_NEXT);
	dd_saved_limit  = nd_bsr4 (DD_SAVED_LIMIT);
	dd_saved_start  = nd_bsr4 (DD_SAVED_START);
	dd_saved_stop   = nd_bsr4 (DD_SAVED_STOP);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	/* Current map: summary, active segment, then every segment. */
	if (stat->nd_map) {
		int i;

		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
		       stat->nd_map->dm_mapsize);
		printf("NDMAP: nd_map->dm_nsegs = %d\n",
		       stat->nd_map->dm_nsegs);
		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
		       stat->nd_map->dm_xfer_len);
		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		printf("NDMAP: Entire map;\n");
		for(i=0;i<stat->nd_map->dm_nsegs;i++) {
			printf("NDMAP:   nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
			       i,stat->nd_map->dm_segs[i].ds_addr);
			printf("NDMAP:   nd_map->dm_segs[%d].ds_len = %ld\n",
			       i,stat->nd_map->dm_segs[i].ds_len);
		}
	} else {
		printf("NDMAP: nd_map = NULL\n");
	}
	/* Continue map, same layout; full segment dump only when it is
	 * a different map from the current one. */
	if (stat->nd_map_cont) {
		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
		       stat->nd_map_cont->dm_mapsize);
		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
		       stat->nd_map_cont->dm_nsegs);
		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
		       stat->nd_map_cont->dm_xfer_len);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		if (stat->nd_map_cont != stat->nd_map) {
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<stat->nd_map_cont->dm_nsegs;i++) {
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_len = %ld\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd_map_cont = NULL\n");
	}

	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);

	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);

	bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
			 sbuf, sizeof(sbuf));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
}
921 
922 #if defined(ND_DEBUG)
923 void
924 nextdma_debug_initstate(struct nextdma_softc *nsc)
925 {
926 	switch(nsc->sc_chan->nd_intr) {
927 	case NEXT_I_ENETR_DMA:
928 		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
929 		break;
930 	case NEXT_I_SCSI_DMA:
931 		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
932 		break;
933 	}
934 }
935 
936 void
937 nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
938 {
939 	switch(nsc->sc_chan->nd_intr) {
940 	case NEXT_I_ENETR_DMA:
941 		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
942 		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
943 		break;
944 	case NEXT_I_SCSI_DMA:
945 		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
946 		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
947 		break;
948 	}
949 }
950 
951 void
952 nextdma_debug_enetr_dumpstate(void)
953 {
954 	int i;
955 	int s;
956 	s = spldma();
957 	i = nextdma_debug_enetr_idx;
958 	do {
959 		char sbuf[256];
960 		if (nextdma_debug_enetr_state[i]) {
961 			bitmask_snprintf(nextdma_debug_enetr_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
962 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
963 		}
964 		i++;
965 		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
966 	} while (i != nextdma_debug_enetr_idx);
967 	splx(s);
968 }
969 
970 void
971 nextdma_debug_scsi_dumpstate(void)
972 {
973 	int i;
974 	int s;
975 	s = spldma();
976 	i = nextdma_debug_scsi_idx;
977 	do {
978 		char sbuf[256];
979 		if (nextdma_debug_scsi_state[i]) {
980 			bitmask_snprintf(nextdma_debug_scsi_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
981 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
982 		}
983 		i++;
984 		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
985 	} while (i != nextdma_debug_scsi_idx);
986 	splx(s);
987 }
988 #endif
989 
990