xref: /netbsd/sys/arch/next68k/dev/nextdma.c (revision bf9ec67e)
1 /*	$NetBSD: nextdma.c,v 1.29 2001/06/16 09:18:46 dbj Exp $	*/
2 /*
3  * Copyright (c) 1998 Darrin B. Jewell
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Darrin B. Jewell
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/mbuf.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/ioctl.h>
40 #include <sys/errno.h>
41 
42 #include <machine/autoconf.h>
43 #include <machine/cpu.h>
44 #include <machine/intr.h>
45 
46 #include <m68k/cacheops.h>
47 
48 #include <next68k/next68k/isr.h>
49 
50 #define _NEXT68K_BUS_DMA_PRIVATE
51 #include <machine/bus.h>
52 
53 #include "nextdmareg.h"
54 #include "nextdmavar.h"
55 
56 #if 1
57 #define ND_DEBUG
58 #endif
59 
#if defined(ND_DEBUG)
/* Runtime-tunable debug verbosity; 0 silences all DPRINTF output. */
int nextdma_debug = 0;
/* NOTE(review): unbraced if in a statement macro -- dangling-else hazard
 * if used as the body of an if/else at a call site; left as-is. */
#define DPRINTF(x) if (nextdma_debug) printf x;
#else
#define DPRINTF(x)
#endif

#if defined(ND_DEBUG)
/* Per-channel rings tracing recent DMA CSR values seen in the interrupt
 * handler; idx is the next slot to be overwritten (i.e. the oldest entry). */
int nextdma_debug_enetr_idx = 0;
unsigned int nextdma_debug_enetr_state[100] = { 0 };
int nextdma_debug_scsi_idx = 0;
unsigned int nextdma_debug_scsi_state[100] = { 0 };

/* Forward declarations for the trace helpers defined below. */
void nextdma_debug_initstate(struct nextdma_config *nd);
void nextdma_debug_savestate(struct nextdma_config *nd, unsigned int state);
void nextdma_debug_scsi_dumpstate(void);
void nextdma_debug_enetr_dumpstate(void);
77 
78 void
79 nextdma_debug_initstate(struct nextdma_config *nd)
80 {
81 	switch(nd->nd_intr) {
82 	case NEXT_I_ENETR_DMA:
83 		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
84 		break;
85 	case NEXT_I_SCSI_DMA:
86 		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
87 		break;
88 	}
89 }
90 
91 void
92 nextdma_debug_savestate(struct nextdma_config *nd, unsigned int state)
93 {
94 	switch(nd->nd_intr) {
95 	case NEXT_I_ENETR_DMA:
96 		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
97 		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
98 		break;
99 	case NEXT_I_SCSI_DMA:
100 		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
101 		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
102 		break;
103 	}
104 }
105 
106 void
107 nextdma_debug_enetr_dumpstate(void)
108 {
109 	int i;
110 	int s;
111 	s = spldma();
112 	i = nextdma_debug_enetr_idx;
113 	do {
114 		char sbuf[256];
115 		if (nextdma_debug_enetr_state[i]) {
116 			bitmask_snprintf(nextdma_debug_enetr_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
117 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
118 		}
119 		i++;
120 		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
121 	} while (i != nextdma_debug_enetr_idx);
122 	splx(s);
123 }
124 
125 void
126 nextdma_debug_scsi_dumpstate(void)
127 {
128 	int i;
129 	int s;
130 	s = spldma();
131 	i = nextdma_debug_scsi_idx;
132 	do {
133 		char sbuf[256];
134 		if (nextdma_debug_scsi_state[i]) {
135 			bitmask_snprintf(nextdma_debug_scsi_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
136 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
137 		}
138 		i++;
139 		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
140 	} while (i != nextdma_debug_scsi_idx);
141 	splx(s);
142 }
143 #endif
144 
145 
/* Forward declarations for file-local helpers (old-style __P prototypes). */
void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
                       bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

/* Program the hardware "continue" and "current" register pairs. */
void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));
153 
154 void
155 nextdma_config(nd)
156 	struct nextdma_config *nd;
157 {
158 	/* Initialize the dma_tag. As a hack, we currently
159 	 * put the dma tag in the structure itself.  It shouldn't be there.
160 	 */
161 
162 	{
163 		bus_dma_tag_t t;
164 		t = &nd->_nd_dmat;
165 		t->_cookie = nd;
166 		t->_dmamap_create = _bus_dmamap_create;
167 		t->_dmamap_destroy = _bus_dmamap_destroy;
168 		t->_dmamap_load = _bus_dmamap_load_direct;
169 		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
170 		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
171 		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
172 		t->_dmamap_unload = _bus_dmamap_unload;
173 		t->_dmamap_sync = _bus_dmamap_sync;
174 
175 		t->_dmamem_alloc = _bus_dmamem_alloc;
176 		t->_dmamem_free = _bus_dmamem_free;
177 		t->_dmamem_map = _bus_dmamem_map;
178 		t->_dmamem_unmap = _bus_dmamem_unmap;
179 		t->_dmamem_mmap = _bus_dmamem_mmap;
180 
181 		nd->nd_dmat = t;
182 	}
183 
184 	nextdma_init(nd);
185 
186 	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
187 	INTR_ENABLE(nd->nd_intr);
188 }
189 
/*
 * (Re)initialize a DMA channel: forget any software map state,
 * pulse RESET|INITBUF through the CSR, and load sentinel values
 * into the current and continue register sets.  Safe to call from
 * nextdma_config() and nextdma_reset().
 */
void
nextdma_init(nd)
	struct nextdma_config *nd;
{
#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	/* Drop all software transfer state. */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	/* Clear the CSR, then issue the reset/initbuf pulse. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_RESET | DMACSR_INITBUF);

	/* Both maps are NULL here, so these load 0xdeadbeef sentinels. */
	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
	/* mourning (a 25Mhz 68040 mono slab) appears to set BUSEXC
	 * milo (a 25Mhz 68040 mono cube) didn't have this problem
	 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
	 */
    state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
    state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
              DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		/* Any surviving status bit means the reset did not take. */
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}
239 
240 
241 void
242 nextdma_reset(nd)
243 	struct nextdma_config *nd;
244 {
245 	int s;
246 	s = spldma();
247 
248 	DPRINTF(("DMA reset\n"));
249 
250 #if (defined(ND_DEBUG))
251 	if (nextdma_debug) next_dma_print(nd);
252 #endif
253 
254 	if ((nd->_nd_map) || (nd->_nd_map_cont)) {
255 		/* @@@ clean up dma maps */
256 		panic("DMA abort not implemented\n");
257 	}
258 
259 	nextdma_init(nd);
260 	splx(s);
261 }
262 
263 /****************************************************************/
264 
265 
266 /* Call the completed and continue callbacks to try to fill
267  * in the dma continue buffers.
268  */
269 void
270 next_dma_rotate(nd)
271 	struct nextdma_config *nd;
272 {
273 
274 	DPRINTF(("DMA next_dma_rotate()\n"));
275 
276 	/* Rotate the continue map into the current map */
277 	nd->_nd_map = nd->_nd_map_cont;
278 	nd->_nd_idx = nd->_nd_idx_cont;
279 
280 	if ((!nd->_nd_map_cont) ||
281 			((nd->_nd_map_cont) &&
282 					(++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
283 		if (nd->nd_continue_cb) {
284 			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
285 			if (nd->_nd_map_cont) {
286 				nd->_nd_map_cont->dm_xfer_len = 0;
287 			}
288 		} else {
289 			nd->_nd_map_cont = 0;
290 		}
291 		nd->_nd_idx_cont = 0;
292 	}
293 
294 #if defined(DIAGNOSTIC) && 0
295 	if (nd->_nd_map_cont) {
296 		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
297 			next_dma_print(nd);
298 			panic("DMA request unaligned at start\n");
299 		}
300 		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
301 				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
302 			next_dma_print(nd);
303 			panic("DMA request unaligned at end\n");
304 		}
305 	}
306 #endif
307 
308 }
309 
/*
 * Program the hardware "continue" register set (START/STOP plus their
 * SAVED_ shadows) from the current segment of the continue map.  When
 * no continue map is queued the registers are loaded with 0xdeadbeef
 * sentinels so stray hardware use is recognizable in next_dma_print().
 */
void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;

	DPRINTF(("DMA next_dma_setup_regs()\n"));

	if (nd->_nd_map_cont) {
		/* Segment bounds: [ds_addr, ds_addr + ds_len). */
		dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
		dd_stop  = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);

		/* Undone again on completion in nextdma_intr(). */
		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000;		/* Ethernet transmit needs secret magic */
			dd_stop += 15;
		}
	} else {
		dd_start = 0xdeadbeef;
		dd_stop = 0xdeadbeef;
	}

	dd_saved_start = dd_start;
	dd_saved_stop  = dd_stop;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);

#ifdef DIAGNOSTIC
	/* Read each register back to catch wedged or misdecoded hardware. */
	if (   (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)
			) {
		next_dma_print(nd);
		panic("DMA failure writing to continue regs");
	}
#endif
}
354 
/*
 * Program the hardware "current" register set (NEXT/LIMIT plus their
 * SAVED_ shadows) from the current segment of the current map.  When
 * no map is loaded the registers are loaded with 0xdeadbeef sentinels.
 * The ethernet-transmit channel takes the next pointer through
 * DD_NEXT_INITBUF instead of DD_NEXT.
 */
void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));


	if (nd->_nd_map) {
		/* Segment bounds: [ds_addr, ds_addr + ds_len). */
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);

		/* Undone again on completion in nextdma_intr(). */
		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_limit += 15;
		}
	} else {
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	/* Read each register back to catch wedged or misdecoded hardware.
	 * NOTE(review): both DD_NEXT_INITBUF and DD_NEXT are compared to
	 * dd_next even though only one was written above -- presumably the
	 * hardware aliases them; confirm before touching this check. */
	if (   (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)
			) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}
405 
406 
/*
 * Debugging aid: dump the full software state (current and continue
 * maps with all their segments) and a snapshot of every hardware
 * register for this channel.  Called from panic paths and when
 * nextdma_debug is raised.
 */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	/* System-wide interrupt status/mask, decoded symbolically. */
	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	/* Current map: summary, active segment, then every segment. */
	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %ld\n",
				nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
				nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_xfer_len = %ld\n",
				nd->_nd_map->dm_xfer_len);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %ld\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		{
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<nd->_nd_map->dm_nsegs;i++) {
				printf("NDMAP:   nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
						i,nd->_nd_map->dm_segs[i].ds_addr);
				printf("NDMAP:   nd->_nd_map->dm_segs[%d].ds_len = %ld\n",
						i,nd->_nd_map->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	/* Continue map: same treatment, but don't re-dump a shared map. */
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %ld\n",
				nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
				nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_xfer_len = %ld\n",
				nd->_nd_map_cont->dm_xfer_len);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %ld\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		if (nd->_nd_map_cont != nd->_nd_map) {
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<nd->_nd_map_cont->dm_nsegs;i++) {
				printf("NDMAP:   nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
						i,nd->_nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP:   nd->_nd_map_cont->dm_segs[%d].ds_len = %ld\n",
						i,nd->_nd_map_cont->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	/* Hardware register snapshot taken above. */
	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);

	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);

	bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
			 sbuf, sizeof(sbuf));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
}
516 
517 /****************************************************************/
518 
/*
 * Autovectored interrupt handler for a DMA channel.  Returns 0 if the
 * interrupt was not for this channel, 1 otherwise.  On COMPLETE it
 * figures out how far the hardware actually got (slimit), credits
 * that to the current map, invokes the completed callback at segment
 * boundaries, and either restarts the engine with the continue map or
 * shuts the channel down.
 */
int
nextdma_intr(arg)
     void *arg;
{
  /* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
  struct nextdma_config *nd = arg;

  if (!INTR_OCCURRED(nd->nd_intr)) return 0;
  /* Handle dma interrupts */

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

  {
    /* Snapshot the CSR once; all decisions below are based on it. */
    unsigned int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if defined(ND_DEBUG)
		nextdma_debug_savestate(nd,state);
#endif

#ifdef DIAGNOSTIC
		if (!(state & DMACSR_COMPLETE)) {
			char sbuf[256];
			next_dma_print(nd);
			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
			printf("DMA: state 0x%s\n",sbuf);
			panic("DMA complete not set in interrupt\n");
		}
#endif

		{
			/* onext/olimit: bounds of the segment just transferred;
			 * slimit: the address the hardware actually stopped at. */
			bus_addr_t onext;
			bus_addr_t olimit;
			bus_addr_t slimit;

			DPRINTF(("DMA: finishing xfer\n"));

			onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
			olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

			{
				/* Encode the relevant CSR/software conditions as bits:
				 * 0x01 = ENABLE, 0x02 = SUPDATE, 0x04 = expecting
				 * shutdown (no continue map), 0x08 = BUSEXC.  The
				 * switch below selects which register holds the true
				 * stopping point for each observed combination. */
				int result = 0;
				if (state & DMACSR_ENABLE) {
					/* enable bit was set */
					result |= 0x01;
				}
				if (state & DMACSR_SUPDATE) {
					/* supdate bit was set */
					result |= 0x02;
				}
				if (nd->_nd_map_cont == NULL) {
					KASSERT(nd->_nd_idx+1 == nd->_nd_map->dm_nsegs);
					/* Expecting a shutdown, didn't SETSUPDATE last turn */
					result |= 0x04;
				}
				if (state & DMACSR_BUSEXC) {
					/* bus exception bit was set */
					result |= 0x08;
				}
				switch (result) {
				case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
				case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
					if (nd->nd_intr == NEXT_I_SCSI_DMA) {
						slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
					} else {
						slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
					}
					break;
				case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
				case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
					if (nd->nd_intr == NEXT_I_SCSI_DMA) {
						/* If SAVED_NEXT moved past this segment's start,
						 * the whole segment completed. */
						bus_addr_t snext;
						snext = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
						if (snext != onext) {
							slimit = olimit;
						} else {
							slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
						}
					} else {
						slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
					}
					break;
				case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
				case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
					break;
				case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
				case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
					break;
				default:
#ifdef DIAGNOSTIC
					{
						char sbuf[256];
						printf("DMA: please send this output to port-next68k-maintainer@netbsd.org:\n");
						bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
						printf("DMA: state 0x%s\n",sbuf);
						next_dma_print(nd);
						panic("DMA: condition 0x%02x not yet documented to occur\n",result);
					}
#endif
					slimit = olimit;
					break;
				}
			}

			/* Strip the ethernet-transmit magic applied in
			 * next_dma_setup_{curr,cont}_regs(). */
			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
				slimit &= ~0x80000000;
				slimit -= 15;
			}

#ifdef DIAGNOSTIC
			/* The stopping point must lie within the segment. */
			if ((slimit < onext) || (slimit > olimit)) {
				char sbuf[256];
				bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
				printf("DMA: state 0x%s\n",sbuf);
				next_dma_print(nd);
				panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer\n",slimit);
			}
#endif

#ifdef DIAGNOSTIC
			/* Mid-map segments must complete fully while still enabled. */
			if ((state & DMACSR_ENABLE) && ((nd->_nd_idx+1) != nd->_nd_map->dm_nsegs)) {
				if (slimit != olimit) {
					char sbuf[256];
					bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
					printf("DMA: state 0x%s\n",sbuf);
					next_dma_print(nd);
					panic("DMA: short limit register (0x%08lx) w/o finishing map.\n",slimit);
				}
			}
#endif

#if (defined(ND_DEBUG))
			if (nextdma_debug > 2) next_dma_print(nd);
#endif

			/* Credit the bytes actually moved to this map. */
			nd->_nd_map->dm_xfer_len += slimit-onext;

			/* If we've reached the end of the current map, then inform
			 * that we've completed that map.
			 */
			if ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs) {
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
			} else {
				KASSERT(nd->_nd_map == nd->_nd_map_cont);
				KASSERT(nd->_nd_idx+1 == nd->_nd_idx_cont);
			}
			nd->_nd_map = 0;
			nd->_nd_idx = 0;
		}

		/* ENABLE still set: the engine keeps running; rotate in the
		 * next segment/map and acknowledge the completion. */
		if (state & DMACSR_ENABLE) {

			next_dma_rotate(nd);
			next_dma_setup_cont_regs(nd);

			{
				u_long dmadir;								/* 	DMACSR_SETREAD or DMACSR_SETWRITE */

				/* Preserve the direction of the running transfer. */
				if (state & DMACSR_READ) {
					dmadir = DMACSR_SETREAD;
				} else {
					dmadir = DMACSR_SETWRITE;
				}

				if (nd->_nd_map_cont == NULL) {
					KASSERT(nd->_nd_idx+1 == nd->_nd_map->dm_nsegs);
					/* Nothing queued behind us: let the engine stop
					 * after the current map (no SETSUPDATE). */
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_CLRCOMPLETE | dmadir);
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
				}
			}

		} else {

			DPRINTF(("DMA: a shutdown occurred\n"));
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

			/* Cleanup more incomplete transfers */
#if 1
			/* cleanup continue map */
			if (nd->_nd_map_cont) {
				DPRINTF(("DMA: shutting down with non null continue map\n"));
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map_cont, nd->nd_cb_arg);

				nd->_nd_map_cont = 0;
				nd->_nd_idx_cont = 0;
			}
#else
			/* Compiled out: automatic dma restart after a shutdown. */
			/* Do an automatic dma restart */
			if (nd->_nd_map_cont) {
				u_long dmadir;								/* 	DMACSR_SETREAD or DMACSR_SETWRITE */

				next_dma_rotate(nd);

				if (state & DMACSR_READ) {
					dmadir = DMACSR_SETREAD;
				} else {
					dmadir = DMACSR_SETWRITE;
				}

				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_INITBUF | DMACSR_RESET | dmadir);

				next_dma_setup_curr_regs(nd);
				next_dma_setup_cont_regs(nd);

				if (nd->_nd_map_cont == NULL) {
					KASSERT(nd->_nd_idx+1 == nd->_nd_map->dm_nsegs);
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_SETENABLE | dmadir);
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
				}
				return 1;
			}
#endif
			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
		}
	}

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

  return(1);
}
779 
780 /*
781  * Check to see if dma has finished for a channel */
782 int
783 nextdma_finished(nd)
784 	struct nextdma_config *nd;
785 {
786 	int r;
787 	int s;
788 	s = spldma();									/* @@@ should this be splimp()? */
789 	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
790 	splx(s);
791 	return(r);
792 }
793 
/*
 * Kick off a new transfer on an idle channel.  dmadir selects the
 * direction (DMACSR_SETREAD or DMACSR_SETWRITE).  The current and
 * continue maps are preloaded via two next_dma_rotate() calls, the
 * hardware is reset and programmed, and the engine is enabled (with
 * SETSUPDATE when a continue map is queued).
 */
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;								/* 	DMACSR_SETREAD or DMACSR_SETWRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		panic("DMA trying to start before previous finished on intr(0x%s)\n", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA start (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
	}
#endif

#if defined(ND_DEBUG)
	nextdma_debug_initstate(nd);
#endif

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* Second rotate: continue slot -> current slot, refill continue. */
	next_dma_rotate(nd);

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
			(dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs, sbuf);
	}
#endif

	/* Clear the CSR, then pulse reset/initbuf with the direction bit. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug > 2) next_dma_print(nd);
#endif

	/* Enable; request a chained update only if more work is queued. */
	if (nd->_nd_map_cont == NULL) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}
883