/*	$NetBSD: if_xe.c,v 1.11 2002/10/02 04:22:52 thorpej Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/bus.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/mb8795reg.h>
#include <next68k/dev/mb8795var.h>

#include <next68k/dev/bmapreg.h>
#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/if_xevar.h>
#include <next68k/dev/if_xereg.h>

#ifdef DEBUG
#define XE_DEBUG
#endif

#ifdef XE_DEBUG
int xe_debug = 0;
#define DPRINTF(x) if (xe_debug) printf x;
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x;

extern int turbo;

int	xe_match __P((struct device *, struct cfdata *, void *));
void	xe_attach __P((struct device *, struct device *, void *));
int	xe_tint __P((void *));
int	xe_rint __P((void *));

struct mbuf * xe_dma_rxmap_load __P((struct mb8795_softc *,
		bus_dmamap_t map));

bus_dmamap_t xe_dma_rx_continue __P((void *));
void xe_dma_rx_completed __P((bus_dmamap_t,void *));
bus_dmamap_t xe_dma_tx_continue __P((void *));
void xe_dma_tx_completed __P((bus_dmamap_t,void *));
void xe_dma_rx_shutdown __P((void *));
void xe_dma_tx_shutdown __P((void *));

static void	findchannel_defer __P((struct device *));

CFATTACH_DECL(xe, sizeof(struct xe_softc),
    xe_match, xe_attach, NULL, NULL);

static int xe_dma_medias[] = {
	IFM_ETHER|IFM_AUTO,
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char		xe_read_reg __P((struct mb8795_softc *, int));
void		xe_write_reg __P((struct mb8795_softc *, int, u_char));
void		xe_dma_reset __P((struct mb8795_softc *));
void		xe_dma_rx_setup __P((struct mb8795_softc *));
void		xe_dma_rx_go __P((struct mb8795_softc *));
struct mbuf *	xe_dma_rx_mbuf __P((struct mb8795_softc *));
void		xe_dma_tx_setup __P((struct mb8795_softc *));
void		xe_dma_tx_go __P((struct mb8795_softc *));
int		xe_dma_tx_mbuf __P((struct mb8795_softc *, struct mbuf *));
int		xe_dma_tx_isactive __P((struct mb8795_softc *));
#if 0
int	xe_dma_setup __P((struct mb8795_softc *, caddr_t *,
	    size_t *, int, size_t *));
void	xe_dma_go __P((struct mb8795_softc *));
void	xe_dma_stop __P((struct mb8795_softc *));
int	xe_dma_isactive __P((struct mb8795_softc *));
#endif

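/*
 * Glue switch handed to the machine-independent mb8795 driver; the MI
 * code performs register access and DMA management only through these
 * entry points.
 */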
struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
#if 0
	xe_dma_setup,
	xe_dma_go,
	xe_dma_stop,
	xe_dma_isactive,
	NULL,			/* gl_clear_latched_intr */
#endif
};

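/*
 * The on-board Ethernet controller lives at a fixed address, so matching
 * just records NEXT_P_ENET in the attach args and succeeds for the first
 * (and only) instance.
 */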
int
xe_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_ENET;

	return (1);
}

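/*
 * Deferred part of attachment: resolve the "enetx" and "enetr" DMA
 * channels, register the DMA callbacks, create the transmit map and the
 * MB8795_NRXBUFS receive maps, allocate the transmit bounce buffer,
 * configure the MI driver, and hook up the transmit and receive
 * interrupts.
 */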
static void
findchannel_defer(self)
	struct device *self;
{
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel ("enetx");
		if (xsc->sc_txdma == NULL)
			panic ("%s: can't find enetx dma channel",
			       sc->sc_dev.dv_xname);
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel ("enetr");
		if (xsc->sc_rxdma == NULL)
			panic ("%s: can't find enetr dma channel",
			       sc->sc_dev.dv_xname);
	}
	printf ("%s: using dma channels %s %s\n", sc->sc_dev.dv_xname,
		xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);

	nextdma_setconf (xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf (xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf (xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf (xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf (xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf (xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf (xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf (xsc->sc_txdma, cb_arg, sc);

	/* Initialize the dma maps */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
				  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
				  &xsc->sc_tx_dmamap);
	if (error) {
		panic("%s: can't create tx DMA map, error = %d",
		      sc->sc_dev.dv_xname, error);
	}

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
					  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
					  &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d",
			      sc->sc_dev.dv_xname, error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	/* @@@ more NeXT hacks:
	 * the 2000 bytes cover at least a 1500-byte MTU + headers
	 * + DMA_BEGINALIGNMENT + DMA_ENDALIGNMENT
	 */
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}

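/*
 * Attach: copy the MAC address saved from the ROM, map the MB8795 and
 * BMAP register windows, install the glue switch, and either finish DMA
 * setup now or defer it until the DMA channels have attached.
 */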
void
xe_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	DPRINTF(("%s: xe_attach()\n",sc->sc_dev.dv_xname));

	{
		extern u_char rom_enetaddr[6];     /* kludge from machdep.c:next68k_bootargs() */
		int i;
		for(i=0;i<6;i++) {
			sc->sc_enaddr[i] = rom_enetaddr[i];
		}
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       sc->sc_dev.dv_xname,
	       sc->sc_enaddr[0],sc->sc_enaddr[1],sc->sc_enaddr[2],
	       sc->sc_enaddr[3],sc->sc_enaddr[4],sc->sc_enaddr[5]);

	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
			  XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		      sc->sc_dev.dv_xname);
	}

	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
			  BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers",
		      sc->sc_dev.dv_xname);
	}

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

	xsc->sc_txdma = nextdma_findchannel ("enetx");
	xsc->sc_rxdma = nextdma_findchannel ("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma) {
		findchannel_defer (self);
	} else {
		config_defer (self, findchannel_defer);
	}

	attached = 1;
}

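/*
 * Autovectored interrupt handlers: check that our interrupt is actually
 * pending and hand the event to the MI mb8795 interrupt routines.
 */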
int
xe_tint(arg)
	void *arg;
{
	if (!INTR_OCCURRED(NEXT_I_ENETX))
		return 0;
	mb8795_tint((struct mb8795_softc *)arg);
	return(1);
}

int
xe_rint(arg)
	void *arg;
{
	if (!INTR_OCCURRED(NEXT_I_ENETR))
		return(0);
	mb8795_rint((struct mb8795_softc *)arg);
	return(1);
}

/*
 * Glue functions.
 */

u_char
xe_read_reg(sc, reg)
	struct mb8795_softc *sc;
	int reg;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return(bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
}

void
xe_write_reg(sc, reg, val)
	struct mb8795_softc *sc;
	int reg;
	u_char val;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
}

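/*
 * Reset both DMA channels and release any transmit or receive mbufs
 * that are still loaded, unloading their DMA maps.
 */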
void
xe_dma_reset(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe dma reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}

void
xe_dma_rx_setup (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe dma rx setup\n"));

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
			xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
}

void
xe_dma_rx_go (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe dma rx go\n"));

	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
}

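/*
 * Receive bookkeeping: sc_rx_loaded_idx is the last map handed to the
 * DMA engine, sc_rx_completed_idx the last map the engine has finished,
 * and sc_rx_handled_idx the last map passed up to the MI driver.  This
 * routine hands the next completed buffer to the MI code and reloads
 * its map with a fresh mbuf.
 */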
struct mbuf *
xe_dma_rx_mbuf (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
				0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for next packet */

		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
			xe_dma_rxmap_load(sc,map);

		/* Punt runt packets;
		 * DMA restarts create 0-length packets, for example.
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return (m);
}

void
xe_dma_tx_setup (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe dma tx setup\n"));

	nextdma_init(xsc->sc_txdma);
}

void
xe_dma_tx_go (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe dma tx go\n"));

	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
}

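/*
 * Queue an outgoing mbuf chain: the chain is copied into the DMA-aligned
 * bounce buffer (sc_txbuf), padded to the minimum Ethernet frame length
 * if necessary, and the buffer is then loaded into the transmit DMA map.
 */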
int
xe_dma_tx_mbuf (sc, m)
	struct mb8795_softc *sc;
	struct mbuf *m;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

/* The following is a NeXT-specific hack that should
 * probably be moved out of MI code.
 * This macro assumes it can move forward as needed
 * in the buffer.  Perhaps it should zero the extra buffer.
 */
#define REALIGN_DMABUF(s,l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
			&~(DMA_BEGINALIGNMENT-1))); \
    (l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
				&~(DMA_ENDALIGNMENT-1)))-(s);}

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
				     xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = 0;

		buflen = m->m_pkthdr.len;

		/* Fix runt packets,  @@@ memory overrun */
		if (buflen < ETHERMIN+sizeof(struct ether_header)) {
			buflen = ETHERMIN+sizeof(struct ether_header);
		}

		{
			u_char *p = buf;
			for (m=xsc->sc_tx_mb_head; m; m = m->m_next) {
				if (m->m_len == 0) continue;
				bcopy(mtod(m, u_char *), p, m->m_len);
				p += m->m_len;
			}
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
					buf,buflen,NULL,BUS_DMA_NOWAIT);
	}
#endif
	if (error) {
		printf("%s: can't load mbuf chain, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return (error);
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
		      xsc->sc_tx_loaded);
	}
#endif

	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
			xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xe_dma_tx_isactive (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (xsc->sc_tx_loaded != 0);
}

/****************************************************************/

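/*
 * Callbacks registered with the nextdma layer in findchannel_defer():
 * the "completed" hooks account for finished maps and the "shutdown"
 * hooks clean up and restart the channels.
 */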
void
xe_dma_tx_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("%s: xe_dma_tx_completed()\n",sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded",sc->sc_dev.dv_xname);
	}
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map",sc->sc_dev.dv_xname);
	}

#endif
}

void
xe_dma_tx_shutdown(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n",sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded",sc->sc_dev.dv_xname);
	}
#endif

	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: sc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
		      xsc->sc_tx_loaded);
	}
#endif

	ifp->if_timer = 0;

#if 1
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma __P((struct mb8795_softc *)); /* XXXX */
		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt */
	MB_WRITE_REG(sc, MB8795_TXMASK,
		     MB_READ_REG(sc, MB8795_TXMASK)
		     | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}


void
xe_dma_rx_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), sc->sc_rx_completed_idx = %d\n",
			 sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));

#if (defined(DIAGNOSTIC))
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: Unexpected rx dmamap completed",
			      sc->sc_dev.dv_xname);
		}
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dmamap completed while if not running\n",
			 sc->sc_dev.dv_xname));
#endif
}

void
xe_dma_rx_shutdown(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
			 sc->sc_dev.dv_xname));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE, MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dma shutdown while if not running\n",
			 sc->sc_dev.dv_xname));
#endif
}

/*
 * load a dmamap with a freshly allocated mbuf
 */
struct mbuf *
xe_dma_rxmap_load(sc,map)
	struct mb8795_softc *sc;
	bus_dmamap_t map;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		} else {
			m->m_len = MCLBYTES;
		}
	}
	if (!m) {
		/* @@@ Handle this gracefully by reusing a scratch buffer
		 * or something.
		 */
		panic("Unable to get memory for incoming ethernet");
	}

	/* Align the buffer, @@@ NeXT-specific.
	 * Perhaps we should be using M_ALIGN here instead?
	 * First we give ourselves a little room to align with.
	 */
	{
		u_char *buf = m->m_data;
		int buflen = m->m_len;
		buflen -= DMA_ENDALIGNMENT+DMA_BEGINALIGNMENT;
		REALIGN_DMABUF(buf, buflen);
		m->m_data = buf;
		m->m_len = buflen;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len;

	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
			map, m, BUS_DMA_NOWAIT);

	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
			map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (error) {
		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
				m->m_data, m->m_len));
		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
				MCLBYTES, map->_dm_size));

		panic("%s: can't load rx mbuf chain, error = %d",
				sc->sc_dev.dv_xname, error);
		m_freem(m);
		m = NULL;
	}

	return(m);
}

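/*
 * Supply the DMA engine with the next receive map.  If the ring has
 * caught up with the MI driver, the oldest unread packet is dropped to
 * make room.
 */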
bus_dmamap_t
xe_dma_rx_continue(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx+1)%MB8795_NRXBUFS) == xsc->sc_rx_handled_idx) {
			/* make space for one packet by dropping one */
			struct mbuf *m;
			m = xe_dma_rx_mbuf (sc);
			if (m)
				m_freem(m);
#if (defined(DIAGNOSTIC))
			DPRINTF(("%s: out of receive DMA buffers\n",sc->sc_dev.dv_xname));
#endif
		}
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\n",
			 sc->sc_dev.dv_xname,xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: Unexpected rx dma continue while if not running",
		      sc->sc_dev.dv_xname);
#endif

	return(map);
}

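/*
 * Hand the single transmit map to the DMA engine; while a transmit is
 * already loaded (sc_tx_loaded is set) no further map is available.
 */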
bus_dmamap_t
xe_dma_tx_continue(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n",sc->sc_dev.dv_xname));

	if (xsc->sc_tx_loaded) {
		map = NULL;
	} else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1) {
		panic("%s: sc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
				xsc->sc_tx_loaded);
	}
#endif

	return(map);
}