/*	$NetBSD: if_xe.c,v 1.21 2010/04/24 19:58:13 dbj Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xe.c,v 1.21 2010/04/24 19:58:13 dbj Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/bus.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/mb8795reg.h>
#include <next68k/dev/mb8795var.h>

#include <next68k/dev/bmapreg.h>
#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/if_xevar.h>
#include <next68k/dev/if_xereg.h>

#ifdef DEBUG
#define XE_DEBUG
#endif

#ifdef XE_DEBUG
int xe_debug = 0;
#define DPRINTF(x) do { if (xe_debug) printf x; } while (0)
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x
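
/*
 * Usage note: these macros take a parenthesized printf() argument
 * list, so a call supplies two sets of parentheses, e.g.:
 *
 *	DPRINTF(("%s: attach\n", sc->sc_dev.dv_xname));
 *
 * which lets the non-debug build compile the call away without
 * needing C99 variadic macros.
 */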

extern int turbo;

int	xe_match(struct device *, struct cfdata *, void *);
void	xe_attach(struct device *, struct device *, void *);
int	xe_tint(void *);
int	xe_rint(void *);

struct mbuf * xe_dma_rxmap_load(struct mb8795_softc *, bus_dmamap_t);

bus_dmamap_t xe_dma_rx_continue(void *);
void xe_dma_rx_completed(bus_dmamap_t, void *);
bus_dmamap_t xe_dma_tx_continue(void *);
void xe_dma_tx_completed(bus_dmamap_t, void *);
void xe_dma_rx_shutdown(void *);
void xe_dma_tx_shutdown(void *);

static void	findchannel_defer(struct device *);

CFATTACH_DECL(xe, sizeof(struct xe_softc),
    xe_match, xe_attach, NULL, NULL);

static int xe_dma_medias[] = {
	IFM_ETHER|IFM_AUTO,
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));
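
/*
 * This media list is handed to mb8795_config() in findchannel_defer()
 * below, with xe_dma_medias[0] (autoselect) as the default medium.
 */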

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char		xe_read_reg(struct mb8795_softc *, int);
void		xe_write_reg(struct mb8795_softc *, int, u_char);
void		xe_dma_reset(struct mb8795_softc *);
void		xe_dma_rx_setup(struct mb8795_softc *);
void		xe_dma_rx_go(struct mb8795_softc *);
struct mbuf *	xe_dma_rx_mbuf(struct mb8795_softc *);
void		xe_dma_tx_setup(struct mb8795_softc *);
void		xe_dma_tx_go(struct mb8795_softc *);
int		xe_dma_tx_mbuf(struct mb8795_softc *, struct mbuf *);
int		xe_dma_tx_isactive(struct mb8795_softc *);

struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
};
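
/*
 * A minimal sketch (not compiled in) of how the MI mb8795 core is
 * assumed to dispatch through this switch; the real accessor macros
 * live in mb8795var.h, and the member names below are hypothetical.
 */
#if 0
#define MB_READ_REG(sc, reg) \
	((*(sc)->sc_glue->glue_read_reg)((sc), (reg)))
#define MB_WRITE_REG(sc, reg, val) \
	((*(sc)->sc_glue->glue_write_reg)((sc), (reg), (val)))
#endif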

int
xe_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_ENET;

	return (1);
}

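/*
 * Deferred half of attach: locate the enetx/enetr DMA channels,
 * register the DMA callbacks, create the tx/rx DMA maps, allocate the
 * tx bounce buffer, and hook up the interrupts.
 */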
static void
findchannel_defer(struct device *self)
{
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel ("enetx");
		if (xsc->sc_txdma == NULL)
			panic ("%s: can't find enetx DMA channel",
			       sc->sc_dev.dv_xname);
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel ("enetr");
		if (xsc->sc_rxdma == NULL)
			panic ("%s: can't find enetr DMA channel",
			       sc->sc_dev.dv_xname);
	}
	printf ("%s: using DMA channels %s %s\n", sc->sc_dev.dv_xname,
		xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);

	nextdma_setconf (xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf (xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf (xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf (xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf (xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf (xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf (xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf (xsc->sc_txdma, cb_arg, sc);

	/* Initialize the DMA maps */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
				  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
				  &xsc->sc_tx_dmamap);
	if (error) {
		panic("%s: can't create tx DMA map, error = %d",
		      sc->sc_dev.dv_xname, error);
	}

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
					  (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
					  &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d",
			      sc->sc_dev.dv_xname, error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;
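
	/*
	 * The three indices implement a ring over the MB8795_NRXBUFS
	 * maps: sc_rx_loaded_idx is the last map handed to the DMA
	 * engine (xe_dma_rx_continue), sc_rx_completed_idx the last
	 * map the engine finished (xe_dma_rx_completed), and
	 * sc_rx_handled_idx the last map drained by xe_dma_rx_mbuf.
	 * Each chases the previous one modulo MB8795_NRXBUFS.
	 */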

	/* @@@ more NeXT hacks:
	 * the 2000 bytes cover at least a 1500-byte MTU plus headers,
	 * plus DMA_BEGINALIGNMENT + DMA_ENDALIGNMENT slop.
	 */
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}

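/*
 * Attach: record the MAC address the ROM passed up, map the chip and
 * bmap registers, and install the glue.  If the enetx/enetr DMA
 * channels have not attached yet, the remaining setup is deferred to
 * findchannel_defer() via config_defer().
 */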
void
xe_attach(struct device *parent, struct device *self, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	DPRINTF(("%s: xe_attach()\n",sc->sc_dev.dv_xname));

	{
		extern u_char rom_enetaddr[6];     /* kludge from machdep.c:next68k_bootargs() */
		int i;
		for(i=0;i<6;i++) {
			sc->sc_enaddr[i] = rom_enetaddr[i];
		}
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       sc->sc_dev.dv_xname,
	       sc->sc_enaddr[0],sc->sc_enaddr[1],sc->sc_enaddr[2],
	       sc->sc_enaddr[3],sc->sc_enaddr[4],sc->sc_enaddr[5]);

	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
			  XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		      sc->sc_dev.dv_xname);
	}

	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
			  BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers",
		      sc->sc_dev.dv_xname);
	}

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

	xsc->sc_txdma = nextdma_findchannel ("enetx");
	xsc->sc_rxdma = nextdma_findchannel ("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma) {
		findchannel_defer (self);
	} else {
		config_defer (self, findchannel_defer);
	}

	attached = 1;
}

int
xe_tint(void *arg)
{
	if (!INTR_OCCURRED(NEXT_I_ENETX))
		return 0;
	mb8795_tint((struct mb8795_softc *)arg);
	return(1);
}

int
xe_rint(void *arg)
{
	if (!INTR_OCCURRED(NEXT_I_ENETR))
		return(0);
	mb8795_rint((struct mb8795_softc *)arg);
	return(1);
}
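
/*
 * Both handlers above are autovectored (see isrlink_autovec in
 * findchannel_defer), so each one first polls INTR_OCCURRED and
 * returns 0 when its device did not raise the interrupt, letting
 * other handlers on the same vector run.
 */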

/*
 * Glue functions.
 */

u_char
xe_read_reg(struct mb8795_softc *sc, int reg)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return(bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
}

void
xe_write_reg(struct mb8795_softc *sc, int reg, u_char val)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
}

void
xe_dma_reset(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}
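
/*
 * Note the order above: both channels are quiesced first, then any
 * in-flight tx map is synced and unloaded and the pending mbufs are
 * freed, leaving xe_dma_rx_setup()/xe_dma_tx_setup() to reload from a
 * clean state.
 */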

void
xe_dma_rx_setup(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA rx setup\n"));

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
			xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
}

void
xe_dma_rx_go(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA rx go\n"));

	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
}

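/*
 * Consume one completed receive: hand back the next finished frame,
 * re-arming its ring slot with a fresh cluster first, or NULL when
 * the ring has been drained or the frame was a runt.
 */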
struct mbuf *
xe_dma_rx_mbuf(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
				0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for the next packet */

		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
			xe_dma_rxmap_load(sc,map);

		/* Punt runt packets;
		 * DMA restarts create 0-length packets, for example.
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return (m);
}

void
xe_dma_tx_setup(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx setup\n"));

	nextdma_init(xsc->sc_txdma);
}

void
xe_dma_tx_go(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx go\n"));

	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
}

int
xe_dma_tx_mbuf(struct mb8795_softc *sc, struct mbuf *m)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

/* The following is a NeXT-specific hack that should
 * probably be moved out of MI code.
 * This macro assumes it can move forward as needed
 * in the buffer.  Perhaps it should zero the extra buffer.
 */
#define REALIGN_DMABUF(s,l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
			&~(DMA_BEGINALIGNMENT-1))); \
    (l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
				&~(DMA_ENDALIGNMENT-1)))-(s);}
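
/*
 * Worked example (assuming DMA_BEGINALIGNMENT == 16 and
 * DMA_ENDALIGNMENT == 32; the real values are in nextdmareg.h):
 * with s = 0x1003 and l = 100, s is rounded up to 0x1010 and the end
 * address 0x1010 + 100 = 0x1074 is rounded up to 0x1080, leaving
 * l = 0x1080 - 0x1010 = 112.
 */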

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
				     xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = 0;

		buflen = m->m_pkthdr.len;

		{
			u_char *p = buf;
			for (m=xsc->sc_tx_mb_head; m; m = m->m_next) {
				if (m->m_len == 0) continue;
				memcpy(p, mtod(m, u_char *), m->m_len);
				p += m->m_len;
			}
			/* Fix runt packets */
			if (buflen < ETHER_MIN_LEN - ETHER_CRC_LEN) {
				memset(p, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - buflen);
				buflen = ETHER_MIN_LEN - ETHER_CRC_LEN;
			}
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
					buf,buflen,NULL,BUS_DMA_NOWAIT);
	}
#endif
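
	/*
	 * Design note: the live path copies the chain into the
	 * sc_txbuf bounce buffer rather than using the disabled
	 * bus_dmamap_load_mbuf() path above.  That guarantees a
	 * single contiguous segment for the NeXT DMA engine and makes
	 * zero-padding runts trivial, at the cost of one copy per
	 * packet.
	 */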
	if (error) {
		printf("%s: can't load mbuf chain, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return (error);
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
		      xsc->sc_tx_loaded);
	}
#endif

	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
			xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xe_dma_tx_isactive(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (xsc->sc_tx_loaded != 0);
}
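
/*
 * sc_tx_loaded acts as a one-deep semaphore for the single tx map:
 * xe_dma_tx_continue() raises it when the map is handed to the DMA
 * engine and xe_dma_tx_shutdown() lowers it again, so a nonzero value
 * means a transmit is still in flight.
 */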

/****************************************************************/

void
xe_dma_tx_completed(bus_dmamap_t map, void *arg)
{
#if defined (XE_DEBUG) || defined (DIAGNOSTIC)
	struct mb8795_softc *sc = arg;
#endif
#ifdef DIAGNOSTIC
	struct xe_softc *xsc = (struct xe_softc *)sc;
#endif

	DPRINTF(("%s: xe_dma_tx_completed()\n",sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded",sc->sc_dev.dv_xname);
	}
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map",sc->sc_dev.dv_xname);
	}
#endif
}

void
xe_dma_tx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n",sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded",sc->sc_dev.dv_xname);
	}
#endif

	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
		      xsc->sc_tx_loaded);
	}
#endif

	ifp->if_timer = 0;

#if 1
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma(struct mb8795_softc *); /* XXXX */
		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt */
	MB_WRITE_REG(sc, MB8795_TXMASK,
		     MB_READ_REG(sc, MB8795_TXMASK)
		     | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}

void
xe_dma_rx_completed(bus_dmamap_t map, void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), xsc->sc_rx_completed_idx = %d\n",
			 sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));

#ifdef DIAGNOSTIC
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: Unexpected rx dmamap completed",
			      sc->sc_dev.dv_xname);
		}
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dmamap completed while if not running\n",
			 sc->sc_dev.dv_xname));
#endif
}

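/*
 * DMA shutdown callback: while the interface is running, receive DMA
 * is simply restarted, so the receive ring never stays idle.
 */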
void
xe_dma_rx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
			 sc->sc_dev.dv_xname));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE, MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx DMA shutdown while if not running\n",
			 sc->sc_dev.dv_xname));
#endif
}

/*
 * Load a dmamap with a freshly allocated mbuf.
 */
struct mbuf *
xe_dma_rxmap_load(struct mb8795_softc *sc, bus_dmamap_t map)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		} else {
			m->m_len = MCLBYTES;
		}
	}
	if (!m) {
		/* @@@ Handle this gracefully by reusing a scratch buffer
		 * or something.
		 */
		panic("Unable to get memory for incoming ethernet");
	}

	/* Align the buffer; @@@ NeXT-specific.
	 * Perhaps we should be using M_ALIGN here instead?
	 * First we give ourselves a little room to align with.
	 */
	{
		u_char *buf = m->m_data;
		int buflen = m->m_len;
		buflen -= DMA_ENDALIGNMENT+DMA_BEGINALIGNMENT;
		REALIGN_DMABUF(buf, buflen);
		m->m_data = buf;
		m->m_len = buflen;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len;

	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
			map, m, BUS_DMA_NOWAIT);

	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
			map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (error) {
		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
				m->m_data, m->m_len));
		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
				MCLBYTES, map->_dm_size));

		panic("%s: can't load rx mbuf chain, error = %d",
				sc->sc_dev.dv_xname, error);
		/* NOTREACHED */
		m_freem(m);
		m = NULL;
	}

	return(m);
}

bus_dmamap_t
xe_dma_rx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx+1)%MB8795_NRXBUFS) == xsc->sc_rx_handled_idx) {
			/* make space for one packet by dropping one */
			struct mbuf *m;
			m = xe_dma_rx_mbuf (sc);
			if (m)
				m_freem(m);
#ifdef DIAGNOSTIC
			DPRINTF(("%s: out of receive DMA buffers\n",sc->sc_dev.dv_xname));
#endif
		}
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\n",
			 sc->sc_dev.dv_xname,xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: Unexpected rx DMA continue while if not running",
		      sc->sc_dev.dv_xname);
#endif

	return(map);
}

bus_dmamap_t
xe_dma_tx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n",sc->sc_dev.dv_xname));

	if (xsc->sc_tx_loaded) {
		map = NULL;
	} else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1) {
		panic("%s: xsc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,
				xsc->sc_tx_loaded);
	}
#endif

	return(map);
}