xref: /openbsd/sys/dev/sbus/qe.c (revision 264ca280)
1 /*	$OpenBSD: qe.c,v 1.39 2016/04/13 11:36:00 mpi Exp $	*/
2 /*	$NetBSD: qe.c,v 1.16 2001/03/30 17:30:18 christos Exp $	*/
3 
4 /*-
5  * Copyright (c) 1999 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Paul Kranenburg.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1998 Jason L. Wright.
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
47  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
48  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
49  * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
50  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
51  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
52  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
53  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
54  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
55  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56  */
57 
58 /*
59  * Driver for the SBus qec+qe QuadEthernet board.
60  *
61  * This driver was written using the AMD MACE Am79C940 documentation, some
62  * ideas gleaned from the S/Linux driver for this card, Solaris header files,
63  * and a loan of a card from Paul Southworth of the Internet Engineering
64  * Group (www.ieng.com).
65  */
66 
67 #define QEDEBUG
68 
69 #include "bpfilter.h"
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/errno.h>
75 #include <sys/ioctl.h>
76 #include <sys/mbuf.h>
77 #include <sys/socket.h>
78 #include <sys/syslog.h>
79 #include <sys/device.h>
80 #include <sys/malloc.h>
81 
82 #include <net/if.h>
83 #include <net/if_media.h>
84 
85 #include <netinet/in.h>
86 #include <netinet/if_ether.h>
87 
88 #if NBPFILTER > 0
89 #include <net/bpf.h>
90 #endif
91 
92 #include <machine/bus.h>
93 #include <machine/intr.h>
94 #include <machine/autoconf.h>
95 
96 #include <dev/sbus/sbusvar.h>
97 #include <dev/sbus/qecreg.h>
98 #include <dev/sbus/qecvar.h>
99 #include <dev/sbus/qereg.h>
100 
/*
 * Software state for one qe channel.  The QEC carrier hosts several
 * MACE-based channels (a "QuadEthernet" board per the file header);
 * each channel gets its own qe_softc.
 */
struct qe_softc {
	struct	device	sc_dev;		/* base device */
	bus_space_tag_t	sc_bustag;	/* bus & dma tags */
	bus_dma_tag_t	sc_dmatag;
	bus_dmamap_t	sc_dmamap;	/* map for the ring/buffer block */
	struct	arpcom sc_arpcom;	/* ethernet common part */
	struct	ifmedia sc_ifmedia;	/* interface media */

	struct	qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t	sc_qr;	/* QEC registers */
	bus_space_handle_t	sc_mr;	/* MACE registers */
	bus_space_handle_t	sc_cr;	/* channel registers */

	int	sc_channel;		/* channel number */
	u_int	sc_rev;			/* board revision */

	int	sc_burst;		/* burst size, copied from parent QEC */

	struct  qec_ring	sc_rb;	/* Packet Ring Buffer */

#ifdef QEDEBUG
	int	sc_debug;		/* debug output, toggled by IFF_DEBUG */
#endif
};
126 
/* Autoconfiguration glue */
int	qematch(struct device *, void *, void *);
void	qeattach(struct device *, struct device *, void *);

/* ifnet entry points and hardware control */
void	qeinit(struct qe_softc *);
void	qestart(struct ifnet *);
void	qestop(struct qe_softc *);
void	qewatchdog(struct ifnet *);
int	qeioctl(struct ifnet *, u_long, caddr_t);
void	qereset(struct qe_softc *);

/* Interrupt handling */
int	qeintr(void *);
int	qe_eint(struct qe_softc *, u_int32_t);
int	qe_rint(struct qe_softc *);
int	qe_tint(struct qe_softc *);
void	qe_mcreset(struct qe_softc *);

/* Packet buffer copy helpers */
int	qe_put(struct qe_softc *, int, struct mbuf *);
void	qe_read(struct qe_softc *, int, int);
struct mbuf	*qe_get(struct qe_softc *, int, int);

/* ifmedia callbacks */
void	qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int	qe_ifmedia_upd(struct ifnet *);

/* Autoconf attachment: one qe_softc allocated per matched channel */
struct cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

struct cfdriver qe_cd = {
	NULL, "qe", DV_IFNET
};
158 
159 int
160 qematch(parent, vcf, aux)
161 	struct device *parent;
162 	void *vcf;
163 	void *aux;
164 {
165 	struct cfdata *cf = vcf;
166 	struct sbus_attach_args *sa = aux;
167 
168 	return (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0);
169 }
170 
171 void
172 qeattach(parent, self, aux)
173 	struct device *parent, *self;
174 	void *aux;
175 {
176 	struct sbus_attach_args *sa = aux;
177 	struct qec_softc *qec = (struct qec_softc *)parent;
178 	struct qe_softc *sc = (struct qe_softc *)self;
179 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
180 	int node = sa->sa_node;
181 	bus_dma_tag_t dmatag = sa->sa_dmatag;
182 	bus_dma_segment_t seg;
183 	bus_size_t size;
184 	int rseg, error;
185 	extern void myetheraddr(u_char *);
186 
187 	/* Pass on the bus tags */
188 	sc->sc_bustag = sa->sa_bustag;
189 	sc->sc_dmatag = sa->sa_dmatag;
190 
191 	if (sa->sa_nreg < 2) {
192 		printf("%s: only %d register sets\n",
193 		    self->dv_xname, sa->sa_nreg);
194 		return;
195 	}
196 
197 	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[0].sbr_slot,
198 	    (bus_addr_t)sa->sa_reg[0].sbr_offset,
199 	    (bus_size_t)sa->sa_reg[0].sbr_size, 0, 0, &sc->sc_cr) != 0) {
200 		printf("%s: cannot map registers\n", self->dv_xname);
201 		return;
202 	}
203 
204 	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[1].sbr_slot,
205 	    (bus_addr_t)sa->sa_reg[1].sbr_offset,
206 	    (bus_size_t)sa->sa_reg[1].sbr_size, 0, 0, &sc->sc_mr) != 0) {
207 		printf("%s: cannot map registers\n", self->dv_xname);
208 		return;
209 	}
210 
211 	sc->sc_rev = getpropint(node, "mace-version", -1);
212 	printf(" rev %x", sc->sc_rev);
213 
214 	sc->sc_qec = qec;
215 	sc->sc_qr = qec->sc_regs;
216 
217 	sc->sc_channel = getpropint(node, "channel#", -1);
218 	sc->sc_burst = qec->sc_burst;
219 
220 	qestop(sc);
221 
222 	/* Note: no interrupt level passed */
223 	if (bus_intr_establish(sa->sa_bustag, 0, IPL_NET, 0, qeintr, sc,
224 	    self->dv_xname) == NULL) {
225 		printf(": no interrupt established\n");
226 		return;
227 	}
228 
229 	myetheraddr(sc->sc_arpcom.ac_enaddr);
230 
231 	/*
232 	 * Allocate descriptor ring and buffers.
233 	 */
234 
235 	/* for now, allocate as many bufs as there are ring descriptors */
236 	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
237 	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;
238 
239 	size =
240 	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
241 	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
242 	    sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
243 	    sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;
244 
245 	/* Get a DMA handle */
246 	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
247 	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
248 		printf("%s: DMA map create error %d\n", self->dv_xname, error);
249 		return;
250 	}
251 
252 	/* Allocate DMA buffer */
253 	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
254 	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
255 		printf("%s: DMA buffer alloc error %d\n",
256 			self->dv_xname, error);
257 		return;
258 	}
259 
260 	/* Map DMA buffer in CPU addressable space */
261 	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
262 	    &sc->sc_rb.rb_membase,
263 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
264 		printf("%s: DMA buffer map error %d\n",
265 		    self->dv_xname, error);
266 		bus_dmamem_free(dmatag, &seg, rseg);
267 		return;
268 	}
269 
270 	/* Load the buffer */
271 	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
272 	    sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
273 		printf("%s: DMA buffer map load error %d\n",
274 			self->dv_xname, error);
275 		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
276 		bus_dmamem_free(dmatag, &seg, rseg);
277 		return;
278 	}
279 	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;
280 
281 	/* Initialize media properties */
282 	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
283 	ifmedia_add(&sc->sc_ifmedia,
284 	    IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0), 0, NULL);
285 	ifmedia_add(&sc->sc_ifmedia,
286 	    IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0), 0, NULL);
287 	ifmedia_add(&sc->sc_ifmedia,
288 	    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0), 0, NULL);
289 	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);
290 
291 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
292 	ifp->if_softc = sc;
293 	ifp->if_start = qestart;
294 	ifp->if_ioctl = qeioctl;
295 	ifp->if_watchdog = qewatchdog;
296 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX |
297 	    IFF_MULTICAST;
298 
299 	/* Attach the interface. */
300 	if_attach(ifp);
301 	ether_ifattach(ifp);
302 
303 	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
304 }
305 
306 /*
307  * Pull data off an interface.
308  * Len is the length of data, with local net header stripped.
309  * We copy the data into mbufs.  When full cluster sized units are present,
310  * we copy into clusters.
311  */
struct mbuf *
qe_get(sc, idx, totlen)
	struct qe_softc *sc;
	int idx, totlen;
{
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	caddr_t bp;

	/* Start of this packet's slot within the receive buffer block */
	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.len = totlen;
	/* Offset the data so the payload after the header is aligned */
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	while (totlen > 0) {
		if (top) {
			/* Secondary mbufs carry no packet header */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/* Out of mbufs: drop the partial chain */
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			/* Enough data left to justify a cluster */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		bcopy(bp + boff, mtod(m, caddr_t), len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}
358 
359 /*
360  * Routine to copy from mbuf chain to transmit buffer in
361  * network buffer memory.
362  */
363 __inline__ int
364 qe_put(sc, idx, m)
365 	struct qe_softc *sc;
366 	int idx;
367 	struct mbuf *m;
368 {
369 	struct mbuf *n;
370 	int len, tlen = 0, boff = 0;
371 	caddr_t bp;
372 
373 	bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;
374 
375 	for (; m; m = n) {
376 		len = m->m_len;
377 		if (len == 0) {
378 			n = m_free(m);
379 			continue;
380 		}
381 		bcopy(mtod(m, caddr_t), bp+boff, len);
382 		boff += len;
383 		tlen += len;
384 		n = m_free(m);
385 	}
386 	return (tlen);
387 }
388 
389 /*
390  * Pass a packet to the higher levels.
391  */
392 __inline__ void
393 qe_read(sc, idx, len)
394 	struct qe_softc *sc;
395 	int idx, len;
396 {
397 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
398 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
399 	struct mbuf *m;
400 
401 	if (len <= sizeof(struct ether_header) ||
402 	    len > ETHERMTU + sizeof(struct ether_header)) {
403 
404 		printf("%s: invalid packet size %d; dropping\n",
405 		    ifp->if_xname, len);
406 
407 		ifp->if_ierrors++;
408 		return;
409 	}
410 
411 	/*
412 	 * Pull packet off interface.
413 	 */
414 	m = qe_get(sc, idx, len);
415 	if (m == NULL) {
416 		ifp->if_ierrors++;
417 		return;
418 	}
419 
420 	ml_enqueue(&ml, m);
421 	if_input(ifp, &ml);
422 }
423 
424 /*
425  * Start output on interface.
426  * We make two assumptions here:
427  *  1) that the current priority is set to splnet _before_ this code
428  *     is called *and* is returned to the appropriate priority after
429  *     return
430  *  2) that the IFF_OACTIVE flag is checked before this code is called
431  *     (i.e. that the output part of the interface is idle)
432  */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	/* Next free transmit descriptor */
	bix = sc->sc_rb.rb_tdhead;

	/*
	 * NOTE(review): ifp->if_timer is never armed here although
	 * qe_tint() clears it and qewatchdog() exists -- verify the tx
	 * watchdog can ever fire.
	 */
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;


#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission
		 * (hand the descriptor to the chip, then kick it).
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
		    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
		    QE_CR_CTRL_TWAKEUP);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		/* Ring full: stall the queue until qe_tint() reclaims */
		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}
487 
488 void
489 qestop(sc)
490 	struct qe_softc *sc;
491 {
492 	bus_space_tag_t t = sc->sc_bustag;
493 	bus_space_handle_t mr = sc->sc_mr;
494 	bus_space_handle_t cr = sc->sc_cr;
495 	int n;
496 
497 	/* Stop the schwurst */
498 	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
499 	for (n = 200; n > 0; n--) {
500 		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
501 		    QE_MR_BIUCC_SWRST) == 0)
502 			break;
503 		DELAY(20);
504 	}
505 
506 	/* then reset */
507 	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
508 	for (n = 200; n > 0; n--) {
509 		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
510 		    QE_CR_CTRL_RESET) == 0)
511 			break;
512 		DELAY(20);
513 	}
514 }
515 
516 /*
517  * Reset interface.
518  */
/*
 * Reset the interface: stop the channel and bring it back up, with
 * network interrupts blocked for the duration.
 */
void
qereset(struct qe_softc *sc)
{
	int s = splnet();

	qestop(sc);
	qeinit(sc);
	splx(s);
}
530 
531 void
532 qewatchdog(ifp)
533 	struct ifnet *ifp;
534 {
535 	struct qe_softc *sc = ifp->if_softc;
536 
537 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
538 	ifp->if_oerrors++;
539 
540 	qereset(sc);
541 }
542 
543 /*
544  * Interrupt dispatch.
545  */
/*
 * Interrupt dispatch: read the QEC global status word, extract the
 * 4-bit nibble belonging to this channel, and fan out to the error,
 * transmit and receive handlers as indicated.
 */
int
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);	/* nothing pending for this channel */

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		int i;
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		printf("qe%d: intr: qestat=%b\n", sc->sc_channel,
		    qestat, QE_CR_STAT_BITS);

		/* Dump the first 32 MACE registers, 8 per line */
		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf("  m[%d]=%x,", i, bus_space_read_1(t, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug)
			printf("qe%d: eint: qestat=%b\n", sc->sc_channel,
			    qestat, QE_CR_STAT_BITS);
#endif
		r |= qe_eint(sc, qestat);
		/* qe_eint() returns -1 when it had to reset the chip */
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (1);
}
607 
608 /*
609  * Transmit interrupt.
610  */
611 int
612 qe_tint(sc)
613 	struct qe_softc *sc;
614 {
615 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
616 	unsigned int bix, txflags;
617 
618 	bix = sc->sc_rb.rb_tdtail;
619 
620 	for (;;) {
621 		if (sc->sc_rb.rb_td_nbusy <= 0)
622 			break;
623 
624 		txflags = sc->sc_rb.rb_txd[bix].xd_flags;
625 
626 		if (txflags & QEC_XD_OWN)
627 			break;
628 
629 		ifq_clr_oactive(&ifp->if_snd);
630 		ifp->if_opackets++;
631 
632 		if (++bix == QEC_XD_RING_MAXSIZE)
633 			bix = 0;
634 
635 		--sc->sc_rb.rb_td_nbusy;
636 	}
637 
638 	if (sc->sc_rb.rb_td_nbusy == 0)
639 		ifp->if_timer = 0;
640 
641 	if (sc->sc_rb.rb_tdtail != bix) {
642 		sc->sc_rb.rb_tdtail = bix;
643 		if (ifq_is_oactive(&ifp->if_snd)) {
644 			ifq_clr_oactive(&ifp->if_snd);
645 			qestart(ifp);
646 		}
647 	}
648 
649 	return (1);
650 }
651 
652 /*
653  * Receive interrupt.
654  */
/*
 * Receive interrupt: drain completed receive descriptors, pass each
 * frame up the stack, and hand buffers back to the chip.
 */
int
qe_rint(sc)
	struct qe_softc *sc;
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		/* Stop at the first descriptor still owned by the chip */
		if (len & QEC_XD_OWN)
			break;

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		/* trim the trailing 4 bytes -- presumably the FCS */
		len -= 4;
		qe_read(sc, bix, len);

		/* Re-arm the descriptor `nrbuf' slots ahead for the chip */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
		    QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
		    sc->sc_dev.dv_xname, bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}
701 
702 /*
703  * Error interrupt.
704  */
/*
 * Error interrupt: decode the error bits in `why', log and count each
 * recognized condition.  Returns 1 if any error bit was recognized,
 * 0 if none was (after logging the raw status), or -1 if a fatal
 * condition forced a chip reset -- in which case the caller must not
 * touch the hardware any further this pass.
 */
int
qe_eint(sc, why)
	struct qe_softc *sc;
	u_int32_t why;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int r = 0, rst = 0;	/* r: recognized; rst: reset required */

	/* -- transmit-side conditions -- */

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
		r |= 1;
		ifp->if_oerrors++;
	}

	if (why & QE_CR_STAT_CLOSS) {
		/* carrier loss: counted but not logged */
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}


	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_TCCOFLOW) {
		/* counter overflow: each overflow accounts for 256 events */
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	/* -- receive-side conditions -- */

	if (why & QE_CR_STAT_RCCOFLOW) {
		/* counter overflow: each overflow accounts for 256 events */
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	/* None of the bits we know about were set: log the raw status */
	if (r == 0)
		printf("%s: unexpected interrupt error: %08x\n",
			sc->sc_dev.dv_xname, why);

	/* Fatal conditions require a full chip reset */
	if (rst) {
		printf("%s: resetting...\n", sc->sc_dev.dv_xname);
		qereset(sc);
		return (-1);
	}

	return (r);
}
874 
/*
 * Process an ioctl request at splnet.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up */
		ifp->if_flags |= IFF_UP;
		qeinit(sc);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		/* Track IFF_DEBUG in the softc for the interrupt paths */
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	/* Multicast list changed: reload the hardware filter if running */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			qe_mcreset(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
940 
941 
/*
 * Initialize the channel: reset the chip, set up the descriptor rings,
 * program the QEC channel and MACE registers, load the station address
 * and multicast filter, and mark the interface running.  The register
 * access order matters (see the sbus turnaround note below).
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: descriptor ring DMA addresses */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	/* Unmask rx/tx/qec interrupts; mask only rx collision in MMASK */
	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* Point this channel at its slice of the QEC local memory */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/*
	 * When switching from mace<->qec always guarantee an sbus
	 * turnaround (if last op was read, perform a dummy write, and
	 * vice versa).
	 */
	bus_space_read_4(t, cr, QE_CRI_QMASK);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
	    QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
	    QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
	    QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
	    QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter (also enables rx/tx via MACCC) */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	splx(s);
}
1043 
1044 /*
1045  * Reset multicast filter.
1046  */
1047 void
1048 qe_mcreset(sc)
1049 	struct qe_softc *sc;
1050 {
1051 	struct arpcom *ac = &sc->sc_arpcom;
1052 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1053 	bus_space_tag_t t = sc->sc_bustag;
1054 	bus_space_handle_t mr = sc->sc_mr;
1055 	struct ether_multi *enm;
1056 	struct ether_multistep step;
1057 	u_int32_t crc;
1058 	u_int16_t hash[4];
1059 	u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
1060 	int i, j;
1061 
1062 	/* We also enable transmitter & receiver here */
1063 	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;
1064 
1065 	if (ifp->if_flags & IFF_PROMISC) {
1066 		maccc |= QE_MR_MACCC_PROM;
1067 		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
1068 		return;
1069 	}
1070 
1071 	if (ac->ac_multirangecnt > 0)
1072 		ifp->if_flags |= IFF_ALLMULTI;
1073 
1074 	if (ifp->if_flags & IFF_ALLMULTI) {
1075 		bus_space_write_1(t, mr, QE_MRI_IAC,
1076 		    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
1077 		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
1078 		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
1079 		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
1080 		return;
1081 	}
1082 
1083 	hash[3] = hash[2] = hash[1] = hash[0] = 0;
1084 
1085 	ETHER_FIRST_MULTI(step, ac, enm);
1086 	while (enm != NULL) {
1087 		crc = 0xffffffff;
1088 
1089 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
1090 			octet = enm->enm_addrlo[i];
1091 
1092 			for (j = 0; j < 8; j++) {
1093 				if ((crc & 1) ^ (octet & 1)) {
1094 					crc >>= 1;
1095 					crc ^= MC_POLY_LE;
1096 				}
1097 				else
1098 					crc >>= 1;
1099 				octet >>= 1;
1100 			}
1101 		}
1102 
1103 		crc >>= 26;
1104 		hash[crc >> 4] |= 1 << (crc & 0xf);
1105 		ETHER_NEXT_MULTI(step, enm);
1106 	}
1107 
1108 	bus_space_write_1(t, mr, QE_MRI_IAC,
1109 	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
1110 	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
1111 	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
1112 	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
1113 }
1114 
1115 /*
1116  * Get current media settings.
1117  */
1118 void
1119 qe_ifmedia_sts(ifp, ifmr)
1120 	struct ifnet *ifp;
1121 	struct ifmediareq *ifmr;
1122 {
1123 	struct qe_softc *sc = ifp->if_softc;
1124 	u_int8_t phycc;
1125 
1126 	ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1127 	phycc = bus_space_read_1(sc->sc_bustag, sc->sc_mr, QE_MRI_PHYCC);
1128 	if ((phycc & QE_MR_PHYCC_DLNKTST) == 0) {
1129 		ifmr->ifm_status |= IFM_AVALID;
1130 		if (phycc & QE_MR_PHYCC_LNKFL)
1131 			ifmr->ifm_status &= ~IFM_ACTIVE;
1132 		else
1133 			ifmr->ifm_status |= IFM_ACTIVE;
1134 	}
1135 }
1136 
1137 /*
1138  * Set media options.
1139  */
1140 int
1141 qe_ifmedia_upd(ifp)
1142 	struct ifnet *ifp;
1143 {
1144 	struct qe_softc *sc = ifp->if_softc;
1145 	uint64_t media = sc->sc_ifmedia.ifm_media;
1146 
1147 	if (IFM_TYPE(media) != IFM_ETHER)
1148 		return (EINVAL);
1149 
1150 	if (IFM_SUBTYPE(media) != IFM_10_T)
1151 		return (EINVAL);
1152 
1153 	return (0);
1154 }
1155