xref: /openbsd/sys/dev/sbus/qe.c (revision 09467b48)
1 /*	$OpenBSD: qe.c,v 1.41 2020/07/10 13:22:21 patrick Exp $	*/
2 /*	$NetBSD: qe.c,v 1.16 2001/03/30 17:30:18 christos Exp $	*/
3 
4 /*-
5  * Copyright (c) 1999 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Paul Kranenburg.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1998 Jason L. Wright.
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
47  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
48  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
49  * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
50  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
51  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
52  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
53  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
54  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
55  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56  */
57 
58 /*
59  * Driver for the SBus qec+qe QuadEthernet board.
60  *
61  * This driver was written using the AMD MACE Am79C940 documentation, some
62  * ideas gleaned from the S/Linux driver for this card, Solaris header files,
63  * and a loan of a card from Paul Southworth of the Internet Engineering
64  * Group (www.ieng.com).
65  */
66 
67 #define QEDEBUG
68 
69 #include "bpfilter.h"
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/errno.h>
75 #include <sys/ioctl.h>
76 #include <sys/mbuf.h>
77 #include <sys/socket.h>
78 #include <sys/syslog.h>
79 #include <sys/device.h>
80 #include <sys/malloc.h>
81 
82 #include <net/if.h>
83 #include <net/if_media.h>
84 
85 #include <netinet/in.h>
86 #include <netinet/if_ether.h>
87 
88 #if NBPFILTER > 0
89 #include <net/bpf.h>
90 #endif
91 
92 #include <machine/bus.h>
93 #include <machine/intr.h>
94 #include <machine/autoconf.h>
95 
96 #include <dev/sbus/sbusvar.h>
97 #include <dev/sbus/qecreg.h>
98 #include <dev/sbus/qecvar.h>
99 #include <dev/sbus/qereg.h>
100 
/*
 * Per-channel softc for one QE (MACE) channel hanging off a QEC parent.
 */
struct qe_softc {
	struct	device	sc_dev;		/* base device */
	bus_space_tag_t	sc_bustag;	/* bus & dma tags */
	bus_dma_tag_t	sc_dmatag;
	bus_dmamap_t	sc_dmamap;	/* map for rings + packet buffers */
	struct	arpcom sc_arpcom;	/* ethernet common part */
	struct	ifmedia sc_ifmedia;	/* interface media */

	struct	qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t	sc_qr;	/* QEC registers */
	bus_space_handle_t	sc_mr;	/* MACE registers */
	bus_space_handle_t	sc_cr;	/* channel registers */

	int	sc_channel;		/* channel number */
	u_int	sc_rev;			/* board revision */

	int	sc_burst;		/* DMA burst size, from QEC parent */

	struct  qec_ring	sc_rb;	/* Packet Ring Buffer */

#ifdef QEDEBUG
	int	sc_debug;		/* verbose interrupt logging (IFF_DEBUG) */
#endif
};
126 
/* Autoconf glue */
int	qematch(struct device *, void *, void *);
void	qeattach(struct device *, struct device *, void *);

/* Interface control */
void	qeinit(struct qe_softc *);
void	qestart(struct ifnet *);
void	qestop(struct qe_softc *);
void	qewatchdog(struct ifnet *);
int	qeioctl(struct ifnet *, u_long, caddr_t);
void	qereset(struct qe_softc *);

/* Interrupt handling */
int	qeintr(void *);
int	qe_eint(struct qe_softc *, u_int32_t);
int	qe_rint(struct qe_softc *);
int	qe_tint(struct qe_softc *);
void	qe_mcreset(struct qe_softc *);

/* Packet buffer copy helpers */
int	qe_put(struct qe_softc *, int, struct mbuf *);
void	qe_read(struct qe_softc *, int, int);
struct mbuf	*qe_get(struct qe_softc *, int, int);

/* ifmedia callbacks */
void	qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int	qe_ifmedia_upd(struct ifnet *);

struct cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

struct cfdriver qe_cd = {
	NULL, "qe", DV_IFNET
};
158 
159 int
160 qematch(parent, vcf, aux)
161 	struct device *parent;
162 	void *vcf;
163 	void *aux;
164 {
165 	struct cfdata *cf = vcf;
166 	struct sbus_attach_args *sa = aux;
167 
168 	return (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0);
169 }
170 
171 void
172 qeattach(parent, self, aux)
173 	struct device *parent, *self;
174 	void *aux;
175 {
176 	struct sbus_attach_args *sa = aux;
177 	struct qec_softc *qec = (struct qec_softc *)parent;
178 	struct qe_softc *sc = (struct qe_softc *)self;
179 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
180 	int node = sa->sa_node;
181 	bus_dma_tag_t dmatag = sa->sa_dmatag;
182 	bus_dma_segment_t seg;
183 	bus_size_t size;
184 	int rseg, error;
185 	extern void myetheraddr(u_char *);
186 
187 	/* Pass on the bus tags */
188 	sc->sc_bustag = sa->sa_bustag;
189 	sc->sc_dmatag = sa->sa_dmatag;
190 
191 	if (sa->sa_nreg < 2) {
192 		printf("%s: only %d register sets\n",
193 		    self->dv_xname, sa->sa_nreg);
194 		return;
195 	}
196 
197 	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[0].sbr_slot,
198 	    (bus_addr_t)sa->sa_reg[0].sbr_offset,
199 	    (bus_size_t)sa->sa_reg[0].sbr_size, 0, 0, &sc->sc_cr) != 0) {
200 		printf("%s: cannot map registers\n", self->dv_xname);
201 		return;
202 	}
203 
204 	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[1].sbr_slot,
205 	    (bus_addr_t)sa->sa_reg[1].sbr_offset,
206 	    (bus_size_t)sa->sa_reg[1].sbr_size, 0, 0, &sc->sc_mr) != 0) {
207 		printf("%s: cannot map registers\n", self->dv_xname);
208 		return;
209 	}
210 
211 	sc->sc_rev = getpropint(node, "mace-version", -1);
212 	printf(" rev %x", sc->sc_rev);
213 
214 	sc->sc_qec = qec;
215 	sc->sc_qr = qec->sc_regs;
216 
217 	sc->sc_channel = getpropint(node, "channel#", -1);
218 	sc->sc_burst = qec->sc_burst;
219 
220 	qestop(sc);
221 
222 	/* Note: no interrupt level passed */
223 	if (bus_intr_establish(sa->sa_bustag, 0, IPL_NET, 0, qeintr, sc,
224 	    self->dv_xname) == NULL) {
225 		printf(": no interrupt established\n");
226 		return;
227 	}
228 
229 	myetheraddr(sc->sc_arpcom.ac_enaddr);
230 
231 	/*
232 	 * Allocate descriptor ring and buffers.
233 	 */
234 
235 	/* for now, allocate as many bufs as there are ring descriptors */
236 	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
237 	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;
238 
239 	size =
240 	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
241 	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
242 	    sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
243 	    sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;
244 
245 	/* Get a DMA handle */
246 	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
247 	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
248 		printf("%s: DMA map create error %d\n", self->dv_xname, error);
249 		return;
250 	}
251 
252 	/* Allocate DMA buffer */
253 	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
254 	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
255 		printf("%s: DMA buffer alloc error %d\n",
256 			self->dv_xname, error);
257 		return;
258 	}
259 
260 	/* Map DMA buffer in CPU addressable space */
261 	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
262 	    &sc->sc_rb.rb_membase,
263 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
264 		printf("%s: DMA buffer map error %d\n",
265 		    self->dv_xname, error);
266 		bus_dmamem_free(dmatag, &seg, rseg);
267 		return;
268 	}
269 
270 	/* Load the buffer */
271 	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
272 	    sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
273 		printf("%s: DMA buffer map load error %d\n",
274 			self->dv_xname, error);
275 		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
276 		bus_dmamem_free(dmatag, &seg, rseg);
277 		return;
278 	}
279 	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;
280 
281 	/* Initialize media properties */
282 	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
283 	ifmedia_add(&sc->sc_ifmedia,
284 	    IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0), 0, NULL);
285 	ifmedia_add(&sc->sc_ifmedia,
286 	    IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0), 0, NULL);
287 	ifmedia_add(&sc->sc_ifmedia,
288 	    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0), 0, NULL);
289 	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);
290 
291 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
292 	ifp->if_softc = sc;
293 	ifp->if_start = qestart;
294 	ifp->if_ioctl = qeioctl;
295 	ifp->if_watchdog = qewatchdog;
296 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX |
297 	    IFF_MULTICAST;
298 
299 	/* Attach the interface. */
300 	if_attach(ifp);
301 	ether_ifattach(ifp);
302 
303 	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
304 }
305 
306 /*
307  * Pull data off an interface.
308  * Len is the length of data, with local net header stripped.
309  * We copy the data into mbufs.  When full cluster sized units are present,
310  * we copy into clusters.
311  */
/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present,
 * we copy into clusters.
 *
 * Returns the head of the new mbuf chain, or NULL if mbuf allocation
 * failed (any partial chain is freed).
 */
struct mbuf *
qe_get(sc, idx, totlen)
	struct qe_softc *sc;
	int idx, totlen;
{
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	caddr_t bp;

	/* Start of the receive buffer slot that holds this packet. */
	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.len = totlen;
	/* Offset data so the payload after the ethernet header is aligned. */
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	while (totlen > 0) {
		if (top) {
			/* Chain on another mbuf for the remaining data. */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			/* Big enough for a cluster; keep MLEN on failure. */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		/* Copy no more than what fits in this mbuf. */
		m->m_len = len = min(totlen, len);
		bcopy(bp + boff, mtod(m, caddr_t), len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}
358 
359 /*
360  * Routine to copy from mbuf chain to transmit buffer in
361  * network buffer memory.
362  */
363 __inline__ int
364 qe_put(sc, idx, m)
365 	struct qe_softc *sc;
366 	int idx;
367 	struct mbuf *m;
368 {
369 	struct mbuf *n;
370 	int len, tlen = 0, boff = 0;
371 	caddr_t bp;
372 
373 	bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;
374 
375 	for (; m; m = n) {
376 		len = m->m_len;
377 		if (len == 0) {
378 			n = m_free(m);
379 			continue;
380 		}
381 		bcopy(mtod(m, caddr_t), bp+boff, len);
382 		boff += len;
383 		tlen += len;
384 		n = m_free(m);
385 	}
386 	return (tlen);
387 }
388 
389 /*
390  * Pass a packet to the higher levels.
391  */
392 __inline__ void
393 qe_read(sc, idx, len)
394 	struct qe_softc *sc;
395 	int idx, len;
396 {
397 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
398 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
399 	struct mbuf *m;
400 
401 	if (len <= sizeof(struct ether_header) ||
402 	    len > ETHERMTU + sizeof(struct ether_header)) {
403 
404 		printf("%s: invalid packet size %d; dropping\n",
405 		    ifp->if_xname, len);
406 
407 		ifp->if_ierrors++;
408 		return;
409 	}
410 
411 	/*
412 	 * Pull packet off interface.
413 	 */
414 	m = qe_get(sc, idx, len);
415 	if (m == NULL) {
416 		ifp->if_ierrors++;
417 		return;
418 	}
419 
420 	ml_enqueue(&ml, m);
421 	if_input(ifp, &ml);
422 }
423 
424 /*
425  * Start output on interface.
426  * We make two assumptions here:
427  *  1) that the current priority is set to splnet _before_ this code
428  *     is called *and* is returned to the appropriate priority after
429  *     return
430  *  2) that the IFF_OACTIVE flag is checked before this code is called
431  *     (i.e. that the output part of the interface is idle)
432  */
/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e. that the output part of the interface is idle)
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	bix = sc->sc_rb.rb_tdhead;

	for (;;) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;


#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
		    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
		    QE_CR_CTRL_TWAKEUP);

		/* Advance head index, wrapping around the descriptor ring. */
		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		/* All descriptors busy: mark queue active and stop filling. */
		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}
487 
488 void
489 qestop(sc)
490 	struct qe_softc *sc;
491 {
492 	bus_space_tag_t t = sc->sc_bustag;
493 	bus_space_handle_t mr = sc->sc_mr;
494 	bus_space_handle_t cr = sc->sc_cr;
495 	int n;
496 
497 	/* Stop the schwurst */
498 	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
499 	for (n = 200; n > 0; n--) {
500 		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
501 		    QE_MR_BIUCC_SWRST) == 0)
502 			break;
503 		DELAY(20);
504 	}
505 
506 	/* then reset */
507 	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
508 	for (n = 200; n > 0; n--) {
509 		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
510 		    QE_CR_CTRL_RESET) == 0)
511 			break;
512 		DELAY(20);
513 	}
514 }
515 
516 /*
517  * Reset interface.
518  */
/*
 * Reset interface: stop and reinitialize the chip at splnet.
 */
void
qereset(struct qe_softc *sc)
{
	int s = splnet();

	qestop(sc);
	qeinit(sc);
	splx(s);
}
530 
531 void
532 qewatchdog(ifp)
533 	struct ifnet *ifp;
534 {
535 	struct qe_softc *sc = ifp->if_softc;
536 
537 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
538 	ifp->if_oerrors++;
539 
540 	qereset(sc);
541 }
542 
543 /*
544  * Interrupt dispatch.
545  */
/*
 * Interrupt dispatch.
 *
 * Reads the global QEC status, extracts the 4-bit field belonging to
 * this channel and, if set, reads the per-channel status and dispatches
 * to the error/transmit/receive handlers.  Returns 0 when the interrupt
 * was not for this channel, 1 otherwise.
 */
int
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		int i;
		/* Note: deliberately shadows the outer `t' for this dump. */
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		printf("qe%d: intr: qestat=%b\n", sc->sc_channel,
		    qestat, QE_CR_STAT_BITS);

		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf("  m[%d]=%x,", i, bus_space_read_1(t, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug)
			printf("qe%d: eint: qestat=%b\n", sc->sc_channel,
			    qestat, QE_CR_STAT_BITS);
#endif
		r |= qe_eint(sc, qestat);
		/* qe_eint() returns -1 after resetting the chip; stop here. */
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (1);
}
607 
608 /*
609  * Transmit interrupt.
610  */
/*
 * Transmit interrupt.
 *
 * Reclaims transmit descriptors the hardware has finished with
 * (QEC_XD_OWN cleared), clears the output-active state and restarts
 * output if any descriptors were freed.  Always returns 1.
 */
int
qe_tint(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		/* Descriptor still owned by the hardware: stop reclaiming. */
		if (txflags & QEC_XD_OWN)
			break;

		ifq_clr_oactive(&ifp->if_snd);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	/* All descriptors reclaimed: cancel the watchdog. */
	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	if (sc->sc_rb.rb_tdtail != bix) {
		sc->sc_rb.rb_tdtail = bix;
		/* Descriptors freed up: kick the transmitter again. */
		if (ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			qestart(ifp);
		}
	}

	return (1);
}
650 
651 /*
652  * Receive interrupt.
653  */
/*
 * Receive interrupt.
 *
 * Walks the receive descriptor ring from the tail, passing every
 * completed buffer up the stack and handing the corresponding slot
 * back to the hardware.  Always returns 1.
 */
int
qe_rint(sc)
	struct qe_softc *sc;
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		/* Strip the 4 trailing bytes (presumably the FCS). */
		len -= 4;
		qe_read(sc, bix, len);

		/* ... */
		/* Give the descriptor nrbuf slots ahead back to the chip. */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
		    QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
		    sc->sc_dev.dv_xname, bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}
700 
701 /*
702  * Error interrupt.
703  */
/*
 * Error interrupt.
 *
 * Decodes every error bit in `why', logging and bumping the relevant
 * interface counters.  Serious errors set `rst', which resets the chip;
 * in that case -1 is returned so the caller stops processing this
 * interrupt.  Otherwise returns 1 if any bit was recognized, 0 if not.
 */
int
qe_eint(sc, why)
	struct qe_softc *sc;
	u_int32_t why;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int r = 0, rst = 0;

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
		r |= 1;
		ifp->if_oerrors++;
	}

	/* Carrier loss: counted silently, no console noise. */
	if (why & QE_CR_STAT_CLOSS) {
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}


	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	/* 8-bit hardware counter overflowed; credit a full wrap of 256. */
	if (why & QE_CR_STAT_TCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (r == 0)
		printf("%s: unexpected interrupt error: %08x\n",
			sc->sc_dev.dv_xname, why);

	if (rst) {
		printf("%s: resetting...\n", sc->sc_dev.dv_xname);
		qereset(sc);
		return (-1);
	}

	return (r);
}
873 
/*
 * Process an ioctl request at splnet.  Handles up/down transitions,
 * media ioctls, and defers the rest to ether_ioctl(); an ENETRESET
 * answer from the latter triggers a multicast filter reload.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		qeinit(sc);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		/* Track IFF_DEBUG so the interrupt path can log verbosely. */
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		/* Multicast list changed; reload the hardware filter. */
		if (ifp->if_flags & IFF_RUNNING)
			qe_mcreset(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
939 
940 
/*
 * Initialize the channel: set up descriptor rings, program the QEC
 * channel registers and the MACE, load the station address, apply
 * media settings and reload the multicast filter.  Runs at splnet.
 * The register write order below follows the hardware requirements;
 * do not reorder casually.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* This channel's slice of the QEC's local buffer memory. */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/*
	 * When switching from mace<->qec always guarantee an sbus
	 * turnaround (if last op was read, perform a dummy write, and
	 * vice versa).
	 */
	bus_space_read_4(t, cr, QE_CRI_QMASK);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
	    QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
	    QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
	    QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
	    QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	splx(s);
}
1042 
1043 /*
1044  * Reset multicast filter.
1045  */
/*
 * Reset multicast filter.
 *
 * Programs the MACE logical address filter: all-ones for promiscuous/
 * allmulti, otherwise a 64-bit hash built from a bitwise little-endian
 * CRC (polynomial MC_POLY_LE) over each multicast address, keeping the
 * top 6 CRC bits as the bit index.  Also (re)enables the transmitter
 * and receiver via MACCC.
 */
void
qe_mcreset(sc)
	struct qe_softc *sc;
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];
	u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
	int i, j;

	/* We also enable transmitter & receiver here */
	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

	if (ifp->if_flags & IFF_PROMISC) {
		/* Promiscuous: filter irrelevant, just set PROM and return. */
		maccc |= QE_MR_MACCC_PROM;
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	/* Address ranges can't be expressed in the hash; take them all. */
	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	if (ifp->if_flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the logical address filter. */
		bus_space_write_1(t, mr, QE_MRI_IAC,
		    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		crc = 0xffffffff;

		/* Bitwise little-endian CRC over the 6 address octets. */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			octet = enm->enm_addrlo[i];

			for (j = 0; j < 8; j++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				}
				else
					crc >>= 1;
				octet >>= 1;
			}
		}

		/* Keep the top 6 bits; set that bit in the 64-bit filter. */
		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}
1113 
1114 /*
1115  * Get current media settings.
1116  */
1117 void
1118 qe_ifmedia_sts(ifp, ifmr)
1119 	struct ifnet *ifp;
1120 	struct ifmediareq *ifmr;
1121 {
1122 	struct qe_softc *sc = ifp->if_softc;
1123 	u_int8_t phycc;
1124 
1125 	ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1126 	phycc = bus_space_read_1(sc->sc_bustag, sc->sc_mr, QE_MRI_PHYCC);
1127 	if ((phycc & QE_MR_PHYCC_DLNKTST) == 0) {
1128 		ifmr->ifm_status |= IFM_AVALID;
1129 		if (phycc & QE_MR_PHYCC_LNKFL)
1130 			ifmr->ifm_status &= ~IFM_ACTIVE;
1131 		else
1132 			ifmr->ifm_status |= IFM_ACTIVE;
1133 	}
1134 }
1135 
1136 /*
1137  * Set media options.
1138  */
1139 int
1140 qe_ifmedia_upd(ifp)
1141 	struct ifnet *ifp;
1142 {
1143 	struct qe_softc *sc = ifp->if_softc;
1144 	uint64_t media = sc->sc_ifmedia.ifm_media;
1145 
1146 	if (IFM_TYPE(media) != IFM_ETHER)
1147 		return (EINVAL);
1148 
1149 	if (IFM_SUBTYPE(media) != IFM_10_T)
1150 		return (EINVAL);
1151 
1152 	return (0);
1153 }
1154