1 /* $NetBSD: qe.c,v 1.79 2022/09/25 18:03:04 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1998 Jason L. Wright.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. The name of the authors may not be used to endorse or promote products
45 * derived from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */
58
59 /*
60 * Driver for the SBus qec+qe QuadEthernet board.
61 *
62 * This driver was written using the AMD MACE Am79C940 documentation, some
63 * ideas gleaned from the S/Linux driver for this card, Solaris header files,
64 * and a loan of a card from Paul Southworth of the Internet Engineering
65 * Group (www.ieng.com).
66 */
67
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: qe.c,v 1.79 2022/09/25 18:03:04 thorpej Exp $");
70
71 #define QEDEBUG
72
73 #include "opt_ddb.h"
74 #include "opt_inet.h"
75
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/errno.h>
80 #include <sys/ioctl.h>
81 #include <sys/mbuf.h>
82 #include <sys/socket.h>
83 #include <sys/syslog.h>
84 #include <sys/device.h>
85
86 #include <net/if.h>
87 #include <net/if_dl.h>
88 #include <net/if_types.h>
89 #include <net/if_media.h>
90 #include <net/if_ether.h>
91 #include <net/bpf.h>
92
93 #ifdef INET
94 #include <netinet/in.h>
95 #include <netinet/if_inarp.h>
96 #include <netinet/in_systm.h>
97 #include <netinet/in_var.h>
98 #include <netinet/ip.h>
99 #endif
100
101 #include <sys/bus.h>
102 #include <sys/intr.h>
103 #include <machine/autoconf.h>
104
105 #include <dev/sbus/sbusvar.h>
106 #include <dev/sbus/qecreg.h>
107 #include <dev/sbus/qecvar.h>
108 #include <dev/sbus/qereg.h>
109
/*
 * Software state for one QE (MACE) ethernet channel hanging off a
 * QEC SBus controller.
 */
struct qe_softc {
	device_t	sc_dev;
	bus_space_tag_t	sc_bustag;	/* bus & DMA tags */
	bus_dma_tag_t	sc_dmatag;
	bus_dmamap_t	sc_dmamap;	/* maps descriptor rings + packet buffers */
	struct ethercom	sc_ethercom;	/* common ethernet state (contains ifnet) */
	struct ifmedia	sc_ifmedia;	/* interface media */

	struct qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t sc_qr;	/* QEC registers */
	bus_space_handle_t sc_mr;	/* MACE registers */
	bus_space_handle_t sc_cr;	/* channel registers */

	int	sc_channel;		/* channel number on the QEC */
	u_int	sc_rev;			/* board revision (mace-version property) */

	int	sc_burst;		/* DMA burst size, inherited from the QEC */

	struct qec_ring	sc_rb;		/* Packet Ring Buffer */

	/* MAC address */
	uint8_t sc_enaddr[6];

#ifdef QEDEBUG
	int	sc_debug;		/* verbose logging; tracks IFF_DEBUG */
#endif
};
138
/* Autoconfiguration entry points */
int	qematch(device_t, cfdata_t, void *);
void	qeattach(device_t, device_t, void *);

/* Interface/driver entry points */
void	qeinit(struct qe_softc *);
void	qestart(struct ifnet *);
void	qestop(struct qe_softc *);
void	qewatchdog(struct ifnet *);
int	qeioctl(struct ifnet *, u_long, void *);
void	qereset(struct qe_softc *);

/* Interrupt handling */
int	qeintr(void *);
int	qe_eint(struct qe_softc *, uint32_t);
int	qe_rint(struct qe_softc *);
int	qe_tint(struct qe_softc *);
void	qe_mcreset(struct qe_softc *);

/* Ring buffer copy helpers */
static int	qe_put(struct qe_softc *, int, struct mbuf *);
static void	qe_read(struct qe_softc *, int, int);
static struct mbuf	*qe_get(struct qe_softc *, int, int);

/* ifmedia callbacks */
void	qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int	qe_ifmedia_upd(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);
165
166 int
qematch(device_t parent,cfdata_t cf,void * aux)167 qematch(device_t parent, cfdata_t cf, void *aux)
168 {
169 struct sbus_attach_args *sa = aux;
170
171 return (strcmp(cf->cf_name, sa->sa_name) == 0);
172 }
173
174 void
qeattach(device_t parent,device_t self,void * aux)175 qeattach(device_t parent, device_t self, void *aux)
176 {
177 struct sbus_attach_args *sa = aux;
178 struct qec_softc *qec = device_private(parent);
179 struct qe_softc *sc = device_private(self);
180 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
181 int node = sa->sa_node;
182 bus_dma_tag_t dmatag = sa->sa_dmatag;
183 bus_dma_segment_t seg;
184 bus_size_t size;
185 int rseg, error;
186
187 sc->sc_dev = self;
188
189 if (sa->sa_nreg < 2) {
190 printf("%s: only %d register sets\n",
191 device_xname(self), sa->sa_nreg);
192 return;
193 }
194
195 if (bus_space_map(sa->sa_bustag,
196 (bus_addr_t)BUS_ADDR(
197 sa->sa_reg[0].oa_space,
198 sa->sa_reg[0].oa_base),
199 (bus_size_t)sa->sa_reg[0].oa_size,
200 0, &sc->sc_cr) != 0) {
201 aprint_error_dev(self, "cannot map registers\n");
202 return;
203 }
204
205 if (bus_space_map(sa->sa_bustag,
206 (bus_addr_t)BUS_ADDR(
207 sa->sa_reg[1].oa_space,
208 sa->sa_reg[1].oa_base),
209 (bus_size_t)sa->sa_reg[1].oa_size,
210 0, &sc->sc_mr) != 0) {
211 aprint_error_dev(self, "cannot map registers\n");
212 return;
213 }
214
215 sc->sc_rev = prom_getpropint(node, "mace-version", -1);
216 printf(" rev %x", sc->sc_rev);
217
218 sc->sc_bustag = sa->sa_bustag;
219 sc->sc_dmatag = sa->sa_dmatag;
220 sc->sc_qec = qec;
221 sc->sc_qr = qec->sc_regs;
222
223 sc->sc_channel = prom_getpropint(node, "channel#", -1);
224 sc->sc_burst = qec->sc_burst;
225
226 qestop(sc);
227
228 /* Note: no interrupt level passed */
229 (void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, qeintr, sc);
230 prom_getether(node, sc->sc_enaddr);
231
232 /*
233 * Allocate descriptor ring and buffers.
234 */
235
236 /* for now, allocate as many bufs as there are ring descriptors */
237 sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
238 sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;
239
240 size = QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
241 QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
242 sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
243 sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;
244
245 /* Get a DMA handle */
246 if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
247 BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
248 aprint_error_dev(self, "DMA map create error %d\n",
249 error);
250 return;
251 }
252
253 /* Allocate DMA buffer */
254 if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
255 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
256 aprint_error_dev(self, "DMA buffer alloc error %d\n",
257 error);
258 return;
259 }
260
261 /* Map DMA buffer in CPU addressable space */
262 if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
263 &sc->sc_rb.rb_membase,
264 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
265 aprint_error_dev(self, "DMA buffer map error %d\n",
266 error);
267 bus_dmamem_free(dmatag, &seg, rseg);
268 return;
269 }
270
271 /* Load the buffer */
272 if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
273 sc->sc_rb.rb_membase, size, NULL,
274 BUS_DMA_NOWAIT)) != 0) {
275 aprint_error_dev(self, "DMA buffer map load error %d\n",
276 error);
277 bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
278 bus_dmamem_free(dmatag, &seg, rseg);
279 return;
280 }
281 sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;
282
283 /* Initialize media properties */
284 sc->sc_ethercom.ec_ifmedia = &sc->sc_ifmedia;
285 ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
286 ifmedia_add(&sc->sc_ifmedia,
287 IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, 0),
288 0, NULL);
289 ifmedia_add(&sc->sc_ifmedia,
290 IFM_MAKEWORD(IFM_ETHER, IFM_10_5, 0, 0),
291 0, NULL);
292 ifmedia_add(&sc->sc_ifmedia,
293 IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0),
294 0, NULL);
295 ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);
296
297 memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
298 ifp->if_softc = sc;
299 ifp->if_start = qestart;
300 ifp->if_ioctl = qeioctl;
301 ifp->if_watchdog = qewatchdog;
302 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
303 IFQ_SET_READY(&ifp->if_snd);
304
305 /* Attach the interface. */
306 if_attach(ifp);
307 ether_ifattach(ifp, sc->sc_enaddr);
308
309 printf(" address %s\n", ether_sprintf(sc->sc_enaddr));
310 }
311
/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present,
 * we copy into clusters.
 */
static inline struct mbuf *
qe_get(struct qe_softc *sc, int idx, int totlen)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	uint8_t *bp;

	/* Source: the receive buffer belonging to ring slot `idx'. */
	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m_set_rcvif(m, ifp);
	m->m_pkthdr.len = totlen;
	/*
	 * Offset the data so that what follows the Ethernet header
	 * ends up on an ALIGN() boundary.
	 */
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	/* Copy `totlen' bytes into an mbuf chain, clusters for big chunks. */
	while (totlen > 0) {
		if (top) {
			/* Secondary mbufs carry no packet header. */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			/* On cluster failure, fall back to the plain MLEN. */
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = uimin(totlen, len);
		memcpy(mtod(m, void *), bp + boff, len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}
364
365 /*
366 * Routine to copy from mbuf chain to transmit buffer in
367 * network buffer memory.
368 */
369 inline int
qe_put(struct qe_softc * sc,int idx,struct mbuf * m)370 qe_put(struct qe_softc *sc, int idx, struct mbuf *m)
371 {
372 struct mbuf *n;
373 int len, tlen = 0, boff = 0;
374 uint8_t *bp;
375
376 bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;
377
378 for (; m; m = n) {
379 len = m->m_len;
380 if (len == 0) {
381 n = m_free(m);
382 continue;
383 }
384 memcpy(bp + boff, mtod(m, void *), len);
385 boff += len;
386 tlen += len;
387 n = m_free(m);
388 }
389 return (tlen);
390 }
391
392 /*
393 * Pass a packet to the higher levels.
394 */
395 inline void
qe_read(struct qe_softc * sc,int idx,int len)396 qe_read(struct qe_softc *sc, int idx, int len)
397 {
398 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
399 struct mbuf *m;
400
401 if (len <= sizeof(struct ether_header) ||
402 len > ETHERMTU + sizeof(struct ether_header)) {
403
404 printf("%s: invalid packet size %d; dropping\n",
405 ifp->if_xname, len);
406
407 if_statinc(ifp, if_ierrors);
408 return;
409 }
410
411 /*
412 * Pull packet off interface.
413 */
414 m = qe_get(sc, idx, len);
415 if (m == NULL) {
416 if_statinc(ifp, if_ierrors);
417 return;
418 }
419
420 /* Pass the packet up. */
421 if_percpuq_enqueue(ifp->if_percpuq, m);
422 }
423
424 /*
425 * Start output on interface.
426 * We make an assumption here:
427 * 1) that the current priority is set to splnet _before_ this code
428 * is called *and* is returned to the appropriate priority after
429 * return
430 */
431 void
qestart(struct ifnet * ifp)432 qestart(struct ifnet *ifp)
433 {
434 struct qe_softc *sc = ifp->if_softc;
435 struct qec_xd *txd = sc->sc_rb.rb_txd;
436 struct mbuf *m;
437 unsigned int bix, len;
438 unsigned int ntbuf = sc->sc_rb.rb_ntbuf;
439
440 if ((ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
441 return;
442
443 bix = sc->sc_rb.rb_tdhead;
444
445 while (sc->sc_rb.rb_td_nbusy < ntbuf) {
446 IFQ_DEQUEUE(&ifp->if_snd, m);
447 if (m == 0)
448 break;
449
450 /*
451 * If BPF is listening on this interface, let it see the
452 * packet before we commit it to the wire.
453 */
454 bpf_mtap(ifp, m, BPF_D_OUT);
455
456 /*
457 * Copy the mbuf chain into the transmit buffer.
458 */
459 len = qe_put(sc, bix, m);
460
461 /*
462 * Initialize transmit registers and start transmission
463 */
464 txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
465 (len & QEC_XD_LENGTH);
466 bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
467 QE_CR_CTRL_TWAKEUP);
468
469 if (++bix == QEC_XD_RING_MAXSIZE)
470 bix = 0;
471
472 sc->sc_rb.rb_td_nbusy++;
473 }
474
475 sc->sc_rb.rb_tdhead = bix;
476 }
477
478 void
qestop(struct qe_softc * sc)479 qestop(struct qe_softc *sc)
480 {
481 bus_space_tag_t t = sc->sc_bustag;
482 bus_space_handle_t mr = sc->sc_mr;
483 bus_space_handle_t cr = sc->sc_cr;
484 int n;
485
486 #if defined(SUN4U) || defined(__GNUC__)
487 (void)&t;
488 #endif
489 /* Stop the schwurst */
490 bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
491 for (n = 200; n > 0; n--) {
492 if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
493 QE_MR_BIUCC_SWRST) == 0)
494 break;
495 DELAY(20);
496 }
497
498 /* then reset */
499 bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
500 for (n = 200; n > 0; n--) {
501 if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
502 QE_CR_CTRL_RESET) == 0)
503 break;
504 DELAY(20);
505 }
506 }
507
/*
 * Reset interface: stop the hardware and reinitialize it, all at
 * splnet so the interrupt path cannot interleave.
 */
void
qereset(struct qe_softc *sc)
{
	const int s = splnet();

	qestop(sc);
	qeinit(sc);
	splx(s);
}
521
522 void
qewatchdog(struct ifnet * ifp)523 qewatchdog(struct ifnet *ifp)
524 {
525 struct qe_softc *sc = ifp->if_softc;
526
527 log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
528 if_statinc(ifp, if_oerrors);
529
530 qereset(sc);
531 }
532
/*
 * Interrupt dispatch.
 * The QEC posts one interrupt for all channels; decode the global
 * status word, and if this channel has anything pending read the
 * per-channel status and fan out to the error/tx/rx handlers.
 * Returns nonzero if the interrupt was (at least partly) ours.
 */
int
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	bus_space_tag_t t = sc->sc_bustag;
	uint32_t qecstat, qestat;
	int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel: 4 status bits per channel. */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		/* Dump decoded status plus the raw MACE register file. */
		char bits[64]; int i;
		bus_space_tag_t t1 = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		snprintb(bits, sizeof(bits), QE_CR_STAT_BITS, qestat);
		printf("qe%d: intr: qestat=%s\n", sc->sc_channel, bits);

		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf(" m[%d]=%x,", i, bus_space_read_1(t1, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug) {
			char bits[64];
			snprintb(bits, sizeof(bits), QE_CR_STAT_BITS, qestat);
			printf("qe%d: eint: qestat=%s\n", sc->sc_channel, bits);
		}
#endif
		r |= qe_eint(sc, qestat);
		/* qe_eint() returned -1: the chip was reset; stop here. */
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (r);
}
601
602 /*
603 * Transmit interrupt.
604 */
605 int
qe_tint(struct qe_softc * sc)606 qe_tint(struct qe_softc *sc)
607 {
608 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
609 unsigned int bix, txflags;
610
611 bix = sc->sc_rb.rb_tdtail;
612
613 for (;;) {
614 if (sc->sc_rb.rb_td_nbusy <= 0)
615 break;
616
617 txflags = sc->sc_rb.rb_txd[bix].xd_flags;
618
619 if (txflags & QEC_XD_OWN)
620 break;
621
622 if_statinc(ifp, if_opackets);
623
624 if (++bix == QEC_XD_RING_MAXSIZE)
625 bix = 0;
626
627 --sc->sc_rb.rb_td_nbusy;
628 }
629
630 sc->sc_rb.rb_tdtail = bix;
631
632 qestart(ifp);
633
634 if (sc->sc_rb.rb_td_nbusy == 0)
635 ifp->if_timer = 0;
636
637 return (1);
638 }
639
/*
 * Receive interrupt.
 * Drain completed receive descriptors: hand each frame to the stack
 * and immediately recycle its buffer back to the hardware.
 */
int
qe_rint(struct qe_softc *sc)
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		/* Stop at the first descriptor the hardware still owns. */
		if (len & QEC_XD_OWN)
			break;

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		/* Drop the trailing 4 bytes (Ethernet FCS). */
		len -= 4;
		qe_read(sc, bix, len);

		/*
		 * Re-arm the descriptor `nrbuf' slots ahead with a
		 * full-sized buffer; buffer assignment wraps modulo
		 * nrbuf (see qe_get()), so that descriptor re-uses the
		 * buffer just consumed.
		 */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
		    QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	/* `len' here holds the raw flags of the still-owned descriptor. */
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
		    device_xname(sc->sc_dev), bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}
688
/*
 * Error interrupt.
 * Decode the channel error status bits in `why', log each condition
 * and update the interface statistics.  Returns nonzero if any known
 * condition was handled; returns -1 when the error was severe enough
 * that the chip was reset here (callers must not touch it further).
 */
int
qe_eint(struct qe_softc *sc, uint32_t why)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	device_t self = sc->sc_dev;
	const char *xname = device_xname(self);
	int r = 0, rst = 0;	/* r: recognized; rst: reset required */

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	/* Transmit-side conditions */
	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", xname);
		r |= 1;
		if_statinc_ref(nsr, if_oerrors);
	}

	if (why & QE_CR_STAT_CLOSS) {
		printf("%s: no carrier, link down?\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", xname);
		r |= 1;
	}

	/* The 8-bit hardware counters saturate/wrap at 256. */
	if (why & QE_CR_STAT_TCCOFLOW) {
		if_statadd_ref(nsr, if_collisions, 256);
		if_statadd_ref(nsr, if_oerrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx DMA parity error\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx DMA sbus error ack\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		rst = 1;
		r |= 1;
	}

	/* Receive-side conditions */
	if (why & QE_CR_STAT_RCCOFLOW) {
		if_statadd_ref(nsr, if_collisions, 256);
		if_statadd_ref(nsr, if_ierrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		if_statadd_ref(nsr, if_ierrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		if_statadd_ref(nsr, if_ierrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		if_statinc_ref(nsr, if_collisions);
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		if_statadd_ref(nsr, if_ierrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		if_statadd_ref(nsr, if_ierrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx DMA parity error\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx DMA sbus error ack\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
		rst = 1;
	}

	IF_STAT_PUTREF(ifp);

	if (r == 0)
		aprint_error_dev(self, "unexpected interrupt error: %08x\n",
			why);

	if (rst) {
		printf("%s: resetting...\n", xname);
		qereset(sc);
		return (-1);
	}

	return (r);
}
866
/*
 * Process an interface ioctl: address assignment, up/down and flag
 * changes, and multicast list maintenance.  Runs at splnet for the
 * duration.  Returns 0 or an errno.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		/* Bring the interface up before assigning the address. */
		ifp->if_flags |= IFF_UP;
		qeinit(sc);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
			break;
		default:
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
			break;
		}
#ifdef QEDEBUG
		/* Mirror IFF_DEBUG so the interrupt path can log verbosely. */
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_mcreset(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	splx(s);
	return (error);
}
946
947
/*
 * Initialize the hardware: set up descriptor rings, program the
 * per-channel QEC registers and the MACE, load the station address,
 * apply media settings and (via qe_mcreset()) enable the transmitter
 * and receiver.  Runs at splnet.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	uint32_t qecaddr;
	uint8_t *ea;
	int s;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: give the chip the ring DMA addresses. */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (uint32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (uint32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* Carve this channel's slice out of the QEC local buffer memory. */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
	    QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
	    QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
	    QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
	    QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter; also enables transmitter & receiver. */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	splx(s);
}
1043
1044 /*
1045 * Reset multicast filter.
1046 */
1047 void
qe_mcreset(struct qe_softc * sc)1048 qe_mcreset(struct qe_softc *sc)
1049 {
1050 struct ethercom *ec = &sc->sc_ethercom;
1051 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1052 bus_space_tag_t t = sc->sc_bustag;
1053 bus_space_handle_t mr = sc->sc_mr;
1054 struct ether_multi *enm;
1055 struct ether_multistep step;
1056 uint32_t crc;
1057 uint16_t hash[4];
1058 uint8_t octet, maccc, *ladrp = (uint8_t *)&hash[0];
1059 int i;
1060
1061 #if defined(SUN4U) || defined(__GNUC__)
1062 (void)&t;
1063 #endif
1064
1065 /* We also enable transmitter & receiver here */
1066 maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;
1067
1068 if (ifp->if_flags & IFF_PROMISC) {
1069 maccc |= QE_MR_MACCC_PROM;
1070 bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
1071 return;
1072 }
1073
1074 if (ifp->if_flags & IFF_ALLMULTI) {
1075 bus_space_write_1(t, mr, QE_MRI_IAC,
1076 QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
1077 bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
1078 bus_space_write_1(t, mr, QE_MRI_IAC, 0);
1079 bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
1080 return;
1081 }
1082
1083 hash[3] = hash[2] = hash[1] = hash[0] = 0;
1084
1085 ETHER_LOCK(ec);
1086 ETHER_FIRST_MULTI(step, ec, enm);
1087 while (enm != NULL) {
1088 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1089 ETHER_ADDR_LEN) != 0) {
1090 /*
1091 * We must listen to a range of multicast
1092 * addresses. For now, just accept all
1093 * multicasts, rather than trying to set only
1094 * those filter bits needed to match the range.
1095 * (At this time, the only use of address
1096 * ranges is for IP multicast routing, for
1097 * which the range is big enough to require
1098 * all bits set.)
1099 */
1100 bus_space_write_1(t, mr, QE_MRI_IAC,
1101 QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
1102 bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
1103 bus_space_write_1(t, mr, QE_MRI_IAC, 0);
1104 ifp->if_flags |= IFF_ALLMULTI;
1105 break;
1106 }
1107
1108 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1109 crc >>= 26;
1110 hash[crc >> 4] |= 1 << (crc & 0xf);
1111 ETHER_NEXT_MULTI(step, enm);
1112 }
1113 ETHER_UNLOCK(ec);
1114
1115 /* We need to byte-swap the hash before writing to the chip. */
1116 for (i = 0; i < 7; i += 2) {
1117 octet = ladrp[i];
1118 ladrp[i] = ladrp[i + 1];
1119 ladrp[i + 1] = octet;
1120 }
1121 bus_space_write_1(t, mr, QE_MRI_IAC,
1122 QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
1123 bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
1124 bus_space_write_1(t, mr, QE_MRI_IAC, 0);
1125 bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
1126 }
1127
1128 /*
1129 * Get current media settings.
1130 */
1131 void
qe_ifmedia_sts(struct ifnet * ifp,struct ifmediareq * ifmr)1132 qe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1133 {
1134 struct qe_softc *sc = ifp->if_softc;
1135 bus_space_tag_t t = sc->sc_bustag;
1136 bus_space_handle_t mr = sc->sc_mr;
1137 uint8_t v;
1138
1139 #if defined(SUN4U) || defined(__GNUC__)
1140 (void)&t;
1141 #endif
1142 v = bus_space_read_1(t, mr, QE_MRI_PLSCC);
1143
1144 switch (bus_space_read_1(t, mr, QE_MRI_PLSCC) & QE_MR_PLSCC_PORTMASK) {
1145 case QE_MR_PLSCC_TP:
1146 ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1147 break;
1148 case QE_MR_PLSCC_AUI:
1149 ifmr->ifm_active = IFM_ETHER | IFM_10_5;
1150 break;
1151 case QE_MR_PLSCC_GPSI:
1152 case QE_MR_PLSCC_DAI:
1153 /* ... */
1154 break;
1155 }
1156
1157 v = bus_space_read_1(t, mr, QE_MRI_PHYCC);
1158 ifmr->ifm_status |= IFM_AVALID;
1159 if ((v & QE_MR_PHYCC_LNKFL) != 0)
1160 ifmr->ifm_status &= ~IFM_ACTIVE;
1161 else
1162 ifmr->ifm_status |= IFM_ACTIVE;
1163
1164 }
1165
1166 /*
1167 * Set media options.
1168 */
1169 int
qe_ifmedia_upd(struct ifnet * ifp)1170 qe_ifmedia_upd(struct ifnet *ifp)
1171 {
1172 struct qe_softc *sc = ifp->if_softc;
1173 struct ifmedia *ifm = &sc->sc_ifmedia;
1174 bus_space_tag_t t = sc->sc_bustag;
1175 bus_space_handle_t mr = sc->sc_mr;
1176 int newmedia = ifm->ifm_media;
1177 uint8_t plscc, phycc;
1178
1179 #if defined(SUN4U) || defined(__GNUC__)
1180 (void)&t;
1181 #endif
1182 if (IFM_TYPE(newmedia) != IFM_ETHER)
1183 return (EINVAL);
1184
1185 plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK;
1186 phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL;
1187
1188 if (IFM_SUBTYPE(newmedia) == IFM_AUTO)
1189 phycc |= QE_MR_PHYCC_ASEL;
1190 else if (IFM_SUBTYPE(newmedia) == IFM_10_T)
1191 plscc |= QE_MR_PLSCC_TP;
1192 else if (IFM_SUBTYPE(newmedia) == IFM_10_5)
1193 plscc |= QE_MR_PLSCC_AUI;
1194
1195 bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc);
1196 bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc);
1197
1198 return (0);
1199 }
1200