1 /* $NetBSD: qe.c,v 1.28 2002/10/02 16:52:42 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Paul Kranenburg. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /* 40 * Copyright (c) 1998 Jason L. Wright. 41 * All rights reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. The name of the authors may not be used to endorse or promote products 52 * derived from this software without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
57 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 64 */ 65 66 /* 67 * Driver for the SBus qec+qe QuadEthernet board. 68 * 69 * This driver was written using the AMD MACE Am79C940 documentation, some 70 * ideas gleaned from the S/Linux driver for this card, Solaris header files, 71 * and a loan of a card from Paul Southworth of the Internet Engineering 72 * Group (www.ieng.com). 73 */ 74 75 #include <sys/cdefs.h> 76 __KERNEL_RCSID(0, "$NetBSD: qe.c,v 1.28 2002/10/02 16:52:42 thorpej Exp $"); 77 78 #define QEDEBUG 79 80 #include "opt_ddb.h" 81 #include "opt_inet.h" 82 #include "opt_ccitt.h" 83 #include "opt_llc.h" 84 #include "opt_ns.h" 85 #include "bpfilter.h" 86 #include "rnd.h" 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/kernel.h> 91 #include <sys/errno.h> 92 #include <sys/ioctl.h> 93 #include <sys/mbuf.h> 94 #include <sys/socket.h> 95 #include <sys/syslog.h> 96 #include <sys/device.h> 97 #include <sys/malloc.h> 98 #if NRND > 0 99 #include <sys/rnd.h> 100 #endif 101 102 #include <net/if.h> 103 #include <net/if_dl.h> 104 #include <net/if_types.h> 105 #include <net/netisr.h> 106 #include <net/if_media.h> 107 #include <net/if_ether.h> 108 109 #ifdef INET 110 #include <netinet/in.h> 111 #include <netinet/if_inarp.h> 112 #include <netinet/in_systm.h> 113 #include <netinet/in_var.h> 114 #include <netinet/ip.h> 115 #endif 116 117 #ifdef NS 118 #include <netns/ns.h> 119 #include <netns/ns_if.h> 120 #endif 121 122 #if NBPFILTER > 0 123 #include <net/bpf.h> 124 #include 
<net/bpfdesc.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/sbus/sbusvar.h>
#include <dev/sbus/qecreg.h>
#include <dev/sbus/qecvar.h>
#include <dev/sbus/qereg.h>

/*
 * Per-channel softc: one MACE (Am79C940) ethernet channel attached
 * below a QEC controller.
 */
struct qe_softc {
	struct device	sc_dev;		/* base device */
	struct sbusdev	sc_sd;		/* sbus device */
	bus_space_tag_t	sc_bustag;	/* bus & dma tags */
	bus_dma_tag_t	sc_dmatag;	/* DMA tag (from sbus attach args) */
	bus_dmamap_t	sc_dmamap;	/* DMA map for descriptor/buffer area */
	struct ethercom	sc_ethercom;	/* ethernet common; contains ec_if */
	struct ifmedia	sc_ifmedia;	/* interface media */

	struct qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t sc_qr;	/* QEC registers */
	bus_space_handle_t sc_mr;	/* MACE registers */
	bus_space_handle_t sc_cr;	/* channel registers */

	int	sc_channel;		/* channel number (from PROM "channel#") */
	u_int	sc_rev;			/* board revision (PROM "mace-version") */

	int	sc_burst;		/* burst size, copied from QEC parent */

	struct qec_ring	sc_rb;		/* Packet Ring Buffer */

	/* MAC address */
	u_int8_t sc_enaddr[6];

#ifdef QEDEBUG
	int	sc_debug;		/* verbose interrupt logging; tracks IFF_DEBUG */
#endif
};

/* Autoconfiguration glue */
int	qematch __P((struct device *, struct cfdata *, void *));
void	qeattach __P((struct device *, struct device *, void *));

/* Interface entry points */
void	qeinit __P((struct qe_softc *));
void	qestart __P((struct ifnet *));
void	qestop __P((struct qe_softc *));
void	qewatchdog __P((struct ifnet *));
int	qeioctl __P((struct ifnet *, u_long, caddr_t));
void	qereset __P((struct qe_softc *));

/* Interrupt handling */
int	qeintr __P((void *));
int	qe_eint __P((struct qe_softc *, u_int32_t));
int	qe_rint __P((struct qe_softc *));
int	qe_tint __P((struct qe_softc *));
void	qe_mcreset __P((struct qe_softc *));

/* Ring-buffer copy helpers */
static int	qe_put __P((struct qe_softc *, int, struct mbuf *));
static void	qe_read __P((struct qe_softc *, int, int));
static struct mbuf	*qe_get __P((struct qe_softc *, int, int));

/* ifmedia callbacks */
void	qe_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
int	qe_ifmedia_upd
__P((struct ifnet *));

CFATTACH_DECL(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

/*
 * Match: accept any node whose PROM name equals our driver name.
 */
int
qematch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct sbus_attach_args *sa = aux;

	return (strcmp(cf->cf_name, sa->sa_name) == 0);
}

/*
 * Attach one qe channel: map the channel and MACE register sets,
 * allocate and DMA-load the descriptor/packet buffer area, set up
 * media choices and attach the network interface.
 */
void
qeattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = (struct qec_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;
	extern void myetheraddr __P((u_char *));

	/* We need both the channel and the MACE register sets */
	if (sa->sa_nreg < 2) {
		printf("%s: only %d register sets\n",
			self->dv_xname, sa->sa_nreg);
		return;
	}

	/* Register set 0: per-channel registers */
	if (bus_space_map(sa->sa_bustag,
			  (bus_addr_t)BUS_ADDR(
				sa->sa_reg[0].oa_space,
				sa->sa_reg[0].oa_base),
			  (bus_size_t)sa->sa_reg[0].oa_size,
			  0, &sc->sc_cr) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	/* Register set 1: MACE chip registers */
	if (bus_space_map(sa->sa_bustag,
			  (bus_addr_t)BUS_ADDR(
				sa->sa_reg[1].oa_space,
				sa->sa_reg[1].oa_base),
			  (bus_size_t)sa->sa_reg[1].oa_size,
			  0, &sc->sc_mr) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	sc->sc_rev = PROM_getpropint(node, "mace-version", -1);
	printf(" rev %x", sc->sc_rev);

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;
	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_channel = PROM_getpropint(node, "channel#", -1);
	sc->sc_burst = qec->sc_burst;

	/* Quiesce the hardware before hooking up the interrupt */
	qestop(sc);

	/* Note: no interrupt level passed */
	(void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, 0, qeintr, sc);
	myetheraddr(sc->sc_enaddr);

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	/* One contiguous area: TX ring + RX ring + TX bufs + RX bufs */
	size =	QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
		QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
		sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
		sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
				    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: DMA map create error %d\n", self->dv_xname, error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
				    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer alloc error %d\n",
			self->dv_xname, error);
		return;
	}

	/* Map DMA buffer in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
				    &sc->sc_rb.rb_membase,
				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map error %d\n",
			self->dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
				    sc->sc_rb.rb_membase, size, NULL,
				    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer map load error %d\n",
			self->dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/* Initialize media properties */
	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia,
		    IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0),
		    0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
		    IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0),
		    0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
		    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0),
		    0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
	    IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	printf(" address %s\n", ether_sprintf(sc->sc_enaddr));
}

/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present,
 * we copy into clusters.
 */
static __inline__ struct mbuf *
qe_get(sc, idx, totlen)
	struct qe_softc *sc;
	int idx, totlen;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	caddr_t bp;

	/* Start of this packet's receive buffer in the DMA area */
	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	/* Offset the payload so the IP header ends up aligned */
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	while (totlen > 0) {
		/* First mbuf comes from the MGETHDR above; get more as needed */
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		bcopy(bp + boff, mtod(m, caddr_t), len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}

/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 */
__inline__ int
qe_put(sc, idx, m)
	struct qe_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct mbuf *n;
	int len, tlen = 0, boff = 0;
	caddr_t bp;

	/* Start of this packet's transmit buffer in the DMA area */
	bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

	/* Copy each mbuf in turn, freeing it as we go; returns total length */
	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		bcopy(mtod(m, caddr_t), bp+boff, len);
		boff += len;
		tlen += len;
		MFREE(m, n);
	}
	return (tlen);
}

/*
 * Pass a packet to the higher levels.
 */
__inline__ void
qe_read(sc, idx, len)
	struct qe_softc *sc;
	int idx, len;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	/* Sanity-check the length reported by the hardware */
	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
			ifp->if_xname, len);

		ifp->if_ierrors++;
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e.
that the output part of the interface is idle)
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_rb.rb_tdhead;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
				    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
				  QE_CR_CTRL_TWAKEUP);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		/* Ring full: stall output until qe_tint() reclaims slots */
		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}

/*
 * Stop the channel: software-reset the MACE, then reset the
 * per-channel QEC control logic, polling each for completion.
 */
void
qestop(sc)
	struct qe_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	bus_space_handle_t cr = sc->sc_cr;
	int n;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Stop the schwurst */
	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
			QE_MR_BIUCC_SWRST) == 0)
			break;
		DELAY(20);
	}

	/* then reset */
	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
			QE_CR_CTRL_RESET) == 0)
			break;
		DELAY(20);
	}
}

/*
 * Reset interface.
 */
void
qereset(sc)
	struct qe_softc *sc;
{
	int s;

	s = splnet();
	qestop(sc);
	qeinit(sc);
	splx(s);
}

/*
 * Watchdog: transmission did not complete in time; log it,
 * count an output error and reset the chip.
 */
void
qewatchdog(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	qereset(sc);
}

/*
 * Interrupt dispatch.
 */
int
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		char bits[64]; int i;
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		printf("qe%d: intr: qestat=%s\n", sc->sc_channel,
		    bitmask_snprintf(qestat, QE_CR_STAT_BITS, bits,
		    sizeof(bits)));

		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf("  m[%d]=%x,", i, bus_space_read_1(t, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug) {
			char bits[64];
			printf("qe%d: eint: qestat=%s\n", sc->sc_channel,
			    bitmask_snprintf(qestat, QE_CR_STAT_BITS, bits,
			    sizeof(bits)));
		}
#endif
		/* qe_eint() returns -1 after resetting the chip */
		r |= qe_eint(sc, qestat);
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (r);
}

/*
 * Transmit interrupt.
 */
int
qe_tint(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

	/* Reclaim every descriptor the hardware has finished with */
	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		/* Still owned by the hardware: stop here */
		if (txflags & QEC_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	sc->sc_rb.rb_tdtail = bix;

	/* Slots freed; try to queue more output */
	qestart(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt.
 */
int
qe_rint(sc)
	struct qe_softc *sc;
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		/* Trim the trailing 4-byte frame checksum */
		len -= 4;
		qe_read(sc, bix, len);

		/* Hand the descriptor back to the hardware */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
			QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
			sc->sc_dev.dv_xname, bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}

/*
 * Error interrupt.
 */
int
qe_eint(sc, why)
	struct qe_softc *sc;
	u_int32_t why;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	/* r: any bit handled; rst: condition requires a chip reset */
	int r = 0, rst = 0;

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
		r |= 1;
		ifp->if_oerrors++;
	}

	if (why & QE_CR_STAT_CLOSS) {
		printf("%s: no carrier, link down?\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}


	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	/* Collision counter overflowed: 256 collisions went by */
	if (why & QE_CR_STAT_TCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	/* The remaining overflow counters are also 8-bit wrap counts */
	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (r == 0)
		printf("%s: unexpected interrupt error: %08x\n",
			sc->sc_dev.dv_xname, why);

	if (rst) {
		printf("%s: resetting...\n", sc->sc_dev.dv_xname);
		qereset(sc);
		/* -1 tells qeintr() the chip state was reinitialized */
		return (-1);
	}

	return (r);
}

/*
 * Ioctl handler: address assignment, up/down transitions,
 * multicast list maintenance and media selection.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				      sizeof(sc->sc_enaddr));
			/* Set new address. */
			qeinit(sc);
			break;
		    }
#endif /* NS */
		default:
			qeinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;

		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);

		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->sc_ethercom):
			ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_mcreset(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}


/*
 * Hardware initialization: program the QEC channel registers,
 * then the MACE, and mark the interface running.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* This channel's slice of the QEC local buffer memory */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
			  QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
			  QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
			  QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
			  QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}

/*
 * Reset multicast filter.
1133 */ 1134 void 1135 qe_mcreset(sc) 1136 struct qe_softc *sc; 1137 { 1138 struct ethercom *ec = &sc->sc_ethercom; 1139 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1140 bus_space_tag_t t = sc->sc_bustag; 1141 bus_space_handle_t mr = sc->sc_mr; 1142 struct ether_multi *enm; 1143 struct ether_multistep step; 1144 u_int32_t crc; 1145 u_int16_t hash[4]; 1146 u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0]; 1147 int i, j; 1148 1149 #if defined(SUN4U) || defined(__GNUC__) 1150 (void)&t; 1151 #endif 1152 1153 /* We also enable transmitter & receiver here */ 1154 maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV; 1155 1156 if (ifp->if_flags & IFF_PROMISC) { 1157 maccc |= QE_MR_MACCC_PROM; 1158 bus_space_write_1(t, mr, QE_MRI_MACCC, maccc); 1159 return; 1160 } 1161 1162 if (ifp->if_flags & IFF_ALLMULTI) { 1163 bus_space_write_1(t, mr, QE_MRI_IAC, 1164 QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR); 1165 bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8); 1166 bus_space_write_1(t, mr, QE_MRI_IAC, 0); 1167 bus_space_write_1(t, mr, QE_MRI_MACCC, maccc); 1168 return; 1169 } 1170 1171 hash[3] = hash[2] = hash[1] = hash[0] = 0; 1172 1173 ETHER_FIRST_MULTI(step, ec, enm); 1174 while (enm != NULL) { 1175 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1176 ETHER_ADDR_LEN) != 0) { 1177 /* 1178 * We must listen to a range of multicast 1179 * addresses. For now, just accept all 1180 * multicasts, rather than trying to set only 1181 * those filter bits needed to match the range. 1182 * (At this time, the only use of address 1183 * ranges is for IP multicast routing, for 1184 * which the range is big enough to require 1185 * all bits set.) 
1186 */ 1187 bus_space_write_1(t, mr, QE_MRI_IAC, 1188 QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR); 1189 bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8); 1190 bus_space_write_1(t, mr, QE_MRI_IAC, 0); 1191 ifp->if_flags |= IFF_ALLMULTI; 1192 break; 1193 } 1194 1195 crc = 0xffffffff; 1196 1197 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1198 octet = enm->enm_addrlo[i]; 1199 1200 for (j = 0; j < 8; j++) { 1201 if ((crc & 1) ^ (octet & 1)) { 1202 crc >>= 1; 1203 crc ^= MC_POLY_LE; 1204 } 1205 else 1206 crc >>= 1; 1207 octet >>= 1; 1208 } 1209 } 1210 1211 crc >>= 26; 1212 hash[crc >> 4] |= 1 << (crc & 0xf); 1213 ETHER_NEXT_MULTI(step, enm); 1214 } 1215 1216 bus_space_write_1(t, mr, QE_MRI_IAC, 1217 QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR); 1218 bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8); 1219 bus_space_write_1(t, mr, QE_MRI_IAC, 0); 1220 bus_space_write_1(t, mr, QE_MRI_MACCC, maccc); 1221 } 1222 1223 /* 1224 * Get current media settings. 1225 */ 1226 void 1227 qe_ifmedia_sts(ifp, ifmr) 1228 struct ifnet *ifp; 1229 struct ifmediareq *ifmr; 1230 { 1231 struct qe_softc *sc = ifp->if_softc; 1232 bus_space_tag_t t = sc->sc_bustag; 1233 bus_space_handle_t mr = sc->sc_mr; 1234 u_int8_t v; 1235 1236 #if defined(SUN4U) || defined(__GNUC__) 1237 (void)&t; 1238 #endif 1239 v = bus_space_read_1(t, mr, QE_MRI_PLSCC); 1240 1241 switch (bus_space_read_1(t, mr, QE_MRI_PLSCC) & QE_MR_PLSCC_PORTMASK) { 1242 case QE_MR_PLSCC_TP: 1243 ifmr->ifm_active = IFM_ETHER | IFM_10_T; 1244 break; 1245 case QE_MR_PLSCC_AUI: 1246 ifmr->ifm_active = IFM_ETHER | IFM_10_5; 1247 break; 1248 case QE_MR_PLSCC_GPSI: 1249 case QE_MR_PLSCC_DAI: 1250 /* ... */ 1251 break; 1252 } 1253 1254 v = bus_space_read_1(t, mr, QE_MRI_PHYCC); 1255 ifmr->ifm_status |= IFM_AVALID; 1256 if ((v & QE_MR_PHYCC_LNKFL) != 0) 1257 ifmr->ifm_status &= ~IFM_ACTIVE; 1258 else 1259 ifmr->ifm_status |= IFM_ACTIVE; 1260 1261 } 1262 1263 /* 1264 * Set media options. 
1265 */ 1266 int 1267 qe_ifmedia_upd(ifp) 1268 struct ifnet *ifp; 1269 { 1270 struct qe_softc *sc = ifp->if_softc; 1271 struct ifmedia *ifm = &sc->sc_ifmedia; 1272 bus_space_tag_t t = sc->sc_bustag; 1273 bus_space_handle_t mr = sc->sc_mr; 1274 int newmedia = ifm->ifm_media; 1275 u_int8_t plscc, phycc; 1276 1277 #if defined(SUN4U) || defined(__GNUC__) 1278 (void)&t; 1279 #endif 1280 if (IFM_TYPE(newmedia) != IFM_ETHER) 1281 return (EINVAL); 1282 1283 plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK; 1284 phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL; 1285 1286 if (IFM_SUBTYPE(newmedia) == IFM_AUTO) 1287 phycc |= QE_MR_PHYCC_ASEL; 1288 else if (IFM_SUBTYPE(newmedia) == IFM_10_T) 1289 plscc |= QE_MR_PLSCC_TP; 1290 else if (IFM_SUBTYPE(newmedia) == IFM_10_5) 1291 plscc |= QE_MR_PLSCC_AUI; 1292 1293 bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc); 1294 bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc); 1295 1296 return (0); 1297 } 1298