1 /* $OpenBSD: qe.c,v 1.23 2008/11/28 02:44:18 brad Exp $ */ 2 /* $NetBSD: qe.c,v 1.16 2001/03/30 17:30:18 christos Exp $ */ 3 4 /*- 5 * Copyright (c) 1999 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Paul Kranenburg. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1998 Jason L. Wright. 35 * All rights reserved. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 1. 
Redistributions of source code must retain the above copyright 41 * notice, this list of conditions and the following disclaimer. 42 * 2. Redistributions in binary form must reproduce the above copyright 43 * notice, this list of conditions and the following disclaimer in the 44 * documentation and/or other materials provided with the distribution. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 47 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 48 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 49 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 50 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 51 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 52 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 53 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 54 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 55 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 56 */ 57 58 /* 59 * Driver for the SBus qec+qe QuadEthernet board. 60 * 61 * This driver was written using the AMD MACE Am79C940 documentation, some 62 * ideas gleaned from the S/Linux driver for this card, Solaris header files, 63 * and a loan of a card from Paul Southworth of the Internet Engineering 64 * Group (www.ieng.com). 
 */

#define QEDEBUG

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/sbus/sbusvar.h>
#include <dev/sbus/qecreg.h>
#include <dev/sbus/qecvar.h>
#include <dev/sbus/qereg.h>

/*
 * Per-channel softc: one qe instance is one of the (up to four)
 * MACE channels hanging off a QEC parent.
 */
struct qe_softc {
	struct device sc_dev;		/* base device */
	bus_space_tag_t sc_bustag;	/* bus & dma tags */
	bus_dma_tag_t sc_dmatag;
	bus_dmamap_t sc_dmamap;		/* map for the descriptor/buffer area */
	struct arpcom sc_arpcom;	/* Ethernet common part */
	struct ifmedia sc_ifmedia;	/* interface media */

	struct qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t sc_qr;	/* QEC registers */
	bus_space_handle_t sc_mr;	/* MACE registers */
	bus_space_handle_t sc_cr;	/* channel registers */

	int sc_channel;			/* channel number */
	u_int sc_rev;			/* board revision */

	int sc_burst;			/* DMA burst size, from QEC parent */

	struct qec_ring sc_rb;		/* Packet Ring Buffer */

#ifdef QEDEBUG
	int sc_debug;			/* verbose interrupt logging (IFF_DEBUG) */
#endif
};

/* autoconf glue */
int qematch(struct device *, void *, void *);
void qeattach(struct device *, struct device *, void *);

/* interface operations */
void qeinit(struct qe_softc *);
void qestart(struct ifnet *);
void qestop(struct qe_softc *);
void qewatchdog(struct ifnet *);
int qeioctl(struct ifnet *, u_long, caddr_t);
void qereset(struct qe_softc *);

/* interrupt dispatch */
int qeintr(void *);
int qe_eint(struct qe_softc *, u_int32_t);
int qe_rint(struct qe_softc *);
int qe_tint(struct qe_softc *);
void qe_mcreset(struct qe_softc *);

/* ring-buffer copy helpers */
int qe_put(struct qe_softc *, int, struct mbuf *);
void qe_read(struct qe_softc *, int, int);
struct mbuf *qe_get(struct qe_softc *, int, int);

/* ifmedia callbacks */
void qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int qe_ifmedia_upd(struct ifnet *);

struct cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

struct cfdriver qe_cd = {
	NULL, "qe", DV_IFNET
};

/*
 * Match a "qe" node under the QEC by name.
 */
int
qematch(parent, vcf, aux)
	struct device *parent;
	void *vcf;
	void *aux;
{
	struct cfdata *cf = vcf;
	struct sbus_attach_args *sa = aux;

	return (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0);
}

/*
 * Attach one qe channel: map its two register sets, hook the interrupt,
 * allocate and DMA-load the descriptor-ring/buffer area, and attach the
 * network interface.
 */
void
qeattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = (struct qec_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;
	extern void myetheraddr(u_char *);

	/* Pass on the bus tags */
	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;

	/* We need both the channel (reg[0]) and MACE (reg[1]) register sets */
	if (sa->sa_nreg < 2) {
		printf("%s: only %d register sets\n",
		    self->dv_xname, sa->sa_nreg);
		return;
	}

	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[0].sbr_slot,
	    (bus_addr_t)sa->sa_reg[0].sbr_offset,
	    (bus_size_t)sa->sa_reg[0].sbr_size, 0, 0, &sc->sc_cr) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[1].sbr_slot,
	    (bus_addr_t)sa->sa_reg[1].sbr_offset,
	    (bus_size_t)sa->sa_reg[1].sbr_size, 0, 0, &sc->sc_mr) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	sc->sc_rev = getpropint(node, "mace-version", -1);
	printf(" rev %x", sc->sc_rev);

	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_channel = getpropint(node, "channel#", -1);
	sc->sc_burst = qec->sc_burst;

	/* Quiesce the hardware before hooking the interrupt */
	qestop(sc);

	/* Note: no interrupt level passed */
	if (bus_intr_establish(sa->sa_bustag, 0, IPL_NET, 0, qeintr, sc,
	    self->dv_xname) == NULL) {
		printf(": no interrupt established\n");
		return;
	}

	myetheraddr(sc->sc_arpcom.ac_enaddr);

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	/* One contiguous area: tx ring + rx ring + tx buffers + rx buffers */
	size =
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
	    sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: DMA map create error %d\n", self->dv_xname, error);
		return;
	}

	/*
	 * NOTE(review): the error paths below do not destroy sc_dmamap;
	 * presumably acceptable since a failed attach leaves the device
	 * unconfigured — confirm against other sbus drivers.
	 */

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer alloc error %d\n",
		    self->dv_xname, error);
		return;
	}

	/* Map DMA buffer in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map error %d\n",
		    self->dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer map load error %d\n",
		    self->dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/* Initialize media properties */
	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0), 0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0), 0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0), 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
	    IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
}

/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs. When full cluster sized units are present,
 * we copy into clusters.
 */
struct mbuf *
qe_get(sc, idx, totlen)
	struct qe_softc *sc;
	int idx, totlen;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	caddr_t bp;

	/* Source: the receive buffer slot for ring descriptor 'idx' */
	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	/* Offset the payload so the IP header ends up aligned */
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	/*
	 * Copy 'totlen' bytes into a chain; the first mbuf is the header
	 * mbuf prepared above ('len' carries its remaining capacity),
	 * subsequent ones are plain mbufs, upgraded to clusters when
	 * enough data remains.
	 */
	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			/* On cluster failure, fall back to the MLEN mbuf */
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		bcopy(bp + boff, mtod(m, caddr_t), len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}

/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 */
__inline__ int
qe_put(sc, idx, m)
	struct qe_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct mbuf *n;
	int len, tlen = 0, boff = 0;
	caddr_t bp;

	/* Destination: the transmit buffer slot for ring descriptor 'idx' */
	bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

	/* Copy and free each mbuf in turn; returns total bytes copied */
	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		bcopy(mtod(m, caddr_t), bp+boff, len);
		boff += len;
		tlen += len;
		MFREE(m, n);
	}
	return (tlen);
}

/*
 * Pass a packet to the higher levels.
 */
__inline__ void
qe_read(sc, idx, len)
	struct qe_softc *sc;
	int idx, len;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;

	/* Sanity-check the hardware-reported length before copying */
	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
		    ifp->if_xname, len);

		ifp->if_ierrors++;
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
	/* Pass the packet up. */
	ether_input_mbuf(ifp, m);
}

/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e. that the output part of the interface is idle)
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_rb.rb_tdhead;

	/* Queue packets until the send queue empties or the ring fills */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
		    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
		    QE_CR_CTRL_TWAKEUP);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		/* Ring full: stop queueing until qe_tint() reclaims slots */
		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}

/*
 * Stop the hardware: software-reset the MACE, then reset the channel.
 * Each reset is polled until the self-clearing bit drops (bounded spin).
 */
void
qestop(sc)
	struct qe_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	bus_space_handle_t cr = sc->sc_cr;
	int n;

	/* Stop the schwurst */
	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
		    QE_MR_BIUCC_SWRST) == 0)
			break;
		DELAY(20);
	}

	/* then reset */
	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
		    QE_CR_CTRL_RESET) == 0)
			break;
		DELAY(20);
	}
}

/*
 * Reset interface.
 */
void
qereset(sc)
	struct qe_softc *sc;
{
	int s;

	s = splnet();
	qestop(sc);
	qeinit(sc);
	splx(s);
}

/*
 * Watchdog: a queued transmit never completed; log it and reset.
 */
void
qewatchdog(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	qereset(sc);
}

/*
 * Interrupt dispatch.
 */
int
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);	/* not ours: return 0, unclaimed */

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		/* NOTE: 't' here deliberately shadows the outer 't' */
		int i;
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		printf("qe%d: intr: qestat=%b\n", sc->sc_channel,
		    qestat, QE_CR_STAT_BITS);

		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf(" m[%d]=%x,", i, bus_space_read_1(t, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug)
			printf("qe%d: eint: qestat=%b\n", sc->sc_channel,
			    qestat, QE_CR_STAT_BITS);
#endif
		/* qe_eint() returns -1 when it reset the chip; stop here */
		r |= qe_eint(sc, qestat);
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	/* Our channel had status bits set, so claim the interrupt */
	return (1);
}

/*
 * Transmit interrupt.
630 */ 631 int 632 qe_tint(sc) 633 struct qe_softc *sc; 634 { 635 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 636 unsigned int bix, txflags; 637 638 bix = sc->sc_rb.rb_tdtail; 639 640 for (;;) { 641 if (sc->sc_rb.rb_td_nbusy <= 0) 642 break; 643 644 txflags = sc->sc_rb.rb_txd[bix].xd_flags; 645 646 if (txflags & QEC_XD_OWN) 647 break; 648 649 ifp->if_flags &= ~IFF_OACTIVE; 650 ifp->if_opackets++; 651 652 if (++bix == QEC_XD_RING_MAXSIZE) 653 bix = 0; 654 655 --sc->sc_rb.rb_td_nbusy; 656 } 657 658 if (sc->sc_rb.rb_td_nbusy == 0) 659 ifp->if_timer = 0; 660 661 if (sc->sc_rb.rb_tdtail != bix) { 662 sc->sc_rb.rb_tdtail = bix; 663 if (ifp->if_flags & IFF_OACTIVE) { 664 ifp->if_flags &= ~IFF_OACTIVE; 665 qestart(ifp); 666 } 667 } 668 669 return (1); 670 } 671 672 /* 673 * Receive interrupt. 674 */ 675 int 676 qe_rint(sc) 677 struct qe_softc *sc; 678 { 679 struct qec_xd *xd = sc->sc_rb.rb_rxd; 680 unsigned int bix, len; 681 unsigned int nrbuf = sc->sc_rb.rb_nrbuf; 682 #ifdef QEDEBUG 683 int npackets = 0; 684 #endif 685 686 bix = sc->sc_rb.rb_rdtail; 687 688 /* 689 * Process all buffers with valid data. 690 */ 691 for (;;) { 692 len = xd[bix].xd_flags; 693 if (len & QEC_XD_OWN) 694 break; 695 696 #ifdef QEDEBUG 697 npackets++; 698 #endif 699 700 len &= QEC_XD_LENGTH; 701 len -= 4; 702 qe_read(sc, bix, len); 703 704 /* ... */ 705 xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags = 706 QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH); 707 708 if (++bix == QEC_XD_RING_MAXSIZE) 709 bix = 0; 710 } 711 #ifdef QEDEBUG 712 if (npackets == 0 && sc->sc_debug) 713 printf("%s: rint: no packets; rb index %d; status 0x%x\n", 714 sc->sc_dev.dv_xname, bix, len); 715 #endif 716 717 sc->sc_rb.rb_rdtail = bix; 718 719 return (1); 720 } 721 722 /* 723 * Error interrupt. 
 */
int
qe_eint(sc, why)
	struct qe_softc *sc;
	u_int32_t why;
{
	/*
	 * Decode the error bits in 'why', updating interface error
	 * counters.  Returns 1 if any bit was recognized, 0 if none,
	 * and -1 after resetting the chip for fatal conditions (the
	 * caller must then stop processing this interrupt).
	 */
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int r = 0, rst = 0;	/* r: handled; rst: reset required */

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
		r |= 1;
		ifp->if_oerrors++;
	}

	/* carrier loss: counted silently (too noisy to log) */
	if (why & QE_CR_STAT_CLOSS) {
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	/* counter-overflow bits: each overflow represents 256 events */
	if (why & QE_CR_STAT_TCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (r == 0)
		printf("%s: unexpected interrupt error: %08x\n",
		    sc->sc_dev.dv_xname, why);

	if (rst) {
		printf("%s: resetting...\n", sc->sc_dev.dv_xname);
		qereset(sc);
		return (-1);
	}

	return (r);
}

/*
 * Handle ioctl requests: address/flags changes, media, and the
 * generic Ethernet ioctls; reprogram the multicast filter on
 * ENETRESET from the lower layers.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif /* INET */
		default:
			qeinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			qe_mcreset(sc);
		error = 0;
	}

	splx(s);
	return (error);
}


/*
 * (Re)initialize the channel and MACE and mark the interface running.
 * The register write order below is significant; see the sbus
 * turnaround note inline.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* This channel's slice of the QEC's local buffer memory */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/*
	 * When switching from mace<->qec always guarantee an sbus
	 * turnaround (if last op was read, perform a dummy write, and
	 * vice versa).
	 */
	bus_space_read_4(t, cr, QE_CRI_QMASK);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
	    QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
	    QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
	    QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
	    QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter (also enables rx/tx in MACCC) */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}

/*
 * Reset multicast filter.
 */
void
qe_mcreset(sc)
	struct qe_softc *sc;
{
	/*
	 * Program the MACE logical (multicast hash) address filter from
	 * the current multicast list, handling IFF_PROMISC and
	 * IFF_ALLMULTI specially.  Also (re)enables the transmitter and
	 * receiver via MACCC, so qeinit() relies on being last to call us.
	 */
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];	/* 64-bit logical address filter */
	u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
	int i, j;

	/* We also enable transmitter & receiver here */
	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

	if (ifp->if_flags & IFF_PROMISC) {
		maccc |= QE_MR_MACCC_PROM;
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	if (ifp->if_flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every filter bit */
		bus_space_write_1(t, mr, QE_MRI_IAC,
		    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast
			 * addresses. For now, just accept all
			 * multicasts, rather than trying to set only
			 * those filter bits needed to match the range.
			 * (At this time, the only use of address
			 * ranges is for IP multicast routing, for
			 * which the range is big enough to require
			 * all bits set.)
			 */
			bus_space_write_1(t, mr, QE_MRI_IAC,
			    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
			bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
			bus_space_write_1(t, mr, QE_MRI_IAC, 0);
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}

		/* Bitwise little-endian CRC over the station address */
		crc = 0xffffffff;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			octet = enm->enm_addrlo[i];

			for (j = 0; j < 8; j++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				}
				else
					crc >>= 1;
				octet >>= 1;
			}
		}

		/* Top 6 bits of the CRC select one of 64 filter bits */
		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}

/*
 * Get current media settings.
 */
void
qe_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct qe_softc *sc = ifp->if_softc;
	u_int8_t phycc;

	ifmr->ifm_active = IFM_ETHER | IFM_10_T;
	phycc = bus_space_read_1(sc->sc_bustag, sc->sc_mr, QE_MRI_PHYCC);
	/* Link status is only valid when link testing is enabled */
	if ((phycc & QE_MR_PHYCC_DLNKTST) == 0) {
		ifmr->ifm_status |= IFM_AVALID;
		if (phycc & QE_MR_PHYCC_LNKFL)
			ifmr->ifm_status &= ~IFM_ACTIVE;
		else
			ifmr->ifm_status |= IFM_ACTIVE;
	}
}

/*
 * Set media options.
 * Only 10baseT is actually programmable here; anything else is rejected.
 */
int
qe_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;
	int media = sc->sc_ifmedia.ifm_media;

	if (IFM_TYPE(media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(media) != IFM_10_T)
		return (EINVAL);

	return (0);
}