1 /* $NetBSD: if_sq.c,v 1.15 2002/11/09 18:53:25 thorpej Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Rafal K. Boni 5 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * Portions of this code are derived from software contributed to The 9 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace 10 * Simulation Facility, NASA Ames Research Center. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 #include "bpfilter.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/device.h> 40 #include <sys/callout.h> 41 #include <sys/mbuf.h> 42 #include <sys/malloc.h> 43 #include <sys/kernel.h> 44 #include <sys/socket.h> 45 #include <sys/ioctl.h> 46 #include <sys/errno.h> 47 #include <sys/syslog.h> 48 49 #include <uvm/uvm_extern.h> 50 51 #include <machine/endian.h> 52 53 #include <net/if.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 #include <net/if_ether.h> 57 58 #if NBPFILTER > 0 59 #include <net/bpf.h> 60 #endif 61 62 #include <machine/bus.h> 63 #include <machine/intr.h> 64 65 #include <dev/ic/seeq8003reg.h> 66 67 #include <sgimips/hpc/sqvar.h> 68 #include <sgimips/hpc/hpcvar.h> 69 #include <sgimips/hpc/hpcreg.h> 70 71 #include <dev/arcbios/arcbios.h> 72 #include <dev/arcbios/arcbiosvar.h> 73 74 #define static 75 76 /* 77 * Short TODO list: 78 * (1) Do counters for bad-RX packets. 79 * (2) Allow multi-segment transmits, instead of copying to a single, 80 * contiguous mbuf. 81 * (3) Verify sq_stop() turns off enough stuff; I was still getting 82 * seeq interrupts after sq_stop(). 83 * (4) Fix up printfs in driver (most should only fire ifdef SQ_DEBUG 84 * or something similar. 85 * (5) Implement EDLC modes: especially packet auto-pad and simplex 86 * mode. 87 * (6) Should the driver filter out its own transmissions in non-EDLC 88 * mode? 89 * (7) Multicast support -- multicast filter, address management, ... 90 * (8) Deal with RB0 (recv buffer overflow) on reception. Will need 91 * to figure out if RB0 is read-only as stated in one spot in the 92 * HPC spec or read-write (ie, is the 'write a one to clear it') 93 * the correct thing? 
94 */ 95 96 static int sq_match(struct device *, struct cfdata *, void *); 97 static void sq_attach(struct device *, struct device *, void *); 98 static int sq_init(struct ifnet *); 99 static void sq_start(struct ifnet *); 100 static void sq_stop(struct ifnet *, int); 101 static void sq_watchdog(struct ifnet *); 102 static int sq_ioctl(struct ifnet *, u_long, caddr_t); 103 104 static void sq_set_filter(struct sq_softc *); 105 static int sq_intr(void *); 106 static int sq_rxintr(struct sq_softc *); 107 static int sq_txintr(struct sq_softc *); 108 static void sq_reset(struct sq_softc *); 109 static int sq_add_rxbuf(struct sq_softc *, int); 110 static void sq_dump_buffer(u_int32_t addr, u_int32_t len); 111 112 static void enaddr_aton(const char*, u_int8_t*); 113 114 /* Actions */ 115 #define SQ_RESET 1 116 #define SQ_ADD_TO_DMA 2 117 #define SQ_START_DMA 3 118 #define SQ_DONE_DMA 4 119 #define SQ_RESTART_DMA 5 120 #define SQ_TXINTR_ENTER 6 121 #define SQ_TXINTR_EXIT 7 122 #define SQ_TXINTR_BUSY 8 123 124 struct sq_action_trace { 125 int action; 126 int bufno; 127 int status; 128 int freebuf; 129 }; 130 131 #define SQ_TRACEBUF_SIZE 100 132 int sq_trace_idx = 0; 133 struct sq_action_trace sq_trace[SQ_TRACEBUF_SIZE]; 134 135 void sq_trace_dump(struct sq_softc* sc); 136 137 #define SQ_TRACE(act, buf, stat, free) do { \ 138 sq_trace[sq_trace_idx].action = (act); \ 139 sq_trace[sq_trace_idx].bufno = (buf); \ 140 sq_trace[sq_trace_idx].status = (stat); \ 141 sq_trace[sq_trace_idx].freebuf = (free); \ 142 if (++sq_trace_idx == SQ_TRACEBUF_SIZE) { \ 143 memset(&sq_trace, 0, sizeof(sq_trace)); \ 144 sq_trace_idx = 0; \ 145 } \ 146 } while (0) 147 148 CFATTACH_DECL(sq, sizeof(struct sq_softc), 149 sq_match, sq_attach, NULL, NULL); 150 151 static int 152 sq_match(struct device *parent, struct cfdata *cf, void *aux) 153 { 154 struct hpc_attach_args *ha = aux; 155 156 if (strcmp(ha->ha_name, cf->cf_name) == 0) 157 return (1); 158 159 return (0); 160 } 161 162 static void 163 
sq_attach(struct device *parent, struct device *self, void *aux) 164 { 165 int i, err; 166 char* macaddr; 167 struct sq_softc *sc = (void *)self; 168 struct hpc_attach_args *haa = aux; 169 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 170 171 sc->sc_hpct = haa->ha_st; 172 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, 173 haa->ha_dmaoff, 174 HPC_ENET_REGS_SIZE, 175 &sc->sc_hpch)) != 0) { 176 printf(": unable to map HPC DMA registers, error = %d\n", err); 177 goto fail_0; 178 } 179 180 sc->sc_regt = haa->ha_st; 181 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, 182 haa->ha_devoff, 183 HPC_ENET_DEVREGS_SIZE, 184 &sc->sc_regh)) != 0) { 185 printf(": unable to map Seeq registers, error = %d\n", err); 186 goto fail_0; 187 } 188 189 sc->sc_dmat = haa->ha_dmat; 190 191 if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control), 192 PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 193 1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) { 194 printf(": unable to allocate control data, error = %d\n", err); 195 goto fail_0; 196 } 197 198 if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg, 199 sizeof(struct sq_control), 200 (caddr_t *)&sc->sc_control, 201 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 202 printf(": unable to map control data, error = %d\n", err); 203 goto fail_1; 204 } 205 206 if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control), 207 1, sizeof(struct sq_control), PAGE_SIZE, 208 BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) { 209 printf(": unable to create DMA map for control data, error " 210 "= %d\n", err); 211 goto fail_2; 212 } 213 214 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control, 215 sizeof(struct sq_control), 216 NULL, BUS_DMA_NOWAIT)) != 0) { 217 printf(": unable to load DMA map for control data, error " 218 "= %d\n", err); 219 goto fail_3; 220 } 221 222 memset(sc->sc_control, 0, sizeof(struct sq_control)); 223 224 /* Create transmit buffer DMA maps */ 225 for (i = 0; i < SQ_NTXDESC; i++) { 226 if ((err = 
bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 227 0, BUS_DMA_NOWAIT, 228 &sc->sc_txmap[i])) != 0) { 229 printf(": unable to create tx DMA map %d, error = %d\n", 230 i, err); 231 goto fail_4; 232 } 233 } 234 235 /* Create transmit buffer DMA maps */ 236 for (i = 0; i < SQ_NRXDESC; i++) { 237 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 238 0, BUS_DMA_NOWAIT, 239 &sc->sc_rxmap[i])) != 0) { 240 printf(": unable to create rx DMA map %d, error = %d\n", 241 i, err); 242 goto fail_5; 243 } 244 } 245 246 /* Pre-allocate the receive buffers. */ 247 for (i = 0; i < SQ_NRXDESC; i++) { 248 if ((err = sq_add_rxbuf(sc, i)) != 0) { 249 printf(": unable to allocate or map rx buffer %d\n," 250 " error = %d\n", i, err); 251 goto fail_6; 252 } 253 } 254 255 if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) { 256 printf(": unable to get MAC address!\n"); 257 goto fail_6; 258 } 259 260 evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL, 261 self->dv_xname, "intr"); 262 263 if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) { 264 printf(": unable to establish interrupt!\n"); 265 goto fail_6; 266 } 267 268 /* Reset the chip to a known state. */ 269 sq_reset(sc); 270 271 /* 272 * Determine if we're an 8003 or 80c03 by setting the first 273 * MAC address register to non-zero, and then reading it back. 274 * If it's zero, we have an 80c03, because we will have read 275 * the TxCollLSB register. 276 */ 277 bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5); 278 if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0) 279 sc->sc_type = SQ_TYPE_80C03; 280 else 281 sc->sc_type = SQ_TYPE_8003; 282 bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00); 283 284 printf(": SGI Seeq %s\n", 285 sc->sc_type == SQ_TYPE_80C03 ? 
"80c03" : "8003"); 286 287 enaddr_aton(macaddr, sc->sc_enaddr); 288 289 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname, 290 ether_sprintf(sc->sc_enaddr)); 291 292 strcpy(ifp->if_xname, sc->sc_dev.dv_xname); 293 ifp->if_softc = sc; 294 ifp->if_mtu = ETHERMTU; 295 ifp->if_init = sq_init; 296 ifp->if_stop = sq_stop; 297 ifp->if_start = sq_start; 298 ifp->if_ioctl = sq_ioctl; 299 ifp->if_watchdog = sq_watchdog; 300 ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST; 301 IFQ_SET_READY(&ifp->if_snd); 302 303 if_attach(ifp); 304 ether_ifattach(ifp, sc->sc_enaddr); 305 306 memset(&sq_trace, 0, sizeof(sq_trace)); 307 /* Done! */ 308 return; 309 310 /* 311 * Free any resources we've allocated during the failed attach 312 * attempt. Do this in reverse order and fall through. 313 */ 314 fail_6: 315 for (i = 0; i < SQ_NRXDESC; i++) { 316 if (sc->sc_rxmbuf[i] != NULL) { 317 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]); 318 m_freem(sc->sc_rxmbuf[i]); 319 } 320 } 321 fail_5: 322 for (i = 0; i < SQ_NRXDESC; i++) { 323 if (sc->sc_rxmap[i] != NULL) 324 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]); 325 } 326 fail_4: 327 for (i = 0; i < SQ_NTXDESC; i++) { 328 if (sc->sc_txmap[i] != NULL) 329 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]); 330 } 331 bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap); 332 fail_3: 333 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap); 334 fail_2: 335 bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control, 336 sizeof(struct sq_control)); 337 fail_1: 338 bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg); 339 fail_0: 340 return; 341 } 342 343 /* Set up data to get the interface up and running. 
 */
/*
 * Initialize the hardware: program the station address into the Seeq,
 * build the prototype receive command, set the transmit/receive
 * interrupt enables, and start the HPC receive DMA channel on the
 * first ring descriptor.  Called with the interface stopped (we stop
 * it ourselves first).  Returns 0 (no failure paths).
 */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	/* Reset software ring state: rx at 0, all tx descriptors free. */
	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address (one register per address byte). */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

	/* Prototype receive command: interrupt on every rx condition. */
	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
						    TXCMD_IE_UFLOW |
						    TXCMD_IE_COLL |
						    TXCMD_IE_16COLL |
						    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config (read-modify-write of workaround bits) */
	reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG,
			  reg | ENETR_DMACFG_FIX_RXDC |
				ENETR_DMACFG_FIX_INTR |
				ENETR_DMACFG_FIX_EOP);

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_NDBP,
						    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
						    ENETR_CTL_ACTIVE);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/*
 * Update the software copy of the receive filter (sc_rxcmd) and
 * IFF_ALLMULTI from the interface flags and multicast list.  Does not
 * touch the hardware; the caller (sq_init) writes sc_rxcmd to the chip.
 */
static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		/* No multicast entries: receive broadcast + unicast only. */
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

/*
 * ioctl handler: defer everything to ether_ioctl(); on ENETRESET
 * (multicast list changed) reinitialize the interface, which rebuilds
 * the receive filter via sq_init() -> sq_set_filter().
 */
int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		error = sq_init(ifp);
	}

	splx(s);
	return (error);
}

/*
 * Transmit start routine.  Drains the interface send queue into the tx
 * descriptor ring.  Packets whose mbuf chains don't map in one DMA
 * segment are copied into a single contiguous mbuf (see TODO (2)).
 * Once descriptors are committed, either chains onto a running HPC tx
 * DMA or kicks the channel into life -- see the race note below.
 */
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue (peek only; dequeue happens
		 * after we know it fits in the ring).
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			/* Flatten the chain into the fresh mbuf/cluster. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX it is worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		/* Commit: take it off the queue; prefer the copied mbuf. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors: one per DMA
		 * segment, each linked to the following descriptor.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr=
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

		/* XXXrkb: if not EDLC, pad to min len manually */
		if (totlen < ETHER_MIN_LEN) {
			sc->sc_txdesc[lasttx].hdd_ctl += (ETHER_MIN_LEN - totlen);
			totlen = ETHER_MIN_LEN;
		}

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf(" transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" hdd_bufptr: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_bufptr);
				printf(" hdd_ctl: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_ctl);
				printf(" hdd_descptr: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
							HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			/* Channel running: just unchain the previous EOC. */
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Stop the interface: free all pending transmit mbufs/maps, clear the
 * Seeq command registers, and reset the chip.  (See TODO (3): this may
 * not quiesce the chip completely.)  The 'disable' argument is
 * currently unused.
 */
void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i =0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine.
 */
/*
 * Watchdog: fires when a transmit has been pending for too long
 * (if_timer set in sq_start/sq_txintr).  Logs state, dumps and resets
 * the debug trace ring, counts an output error, and reinitializes the
 * interface.
 */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
		     "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
				       sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sq_trace, 0, sizeof(sq_trace));
	sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

/* Print every recorded SQ_TRACE entry (debug aid, used by sq_watchdog). */
void sq_trace_dump(struct sq_softc* sc)
{
	int i;

	for(i = 0; i < sq_trace_idx; i++) {
		printf("%s: [%d] action %d, buf %d, free %d, status %08x\n",
			sc->sc_dev.dv_xname, i, sq_trace[i].action,
			sq_trace[i].bufno, sq_trace[i].freebuf,
			sq_trace[i].status);
	}
}

/*
 * Interrupt handler.  Checks/acknowledges the HPC interrupt bit
 * (bit 1 of the ENETR_RESET register -- magic value per this driver;
 * presumably the HPC "interrupt pending" bit, not verifiable here),
 * then services receive and, if transmits are outstanding, transmit
 * completions.  Returns nonzero if the interrupt was ours.
 */
static int
sq_intr(void * arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET);

	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	/* Acknowledge the interrupt by writing the bit back. */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 2);

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

/*
 * Receive interrupt service: walk the rx ring from sc_nextrx until we
 * hit a descriptor the hardware still owns, handing good frames to the
 * stack (replacing their buffers via sq_add_rxbuf) and recycling bad
 * ones in place.  Afterwards the ring's end-of-chain marker is moved
 * and the HPC receive channel is restarted if it went idle.  Returns
 * the number of descriptors processed.
 */
static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for(i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* If this is a CPU-owned buffer, we're at the end of the list */
		if (sc->sc_rxdesc[i].hdd_ctl & HDD_CTL_OWN) {
#if 0
			u_int32_t reg;

			reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETR_CTL);
			printf("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg);
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		/*
		 * Frame length derived from the residual byte count the
		 * HPC leaves in hdd_ctl; the -3 appears to strip status
		 * bytes/padding -- HPC convention, not verifiable here.
		 */
		framelen = m->m_ext.ext_size -
		    HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hdd_ctl) - 3;

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* Per-packet receive status byte trails the frame data. */
		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			/* Bad frame: count it and recycle the descriptor. */
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		/*
		 * Replace the buffer before handing the old one up; on
		 * failure, drop the frame and reuse the existing buffer.
		 */
		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}


		/* Skip the 2-byte pad so the IP header is aligned. */
		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

#if 0
		printf("%s: sq_rxintr: buf %d len %d\n", sc->sc_dev.dv_xname,
		    i, framelen);
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}


	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hdd_ctl |= HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hdd_ctl &= ~HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL);

	/* If receive channel is stopped, restart it... */
	if ((status & ENETR_CTL_ACTIVE) == 0) {
		/* Pass the start of the receive ring to the HPC */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETR_NDBP, SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
		    ENETR_CTL_ACTIVE);
	}

	return count;
}

/*
 * Transmit interrupt service: account for transmit errors reported in
 * the ENETX_CTL status, then reap completed descriptors starting at
 * sc_prevtx.  If a descriptor is still pending but the DMA channel has
 * gone idle, restart the channel at that descriptor.  Finishes by
 * clearing IFF_OACTIVE / the watchdog as appropriate and calling
 * sq_start() to queue more packets.
 */
static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	/* Channel idle and last transmit not good: record the error. */
	if ((status & (ENETX_CTL_ACTIVE | TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
							HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* If not yet transmitted, try and start DMA engine again */
		if ((sc->sc_txdesc[i].hdd_ctl & HDD_CTL_XMITDONE) == 0) {
			if ((status & ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, i, status,
				    sc->sc_nfreetx);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
						  HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else {
				SQ_TRACE(SQ_TXINTR_BUSY, i, status,
				    sc->sc_nfreetx);
			}
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}


/*
 * Reset the HPC ethernet: stop both DMA channels, then pulse the
 * reset bits (value 3 asserts, 0 releases; 20us hold time).
 */
void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL, 0);

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
/*
 * Allocate a fresh mbuf cluster, load it into the descriptor's DMA
 * map (replacing any previous buffer), and reinitialize the rx
 * descriptor.  Returns 0 on success, ENOBUFS if mbuf/cluster
 * allocation fails; panics if the DMA load fails (XXX per original).
 */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Drop the old buffer's mapping before installing the new one. */
	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
			sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

/*
 * Debug helper: hex-dump 'len' bytes at physical address 'addr'
 * through the KSEG1 (uncached) window, 16 bytes per line.
 */
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char* physaddr = (char*) MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for(i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) ==  15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}


/*
 * Parse an "xx:xx:xx:xx:xx:xx" ethernet address string (as returned
 * by the ARCBIOS "eaddr" variable) into a 6-byte binary address.
 * NOTE: non-hex characters leave the corresponding nibble unchanged;
 * no validation is performed.
 */
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for(i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		/* High nibble. */
		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		/* Low nibble. */
		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		}
		else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}