/*	$OpenBSD: ti.c,v 1.28 2020/12/12 11:48:52 jan Exp $ */

/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_ti.c,v 1.25 2000/01/18 00:26:29 wpaul Exp $
 */

/*
 * Alteon Networks Tigon PCI gigabit ethernet driver for OpenBSD.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Alteon Networks Tigon chip contains an embedded R4000 CPU,
 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs
 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The
 * Tigon supports hardware IP, TCP and UDP checksumming, multicast
 * filtering and jumbo (9014 byte) frames. The hardware is largely
 * controlled by firmware, which must be loaded into the NIC during
 * initialization.
 *
 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
 * revision, which supports new features such as extended commands,
 * extended jumbo receive ring descriptors and a mini receive ring.
 *
 * Alteon Networks is to be commended for releasing such a vast amount
 * of development material for the Tigon NIC without requiring an NDA
 * (although they really should have done it a long time ago). With
 * any luck, the other vendors will finally wise up and follow Alteon's
 * stellar example.
 *
 * The following people deserve special thanks:
 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
 *   for testing
 * - Raymond Lee of Netgear, for providing a pair of Netgear
 *   GA620 Tigon 2 boards for testing
 * - Ulf Zimmermann, for bringing the GA260 to my attention and
 *   convincing me to write this driver.
 * - Andrew Gallatin for providing FreeBSD/Alpha support.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>

#include <dev/ic/tireg.h>
#include <dev/ic/tivar.h>
#include <dev/pci/pcireg.h>

struct cfdriver ti_cd = {
	NULL, "ti", DV_IFNET
};

void ti_txeof_tigon1(struct ti_softc *);
void ti_txeof_tigon2(struct ti_softc *);
void ti_rxeof(struct ti_softc *);

void ti_stats_update(struct ti_softc *);
int ti_encap_tigon1(struct ti_softc *, struct mbuf *, u_int32_t *);
int ti_encap_tigon2(struct ti_softc *, struct mbuf *, u_int32_t *);

int ti_intr(void *);
void ti_start(struct ifnet *);
int ti_ioctl(struct ifnet *, u_long, caddr_t);
void ti_init(void *);
void ti_init2(struct ti_softc *);
void ti_stop(struct ti_softc *);
void ti_watchdog(struct ifnet *);
int ti_ifmedia_upd(struct ifnet *);
void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int32_t ti_eeprom_putbyte(struct ti_softc *, int);
u_int8_t ti_eeprom_getbyte(struct ti_softc *, int, u_int8_t *);
int ti_read_eeprom(struct ti_softc *, caddr_t, int, int);

void ti_add_mcast(struct ti_softc *, struct ether_addr *);
void ti_del_mcast(struct ti_softc *, struct ether_addr *);
void ti_iff(struct ti_softc *);

void ti_mem_read(struct ti_softc *, u_int32_t, u_int32_t, void *);
void ti_mem_write(struct ti_softc *, u_int32_t, u_int32_t, const void *);
void ti_mem_set(struct ti_softc *, u_int32_t, u_int32_t);
void ti_loadfw(struct ti_softc *);
void ti_cmd(struct ti_softc *, struct ti_cmd_desc *);
void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *,
    caddr_t, int);
void ti_handle_events(struct ti_softc *);
int ti_newbuf_std(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
int ti_init_rx_ring_std(struct ti_softc *);
void ti_free_rx_ring_std(struct ti_softc *);
int ti_init_rx_ring_jumbo(struct ti_softc *);
void ti_free_rx_ring_jumbo(struct ti_softc *);
int ti_init_rx_ring_mini(struct ti_softc *);
void ti_free_rx_ring_mini(struct ti_softc *);
void ti_free_tx_ring(struct ti_softc *);
int ti_init_tx_ring(struct ti_softc *);

int ti_64bitslot_war(struct ti_softc *);
int ti_chipinit(struct ti_softc *);
void ti_chipinit_pci(struct ti_softc *);
void ti_chipinit_sbus(struct ti_softc *);
int ti_gibinit(struct ti_softc *);

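/*
 * A note on the EEPROM access routines that follow (an editorial
 * summary of the code, not a datasheet description): the serial EEPROM
 * is bit-banged through TI_MISC_LOCAL_CTL. TI_MLC_EE_CLK is the clock
 * line, TI_MLC_EE_DOUT and TI_MLC_EE_DIN are data out/in, and
 * TI_MLC_EE_TXEN selects whether the host is driving or sampling the
 * data line. Bytes are clocked out most-significant bit first.
 */
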
/*
 * Send an instruction or address to the EEPROM, check for ACK.
 */
u_int32_t
ti_eeprom_putbyte(struct ti_softc *sc, int byte)
{
	int i, ack = 0;

	/*
	 * Make sure we're in TX mode.
	 */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x80; i; i >>= 1) {
		if (byte & i)
			TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		else
			TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		DELAY(1);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	}

	/*
	 * Turn off TX mode.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/*
	 * Check for ack.
	 */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);

	return (ack);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'
 * We have to send two address bytes since the EEPROM can hold
 * more than 256 bytes of data.
 */
u_int8_t
ti_eeprom_getbyte(struct ti_softc *sc, int addr, u_int8_t *dest)
{
	int i;
	u_int8_t byte = 0;

	EEPROM_START;

	/*
	 * Send write control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
		printf("%s: failed to send write command, status: %x\n",
		    sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/*
	 * Send first byte of address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
		printf("%s: failed to send address, status: %x\n",
		    sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}
	/*
	 * Send second byte of address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
		printf("%s: failed to send address, status: %x\n",
		    sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	EEPROM_STOP;
	EEPROM_START;
	/*
	 * Send read control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
		printf("%s: failed to send read command, status: %x\n",
		    sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/*
	 * Start reading bits from EEPROM.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
	for (i = 0x80; i; i >>= 1) {
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
			byte |= i;
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
	}

	EEPROM_STOP;

	/*
	 * No ACK generated for read, so just return byte.
	 */

	*dest = byte;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
int
ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = ti_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

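/*
 * As used in this driver, ti_read_eeprom() mainly exists so that
 * ti_attach() can pull the station address out of the EEPROM: it reads
 * ETHER_ADDR_LEN bytes starting at TI_EE_MAC_OFFSET + 2, skipping the
 * two bytes of zero padding that precede the address (see the comment
 * in ti_attach() below).
 */
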
/*
 * NIC memory read function.
 * Can be used to copy data from NIC local memory.
 */
void
ti_mem_read(struct ti_softc *sc, u_int32_t addr, u_int32_t len, void *buf)
{
	int segptr, segsize, cnt;
	caddr_t ptr;

	segptr = addr;
	cnt = len;
	ptr = buf;

	while (cnt) {
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr,
		    segsize / 4);
		ptr += segsize;
		segptr += segsize;
		cnt -= segsize;
	}
}

/*
 * NIC memory write function.
 * Can be used to copy data into NIC local memory.
 */
void
ti_mem_write(struct ti_softc *sc, u_int32_t addr, u_int32_t len,
    const void *buf)
{
	int segptr, segsize, cnt;
	const char *ptr;

	segptr = addr;
	cnt = len;
	ptr = buf;

	while (cnt) {
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr,
		    segsize / 4);
		ptr += segsize;
		segptr += segsize;
		cnt -= segsize;
	}
}

/*
 * NIC memory set function.
 * Can be used to clear a section of NIC local memory.
 */
void
ti_mem_set(struct ti_softc *sc, u_int32_t addr, u_int32_t len)
{
	int segptr, segsize, cnt;

	segptr = addr;
	cnt = len;

	while (cnt) {
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4);
		segptr += segsize;
		cnt -= segsize;
	}
}

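/*
 * A note on the three routines above (this just restates what the code
 * does): the NIC's SRAM is not mapped into host address space directly.
 * Instead, TI_WINBASE is loaded with a TI_WINLEN-aligned SRAM address
 * and the corresponding TI_WINLEN-byte window is then accessed through
 * the TI_WINDOW aperture in register space, sliding the window forward
 * until the whole transfer is done. TI_WINLEN itself comes from the
 * register definitions.
 */
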
/*
 * Load firmware image into the NIC. Check that the firmware revision
 * is acceptable and see if we want the firmware for the Tigon 1 or
 * Tigon 2.
 */
void
ti_loadfw(struct ti_softc *sc)
{
	struct tigon_firmware *tf;
	u_char *buf = NULL;
	u_int32_t *b;
	size_t buflen, i, cnt;
	char *name;
	int error;

	switch (sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		name = "tigon1";
		break;
	case TI_HWREV_TIGON_II:
		name = "tigon2";
		break;
	default:
		printf("%s: can't load firmware: unknown hardware rev\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	error = loadfirmware(name, &buf, &buflen);
	if (error)
		return;
	/* convert firmware to host byte order */
	b = (u_int32_t *)buf;
	cnt = buflen / sizeof(u_int32_t);
	for (i = 0; i < cnt; i++)
		b[i] = letoh32(b[i]);

	tf = (struct tigon_firmware *)buf;
	if (tf->FwReleaseMajor != TI_FIRMWARE_MAJOR ||
	    tf->FwReleaseMinor != TI_FIRMWARE_MINOR ||
	    tf->FwReleaseFix != TI_FIRMWARE_FIX) {
		printf("%s: firmware revision mismatch; want "
		    "%d.%d.%d, got %d.%d.%d\n", sc->sc_dv.dv_xname,
		    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
		    TI_FIRMWARE_FIX, tf->FwReleaseMajor,
		    tf->FwReleaseMinor, tf->FwReleaseFix);
		free(buf, M_DEVBUF, buflen);
		return;
	}
	ti_mem_write(sc, tf->FwTextAddr, tf->FwTextLen,
	    (caddr_t)&tf->data[tf->FwTextOffset]);
	ti_mem_write(sc, tf->FwRodataAddr, tf->FwRodataLen,
	    (caddr_t)&tf->data[tf->FwRodataOffset]);
	ti_mem_write(sc, tf->FwDataAddr, tf->FwDataLen,
	    (caddr_t)&tf->data[tf->FwDataOffset]);
	ti_mem_set(sc, tf->FwBssAddr, tf->FwBssLen);
	ti_mem_set(sc, tf->FwSbssAddr, tf->FwSbssLen);
	CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tf->FwStartAddr);
	free(buf, M_DEVBUF, buflen);
}

/*
 * Send the NIC a command via the command ring.
 */
void
ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
{
	u_int32_t index;

	index = sc->ti_cmd_saved_prodidx;
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
	TI_INC(index, TI_CMD_RING_CNT);
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
	sc->ti_cmd_saved_prodidx = index;
}

/*
 * Send the NIC an extended command. The 'len' parameter specifies the
 * number of command slots to include after the initial command.
 */
void
ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg,
    int len)
{
	u_int32_t index;
	int i;

	index = sc->ti_cmd_saved_prodidx;
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
	TI_INC(index, TI_CMD_RING_CNT);
	for (i = 0; i < len; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
		    *(u_int32_t *)(&arg[i * 4]));
		TI_INC(index, TI_CMD_RING_CNT);
	}
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
	sc->ti_cmd_saved_prodidx = index;
}

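/*
 * How commands reach the firmware: each command is a single 32-bit
 * descriptor. The host drops it into the TI_CMD_RING_CNT-entry ring in
 * the general communications region (TI_GCR_CMDRING) and then writes
 * the new producer index to the TI_MB_CMDPROD_IDX mailbox, which is
 * what notifies the firmware. The rest of the driver issues commands
 * through the TI_DO_CMD()/TI_DO_CMD_EXT() macros, which are presumably
 * thin wrappers around ti_cmd() and ti_cmd_ext().
 */
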
/*
 * Handle events that have triggered interrupts.
 */
void
ti_handle_events(struct ti_softc *sc)
{
	struct ti_event_desc *e;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
		e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
		switch (TI_EVENT_EVENT(e)) {
		case TI_EV_LINKSTAT_CHANGED:
			sc->ti_linkstat = TI_EVENT_CODE(e);
			switch (sc->ti_linkstat) {
			case TI_EV_CODE_LINK_UP:
			case TI_EV_CODE_GIG_LINK_UP:
			{
				struct ifmediareq ifmr;

				bzero(&ifmr, sizeof(ifmr));
				ti_ifmedia_sts(ifp, &ifmr);
				if (ifmr.ifm_active & IFM_FDX) {
					ifp->if_link_state =
					    LINK_STATE_FULL_DUPLEX;
				} else {
					ifp->if_link_state =
					    LINK_STATE_HALF_DUPLEX;
				}
				if_link_state_change(ifp);
				ifp->if_baudrate =
				    ifmedia_baudrate(ifmr.ifm_active);
				break;
			}
			case TI_EV_CODE_LINK_DOWN:
				ifp->if_link_state = LINK_STATE_DOWN;
				if_link_state_change(ifp);
				ifp->if_baudrate = 0;
				break;
			default:
				printf("%s: unknown link state code %d\n",
				    sc->sc_dv.dv_xname, sc->ti_linkstat);
			}
			break;
		case TI_EV_ERROR:
			if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
				printf("%s: invalid command\n",
				    sc->sc_dv.dv_xname);
			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
				printf("%s: unknown command\n",
				    sc->sc_dv.dv_xname);
			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
				printf("%s: bad config data\n",
				    sc->sc_dv.dv_xname);
			break;
		case TI_EV_FIRMWARE_UP:
			ti_init2(sc);
			break;
		case TI_EV_STATS_UPDATED:
			ti_stats_update(sc);
			break;
		case TI_EV_RESET_JUMBO_RING:
		case TI_EV_MCAST_UPDATED:
			/* Who cares. */
			break;
		default:
			printf("%s: unknown event: %d\n", sc->sc_dv.dv_xname,
			    TI_EVENT_EVENT(e));
			break;
		}
		/* Advance the consumer index. */
		TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
		CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
	}
}

/*
 * Initialize a standard receive ring descriptor.
 */
int
ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;

	if (dmamap == NULL) {
		/* if (m) panic() */

		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &dmamap)) {
			printf("%s: can't create recv map\n",
			    sc->sc_dv.dv_xname);
			return (ENOMEM);
		}
	} else if (m == NULL)
		bus_dmamap_unload(sc->sc_dmatag, dmamap);

	sc->ti_cdata.ti_rx_std_map[i] = dmamap;

	if (m == NULL) {
		m_new = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
		if (m_new == NULL)
			return (ENOBUFS);

		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_adj(m_new, ETHER_ALIGN);

		if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new,
		    BUS_DMA_NOWAIT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_adj(m_new, ETHER_ALIGN);
	}

	sc->ti_cdata.ti_rx_std_chain[i] = m_new;
	r = &sc->ti_rdata->ti_rx_std_ring[i];
	TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr;
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = TI_BDFLAG_IP_CKSUM;
	r->ti_len = dmamap->dm_segs[0].ds_len;
	r->ti_idx = i;

	return (0);
}

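/*
 * A word on the m_adj(m, ETHER_ALIGN) calls in the ti_newbuf_*()
 * routines above and below: eating two bytes at the front of each
 * receive buffer puts the 14-byte Ethernet header at an offset of 2, so
 * the IP header that follows ends up 32-bit aligned. That is the usual
 * reason for ETHER_ALIGN in receive paths, and it matters on
 * strict-alignment architectures; this comment describes existing
 * behaviour rather than changing it.
 */
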
/*
 * Initialize a mini receive ring descriptor. This only applies to
 * the Tigon 2.
 */
int
ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;

	if (dmamap == NULL) {
		/* if (m) panic() */

		if (bus_dmamap_create(sc->sc_dmatag, MHLEN, 1, MHLEN,
		    0, BUS_DMA_NOWAIT, &dmamap)) {
			printf("%s: can't create recv map\n",
			    sc->sc_dv.dv_xname);
			return (ENOMEM);
		}
	} else if (m == NULL)
		bus_dmamap_unload(sc->sc_dmatag, dmamap);

	sc->ti_cdata.ti_rx_mini_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
		m_adj(m_new, ETHER_ALIGN);

		if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new,
		    BUS_DMA_NOWAIT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_data = m_new->m_pktdat;
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	}

	r = &sc->ti_rdata->ti_rx_mini_ring[i];
	sc->ti_cdata.ti_rx_mini_chain[i] = m_new;
	TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr;
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = TI_BDFLAG_MINI_RING | TI_BDFLAG_IP_CKSUM;
	r->ti_len = dmamap->dm_segs[0].ds_len;
	r->ti_idx = i;

	return (0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
int
ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;

	if (dmamap == NULL) {
		/* if (m) panic() */

		if (bus_dmamap_create(sc->sc_dmatag, TI_JUMBO_FRAMELEN, 1,
		    TI_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT, &dmamap)) {
			printf("%s: can't create recv map\n",
			    sc->sc_dv.dv_xname);
			return (ENOMEM);
		}
	} else if (m == NULL)
		bus_dmamap_unload(sc->sc_dmatag, dmamap);

	if (m == NULL) {
		m_new = MCLGETL(NULL, M_DONTWAIT, TI_JUMBO_FRAMELEN);
		if (m_new == NULL)
			return (ENOBUFS);

		m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
		m_adj(m_new, ETHER_ALIGN);

		if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new,
		    BUS_DMA_NOWAIT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
		m_adj(m_new, ETHER_ALIGN);
	}

	/* Set up the descriptor. */
	r = &sc->ti_rdata->ti_rx_jumbo_ring[i];
	sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new;
	TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr;
	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
	r->ti_flags = TI_BDFLAG_JUMBO_RING | TI_BDFLAG_IP_CKSUM;
	r->ti_len = m_new->m_len;
	r->ti_idx = i;

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
int
ti_init_rx_ring_std(struct ti_softc *sc)
{
	int i;
	struct ti_cmd_desc cmd;

	for (i = 0; i < TI_SSLOTS; i++) {
		if (ti_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return (ENOBUFS);
	}

	TI_UPDATE_STDPROD(sc, i - 1);
	sc->ti_std = i - 1;

	return (0);
}

void
ti_free_rx_ring_std(struct ti_softc *sc)
{
	int i;

	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
			sc->ti_cdata.ti_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->ti_cdata.ti_rx_std_map[i]);
			sc->ti_cdata.ti_rx_std_map[i] = 0;
		}
		bzero(&sc->ti_rdata->ti_rx_std_ring[i],
		    sizeof(struct ti_rx_desc));
	}
}

int
ti_init_rx_ring_jumbo(struct ti_softc *sc)
{
	int i;
	struct ti_cmd_desc cmd;

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (ti_newbuf_jumbo(sc, i, NULL, 0) == ENOBUFS)
			return (ENOBUFS);
	}

	TI_UPDATE_JUMBOPROD(sc, i - 1);
	sc->ti_jumbo = i - 1;

	return (0);
}

void
ti_free_rx_ring_jumbo(struct ti_softc *sc)
{
	int i;

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
			sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
		}
		bzero(&sc->ti_rdata->ti_rx_jumbo_ring[i],
		    sizeof(struct ti_rx_desc));
	}
}

int
ti_init_rx_ring_mini(struct ti_softc *sc)
{
	int i;

	for (i = 0; i < TI_MSLOTS; i++) {
		if (ti_newbuf_mini(sc, i, NULL, 0) == ENOBUFS)
			return (ENOBUFS);
	}

	TI_UPDATE_MINIPROD(sc, i - 1);
	sc->ti_mini = i - 1;

	return (0);
}

void
ti_free_rx_ring_mini(struct ti_softc *sc)
{
	int i;

	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
			sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->ti_cdata.ti_rx_mini_map[i]);
			sc->ti_cdata.ti_rx_mini_map[i] = 0;
		}
		bzero(&sc->ti_rdata->ti_rx_mini_ring[i],
		    sizeof(struct ti_rx_desc));
	}
}

void
ti_free_tx_ring(struct ti_softc *sc)
{
	int i;
	struct ti_txmap_entry *entry;

	for (i = 0; i < TI_TX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_tx_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[i]);
			sc->ti_cdata.ti_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead,
			    sc->ti_cdata.ti_tx_map[i], link);
			sc->ti_cdata.ti_tx_map[i] = 0;
		}
		bzero(&sc->ti_rdata->ti_tx_ring[i],
		    sizeof(struct ti_tx_desc));
	}

	while ((entry = SLIST_FIRST(&sc->ti_tx_map_listhead))) {
		SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
		bus_dmamap_destroy(sc->sc_dmatag, entry->dmamap);
		free(entry, M_DEVBUF, sizeof *entry);
	}
}

int
ti_init_tx_ring(struct ti_softc *sc)
{
	int i;
	bus_dmamap_t dmamap;
	struct ti_txmap_entry *entry;

	sc->ti_txcnt = 0;
	sc->ti_tx_saved_considx = 0;
	sc->ti_tx_saved_prodidx = 0;
	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);

	SLIST_INIT(&sc->ti_tx_map_listhead);
	for (i = 0; i < TI_TX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, TI_JUMBO_FRAMELEN,
		    TI_NTXSEG, MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap))
			return (ENOBUFS);

		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
		if (!entry) {
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
			return (ENOBUFS);
		}
		entry->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry, link);
	}

	return (0);
}

/*
 * The Tigon 2 firmware has a new way to add/delete multicast addresses,
 * but we have to support the old way too so that Tigon 1 cards will
 * work.
 */
void
ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr)
{
	struct ti_cmd_desc cmd;
	u_int16_t *m;
	u_int32_t ext[2] = {0, 0};

	m = (u_int16_t *)&addr->ether_addr_octet[0];

	switch (sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
		CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
		TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0);
		break;
	case TI_HWREV_TIGON_II:
		ext[0] = htons(m[0]);
		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
		TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2);
		break;
	default:
		printf("%s: unknown hwrev\n", sc->sc_dv.dv_xname);
		break;
	}
}

void
ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr)
{
	struct ti_cmd_desc cmd;
	u_int16_t *m;
	u_int32_t ext[2] = {0, 0};

	m = (u_int16_t *)&addr->ether_addr_octet[0];

	switch (sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
		CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
		TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0);
		break;
	case TI_HWREV_TIGON_II:
		ext[0] = htons(m[0]);
		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
		TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2);
		break;
	default:
		printf("%s: unknown hwrev\n", sc->sc_dv.dv_xname);
		break;
	}
}

/*
 * Configure the Tigon's multicast address filter.
 *
 * The actual multicast table management is a bit of a pain, thanks to
 * slight brain damage on the part of both Alteon and us. With our
 * multicast code, we are only alerted when the multicast address table
 * changes and at that point we only have the current list of addresses:
 * we only know the current state, not the previous state, so we don't
 * actually know what addresses were removed or added. The firmware has
 * state, but we can't get our grubby mitts on it, and there is no 'delete
 * all multicast addresses' command. Hence, we have to maintain our own
 * state so we know what addresses have been programmed into the NIC at
 * any given time.
 */
void
ti_iff(struct ti_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ti_cmd_desc cmd;
	struct ti_mc_entry *mc;
	u_int32_t intrs;

	TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0);
	TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC) {
			TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
			    TI_CMD_CODE_PROMISC_ENB, 0);
		} else {
			TI_DO_CMD(TI_CMD_SET_ALLMULTI,
			    TI_CMD_CODE_ALLMULTI_ENB, 0);
		}
	} else {
		/* Disable interrupts. */
		intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
		CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

		/* First, zot all the existing filters. */
		while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) {
			mc = SLIST_FIRST(&sc->ti_mc_listhead);
			ti_del_mcast(sc, &mc->mc_addr);
			SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries);
			free(mc, M_DEVBUF, sizeof *mc);
		}

		/* Now program new ones. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF,
			    M_NOWAIT);
			if (mc == NULL)
				panic("ti_iff");

			bcopy(enm->enm_addrlo, &mc->mc_addr,
			    ETHER_ADDR_LEN);
			SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc,
			    mc_entries);
			ti_add_mcast(sc, &mc->mc_addr);

			ETHER_NEXT_MULTI(step, enm);
		}

		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
	}
}

/*
 * Check to see if the BIOS has configured us for a 64 bit slot when
 * we aren't actually in one. If we detect this condition, we can work
 * around it on the Tigon 2 by setting a bit in the PCI state register,
 * but for the Tigon 1 we must give up and abort the interface attach.
 */
int
ti_64bitslot_war(struct ti_softc *sc)
{
	if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) {
		CSR_WRITE_4(sc, 0x600, 0);
		CSR_WRITE_4(sc, 0x604, 0);
		CSR_WRITE_4(sc, 0x600, 0x5555AAAA);
		if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) {
			if (sc->ti_hwrev == TI_HWREV_TIGON)
				return (EINVAL);
			else {
				TI_SETBIT(sc, TI_PCI_STATE,
				    TI_PCISTATE_32BIT_BUS);
				return (0);
			}
		}
	}

	return (0);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
int
ti_chipinit(struct ti_softc *sc)
{
	u_int32_t chip_rev;

	/* Initialize link to down state. */
	sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;

	/* Set endianness before we access any non-PCI registers. */
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));

	/* Check the ROM failed bit to see if self-tests passed. */
	if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
		printf("%s: board self-diagnostics failed!\n",
		    sc->sc_dv.dv_xname);
		return (ENODEV);
	}

	/* Halt the CPU. */
	TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);

	/* Figure out the hardware revision. */
	chip_rev = CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK;
	switch (chip_rev) {
	case TI_REV_TIGON_I:
		sc->ti_hwrev = TI_HWREV_TIGON;
		break;
	case TI_REV_TIGON_II:
		sc->ti_hwrev = TI_HWREV_TIGON_II;
		break;
	default:
		printf("\n");
		printf("%s: unsupported chip revision: %x\n",
		    sc->sc_dv.dv_xname, chip_rev);
		return (ENODEV);
	}

	/* Do special setup for Tigon 2. */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K);
		TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
	}

	if (sc->ti_sbus)
		ti_chipinit_sbus(sc);
	else
		ti_chipinit_pci(sc);

	/* Recommended settings from Tigon manual. */
	CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
	CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);

	if (ti_64bitslot_war(sc)) {
		printf("%s: bios thinks we're in a 64 bit slot, "
		    "but we aren't\n", sc->sc_dv.dv_xname);
		return (EINVAL);
	}

	return (0);
}

void
ti_chipinit_pci(struct ti_softc *sc)
{
	u_int32_t cacheline;
	u_int32_t pci_writemax = 0;

	/* Set up the PCI state register. */
	CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD | TI_PCI_WRITE_CMD);
	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
		TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);

	/* Clear the read/write max DMA parameters. */
	TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA|
	    TI_PCISTATE_READ_MAXDMA));

	/* Get cache line size. */
	cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF;

	/*
	 * If the system has enabled the PCI memory write
	 * and invalidate command in the command register, set
	 * the write max parameter accordingly. This is necessary
	 * to use MWI with the Tigon 2.
	 */
	if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCI_COMMAND_INVALIDATE_ENABLE) {
		switch (cacheline) {
		case 1:
		case 4:
		case 8:
		case 16:
		case 32:
		case 64:
			break;
		default:
			/* Disable PCI memory write and invalidate. */
			CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc,
			    TI_PCI_CMDSTAT) & ~PCI_COMMAND_INVALIDATE_ENABLE);
			break;
		}
	}

#ifdef __brokenalpha__
	/*
	 * From the Alteon sample driver:
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax|TI_PCI_READMAX_1024);
#else
	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);
#endif

	/* This sets the min dma param all the way up (0xff). */
	TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);

	/* Configure DMA variables. */
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_DMA_SWAP_OPTIONS |
	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
	    TI_OPMODE_DONT_FRAG_JUMBO);
}

void
ti_chipinit_sbus(struct ti_softc *sc)
{
	/* Set up the PCI state register. */
	CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD | TI_PCI_WRITE_CMD |
	    TI_PCISTATE_NO_SWAP_READ_DMA | TI_PCISTATE_NO_SWAP_WRITE_DMA |
	    TI_PCI_WRITEMAX_64 | TI_PCI_READMAX_64 |
	    TI_PCISTATE_PROVIDE_LEN);

	/* Configure DMA variables. */
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_WORDSWAP_BD |
	    TI_OPMODE_1_DMA_ACTIVE | TI_OPMODE_SBUS |
	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
	    TI_OPMODE_DONT_FRAG_JUMBO);
}

/*
 * Initialize the general information block and firmware, and
 * start the CPU(s) running.
 */
int
ti_gibinit(struct ti_softc *sc)
{
	struct ti_rcb *rcb;
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/* Disable interrupts for now. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/*
	 * Tell the chip where to find the general information block.
	 * While this struct could go into >4GB memory, we allocate it in a
	 * single slab with the other descriptors, and those don't seem to
	 * support being located in a 64-bit region.
	 */
	CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
	CSR_WRITE_4(sc, TI_GCR_GENINFO_LO,
	    TI_RING_DMA_ADDR(sc, ti_info) & 0xffffffff);

	/* Load the firmware into SRAM. */
	ti_loadfw(sc);

	/* Set up the contents of the general info and ring control blocks. */

	/* Set up the event ring and producer pointer. */
	rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;

	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_event_ring);
	rcb->ti_flags = 0;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) =
	    TI_RING_DMA_ADDR(sc, ti_ev_prodidx_r);
	sc->ti_ev_prodidx.ti_idx = 0;
	CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
	sc->ti_ev_saved_considx = 0;

	/* Set up the command ring and producer mailbox. */
	rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;

	TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING);
	rcb->ti_flags = 0;
	rcb->ti_max_len = 0;
	for (i = 0; i < TI_CMD_RING_CNT; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
	}
	CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
	sc->ti_cmd_saved_prodidx = 0;

	/*
	 * Assign the address of the stats refresh buffer.
	 * We re-use the current stats buffer for this to
	 * conserve memory.
	 */
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) =
	    TI_RING_DMA_ADDR(sc, ti_info.ti_stats);

	/* Set up the standard receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) =
	    TI_RING_DMA_ADDR(sc, ti_rx_std_ring);
	rcb->ti_max_len = ETHER_MAX_LEN;
	rcb->ti_flags = 0;
	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif

	/* Set up the jumbo receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_jumbo_ring);
	rcb->ti_max_len = TI_JUMBO_FRAMELEN;
	rcb->ti_flags = 0;
	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif

	/*
	 * Set up the mini ring. Only activated on the
	 * Tigon 2 but the slot in the config block is
	 * still there on the Tigon 1.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_mini_ring);
	rcb->ti_max_len = MHLEN - ETHER_ALIGN;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
	else
		rcb->ti_flags = 0;
	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif

	/*
	 * Set up the receive return ring.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_return_ring);
	rcb->ti_flags = 0;
	rcb->ti_max_len = TI_RETURN_RING_CNT;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) =
	    TI_RING_DMA_ADDR(sc, ti_return_prodidx_r);

	/*
	 * Set up the tx ring. Note: for the Tigon 2, we have the option
	 * of putting the transmit ring in the host's address space and
	 * letting the chip DMA it instead of leaving the ring in the NIC's
	 * memory and accessing it through the shared memory region. We
	 * do this for the Tigon 2, but it doesn't work on the Tigon 1,
	 * so we have to revert to the shared memory scheme if we detect
	 * a Tigon 1 chip.
	 */
	CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
	bzero(sc->ti_rdata->ti_tx_ring,
	    TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
	rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = 0;
	else
		rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif
	rcb->ti_max_len = TI_TX_RING_CNT;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE;
	else
		TI_HOSTADDR(rcb->ti_hostaddr) =
		    TI_RING_DMA_ADDR(sc, ti_tx_ring);
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) =
	    TI_RING_DMA_ADDR(sc, ti_tx_considx_r);

	TI_RING_DMASYNC(sc, ti_info, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Set up tuneables */
	CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, (sc->ti_rx_coal_ticks / 10));
	CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
	CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);

	/* Turn interrupts on. */
	CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	/* Start CPU. */
	TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));

	return (0);
}

int
ti_attach(struct ti_softc *sc)
{
	bus_dma_segment_t seg;
	int rseg;
	struct ifnet *ifp;
	caddr_t kva;

	if (ti_chipinit(sc)) {
		printf("%s: chip initialization failed\n", sc->sc_dv.dv_xname);
		return (1);
	}

	/* Zero out the NIC's on-board SRAM. */
	ti_mem_set(sc, 0x2000, 0x100000 - 0x2000);

	/* Init again -- zeroing memory may have clobbered some registers. */
	if (ti_chipinit(sc)) {
		printf("%s: chip initialization failed\n", sc->sc_dv.dv_xname);
		return (1);
	}

	/*
	 * Get station address from the EEPROM. Note: the manual states
	 * that the MAC address is at offset 0x8c, however the data is
	 * stored as two longwords (since that's how it's loaded into
	 * the NIC). This means the MAC address is actually preceded
	 * by two zero bytes. We need to skip over those.
	 */
	if (ti_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		printf("%s: failed to read station address\n",
		    sc->sc_dv.dv_xname);
		return (1);
	}

	/*
	 * A Tigon chip was detected. Inform the world.
	 */
	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	/* Allocate the general information block and ring buffers. */
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct ti_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		return (1);
	}
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    sizeof(struct ti_ring_data), &kva, BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%zu bytes)\n",
		    sc->sc_dv.dv_xname, sizeof(struct ti_ring_data));
		goto fail_1;
	}
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct ti_ring_data), 1,
	    sizeof(struct ti_ring_data), 0, BUS_DMA_NOWAIT,
	    &sc->ti_ring_map)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		goto fail_2;
	}
	if (bus_dmamap_load(sc->sc_dmatag, sc->ti_ring_map, kva,
	    sizeof(struct ti_ring_data), NULL, BUS_DMA_NOWAIT)) {
		goto fail_3;
	}
	sc->ti_rdata = (struct ti_ring_data *)kva;
	bzero(sc->ti_rdata, sizeof(struct ti_ring_data));

	/* Set default tuneable values. */
	sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
	sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000;
	sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
	sc->ti_rx_max_coal_bds = 64;
	sc->ti_tx_max_coal_bds = 128;
	sc->ti_tx_buf_ratio = 21;

	/* Set up ifnet structure */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ti_ioctl;
	ifp->if_start = ti_start;
	ifp->if_watchdog = ti_watchdog;
	ifp->if_hardmtu = TI_JUMBO_FRAMELEN - ETHER_HDR_LEN;
	ifq_set_maxlen(&ifp->if_snd, TI_TX_RING_CNT - 1);
	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set up ifmedia support. */
	ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts);
	if (sc->ti_copper) {
		/*
		 * Copper cards allow manual 10/100 mode selection,
		 * but not manual 1000baseTX mode selection. Why?
		 * Because currently there's no way to specify the
		 * master/slave setting through the firmware interface,
		 * so Alteon decided to just bag it and handle it
		 * via autonegotiation.
		 */
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	} else {
		/* Fiber cards don't support 10/100 modes. */
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
	}
	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->ti_ring_map);

fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, kva,
	    sizeof(struct ti_ring_data));

fail_1:
	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);

	return (1);
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle three possibilities here:
 * 1) the frame is from the mini receive ring (can only happen
 *    on Tigon 2 boards)
 * 2) the frame is from the jumbo receive ring
 * 3) the frame is from the standard receive ring
 */

void
ti_rxeof(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ti_cmd_desc cmd;

	ifp = &sc->arpcom.ac_if;

	while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
		struct ti_rx_desc *cur_rx;
		u_int32_t rxidx;
		struct mbuf *m = NULL;
		bus_dmamap_t dmamap;

		cur_rx =
		    &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx];
		rxidx = cur_rx->ti_idx;
		TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);

		if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {
			TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
			sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
			dmamap = sc->ti_cdata.ti_rx_jumbo_map[rxidx];
			sc->ti_cdata.ti_rx_jumbo_map[rxidx] = 0;
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m, dmamap);
				continue;
			}
			if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL, dmamap)
			    == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m, dmamap);
				continue;
			}
		} else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
			TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
			sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL;
			dmamap = sc->ti_cdata.ti_rx_mini_map[rxidx];
			sc->ti_cdata.ti_rx_mini_map[rxidx] = 0;
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_mini(sc, sc->ti_mini, m, dmamap);
				continue;
			}
			if (ti_newbuf_mini(sc, sc->ti_mini, NULL, dmamap)
			    == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_mini(sc, sc->ti_mini, m, dmamap);
				continue;
			}
		} else {
			TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_std_chain[rxidx];
			sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL;
			dmamap = sc->ti_cdata.ti_rx_std_map[rxidx];
			sc->ti_cdata.ti_rx_std_map[rxidx] = 0;
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_std(sc, sc->ti_std, m, dmamap);
				continue;
			}
			if (ti_newbuf_std(sc, sc->ti_std, NULL, dmamap)
			    == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_std(sc, sc->ti_std, m, dmamap);
				continue;
			}
		}

		if (m == NULL)
			panic("%s: couldn't get mbuf", sc->sc_dv.dv_xname);

		m->m_pkthdr.len = m->m_len = cur_rx->ti_len;

#if NVLAN > 0
		if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
			m->m_pkthdr.ether_vtag = cur_rx->ti_vlan_tag;
			m->m_flags |= M_VLANTAG;
		}
#endif

		if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		ml_enqueue(&ml, m);
	}

	/* Only necessary on the Tigon 1. */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX,
		    sc->ti_rx_saved_considx);

	TI_UPDATE_STDPROD(sc, sc->ti_std);
	TI_UPDATE_MINIPROD(sc, sc->ti_mini);
	TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);

	if_input(ifp, &ml);
}

void
ti_txeof_tigon1(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ti_txmap_entry *entry;
	int active = 1;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
		u_int32_t idx = 0;
		struct ti_tx_desc txdesc;

		idx = sc->ti_tx_saved_considx;
		ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc),
		    sizeof(txdesc), (caddr_t)&txdesc);

		if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[idx]);
			sc->ti_cdata.ti_tx_chain[idx] = NULL;

			entry = sc->ti_cdata.ti_tx_map[idx];
			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry,
			    link);
			sc->ti_cdata.ti_tx_map[idx] = NULL;

		}
		sc->ti_txcnt--;
		TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
		ifp->if_timer = 0;

		active = 0;
	}

	if (!active)
		ifq_clr_oactive(&ifp->if_snd);
}

void
ti_txeof_tigon2(struct ti_softc *sc)
{
	struct ti_tx_desc *cur_tx = NULL;
	struct ifnet *ifp;
	struct ti_txmap_entry *entry;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
		u_int32_t idx = 0;

		idx = sc->ti_tx_saved_considx;
		cur_tx = &sc->ti_rdata->ti_tx_ring[idx];

		if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[idx]);
			sc->ti_cdata.ti_tx_chain[idx] = NULL;

			entry = sc->ti_cdata.ti_tx_map[idx];
			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry,
			    link);
			sc->ti_cdata.ti_tx_map[idx] = NULL;

		}
		sc->ti_txcnt--;
		TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
		ifp->if_timer = 0;
	}

	if (cur_tx != NULL)
		ifq_clr_oactive(&ifp->if_snd);
}

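/*
 * Interrupt gating note: TI_MB_HOSTINTR acts as a simple on/off switch
 * rather than a per-source mask. Writing 1 to it blocks further host
 * interrupts and writing 0 opens them up again; ti_intr() below,
 * ti_iff() and ti_gibinit() all rely on that convention.
 */
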
int
ti_intr(void *xsc)
{
	struct ti_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* XXX checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE))
		return (0);

	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		ti_rxeof(sc);

		/* Check TX ring producer/consumer */
		if (sc->ti_hwrev == TI_HWREV_TIGON)
			ti_txeof_tigon1(sc);
		else
			ti_txeof_tigon2(sc);
	}

	ti_handle_events(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	if (ifp->if_flags & IFF_RUNNING && !ifq_empty(&ifp->if_snd))
		ti_start(ifp);

	return (1);
}

void
ti_stats_update(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ti_stats *stats = &sc->ti_rdata->ti_info.ti_stats;

	ifp = &sc->arpcom.ac_if;

	TI_RING_DMASYNC(sc, ti_info.ti_stats, BUS_DMASYNC_POSTREAD);

	ifp->if_collisions += stats->dot3StatsSingleCollisionFrames +
	    stats->dot3StatsMultipleCollisionFrames +
	    stats->dot3StatsExcessiveCollisions +
	    stats->dot3StatsLateCollisions -
	    ifp->if_collisions;

	TI_RING_DMASYNC(sc, ti_info.ti_stats, BUS_DMASYNC_PREREAD);
}

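/*
 * The arithmetic in ti_stats_update() looks odd at first glance: adding
 * the four collision counters and then subtracting the old value of
 * if_collisions simply replaces if_collisions with the sum of the NIC's
 * counters, which only makes sense if the firmware keeps running totals
 * (as the dot3Stats MIB-style names suggest) rather than deltas.
 */
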
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
ti_encap_tigon1(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	u_int32_t frag, cur;
	struct ti_txmap_entry *entry;
	bus_dmamap_t txmap;
	struct ti_tx_desc txdesc;
	int i = 0;

	entry = SLIST_FIRST(&sc->ti_tx_map_listhead);
	if (entry == NULL)
		return (ENOBUFS);
	txmap = entry->dmamap;

	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	if (txmap->dm_nsegs > (TI_TX_RING_CNT - sc->ti_txcnt - 16))
		goto fail_unload;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
			break;

		memset(&txdesc, 0, sizeof(txdesc));

		TI_HOSTADDR(txdesc.ti_addr) = txmap->dm_segs[i].ds_addr;
		txdesc.ti_len = txmap->dm_segs[i].ds_len & 0xffff;
		txdesc.ti_flags = 0;
		txdesc.ti_vlan_tag = 0;

#if NVLAN > 0
		if (m_head->m_flags & M_VLANTAG) {
			txdesc.ti_flags |= TI_BDFLAG_VLAN_TAG;
			txdesc.ti_vlan_tag = m_head->m_pkthdr.ether_vtag;
		}
#endif

		ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
		    sizeof(txdesc), (caddr_t)&txdesc);

		cur = frag;
		TI_INC(frag, TI_TX_RING_CNT);
	}

	if (frag == sc->ti_tx_saved_considx)
		goto fail_unload;

	txdesc.ti_flags |= TI_BDFLAG_END;
	ti_mem_write(sc, TI_TX_RING_BASE + cur * sizeof(txdesc),
	    sizeof(txdesc), (caddr_t)&txdesc);

	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->ti_cdata.ti_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
	sc->ti_cdata.ti_tx_map[cur] = entry;
	sc->ti_txcnt += txmap->dm_nsegs;

	*txidx = frag;

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmatag, txmap);

	return (ENOBUFS);
}

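/*
 * The Tigon 1 and Tigon 2 encap paths differ mainly in where the
 * transmit descriptors live: ti_encap_tigon1() above builds each
 * descriptor on the stack and copies it into NIC SRAM with
 * ti_mem_write() (the ring sits at TI_TX_RING_BASE in the shared memory
 * window), while ti_encap_tigon2() below fills in the host-resident
 * ti_tx_ring that the chip fetches by DMA. See the tx ring setup in
 * ti_gibinit() for where that split is configured.
 */
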
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
ti_encap_tigon2(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct ti_tx_desc *f = NULL;
	u_int32_t frag, cur;
	struct ti_txmap_entry *entry;
	bus_dmamap_t txmap;
	int i = 0;

	entry = SLIST_FIRST(&sc->ti_tx_map_listhead);
	if (entry == NULL)
		return (ENOBUFS);
	txmap = entry->dmamap;

	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	if (txmap->dm_nsegs > (TI_TX_RING_CNT - sc->ti_txcnt - 16))
		goto fail_unload;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc->ti_rdata->ti_tx_ring[frag];

		if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
			break;

		TI_HOSTADDR(f->ti_addr) = txmap->dm_segs[i].ds_addr;
		f->ti_len = txmap->dm_segs[i].ds_len & 0xffff;
		f->ti_flags = 0;
		f->ti_vlan_tag = 0;

#if NVLAN > 0
		if (m_head->m_flags & M_VLANTAG) {
			f->ti_flags |= TI_BDFLAG_VLAN_TAG;
			f->ti_vlan_tag = m_head->m_pkthdr.ether_vtag;
		}
#endif

		cur = frag;
		TI_INC(frag, TI_TX_RING_CNT);
	}

	if (frag == sc->ti_tx_saved_considx)
		goto fail_unload;

	sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END;

	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	TI_RING_DMASYNC(sc, ti_tx_ring[cur], BUS_DMASYNC_POSTREAD);

	sc->ti_cdata.ti_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
	sc->ti_cdata.ti_tx_map[cur] = entry;
	sc->ti_txcnt += txmap->dm_nsegs;

	*txidx = frag;

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmatag, txmap);

	return (ENOBUFS);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
void
ti_start(struct ifnet *ifp)
{
	struct ti_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx;
	int pkts = 0, error;

	sc = ifp->if_softc;

	prodidx = sc->ti_tx_saved_prodidx;

	while (sc->ti_cdata.ti_tx_chain[prodidx] == NULL) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sc->ti_hwrev == TI_HWREV_TIGON)
			error = ti_encap_tigon1(sc, m_head, &prodidx);
		else
			error = ti_encap_tigon2(sc, m_head, &prodidx);

		if (error) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	sc->ti_tx_saved_prodidx = prodidx;
	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
ti_init(void *xsc)
{
	struct ti_softc *sc = xsc;
	int s;

	s = splnet();

	/* Cancel pending I/O and flush buffers. */
	ti_stop(sc);

	/* Init the gen info block, ring control blocks and firmware. */
	if (ti_gibinit(sc)) {
		printf("%s: initialization failure\n", sc->sc_dv.dv_xname);
		splx(s);
		return;
	}

	splx(s);
}

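/*
 * Note that initialization is split in two: ti_init() above only resets
 * the chip and loads the firmware via ti_gibinit(). Once the firmware
 * is running it posts a TI_EV_FIRMWARE_UP event, and ti_handle_events()
 * then calls ti_init2() below to finish bringing the interface up.
 */
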

void
ti_init2(struct ti_softc *sc)
{
	struct ti_cmd_desc cmd;
	struct ifnet *ifp;
	u_int16_t *m;
	struct ifmedia *ifm;
	int tmp;

	ifp = &sc->arpcom.ac_if;

	/* Specify MTU and interface index. */
	CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->sc_dv.dv_unit);
	CSR_WRITE_4(sc, TI_GCR_IFMTU,
	    TI_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
	TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);

	/* Load our MAC address. */
	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0]));
	CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2]));
	TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);

	/* Program promiscuous mode and multicast filters. */
	ti_iff(sc);

	/*
	 * If this is a Tigon 1, we should tell the
	 * firmware to use software packet filtering.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);

	/* Init RX ring. */
	if (ti_init_rx_ring_std(sc) == ENOBUFS)
		panic("not enough mbufs for rx ring");

	/* Init jumbo RX ring. */
	ti_init_rx_ring_jumbo(sc);

	/*
	 * If this is a Tigon 2, we can also configure the
	 * mini ring.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
		ti_init_rx_ring_mini(sc);

	CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
	sc->ti_rx_saved_considx = 0;

	/* Init TX ring. */
	ti_init_tx_ring(sc);

	/* Tell firmware we're alive. */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);

	/* Enable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/*
	 * Make sure to set media properly. We have to do this
	 * here since we have to issue commands in order to set
	 * the link negotiation and we can't issue commands until
	 * the firmware is running.
	 */
	ifm = &sc->ifmedia;
	tmp = ifm->ifm_media;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;
	ti_ifmedia_upd(ifp);
	ifm->ifm_media = tmp;
}
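
/*
 * Link handling is split across two register sets: TI_GCR_GLINK
 * controls the gigabit link and TI_GCR_LINK the 10/100 link.
 * ti_ifmedia_upd() below programs one or both and then asks the
 * firmware to renegotiate via TI_CMD_LINK_NEGOTIATION.
 */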

/*
 * Set media options.
 */
int
ti_ifmedia_upd(struct ifnet *ifp)
{
	struct ti_softc *sc;
	struct ifmedia *ifm;
	struct ti_cmd_desc cmd;

	sc = ifp->if_softc;
	ifm = &sc->ifmedia;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return(EINVAL);

	switch(IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_FULL_DUPLEX|TI_GLNK_RX_FLOWCTL_Y|
		    TI_GLNK_AUTONEGENB|TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
		    TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX|
		    TI_LNK_AUTONEGENB|TI_LNK_ENB);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_BOTH, 0);
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, 0);
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
		break;
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_100_TX:
	case IFM_10_T:
		CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF);
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
		}
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_10_100, 0);
		break;
	}

	return (0);
}
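
/*
 * sc->ti_linkstat is expected to track the most recent firmware link
 * event (hence the TI_EV_CODE_* values); ti_ifmedia_sts() only decodes
 * it together with the GLINK/LINK status registers and never touches
 * the PHY directly.
 */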

/*
 * Report current media status.
 */
void
ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ti_softc *sc;
	u_int32_t media = 0;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
		if (sc->ti_copper)
			ifmr->ifm_active |= IFM_1000_T;
		else
			ifmr->ifm_active |= IFM_1000_SX;
		if (media & TI_GLNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
		if (sc->ti_copper) {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_TX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_T;
		} else {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_FX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_FL;
		}
		if (media & TI_LNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		if (media & TI_LNK_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
	}
}
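
/*
 * Handle socket ioctls.  Requests that only change the receive filter
 * come back as ENETRESET and are handled by reprogramming the chip
 * through ti_iff() while the interface stays running.
 */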

int
ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ti_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			ti_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				ti_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ti_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			ti_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
ti_watchdog(struct ifnet *ifp)
{
	struct ti_softc *sc;

	sc = ifp->if_softc;

	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
	ti_stop(sc);
	ti_init(sc);

	ifp->if_oerrors++;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
ti_stop(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ti_cmd_desc cmd;

	ifp = &sc->arpcom.ac_if;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Disable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
	/*
	 * Tell firmware we're shutting down.
	 */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);

	/* Halt and reinitialize. */
	ti_chipinit(sc);
	ti_mem_set(sc, 0x2000, 0x100000 - 0x2000);
	ti_chipinit(sc);

	/* Free the RX lists. */
	ti_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	ti_free_rx_ring_jumbo(sc);

	/* Free mini RX list. */
	ti_free_rx_ring_mini(sc);

	/* Free TX buffers. */
	ti_free_tx_ring(sc);

	sc->ti_ev_prodidx.ti_idx = 0;
	sc->ti_return_prodidx.ti_idx = 0;
	sc->ti_tx_considx.ti_idx = 0;
	sc->ti_tx_saved_considx = TI_TXCONS_UNSET;
}