1 /* $OpenBSD: ti.c,v 1.1 2009/08/29 21:12:55 kettenis Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 * $FreeBSD: src/sys/pci/if_ti.c,v 1.25 2000/01/18 00:26:29 wpaul Exp $ 35 */ 36 37 /* 38 * Alteon Networks Tigon PCI gigabit ethernet driver for OpenBSD. 39 * 40 * Written by Bill Paul <wpaul@ctr.columbia.edu> 41 * Electrical Engineering Department 42 * Columbia University, New York City 43 */ 44 45 /* 46 * The Alteon Networks Tigon chip contains an embedded R4000 CPU, 47 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs 48 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The 49 * Tigon supports hardware IP, TCP and UCP checksumming, multicast 50 * filtering and jumbo (9014 byte) frames. The hardware is largely 51 * controlled by firmware, which must be loaded into the NIC during 52 * initialization. 53 * 54 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware 55 * revision, which supports new features such as extended commands, 56 * extended jumbo receive ring desciptors and a mini receive ring. 57 * 58 * Alteon Networks is to be commended for releasing such a vast amount 59 * of development material for the Tigon NIC without requiring an NDA 60 * (although they really should have done it a long time ago). With 61 * any luck, the other vendors will finally wise up and follow Alteon's 62 * stellar example. 63 * 64 * The following people deserve special thanks: 65 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board 66 * for testing 67 * - Raymond Lee of Netgear, for providing a pair of Netgear 68 * GA620 Tigon 2 boards for testing 69 * - Ulf Zimmermann, for bringing the GA260 to my attention and 70 * convincing me to write this driver. 71 * - Andrew Gallatin for providing FreeBSD/Alpha support. 
72 */ 73 74 #include "bpfilter.h" 75 #include "vlan.h" 76 77 #include <sys/param.h> 78 #include <sys/systm.h> 79 #include <sys/sockio.h> 80 #include <sys/mbuf.h> 81 #include <sys/malloc.h> 82 #include <sys/kernel.h> 83 #include <sys/socket.h> 84 #include <sys/device.h> 85 #include <sys/queue.h> 86 87 #include <net/if.h> 88 #include <net/if_dl.h> 89 #include <net/if_types.h> 90 91 #ifdef INET 92 #include <netinet/in.h> 93 #include <netinet/in_systm.h> 94 #include <netinet/in_var.h> 95 #include <netinet/ip.h> 96 #include <netinet/if_ether.h> 97 #endif 98 99 #include <net/if_media.h> 100 101 #if NBPFILTER > 0 102 #include <net/bpf.h> 103 #endif 104 105 #if NVLAN > 0 106 #include <net/if_types.h> 107 #include <net/if_vlan_var.h> 108 #endif 109 110 #include <machine/bus.h> 111 112 #include <dev/ic/tireg.h> 113 #include <dev/ic/tivar.h> 114 #include <dev/pci/pcireg.h> 115 116 struct cfdriver ti_cd = { 117 NULL, "ti", DV_IFNET 118 }; 119 120 void ti_txeof_tigon1(struct ti_softc *); 121 void ti_txeof_tigon2(struct ti_softc *); 122 void ti_rxeof(struct ti_softc *); 123 124 void ti_stats_update(struct ti_softc *); 125 int ti_encap_tigon1(struct ti_softc *, struct mbuf *, u_int32_t *); 126 int ti_encap_tigon2(struct ti_softc *, struct mbuf *, u_int32_t *); 127 128 int ti_intr(void *); 129 void ti_start(struct ifnet *); 130 int ti_ioctl(struct ifnet *, u_long, caddr_t); 131 void ti_init(void *); 132 void ti_init2(struct ti_softc *); 133 void ti_stop(struct ti_softc *); 134 void ti_watchdog(struct ifnet *); 135 void ti_shutdown(void *); 136 int ti_ifmedia_upd(struct ifnet *); 137 void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *); 138 139 u_int32_t ti_eeprom_putbyte(struct ti_softc *, int); 140 u_int8_t ti_eeprom_getbyte(struct ti_softc *, int, u_int8_t *); 141 int ti_read_eeprom(struct ti_softc *, caddr_t, int, int); 142 143 void ti_add_mcast(struct ti_softc *, struct ether_addr *); 144 void ti_del_mcast(struct ti_softc *, struct ether_addr *); 145 void ti_setmulti(struct ti_softc *); 146 147 void ti_mem_read(struct ti_softc *, u_int32_t, u_int32_t, void *); 148 void ti_mem_write(struct ti_softc *, u_int32_t, u_int32_t, const void*); 149 void ti_mem_set(struct ti_softc *, u_int32_t, u_int32_t); 150 void ti_loadfw(struct ti_softc *); 151 void ti_cmd(struct ti_softc *, struct ti_cmd_desc *); 152 void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, 153 caddr_t, int); 154 void ti_handle_events(struct ti_softc *); 155 int ti_alloc_jumbo_mem(struct ti_softc *); 156 void *ti_jalloc(struct ti_softc *); 157 void ti_jfree(caddr_t, u_int, void *); 158 int ti_newbuf_std(struct ti_softc *, int, struct mbuf *, bus_dmamap_t); 159 int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *, bus_dmamap_t); 160 int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *); 161 int ti_init_rx_ring_std(struct ti_softc *); 162 void ti_free_rx_ring_std(struct ti_softc *); 163 int ti_init_rx_ring_jumbo(struct ti_softc *); 164 void ti_free_rx_ring_jumbo(struct ti_softc *); 165 int ti_init_rx_ring_mini(struct ti_softc *); 166 void ti_free_rx_ring_mini(struct ti_softc *); 167 void ti_free_tx_ring(struct ti_softc *); 168 int ti_init_tx_ring(struct ti_softc *); 169 170 int ti_64bitslot_war(struct ti_softc *); 171 int ti_chipinit(struct ti_softc *); 172 void ti_chipinit_pci(struct ti_softc *); 173 void ti_chipinit_sbus(struct ti_softc *); 174 int ti_gibinit(struct ti_softc *); 175 176 /* 177 * Send an instruction or address to the EEPROM, check for ACK. 
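 *
 * The EEPROM hangs off three bits in the TI_MISC_LOCAL_CTL register:
 * TI_MLC_EE_DOUT (data out), TI_MLC_EE_CLK (clock) and TI_MLC_EE_DIN
 * (data in).  The routine below clocks the eight bits of 'byte' out
 * MSB first on DOUT, pulsing CLK for each bit, then drops TX mode
 * and samples DIN on one more clock pulse.  A zero return means the
 * device acked the byte; non-zero means no ack was seen.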
178 */ 179 u_int32_t 180 ti_eeprom_putbyte(struct ti_softc *sc, int byte) 181 { 182 int i, ack = 0; 183 184 /* 185 * Make sure we're in TX mode. 186 */ 187 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 188 189 /* 190 * Feed in each bit and strobe the clock. 191 */ 192 for (i = 0x80; i; i >>= 1) { 193 if (byte & i) 194 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 195 else 196 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 197 DELAY(1); 198 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 199 DELAY(1); 200 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 201 } 202 203 /* 204 * Turn off TX mode. 205 */ 206 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 207 208 /* 209 * Check for ack. 210 */ 211 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 212 ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN; 213 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 214 215 return (ack); 216 } 217 218 /* 219 * Read a byte of data stored in the EEPROM at address 'addr.' 220 * We have to send two address bytes since the EEPROM can hold 221 * more than 256 bytes of data. 222 */ 223 u_int8_t 224 ti_eeprom_getbyte(struct ti_softc *sc, int addr, u_int8_t *dest) 225 { 226 int i; 227 u_int8_t byte = 0; 228 229 EEPROM_START; 230 231 /* 232 * Send write control code to EEPROM. 233 */ 234 if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 235 printf("%s: failed to send write command, status: %x\n", 236 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 237 return (1); 238 } 239 240 /* 241 * Send first byte of address of byte we want to read. 242 */ 243 if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) { 244 printf("%s: failed to send address, status: %x\n", 245 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 246 return (1); 247 } 248 /* 249 * Send second byte address of byte we want to read. 250 */ 251 if (ti_eeprom_putbyte(sc, addr & 0xFF)) { 252 printf("%s: failed to send address, status: %x\n", 253 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 254 return (1); 255 } 256 257 EEPROM_STOP; 258 EEPROM_START; 259 /* 260 * Send read control code to EEPROM. 261 */ 262 if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 263 printf("%s: failed to send read command, status: %x\n", 264 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 265 return (1); 266 } 267 268 /* 269 * Start reading bits from EEPROM. 270 */ 271 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 272 for (i = 0x80; i; i >>= 1) { 273 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 274 DELAY(1); 275 if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN) 276 byte |= i; 277 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 278 DELAY(1); 279 } 280 281 EEPROM_STOP; 282 283 /* 284 * No ACK generated for read, so just return byte. 285 */ 286 287 *dest = byte; 288 289 return (0); 290 } 291 292 /* 293 * Read a sequence of bytes from the EEPROM. 294 */ 295 int 296 ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt) 297 { 298 int err = 0, i; 299 u_int8_t byte = 0; 300 301 for (i = 0; i < cnt; i++) { 302 err = ti_eeprom_getbyte(sc, off + i, &byte); 303 if (err) 304 break; 305 *(dest + i) = byte; 306 } 307 308 return (err ? 1 : 0); 309 } 310 311 /* 312 * NIC memory read function. 313 * Can be used to copy data from NIC local memory. 
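 *
 * Only a TI_WINLEN byte window of the NIC's SRAM is visible to the
 * host at a time, through the TI_WINDOW aperture; TI_WINBASE selects
 * which window is mapped.  The loop below therefore clamps each
 * chunk so it never crosses a window boundary: set the window base,
 * copy up to the end of the current window with
 * bus_space_read_region_4(), then advance.  Addresses and lengths
 * are assumed to be multiples of 4, since the copy is done a word
 * at a time.  ti_mem_write() and ti_mem_set() below work the same way.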
314 */ 315 void 316 ti_mem_read(struct ti_softc *sc, u_int32_t addr, u_int32_t len, void *buf) 317 { 318 int segptr, segsize, cnt; 319 caddr_t ptr; 320 321 segptr = addr; 322 cnt = len; 323 ptr = buf; 324 325 while(cnt) { 326 if (cnt < TI_WINLEN) 327 segsize = cnt; 328 else 329 segsize = TI_WINLEN - (segptr % TI_WINLEN); 330 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 331 bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle, 332 TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr, 333 segsize / 4); 334 ptr += segsize; 335 segptr += segsize; 336 cnt -= segsize; 337 } 338 } 339 340 /* 341 * NIC memory write function. 342 * Can be used to copy data into NIC local memory. 343 */ 344 void 345 ti_mem_write(struct ti_softc *sc, u_int32_t addr, u_int32_t len, 346 const void *buf) 347 { 348 int segptr, segsize, cnt; 349 const char *ptr; 350 351 segptr = addr; 352 cnt = len; 353 ptr = buf; 354 355 while(cnt) { 356 if (cnt < TI_WINLEN) 357 segsize = cnt; 358 else 359 segsize = TI_WINLEN - (segptr % TI_WINLEN); 360 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 361 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, 362 TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr, 363 segsize / 4); 364 ptr += segsize; 365 segptr += segsize; 366 cnt -= segsize; 367 } 368 } 369 370 /* 371 * NIC memory write function. 372 * Can be used to clear a section of NIC local memory. 373 */ 374 void 375 ti_mem_set(struct ti_softc *sc, u_int32_t addr, u_int32_t len) 376 { 377 int segptr, segsize, cnt; 378 379 segptr = addr; 380 cnt = len; 381 382 while(cnt) { 383 if (cnt < TI_WINLEN) 384 segsize = cnt; 385 else 386 segsize = TI_WINLEN - (segptr % TI_WINLEN); 387 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 388 bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle, 389 TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4); 390 segptr += segsize; 391 cnt -= segsize; 392 } 393 } 394 395 /* 396 * Load firmware image into the NIC. Check that the firmware revision 397 * is acceptable and see if we want the firmware for the Tigon 1 or 398 * Tigon 2. 
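 *
 * The image is fetched with loadfirmware(9) under the name "tigon1"
 * or "tigon2" depending on the chip revision, and is stored
 * little-endian, so it is converted to host byte order first.  The
 * text, rodata and data sections are copied into NIC SRAM with
 * ti_mem_write(), the bss/sbss regions are cleared with ti_mem_set(),
 * and the on-board CPU's program counter is pointed at the entry
 * point.  The CPU itself is only released from halt later, in
 * ti_gibinit().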
399 */ 400 void 401 ti_loadfw(struct ti_softc *sc) 402 { 403 struct tigon_firmware *tf; 404 u_char *buf = NULL; 405 u_int32_t *b; 406 size_t buflen, i, cnt; 407 char *name; 408 int error; 409 410 switch(sc->ti_hwrev) { 411 case TI_HWREV_TIGON: 412 name = "tigon1"; 413 break; 414 case TI_HWREV_TIGON_II: 415 name = "tigon2"; 416 break; 417 default: 418 printf("%s: can't load firmware: unknown hardware rev\n", 419 sc->sc_dv.dv_xname); 420 return; 421 } 422 423 error = loadfirmware(name, &buf, &buflen); 424 if (error) 425 return; 426 /* convert firmware to host byte order */ 427 b = (u_int32_t *)buf; 428 cnt = buflen / sizeof(u_int32_t); 429 for (i = 0; i < cnt; i++) 430 b[i] = letoh32(b[i]); 431 432 tf = (struct tigon_firmware *)buf; 433 if (tf->FwReleaseMajor != TI_FIRMWARE_MAJOR || 434 tf->FwReleaseMinor != TI_FIRMWARE_MINOR || 435 tf->FwReleaseFix != TI_FIRMWARE_FIX) { 436 printf("%s: firmware revision mismatch; want " 437 "%d.%d.%d, got %d.%d.%d\n", sc->sc_dv.dv_xname, 438 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 439 TI_FIRMWARE_FIX, tf->FwReleaseMajor, 440 tf->FwReleaseMinor, tf->FwReleaseFix); 441 free(buf, M_DEVBUF); 442 return; 443 } 444 ti_mem_write(sc, tf->FwTextAddr, tf->FwTextLen, 445 (caddr_t)&tf->data[tf->FwTextOffset]); 446 ti_mem_write(sc, tf->FwRodataAddr, tf->FwRodataLen, 447 (caddr_t)&tf->data[tf->FwRodataOffset]); 448 ti_mem_write(sc, tf->FwDataAddr, tf->FwDataLen, 449 (caddr_t)&tf->data[tf->FwDataOffset]); 450 ti_mem_set(sc, tf->FwBssAddr, tf->FwBssLen); 451 ti_mem_set(sc, tf->FwSbssAddr, tf->FwSbssLen); 452 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tf->FwStartAddr); 453 free(buf, M_DEVBUF); 454 } 455 456 /* 457 * Send the NIC a command via the command ring. 458 */ 459 void 460 ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd) 461 { 462 u_int32_t index; 463 464 index = sc->ti_cmd_saved_prodidx; 465 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd)); 466 TI_INC(index, TI_CMD_RING_CNT); 467 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 468 sc->ti_cmd_saved_prodidx = index; 469 } 470 471 /* 472 * Send the NIC an extended command. The 'len' parameter specifies the 473 * number of command slots to include after the initial command. 474 */ 475 void 476 ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg, 477 int len) 478 { 479 u_int32_t index; 480 int i; 481 482 index = sc->ti_cmd_saved_prodidx; 483 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd)); 484 TI_INC(index, TI_CMD_RING_CNT); 485 for (i = 0; i < len; i++) { 486 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), 487 *(u_int32_t *)(&arg[i * 4])); 488 TI_INC(index, TI_CMD_RING_CNT); 489 } 490 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 491 sc->ti_cmd_saved_prodidx = index; 492 } 493 494 /* 495 * Handle events that have triggered interrupts. 
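 *
 * The firmware appends events to the event ring in host memory and
 * advances the producer index; we consume entries until our saved
 * consumer index catches up.  Each event carries an event type and a
 * code.  Once an event has been processed, the consumer index is
 * written back to TI_GCR_EVENTCONS_IDX so the firmware can reuse
 * those slots.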
496 */ 497 void 498 ti_handle_events(struct ti_softc *sc) 499 { 500 struct ti_event_desc *e; 501 struct ifnet *ifp = &sc->arpcom.ac_if; 502 503 if (sc->ti_rdata->ti_event_ring == NULL) 504 return; 505 506 while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) { 507 e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx]; 508 switch (TI_EVENT_EVENT(e)) { 509 case TI_EV_LINKSTAT_CHANGED: 510 sc->ti_linkstat = TI_EVENT_CODE(e); 511 switch (sc->ti_linkstat) { 512 case TI_EV_CODE_LINK_UP: 513 case TI_EV_CODE_GIG_LINK_UP: 514 { 515 struct ifmediareq ifmr; 516 517 bzero(&ifmr, sizeof(ifmr)); 518 ti_ifmedia_sts(ifp, &ifmr); 519 if (ifmr.ifm_active & IFM_FDX) { 520 ifp->if_link_state = 521 LINK_STATE_FULL_DUPLEX; 522 } else { 523 ifp->if_link_state = 524 LINK_STATE_HALF_DUPLEX; 525 } 526 if_link_state_change(ifp); 527 ifp->if_baudrate = 528 ifmedia_baudrate(ifmr.ifm_active); 529 break; 530 } 531 case TI_EV_CODE_LINK_DOWN: 532 ifp->if_link_state = LINK_STATE_DOWN; 533 if_link_state_change(ifp); 534 ifp->if_baudrate = 0; 535 break; 536 default: 537 printf("%s: unknown link state code %d\n", 538 sc->sc_dv.dv_xname, sc->ti_linkstat); 539 } 540 break; 541 case TI_EV_ERROR: 542 if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD) 543 printf("%s: invalid command\n", 544 sc->sc_dv.dv_xname); 545 else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD) 546 printf("%s: unknown command\n", 547 sc->sc_dv.dv_xname); 548 else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG) 549 printf("%s: bad config data\n", 550 sc->sc_dv.dv_xname); 551 break; 552 case TI_EV_FIRMWARE_UP: 553 ti_init2(sc); 554 break; 555 case TI_EV_STATS_UPDATED: 556 ti_stats_update(sc); 557 break; 558 case TI_EV_RESET_JUMBO_RING: 559 case TI_EV_MCAST_UPDATED: 560 /* Who cares. */ 561 break; 562 default: 563 printf("%s: unknown event: %d\n", sc->sc_dv.dv_xname, 564 TI_EVENT_EVENT(e)); 565 break; 566 } 567 /* Advance the consumer index. */ 568 TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT); 569 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx); 570 } 571 } 572 573 /* 574 * Memory management for the jumbo receive ring is a pain in the 575 * butt. We need to allocate at least 9018 bytes of space per frame, 576 * _and_ it has to be contiguous (unless you use the extended 577 * jumbo descriptor format). Using malloc() all the time won't 578 * work: malloc() allocates memory in powers of two, which means we 579 * would end up wasting a considerable amount of space by allocating 580 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have 581 * to do our own memory management. 582 * 583 * The driver needs to allocate a contiguous chunk of memory at boot 584 * time. We then chop this up ourselves into 9K pieces and use them 585 * as external mbuf storage. 586 * 587 * One issue here is how much memory to allocate. The jumbo ring has 588 * 256 slots in it, but at 9K per slot than can consume over 2MB of 589 * RAM. This is a bit much, especially considering we also need 590 * RAM for the standard ring and mini ring (on the Tigon 2). To 591 * save space, we only actually allocate enough memory for 64 slots 592 * by default, which works out to between 500 and 600K. This can 593 * be tuned by changing a #define in if_tireg.h. 594 */ 595 596 int 597 ti_alloc_jumbo_mem(struct ti_softc *sc) 598 { 599 caddr_t ptr, kva; 600 bus_dma_segment_t seg; 601 int i, rseg, state, error; 602 struct ti_jpool_entry *entry; 603 604 state = error = 0; 605 606 /* Grab a big chunk o' storage. 
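	 * The allocation follows the usual bus_dma(9) ladder:
	 * bus_dmamem_alloc() -> bus_dmamem_map() -> bus_dmamap_create()
	 * -> bus_dmamap_load().  The 'state' variable records how far
	 * we got so the error path at 'out:' can unwind exactly the
	 * steps that succeeded (its switch cases deliberately fall
	 * through).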
*/ 607 if (bus_dmamem_alloc(sc->sc_dmatag, TI_JMEM, PAGE_SIZE, 0, 608 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 609 printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname); 610 return (ENOBUFS); 611 } 612 613 state = 1; 614 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, TI_JMEM, &kva, 615 BUS_DMA_NOWAIT)) { 616 printf("%s: can't map dma buffers (%d bytes)\n", 617 sc->sc_dv.dv_xname, TI_JMEM); 618 error = ENOBUFS; 619 goto out; 620 } 621 622 state = 2; 623 if (bus_dmamap_create(sc->sc_dmatag, TI_JMEM, 1, TI_JMEM, 0, 624 BUS_DMA_NOWAIT, &sc->ti_cdata.ti_rx_jumbo_map)) { 625 printf("%s: can't create dma map\n", sc->sc_dv.dv_xname); 626 error = ENOBUFS; 627 goto out; 628 } 629 630 state = 3; 631 if (bus_dmamap_load(sc->sc_dmatag, sc->ti_cdata.ti_rx_jumbo_map, kva, 632 TI_JMEM, NULL, BUS_DMA_NOWAIT)) { 633 printf("%s: can't load dma map\n", sc->sc_dv.dv_xname); 634 error = ENOBUFS; 635 goto out; 636 } 637 638 state = 4; 639 sc->ti_cdata.ti_jumbo_buf = (caddr_t)kva; 640 641 SLIST_INIT(&sc->ti_jfree_listhead); 642 SLIST_INIT(&sc->ti_jinuse_listhead); 643 644 /* 645 * Now divide it up into 9K pieces and save the addresses 646 * in an array. 647 */ 648 ptr = sc->ti_cdata.ti_jumbo_buf; 649 for (i = 0; i < TI_JSLOTS; i++) { 650 sc->ti_cdata.ti_jslots[i].ti_buf = ptr; 651 sc->ti_cdata.ti_jslots[i].ti_inuse = 0; 652 ptr += TI_JLEN; 653 entry = malloc(sizeof(struct ti_jpool_entry), 654 M_DEVBUF, M_NOWAIT); 655 if (entry == NULL) { 656 sc->ti_cdata.ti_jumbo_buf = NULL; 657 printf("%s: no memory for jumbo buffer queue\n", 658 sc->sc_dv.dv_xname); 659 error = ENOBUFS; 660 goto out; 661 } 662 entry->slot = i; 663 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries); 664 } 665 out: 666 if (error != 0) { 667 switch (state) { 668 case 4: 669 bus_dmamap_unload(sc->sc_dmatag, 670 sc->ti_cdata.ti_rx_jumbo_map); 671 case 3: 672 bus_dmamap_destroy(sc->sc_dmatag, 673 sc->ti_cdata.ti_rx_jumbo_map); 674 case 2: 675 bus_dmamem_unmap(sc->sc_dmatag, kva, TI_JMEM); 676 case 1: 677 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 678 break; 679 default: 680 break; 681 } 682 } 683 684 return (error); 685 } 686 687 /* 688 * Allocate a jumbo buffer. 689 */ 690 void * 691 ti_jalloc(struct ti_softc *sc) 692 { 693 struct ti_jpool_entry *entry; 694 695 entry = SLIST_FIRST(&sc->ti_jfree_listhead); 696 697 if (entry == NULL) 698 return (NULL); 699 700 SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries); 701 SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries); 702 sc->ti_cdata.ti_jslots[entry->slot].ti_inuse = 1; 703 return (sc->ti_cdata.ti_jslots[entry->slot].ti_buf); 704 } 705 706 /* 707 * Release a jumbo buffer. 708 */ 709 void 710 ti_jfree(caddr_t buf, u_int size, void *arg) 711 { 712 struct ti_softc *sc; 713 int i; 714 struct ti_jpool_entry *entry; 715 716 /* Extract the softc struct pointer. 
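	 * ti_jfree() is the external-storage free routine registered
	 * with MEXTADD() in ti_newbuf_jumbo(), so it runs when the last
	 * reference to a jumbo mbuf is dropped.  'buf' points into the
	 * jumbo slab and 'arg' is the softc that was handed to
	 * MEXTADD(); the slot index is recovered from the buffer
	 * address below.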
*/ 717 sc = (struct ti_softc *)arg; 718 719 if (sc == NULL) 720 panic("ti_jfree: can't find softc pointer!"); 721 722 /* calculate the slot this buffer belongs to */ 723 i = ((vaddr_t)buf - (vaddr_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN; 724 725 if ((i < 0) || (i >= TI_JSLOTS)) 726 panic("ti_jfree: asked to free buffer that we don't manage!"); 727 else if (sc->ti_cdata.ti_jslots[i].ti_inuse == 0) 728 panic("ti_jfree: buffer already free!"); 729 730 sc->ti_cdata.ti_jslots[i].ti_inuse--; 731 if(sc->ti_cdata.ti_jslots[i].ti_inuse == 0) { 732 entry = SLIST_FIRST(&sc->ti_jinuse_listhead); 733 if (entry == NULL) 734 panic("ti_jfree: buffer not in use!"); 735 entry->slot = i; 736 SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries); 737 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, 738 entry, jpool_entries); 739 } 740 } 741 742 /* 743 * Intialize a standard receive ring descriptor. 744 */ 745 int 746 ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m, 747 bus_dmamap_t dmamap) 748 { 749 struct mbuf *m_new = NULL; 750 struct ti_rx_desc *r; 751 752 if (dmamap == NULL) { 753 /* if (m) panic() */ 754 755 if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 756 0, BUS_DMA_NOWAIT, &dmamap)) { 757 printf("%s: can't create recv map\n", 758 sc->sc_dv.dv_xname); 759 return (ENOMEM); 760 } 761 } else if (m == NULL) 762 bus_dmamap_unload(sc->sc_dmatag, dmamap); 763 764 sc->ti_cdata.ti_rx_std_map[i] = dmamap; 765 766 if (m == NULL) { 767 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 768 if (m_new == NULL) 769 return (ENOBUFS); 770 771 MCLGET(m_new, M_DONTWAIT); 772 if (!(m_new->m_flags & M_EXT)) { 773 m_freem(m_new); 774 return (ENOBUFS); 775 } 776 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 777 778 m_adj(m_new, ETHER_ALIGN); 779 780 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new, 781 BUS_DMA_NOWAIT)) 782 return (ENOBUFS); 783 784 } else { 785 /* 786 * We're re-using a previously allocated mbuf; 787 * be sure to re-init pointers and lengths to 788 * default values. 789 */ 790 m_new = m; 791 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 792 m_new->m_data = m_new->m_ext.ext_buf; 793 m_adj(m_new, ETHER_ALIGN); 794 } 795 796 sc->ti_cdata.ti_rx_std_chain[i] = m_new; 797 r = &sc->ti_rdata->ti_rx_std_ring[i]; 798 TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr; 799 r->ti_type = TI_BDTYPE_RECV_BD; 800 r->ti_flags = TI_BDFLAG_IP_CKSUM; 801 r->ti_len = dmamap->dm_segs[0].ds_len; 802 r->ti_idx = i; 803 804 if ((dmamap->dm_segs[0].ds_addr & ~(MCLBYTES - 1)) != 805 ((dmamap->dm_segs[0].ds_addr + dmamap->dm_segs[0].ds_len - 1) & 806 ~(MCLBYTES - 1))) 807 panic("%s: overwritten!!!", sc->sc_dv.dv_xname); 808 809 return (0); 810 } 811 812 /* 813 * Intialize a mini receive ring descriptor. This only applies to 814 * the Tigon 2. 
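 *
 * Mini ring buffers are plain header mbufs (MHLEN bytes of storage,
 * no cluster attached), which avoids dedicating a 2K cluster to
 * every small received frame.  The descriptor is marked with
 * TI_BDFLAG_MINI_RING so ti_rxeof() can tell which ring a completed
 * frame came from.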
815 */ 816 int 817 ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m, 818 bus_dmamap_t dmamap) 819 { 820 struct mbuf *m_new = NULL; 821 struct ti_rx_desc *r; 822 823 if (dmamap == NULL) { 824 /* if (m) panic() */ 825 826 if (bus_dmamap_create(sc->sc_dmatag, MHLEN, 1, MHLEN, 827 0, BUS_DMA_NOWAIT, &dmamap)) { 828 printf("%s: can't create recv map\n", 829 sc->sc_dv.dv_xname); 830 return (ENOMEM); 831 } 832 } else if (m == NULL) 833 bus_dmamap_unload(sc->sc_dmatag, dmamap); 834 835 sc->ti_cdata.ti_rx_mini_map[i] = dmamap; 836 837 if (m == NULL) { 838 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 839 if (m_new == NULL) 840 return (ENOBUFS); 841 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 842 m_adj(m_new, ETHER_ALIGN); 843 844 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new, 845 BUS_DMA_NOWAIT)) 846 return (ENOBUFS); 847 848 } else { 849 /* 850 * We're re-using a previously allocated mbuf; 851 * be sure to re-init pointers and lengths to 852 * default values. 853 */ 854 m_new = m; 855 m_new->m_data = m_new->m_pktdat; 856 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 857 } 858 859 r = &sc->ti_rdata->ti_rx_mini_ring[i]; 860 sc->ti_cdata.ti_rx_mini_chain[i] = m_new; 861 TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr; 862 r->ti_type = TI_BDTYPE_RECV_BD; 863 r->ti_flags = TI_BDFLAG_MINI_RING | TI_BDFLAG_IP_CKSUM; 864 r->ti_len = dmamap->dm_segs[0].ds_len; 865 r->ti_idx = i; 866 867 return (0); 868 } 869 870 /* 871 * Initialize a jumbo receive ring descriptor. This allocates 872 * a jumbo buffer from the pool managed internally by the driver. 873 */ 874 int 875 ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m) 876 { 877 struct mbuf *m_new = NULL; 878 struct ti_rx_desc *r; 879 880 if (m == NULL) { 881 caddr_t buf = NULL; 882 883 /* Allocate the mbuf. */ 884 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 885 if (m_new == NULL) 886 return (ENOBUFS); 887 888 /* Allocate the jumbo buffer */ 889 buf = ti_jalloc(sc); 890 if (buf == NULL) { 891 m_freem(m_new); 892 return (ENOBUFS); 893 } 894 895 /* Attach the buffer to the mbuf. */ 896 m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN; 897 MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, 0, ti_jfree, sc); 898 } else { 899 /* 900 * We're re-using a previously allocated mbuf; 901 * be sure to re-init pointers and lengths to 902 * default values. 903 */ 904 m_new = m; 905 m_new->m_data = m_new->m_ext.ext_buf; 906 m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN; 907 } 908 909 m_adj(m_new, ETHER_ALIGN); 910 /* Set up the descriptor. */ 911 r = &sc->ti_rdata->ti_rx_jumbo_ring[i]; 912 sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new; 913 TI_HOSTADDR(r->ti_addr) = TI_JUMBO_DMA_ADDR(sc, m_new); 914 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; 915 r->ti_flags = TI_BDFLAG_JUMBO_RING | TI_BDFLAG_IP_CKSUM; 916 r->ti_len = m_new->m_len; 917 r->ti_idx = i; 918 919 return (0); 920 } 921 922 /* 923 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 924 * that's 1MB of memory, which is a lot. For now, we fill only the first 925 * 256 ring entries and hope that our CPU is fast enough to keep up with 926 * the NIC. 
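 * (The number of slots actually filled is TI_SSLOTS; once the loop
 * below has run, the NIC is told about the new buffers by bumping
 * the standard ring producer index with TI_UPDATE_STDPROD().)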
927 */ 928 int 929 ti_init_rx_ring_std(struct ti_softc *sc) 930 { 931 int i; 932 struct ti_cmd_desc cmd; 933 934 for (i = 0; i < TI_SSLOTS; i++) { 935 if (ti_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 936 return (ENOBUFS); 937 } 938 939 TI_UPDATE_STDPROD(sc, i - 1); 940 sc->ti_std = i - 1; 941 942 return (0); 943 } 944 945 void 946 ti_free_rx_ring_std(struct ti_softc *sc) 947 { 948 int i; 949 950 for (i = 0; i < TI_STD_RX_RING_CNT; i++) { 951 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) { 952 m_freem(sc->ti_cdata.ti_rx_std_chain[i]); 953 sc->ti_cdata.ti_rx_std_chain[i] = NULL; 954 bus_dmamap_destroy(sc->sc_dmatag, 955 sc->ti_cdata.ti_rx_std_map[i]); 956 sc->ti_cdata.ti_rx_std_map[i] = 0; 957 } 958 bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i], 959 sizeof(struct ti_rx_desc)); 960 } 961 } 962 963 int 964 ti_init_rx_ring_jumbo(struct ti_softc *sc) 965 { 966 int i; 967 struct ti_cmd_desc cmd; 968 969 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 970 if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 971 return (ENOBUFS); 972 }; 973 974 TI_UPDATE_JUMBOPROD(sc, i - 1); 975 sc->ti_jumbo = i - 1; 976 977 return (0); 978 } 979 980 void 981 ti_free_rx_ring_jumbo(struct ti_softc *sc) 982 { 983 int i; 984 985 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 986 if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) { 987 m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]); 988 sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL; 989 } 990 bzero((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i], 991 sizeof(struct ti_rx_desc)); 992 } 993 } 994 995 int 996 ti_init_rx_ring_mini(struct ti_softc *sc) 997 { 998 int i; 999 1000 for (i = 0; i < TI_MSLOTS; i++) { 1001 if (ti_newbuf_mini(sc, i, NULL, 0) == ENOBUFS) 1002 return (ENOBUFS); 1003 }; 1004 1005 TI_UPDATE_MINIPROD(sc, i - 1); 1006 sc->ti_mini = i - 1; 1007 1008 return (0); 1009 } 1010 1011 void 1012 ti_free_rx_ring_mini(struct ti_softc *sc) 1013 { 1014 int i; 1015 1016 for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { 1017 if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) { 1018 m_freem(sc->ti_cdata.ti_rx_mini_chain[i]); 1019 sc->ti_cdata.ti_rx_mini_chain[i] = NULL; 1020 bus_dmamap_destroy(sc->sc_dmatag, 1021 sc->ti_cdata.ti_rx_mini_map[i]); 1022 sc->ti_cdata.ti_rx_mini_map[i] = 0; 1023 } 1024 bzero((char *)&sc->ti_rdata->ti_rx_mini_ring[i], 1025 sizeof(struct ti_rx_desc)); 1026 } 1027 } 1028 1029 void 1030 ti_free_tx_ring(struct ti_softc *sc) 1031 { 1032 int i; 1033 struct ti_txmap_entry *entry; 1034 1035 if (sc->ti_rdata->ti_tx_ring == NULL) 1036 return; 1037 1038 for (i = 0; i < TI_TX_RING_CNT; i++) { 1039 if (sc->ti_cdata.ti_tx_chain[i] != NULL) { 1040 m_freem(sc->ti_cdata.ti_tx_chain[i]); 1041 sc->ti_cdata.ti_tx_chain[i] = NULL; 1042 SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, 1043 sc->ti_cdata.ti_tx_map[i], link); 1044 sc->ti_cdata.ti_tx_map[i] = 0; 1045 } 1046 bzero((char *)&sc->ti_rdata->ti_tx_ring[i], 1047 sizeof(struct ti_tx_desc)); 1048 } 1049 1050 while ((entry = SLIST_FIRST(&sc->ti_tx_map_listhead))) { 1051 SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link); 1052 bus_dmamap_destroy(sc->sc_dmatag, entry->dmamap); 1053 free(entry, M_DEVBUF); 1054 } 1055 } 1056 1057 int 1058 ti_init_tx_ring(struct ti_softc *sc) 1059 { 1060 int i; 1061 bus_dmamap_t dmamap; 1062 struct ti_txmap_entry *entry; 1063 1064 sc->ti_txcnt = 0; 1065 sc->ti_tx_saved_considx = 0; 1066 sc->ti_tx_saved_prodidx = 0; 1067 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0); 1068 1069 SLIST_INIT(&sc->ti_tx_map_listhead); 1070 for (i = 0; i < TI_TX_RING_CNT; i++) { 1071 if (bus_dmamap_create(sc->sc_dmatag, TI_JUMBO_FRAMELEN, 1072 TI_NTXSEG, MCLBYTES, 
0, BUS_DMA_NOWAIT, &dmamap)) 1073 return (ENOBUFS); 1074 1075 entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT); 1076 if (!entry) { 1077 bus_dmamap_destroy(sc->sc_dmatag, dmamap); 1078 return (ENOBUFS); 1079 } 1080 entry->dmamap = dmamap; 1081 SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry, link); 1082 } 1083 1084 return (0); 1085 } 1086 1087 /* 1088 * The Tigon 2 firmware has a new way to add/delete multicast addresses, 1089 * but we have to support the old way too so that Tigon 1 cards will 1090 * work. 1091 */ 1092 void 1093 ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr) 1094 { 1095 struct ti_cmd_desc cmd; 1096 u_int16_t *m; 1097 u_int32_t ext[2] = {0, 0}; 1098 1099 m = (u_int16_t *)&addr->ether_addr_octet[0]; 1100 1101 switch(sc->ti_hwrev) { 1102 case TI_HWREV_TIGON: 1103 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1104 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1105 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0); 1106 break; 1107 case TI_HWREV_TIGON_II: 1108 ext[0] = htons(m[0]); 1109 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1110 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2); 1111 break; 1112 default: 1113 printf("%s: unknown hwrev\n", sc->sc_dv.dv_xname); 1114 break; 1115 } 1116 } 1117 1118 void 1119 ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr) 1120 { 1121 struct ti_cmd_desc cmd; 1122 u_int16_t *m; 1123 u_int32_t ext[2] = {0, 0}; 1124 1125 m = (u_int16_t *)&addr->ether_addr_octet[0]; 1126 1127 switch(sc->ti_hwrev) { 1128 case TI_HWREV_TIGON: 1129 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1130 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1131 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0); 1132 break; 1133 case TI_HWREV_TIGON_II: 1134 ext[0] = htons(m[0]); 1135 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1136 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2); 1137 break; 1138 default: 1139 printf("%s: unknown hwrev\n", sc->sc_dv.dv_xname); 1140 break; 1141 } 1142 } 1143 1144 /* 1145 * Configure the Tigon's multicast address filter. 1146 * 1147 * The actual multicast table management is a bit of a pain, thanks to 1148 * slight brain damage on the part of both Alteon and us. With our 1149 * multicast code, we are only alerted when the multicast address table 1150 * changes and at that point we only have the current list of addresses: 1151 * we only know the current state, not the previous state, so we don't 1152 * actually know what addresses were removed or added. The firmware has 1153 * state, but we can't get our grubby mits on it, and there is no 'delete 1154 * all multicast addresses' command. Hence, we have to maintain our own 1155 * state so we know what addresses have been programmed into the NIC at 1156 * any given time. 1157 */ 1158 void 1159 ti_setmulti(struct ti_softc *sc) 1160 { 1161 struct ifnet *ifp; 1162 struct arpcom *ac = &sc->arpcom; 1163 struct ether_multi *enm; 1164 struct ether_multistep step; 1165 struct ti_cmd_desc cmd; 1166 struct ti_mc_entry *mc; 1167 u_int32_t intrs; 1168 1169 ifp = &sc->arpcom.ac_if; 1170 1171 allmulti: 1172 if (ifp->if_flags & IFF_ALLMULTI) { 1173 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0); 1174 return; 1175 } else { 1176 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0); 1177 } 1178 1179 /* Disable interrupts. */ 1180 intrs = CSR_READ_4(sc, TI_MB_HOSTINTR); 1181 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1182 1183 /* First, zot all the existing filters. 
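	 * Each one is removed both from the hardware, via a firmware
	 * command in ti_del_mcast(), and from ti_mc_listhead, our
	 * private record of what has been programmed; the current set
	 * is then re-added below from the interface's multicast list.
	 * Interrupts stay masked for the duration, presumably so the
	 * command-ring writes don't race the interrupt path.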
*/ 1184 while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) { 1185 mc = SLIST_FIRST(&sc->ti_mc_listhead); 1186 ti_del_mcast(sc, &mc->mc_addr); 1187 SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries); 1188 free(mc, M_DEVBUF); 1189 } 1190 1191 /* Now program new ones. */ 1192 ETHER_FIRST_MULTI(step, ac, enm); 1193 while (enm != NULL) { 1194 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1195 /* Re-enable interrupts. */ 1196 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); 1197 1198 ifp->if_flags |= IFF_ALLMULTI; 1199 goto allmulti; 1200 } 1201 mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT); 1202 if (mc == NULL) 1203 panic("ti_setmulti"); 1204 bcopy(enm->enm_addrlo, (char *)&mc->mc_addr, ETHER_ADDR_LEN); 1205 SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); 1206 ti_add_mcast(sc, &mc->mc_addr); 1207 ETHER_NEXT_MULTI(step, enm); 1208 } 1209 1210 /* Re-enable interrupts. */ 1211 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); 1212 } 1213 1214 /* 1215 * Check to see if the BIOS has configured us for a 64 bit slot when 1216 * we aren't actually in one. If we detect this condition, we can work 1217 * around it on the Tigon 2 by setting a bit in the PCI state register, 1218 * but for the Tigon 1 we must give up and abort the interface attach. 1219 */ 1220 int 1221 ti_64bitslot_war(struct ti_softc *sc) 1222 { 1223 if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) { 1224 CSR_WRITE_4(sc, 0x600, 0); 1225 CSR_WRITE_4(sc, 0x604, 0); 1226 CSR_WRITE_4(sc, 0x600, 0x5555AAAA); 1227 if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) { 1228 if (sc->ti_hwrev == TI_HWREV_TIGON) 1229 return (EINVAL); 1230 else { 1231 TI_SETBIT(sc, TI_PCI_STATE, 1232 TI_PCISTATE_32BIT_BUS); 1233 return (0); 1234 } 1235 } 1236 } 1237 1238 return (0); 1239 } 1240 1241 /* 1242 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1243 * self-test results. 1244 */ 1245 int 1246 ti_chipinit(struct ti_softc *sc) 1247 { 1248 u_int32_t chip_rev; 1249 1250 /* Initialize link to down state. */ 1251 sc->ti_linkstat = TI_EV_CODE_LINK_DOWN; 1252 1253 /* Set endianness before we access any non-PCI registers. */ 1254 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1255 TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24)); 1256 1257 /* Check the ROM failed bit to see if self-tests passed. */ 1258 if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) { 1259 printf("%s: board self-diagnostics failed!\n", 1260 sc->sc_dv.dv_xname); 1261 return (ENODEV); 1262 } 1263 1264 /* Halt the CPU. */ 1265 TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT); 1266 1267 /* Figure out the hardware revision. */ 1268 chip_rev = CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK; 1269 switch(chip_rev) { 1270 case TI_REV_TIGON_I: 1271 sc->ti_hwrev = TI_HWREV_TIGON; 1272 break; 1273 case TI_REV_TIGON_II: 1274 sc->ti_hwrev = TI_HWREV_TIGON_II; 1275 break; 1276 default: 1277 printf("\n"); 1278 printf("%s: unsupported chip revision: %x\n", 1279 sc->sc_dv.dv_xname, chip_rev); 1280 return (ENODEV); 1281 } 1282 1283 /* Do special setup for Tigon 2. */ 1284 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1285 TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT); 1286 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K); 1287 TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS); 1288 } 1289 1290 if (sc->ti_sbus) 1291 ti_chipinit_sbus(sc); 1292 else 1293 ti_chipinit_pci(sc); 1294 1295 /* Recommended settings from Tigon manual. 
*/ 1296 CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W); 1297 CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W); 1298 1299 if (ti_64bitslot_war(sc)) { 1300 printf("%s: bios thinks we're in a 64 bit slot, " 1301 "but we aren't", sc->sc_dv.dv_xname); 1302 return (EINVAL); 1303 } 1304 1305 return (0); 1306 } 1307 1308 void 1309 ti_chipinit_pci(struct ti_softc *sc) 1310 { 1311 u_int32_t cacheline; 1312 u_int32_t pci_writemax = 0; 1313 1314 /* Set up the PCI state register. */ 1315 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD | TI_PCI_WRITE_CMD); 1316 if (sc->ti_hwrev == TI_HWREV_TIGON_II) 1317 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT); 1318 1319 /* Clear the read/write max DMA parameters. */ 1320 TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA| 1321 TI_PCISTATE_READ_MAXDMA)); 1322 1323 /* Get cache line size. */ 1324 cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF; 1325 1326 /* 1327 * If the system has set enabled the PCI memory write 1328 * and invalidate command in the command register, set 1329 * the write max parameter accordingly. This is necessary 1330 * to use MWI with the Tigon 2. 1331 */ 1332 if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCI_COMMAND_INVALIDATE_ENABLE) { 1333 switch(cacheline) { 1334 case 1: 1335 case 4: 1336 case 8: 1337 case 16: 1338 case 32: 1339 case 64: 1340 break; 1341 default: 1342 /* Disable PCI memory write and invalidate. */ 1343 CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc, 1344 TI_PCI_CMDSTAT) & ~PCI_COMMAND_INVALIDATE_ENABLE); 1345 break; 1346 } 1347 } 1348 1349 #ifdef __brokenalpha__ 1350 /* 1351 * From the Alteon sample driver: 1352 * Must insure that we do not cross an 8K (bytes) boundary 1353 * for DMA reads. Our highest limit is 1K bytes. This is a 1354 * restriction on some ALPHA platforms with early revision 1355 * 21174 PCI chipsets, such as the AlphaPC 164lx 1356 */ 1357 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax|TI_PCI_READMAX_1024); 1358 #else 1359 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax); 1360 #endif 1361 1362 /* This sets the min dma param all the way up (0xff). */ 1363 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA); 1364 1365 /* Configure DMA variables. */ 1366 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_DMA_SWAP_OPTIONS | 1367 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB | 1368 TI_OPMODE_DONT_FRAG_JUMBO); 1369 } 1370 1371 void 1372 ti_chipinit_sbus(struct ti_softc *sc) 1373 { 1374 /* Set up the PCI state register. */ 1375 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD | TI_PCI_WRITE_CMD | 1376 TI_PCISTATE_NO_SWAP_READ_DMA | TI_PCISTATE_NO_SWAP_WRITE_DMA | 1377 TI_PCI_WRITEMAX_64 | TI_PCI_READMAX_64 | 1378 TI_PCISTATE_PROVIDE_LEN); 1379 1380 /* Configure DMA variables. */ 1381 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_WORDSWAP_BD | 1382 TI_OPMODE_1_DMA_ACTIVE | TI_OPMODE_SBUS | 1383 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB | 1384 TI_OPMODE_DONT_FRAG_JUMBO); 1385 } 1386 1387 /* 1388 * Initialize the general information block and firmware, and 1389 * start the CPU(s) running. 1390 */ 1391 int 1392 ti_gibinit(struct ti_softc *sc) 1393 { 1394 struct ti_rcb *rcb; 1395 int i; 1396 struct ifnet *ifp; 1397 1398 ifp = &sc->arpcom.ac_if; 1399 1400 /* Disable interrupts for now. */ 1401 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1402 1403 /* 1404 * Tell the chip where to find the general information block. 1405 * While this struct could go into >4GB memory, we allocate it in a 1406 * single slab with the other descriptors, and those don't seem to 1407 * support being located in a 64-bit region. 
1408 */ 1409 CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0); 1410 CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, 1411 TI_RING_DMA_ADDR(sc, ti_info) & 0xffffffff); 1412 1413 /* Load the firmware into SRAM. */ 1414 ti_loadfw(sc); 1415 1416 /* Set up the contents of the general info and ring control blocks. */ 1417 1418 /* Set up the event ring and producer pointer. */ 1419 rcb = &sc->ti_rdata->ti_info.ti_ev_rcb; 1420 1421 TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_event_ring); 1422 rcb->ti_flags = 0; 1423 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) = 1424 TI_RING_DMA_ADDR(sc, ti_ev_prodidx_r); 1425 sc->ti_ev_prodidx.ti_idx = 0; 1426 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0); 1427 sc->ti_ev_saved_considx = 0; 1428 1429 /* Set up the command ring and producer mailbox. */ 1430 rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb; 1431 1432 TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING); 1433 rcb->ti_flags = 0; 1434 rcb->ti_max_len = 0; 1435 for (i = 0; i < TI_CMD_RING_CNT; i++) { 1436 CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0); 1437 } 1438 CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0); 1439 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0); 1440 sc->ti_cmd_saved_prodidx = 0; 1441 1442 /* 1443 * Assign the address of the stats refresh buffer. 1444 * We re-use the current stats buffer for this to 1445 * conserve memory. 1446 */ 1447 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) = 1448 TI_RING_DMA_ADDR(sc, ti_info.ti_stats); 1449 1450 /* Set up the standard receive ring. */ 1451 rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb; 1452 TI_HOSTADDR(rcb->ti_hostaddr) = 1453 TI_RING_DMA_ADDR(sc, ti_rx_std_ring); 1454 rcb->ti_max_len = ETHER_MAX_LEN; 1455 rcb->ti_flags = 0; 1456 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1457 #if NVLAN > 0 1458 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1459 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1460 #endif 1461 1462 /* Set up the jumbo receive ring. */ 1463 rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb; 1464 TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_jumbo_ring); 1465 rcb->ti_max_len = TI_JUMBO_FRAMELEN; 1466 rcb->ti_flags = 0; 1467 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1468 #if NVLAN > 0 1469 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1470 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1471 #endif 1472 1473 /* 1474 * Set up the mini ring. Only activated on the 1475 * Tigon 2 but the slot in the config block is 1476 * still there on the Tigon 1. 1477 */ 1478 rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb; 1479 TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_mini_ring); 1480 rcb->ti_max_len = MHLEN - ETHER_ALIGN; 1481 if (sc->ti_hwrev == TI_HWREV_TIGON) 1482 rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED; 1483 else 1484 rcb->ti_flags = 0; 1485 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1486 #if NVLAN > 0 1487 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1488 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1489 #endif 1490 1491 /* 1492 * Set up the receive return ring. 1493 */ 1494 rcb = &sc->ti_rdata->ti_info.ti_return_rcb; 1495 TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc,ti_rx_return_ring); 1496 rcb->ti_flags = 0; 1497 rcb->ti_max_len = TI_RETURN_RING_CNT; 1498 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) = 1499 TI_RING_DMA_ADDR(sc, ti_return_prodidx_r); 1500 1501 /* 1502 * Set up the tx ring. 
Note: for the Tigon 2, we have the option 1503 * of putting the transmit ring in the host's address space and 1504 * letting the chip DMA it instead of leaving the ring in the NIC's 1505 * memory and accessing it through the shared memory region. We 1506 * do this for the Tigon 2, but it doesn't work on the Tigon 1, 1507 * so we have to revert to the shared memory scheme if we detect 1508 * a Tigon 1 chip. 1509 */ 1510 CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); 1511 bzero((char *)sc->ti_rdata->ti_tx_ring, 1512 TI_TX_RING_CNT * sizeof(struct ti_tx_desc)); 1513 rcb = &sc->ti_rdata->ti_info.ti_tx_rcb; 1514 if (sc->ti_hwrev == TI_HWREV_TIGON) 1515 rcb->ti_flags = 0; 1516 else 1517 rcb->ti_flags = TI_RCB_FLAG_HOST_RING; 1518 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1519 #if NVLAN > 0 1520 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1521 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1522 #endif 1523 rcb->ti_max_len = TI_TX_RING_CNT; 1524 if (sc->ti_hwrev == TI_HWREV_TIGON) 1525 TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE; 1526 else 1527 TI_HOSTADDR(rcb->ti_hostaddr) = 1528 TI_RING_DMA_ADDR(sc, ti_tx_ring); 1529 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) = 1530 TI_RING_DMA_ADDR(sc, ti_tx_considx_r); 1531 1532 TI_RING_DMASYNC(sc, ti_info, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1533 1534 /* Set up tuneables */ 1535 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, (sc->ti_rx_coal_ticks / 10)); 1536 CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks); 1537 CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); 1538 CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds); 1539 CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds); 1540 CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio); 1541 1542 /* Turn interrupts on. */ 1543 CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0); 1544 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 1545 1546 /* Start CPU. */ 1547 TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP)); 1548 1549 return (0); 1550 } 1551 1552 int 1553 ti_attach(struct ti_softc *sc) 1554 { 1555 bus_dma_segment_t seg; 1556 int rseg; 1557 struct ifnet *ifp; 1558 caddr_t kva; 1559 1560 if (ti_chipinit(sc)) { 1561 printf("%s: chip initialization failed\n", sc->sc_dv.dv_xname); 1562 return (1); 1563 } 1564 1565 /* Zero out the NIC's on-board SRAM. */ 1566 ti_mem_set(sc, 0x2000, 0x100000 - 0x2000); 1567 1568 /* Init again -- zeroing memory may have clobbered some registers. */ 1569 if (ti_chipinit(sc)) { 1570 printf("%s: chip initialization failed\n", sc->sc_dv.dv_xname); 1571 return (1); 1572 } 1573 1574 /* 1575 * Get station address from the EEPROM. Note: the manual states 1576 * that the MAC address is at offset 0x8c, however the data is 1577 * stored as two longwords (since that's how it's loaded into 1578 * the NIC). This means the MAC address is actually preceded 1579 * by two zero bytes. We need to skip over those. 1580 */ 1581 if (ti_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 1582 TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 1583 printf("%s: failed to read station address\n", 1584 sc->sc_dv.dv_xname); 1585 return (1); 1586 } 1587 1588 /* 1589 * A Tigon chip was detected. Inform the world. 1590 */ 1591 printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 1592 1593 /* Allocate the general information block and ring buffers. 
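	 * All of the host-resident descriptor state (the general info
	 * block, the event ring, the rx and tx rings and the statistics
	 * block) lives in a single struct ti_ring_data, allocated here
	 * as one DMA-safe slab.  The kernel va ends up in sc->ti_rdata
	 * and the loaded map in sc->ti_ring_map; ti_gibinit() later
	 * hands the NIC the bus addresses of the individual pieces via
	 * TI_RING_DMA_ADDR().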
*/ 1594 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct ti_ring_data), 1595 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 1596 printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname); 1597 return (1); 1598 } 1599 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, 1600 sizeof(struct ti_ring_data), &kva, BUS_DMA_NOWAIT)) { 1601 printf("%s: can't map dma buffers (%d bytes)\n", 1602 sc->sc_dv.dv_xname, sizeof(struct ti_ring_data)); 1603 goto fail_1; 1604 } 1605 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct ti_ring_data), 1, 1606 sizeof(struct ti_ring_data), 0, BUS_DMA_NOWAIT, 1607 &sc->ti_ring_map)) { 1608 printf("%s: can't create dma map\n", sc->sc_dv.dv_xname); 1609 goto fail_2; 1610 } 1611 if (bus_dmamap_load(sc->sc_dmatag, sc->ti_ring_map, kva, 1612 sizeof(struct ti_ring_data), NULL, BUS_DMA_NOWAIT)) { 1613 goto fail_3; 1614 } 1615 sc->ti_rdata = (struct ti_ring_data *)kva; 1616 bzero(sc->ti_rdata, sizeof(struct ti_ring_data)); 1617 1618 /* Try to allocate memory for jumbo buffers. */ 1619 if (ti_alloc_jumbo_mem(sc)) { 1620 printf("%s: jumbo buffer allocation failed\n", 1621 sc->sc_dv.dv_xname); 1622 goto fail_3; 1623 } 1624 1625 /* Set default tuneable values. */ 1626 sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC; 1627 sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000; 1628 sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500; 1629 sc->ti_rx_max_coal_bds = 64; 1630 sc->ti_tx_max_coal_bds = 128; 1631 sc->ti_tx_buf_ratio = 21; 1632 1633 /* Set up ifnet structure */ 1634 ifp = &sc->arpcom.ac_if; 1635 ifp->if_softc = sc; 1636 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1637 ifp->if_ioctl = ti_ioctl; 1638 ifp->if_start = ti_start; 1639 ifp->if_watchdog = ti_watchdog; 1640 ifp->if_hardmtu = TI_JUMBO_FRAMELEN - ETHER_HDR_LEN; 1641 IFQ_SET_MAXLEN(&ifp->if_snd, TI_TX_RING_CNT - 1); 1642 IFQ_SET_READY(&ifp->if_snd); 1643 bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ); 1644 1645 ifp->if_capabilities = IFCAP_VLAN_MTU; 1646 1647 #if NVLAN > 0 1648 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 1649 #endif 1650 1651 /* Set up ifmedia support. */ 1652 ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts); 1653 if (sc->ti_copper) { 1654 /* 1655 * Copper cards allow manual 10/100 mode selection, 1656 * but not manual 1000baseTX mode selection. Why? 1657 * Because currently there's no way to specify the 1658 * master/slave setting through the firmware interface, 1659 * so Alteon decided to just bag it and handle it 1660 * via autonegotiation. 1661 */ 1662 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 1663 ifmedia_add(&sc->ifmedia, 1664 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 1665 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); 1666 ifmedia_add(&sc->ifmedia, 1667 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 1668 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL); 1669 ifmedia_add(&sc->ifmedia, 1670 IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL); 1671 } else { 1672 /* Fiber cards don't support 10/100 modes. */ 1673 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 1674 ifmedia_add(&sc->ifmedia, 1675 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 1676 } 1677 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 1678 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO); 1679 1680 /* 1681 * Call MI attach routines. 
1682 */ 1683 if_attach(ifp); 1684 ether_ifattach(ifp); 1685 1686 shutdownhook_establish(ti_shutdown, sc); 1687 return (0); 1688 1689 fail_3: 1690 bus_dmamap_destroy(sc->sc_dmatag, sc->ti_ring_map); 1691 1692 fail_2: 1693 bus_dmamem_unmap(sc->sc_dmatag, kva, 1694 sizeof(struct ti_ring_data)); 1695 1696 fail_1: 1697 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 1698 1699 return (1); 1700 } 1701 1702 /* 1703 * Frame reception handling. This is called if there's a frame 1704 * on the receive return list. 1705 * 1706 * Note: we have to be able to handle three possibilities here: 1707 * 1) the frame is from the mini receive ring (can only happen) 1708 * on Tigon 2 boards) 1709 * 2) the frame is from the jumbo receive ring 1710 * 3) the frame is from the standard receive ring 1711 */ 1712 1713 void 1714 ti_rxeof(struct ti_softc *sc) 1715 { 1716 struct ifnet *ifp; 1717 struct ti_cmd_desc cmd; 1718 1719 ifp = &sc->arpcom.ac_if; 1720 1721 while(sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) { 1722 struct ti_rx_desc *cur_rx; 1723 u_int32_t rxidx; 1724 struct mbuf *m = NULL; 1725 bus_dmamap_t dmamap; 1726 1727 cur_rx = 1728 &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx]; 1729 rxidx = cur_rx->ti_idx; 1730 TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT); 1731 1732 if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) { 1733 TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT); 1734 m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx]; 1735 sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL; 1736 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 1737 ifp->if_ierrors++; 1738 ti_newbuf_jumbo(sc, sc->ti_jumbo, m); 1739 continue; 1740 } 1741 if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) 1742 == ENOBUFS) { 1743 struct mbuf *m0; 1744 m0 = m_devget(mtod(m, char *), cur_rx->ti_len, 1745 ETHER_ALIGN, ifp, NULL); 1746 ti_newbuf_jumbo(sc, sc->ti_jumbo, m); 1747 if (m0 == NULL) { 1748 ifp->if_ierrors++; 1749 continue; 1750 } 1751 m = m0; 1752 } 1753 } else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) { 1754 TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT); 1755 m = sc->ti_cdata.ti_rx_mini_chain[rxidx]; 1756 sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL; 1757 dmamap = sc->ti_cdata.ti_rx_mini_map[rxidx]; 1758 sc->ti_cdata.ti_rx_mini_map[rxidx] = 0; 1759 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 1760 ifp->if_ierrors++; 1761 ti_newbuf_mini(sc, sc->ti_mini, m, dmamap); 1762 continue; 1763 } 1764 if (ti_newbuf_mini(sc, sc->ti_mini, NULL, dmamap) 1765 == ENOBUFS) { 1766 ifp->if_ierrors++; 1767 ti_newbuf_mini(sc, sc->ti_mini, m, dmamap); 1768 continue; 1769 } 1770 } else { 1771 TI_INC(sc->ti_std, TI_STD_RX_RING_CNT); 1772 m = sc->ti_cdata.ti_rx_std_chain[rxidx]; 1773 sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL; 1774 dmamap = sc->ti_cdata.ti_rx_std_map[rxidx]; 1775 sc->ti_cdata.ti_rx_std_map[rxidx] = 0; 1776 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 1777 ifp->if_ierrors++; 1778 ti_newbuf_std(sc, sc->ti_std, m, dmamap); 1779 continue; 1780 } 1781 if (ti_newbuf_std(sc, sc->ti_std, NULL, dmamap) 1782 == ENOBUFS) { 1783 ifp->if_ierrors++; 1784 ti_newbuf_std(sc, sc->ti_std, m, dmamap); 1785 continue; 1786 } 1787 } 1788 1789 if (m == NULL) 1790 panic("%s: couldn't get mbuf", sc->sc_dv.dv_xname); 1791 1792 m->m_pkthdr.len = m->m_len = cur_rx->ti_len; 1793 ifp->if_ipackets++; 1794 m->m_pkthdr.rcvif = ifp; 1795 1796 #if NVLAN > 0 1797 if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) { 1798 m->m_pkthdr.ether_vtag = cur_rx->ti_vlan_tag; 1799 m->m_flags |= M_VLANTAG; 1800 } 1801 #endif 1802 1803 #if NBPFILTER > 0 1804 /* 1805 * Handle BPF listeners. 
Let the BPF user see the packet. 1806 */ 1807 if (ifp->if_bpf) 1808 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN); 1809 #endif 1810 1811 if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0) 1812 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 1813 1814 ether_input_mbuf(ifp, m); 1815 } 1816 1817 /* Only necessary on the Tigon 1. */ 1818 if (sc->ti_hwrev == TI_HWREV_TIGON) 1819 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 1820 sc->ti_rx_saved_considx); 1821 1822 TI_UPDATE_STDPROD(sc, sc->ti_std); 1823 TI_UPDATE_MINIPROD(sc, sc->ti_mini); 1824 TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo); 1825 } 1826 1827 void 1828 ti_txeof_tigon1(struct ti_softc *sc) 1829 { 1830 struct ifnet *ifp; 1831 struct ti_txmap_entry *entry; 1832 int active = 1; 1833 1834 ifp = &sc->arpcom.ac_if; 1835 1836 /* 1837 * Go through our tx ring and free mbufs for those 1838 * frames that have been sent. 1839 */ 1840 while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) { 1841 u_int32_t idx = 0; 1842 struct ti_tx_desc txdesc; 1843 1844 idx = sc->ti_tx_saved_considx; 1845 ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc), 1846 sizeof(txdesc), (caddr_t)&txdesc); 1847 1848 if (txdesc.ti_flags & TI_BDFLAG_END) 1849 ifp->if_opackets++; 1850 1851 if (sc->ti_cdata.ti_tx_chain[idx] != NULL) { 1852 m_freem(sc->ti_cdata.ti_tx_chain[idx]); 1853 sc->ti_cdata.ti_tx_chain[idx] = NULL; 1854 1855 entry = sc->ti_cdata.ti_tx_map[idx]; 1856 bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0, 1857 entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1858 1859 bus_dmamap_unload(sc->sc_dmatag, entry->dmamap); 1860 SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry, 1861 link); 1862 sc->ti_cdata.ti_tx_map[idx] = NULL; 1863 1864 } 1865 sc->ti_txcnt--; 1866 TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT); 1867 ifp->if_timer = 0; 1868 1869 active = 0; 1870 } 1871 1872 if (!active) 1873 ifp->if_flags &= ~IFF_OACTIVE; 1874 } 1875 1876 void 1877 ti_txeof_tigon2(struct ti_softc *sc) 1878 { 1879 struct ti_tx_desc *cur_tx = NULL; 1880 struct ifnet *ifp; 1881 struct ti_txmap_entry *entry; 1882 1883 ifp = &sc->arpcom.ac_if; 1884 1885 /* 1886 * Go through our tx ring and free mbufs for those 1887 * frames that have been sent. 1888 */ 1889 while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) { 1890 u_int32_t idx = 0; 1891 1892 idx = sc->ti_tx_saved_considx; 1893 cur_tx = &sc->ti_rdata->ti_tx_ring[idx]; 1894 1895 if (cur_tx->ti_flags & TI_BDFLAG_END) 1896 ifp->if_opackets++; 1897 if (sc->ti_cdata.ti_tx_chain[idx] != NULL) { 1898 m_freem(sc->ti_cdata.ti_tx_chain[idx]); 1899 sc->ti_cdata.ti_tx_chain[idx] = NULL; 1900 1901 entry = sc->ti_cdata.ti_tx_map[idx]; 1902 bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0, 1903 entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1904 1905 bus_dmamap_unload(sc->sc_dmatag, entry->dmamap); 1906 SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry, 1907 link); 1908 sc->ti_cdata.ti_tx_map[idx] = NULL; 1909 1910 } 1911 sc->ti_txcnt--; 1912 TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT); 1913 ifp->if_timer = 0; 1914 } 1915 1916 if (cur_tx != NULL) 1917 ifp->if_flags &= ~IFF_OACTIVE; 1918 } 1919 1920 int 1921 ti_intr(void *xsc) 1922 { 1923 struct ti_softc *sc; 1924 struct ifnet *ifp; 1925 1926 sc = xsc; 1927 ifp = &sc->arpcom.ac_if; 1928 1929 /* XXX checking this register is expensive. */ 1930 /* Make sure this is really our interrupt. */ 1931 if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) 1932 return (0); 1933 1934 /* Ack interrupt and stop others from occurring. 
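	 * Writing 1 to the TI_MB_HOSTINTR mailbox masks further
	 * interrupts from the NIC; writing 0 unmasks them.  The handler
	 * therefore runs with the chip quiesced and re-enables
	 * interrupts only after the rx/tx rings and the event ring
	 * have been serviced.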
	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		ti_rxeof(sc);

		/* Check TX ring producer/consumer */
		if (sc->ti_hwrev == TI_HWREV_TIGON)
			ti_txeof_tigon1(sc);
		else
			ti_txeof_tigon2(sc);
	}

	ti_handle_events(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		ti_start(ifp);

	return (1);
}

void
ti_stats_update(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ti_stats *stats = &sc->ti_rdata->ti_info.ti_stats;

	ifp = &sc->arpcom.ac_if;

	TI_RING_DMASYNC(sc, ti_info.ti_stats, BUS_DMASYNC_POSTREAD);

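	/*
	 * The statistics block appears to hold running totals, so the
	 * previous if_collisions value is subtracted back out below; the
	 * net effect is that if_collisions tracks the sum of the four
	 * firmware collision counters (e.g. hypothetical totals of
	 * 3 + 1 + 0 + 0 with if_collisions currently at 2 add a delta of 2,
	 * giving 4).
	 */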
	ifp->if_collisions += stats->dot3StatsSingleCollisionFrames +
	    stats->dot3StatsMultipleCollisionFrames +
	    stats->dot3StatsExcessiveCollisions +
	    stats->dot3StatsLateCollisions -
	    ifp->if_collisions;

	TI_RING_DMASYNC(sc, ti_info.ti_stats, BUS_DMASYNC_PREREAD);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
ti_encap_tigon1(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	u_int32_t frag, cur;
	struct ti_txmap_entry *entry;
	bus_dmamap_t txmap;
	struct ti_tx_desc txdesc;
	int i = 0;

	entry = SLIST_FIRST(&sc->ti_tx_map_listhead);
	if (entry == NULL)
		return (ENOBUFS);
	txmap = entry->dmamap;

	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	if (txmap->dm_nsegs > (TI_TX_RING_CNT - sc->ti_txcnt - 16))
		goto fail_unload;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
			break;

		memset(&txdesc, 0, sizeof(txdesc));

		TI_HOSTADDR(txdesc.ti_addr) = txmap->dm_segs[i].ds_addr;
		txdesc.ti_len = txmap->dm_segs[i].ds_len & 0xffff;
		txdesc.ti_flags = 0;
		txdesc.ti_vlan_tag = 0;

#if NVLAN > 0
		if (m_head->m_flags & M_VLANTAG) {
			txdesc.ti_flags |= TI_BDFLAG_VLAN_TAG;
			txdesc.ti_vlan_tag = m_head->m_pkthdr.ether_vtag;
		}
#endif

		ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
		    sizeof(txdesc), (caddr_t)&txdesc);

		cur = frag;
		TI_INC(frag, TI_TX_RING_CNT);
	}

	if (frag == sc->ti_tx_saved_considx)
		goto fail_unload;

	txdesc.ti_flags |= TI_BDFLAG_END;
	ti_mem_write(sc, TI_TX_RING_BASE + cur * sizeof(txdesc),
	    sizeof(txdesc), (caddr_t)&txdesc);

	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->ti_cdata.ti_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
	sc->ti_cdata.ti_tx_map[cur] = entry;
	sc->ti_txcnt += txmap->dm_nsegs;

	*txidx = frag;

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmatag, txmap);

	return (ENOBUFS);
}

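/*
 * On the Tigon 1 the TX ring lives in NIC SRAM, so ti_encap_tigon1() above
 * copies each descriptor out with ti_mem_write(); the Tigon 2 variant below
 * fills in the host-resident ring (sc->ti_rdata->ti_tx_ring) directly.
 * Both refuse to come within 16 descriptors of filling the ring: assuming
 * the usual 512-entry TX ring, a 7-segment chain with 490 descriptors
 * already outstanding would be rejected, since 7 > 512 - 490 - 16.
 */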
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
ti_encap_tigon2(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct ti_tx_desc *f = NULL;
	u_int32_t frag, cur;
	struct ti_txmap_entry *entry;
	bus_dmamap_t txmap;
	int i = 0;

	entry = SLIST_FIRST(&sc->ti_tx_map_listhead);
	if (entry == NULL)
		return (ENOBUFS);
	txmap = entry->dmamap;

	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	if (txmap->dm_nsegs > (TI_TX_RING_CNT - sc->ti_txcnt - 16))
		goto fail_unload;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc->ti_rdata->ti_tx_ring[frag];

		if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
			break;

		TI_HOSTADDR(f->ti_addr) = txmap->dm_segs[i].ds_addr;
		f->ti_len = txmap->dm_segs[i].ds_len & 0xffff;
		f->ti_flags = 0;
		f->ti_vlan_tag = 0;

#if NVLAN > 0
		if (m_head->m_flags & M_VLANTAG) {
			f->ti_flags |= TI_BDFLAG_VLAN_TAG;
			f->ti_vlan_tag = m_head->m_pkthdr.ether_vtag;
		}
#endif

		cur = frag;
		TI_INC(frag, TI_TX_RING_CNT);
	}

	if (frag == sc->ti_tx_saved_considx)
		goto fail_unload;

	sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END;

	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	TI_RING_DMASYNC(sc, ti_tx_ring[cur], BUS_DMASYNC_POSTREAD);

	sc->ti_cdata.ti_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
	sc->ti_cdata.ti_tx_map[cur] = entry;
	sc->ti_txcnt += txmap->dm_nsegs;

	*txidx = frag;

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmatag, txmap);

	return (ENOBUFS);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
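/*
 * Packets are staged through ti_encap_tigon1()/ti_encap_tigon2() first;
 * only after the loop below has queued everything it can is the new
 * producer index written to the TI_MB_SENDPROD_IDX mailbox, so a burst of
 * packets costs a single doorbell write.  Completions come back via the
 * consumer index processed in ti_txeof_tigon1()/ti_txeof_tigon2().
 */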
void
ti_start(struct ifnet *ifp)
{
	struct ti_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx;
	int pkts = 0, error;

	sc = ifp->if_softc;

	prodidx = sc->ti_tx_saved_prodidx;

	while(sc->ti_cdata.ti_tx_chain[prodidx] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sc->ti_hwrev == TI_HWREV_TIGON)
			error = ti_encap_tigon1(sc, m_head, &prodidx);
		else
			error = ti_encap_tigon2(sc, m_head, &prodidx);

		if (error) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	sc->ti_tx_saved_prodidx = prodidx;
	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
ti_init(void *xsc)
{
	struct ti_softc *sc = xsc;
	int s;

	s = splnet();

	/* Cancel pending I/O and flush buffers. */
	ti_stop(sc);

	/* Init the gen info block, ring control blocks and firmware. */
	if (ti_gibinit(sc)) {
		printf("%s: initialization failure\n", sc->sc_dv.dv_xname);
		splx(s);
		return;
	}

	splx(s);
}

void
ti_init2(struct ti_softc *sc)
{
	struct ti_cmd_desc cmd;
	struct ifnet *ifp;
	u_int16_t *m;
	struct ifmedia *ifm;
	int tmp;

	ifp = &sc->arpcom.ac_if;

	/* Specify MTU and interface index. */
	CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->sc_dv.dv_unit);
	CSR_WRITE_4(sc, TI_GCR_IFMTU,
	    TI_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
	TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);

	/* Load our MAC address. */
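	/*
	 * The address is programmed 16 bits at a time in network byte
	 * order: PAR0 takes the first two bytes in its low half and PAR1
	 * the remaining four.  For a hypothetical address of
	 * 00:a0:cc:12:34:56 that works out to PAR0 = 0x000000a0 and
	 * PAR1 = 0xcc123456.
	 */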
	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0]));
	CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2]));
	TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0);
	} else {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
	}

	/* Program multicast filter. */
	ti_setmulti(sc);

	/*
	 * If this is a Tigon 1, we should tell the
	 * firmware to use software packet filtering.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
	}

	/* Init RX ring. */
	if (ti_init_rx_ring_std(sc) == ENOBUFS)
		panic("not enough mbufs for rx ring");

	/* Init jumbo RX ring. */
	ti_init_rx_ring_jumbo(sc);

	/*
	 * If this is a Tigon 2, we can also configure the
	 * mini ring.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
		ti_init_rx_ring_mini(sc);

	CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
	sc->ti_rx_saved_considx = 0;

	/* Init TX ring. */
	ti_init_tx_ring(sc);

	/* Tell firmware we're alive. */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);

	/* Enable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Make sure to set media properly. We have to do this
	 * here since we have to issue commands in order to set
	 * the link negotiation and we can't issue commands until
	 * the firmware is running.
	 */
	ifm = &sc->ifmedia;
	tmp = ifm->ifm_media;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;
	ti_ifmedia_upd(ifp);
	ifm->ifm_media = tmp;
}

/*
 * Set media options.
 */
int
ti_ifmedia_upd(struct ifnet *ifp)
{
	struct ti_softc *sc;
	struct ifmedia *ifm;
	struct ti_cmd_desc cmd;

	sc = ifp->if_softc;
	ifm = &sc->ifmedia;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return(EINVAL);

	switch(IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_FULL_DUPLEX|TI_GLNK_RX_FLOWCTL_Y|
		    TI_GLNK_AUTONEGENB|TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
		    TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX|
		    TI_LNK_AUTONEGENB|TI_LNK_ENB);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_BOTH, 0);
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, 0);
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
		break;
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_100_TX:
	case IFM_10_T:
		CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF);
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
		}
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_10_100, 0);
		break;
	}

	return (0);
}

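/*
 * As a usage example, on a hypothetical ti0 interface
 * "ifconfig ti0 media 1000baseSX mediaopt full-duplex" should take the
 * IFM_1000_SX case above, while "ifconfig ti0 media autoselect" asks the
 * firmware to negotiate both the gigabit (TI_GCR_GLINK) and the 10/100
 * (TI_GCR_LINK) links.
 */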
/*
 * Report current media status.
 */
void
ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ti_softc *sc;
	u_int32_t media = 0;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
		if (sc->ti_copper)
			ifmr->ifm_active |= IFM_1000_T;
		else
			ifmr->ifm_active |= IFM_1000_SX;
		if (media & TI_GLNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
		if (sc->ti_copper) {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_TX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_T;
		} else {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_FX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_FL;
		}
		if (media & TI_LNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		if (media & TI_LNK_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
	}
}

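/*
 * Ioctl handling.  SIOCSIFFLAGS tries hard to use the firmware's
 * 'set promisc mode' command instead of a full reinit (see the comment
 * below), and an ENETRESET return from ether_ioctl() (typically a
 * multicast list change) is handled by reprogramming the filter with
 * ti_setmulti() rather than reinitializing.
 */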
2458 */ 2459 if (ifp->if_flags & IFF_RUNNING && 2460 ifp->if_flags & IFF_PROMISC && 2461 !(sc->ti_if_flags & IFF_PROMISC)) { 2462 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, 2463 TI_CMD_CODE_PROMISC_ENB, 0); 2464 } else if (ifp->if_flags & IFF_RUNNING && 2465 !(ifp->if_flags & IFF_PROMISC) && 2466 sc->ti_if_flags & IFF_PROMISC) { 2467 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, 2468 TI_CMD_CODE_PROMISC_DIS, 0); 2469 } else { 2470 if ((ifp->if_flags & IFF_RUNNING) == 0) 2471 ti_init(sc); 2472 } 2473 } else { 2474 if (ifp->if_flags & IFF_RUNNING) 2475 ti_stop(sc); 2476 } 2477 sc->ti_if_flags = ifp->if_flags; 2478 break; 2479 2480 case SIOCSIFMEDIA: 2481 case SIOCGIFMEDIA: 2482 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 2483 break; 2484 2485 default: 2486 error = ether_ioctl(ifp, &sc->arpcom, command, data); 2487 } 2488 2489 if (error == ENETRESET) { 2490 if (ifp->if_flags & IFF_RUNNING) 2491 ti_setmulti(sc); 2492 error = 0; 2493 } 2494 2495 splx(s); 2496 return (error); 2497 } 2498 2499 void 2500 ti_watchdog(struct ifnet *ifp) 2501 { 2502 struct ti_softc *sc; 2503 2504 sc = ifp->if_softc; 2505 2506 printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname); 2507 ti_stop(sc); 2508 ti_init(sc); 2509 2510 ifp->if_oerrors++; 2511 } 2512 2513 /* 2514 * Stop the adapter and free any mbufs allocated to the 2515 * RX and TX lists. 2516 */ 2517 void 2518 ti_stop(struct ti_softc *sc) 2519 { 2520 struct ifnet *ifp; 2521 struct ti_cmd_desc cmd; 2522 2523 ifp = &sc->arpcom.ac_if; 2524 2525 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2526 2527 /* Disable host interrupts. */ 2528 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 2529 /* 2530 * Tell firmware we're shutting down. 2531 */ 2532 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0); 2533 2534 /* Halt and reinitialize. */ 2535 ti_chipinit(sc); 2536 ti_mem_set(sc, 0x2000, 0x100000 - 0x2000); 2537 ti_chipinit(sc); 2538 2539 /* Free the RX lists. */ 2540 ti_free_rx_ring_std(sc); 2541 2542 /* Free jumbo RX list. */ 2543 ti_free_rx_ring_jumbo(sc); 2544 2545 /* Free mini RX list. */ 2546 ti_free_rx_ring_mini(sc); 2547 2548 /* Free TX buffers. */ 2549 ti_free_tx_ring(sc); 2550 2551 sc->ti_ev_prodidx.ti_idx = 0; 2552 sc->ti_return_prodidx.ti_idx = 0; 2553 sc->ti_tx_considx.ti_idx = 0; 2554 sc->ti_tx_saved_considx = TI_TXCONS_UNSET; 2555 } 2556 2557 /* 2558 * Stop all chip I/O so that the kernel's probe routines don't 2559 * get confused by errant DMAs when rebooting. 2560 */ 2561 void 2562 ti_shutdown(void *xsc) 2563 { 2564 struct ti_softc *sc; 2565 2566 sc = xsc; 2567 2568 ti_chipinit(sc); 2569 } 2570