1 /* 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 * 32 * $FreeBSD: src/sys/pci/if_ti.c,v 1.25.2.14 2002/02/15 04:20:20 silby Exp $ 33 */ 34 35 /* 36 * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD. 
37 * Manuals, sample driver and firmware source kits are available 38 * from http://www.alteon.com/support/openkits. 39 * 40 * Written by Bill Paul <wpaul@ctr.columbia.edu> 41 * Electrical Engineering Department 42 * Columbia University, New York City 43 */ 44 45 /* 46 * The Alteon Networks Tigon chip contains an embedded R4000 CPU, 47 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs 48 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The 49 * Tigon supports hardware IP, TCP and UCP checksumming, multicast 50 * filtering and jumbo (9014 byte) frames. The hardware is largely 51 * controlled by firmware, which must be loaded into the NIC during 52 * initialization. 53 * 54 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware 55 * revision, which supports new features such as extended commands, 56 * extended jumbo receive ring desciptors and a mini receive ring. 57 * 58 * Alteon Networks is to be commended for releasing such a vast amount 59 * of development material for the Tigon NIC without requiring an NDA 60 * (although they really should have done it a long time ago). With 61 * any luck, the other vendors will finally wise up and follow Alteon's 62 * stellar example. 63 * 64 * The firmware for the Tigon 1 and 2 NICs is compiled directly into 65 * this driver by #including it as a C header file. This bloats the 66 * driver somewhat, but it's the easiest method considering that the 67 * driver code and firmware code need to be kept in sync. The source 68 * for the firmware is not provided with the FreeBSD distribution since 69 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3. 
70 * 71 * The following people deserve special thanks: 72 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board 73 * for testing 74 * - Raymond Lee of Netgear, for providing a pair of Netgear 75 * GA620 Tigon 2 boards for testing 76 * - Ulf Zimmermann, for bringing the GA260 to my attention and 77 * convincing me to write this driver. 78 * - Andrew Gallatin for providing FreeBSD/Alpha support. 79 */ 80 81 #include <sys/param.h> 82 #include <sys/systm.h> 83 #include <sys/sockio.h> 84 #include <sys/mbuf.h> 85 #include <sys/malloc.h> 86 #include <sys/kernel.h> 87 #include <sys/socket.h> 88 #include <sys/queue.h> 89 #include <sys/serialize.h> 90 #include <sys/bus.h> 91 #include <sys/rman.h> 92 #include <sys/thread2.h> 93 #include <sys/interrupt.h> 94 95 #include <net/if.h> 96 #include <net/ifq_var.h> 97 #include <net/if_arp.h> 98 #include <net/ethernet.h> 99 #include <net/if_dl.h> 100 #include <net/if_media.h> 101 #include <net/if_types.h> 102 #include <net/vlan/if_vlan_var.h> 103 #include <net/vlan/if_vlan_ether.h> 104 105 #include <net/bpf.h> 106 107 #include <netinet/in_systm.h> 108 #include <netinet/in.h> 109 #include <netinet/ip.h> 110 111 #include <vm/vm.h> /* for vtophys */ 112 #include <vm/pmap.h> /* for vtophys */ 113 114 #include <bus/pci/pcireg.h> 115 #include <bus/pci/pcivar.h> 116 117 #include "if_tireg.h" 118 #include "ti_fw.h" 119 #include "ti_fw2.h" 120 121 /* 122 * Temporarily disable the checksum offload support for now. 123 * Tests with ftp.freesoftware.com show that after about 12 hours, 124 * the firmware will begin calculating completely bogus TX checksums 125 * and refuse to stop until the interface is reset. Unfortunately, 126 * there isn't enough time to fully debug this before the 4.1 127 * release, so this will need to stay off for now. 
128 */ 129 #ifdef notdef 130 #define TI_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS) 131 #else 132 #define TI_CSUM_FEATURES 0 133 #endif 134 135 /* 136 * Various supported device vendors/types and their names. 137 */ 138 139 static struct ti_type ti_devs[] = { 140 { ALT_VENDORID, ALT_DEVICEID_ACENIC, 141 "Alteon AceNIC 1000baseSX Gigabit Ethernet" }, 142 { ALT_VENDORID, ALT_DEVICEID_ACENIC_COPPER, 143 "Alteon AceNIC 1000baseT Gigabit Ethernet" }, 144 { TC_VENDORID, TC_DEVICEID_3C985, 145 "3Com 3c985-SX Gigabit Ethernet" }, 146 { NG_VENDORID, NG_DEVICEID_GA620, 147 "Netgear GA620 1000baseSX Gigabit Ethernet" }, 148 { NG_VENDORID, NG_DEVICEID_GA620T, 149 "Netgear GA620 1000baseT Gigabit Ethernet" }, 150 { SGI_VENDORID, SGI_DEVICEID_TIGON, 151 "Silicon Graphics Gigabit Ethernet" }, 152 { DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX, 153 "Farallon PN9000SX Gigabit Ethernet" }, 154 { 0, 0, NULL } 155 }; 156 157 static int ti_probe(device_t); 158 static int ti_attach(device_t); 159 static int ti_detach(device_t); 160 static void ti_txeof(struct ti_softc *); 161 static void ti_rxeof(struct ti_softc *); 162 163 static void ti_stats_update(struct ti_softc *); 164 static int ti_encap(struct ti_softc *, struct mbuf *, uint32_t *); 165 166 static void ti_intr(void *); 167 static void ti_start(struct ifnet *); 168 static int ti_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 169 static void ti_init(void *); 170 static void ti_init2(struct ti_softc *); 171 static void ti_stop(struct ti_softc *); 172 static void ti_watchdog(struct ifnet *); 173 static void ti_shutdown(device_t); 174 static int ti_ifmedia_upd(struct ifnet *); 175 static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *); 176 177 static uint32_t ti_eeprom_putbyte(struct ti_softc *, int); 178 static uint8_t ti_eeprom_getbyte(struct ti_softc *, int, uint8_t *); 179 static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int); 180 181 static void ti_add_mcast(struct ti_softc *, struct 
ether_addr *); 182 static void ti_del_mcast(struct ti_softc *, struct ether_addr *); 183 static void ti_setmulti(struct ti_softc *); 184 185 static void ti_mem(struct ti_softc *, uint32_t, uint32_t, caddr_t); 186 static void ti_loadfw(struct ti_softc *); 187 static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *); 188 static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, 189 caddr_t, int); 190 static void ti_handle_events(struct ti_softc *); 191 static int ti_alloc_jumbo_mem(struct ti_softc *); 192 static struct ti_jslot * 193 ti_jalloc(struct ti_softc *); 194 static void ti_jfree(void *); 195 static void ti_jref(void *); 196 static int ti_newbuf_std(struct ti_softc *, int, struct mbuf *); 197 static int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *); 198 static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *); 199 static int ti_init_rx_ring_std(struct ti_softc *); 200 static void ti_free_rx_ring_std(struct ti_softc *); 201 static int ti_init_rx_ring_jumbo(struct ti_softc *); 202 static void ti_free_rx_ring_jumbo(struct ti_softc *); 203 static int ti_init_rx_ring_mini(struct ti_softc *); 204 static void ti_free_rx_ring_mini(struct ti_softc *); 205 static void ti_free_tx_ring(struct ti_softc *); 206 static int ti_init_tx_ring(struct ti_softc *); 207 208 static int ti_64bitslot_war(struct ti_softc *); 209 static int ti_chipinit(struct ti_softc *); 210 static int ti_gibinit(struct ti_softc *); 211 212 static device_method_t ti_methods[] = { 213 /* Device interface */ 214 DEVMETHOD(device_probe, ti_probe), 215 DEVMETHOD(device_attach, ti_attach), 216 DEVMETHOD(device_detach, ti_detach), 217 DEVMETHOD(device_shutdown, ti_shutdown), 218 { 0, 0 } 219 }; 220 221 222 static DEFINE_CLASS_0(ti, ti_driver, ti_methods, sizeof(struct ti_softc)); 223 static devclass_t ti_devclass; 224 225 DECLARE_DUMMY_MODULE(if_ti); 226 DRIVER_MODULE(if_ti, pci, ti_driver, ti_devclass, NULL, NULL); 227 228 /* 229 * Send an instruction or address to the EEPROM, 
check for ACK. 230 */ 231 static uint32_t 232 ti_eeprom_putbyte(struct ti_softc *sc, int byte) 233 { 234 int ack = 0, i; 235 236 /* 237 * Make sure we're in TX mode. 238 */ 239 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 240 241 /* 242 * Feed in each bit and stobe the clock. 243 */ 244 for (i = 0x80; i; i >>= 1) { 245 if (byte & i) 246 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 247 else 248 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 249 DELAY(1); 250 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 251 DELAY(1); 252 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 253 } 254 255 /* 256 * Turn off TX mode. 257 */ 258 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 259 260 /* 261 * Check for ack. 262 */ 263 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 264 ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN; 265 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 266 267 return(ack); 268 } 269 270 /* 271 * Read a byte of data stored in the EEPROM at address 'addr.' 272 * We have to send two address bytes since the EEPROM can hold 273 * more than 256 bytes of data. 274 */ 275 static uint8_t 276 ti_eeprom_getbyte(struct ti_softc *sc, int addr, uint8_t *dest) 277 { 278 struct ifnet *ifp = &sc->arpcom.ac_if; 279 int i; 280 uint8_t byte = 0; 281 282 EEPROM_START; 283 284 /* 285 * Send write control code to EEPROM. 286 */ 287 if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 288 if_printf(ifp, "failed to send write command, status: %x\n", 289 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 290 return(1); 291 } 292 293 /* 294 * Send first byte of address of byte we want to read. 295 */ 296 if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) { 297 if_printf(ifp, "failed to send address, status: %x\n", 298 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 299 return(1); 300 } 301 /* 302 * Send second byte address of byte we want to read. 
303 */ 304 if (ti_eeprom_putbyte(sc, addr & 0xFF)) { 305 if_printf(ifp, "failed to send address, status: %x\n", 306 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 307 return(1); 308 } 309 310 EEPROM_STOP; 311 EEPROM_START; 312 /* 313 * Send read control code to EEPROM. 314 */ 315 if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 316 if_printf(ifp, "failed to send read command, status: %x\n", 317 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 318 return(1); 319 } 320 321 /* 322 * Start reading bits from EEPROM. 323 */ 324 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 325 for (i = 0x80; i; i >>= 1) { 326 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 327 DELAY(1); 328 if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN) 329 byte |= i; 330 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 331 DELAY(1); 332 } 333 334 EEPROM_STOP; 335 336 /* 337 * No ACK generated for read, so just return byte. 338 */ 339 340 *dest = byte; 341 342 return(0); 343 } 344 345 /* 346 * Read a sequence of bytes from the EEPROM. 347 */ 348 static int 349 ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt) 350 { 351 int err = 0, i; 352 uint8_t byte = 0; 353 354 for (i = 0; i < cnt; i++) { 355 err = ti_eeprom_getbyte(sc, off + i, &byte); 356 if (err) 357 break; 358 *(dest + i) = byte; 359 } 360 361 return(err ? 1 : 0); 362 } 363 364 /* 365 * NIC memory access function. Can be used to either clear a section 366 * of NIC local memory or (if buf is non-NULL) copy data into it. 
367 */ 368 static void 369 ti_mem(struct ti_softc *sc, uint32_t addr, uint32_t len, caddr_t buf) 370 { 371 int cnt, segptr, segsize; 372 caddr_t ti_winbase, ptr; 373 374 segptr = addr; 375 cnt = len; 376 ti_winbase = (caddr_t)(sc->ti_vhandle + TI_WINDOW); 377 ptr = buf; 378 379 while(cnt) { 380 if (cnt < TI_WINLEN) 381 segsize = cnt; 382 else 383 segsize = TI_WINLEN - (segptr % TI_WINLEN); 384 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 385 if (buf == NULL) 386 bzero((char *)ti_winbase + (segptr & 387 (TI_WINLEN - 1)), segsize); 388 else { 389 bcopy((char *)ptr, (char *)ti_winbase + 390 (segptr & (TI_WINLEN - 1)), segsize); 391 ptr += segsize; 392 } 393 segptr += segsize; 394 cnt -= segsize; 395 } 396 } 397 398 /* 399 * Load firmware image into the NIC. Check that the firmware revision 400 * is acceptable and see if we want the firmware for the Tigon 1 or 401 * Tigon 2. 402 */ 403 static void 404 ti_loadfw(struct ti_softc *sc) 405 { 406 struct ifnet *ifp = &sc->arpcom.ac_if; 407 408 switch(sc->ti_hwrev) { 409 case TI_HWREV_TIGON: 410 if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR || 411 tigonFwReleaseMinor != TI_FIRMWARE_MINOR || 412 tigonFwReleaseFix != TI_FIRMWARE_FIX) { 413 if_printf(ifp, "firmware revision mismatch; want " 414 "%d.%d.%d, got %d.%d.%d\n", 415 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 416 TI_FIRMWARE_FIX, tigonFwReleaseMajor, 417 tigonFwReleaseMinor, tigonFwReleaseFix); 418 return; 419 } 420 ti_mem(sc, tigonFwTextAddr, tigonFwTextLen, 421 (caddr_t)tigonFwText); 422 ti_mem(sc, tigonFwDataAddr, tigonFwDataLen, 423 (caddr_t)tigonFwData); 424 ti_mem(sc, tigonFwRodataAddr, tigonFwRodataLen, 425 (caddr_t)tigonFwRodata); 426 ti_mem(sc, tigonFwBssAddr, tigonFwBssLen, NULL); 427 ti_mem(sc, tigonFwSbssAddr, tigonFwSbssLen, NULL); 428 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr); 429 break; 430 case TI_HWREV_TIGON_II: 431 if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR || 432 tigon2FwReleaseMinor != TI_FIRMWARE_MINOR || 433 
tigon2FwReleaseFix != TI_FIRMWARE_FIX) { 434 if_printf(ifp, "firmware revision mismatch; want " 435 "%d.%d.%d, got %d.%d.%d\n", 436 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 437 TI_FIRMWARE_FIX, tigon2FwReleaseMajor, 438 tigon2FwReleaseMinor, tigon2FwReleaseFix); 439 return; 440 } 441 ti_mem(sc, tigon2FwTextAddr, tigon2FwTextLen, 442 (caddr_t)tigon2FwText); 443 ti_mem(sc, tigon2FwDataAddr, tigon2FwDataLen, 444 (caddr_t)tigon2FwData); 445 ti_mem(sc, tigon2FwRodataAddr, tigon2FwRodataLen, 446 (caddr_t)tigon2FwRodata); 447 ti_mem(sc, tigon2FwBssAddr, tigon2FwBssLen, NULL); 448 ti_mem(sc, tigon2FwSbssAddr, tigon2FwSbssLen, NULL); 449 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr); 450 break; 451 default: 452 if_printf(ifp, "can't load firmware: unknown hardware rev\n"); 453 break; 454 } 455 } 456 457 /* 458 * Send the NIC a command via the command ring. 459 */ 460 static void 461 ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd) 462 { 463 uint32_t index; 464 465 if (sc->ti_rdata->ti_cmd_ring == NULL) 466 return; 467 468 index = sc->ti_cmd_saved_prodidx; 469 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd)); 470 TI_INC(index, TI_CMD_RING_CNT); 471 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 472 sc->ti_cmd_saved_prodidx = index; 473 } 474 475 /* 476 * Send the NIC an extended command. The 'len' parameter specifies the 477 * number of command slots to include after the initial command. 
478 */ 479 static void 480 ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg, int len) 481 { 482 uint32_t index; 483 int i; 484 485 if (sc->ti_rdata->ti_cmd_ring == NULL) 486 return; 487 488 index = sc->ti_cmd_saved_prodidx; 489 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd)); 490 TI_INC(index, TI_CMD_RING_CNT); 491 for (i = 0; i < len; i++) { 492 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), 493 *(uint32_t *)(&arg[i * 4])); 494 TI_INC(index, TI_CMD_RING_CNT); 495 } 496 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 497 sc->ti_cmd_saved_prodidx = index; 498 } 499 500 /* 501 * Handle events that have triggered interrupts. 502 */ 503 static void 504 ti_handle_events(struct ti_softc *sc) 505 { 506 struct ifnet *ifp = &sc->arpcom.ac_if; 507 struct ti_event_desc *e; 508 509 if (sc->ti_rdata->ti_event_ring == NULL) 510 return; 511 512 while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) { 513 e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx]; 514 switch(e->ti_event) { 515 case TI_EV_LINKSTAT_CHANGED: 516 sc->ti_linkstat = e->ti_code; 517 if (e->ti_code == TI_EV_CODE_LINK_UP) { 518 if_printf(ifp, "10/100 link up\n"); 519 } else if (e->ti_code == TI_EV_CODE_GIG_LINK_UP) { 520 if_printf(ifp, "gigabit link up\n"); 521 } else if (e->ti_code == TI_EV_CODE_LINK_DOWN) { 522 if_printf(ifp, "link down\n"); 523 } 524 break; 525 case TI_EV_ERROR: 526 if (e->ti_code == TI_EV_CODE_ERR_INVAL_CMD) { 527 if_printf(ifp, "invalid command\n"); 528 } else if (e->ti_code == TI_EV_CODE_ERR_UNIMP_CMD) { 529 if_printf(ifp, "unknown command\n"); 530 } else if (e->ti_code == TI_EV_CODE_ERR_BADCFG) { 531 if_printf(ifp, "bad config data\n"); 532 } 533 break; 534 case TI_EV_FIRMWARE_UP: 535 ti_init2(sc); 536 break; 537 case TI_EV_STATS_UPDATED: 538 ti_stats_update(sc); 539 break; 540 case TI_EV_RESET_JUMBO_RING: 541 case TI_EV_MCAST_UPDATED: 542 /* Who cares. 
*/ 543 break; 544 default: 545 if_printf(ifp, "unknown event: %d\n", e->ti_event); 546 break; 547 } 548 /* Advance the consumer index. */ 549 TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT); 550 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx); 551 } 552 } 553 554 /* 555 * Memory management for the jumbo receive ring is a pain in the 556 * butt. We need to allocate at least 9018 bytes of space per frame, 557 * _and_ it has to be contiguous (unless you use the extended 558 * jumbo descriptor format). Using malloc() all the time won't 559 * work: malloc() allocates memory in powers of two, which means we 560 * would end up wasting a considerable amount of space by allocating 561 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have 562 * to do our own memory management. 563 * 564 * The driver needs to allocate a contiguous chunk of memory at boot 565 * time. We then chop this up ourselves into 9K pieces and use them 566 * as external mbuf storage. 567 * 568 * One issue here is how much memory to allocate. The jumbo ring has 569 * 256 slots in it, but at 9K per slot than can consume over 2MB of 570 * RAM. This is a bit much, especially considering we also need 571 * RAM for the standard ring and mini ring (on the Tigon 2). To 572 * save space, we only actually allocate enough memory for 64 slots 573 * by default, which works out to between 500 and 600K. This can 574 * be tuned by changing a #define in if_tireg.h. 575 */ 576 577 static int 578 ti_alloc_jumbo_mem(struct ti_softc *sc) 579 { 580 struct ti_jslot *entry; 581 caddr_t ptr; 582 int i; 583 584 /* Grab a big chunk o' storage. 
*/ 585 sc->ti_cdata.ti_jumbo_buf = contigmalloc(TI_JMEM, M_DEVBUF, 586 M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); 587 588 if (sc->ti_cdata.ti_jumbo_buf == NULL) { 589 if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n"); 590 return(ENOBUFS); 591 } 592 593 lwkt_serialize_init(&sc->ti_jslot_serializer); 594 SLIST_INIT(&sc->ti_jfree_listhead); 595 596 /* 597 * Now divide it up into 9K pieces and save the addresses 598 * in an array. Note that we play an evil trick here by using 599 * the first few bytes in the buffer to hold the the address 600 * of the softc structure for this interface. This is because 601 * ti_jfree() needs it, but it is called by the mbuf management 602 * code which will not pass it to us explicitly. 603 */ 604 ptr = sc->ti_cdata.ti_jumbo_buf; 605 for (i = 0; i < TI_JSLOTS; i++) { 606 entry = &sc->ti_cdata.ti_jslots[i]; 607 entry->ti_sc = sc; 608 entry->ti_buf = ptr; 609 entry->ti_inuse = 0; 610 entry->ti_slot = i; 611 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jslot_link); 612 ptr += TI_JLEN; 613 } 614 615 return(0); 616 } 617 618 /* 619 * Allocate a jumbo buffer. 620 */ 621 static struct ti_jslot * 622 ti_jalloc(struct ti_softc *sc) 623 { 624 struct ti_jslot *entry; 625 626 lwkt_serialize_enter(&sc->ti_jslot_serializer); 627 entry = SLIST_FIRST(&sc->ti_jfree_listhead); 628 if (entry) { 629 SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jslot_link); 630 entry->ti_inuse = 1; 631 } else { 632 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n"); 633 } 634 lwkt_serialize_exit(&sc->ti_jslot_serializer); 635 return(entry); 636 } 637 638 /* 639 * Adjust usage count on a jumbo buffer. In general this doesn't 640 * get used much because our jumbo buffers don't get passed around 641 * too much, but it's implemented for correctness. 
642 */ 643 static void 644 ti_jref(void *arg) 645 { 646 struct ti_jslot *entry = (struct ti_jslot *)arg; 647 struct ti_softc *sc = entry->ti_sc; 648 649 if (sc == NULL) 650 panic("ti_jref: can't find softc pointer!"); 651 652 if (&sc->ti_cdata.ti_jslots[entry->ti_slot] != entry) 653 panic("ti_jref: asked to reference buffer " 654 "that we don't manage!"); 655 if (entry->ti_inuse == 0) 656 panic("ti_jref: buffer already free!"); 657 atomic_add_int(&entry->ti_inuse, 1); 658 } 659 660 /* 661 * Release a jumbo buffer. 662 */ 663 static void 664 ti_jfree(void *arg) 665 { 666 struct ti_jslot *entry = (struct ti_jslot *)arg; 667 struct ti_softc *sc = entry->ti_sc; 668 669 if (sc == NULL) 670 panic("ti_jref: can't find softc pointer!"); 671 672 if (&sc->ti_cdata.ti_jslots[entry->ti_slot] != entry) 673 panic("ti_jref: asked to reference buffer " 674 "that we don't manage!"); 675 if (entry->ti_inuse == 0) 676 panic("ti_jref: buffer already free!"); 677 lwkt_serialize_enter(&sc->ti_jslot_serializer); 678 atomic_subtract_int(&entry->ti_inuse, 1); 679 if (entry->ti_inuse == 0) 680 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jslot_link); 681 lwkt_serialize_exit(&sc->ti_jslot_serializer); 682 } 683 684 685 /* 686 * Intialize a standard receive ring descriptor. 
687 */ 688 static int 689 ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m) 690 { 691 struct mbuf *m_new; 692 struct ti_rx_desc *r; 693 694 if (m == NULL) { 695 m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 696 if (m_new == NULL) 697 return (ENOBUFS); 698 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 699 } else { 700 m_new = m; 701 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 702 m_new->m_data = m_new->m_ext.ext_buf; 703 } 704 705 706 m_adj(m_new, ETHER_ALIGN); 707 sc->ti_cdata.ti_rx_std_chain[i] = m_new; 708 r = &sc->ti_rdata->ti_rx_std_ring[i]; 709 TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t)); 710 r->ti_type = TI_BDTYPE_RECV_BD; 711 r->ti_flags = 0; 712 if (sc->arpcom.ac_if.if_hwassist) 713 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; 714 r->ti_len = m_new->m_len; 715 r->ti_idx = i; 716 717 return(0); 718 } 719 720 /* 721 * Intialize a mini receive ring descriptor. This only applies to 722 * the Tigon 2. 723 */ 724 static int 725 ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m) 726 { 727 struct mbuf *m_new; 728 struct ti_rx_desc *r; 729 730 if (m == NULL) { 731 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 732 if (m_new == NULL) { 733 return(ENOBUFS); 734 } 735 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 736 } else { 737 m_new = m; 738 m_new->m_data = m_new->m_pktdat; 739 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 740 } 741 742 m_adj(m_new, ETHER_ALIGN); 743 r = &sc->ti_rdata->ti_rx_mini_ring[i]; 744 sc->ti_cdata.ti_rx_mini_chain[i] = m_new; 745 TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t)); 746 r->ti_type = TI_BDTYPE_RECV_BD; 747 r->ti_flags = TI_BDFLAG_MINI_RING; 748 if (sc->arpcom.ac_if.if_hwassist) 749 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; 750 r->ti_len = m_new->m_len; 751 r->ti_idx = i; 752 753 return(0); 754 } 755 756 /* 757 * Initialize a jumbo receive ring descriptor. This allocates 758 * a jumbo buffer from the pool managed internally by the driver. 
759 */ 760 static int 761 ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m) 762 { 763 struct mbuf *m_new; 764 struct ti_rx_desc *r; 765 struct ti_jslot *buf; 766 767 if (m == NULL) { 768 /* Allocate the mbuf. */ 769 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 770 if (m_new == NULL) { 771 return(ENOBUFS); 772 } 773 774 /* Allocate the jumbo buffer */ 775 buf = ti_jalloc(sc); 776 if (buf == NULL) { 777 m_freem(m_new); 778 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed " 779 "-- packet dropped!\n"); 780 return(ENOBUFS); 781 } 782 783 /* Attach the buffer to the mbuf. */ 784 m_new->m_ext.ext_arg = buf; 785 m_new->m_ext.ext_buf = buf->ti_buf; 786 m_new->m_ext.ext_free = ti_jfree; 787 m_new->m_ext.ext_ref = ti_jref; 788 m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN; 789 790 m_new->m_flags |= M_EXT; 791 } else { 792 /* 793 * We're re-using a previously allocated mbuf; 794 * be sure to re-init pointers and lengths to 795 * default values. 796 */ 797 KKASSERT(m->m_flags & M_EXT); 798 m_new = m; 799 } 800 m_new->m_data = m_new->m_ext.ext_buf; 801 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; 802 803 m_adj(m_new, ETHER_ALIGN); 804 /* Set up the descriptor. */ 805 r = &sc->ti_rdata->ti_rx_jumbo_ring[i]; 806 sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new; 807 TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t)); 808 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; 809 r->ti_flags = TI_BDFLAG_JUMBO_RING; 810 if (sc->arpcom.ac_if.if_hwassist) 811 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; 812 r->ti_len = m_new->m_len; 813 r->ti_idx = i; 814 815 return(0); 816 } 817 818 /* 819 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 820 * that's 1MB or memory, which is a lot. For now, we fill only the first 821 * 256 ring entries and hope that our CPU is fast enough to keep up with 822 * the NIC. 
823 */ 824 static int 825 ti_init_rx_ring_std(struct ti_softc *sc) 826 { 827 int i; 828 struct ti_cmd_desc cmd; 829 830 for (i = 0; i < TI_SSLOTS; i++) { 831 if (ti_newbuf_std(sc, i, NULL) == ENOBUFS) 832 return(ENOBUFS); 833 }; 834 835 TI_UPDATE_STDPROD(sc, i - 1); 836 sc->ti_std = i - 1; 837 838 return(0); 839 } 840 841 static void 842 ti_free_rx_ring_std(struct ti_softc *sc) 843 { 844 int i; 845 846 for (i = 0; i < TI_STD_RX_RING_CNT; i++) { 847 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) { 848 m_freem(sc->ti_cdata.ti_rx_std_chain[i]); 849 sc->ti_cdata.ti_rx_std_chain[i] = NULL; 850 } 851 bzero(&sc->ti_rdata->ti_rx_std_ring[i], 852 sizeof(struct ti_rx_desc)); 853 } 854 } 855 856 static int 857 ti_init_rx_ring_jumbo(struct ti_softc *sc) 858 { 859 int i; 860 struct ti_cmd_desc cmd; 861 862 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 863 if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 864 return(ENOBUFS); 865 } 866 867 TI_UPDATE_JUMBOPROD(sc, i - 1); 868 sc->ti_jumbo = i - 1; 869 870 return(0); 871 } 872 873 static void 874 ti_free_rx_ring_jumbo(struct ti_softc *sc) 875 { 876 int i; 877 878 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 879 if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) { 880 m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]); 881 sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL; 882 } 883 bzero(&sc->ti_rdata->ti_rx_jumbo_ring[i], 884 sizeof(struct ti_rx_desc)); 885 } 886 } 887 888 static int 889 ti_init_rx_ring_mini(struct ti_softc *sc) 890 { 891 int i; 892 893 for (i = 0; i < TI_MSLOTS; i++) { 894 if (ti_newbuf_mini(sc, i, NULL) == ENOBUFS) 895 return(ENOBUFS); 896 } 897 898 TI_UPDATE_MINIPROD(sc, i - 1); 899 sc->ti_mini = i - 1; 900 901 return(0); 902 } 903 904 static void 905 ti_free_rx_ring_mini(struct ti_softc *sc) 906 { 907 int i; 908 909 for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { 910 if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) { 911 m_freem(sc->ti_cdata.ti_rx_mini_chain[i]); 912 sc->ti_cdata.ti_rx_mini_chain[i] = NULL; 913 } 914 
bzero(&sc->ti_rdata->ti_rx_mini_ring[i], 915 sizeof(struct ti_rx_desc)); 916 } 917 } 918 919 static void 920 ti_free_tx_ring(struct ti_softc *sc) 921 { 922 int i; 923 924 if (sc->ti_rdata->ti_tx_ring == NULL) 925 return; 926 927 for (i = 0; i < TI_TX_RING_CNT; i++) { 928 if (sc->ti_cdata.ti_tx_chain[i] != NULL) { 929 m_freem(sc->ti_cdata.ti_tx_chain[i]); 930 sc->ti_cdata.ti_tx_chain[i] = NULL; 931 } 932 bzero(&sc->ti_rdata->ti_tx_ring[i], 933 sizeof(struct ti_tx_desc)); 934 } 935 } 936 937 static int 938 ti_init_tx_ring(struct ti_softc *sc) 939 { 940 sc->ti_txcnt = 0; 941 sc->ti_tx_saved_considx = 0; 942 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0); 943 return(0); 944 } 945 946 /* 947 * The Tigon 2 firmware has a new way to add/delete multicast addresses, 948 * but we have to support the old way too so that Tigon 1 cards will 949 * work. 950 */ 951 static void 952 ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr) 953 { 954 struct ti_cmd_desc cmd; 955 uint16_t *m; 956 uint32_t ext[2] = {0, 0}; 957 958 m = (uint16_t *)&addr->octet[0]; 959 960 switch(sc->ti_hwrev) { 961 case TI_HWREV_TIGON: 962 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 963 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 964 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0); 965 break; 966 case TI_HWREV_TIGON_II: 967 ext[0] = htons(m[0]); 968 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 969 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2); 970 break; 971 default: 972 if_printf(&sc->arpcom.ac_if, "unknown hwrev\n"); 973 break; 974 } 975 } 976 977 static void 978 ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr) 979 { 980 struct ti_cmd_desc cmd; 981 uint16_t *m; 982 uint32_t ext[2] = {0, 0}; 983 984 m = (uint16_t *)&addr->octet[0]; 985 986 switch(sc->ti_hwrev) { 987 case TI_HWREV_TIGON: 988 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 989 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 990 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0); 991 break; 992 case 
TI_HWREV_TIGON_II: 993 ext[0] = htons(m[0]); 994 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 995 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2); 996 break; 997 default: 998 if_printf(&sc->arpcom.ac_if, "unknown hwrev\n"); 999 break; 1000 } 1001 } 1002 1003 /* 1004 * Configure the Tigon's multicast address filter. 1005 * 1006 * The actual multicast table management is a bit of a pain, thanks to 1007 * slight brain damage on the part of both Alteon and us. With our 1008 * multicast code, we are only alerted when the multicast address table 1009 * changes and at that point we only have the current list of addresses: 1010 * we only know the current state, not the previous state, so we don't 1011 * actually know what addresses were removed or added. The firmware has 1012 * state, but we can't get our grubby mits on it, and there is no 'delete 1013 * all multicast addresses' command. Hence, we have to maintain our own 1014 * state so we know what addresses have been programmed into the NIC at 1015 * any given time. 1016 */ 1017 static void 1018 ti_setmulti(struct ti_softc *sc) 1019 { 1020 struct ifnet *ifp = &sc->arpcom.ac_if; 1021 struct ifmultiaddr *ifma; 1022 struct ti_cmd_desc cmd; 1023 struct ti_mc_entry *mc; 1024 uint32_t intrs; 1025 1026 if (ifp->if_flags & IFF_ALLMULTI) { 1027 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0); 1028 return; 1029 } 1030 1031 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0); 1032 1033 /* Disable interrupts. */ 1034 intrs = CSR_READ_4(sc, TI_MB_HOSTINTR); 1035 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1036 1037 /* First, zot all the existing filters. */ 1038 while (sc->ti_mc_listhead.slh_first != NULL) { 1039 mc = sc->ti_mc_listhead.slh_first; 1040 ti_del_mcast(sc, &mc->mc_addr); 1041 SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries); 1042 kfree(mc, M_DEVBUF); 1043 } 1044 1045 /* Now program new ones. 
*/ 1046 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1047 if (ifma->ifma_addr->sa_family != AF_LINK) 1048 continue; 1049 mc = kmalloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_INTWAIT); 1050 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1051 &mc->mc_addr, ETHER_ADDR_LEN); 1052 SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); 1053 ti_add_mcast(sc, &mc->mc_addr); 1054 } 1055 1056 /* Re-enable interrupts. */ 1057 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); 1058 } 1059 1060 /* 1061 * Check to see if the BIOS has configured us for a 64 bit slot when 1062 * we aren't actually in one. If we detect this condition, we can work 1063 * around it on the Tigon 2 by setting a bit in the PCI state register, 1064 * but for the Tigon 1 we must give up and abort the interface attach. 1065 */ 1066 static int 1067 ti_64bitslot_war(struct ti_softc *sc) 1068 { 1069 if ((CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS) == 0) { 1070 CSR_WRITE_4(sc, 0x600, 0); 1071 CSR_WRITE_4(sc, 0x604, 0); 1072 CSR_WRITE_4(sc, 0x600, 0x5555AAAA); 1073 if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) { 1074 if (sc->ti_hwrev == TI_HWREV_TIGON) 1075 return(EINVAL); 1076 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_32BIT_BUS); 1077 return(0); 1078 } 1079 } 1080 1081 return(0); 1082 } 1083 1084 /* 1085 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1086 * self-test results. 1087 */ 1088 static int 1089 ti_chipinit(struct ti_softc *sc) 1090 { 1091 struct ifnet *ifp = &sc->arpcom.ac_if; 1092 uint32_t cacheline; 1093 uint32_t pci_writemax = 0; 1094 1095 /* Initialize link to down state. */ 1096 sc->ti_linkstat = TI_EV_CODE_LINK_DOWN; 1097 1098 if (ifp->if_capenable & IFCAP_HWCSUM) 1099 ifp->if_hwassist = TI_CSUM_FEATURES; 1100 else 1101 ifp->if_hwassist = 0; 1102 1103 /* Set endianness before we access any non-PCI registers. 
*/ 1104 #if BYTE_ORDER == BIG_ENDIAN 1105 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1106 TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24)); 1107 #else 1108 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1109 TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24)); 1110 #endif 1111 1112 /* Check the ROM failed bit to see if self-tests passed. */ 1113 if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) { 1114 if_printf(ifp, "board self-diagnostics failed!\n"); 1115 return(ENODEV); 1116 } 1117 1118 /* Halt the CPU. */ 1119 TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT); 1120 1121 /* Figure out the hardware revision. */ 1122 switch(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) { 1123 case TI_REV_TIGON_I: 1124 sc->ti_hwrev = TI_HWREV_TIGON; 1125 break; 1126 case TI_REV_TIGON_II: 1127 sc->ti_hwrev = TI_HWREV_TIGON_II; 1128 break; 1129 default: 1130 if_printf(ifp, "unsupported chip revision\n"); 1131 return(ENODEV); 1132 } 1133 1134 /* Do special setup for Tigon 2. */ 1135 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1136 TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT); 1137 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K); 1138 TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS); 1139 } 1140 1141 /* Set up the PCI state register. */ 1142 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD); 1143 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1144 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT); 1145 } 1146 1147 /* Clear the read/write max DMA parameters. */ 1148 TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA| 1149 TI_PCISTATE_READ_MAXDMA)); 1150 1151 /* Get cache line size. */ 1152 cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF; 1153 1154 /* 1155 * If the system has set enabled the PCI memory write 1156 * and invalidate command in the command register, set 1157 * the write max parameter accordingly. This is necessary 1158 * to use MWI with the Tigon 2. 
1159 */ 1160 if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) { 1161 switch(cacheline) { 1162 case 1: 1163 case 4: 1164 case 8: 1165 case 16: 1166 case 32: 1167 case 64: 1168 break; 1169 default: 1170 /* Disable PCI memory write and invalidate. */ 1171 if (bootverbose) { 1172 if_printf(ifp, "cache line size %d not " 1173 "supported; disabling PCI MWI\n", 1174 cacheline); 1175 } 1176 CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc, 1177 TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN); 1178 break; 1179 } 1180 } 1181 1182 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax); 1183 1184 /* This sets the min dma param all the way up (0xff). */ 1185 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA); 1186 1187 /* Configure DMA variables. */ 1188 #if BYTE_ORDER == BIG_ENDIAN 1189 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD | 1190 TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD | 1191 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB | 1192 TI_OPMODE_DONT_FRAG_JUMBO); 1193 #else 1194 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA| 1195 TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO| 1196 TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB); 1197 #endif 1198 1199 /* 1200 * Only allow 1 DMA channel to be active at a time. 1201 * I don't think this is a good idea, but without it 1202 * the firmware racks up lots of nicDmaReadRingFull 1203 * errors. This is not compatible with hardware checksums. 1204 */ 1205 if (ifp->if_hwassist == 0) 1206 TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE); 1207 1208 /* Recommended settings from Tigon manual. */ 1209 CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W); 1210 CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W); 1211 1212 if (ti_64bitslot_war(sc)) { 1213 if_printf(ifp, "bios thinks we're in a 64 bit slot, " 1214 "but we aren't"); 1215 return(EINVAL); 1216 } 1217 1218 return(0); 1219 } 1220 1221 /* 1222 * Initialize the general information block and firmware, and 1223 * start the CPU(s) running. 
1224 */ 1225 static int 1226 ti_gibinit(struct ti_softc *sc) 1227 { 1228 struct ifnet *ifp = &sc->arpcom.ac_if; 1229 struct ti_rcb *rcb; 1230 int i; 1231 1232 /* Disable interrupts for now. */ 1233 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1234 1235 /* Tell the chip where to find the general information block. */ 1236 CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0); 1237 CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, vtophys(&sc->ti_rdata->ti_info)); 1238 1239 /* Load the firmware into SRAM. */ 1240 ti_loadfw(sc); 1241 1242 /* Set up the contents of the general info and ring control blocks. */ 1243 1244 /* Set up the event ring and producer pointer. */ 1245 rcb = &sc->ti_rdata->ti_info.ti_ev_rcb; 1246 1247 TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_event_ring); 1248 rcb->ti_flags = 0; 1249 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) = 1250 vtophys(&sc->ti_ev_prodidx); 1251 sc->ti_ev_prodidx.ti_idx = 0; 1252 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0); 1253 sc->ti_ev_saved_considx = 0; 1254 1255 /* Set up the command ring and producer mailbox. */ 1256 rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb; 1257 1258 sc->ti_rdata->ti_cmd_ring = 1259 (struct ti_cmd_desc *)(sc->ti_vhandle + TI_GCR_CMDRING); 1260 TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING); 1261 rcb->ti_flags = 0; 1262 rcb->ti_max_len = 0; 1263 for (i = 0; i < TI_CMD_RING_CNT; i++) 1264 CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0); 1265 CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0); 1266 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0); 1267 sc->ti_cmd_saved_prodidx = 0; 1268 1269 /* 1270 * Assign the address of the stats refresh buffer. 1271 * We re-use the current stats buffer for this to 1272 * conserve memory. 1273 */ 1274 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) = 1275 vtophys(&sc->ti_rdata->ti_info.ti_stats); 1276 1277 /* Set up the standard receive ring. 
*/ 1278 rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb; 1279 TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_rx_std_ring); 1280 rcb->ti_max_len = TI_FRAMELEN; 1281 rcb->ti_flags = 0; 1282 if (ifp->if_hwassist) 1283 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1284 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1285 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1286 1287 /* Set up the jumbo receive ring. */ 1288 rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb; 1289 TI_HOSTADDR(rcb->ti_hostaddr) = 1290 vtophys(&sc->ti_rdata->ti_rx_jumbo_ring); 1291 rcb->ti_max_len = TI_JUMBO_FRAMELEN; 1292 rcb->ti_flags = 0; 1293 if (ifp->if_hwassist) 1294 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1295 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1296 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1297 1298 /* 1299 * Set up the mini ring. Only activated on the 1300 * Tigon 2 but the slot in the config block is 1301 * still there on the Tigon 1. 1302 */ 1303 rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb; 1304 TI_HOSTADDR(rcb->ti_hostaddr) = 1305 vtophys(&sc->ti_rdata->ti_rx_mini_ring); 1306 rcb->ti_max_len = MHLEN - ETHER_ALIGN; 1307 if (sc->ti_hwrev == TI_HWREV_TIGON) 1308 rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED; 1309 else 1310 rcb->ti_flags = 0; 1311 if (ifp->if_hwassist) 1312 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1313 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1314 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1315 1316 /* 1317 * Set up the receive return ring. 1318 */ 1319 rcb = &sc->ti_rdata->ti_info.ti_return_rcb; 1320 TI_HOSTADDR(rcb->ti_hostaddr) = 1321 vtophys(&sc->ti_rdata->ti_rx_return_ring); 1322 rcb->ti_flags = 0; 1323 rcb->ti_max_len = TI_RETURN_RING_CNT; 1324 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) = 1325 vtophys(&sc->ti_return_prodidx); 1326 1327 /* 1328 * Set up the tx ring. 
Note: for the Tigon 2, we have the option 1329 * of putting the transmit ring in the host's address space and 1330 * letting the chip DMA it instead of leaving the ring in the NIC's 1331 * memory and accessing it through the shared memory region. We 1332 * do this for the Tigon 2, but it doesn't work on the Tigon 1, 1333 * so we have to revert to the shared memory scheme if we detect 1334 * a Tigon 1 chip. 1335 */ 1336 CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); 1337 if (sc->ti_hwrev == TI_HWREV_TIGON) { 1338 sc->ti_rdata->ti_tx_ring_nic = 1339 (struct ti_tx_desc *)(sc->ti_vhandle + TI_WINDOW); 1340 } 1341 bzero(sc->ti_rdata->ti_tx_ring, 1342 TI_TX_RING_CNT * sizeof(struct ti_tx_desc)); 1343 rcb = &sc->ti_rdata->ti_info.ti_tx_rcb; 1344 if (sc->ti_hwrev == TI_HWREV_TIGON) 1345 rcb->ti_flags = 0; 1346 else 1347 rcb->ti_flags = TI_RCB_FLAG_HOST_RING; 1348 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1349 if (ifp->if_hwassist) 1350 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1351 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1352 rcb->ti_max_len = TI_TX_RING_CNT; 1353 if (sc->ti_hwrev == TI_HWREV_TIGON) 1354 TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE; 1355 else 1356 TI_HOSTADDR(rcb->ti_hostaddr) = 1357 vtophys(&sc->ti_rdata->ti_tx_ring); 1358 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) = 1359 vtophys(&sc->ti_tx_considx); 1360 1361 /* Set up tuneables */ 1362 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 1363 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, 1364 (sc->ti_rx_coal_ticks / 10)); 1365 else 1366 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks); 1367 CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks); 1368 CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); 1369 CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds); 1370 CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds); 1371 CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio); 1372 1373 /* Turn interrupts on. 
*/ 1374 CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0); 1375 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 1376 1377 /* Start CPU. */ 1378 TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP)); 1379 1380 return(0); 1381 } 1382 1383 /* 1384 * Probe for a Tigon chip. Check the PCI vendor and device IDs 1385 * against our list and return its name if we find a match. 1386 */ 1387 static int 1388 ti_probe(device_t dev) 1389 { 1390 struct ti_type *t; 1391 uint16_t vendor, product; 1392 1393 vendor = pci_get_vendor(dev); 1394 product = pci_get_device(dev); 1395 1396 for (t = ti_devs; t->ti_name != NULL; t++) { 1397 if (vendor == t->ti_vid && product == t->ti_did) { 1398 device_set_desc(dev, t->ti_name); 1399 return(0); 1400 } 1401 } 1402 1403 return(ENXIO); 1404 } 1405 1406 static int 1407 ti_attach(device_t dev) 1408 { 1409 struct ti_softc *sc; 1410 struct ifnet *ifp; 1411 int error = 0, rid; 1412 uint8_t eaddr[ETHER_ADDR_LEN]; 1413 1414 sc = device_get_softc(dev); 1415 ifp = &sc->arpcom.ac_if; 1416 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1417 ifp->if_capabilities = IFCAP_HWCSUM | 1418 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 1419 ifp->if_capenable = ifp->if_capabilities; 1420 1421 pci_enable_busmaster(dev); 1422 1423 /* 1424 * Initialize media before any possible error may occur, 1425 * so we can destroy it unconditionally, if an error occurs later on. 
1426 */ 1427 ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts); 1428 1429 rid = TI_PCI_LOMEM; 1430 sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1431 RF_ACTIVE); 1432 1433 if (sc->ti_res == NULL) { 1434 device_printf(dev, "couldn't map memory\n"); 1435 error = ENXIO; 1436 goto fail; 1437 } 1438 1439 sc->ti_btag = rman_get_bustag(sc->ti_res); 1440 sc->ti_bhandle = rman_get_bushandle(sc->ti_res); 1441 sc->ti_vhandle = (vm_offset_t)rman_get_virtual(sc->ti_res); 1442 1443 /* Allocate interrupt */ 1444 rid = 0; 1445 sc->ti_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1446 RF_SHAREABLE | RF_ACTIVE); 1447 if (sc->ti_irq == NULL) { 1448 device_printf(dev, "couldn't map interrupt\n"); 1449 error = ENXIO; 1450 goto fail; 1451 } 1452 1453 if (ti_chipinit(sc)) { 1454 device_printf(dev, "chip initialization failed\n"); 1455 error = ENXIO; 1456 goto fail; 1457 } 1458 1459 /* Zero out the NIC's on-board SRAM. */ 1460 ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL); 1461 1462 /* Init again -- zeroing memory may have clobbered some registers. */ 1463 if (ti_chipinit(sc)) { 1464 device_printf(dev, "chip initialization failed\n"); 1465 error = ENXIO; 1466 goto fail; 1467 } 1468 1469 /* 1470 * Get station address from the EEPROM. Note: the manual states 1471 * that the MAC address is at offset 0x8c, however the data is 1472 * stored as two longwords (since that's how it's loaded into 1473 * the NIC). This means the MAC address is actually preceeded 1474 * by two zero bytes. We need to skip over those. 1475 */ 1476 if (ti_read_eeprom(sc, eaddr, TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 1477 device_printf(dev, "failed to read station address\n"); 1478 error = ENXIO; 1479 goto fail; 1480 } 1481 1482 /* Allocate the general information block and ring buffers. 
*/ 1483 sc->ti_rdata = contigmalloc(sizeof(struct ti_ring_data), M_DEVBUF, 1484 M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0); 1485 1486 if (sc->ti_rdata == NULL) { 1487 device_printf(dev, "no memory for list buffers!\n"); 1488 error = ENXIO; 1489 goto fail; 1490 } 1491 1492 /* Try to allocate memory for jumbo buffers. */ 1493 if (ti_alloc_jumbo_mem(sc)) { 1494 device_printf(dev, "jumbo buffer allocation failed\n"); 1495 error = ENXIO; 1496 goto fail; 1497 } 1498 1499 /* 1500 * We really need a better way to tell a 1000baseT card 1501 * from a 1000baseSX one, since in theory there could be 1502 * OEMed 1000baseT cards from lame vendors who aren't 1503 * clever enough to change the PCI ID. For the moment 1504 * though, the AceNIC is the only copper card available. 1505 */ 1506 if (pci_get_vendor(dev) == ALT_VENDORID && 1507 pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER) 1508 sc->ti_copper = 1; 1509 /* Ok, it's not the only copper card available. */ 1510 if (pci_get_vendor(dev) == NG_VENDORID && 1511 pci_get_device(dev) == NG_DEVICEID_GA620T) 1512 sc->ti_copper = 1; 1513 1514 /* Set default tuneable values. */ 1515 sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC; 1516 sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000; 1517 sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500; 1518 sc->ti_rx_max_coal_bds = 64; 1519 sc->ti_tx_max_coal_bds = 128; 1520 sc->ti_tx_buf_ratio = 21; 1521 1522 /* Set up ifnet structure */ 1523 ifp->if_softc = sc; 1524 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1525 ifp->if_ioctl = ti_ioctl; 1526 ifp->if_start = ti_start; 1527 ifp->if_watchdog = ti_watchdog; 1528 ifp->if_init = ti_init; 1529 ifp->if_mtu = ETHERMTU; 1530 ifq_set_maxlen(&ifp->if_snd, TI_TX_RING_CNT - 1); 1531 ifq_set_ready(&ifp->if_snd); 1532 1533 /* Set up ifmedia support. */ 1534 if (sc->ti_copper) { 1535 /* 1536 * Copper cards allow manual 10/100 mode selection, 1537 * but not manual 1000baseT mode selection. Why? 
1538 * Becuase currently there's no way to specify the 1539 * master/slave setting through the firmware interface, 1540 * so Alteon decided to just bag it and handle it 1541 * via autonegotiation. 1542 */ 1543 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 1544 ifmedia_add(&sc->ifmedia, 1545 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 1546 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); 1547 ifmedia_add(&sc->ifmedia, 1548 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 1549 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL); 1550 ifmedia_add(&sc->ifmedia, 1551 IFM_ETHER|IFM_1000_T | IFM_FDX, 0, NULL); 1552 } else { 1553 /* Fiber cards don't support 10/100 modes. */ 1554 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 1555 ifmedia_add(&sc->ifmedia, 1556 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 1557 } 1558 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 1559 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO); 1560 1561 /* 1562 * Call MI attach routine. 1563 */ 1564 ether_ifattach(ifp, eaddr, NULL); 1565 1566 error = bus_setup_intr(dev, sc->ti_irq, INTR_MPSAFE, 1567 ti_intr, sc, &sc->ti_intrhand, 1568 ifp->if_serializer); 1569 if (error) { 1570 device_printf(dev, "couldn't set up irq\n"); 1571 ether_ifdetach(ifp); 1572 goto fail; 1573 } 1574 1575 ifp->if_cpuid = rman_get_cpuid(sc->ti_irq); 1576 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 1577 1578 return 0; 1579 fail: 1580 ti_detach(dev); 1581 return(error); 1582 } 1583 1584 static int 1585 ti_detach(device_t dev) 1586 { 1587 struct ti_softc *sc = device_get_softc(dev); 1588 struct ifnet *ifp = &sc->arpcom.ac_if; 1589 1590 if (device_is_attached(dev)) { 1591 lwkt_serialize_enter(ifp->if_serializer); 1592 ti_stop(sc); 1593 bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); 1594 lwkt_serialize_exit(ifp->if_serializer); 1595 1596 ether_ifdetach(ifp); 1597 } 1598 1599 if (sc->ti_irq != NULL) 1600 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); 1601 if (sc->ti_res != NULL) { 
1602 bus_release_resource(dev, SYS_RES_MEMORY, 1603 TI_PCI_LOMEM, sc->ti_res); 1604 } 1605 if (sc->ti_cdata.ti_jumbo_buf != NULL) 1606 contigfree(sc->ti_cdata.ti_jumbo_buf, TI_JMEM, M_DEVBUF); 1607 if (sc->ti_rdata != NULL) 1608 contigfree(sc->ti_rdata, sizeof(struct ti_ring_data), M_DEVBUF); 1609 ifmedia_removeall(&sc->ifmedia); 1610 1611 1612 return(0); 1613 } 1614 1615 /* 1616 * Frame reception handling. This is called if there's a frame 1617 * on the receive return list. 1618 * 1619 * Note: we have to be able to handle three possibilities here: 1620 * 1) the frame is from the mini receive ring (can only happen) 1621 * on Tigon 2 boards) 1622 * 2) the frame is from the jumbo recieve ring 1623 * 3) the frame is from the standard receive ring 1624 */ 1625 static void 1626 ti_rxeof(struct ti_softc *sc) 1627 { 1628 struct ifnet *ifp = &sc->arpcom.ac_if; 1629 struct ti_cmd_desc cmd; 1630 1631 while(sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) { 1632 struct ti_rx_desc *cur_rx; 1633 uint32_t rxidx; 1634 struct mbuf *m; 1635 uint16_t vlan_tag = 0; 1636 int have_tag = 0; 1637 1638 cur_rx = 1639 &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx]; 1640 rxidx = cur_rx->ti_idx; 1641 TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT); 1642 1643 if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) { 1644 have_tag = 1; 1645 vlan_tag = cur_rx->ti_vlan_tag & 0xfff; 1646 } 1647 1648 if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) { 1649 TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT); 1650 m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx]; 1651 sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL; 1652 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 1653 ifp->if_ierrors++; 1654 ti_newbuf_jumbo(sc, sc->ti_jumbo, m); 1655 continue; 1656 } 1657 if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) { 1658 ifp->if_ierrors++; 1659 ti_newbuf_jumbo(sc, sc->ti_jumbo, m); 1660 continue; 1661 } 1662 } else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) { 1663 TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT); 1664 m = 
sc->ti_cdata.ti_rx_mini_chain[rxidx]; 1665 sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL; 1666 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 1667 ifp->if_ierrors++; 1668 ti_newbuf_mini(sc, sc->ti_mini, m); 1669 continue; 1670 } 1671 if (ti_newbuf_mini(sc, sc->ti_mini, NULL) == ENOBUFS) { 1672 ifp->if_ierrors++; 1673 ti_newbuf_mini(sc, sc->ti_mini, m); 1674 continue; 1675 } 1676 } else { 1677 TI_INC(sc->ti_std, TI_STD_RX_RING_CNT); 1678 m = sc->ti_cdata.ti_rx_std_chain[rxidx]; 1679 sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL; 1680 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 1681 ifp->if_ierrors++; 1682 ti_newbuf_std(sc, sc->ti_std, m); 1683 continue; 1684 } 1685 if (ti_newbuf_std(sc, sc->ti_std, NULL) == ENOBUFS) { 1686 ifp->if_ierrors++; 1687 ti_newbuf_std(sc, sc->ti_std, m); 1688 continue; 1689 } 1690 } 1691 1692 m->m_pkthdr.len = m->m_len = cur_rx->ti_len; 1693 ifp->if_ipackets++; 1694 m->m_pkthdr.rcvif = ifp; 1695 1696 if (ifp->if_hwassist) { 1697 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | 1698 CSUM_DATA_VALID; 1699 if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0) 1700 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1701 m->m_pkthdr.csum_data = cur_rx->ti_tcp_udp_cksum; 1702 } 1703 1704 if (have_tag) { 1705 m->m_flags |= M_VLANTAG; 1706 m->m_pkthdr.ether_vlantag = vlan_tag; 1707 } 1708 ifp->if_input(ifp, m); 1709 } 1710 1711 /* Only necessary on the Tigon 1. */ 1712 if (sc->ti_hwrev == TI_HWREV_TIGON) 1713 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 1714 sc->ti_rx_saved_considx); 1715 1716 TI_UPDATE_STDPROD(sc, sc->ti_std); 1717 TI_UPDATE_MINIPROD(sc, sc->ti_mini); 1718 TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo); 1719 } 1720 1721 static void 1722 ti_txeof(struct ti_softc *sc) 1723 { 1724 struct ifnet *ifp = &sc->arpcom.ac_if; 1725 struct ti_tx_desc *cur_tx = NULL; 1726 1727 /* 1728 * Go through our tx ring and free mbufs for those 1729 * frames that have been sent. 
1730 */ 1731 while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) { 1732 uint32_t idx = 0; 1733 1734 idx = sc->ti_tx_saved_considx; 1735 if (sc->ti_hwrev == TI_HWREV_TIGON) { 1736 if (idx > 383) 1737 CSR_WRITE_4(sc, TI_WINBASE, 1738 TI_TX_RING_BASE + 6144); 1739 else if (idx > 255) 1740 CSR_WRITE_4(sc, TI_WINBASE, 1741 TI_TX_RING_BASE + 4096); 1742 else if (idx > 127) 1743 CSR_WRITE_4(sc, TI_WINBASE, 1744 TI_TX_RING_BASE + 2048); 1745 else 1746 CSR_WRITE_4(sc, TI_WINBASE, 1747 TI_TX_RING_BASE); 1748 cur_tx = &sc->ti_rdata->ti_tx_ring_nic[idx % 128]; 1749 } else 1750 cur_tx = &sc->ti_rdata->ti_tx_ring[idx]; 1751 if (cur_tx->ti_flags & TI_BDFLAG_END) 1752 ifp->if_opackets++; 1753 if (sc->ti_cdata.ti_tx_chain[idx] != NULL) { 1754 m_freem(sc->ti_cdata.ti_tx_chain[idx]); 1755 sc->ti_cdata.ti_tx_chain[idx] = NULL; 1756 } 1757 sc->ti_txcnt--; 1758 TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT); 1759 ifp->if_timer = 0; 1760 } 1761 1762 if (cur_tx != NULL) 1763 ifp->if_flags &= ~IFF_OACTIVE; 1764 } 1765 1766 static void 1767 ti_intr(void *xsc) 1768 { 1769 struct ti_softc *sc = xsc; 1770 struct ifnet *ifp = &sc->arpcom.ac_if; 1771 1772 #ifdef notdef 1773 /* Avoid this for now -- checking this register is expensive. */ 1774 /* Make sure this is really our interrupt. */ 1775 if ((CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE) == 0) 1776 return; 1777 #endif 1778 1779 /* Ack interrupt and stop others from occuring. */ 1780 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1781 1782 if (ifp->if_flags & IFF_RUNNING) { 1783 /* Check RX return ring producer/consumer */ 1784 ti_rxeof(sc); 1785 1786 /* Check TX ring producer/consumer */ 1787 ti_txeof(sc); 1788 } 1789 1790 ti_handle_events(sc); 1791 1792 /* Re-enable interrupts. 
*/ 1793 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 1794 1795 if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd)) 1796 if_devstart(ifp); 1797 } 1798 1799 static void 1800 ti_stats_update(struct ti_softc *sc) 1801 { 1802 struct ifnet *ifp = &sc->arpcom.ac_if; 1803 1804 ifp->if_collisions += 1805 (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames + 1806 sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames + 1807 sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions + 1808 sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) - 1809 ifp->if_collisions; 1810 } 1811 1812 /* 1813 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 1814 * pointers to descriptors. 1815 */ 1816 static int 1817 ti_encap(struct ti_softc *sc, struct mbuf *m_head, uint32_t *txidx) 1818 { 1819 struct ti_tx_desc *f = NULL; 1820 struct mbuf *m; 1821 uint32_t cnt = 0, cur, frag; 1822 uint16_t csum_flags = 0, vlan_tag = 0, vlan_flag = 0; 1823 1824 if (m_head->m_flags & M_VLANTAG) { 1825 vlan_tag = m_head->m_pkthdr.ether_vlantag; 1826 vlan_flag = TI_BDFLAG_VLAN_TAG; 1827 } 1828 1829 m = m_head; 1830 cur = frag = *txidx; 1831 1832 if (m_head->m_pkthdr.csum_flags) { 1833 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 1834 csum_flags |= TI_BDFLAG_IP_CKSUM; 1835 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 1836 csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM; 1837 if (m_head->m_flags & M_LASTFRAG) 1838 csum_flags |= TI_BDFLAG_IP_FRAG_END; 1839 else if (m_head->m_flags & M_FRAG) 1840 csum_flags |= TI_BDFLAG_IP_FRAG; 1841 } 1842 /* 1843 * Start packing the mbufs in this chain into 1844 * the fragment pointers. Stop when we run out 1845 * of fragments or hit the end of the mbuf chain. 
1846 */ 1847 for (m = m_head; m != NULL; m = m->m_next) { 1848 if (m->m_len != 0) { 1849 if (sc->ti_hwrev == TI_HWREV_TIGON) { 1850 if (frag > 383) 1851 CSR_WRITE_4(sc, TI_WINBASE, 1852 TI_TX_RING_BASE + 6144); 1853 else if (frag > 255) 1854 CSR_WRITE_4(sc, TI_WINBASE, 1855 TI_TX_RING_BASE + 4096); 1856 else if (frag > 127) 1857 CSR_WRITE_4(sc, TI_WINBASE, 1858 TI_TX_RING_BASE + 2048); 1859 else 1860 CSR_WRITE_4(sc, TI_WINBASE, 1861 TI_TX_RING_BASE); 1862 f = &sc->ti_rdata->ti_tx_ring_nic[frag % 128]; 1863 } else 1864 f = &sc->ti_rdata->ti_tx_ring[frag]; 1865 if (sc->ti_cdata.ti_tx_chain[frag] != NULL) 1866 break; 1867 TI_HOSTADDR(f->ti_addr) = vtophys(mtod(m, vm_offset_t)); 1868 f->ti_len = m->m_len; 1869 f->ti_flags = csum_flags | vlan_flag; 1870 f->ti_vlan_tag = vlan_tag & 0xfff; 1871 1872 /* 1873 * Sanity check: avoid coming within 16 descriptors 1874 * of the end of the ring. 1875 */ 1876 if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16) 1877 return(ENOBUFS); 1878 cur = frag; 1879 TI_INC(frag, TI_TX_RING_CNT); 1880 cnt++; 1881 } 1882 } 1883 1884 if (m != NULL) 1885 return(ENOBUFS); 1886 1887 if (frag == sc->ti_tx_saved_considx) 1888 return(ENOBUFS); 1889 1890 if (sc->ti_hwrev == TI_HWREV_TIGON) 1891 sc->ti_rdata->ti_tx_ring_nic[cur % 128].ti_flags |= 1892 TI_BDFLAG_END; 1893 else 1894 sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END; 1895 sc->ti_cdata.ti_tx_chain[cur] = m_head; 1896 sc->ti_txcnt += cnt; 1897 1898 *txidx = frag; 1899 1900 return(0); 1901 } 1902 1903 /* 1904 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1905 * to the mbuf data regions directly in the transmit descriptors. 
1906 */ 1907 static void 1908 ti_start(struct ifnet *ifp) 1909 { 1910 struct ti_softc *sc = ifp->if_softc; 1911 struct mbuf *m_head = NULL; 1912 uint32_t prodidx = 0; 1913 int need_trans; 1914 1915 prodidx = CSR_READ_4(sc, TI_MB_SENDPROD_IDX); 1916 1917 need_trans = 0; 1918 while(sc->ti_cdata.ti_tx_chain[prodidx] == NULL) { 1919 m_head = ifq_dequeue(&ifp->if_snd, NULL); 1920 if (m_head == NULL) 1921 break; 1922 1923 /* 1924 * XXX 1925 * safety overkill. If this is a fragmented packet chain 1926 * with delayed TCP/UDP checksums, then only encapsulate 1927 * it if we have enough descriptors to handle the entire 1928 * chain at once. 1929 * (paranoia -- may not actually be needed) 1930 */ 1931 if (m_head->m_flags & M_FIRSTFRAG && 1932 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 1933 if ((TI_TX_RING_CNT - sc->ti_txcnt) < 1934 m_head->m_pkthdr.csum_data + 16) { 1935 ifp->if_flags |= IFF_OACTIVE; 1936 ifq_prepend(&ifp->if_snd, m_head); 1937 break; 1938 } 1939 } 1940 1941 /* 1942 * Pack the data into the transmit ring. If we 1943 * don't have room, set the OACTIVE flag and wait 1944 * for the NIC to drain the ring. 1945 */ 1946 if (ti_encap(sc, m_head, &prodidx)) { 1947 ifp->if_flags |= IFF_OACTIVE; 1948 ifq_prepend(&ifp->if_snd, m_head); 1949 break; 1950 } 1951 need_trans = 1; 1952 1953 ETHER_BPF_MTAP(ifp, m_head); 1954 } 1955 1956 if (!need_trans) 1957 return; 1958 1959 /* Transmit */ 1960 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx); 1961 1962 /* 1963 * Set a timeout in case the chip goes out to lunch. 1964 */ 1965 ifp->if_timer = 5; 1966 } 1967 1968 static void 1969 ti_init(void *xsc) 1970 { 1971 struct ti_softc *sc = xsc; 1972 1973 /* Cancel pending I/O and flush buffers. */ 1974 ti_stop(sc); 1975 1976 /* Init the gen info block, ring control blocks and firmware. 
*/
	if (ti_gibinit(sc)) {
		if_printf(&sc->arpcom.ac_if, "initialization failure\n");
		return;
	}
}

/*
 * Second-stage initialization: runs once the firmware has been loaded
 * and started (via ti_gibinit()).  Everything here is communicated to
 * the NIC either through GCR register writes or TI_DO_CMD() firmware
 * commands, so the firmware must already be up when this is called.
 */
static void
ti_init2(struct ti_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ti_cmd_desc cmd;	/* scratch descriptor used by TI_DO_CMD() */
	uint16_t *m;
	struct ifmedia *ifm;
	int tmp;

	/* Specify MTU and interface index. */
	CSR_WRITE_4(sc, TI_GCR_IFINDEX, ifp->if_dunit);
	/* Firmware wants the full frame size, not just the payload MTU. */
	CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN);
	TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);

	/*
	 * Load our MAC address.  The 6-byte station address is handed to
	 * the NIC as three 16-bit words spread across the PAR0/PAR1
	 * registers, each converted to network byte order.
	 */
	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0]));
	CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2]));
	TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0);
	else
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);

	/* Program multicast filter. */
	ti_setmulti(sc);

	/*
	 * If this is a Tigon 1, we should tell the
	 * firmware to use software packet filtering.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);

	/* Init standard RX ring. */
	ti_init_rx_ring_std(sc);

	/* Init jumbo RX ring, but only if the MTU actually requires it. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		ti_init_rx_ring_jumbo(sc);

	/*
	 * If this is a Tigon 2, we can also configure the
	 * mini ring (small-packet receive ring; Tigon 1 lacks it).
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
		ti_init_rx_ring_mini(sc);

	/* Reset the RX return ring consumer index, both ours and the NIC's. */
	CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
	sc->ti_rx_saved_considx = 0;

	/* Init TX ring. */
	ti_init_tx_ring(sc);

	/* Tell firmware we're alive. */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);

	/* Enable host interrupts (0 = unmasked; ti_stop() writes 1 to mask). */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Make sure to set media properly. We have to do this
	 * here since we have to issue commands in order to set
	 * the link negotiation and we can't issue commands until
	 * the firmware is running.  ifm_media is temporarily pointed
	 * at the currently selected media so ti_ifmedia_upd() programs
	 * that selection, then restored to its previous value.
	 */
	ifm = &sc->ifmedia;
	tmp = ifm->ifm_media;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;
	ti_ifmedia_upd(ifp);
	ifm->ifm_media = tmp;
}

/*
 * Set media options.
 *
 * Programs the gigabit (TI_GCR_GLINK) and 10/100 (TI_GCR_LINK) link
 * control registers according to the selected ifmedia word, then kicks
 * the firmware's link negotiation state machine with the matching
 * TI_CMD_LINK_NEGOTIATION sub-command.  Returns 0 on success or EINVAL
 * for non-Ethernet media.
 */
static int
ti_ifmedia_upd(struct ifnet *ifp)
{
	struct ti_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->ifmedia;
	struct ti_cmd_desc cmd;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return(EINVAL);

	switch(IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything on both link blocks and let the
		 * firmware negotiate whichever speed comes up. */
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF | TI_GLNK_1000MB |
		    TI_GLNK_FULL_DUPLEX | TI_GLNK_RX_FLOWCTL_Y |
		    TI_GLNK_AUTONEGENB | TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB | TI_LNK_10MB |
		    TI_LNK_FULL_DUPLEX | TI_LNK_HALF_DUPLEX |
		    TI_LNK_AUTONEGENB | TI_LNK_ENB);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_BOTH, 0);
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Force gigabit: disable the 10/100 block entirely. */
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB |
		    TI_GLNK_RX_FLOWCTL_Y | TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, 0);
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
		break;
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_100_TX:
	case IFM_10_T:
		/* Force 10/100: disable the gigabit block entirely. */
		CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB | TI_LNK_PREF);
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX)
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
		else
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
		else
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_10_100, 0);
		break;
	}

	return(0);
}

/*
 * Report current media status.
 *
 * Link state comes from sc->ti_linkstat, which is maintained from
 * firmware link events elsewhere in the driver; speed/duplex details
 * are read back from the corresponding link status register.
 */
static void
ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ti_softc *sc = ifp->if_softc;
	uint32_t media = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: leave IFM_ACTIVE clear and report nothing further. */
	if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
		/* ti_copper distinguishes 1000baseT from 1000baseSX fiber. */
		if (sc->ti_copper)
			ifmr->ifm_active |= IFM_1000_T;
		else
			ifmr->ifm_active |= IFM_1000_SX;
		if (media & TI_GLNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
		if (sc->ti_copper) {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_TX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_T;
		} else {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_FX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_FL;
		}
		if (media & TI_LNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		if (media & TI_LNK_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
	}
}

/*
 * Interface ioctl handler.  The 'cr' credential argument is unused here;
 * unhandled commands fall through to ether_ioctl().
 */
static int
ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct ti_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ti_cmd_desc cmd;
	int error = 0, mask;

	switch(command) {
	case SIOCSIFMTU:
		/*
		 * NOTE(review): only the upper bound is validated here;
		 * there is no check against a minimum MTU, so a
		 * nonsensically small value would be accepted.  Confirm
		 * whether a lower bound (e.g. ETHERMIN) should be
		 * enforced as in later versions of this driver.
		 */
		if (ifr->ifr_mtu > TI_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Full re-init so the RX rings match the new MTU. */
			ti_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ti_if_flags & IFF_PROMISC)) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_ENB, 0);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ti_if_flags & IFF_PROMISC) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_DIS, 0);
			} else
				ti_init(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			ti_stop(sc);
		}
		/* Remember flags so the next call can detect PROMISC flips. */
		sc->ti_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Reprogram the hardware multicast filter if we're up. */
		if (ifp->if_flags & IFF_RUNNING) {
			ti_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	case SIOCSIFCAP:
		/* Toggle hardware checksum offload; re-init to apply. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_flags & IFF_RUNNING)
				ti_init(sc);
		}
		error = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}

/*
 * Transmit watchdog: fires when a queued transmission has not completed
 * in time.  Resets the chip, counts an output error, and restarts the
 * send queue if packets are still pending.
 */
static void
ti_watchdog(struct ifnet *ifp)
{
	struct ti_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout -- resetting\n");
	ti_stop(sc);
	ti_init(sc);

	ifp->if_oerrors++;

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
ti_stop(struct ti_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ti_cmd_desc cmd;

	/* Disable host interrupts (1 = masked; see ti_init2()). */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
	/*
	 * Tell firmware we're shutting down.
	 */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);

	/*
	 * Halt and reinitialize, clearing NIC SRAM above the 8K mark
	 * (0x2000..0x100000) between the two chip resets.
	 */
	ti_chipinit(sc);
	ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL);
	ti_chipinit(sc);

	/* Free the RX lists. */
	ti_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	ti_free_rx_ring_jumbo(sc);

	/* Free mini RX list. */
	ti_free_rx_ring_mini(sc);

	/* Free TX buffers. */
	ti_free_tx_ring(sc);

	/* Reset all saved producer/consumer indexes. */
	sc->ti_ev_prodidx.ti_idx = 0;
	sc->ti_return_prodidx.ti_idx = 0;
	sc->ti_tx_considx.ti_idx = 0;
	sc->ti_tx_saved_considx = TI_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
ti_shutdown(device_t dev)
{
	struct ti_softc *sc = device_get_softc(dev);

	ti_chipinit(sc);
}