1 /* 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 * 32 * $FreeBSD: src/sys/pci/if_ti.c,v 1.25.2.14 2002/02/15 04:20:20 silby Exp $ 33 * $DragonFly: src/sys/dev/netif/ti/if_ti.c,v 1.52 2008/05/16 13:19:12 sephe Exp $ 34 */ 35 36 /* 37 * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD. 38 * Manuals, sample driver and firmware source kits are available 39 * from http://www.alteon.com/support/openkits. 40 * 41 * Written by Bill Paul <wpaul@ctr.columbia.edu> 42 * Electrical Engineering Department 43 * Columbia University, New York City 44 */ 45 46 /* 47 * The Alteon Networks Tigon chip contains an embedded R4000 CPU, 48 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs 49 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The 50 * Tigon supports hardware IP, TCP and UCP checksumming, multicast 51 * filtering and jumbo (9014 byte) frames. The hardware is largely 52 * controlled by firmware, which must be loaded into the NIC during 53 * initialization. 54 * 55 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware 56 * revision, which supports new features such as extended commands, 57 * extended jumbo receive ring desciptors and a mini receive ring. 58 * 59 * Alteon Networks is to be commended for releasing such a vast amount 60 * of development material for the Tigon NIC without requiring an NDA 61 * (although they really should have done it a long time ago). With 62 * any luck, the other vendors will finally wise up and follow Alteon's 63 * stellar example. 64 * 65 * The firmware for the Tigon 1 and 2 NICs is compiled directly into 66 * this driver by #including it as a C header file. This bloats the 67 * driver somewhat, but it's the easiest method considering that the 68 * driver code and firmware code need to be kept in sync. The source 69 * for the firmware is not provided with the FreeBSD distribution since 70 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3. 71 * 72 * The following people deserve special thanks: 73 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board 74 * for testing 75 * - Raymond Lee of Netgear, for providing a pair of Netgear 76 * GA620 Tigon 2 boards for testing 77 * - Ulf Zimmermann, for bringing the GA260 to my attention and 78 * convincing me to write this driver. 79 * - Andrew Gallatin for providing FreeBSD/Alpha support. 80 */ 81 82 #include <sys/param.h> 83 #include <sys/systm.h> 84 #include <sys/sockio.h> 85 #include <sys/mbuf.h> 86 #include <sys/malloc.h> 87 #include <sys/kernel.h> 88 #include <sys/socket.h> 89 #include <sys/queue.h> 90 #include <sys/serialize.h> 91 #include <sys/bus.h> 92 #include <sys/rman.h> 93 #include <sys/thread2.h> 94 #include <sys/interrupt.h> 95 96 #include <net/if.h> 97 #include <net/ifq_var.h> 98 #include <net/if_arp.h> 99 #include <net/ethernet.h> 100 #include <net/if_dl.h> 101 #include <net/if_media.h> 102 #include <net/if_types.h> 103 #include <net/vlan/if_vlan_var.h> 104 #include <net/vlan/if_vlan_ether.h> 105 106 #include <net/bpf.h> 107 108 #include <netinet/in_systm.h> 109 #include <netinet/in.h> 110 #include <netinet/ip.h> 111 112 #include <vm/vm.h> /* for vtophys */ 113 #include <vm/pmap.h> /* for vtophys */ 114 115 #include <bus/pci/pcireg.h> 116 #include <bus/pci/pcivar.h> 117 118 #include "if_tireg.h" 119 #include "ti_fw.h" 120 #include "ti_fw2.h" 121 122 /* 123 * Temporarily disable the checksum offload support for now. 124 * Tests with ftp.freesoftware.com show that after about 12 hours, 125 * the firmware will begin calculating completely bogus TX checksums 126 * and refuse to stop until the interface is reset. Unfortunately, 127 * there isn't enough time to fully debug this before the 4.1 128 * release, so this will need to stay off for now. 129 */ 130 #ifdef notdef 131 #define TI_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS) 132 #else 133 #define TI_CSUM_FEATURES 0 134 #endif 135 136 /* 137 * Various supported device vendors/types and their names. 138 */ 139 140 static struct ti_type ti_devs[] = { 141 { ALT_VENDORID, ALT_DEVICEID_ACENIC, 142 "Alteon AceNIC 1000baseSX Gigabit Ethernet" }, 143 { ALT_VENDORID, ALT_DEVICEID_ACENIC_COPPER, 144 "Alteon AceNIC 1000baseT Gigabit Ethernet" }, 145 { TC_VENDORID, TC_DEVICEID_3C985, 146 "3Com 3c985-SX Gigabit Ethernet" }, 147 { NG_VENDORID, NG_DEVICEID_GA620, 148 "Netgear GA620 1000baseSX Gigabit Ethernet" }, 149 { NG_VENDORID, NG_DEVICEID_GA620T, 150 "Netgear GA620 1000baseT Gigabit Ethernet" }, 151 { SGI_VENDORID, SGI_DEVICEID_TIGON, 152 "Silicon Graphics Gigabit Ethernet" }, 153 { DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX, 154 "Farallon PN9000SX Gigabit Ethernet" }, 155 { 0, 0, NULL } 156 }; 157 158 static int ti_probe(device_t); 159 static int ti_attach(device_t); 160 static int ti_detach(device_t); 161 static void ti_txeof(struct ti_softc *); 162 static void ti_rxeof(struct ti_softc *); 163 164 static void ti_stats_update(struct ti_softc *); 165 static int ti_encap(struct ti_softc *, struct mbuf *, uint32_t *); 166 167 static void ti_intr(void *); 168 static void ti_start(struct ifnet *); 169 static int ti_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 170 static void ti_init(void *); 171 static void ti_init2(struct ti_softc *); 172 static void ti_stop(struct ti_softc *); 173 static void ti_watchdog(struct ifnet *); 174 static void ti_shutdown(device_t); 175 static int ti_ifmedia_upd(struct ifnet *); 176 static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *); 177 178 static uint32_t ti_eeprom_putbyte(struct ti_softc *, int); 179 static uint8_t ti_eeprom_getbyte(struct ti_softc *, int, uint8_t *); 180 static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int); 181 182 static void ti_add_mcast(struct ti_softc *, struct ether_addr *); 183 static void ti_del_mcast(struct ti_softc *, struct ether_addr *); 184 static void ti_setmulti(struct ti_softc *); 185 186 static void ti_mem(struct ti_softc *, uint32_t, uint32_t, caddr_t); 187 static void ti_loadfw(struct ti_softc *); 188 static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *); 189 static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, 190 caddr_t, int); 191 static void ti_handle_events(struct ti_softc *); 192 static int ti_alloc_jumbo_mem(struct ti_softc *); 193 static struct ti_jslot * 194 ti_jalloc(struct ti_softc *); 195 static void ti_jfree(void *); 196 static void ti_jref(void *); 197 static int ti_newbuf_std(struct ti_softc *, int, struct mbuf *); 198 static int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *); 199 static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *); 200 static int ti_init_rx_ring_std(struct ti_softc *); 201 static void ti_free_rx_ring_std(struct ti_softc *); 202 static int ti_init_rx_ring_jumbo(struct ti_softc *); 203 static void ti_free_rx_ring_jumbo(struct ti_softc *); 204 static int ti_init_rx_ring_mini(struct ti_softc *); 205 static void ti_free_rx_ring_mini(struct ti_softc *); 206 static void ti_free_tx_ring(struct ti_softc *); 207 static int ti_init_tx_ring(struct ti_softc *); 208 209 static int ti_64bitslot_war(struct ti_softc *); 210 static int ti_chipinit(struct ti_softc *); 211 static int ti_gibinit(struct ti_softc *); 212 213 static device_method_t ti_methods[] = { 214 /* Device interface */ 215 DEVMETHOD(device_probe, ti_probe), 216 DEVMETHOD(device_attach, ti_attach), 217 DEVMETHOD(device_detach, ti_detach), 218 DEVMETHOD(device_shutdown, ti_shutdown), 219 { 0, 0 } 220 }; 221 222 223 static DEFINE_CLASS_0(ti, ti_driver, ti_methods, sizeof(struct ti_softc)); 224 static devclass_t ti_devclass; 225 226 DECLARE_DUMMY_MODULE(if_ti); 227 DRIVER_MODULE(if_ti, pci, ti_driver, ti_devclass, 0, 0); 228 229 /* 230 * Send an instruction or address to the EEPROM, check for ACK. 231 */ 232 static uint32_t 233 ti_eeprom_putbyte(struct ti_softc *sc, int byte) 234 { 235 int ack = 0, i; 236 237 /* 238 * Make sure we're in TX mode. 239 */ 240 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 241 242 /* 243 * Feed in each bit and stobe the clock. 244 */ 245 for (i = 0x80; i; i >>= 1) { 246 if (byte & i) 247 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 248 else 249 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 250 DELAY(1); 251 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 252 DELAY(1); 253 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 254 } 255 256 /* 257 * Turn off TX mode. 258 */ 259 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 260 261 /* 262 * Check for ack. 263 */ 264 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 265 ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN; 266 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 267 268 return(ack); 269 } 270 271 /* 272 * Read a byte of data stored in the EEPROM at address 'addr.' 273 * We have to send two address bytes since the EEPROM can hold 274 * more than 256 bytes of data. 275 */ 276 static uint8_t 277 ti_eeprom_getbyte(struct ti_softc *sc, int addr, uint8_t *dest) 278 { 279 struct ifnet *ifp = &sc->arpcom.ac_if; 280 int i; 281 uint8_t byte = 0; 282 283 EEPROM_START; 284 285 /* 286 * Send write control code to EEPROM. 287 */ 288 if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 289 if_printf(ifp, "failed to send write command, status: %x\n", 290 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 291 return(1); 292 } 293 294 /* 295 * Send first byte of address of byte we want to read. 296 */ 297 if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) { 298 if_printf(ifp, "failed to send address, status: %x\n", 299 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 300 return(1); 301 } 302 /* 303 * Send second byte address of byte we want to read. 304 */ 305 if (ti_eeprom_putbyte(sc, addr & 0xFF)) { 306 if_printf(ifp, "failed to send address, status: %x\n", 307 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 308 return(1); 309 } 310 311 EEPROM_STOP; 312 EEPROM_START; 313 /* 314 * Send read control code to EEPROM. 315 */ 316 if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 317 if_printf(ifp, "failed to send read command, status: %x\n", 318 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 319 return(1); 320 } 321 322 /* 323 * Start reading bits from EEPROM. 324 */ 325 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 326 for (i = 0x80; i; i >>= 1) { 327 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 328 DELAY(1); 329 if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN) 330 byte |= i; 331 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 332 DELAY(1); 333 } 334 335 EEPROM_STOP; 336 337 /* 338 * No ACK generated for read, so just return byte. 339 */ 340 341 *dest = byte; 342 343 return(0); 344 } 345 346 /* 347 * Read a sequence of bytes from the EEPROM. 348 */ 349 static int 350 ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt) 351 { 352 int err = 0, i; 353 uint8_t byte = 0; 354 355 for (i = 0; i < cnt; i++) { 356 err = ti_eeprom_getbyte(sc, off + i, &byte); 357 if (err) 358 break; 359 *(dest + i) = byte; 360 } 361 362 return(err ? 1 : 0); 363 } 364 365 /* 366 * NIC memory access function. Can be used to either clear a section 367 * of NIC local memory or (if buf is non-NULL) copy data into it. 368 */ 369 static void 370 ti_mem(struct ti_softc *sc, uint32_t addr, uint32_t len, caddr_t buf) 371 { 372 int cnt, segptr, segsize; 373 caddr_t ti_winbase, ptr; 374 375 segptr = addr; 376 cnt = len; 377 ti_winbase = (caddr_t)(sc->ti_vhandle + TI_WINDOW); 378 ptr = buf; 379 380 while(cnt) { 381 if (cnt < TI_WINLEN) 382 segsize = cnt; 383 else 384 segsize = TI_WINLEN - (segptr % TI_WINLEN); 385 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 386 if (buf == NULL) 387 bzero((char *)ti_winbase + (segptr & 388 (TI_WINLEN - 1)), segsize); 389 else { 390 bcopy((char *)ptr, (char *)ti_winbase + 391 (segptr & (TI_WINLEN - 1)), segsize); 392 ptr += segsize; 393 } 394 segptr += segsize; 395 cnt -= segsize; 396 } 397 } 398 399 /* 400 * Load firmware image into the NIC. Check that the firmware revision 401 * is acceptable and see if we want the firmware for the Tigon 1 or 402 * Tigon 2. 403 */ 404 static void 405 ti_loadfw(struct ti_softc *sc) 406 { 407 struct ifnet *ifp = &sc->arpcom.ac_if; 408 409 switch(sc->ti_hwrev) { 410 case TI_HWREV_TIGON: 411 if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR || 412 tigonFwReleaseMinor != TI_FIRMWARE_MINOR || 413 tigonFwReleaseFix != TI_FIRMWARE_FIX) { 414 if_printf(ifp, "firmware revision mismatch; want " 415 "%d.%d.%d, got %d.%d.%d\n", 416 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 417 TI_FIRMWARE_FIX, tigonFwReleaseMajor, 418 tigonFwReleaseMinor, tigonFwReleaseFix); 419 return; 420 } 421 ti_mem(sc, tigonFwTextAddr, tigonFwTextLen, 422 (caddr_t)tigonFwText); 423 ti_mem(sc, tigonFwDataAddr, tigonFwDataLen, 424 (caddr_t)tigonFwData); 425 ti_mem(sc, tigonFwRodataAddr, tigonFwRodataLen, 426 (caddr_t)tigonFwRodata); 427 ti_mem(sc, tigonFwBssAddr, tigonFwBssLen, NULL); 428 ti_mem(sc, tigonFwSbssAddr, tigonFwSbssLen, NULL); 429 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr); 430 break; 431 case TI_HWREV_TIGON_II: 432 if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR || 433 tigon2FwReleaseMinor != TI_FIRMWARE_MINOR || 434 tigon2FwReleaseFix != TI_FIRMWARE_FIX) { 435 if_printf(ifp, "firmware revision mismatch; want " 436 "%d.%d.%d, got %d.%d.%d\n", 437 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 438 TI_FIRMWARE_FIX, tigon2FwReleaseMajor, 439 tigon2FwReleaseMinor, tigon2FwReleaseFix); 440 return; 441 } 442 ti_mem(sc, tigon2FwTextAddr, tigon2FwTextLen, 443 (caddr_t)tigon2FwText); 444 ti_mem(sc, tigon2FwDataAddr, tigon2FwDataLen, 445 (caddr_t)tigon2FwData); 446 ti_mem(sc, tigon2FwRodataAddr, tigon2FwRodataLen, 447 (caddr_t)tigon2FwRodata); 448 ti_mem(sc, tigon2FwBssAddr, tigon2FwBssLen, NULL); 449 ti_mem(sc, tigon2FwSbssAddr, tigon2FwSbssLen, NULL); 450 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr); 451 break; 452 default: 453 if_printf(ifp, "can't load firmware: unknown hardware rev\n"); 454 break; 455 } 456 } 457 458 /* 459 * Send the NIC a command via the command ring. 460 */ 461 static void 462 ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd) 463 { 464 uint32_t index; 465 466 if (sc->ti_rdata->ti_cmd_ring == NULL) 467 return; 468 469 index = sc->ti_cmd_saved_prodidx; 470 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd)); 471 TI_INC(index, TI_CMD_RING_CNT); 472 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 473 sc->ti_cmd_saved_prodidx = index; 474 } 475 476 /* 477 * Send the NIC an extended command. The 'len' parameter specifies the 478 * number of command slots to include after the initial command. 479 */ 480 static void 481 ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg, int len) 482 { 483 uint32_t index; 484 int i; 485 486 if (sc->ti_rdata->ti_cmd_ring == NULL) 487 return; 488 489 index = sc->ti_cmd_saved_prodidx; 490 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd)); 491 TI_INC(index, TI_CMD_RING_CNT); 492 for (i = 0; i < len; i++) { 493 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), 494 *(uint32_t *)(&arg[i * 4])); 495 TI_INC(index, TI_CMD_RING_CNT); 496 } 497 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 498 sc->ti_cmd_saved_prodidx = index; 499 } 500 501 /* 502 * Handle events that have triggered interrupts. 503 */ 504 static void 505 ti_handle_events(struct ti_softc *sc) 506 { 507 struct ifnet *ifp = &sc->arpcom.ac_if; 508 struct ti_event_desc *e; 509 510 if (sc->ti_rdata->ti_event_ring == NULL) 511 return; 512 513 while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) { 514 e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx]; 515 switch(e->ti_event) { 516 case TI_EV_LINKSTAT_CHANGED: 517 sc->ti_linkstat = e->ti_code; 518 if (e->ti_code == TI_EV_CODE_LINK_UP) { 519 if_printf(ifp, "10/100 link up\n"); 520 } else if (e->ti_code == TI_EV_CODE_GIG_LINK_UP) { 521 if_printf(ifp, "gigabit link up\n"); 522 } else if (e->ti_code == TI_EV_CODE_LINK_DOWN) { 523 if_printf(ifp, "link down\n"); 524 } 525 break; 526 case TI_EV_ERROR: 527 if (e->ti_code == TI_EV_CODE_ERR_INVAL_CMD) { 528 if_printf(ifp, "invalid command\n"); 529 } else if (e->ti_code == TI_EV_CODE_ERR_UNIMP_CMD) { 530 if_printf(ifp, "unknown command\n"); 531 } else if (e->ti_code == TI_EV_CODE_ERR_BADCFG) { 532 if_printf(ifp, "bad config data\n"); 533 } 534 break; 535 case TI_EV_FIRMWARE_UP: 536 ti_init2(sc); 537 break; 538 case TI_EV_STATS_UPDATED: 539 ti_stats_update(sc); 540 break; 541 case TI_EV_RESET_JUMBO_RING: 542 case TI_EV_MCAST_UPDATED: 543 /* Who cares. */ 544 break; 545 default: 546 if_printf(ifp, "unknown event: %d\n", e->ti_event); 547 break; 548 } 549 /* Advance the consumer index. */ 550 TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT); 551 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx); 552 } 553 } 554 555 /* 556 * Memory management for the jumbo receive ring is a pain in the 557 * butt. We need to allocate at least 9018 bytes of space per frame, 558 * _and_ it has to be contiguous (unless you use the extended 559 * jumbo descriptor format). Using malloc() all the time won't 560 * work: malloc() allocates memory in powers of two, which means we 561 * would end up wasting a considerable amount of space by allocating 562 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have 563 * to do our own memory management. 564 * 565 * The driver needs to allocate a contiguous chunk of memory at boot 566 * time. We then chop this up ourselves into 9K pieces and use them 567 * as external mbuf storage. 568 * 569 * One issue here is how much memory to allocate. The jumbo ring has 570 * 256 slots in it, but at 9K per slot than can consume over 2MB of 571 * RAM. This is a bit much, especially considering we also need 572 * RAM for the standard ring and mini ring (on the Tigon 2). To 573 * save space, we only actually allocate enough memory for 64 slots 574 * by default, which works out to between 500 and 600K. This can 575 * be tuned by changing a #define in if_tireg.h. 576 */ 577 578 static int 579 ti_alloc_jumbo_mem(struct ti_softc *sc) 580 { 581 struct ti_jslot *entry; 582 caddr_t ptr; 583 int i; 584 585 /* Grab a big chunk o' storage. */ 586 sc->ti_cdata.ti_jumbo_buf = contigmalloc(TI_JMEM, M_DEVBUF, 587 M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); 588 589 if (sc->ti_cdata.ti_jumbo_buf == NULL) { 590 if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n"); 591 return(ENOBUFS); 592 } 593 594 lwkt_serialize_init(&sc->ti_jslot_serializer); 595 SLIST_INIT(&sc->ti_jfree_listhead); 596 597 /* 598 * Now divide it up into 9K pieces and save the addresses 599 * in an array. Note that we play an evil trick here by using 600 * the first few bytes in the buffer to hold the the address 601 * of the softc structure for this interface. This is because 602 * ti_jfree() needs it, but it is called by the mbuf management 603 * code which will not pass it to us explicitly. 604 */ 605 ptr = sc->ti_cdata.ti_jumbo_buf; 606 for (i = 0; i < TI_JSLOTS; i++) { 607 entry = &sc->ti_cdata.ti_jslots[i]; 608 entry->ti_sc = sc; 609 entry->ti_buf = ptr; 610 entry->ti_inuse = 0; 611 entry->ti_slot = i; 612 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jslot_link); 613 ptr += TI_JLEN; 614 } 615 616 return(0); 617 } 618 619 /* 620 * Allocate a jumbo buffer. 621 */ 622 static struct ti_jslot * 623 ti_jalloc(struct ti_softc *sc) 624 { 625 struct ti_jslot *entry; 626 627 lwkt_serialize_enter(&sc->ti_jslot_serializer); 628 entry = SLIST_FIRST(&sc->ti_jfree_listhead); 629 if (entry) { 630 SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jslot_link); 631 entry->ti_inuse = 1; 632 } else { 633 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n"); 634 } 635 lwkt_serialize_exit(&sc->ti_jslot_serializer); 636 return(entry); 637 } 638 639 /* 640 * Adjust usage count on a jumbo buffer. In general this doesn't 641 * get used much because our jumbo buffers don't get passed around 642 * too much, but it's implemented for correctness. 643 */ 644 static void 645 ti_jref(void *arg) 646 { 647 struct ti_jslot *entry = (struct ti_jslot *)arg; 648 struct ti_softc *sc = entry->ti_sc; 649 650 if (sc == NULL) 651 panic("ti_jref: can't find softc pointer!"); 652 653 if (&sc->ti_cdata.ti_jslots[entry->ti_slot] != entry) 654 panic("ti_jref: asked to reference buffer " 655 "that we don't manage!"); 656 if (entry->ti_inuse == 0) 657 panic("ti_jref: buffer already free!"); 658 atomic_add_int(&entry->ti_inuse, 1); 659 } 660 661 /* 662 * Release a jumbo buffer. 663 */ 664 static void 665 ti_jfree(void *arg) 666 { 667 struct ti_jslot *entry = (struct ti_jslot *)arg; 668 struct ti_softc *sc = entry->ti_sc; 669 670 if (sc == NULL) 671 panic("ti_jref: can't find softc pointer!"); 672 673 if (&sc->ti_cdata.ti_jslots[entry->ti_slot] != entry) 674 panic("ti_jref: asked to reference buffer " 675 "that we don't manage!"); 676 if (entry->ti_inuse == 0) 677 panic("ti_jref: buffer already free!"); 678 lwkt_serialize_enter(&sc->ti_jslot_serializer); 679 atomic_subtract_int(&entry->ti_inuse, 1); 680 if (entry->ti_inuse == 0) 681 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jslot_link); 682 lwkt_serialize_exit(&sc->ti_jslot_serializer); 683 } 684 685 686 /* 687 * Intialize a standard receive ring descriptor. 688 */ 689 static int 690 ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m) 691 { 692 struct mbuf *m_new; 693 struct ti_rx_desc *r; 694 695 if (m == NULL) { 696 m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 697 if (m_new == NULL) 698 return (ENOBUFS); 699 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 700 } else { 701 m_new = m; 702 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 703 m_new->m_data = m_new->m_ext.ext_buf; 704 } 705 706 707 m_adj(m_new, ETHER_ALIGN); 708 sc->ti_cdata.ti_rx_std_chain[i] = m_new; 709 r = &sc->ti_rdata->ti_rx_std_ring[i]; 710 TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t)); 711 r->ti_type = TI_BDTYPE_RECV_BD; 712 r->ti_flags = 0; 713 if (sc->arpcom.ac_if.if_hwassist) 714 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; 715 r->ti_len = m_new->m_len; 716 r->ti_idx = i; 717 718 return(0); 719 } 720 721 /* 722 * Intialize a mini receive ring descriptor. This only applies to 723 * the Tigon 2. 724 */ 725 static int 726 ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m) 727 { 728 struct mbuf *m_new; 729 struct ti_rx_desc *r; 730 731 if (m == NULL) { 732 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 733 if (m_new == NULL) { 734 return(ENOBUFS); 735 } 736 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 737 } else { 738 m_new = m; 739 m_new->m_data = m_new->m_pktdat; 740 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 741 } 742 743 m_adj(m_new, ETHER_ALIGN); 744 r = &sc->ti_rdata->ti_rx_mini_ring[i]; 745 sc->ti_cdata.ti_rx_mini_chain[i] = m_new; 746 TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t)); 747 r->ti_type = TI_BDTYPE_RECV_BD; 748 r->ti_flags = TI_BDFLAG_MINI_RING; 749 if (sc->arpcom.ac_if.if_hwassist) 750 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; 751 r->ti_len = m_new->m_len; 752 r->ti_idx = i; 753 754 return(0); 755 } 756 757 /* 758 * Initialize a jumbo receive ring descriptor. This allocates 759 * a jumbo buffer from the pool managed internally by the driver. 760 */ 761 static int 762 ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m) 763 { 764 struct mbuf *m_new; 765 struct ti_rx_desc *r; 766 struct ti_jslot *buf; 767 768 if (m == NULL) { 769 /* Allocate the mbuf. */ 770 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 771 if (m_new == NULL) { 772 return(ENOBUFS); 773 } 774 775 /* Allocate the jumbo buffer */ 776 buf = ti_jalloc(sc); 777 if (buf == NULL) { 778 m_freem(m_new); 779 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed " 780 "-- packet dropped!\n"); 781 return(ENOBUFS); 782 } 783 784 /* Attach the buffer to the mbuf. */ 785 m_new->m_ext.ext_arg = buf; 786 m_new->m_ext.ext_buf = buf->ti_buf; 787 m_new->m_ext.ext_free = ti_jfree; 788 m_new->m_ext.ext_ref = ti_jref; 789 m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN; 790 791 m_new->m_flags |= M_EXT; 792 } else { 793 /* 794 * We're re-using a previously allocated mbuf; 795 * be sure to re-init pointers and lengths to 796 * default values. 797 */ 798 KKASSERT(m->m_flags & M_EXT); 799 m_new = m; 800 } 801 m_new->m_data = m_new->m_ext.ext_buf; 802 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; 803 804 m_adj(m_new, ETHER_ALIGN); 805 /* Set up the descriptor. */ 806 r = &sc->ti_rdata->ti_rx_jumbo_ring[i]; 807 sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new; 808 TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t)); 809 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; 810 r->ti_flags = TI_BDFLAG_JUMBO_RING; 811 if (sc->arpcom.ac_if.if_hwassist) 812 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; 813 r->ti_len = m_new->m_len; 814 r->ti_idx = i; 815 816 return(0); 817 } 818 819 /* 820 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 821 * that's 1MB or memory, which is a lot. For now, we fill only the first 822 * 256 ring entries and hope that our CPU is fast enough to keep up with 823 * the NIC. 824 */ 825 static int 826 ti_init_rx_ring_std(struct ti_softc *sc) 827 { 828 int i; 829 struct ti_cmd_desc cmd; 830 831 for (i = 0; i < TI_SSLOTS; i++) { 832 if (ti_newbuf_std(sc, i, NULL) == ENOBUFS) 833 return(ENOBUFS); 834 }; 835 836 TI_UPDATE_STDPROD(sc, i - 1); 837 sc->ti_std = i - 1; 838 839 return(0); 840 } 841 842 static void 843 ti_free_rx_ring_std(struct ti_softc *sc) 844 { 845 int i; 846 847 for (i = 0; i < TI_STD_RX_RING_CNT; i++) { 848 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) { 849 m_freem(sc->ti_cdata.ti_rx_std_chain[i]); 850 sc->ti_cdata.ti_rx_std_chain[i] = NULL; 851 } 852 bzero(&sc->ti_rdata->ti_rx_std_ring[i], 853 sizeof(struct ti_rx_desc)); 854 } 855 } 856 857 static int 858 ti_init_rx_ring_jumbo(struct ti_softc *sc) 859 { 860 int i; 861 struct ti_cmd_desc cmd; 862 863 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 864 if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 865 return(ENOBUFS); 866 } 867 868 TI_UPDATE_JUMBOPROD(sc, i - 1); 869 sc->ti_jumbo = i - 1; 870 871 return(0); 872 } 873 874 static void 875 ti_free_rx_ring_jumbo(struct ti_softc *sc) 876 { 877 int i; 878 879 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 880 if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) { 881 m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]); 882 sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL; 883 } 884 bzero(&sc->ti_rdata->ti_rx_jumbo_ring[i], 885 sizeof(struct ti_rx_desc)); 886 } 887 } 888 889 static int 890 ti_init_rx_ring_mini(struct ti_softc *sc) 891 { 892 int i; 893 894 for (i = 0; i < TI_MSLOTS; i++) { 895 if (ti_newbuf_mini(sc, i, NULL) == ENOBUFS) 896 return(ENOBUFS); 897 } 898 899 TI_UPDATE_MINIPROD(sc, i - 1); 900 sc->ti_mini = i - 1; 901 902 return(0); 903 } 904 905 static void 906 ti_free_rx_ring_mini(struct ti_softc *sc) 907 { 908 int i; 909 910 for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { 911 if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) { 912 m_freem(sc->ti_cdata.ti_rx_mini_chain[i]); 913 sc->ti_cdata.ti_rx_mini_chain[i] = NULL; 914 } 915 bzero(&sc->ti_rdata->ti_rx_mini_ring[i], 916 sizeof(struct ti_rx_desc)); 917 } 918 } 919 920 static void 921 ti_free_tx_ring(struct ti_softc *sc) 922 { 923 int i; 924 925 if (sc->ti_rdata->ti_tx_ring == NULL) 926 return; 927 928 for (i = 0; i < TI_TX_RING_CNT; i++) { 929 if (sc->ti_cdata.ti_tx_chain[i] != NULL) { 930 m_freem(sc->ti_cdata.ti_tx_chain[i]); 931 sc->ti_cdata.ti_tx_chain[i] = NULL; 932 } 933 bzero(&sc->ti_rdata->ti_tx_ring[i], 934 sizeof(struct ti_tx_desc)); 935 } 936 } 937 938 static int 939 ti_init_tx_ring(struct ti_softc *sc) 940 { 941 sc->ti_txcnt = 0; 942 sc->ti_tx_saved_considx = 0; 943 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0); 944 return(0); 945 } 946 947 /* 948 * The Tigon 2 firmware has a new way to add/delete multicast addresses, 949 * but we have to support the old way too so that Tigon 1 cards will 950 * work. 951 */ 952 static void 953 ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr) 954 { 955 struct ti_cmd_desc cmd; 956 uint16_t *m; 957 uint32_t ext[2] = {0, 0}; 958 959 m = (uint16_t *)&addr->octet[0]; 960 961 switch(sc->ti_hwrev) { 962 case TI_HWREV_TIGON: 963 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 964 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 965 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0); 966 break; 967 case TI_HWREV_TIGON_II: 968 ext[0] = htons(m[0]); 969 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 970 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2); 971 break; 972 default: 973 if_printf(&sc->arpcom.ac_if, "unknown hwrev\n"); 974 break; 975 } 976 } 977 978 static void 979 ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr) 980 { 981 struct ti_cmd_desc cmd; 982 uint16_t *m; 983 uint32_t ext[2] = {0, 0}; 984 985 m = (uint16_t *)&addr->octet[0]; 986 987 switch(sc->ti_hwrev) { 988 case TI_HWREV_TIGON: 989 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 990 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 991 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0); 992 break; 993 case TI_HWREV_TIGON_II: 994 ext[0] = htons(m[0]); 995 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 996 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2); 997 break; 998 default: 999 if_printf(&sc->arpcom.ac_if, "unknown hwrev\n"); 1000 break; 1001 } 1002 } 1003 1004 /* 1005 * Configure the Tigon's multicast address filter. 1006 * 1007 * The actual multicast table management is a bit of a pain, thanks to 1008 * slight brain damage on the part of both Alteon and us. With our 1009 * multicast code, we are only alerted when the multicast address table 1010 * changes and at that point we only have the current list of addresses: 1011 * we only know the current state, not the previous state, so we don't 1012 * actually know what addresses were removed or added. The firmware has 1013 * state, but we can't get our grubby mits on it, and there is no 'delete 1014 * all multicast addresses' command. Hence, we have to maintain our own 1015 * state so we know what addresses have been programmed into the NIC at 1016 * any given time. 1017 */ 1018 static void 1019 ti_setmulti(struct ti_softc *sc) 1020 { 1021 struct ifnet *ifp = &sc->arpcom.ac_if; 1022 struct ifmultiaddr *ifma; 1023 struct ti_cmd_desc cmd; 1024 struct ti_mc_entry *mc; 1025 uint32_t intrs; 1026 1027 if (ifp->if_flags & IFF_ALLMULTI) { 1028 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0); 1029 return; 1030 } 1031 1032 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0); 1033 1034 /* Disable interrupts. */ 1035 intrs = CSR_READ_4(sc, TI_MB_HOSTINTR); 1036 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1037 1038 /* First, zot all the existing filters. */ 1039 while (sc->ti_mc_listhead.slh_first != NULL) { 1040 mc = sc->ti_mc_listhead.slh_first; 1041 ti_del_mcast(sc, &mc->mc_addr); 1042 SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries); 1043 kfree(mc, M_DEVBUF); 1044 } 1045 1046 /* Now program new ones. */ 1047 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1048 if (ifma->ifma_addr->sa_family != AF_LINK) 1049 continue; 1050 mc = kmalloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_INTWAIT); 1051 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1052 &mc->mc_addr, ETHER_ADDR_LEN); 1053 SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); 1054 ti_add_mcast(sc, &mc->mc_addr); 1055 } 1056 1057 /* Re-enable interrupts. */ 1058 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); 1059 } 1060 1061 /* 1062 * Check to see if the BIOS has configured us for a 64 bit slot when 1063 * we aren't actually in one. If we detect this condition, we can work 1064 * around it on the Tigon 2 by setting a bit in the PCI state register, 1065 * but for the Tigon 1 we must give up and abort the interface attach. 1066 */ 1067 static int 1068 ti_64bitslot_war(struct ti_softc *sc) 1069 { 1070 if ((CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS) == 0) { 1071 CSR_WRITE_4(sc, 0x600, 0); 1072 CSR_WRITE_4(sc, 0x604, 0); 1073 CSR_WRITE_4(sc, 0x600, 0x5555AAAA); 1074 if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) { 1075 if (sc->ti_hwrev == TI_HWREV_TIGON) 1076 return(EINVAL); 1077 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_32BIT_BUS); 1078 return(0); 1079 } 1080 } 1081 1082 return(0); 1083 } 1084 1085 /* 1086 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1087 * self-test results. 1088 */ 1089 static int 1090 ti_chipinit(struct ti_softc *sc) 1091 { 1092 struct ifnet *ifp = &sc->arpcom.ac_if; 1093 uint32_t cacheline; 1094 uint32_t pci_writemax = 0; 1095 1096 /* Initialize link to down state. */ 1097 sc->ti_linkstat = TI_EV_CODE_LINK_DOWN; 1098 1099 if (ifp->if_capenable & IFCAP_HWCSUM) 1100 ifp->if_hwassist = TI_CSUM_FEATURES; 1101 else 1102 ifp->if_hwassist = 0; 1103 1104 /* Set endianness before we access any non-PCI registers. */ 1105 #if BYTE_ORDER == BIG_ENDIAN 1106 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1107 TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24)); 1108 #else 1109 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1110 TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24)); 1111 #endif 1112 1113 /* Check the ROM failed bit to see if self-tests passed. */ 1114 if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) { 1115 if_printf(ifp, "board self-diagnostics failed!\n"); 1116 return(ENODEV); 1117 } 1118 1119 /* Halt the CPU. */ 1120 TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT); 1121 1122 /* Figure out the hardware revision. */ 1123 switch(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) { 1124 case TI_REV_TIGON_I: 1125 sc->ti_hwrev = TI_HWREV_TIGON; 1126 break; 1127 case TI_REV_TIGON_II: 1128 sc->ti_hwrev = TI_HWREV_TIGON_II; 1129 break; 1130 default: 1131 if_printf(ifp, "unsupported chip revision\n"); 1132 return(ENODEV); 1133 } 1134 1135 /* Do special setup for Tigon 2. */ 1136 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1137 TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT); 1138 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K); 1139 TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS); 1140 } 1141 1142 /* Set up the PCI state register. */ 1143 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD); 1144 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1145 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT); 1146 } 1147 1148 /* Clear the read/write max DMA parameters. */ 1149 TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA| 1150 TI_PCISTATE_READ_MAXDMA)); 1151 1152 /* Get cache line size. */ 1153 cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF; 1154 1155 /* 1156 * If the system has set enabled the PCI memory write 1157 * and invalidate command in the command register, set 1158 * the write max parameter accordingly. This is necessary 1159 * to use MWI with the Tigon 2. 1160 */ 1161 if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) { 1162 switch(cacheline) { 1163 case 1: 1164 case 4: 1165 case 8: 1166 case 16: 1167 case 32: 1168 case 64: 1169 break; 1170 default: 1171 /* Disable PCI memory write and invalidate. */ 1172 if (bootverbose) { 1173 if_printf(ifp, "cache line size %d not " 1174 "supported; disabling PCI MWI\n", 1175 cacheline); 1176 } 1177 CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc, 1178 TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN); 1179 break; 1180 } 1181 } 1182 1183 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax); 1184 1185 /* This sets the min dma param all the way up (0xff). */ 1186 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA); 1187 1188 /* Configure DMA variables. */ 1189 #if BYTE_ORDER == BIG_ENDIAN 1190 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD | 1191 TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD | 1192 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB | 1193 TI_OPMODE_DONT_FRAG_JUMBO); 1194 #else 1195 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA| 1196 TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO| 1197 TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB); 1198 #endif 1199 1200 /* 1201 * Only allow 1 DMA channel to be active at a time. 1202 * I don't think this is a good idea, but without it 1203 * the firmware racks up lots of nicDmaReadRingFull 1204 * errors. This is not compatible with hardware checksums. 1205 */ 1206 if (ifp->if_hwassist == 0) 1207 TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE); 1208 1209 /* Recommended settings from Tigon manual. */ 1210 CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W); 1211 CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W); 1212 1213 if (ti_64bitslot_war(sc)) { 1214 if_printf(ifp, "bios thinks we're in a 64 bit slot, " 1215 "but we aren't"); 1216 return(EINVAL); 1217 } 1218 1219 return(0); 1220 } 1221 1222 /* 1223 * Initialize the general information block and firmware, and 1224 * start the CPU(s) running. 1225 */ 1226 static int 1227 ti_gibinit(struct ti_softc *sc) 1228 { 1229 struct ifnet *ifp = &sc->arpcom.ac_if; 1230 struct ti_rcb *rcb; 1231 int i; 1232 1233 /* Disable interrupts for now. */ 1234 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1235 1236 /* Tell the chip where to find the general information block. */ 1237 CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0); 1238 CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, vtophys(&sc->ti_rdata->ti_info)); 1239 1240 /* Load the firmware into SRAM. */ 1241 ti_loadfw(sc); 1242 1243 /* Set up the contents of the general info and ring control blocks. */ 1244 1245 /* Set up the event ring and producer pointer. */ 1246 rcb = &sc->ti_rdata->ti_info.ti_ev_rcb; 1247 1248 TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_event_ring); 1249 rcb->ti_flags = 0; 1250 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) = 1251 vtophys(&sc->ti_ev_prodidx); 1252 sc->ti_ev_prodidx.ti_idx = 0; 1253 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0); 1254 sc->ti_ev_saved_considx = 0; 1255 1256 /* Set up the command ring and producer mailbox. */ 1257 rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb; 1258 1259 sc->ti_rdata->ti_cmd_ring = 1260 (struct ti_cmd_desc *)(sc->ti_vhandle + TI_GCR_CMDRING); 1261 TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING); 1262 rcb->ti_flags = 0; 1263 rcb->ti_max_len = 0; 1264 for (i = 0; i < TI_CMD_RING_CNT; i++) 1265 CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0); 1266 CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0); 1267 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0); 1268 sc->ti_cmd_saved_prodidx = 0; 1269 1270 /* 1271 * Assign the address of the stats refresh buffer. 1272 * We re-use the current stats buffer for this to 1273 * conserve memory. 1274 */ 1275 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) = 1276 vtophys(&sc->ti_rdata->ti_info.ti_stats); 1277 1278 /* Set up the standard receive ring. */ 1279 rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb; 1280 TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_rx_std_ring); 1281 rcb->ti_max_len = TI_FRAMELEN; 1282 rcb->ti_flags = 0; 1283 if (ifp->if_hwassist) 1284 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1285 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1286 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1287 1288 /* Set up the jumbo receive ring. */ 1289 rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb; 1290 TI_HOSTADDR(rcb->ti_hostaddr) = 1291 vtophys(&sc->ti_rdata->ti_rx_jumbo_ring); 1292 rcb->ti_max_len = TI_JUMBO_FRAMELEN; 1293 rcb->ti_flags = 0; 1294 if (ifp->if_hwassist) 1295 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1296 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1297 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1298 1299 /* 1300 * Set up the mini ring. Only activated on the 1301 * Tigon 2 but the slot in the config block is 1302 * still there on the Tigon 1. 1303 */ 1304 rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb; 1305 TI_HOSTADDR(rcb->ti_hostaddr) = 1306 vtophys(&sc->ti_rdata->ti_rx_mini_ring); 1307 rcb->ti_max_len = MHLEN - ETHER_ALIGN; 1308 if (sc->ti_hwrev == TI_HWREV_TIGON) 1309 rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED; 1310 else 1311 rcb->ti_flags = 0; 1312 if (ifp->if_hwassist) 1313 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1314 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1315 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1316 1317 /* 1318 * Set up the receive return ring. 1319 */ 1320 rcb = &sc->ti_rdata->ti_info.ti_return_rcb; 1321 TI_HOSTADDR(rcb->ti_hostaddr) = 1322 vtophys(&sc->ti_rdata->ti_rx_return_ring); 1323 rcb->ti_flags = 0; 1324 rcb->ti_max_len = TI_RETURN_RING_CNT; 1325 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) = 1326 vtophys(&sc->ti_return_prodidx); 1327 1328 /* 1329 * Set up the tx ring. Note: for the Tigon 2, we have the option 1330 * of putting the transmit ring in the host's address space and 1331 * letting the chip DMA it instead of leaving the ring in the NIC's 1332 * memory and accessing it through the shared memory region. We 1333 * do this for the Tigon 2, but it doesn't work on the Tigon 1, 1334 * so we have to revert to the shared memory scheme if we detect 1335 * a Tigon 1 chip. 1336 */ 1337 CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); 1338 if (sc->ti_hwrev == TI_HWREV_TIGON) { 1339 sc->ti_rdata->ti_tx_ring_nic = 1340 (struct ti_tx_desc *)(sc->ti_vhandle + TI_WINDOW); 1341 } 1342 bzero(sc->ti_rdata->ti_tx_ring, 1343 TI_TX_RING_CNT * sizeof(struct ti_tx_desc)); 1344 rcb = &sc->ti_rdata->ti_info.ti_tx_rcb; 1345 if (sc->ti_hwrev == TI_HWREV_TIGON) 1346 rcb->ti_flags = 0; 1347 else 1348 rcb->ti_flags = TI_RCB_FLAG_HOST_RING; 1349 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1350 if (ifp->if_hwassist) 1351 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1352 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1353 rcb->ti_max_len = TI_TX_RING_CNT; 1354 if (sc->ti_hwrev == TI_HWREV_TIGON) 1355 TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE; 1356 else 1357 TI_HOSTADDR(rcb->ti_hostaddr) = 1358 vtophys(&sc->ti_rdata->ti_tx_ring); 1359 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) = 1360 vtophys(&sc->ti_tx_considx); 1361 1362 /* Set up tuneables */ 1363 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 1364 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, 1365 (sc->ti_rx_coal_ticks / 10)); 1366 else 1367 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks); 1368 CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks); 1369 CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); 1370 CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds); 1371 CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds); 1372 CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio); 1373 1374 /* Turn interrupts on. */ 1375 CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0); 1376 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 1377 1378 /* Start CPU. */ 1379 TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP)); 1380 1381 return(0); 1382 } 1383 1384 /* 1385 * Probe for a Tigon chip. Check the PCI vendor and device IDs 1386 * against our list and return its name if we find a match. 1387 */ 1388 static int 1389 ti_probe(device_t dev) 1390 { 1391 struct ti_type *t; 1392 uint16_t vendor, product; 1393 1394 vendor = pci_get_vendor(dev); 1395 product = pci_get_device(dev); 1396 1397 for (t = ti_devs; t->ti_name != NULL; t++) { 1398 if (vendor == t->ti_vid && product == t->ti_did) { 1399 device_set_desc(dev, t->ti_name); 1400 return(0); 1401 } 1402 } 1403 1404 return(ENXIO); 1405 } 1406 1407 static int 1408 ti_attach(device_t dev) 1409 { 1410 struct ti_softc *sc; 1411 struct ifnet *ifp; 1412 int error = 0, rid; 1413 uint8_t eaddr[ETHER_ADDR_LEN]; 1414 1415 sc = device_get_softc(dev); 1416 ifp = &sc->arpcom.ac_if; 1417 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1418 ifp->if_capabilities = IFCAP_HWCSUM | 1419 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 1420 ifp->if_capenable = ifp->if_capabilities; 1421 1422 pci_enable_busmaster(dev); 1423 1424 /* 1425 * Initialize media before any possible error may occur, 1426 * so we can destroy it unconditionally, if an error occurs later on. 1427 */ 1428 ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts); 1429 1430 rid = TI_PCI_LOMEM; 1431 sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1432 RF_ACTIVE); 1433 1434 if (sc->ti_res == NULL) { 1435 device_printf(dev, "couldn't map memory\n"); 1436 error = ENXIO; 1437 goto fail; 1438 } 1439 1440 sc->ti_btag = rman_get_bustag(sc->ti_res); 1441 sc->ti_bhandle = rman_get_bushandle(sc->ti_res); 1442 sc->ti_vhandle = (vm_offset_t)rman_get_virtual(sc->ti_res); 1443 1444 /* Allocate interrupt */ 1445 rid = 0; 1446 sc->ti_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1447 RF_SHAREABLE | RF_ACTIVE); 1448 if (sc->ti_irq == NULL) { 1449 device_printf(dev, "couldn't map interrupt\n"); 1450 error = ENXIO; 1451 goto fail; 1452 } 1453 1454 if (ti_chipinit(sc)) { 1455 device_printf(dev, "chip initialization failed\n"); 1456 error = ENXIO; 1457 goto fail; 1458 } 1459 1460 /* Zero out the NIC's on-board SRAM. */ 1461 ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL); 1462 1463 /* Init again -- zeroing memory may have clobbered some registers. */ 1464 if (ti_chipinit(sc)) { 1465 device_printf(dev, "chip initialization failed\n"); 1466 error = ENXIO; 1467 goto fail; 1468 } 1469 1470 /* 1471 * Get station address from the EEPROM. Note: the manual states 1472 * that the MAC address is at offset 0x8c, however the data is 1473 * stored as two longwords (since that's how it's loaded into 1474 * the NIC). This means the MAC address is actually preceeded 1475 * by two zero bytes. We need to skip over those. 1476 */ 1477 if (ti_read_eeprom(sc, eaddr, TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 1478 device_printf(dev, "failed to read station address\n"); 1479 error = ENXIO; 1480 goto fail; 1481 } 1482 1483 /* Allocate the general information block and ring buffers. */ 1484 sc->ti_rdata = contigmalloc(sizeof(struct ti_ring_data), M_DEVBUF, 1485 M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0); 1486 1487 if (sc->ti_rdata == NULL) { 1488 device_printf(dev, "no memory for list buffers!\n"); 1489 error = ENXIO; 1490 goto fail; 1491 } 1492 1493 /* Try to allocate memory for jumbo buffers. */ 1494 if (ti_alloc_jumbo_mem(sc)) { 1495 device_printf(dev, "jumbo buffer allocation failed\n"); 1496 error = ENXIO; 1497 goto fail; 1498 } 1499 1500 /* 1501 * We really need a better way to tell a 1000baseT card 1502 * from a 1000baseSX one, since in theory there could be 1503 * OEMed 1000baseT cards from lame vendors who aren't 1504 * clever enough to change the PCI ID. For the moment 1505 * though, the AceNIC is the only copper card available. 1506 */ 1507 if (pci_get_vendor(dev) == ALT_VENDORID && 1508 pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER) 1509 sc->ti_copper = 1; 1510 /* Ok, it's not the only copper card available. */ 1511 if (pci_get_vendor(dev) == NG_VENDORID && 1512 pci_get_device(dev) == NG_DEVICEID_GA620T) 1513 sc->ti_copper = 1; 1514 1515 /* Set default tuneable values. */ 1516 sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC; 1517 sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000; 1518 sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500; 1519 sc->ti_rx_max_coal_bds = 64; 1520 sc->ti_tx_max_coal_bds = 128; 1521 sc->ti_tx_buf_ratio = 21; 1522 1523 /* Set up ifnet structure */ 1524 ifp->if_softc = sc; 1525 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1526 ifp->if_ioctl = ti_ioctl; 1527 ifp->if_start = ti_start; 1528 ifp->if_watchdog = ti_watchdog; 1529 ifp->if_init = ti_init; 1530 ifp->if_mtu = ETHERMTU; 1531 ifq_set_maxlen(&ifp->if_snd, TI_TX_RING_CNT - 1); 1532 ifq_set_ready(&ifp->if_snd); 1533 1534 /* Set up ifmedia support. */ 1535 if (sc->ti_copper) { 1536 /* 1537 * Copper cards allow manual 10/100 mode selection, 1538 * but not manual 1000baseT mode selection. Why? 1539 * Becuase currently there's no way to specify the 1540 * master/slave setting through the firmware interface, 1541 * so Alteon decided to just bag it and handle it 1542 * via autonegotiation. 1543 */ 1544 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 1545 ifmedia_add(&sc->ifmedia, 1546 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 1547 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); 1548 ifmedia_add(&sc->ifmedia, 1549 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 1550 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL); 1551 ifmedia_add(&sc->ifmedia, 1552 IFM_ETHER|IFM_1000_T | IFM_FDX, 0, NULL); 1553 } else { 1554 /* Fiber cards don't support 10/100 modes. */ 1555 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 1556 ifmedia_add(&sc->ifmedia, 1557 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 1558 } 1559 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 1560 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO); 1561 1562 /* 1563 * Call MI attach routine. 1564 */ 1565 ether_ifattach(ifp, eaddr, NULL); 1566 1567 error = bus_setup_intr(dev, sc->ti_irq, INTR_NETSAFE, 1568 ti_intr, sc, &sc->ti_intrhand, 1569 ifp->if_serializer); 1570 if (error) { 1571 device_printf(dev, "couldn't set up irq\n"); 1572 ether_ifdetach(ifp); 1573 goto fail; 1574 } 1575 1576 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->ti_irq)); 1577 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 1578 1579 return 0; 1580 fail: 1581 ti_detach(dev); 1582 return(error); 1583 } 1584 1585 static int 1586 ti_detach(device_t dev) 1587 { 1588 struct ti_softc *sc = device_get_softc(dev); 1589 struct ifnet *ifp = &sc->arpcom.ac_if; 1590 1591 if (device_is_attached(dev)) { 1592 lwkt_serialize_enter(ifp->if_serializer); 1593 ti_stop(sc); 1594 bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); 1595 lwkt_serialize_exit(ifp->if_serializer); 1596 1597 ether_ifdetach(ifp); 1598 } 1599 1600 if (sc->ti_irq != NULL) 1601 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); 1602 if (sc->ti_res != NULL) { 1603 bus_release_resource(dev, SYS_RES_MEMORY, 1604 TI_PCI_LOMEM, sc->ti_res); 1605 } 1606 if (sc->ti_cdata.ti_jumbo_buf != NULL) 1607 contigfree(sc->ti_cdata.ti_jumbo_buf, TI_JMEM, M_DEVBUF); 1608 if (sc->ti_rdata != NULL) 1609 contigfree(sc->ti_rdata, sizeof(struct ti_ring_data), M_DEVBUF); 1610 ifmedia_removeall(&sc->ifmedia); 1611 1612 1613 return(0); 1614 } 1615 1616 /* 1617 * Frame reception handling. This is called if there's a frame 1618 * on the receive return list. 1619 * 1620 * Note: we have to be able to handle three possibilities here: 1621 * 1) the frame is from the mini receive ring (can only happen) 1622 * on Tigon 2 boards) 1623 * 2) the frame is from the jumbo recieve ring 1624 * 3) the frame is from the standard receive ring 1625 */ 1626 static void 1627 ti_rxeof(struct ti_softc *sc) 1628 { 1629 struct ifnet *ifp = &sc->arpcom.ac_if; 1630 struct ti_cmd_desc cmd; 1631 1632 while(sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) { 1633 struct ti_rx_desc *cur_rx; 1634 uint32_t rxidx; 1635 struct mbuf *m; 1636 uint16_t vlan_tag = 0; 1637 int have_tag = 0; 1638 1639 cur_rx = 1640 &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx]; 1641 rxidx = cur_rx->ti_idx; 1642 TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT); 1643 1644 if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) { 1645 have_tag = 1; 1646 vlan_tag = cur_rx->ti_vlan_tag & 0xfff; 1647 } 1648 1649 if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) { 1650 TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT); 1651 m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx]; 1652 sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL; 1653 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 1654 ifp->if_ierrors++; 1655 ti_newbuf_jumbo(sc, sc->ti_jumbo, m); 1656 continue; 1657 } 1658 if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) { 1659 ifp->if_ierrors++; 1660 ti_newbuf_jumbo(sc, sc->ti_jumbo, m); 1661 continue; 1662 } 1663 } else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) { 1664 TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT); 1665 m = sc->ti_cdata.ti_rx_mini_chain[rxidx]; 1666 sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL; 1667 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 1668 ifp->if_ierrors++; 1669 ti_newbuf_mini(sc, sc->ti_mini, m); 1670 continue; 1671 } 1672 if (ti_newbuf_mini(sc, sc->ti_mini, NULL) == ENOBUFS) { 1673 ifp->if_ierrors++; 1674 ti_newbuf_mini(sc, sc->ti_mini, m); 1675 continue; 1676 } 1677 } else { 1678 TI_INC(sc->ti_std, TI_STD_RX_RING_CNT); 1679 m = sc->ti_cdata.ti_rx_std_chain[rxidx]; 1680 sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL; 1681 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 1682 ifp->if_ierrors++; 1683 ti_newbuf_std(sc, sc->ti_std, m); 1684 continue; 1685 } 1686 if (ti_newbuf_std(sc, sc->ti_std, NULL) == ENOBUFS) { 1687 ifp->if_ierrors++; 1688 ti_newbuf_std(sc, sc->ti_std, m); 1689 continue; 1690 } 1691 } 1692 1693 m->m_pkthdr.len = m->m_len = cur_rx->ti_len; 1694 ifp->if_ipackets++; 1695 m->m_pkthdr.rcvif = ifp; 1696 1697 if (ifp->if_hwassist) { 1698 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | 1699 CSUM_DATA_VALID; 1700 if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0) 1701 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1702 m->m_pkthdr.csum_data = cur_rx->ti_tcp_udp_cksum; 1703 } 1704 1705 if (have_tag) { 1706 m->m_flags |= M_VLANTAG; 1707 m->m_pkthdr.ether_vlantag = vlan_tag; 1708 } 1709 ifp->if_input(ifp, m); 1710 } 1711 1712 /* Only necessary on the Tigon 1. */ 1713 if (sc->ti_hwrev == TI_HWREV_TIGON) 1714 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 1715 sc->ti_rx_saved_considx); 1716 1717 TI_UPDATE_STDPROD(sc, sc->ti_std); 1718 TI_UPDATE_MINIPROD(sc, sc->ti_mini); 1719 TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo); 1720 } 1721 1722 static void 1723 ti_txeof(struct ti_softc *sc) 1724 { 1725 struct ifnet *ifp = &sc->arpcom.ac_if; 1726 struct ti_tx_desc *cur_tx = NULL; 1727 1728 /* 1729 * Go through our tx ring and free mbufs for those 1730 * frames that have been sent. 1731 */ 1732 while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) { 1733 uint32_t idx = 0; 1734 1735 idx = sc->ti_tx_saved_considx; 1736 if (sc->ti_hwrev == TI_HWREV_TIGON) { 1737 if (idx > 383) 1738 CSR_WRITE_4(sc, TI_WINBASE, 1739 TI_TX_RING_BASE + 6144); 1740 else if (idx > 255) 1741 CSR_WRITE_4(sc, TI_WINBASE, 1742 TI_TX_RING_BASE + 4096); 1743 else if (idx > 127) 1744 CSR_WRITE_4(sc, TI_WINBASE, 1745 TI_TX_RING_BASE + 2048); 1746 else 1747 CSR_WRITE_4(sc, TI_WINBASE, 1748 TI_TX_RING_BASE); 1749 cur_tx = &sc->ti_rdata->ti_tx_ring_nic[idx % 128]; 1750 } else 1751 cur_tx = &sc->ti_rdata->ti_tx_ring[idx]; 1752 if (cur_tx->ti_flags & TI_BDFLAG_END) 1753 ifp->if_opackets++; 1754 if (sc->ti_cdata.ti_tx_chain[idx] != NULL) { 1755 m_freem(sc->ti_cdata.ti_tx_chain[idx]); 1756 sc->ti_cdata.ti_tx_chain[idx] = NULL; 1757 } 1758 sc->ti_txcnt--; 1759 TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT); 1760 ifp->if_timer = 0; 1761 } 1762 1763 if (cur_tx != NULL) 1764 ifp->if_flags &= ~IFF_OACTIVE; 1765 } 1766 1767 static void 1768 ti_intr(void *xsc) 1769 { 1770 struct ti_softc *sc = xsc; 1771 struct ifnet *ifp = &sc->arpcom.ac_if; 1772 1773 #ifdef notdef 1774 /* Avoid this for now -- checking this register is expensive. */ 1775 /* Make sure this is really our interrupt. */ 1776 if ((CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE) == 0) 1777 return; 1778 #endif 1779 1780 /* Ack interrupt and stop others from occuring. */ 1781 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1782 1783 if (ifp->if_flags & IFF_RUNNING) { 1784 /* Check RX return ring producer/consumer */ 1785 ti_rxeof(sc); 1786 1787 /* Check TX ring producer/consumer */ 1788 ti_txeof(sc); 1789 } 1790 1791 ti_handle_events(sc); 1792 1793 /* Re-enable interrupts. */ 1794 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 1795 1796 if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd)) 1797 if_devstart(ifp); 1798 } 1799 1800 static void 1801 ti_stats_update(struct ti_softc *sc) 1802 { 1803 struct ifnet *ifp = &sc->arpcom.ac_if; 1804 1805 ifp->if_collisions += 1806 (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames + 1807 sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames + 1808 sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions + 1809 sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) - 1810 ifp->if_collisions; 1811 } 1812 1813 /* 1814 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 1815 * pointers to descriptors. 1816 */ 1817 static int 1818 ti_encap(struct ti_softc *sc, struct mbuf *m_head, uint32_t *txidx) 1819 { 1820 struct ti_tx_desc *f = NULL; 1821 struct mbuf *m; 1822 uint32_t cnt = 0, cur, frag; 1823 uint16_t csum_flags = 0, vlan_tag = 0, vlan_flag = 0; 1824 1825 if (m_head->m_flags & M_VLANTAG) { 1826 vlan_tag = m_head->m_pkthdr.ether_vlantag; 1827 vlan_flag = TI_BDFLAG_VLAN_TAG; 1828 } 1829 1830 m = m_head; 1831 cur = frag = *txidx; 1832 1833 if (m_head->m_pkthdr.csum_flags) { 1834 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 1835 csum_flags |= TI_BDFLAG_IP_CKSUM; 1836 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 1837 csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM; 1838 if (m_head->m_flags & M_LASTFRAG) 1839 csum_flags |= TI_BDFLAG_IP_FRAG_END; 1840 else if (m_head->m_flags & M_FRAG) 1841 csum_flags |= TI_BDFLAG_IP_FRAG; 1842 } 1843 /* 1844 * Start packing the mbufs in this chain into 1845 * the fragment pointers. Stop when we run out 1846 * of fragments or hit the end of the mbuf chain. 1847 */ 1848 for (m = m_head; m != NULL; m = m->m_next) { 1849 if (m->m_len != 0) { 1850 if (sc->ti_hwrev == TI_HWREV_TIGON) { 1851 if (frag > 383) 1852 CSR_WRITE_4(sc, TI_WINBASE, 1853 TI_TX_RING_BASE + 6144); 1854 else if (frag > 255) 1855 CSR_WRITE_4(sc, TI_WINBASE, 1856 TI_TX_RING_BASE + 4096); 1857 else if (frag > 127) 1858 CSR_WRITE_4(sc, TI_WINBASE, 1859 TI_TX_RING_BASE + 2048); 1860 else 1861 CSR_WRITE_4(sc, TI_WINBASE, 1862 TI_TX_RING_BASE); 1863 f = &sc->ti_rdata->ti_tx_ring_nic[frag % 128]; 1864 } else 1865 f = &sc->ti_rdata->ti_tx_ring[frag]; 1866 if (sc->ti_cdata.ti_tx_chain[frag] != NULL) 1867 break; 1868 TI_HOSTADDR(f->ti_addr) = vtophys(mtod(m, vm_offset_t)); 1869 f->ti_len = m->m_len; 1870 f->ti_flags = csum_flags | vlan_flag; 1871 f->ti_vlan_tag = vlan_tag & 0xfff; 1872 1873 /* 1874 * Sanity check: avoid coming within 16 descriptors 1875 * of the end of the ring. 1876 */ 1877 if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16) 1878 return(ENOBUFS); 1879 cur = frag; 1880 TI_INC(frag, TI_TX_RING_CNT); 1881 cnt++; 1882 } 1883 } 1884 1885 if (m != NULL) 1886 return(ENOBUFS); 1887 1888 if (frag == sc->ti_tx_saved_considx) 1889 return(ENOBUFS); 1890 1891 if (sc->ti_hwrev == TI_HWREV_TIGON) 1892 sc->ti_rdata->ti_tx_ring_nic[cur % 128].ti_flags |= 1893 TI_BDFLAG_END; 1894 else 1895 sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END; 1896 sc->ti_cdata.ti_tx_chain[cur] = m_head; 1897 sc->ti_txcnt += cnt; 1898 1899 *txidx = frag; 1900 1901 return(0); 1902 } 1903 1904 /* 1905 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1906 * to the mbuf data regions directly in the transmit descriptors. 1907 */ 1908 static void 1909 ti_start(struct ifnet *ifp) 1910 { 1911 struct ti_softc *sc = ifp->if_softc; 1912 struct mbuf *m_head = NULL; 1913 uint32_t prodidx = 0; 1914 int need_trans; 1915 1916 prodidx = CSR_READ_4(sc, TI_MB_SENDPROD_IDX); 1917 1918 need_trans = 0; 1919 while(sc->ti_cdata.ti_tx_chain[prodidx] == NULL) { 1920 m_head = ifq_dequeue(&ifp->if_snd, NULL); 1921 if (m_head == NULL) 1922 break; 1923 1924 /* 1925 * XXX 1926 * safety overkill. If this is a fragmented packet chain 1927 * with delayed TCP/UDP checksums, then only encapsulate 1928 * it if we have enough descriptors to handle the entire 1929 * chain at once. 1930 * (paranoia -- may not actually be needed) 1931 */ 1932 if (m_head->m_flags & M_FIRSTFRAG && 1933 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 1934 if ((TI_TX_RING_CNT - sc->ti_txcnt) < 1935 m_head->m_pkthdr.csum_data + 16) { 1936 ifp->if_flags |= IFF_OACTIVE; 1937 ifq_prepend(&ifp->if_snd, m_head); 1938 break; 1939 } 1940 } 1941 1942 /* 1943 * Pack the data into the transmit ring. If we 1944 * don't have room, set the OACTIVE flag and wait 1945 * for the NIC to drain the ring. 1946 */ 1947 if (ti_encap(sc, m_head, &prodidx)) { 1948 ifp->if_flags |= IFF_OACTIVE; 1949 ifq_prepend(&ifp->if_snd, m_head); 1950 break; 1951 } 1952 need_trans = 1; 1953 1954 ETHER_BPF_MTAP(ifp, m_head); 1955 } 1956 1957 if (!need_trans) 1958 return; 1959 1960 /* Transmit */ 1961 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx); 1962 1963 /* 1964 * Set a timeout in case the chip goes out to lunch. 1965 */ 1966 ifp->if_timer = 5; 1967 } 1968 1969 static void 1970 ti_init(void *xsc) 1971 { 1972 struct ti_softc *sc = xsc; 1973 1974 /* Cancel pending I/O and flush buffers. */ 1975 ti_stop(sc); 1976 1977 /* Init the gen info block, ring control blocks and firmware. */ 1978 if (ti_gibinit(sc)) { 1979 if_printf(&sc->arpcom.ac_if, "initialization failure\n"); 1980 return; 1981 } 1982 } 1983 1984 static void 1985 ti_init2(struct ti_softc *sc) 1986 { 1987 struct ifnet *ifp = &sc->arpcom.ac_if; 1988 struct ti_cmd_desc cmd; 1989 uint16_t *m; 1990 struct ifmedia *ifm; 1991 int tmp; 1992 1993 /* Specify MTU and interface index. */ 1994 CSR_WRITE_4(sc, TI_GCR_IFINDEX, ifp->if_dunit); 1995 CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu + 1996 ETHER_HDR_LEN + ETHER_CRC_LEN); 1997 TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0); 1998 1999 /* Load our MAC address. */ 2000 m = (uint16_t *)&sc->arpcom.ac_enaddr[0]; 2001 CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0])); 2002 CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2])); 2003 TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0); 2004 2005 /* Enable or disable promiscuous mode as needed. */ 2006 if (ifp->if_flags & IFF_PROMISC) 2007 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0); 2008 else 2009 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0); 2010 2011 /* Program multicast filter. */ 2012 ti_setmulti(sc); 2013 2014 /* 2015 * If this is a Tigon 1, we should tell the 2016 * firmware to use software packet filtering. 2017 */ 2018 if (sc->ti_hwrev == TI_HWREV_TIGON) 2019 TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0); 2020 2021 /* Init RX ring. */ 2022 ti_init_rx_ring_std(sc); 2023 2024 /* Init jumbo RX ring. */ 2025 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2026 ti_init_rx_ring_jumbo(sc); 2027 2028 /* 2029 * If this is a Tigon 2, we can also configure the 2030 * mini ring. 2031 */ 2032 if (sc->ti_hwrev == TI_HWREV_TIGON_II) 2033 ti_init_rx_ring_mini(sc); 2034 2035 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0); 2036 sc->ti_rx_saved_considx = 0; 2037 2038 /* Init TX ring. */ 2039 ti_init_tx_ring(sc); 2040 2041 /* Tell firmware we're alive. */ 2042 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0); 2043 2044 /* Enable host interrupts. */ 2045 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 2046 2047 ifp->if_flags |= IFF_RUNNING; 2048 ifp->if_flags &= ~IFF_OACTIVE; 2049 2050 /* 2051 * Make sure to set media properly. We have to do this 2052 * here since we have to issue commands in order to set 2053 * the link negotiation and we can't issue commands until 2054 * the firmware is running. 2055 */ 2056 ifm = &sc->ifmedia; 2057 tmp = ifm->ifm_media; 2058 ifm->ifm_media = ifm->ifm_cur->ifm_media; 2059 ti_ifmedia_upd(ifp); 2060 ifm->ifm_media = tmp; 2061 } 2062 2063 /* 2064 * Set media options. 2065 */ 2066 static int 2067 ti_ifmedia_upd(struct ifnet *ifp) 2068 { 2069 struct ti_softc *sc = ifp->if_softc; 2070 struct ifmedia *ifm = &sc->ifmedia; 2071 struct ti_cmd_desc cmd; 2072 2073 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2074 return(EINVAL); 2075 2076 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2077 case IFM_AUTO: 2078 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF | TI_GLNK_1000MB | 2079 TI_GLNK_FULL_DUPLEX | TI_GLNK_RX_FLOWCTL_Y | 2080 TI_GLNK_AUTONEGENB | TI_GLNK_ENB); 2081 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB | TI_LNK_10MB | 2082 TI_LNK_FULL_DUPLEX | TI_LNK_HALF_DUPLEX | 2083 TI_LNK_AUTONEGENB | TI_LNK_ENB); 2084 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 2085 TI_CMD_CODE_NEGOTIATE_BOTH, 0); 2086 break; 2087 case IFM_1000_SX: 2088 case IFM_1000_T: 2089 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB | 2090 TI_GLNK_RX_FLOWCTL_Y | TI_GLNK_ENB); 2091 CSR_WRITE_4(sc, TI_GCR_LINK, 0); 2092 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 2093 TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX); 2094 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 2095 TI_CMD_CODE_NEGOTIATE_GIGABIT, 0); 2096 break; 2097 case IFM_100_FX: 2098 case IFM_10_FL: 2099 case IFM_100_TX: 2100 case IFM_10_T: 2101 CSR_WRITE_4(sc, TI_GCR_GLINK, 0); 2102 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB | TI_LNK_PREF); 2103 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX || 2104 IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) 2105 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB); 2106 else 2107 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB); 2108 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 2109 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX); 2110 else 2111 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX); 2112 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 2113 TI_CMD_CODE_NEGOTIATE_10_100, 0); 2114 break; 2115 } 2116 2117 return(0); 2118 } 2119 2120 /* 2121 * Report current media status. 2122 */ 2123 static void 2124 ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2125 { 2126 struct ti_softc *sc = ifp->if_softc; 2127 uint32_t media = 0; 2128 2129 ifmr->ifm_status = IFM_AVALID; 2130 ifmr->ifm_active = IFM_ETHER; 2131 2132 if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) 2133 return; 2134 2135 ifmr->ifm_status |= IFM_ACTIVE; 2136 2137 if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) { 2138 media = CSR_READ_4(sc, TI_GCR_GLINK_STAT); 2139 if (sc->ti_copper) 2140 ifmr->ifm_active |= IFM_1000_T; 2141 else 2142 ifmr->ifm_active |= IFM_1000_SX; 2143 if (media & TI_GLNK_FULL_DUPLEX) 2144 ifmr->ifm_active |= IFM_FDX; 2145 else 2146 ifmr->ifm_active |= IFM_HDX; 2147 } else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) { 2148 media = CSR_READ_4(sc, TI_GCR_LINK_STAT); 2149 if (sc->ti_copper) { 2150 if (media & TI_LNK_100MB) 2151 ifmr->ifm_active |= IFM_100_TX; 2152 if (media & TI_LNK_10MB) 2153 ifmr->ifm_active |= IFM_10_T; 2154 } else { 2155 if (media & TI_LNK_100MB) 2156 ifmr->ifm_active |= IFM_100_FX; 2157 if (media & TI_LNK_10MB) 2158 ifmr->ifm_active |= IFM_10_FL; 2159 } 2160 if (media & TI_LNK_FULL_DUPLEX) 2161 ifmr->ifm_active |= IFM_FDX; 2162 if (media & TI_LNK_HALF_DUPLEX) 2163 ifmr->ifm_active |= IFM_HDX; 2164 } 2165 } 2166 2167 static int 2168 ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 2169 { 2170 struct ti_softc *sc = ifp->if_softc; 2171 struct ifreq *ifr = (struct ifreq *) data; 2172 struct ti_cmd_desc cmd; 2173 int error = 0, mask; 2174 2175 switch(command) { 2176 case SIOCSIFMTU: 2177 if (ifr->ifr_mtu > TI_JUMBO_MTU) 2178 error = EINVAL; 2179 else { 2180 ifp->if_mtu = ifr->ifr_mtu; 2181 ti_init(sc); 2182 } 2183 break; 2184 case SIOCSIFFLAGS: 2185 if (ifp->if_flags & IFF_UP) { 2186 /* 2187 * If only the state of the PROMISC flag changed, 2188 * then just use the 'set promisc mode' command 2189 * instead of reinitializing the entire NIC. Doing 2190 * a full re-init means reloading the firmware and 2191 * waiting for it to start up, which may take a 2192 * second or two. 2193 */ 2194 if (ifp->if_flags & IFF_RUNNING && 2195 ifp->if_flags & IFF_PROMISC && 2196 !(sc->ti_if_flags & IFF_PROMISC)) { 2197 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, 2198 TI_CMD_CODE_PROMISC_ENB, 0); 2199 } else if (ifp->if_flags & IFF_RUNNING && 2200 !(ifp->if_flags & IFF_PROMISC) && 2201 sc->ti_if_flags & IFF_PROMISC) { 2202 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, 2203 TI_CMD_CODE_PROMISC_DIS, 0); 2204 } else 2205 ti_init(sc); 2206 } else if (ifp->if_flags & IFF_RUNNING) { 2207 ti_stop(sc); 2208 } 2209 sc->ti_if_flags = ifp->if_flags; 2210 error = 0; 2211 break; 2212 case SIOCADDMULTI: 2213 case SIOCDELMULTI: 2214 if (ifp->if_flags & IFF_RUNNING) { 2215 ti_setmulti(sc); 2216 error = 0; 2217 } 2218 break; 2219 case SIOCSIFMEDIA: 2220 case SIOCGIFMEDIA: 2221 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 2222 break; 2223 case SIOCSIFCAP: 2224 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2225 if (mask & IFCAP_HWCSUM) { 2226 if (IFCAP_HWCSUM & ifp->if_capenable) 2227 ifp->if_capenable &= ~IFCAP_HWCSUM; 2228 else 2229 ifp->if_capenable |= IFCAP_HWCSUM; 2230 if (ifp->if_flags & IFF_RUNNING) 2231 ti_init(sc); 2232 } 2233 error = 0; 2234 break; 2235 default: 2236 error = ether_ioctl(ifp, command, data); 2237 break; 2238 } 2239 return(error); 2240 } 2241 2242 static void 2243 ti_watchdog(struct ifnet *ifp) 2244 { 2245 struct ti_softc *sc = ifp->if_softc; 2246 2247 if_printf(ifp, "watchdog timeout -- resetting\n"); 2248 ti_stop(sc); 2249 ti_init(sc); 2250 2251 ifp->if_oerrors++; 2252 2253 if (!ifq_is_empty(&ifp->if_snd)) 2254 if_devstart(ifp); 2255 } 2256 2257 /* 2258 * Stop the adapter and free any mbufs allocated to the 2259 * RX and TX lists. 2260 */ 2261 static void 2262 ti_stop(struct ti_softc *sc) 2263 { 2264 struct ifnet *ifp = &sc->arpcom.ac_if; 2265 struct ti_cmd_desc cmd; 2266 2267 /* Disable host interrupts. */ 2268 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 2269 /* 2270 * Tell firmware we're shutting down. 2271 */ 2272 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0); 2273 2274 /* Halt and reinitialize. */ 2275 ti_chipinit(sc); 2276 ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL); 2277 ti_chipinit(sc); 2278 2279 /* Free the RX lists. */ 2280 ti_free_rx_ring_std(sc); 2281 2282 /* Free jumbo RX list. */ 2283 ti_free_rx_ring_jumbo(sc); 2284 2285 /* Free mini RX list. */ 2286 ti_free_rx_ring_mini(sc); 2287 2288 /* Free TX buffers. */ 2289 ti_free_tx_ring(sc); 2290 2291 sc->ti_ev_prodidx.ti_idx = 0; 2292 sc->ti_return_prodidx.ti_idx = 0; 2293 sc->ti_tx_considx.ti_idx = 0; 2294 sc->ti_tx_saved_considx = TI_TXCONS_UNSET; 2295 2296 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2297 } 2298 2299 /* 2300 * Stop all chip I/O so that the kernel's probe routines don't 2301 * get confused by errant DMAs when rebooting. 2302 */ 2303 static void 2304 ti_shutdown(device_t dev) 2305 { 2306 struct ti_softc *sc = device_get_softc(dev); 2307 2308 ti_chipinit(sc); 2309 } 2310