1 /* 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2001 4 * Bill Paul <wpaul@windriver.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 
32 * 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $ 34 */ 35 36 #include "opt_bnx.h" 37 #include "opt_ifpoll.h" 38 39 #include <sys/param.h> 40 #include <sys/bus.h> 41 #include <sys/endian.h> 42 #include <sys/kernel.h> 43 #include <sys/interrupt.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/queue.h> 47 #include <sys/rman.h> 48 #include <sys/serialize.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 53 #include <netinet/ip.h> 54 #include <netinet/tcp.h> 55 56 #include <net/bpf.h> 57 #include <net/ethernet.h> 58 #include <net/if.h> 59 #include <net/if_arp.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_poll.h> 63 #include <net/if_types.h> 64 #include <net/ifq_var.h> 65 #include <net/toeplitz.h> 66 #include <net/toeplitz2.h> 67 #include <net/vlan/if_vlan_var.h> 68 #include <net/vlan/if_vlan_ether.h> 69 70 #include <dev/netif/mii_layer/mii.h> 71 #include <dev/netif/mii_layer/miivar.h> 72 #include <dev/netif/mii_layer/brgphyreg.h> 73 74 #include "pcidevs.h" 75 #include <bus/pci/pcireg.h> 76 #include <bus/pci/pcivar.h> 77 78 #include <dev/netif/bge/if_bgereg.h> 79 #include <dev/netif/bnx/if_bnxvar.h> 80 81 /* "device miibus" required. See GENERIC if you get errors here. */ 82 #include "miibus_if.h" 83 84 #define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 85 86 #define BNX_RESET_SHUTDOWN 0 87 #define BNX_RESET_START 1 88 #define BNX_RESET_SUSPEND 2 89 90 #define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */ 91 92 #ifdef BNX_RSS_DEBUG 93 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) \ 94 do { \ 95 if (sc->bnx_rss_debug >= lvl) \ 96 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 97 } while (0) 98 #else /* !BNX_RSS_DEBUG */ 99 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) 
((void)0) 100 #endif /* BNX_RSS_DEBUG */ 101 102 static const struct bnx_type { 103 uint16_t bnx_vid; 104 uint16_t bnx_did; 105 char *bnx_name; 106 } bnx_devs[] = { 107 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717, 108 "Broadcom BCM5717 Gigabit Ethernet" }, 109 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C, 110 "Broadcom BCM5717C Gigabit Ethernet" }, 111 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718, 112 "Broadcom BCM5718 Gigabit Ethernet" }, 113 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719, 114 "Broadcom BCM5719 Gigabit Ethernet" }, 115 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT, 116 "Broadcom BCM5720 Gigabit Ethernet" }, 117 118 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725, 119 "Broadcom BCM5725 Gigabit Ethernet" }, 120 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727, 121 "Broadcom BCM5727 Gigabit Ethernet" }, 122 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762, 123 "Broadcom BCM5762 Gigabit Ethernet" }, 124 125 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761, 126 "Broadcom BCM57761 Gigabit Ethernet" }, 127 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762, 128 "Broadcom BCM57762 Gigabit Ethernet" }, 129 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765, 130 "Broadcom BCM57765 Gigabit Ethernet" }, 131 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766, 132 "Broadcom BCM57766 Gigabit Ethernet" }, 133 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781, 134 "Broadcom BCM57781 Gigabit Ethernet" }, 135 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782, 136 "Broadcom BCM57782 Gigabit Ethernet" }, 137 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785, 138 "Broadcom BCM57785 Gigabit Ethernet" }, 139 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786, 140 "Broadcom BCM57786 Gigabit Ethernet" }, 141 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791, 142 "Broadcom BCM57791 Fast Ethernet" }, 143 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795, 144 "Broadcom BCM57795 Fast Ethernet" }, 145 
146 { 0, 0, NULL } 147 }; 148 149 static const int bnx_tx_mailbox[BNX_TX_RING_MAX] = { 150 BGE_MBX_TX_HOST_PROD0_LO, 151 BGE_MBX_TX_HOST_PROD0_HI, 152 BGE_MBX_TX_HOST_PROD1_LO, 153 BGE_MBX_TX_HOST_PROD1_HI 154 }; 155 156 #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO) 157 #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS) 158 #define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS) 159 #define BNX_IS_57765_FAMILY(sc) \ 160 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY) 161 162 typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]); 163 164 static int bnx_probe(device_t); 165 static int bnx_attach(device_t); 166 static int bnx_detach(device_t); 167 static void bnx_shutdown(device_t); 168 static int bnx_suspend(device_t); 169 static int bnx_resume(device_t); 170 static int bnx_miibus_readreg(device_t, int, int); 171 static int bnx_miibus_writereg(device_t, int, int, int); 172 static void bnx_miibus_statchg(device_t); 173 174 static int bnx_handle_status(struct bnx_softc *); 175 #ifdef IFPOLL_ENABLE 176 static void bnx_npoll(struct ifnet *, struct ifpoll_info *); 177 static void bnx_npoll_rx(struct ifnet *, void *, int); 178 static void bnx_npoll_tx(struct ifnet *, void *, int); 179 static void bnx_npoll_tx_notag(struct ifnet *, void *, int); 180 static void bnx_npoll_status(struct ifnet *); 181 static void bnx_npoll_status_notag(struct ifnet *); 182 #endif 183 static void bnx_intr_legacy(void *); 184 static void bnx_msi(void *); 185 static void bnx_intr(struct bnx_softc *); 186 static void bnx_msix_status(void *); 187 static void bnx_msix_tx_status(void *); 188 static void bnx_msix_rx(void *); 189 static void bnx_msix_rxtx(void *); 190 static void bnx_enable_intr(struct bnx_softc *); 191 static void bnx_disable_intr(struct bnx_softc *); 192 static void bnx_txeof(struct bnx_tx_ring *, uint16_t); 193 static void bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int); 194 static int bnx_alloc_intr(struct bnx_softc *); 195 
static int bnx_setup_intr(struct bnx_softc *); 196 static void bnx_free_intr(struct bnx_softc *); 197 static void bnx_teardown_intr(struct bnx_softc *, int); 198 static int bnx_alloc_msix(struct bnx_softc *); 199 static void bnx_free_msix(struct bnx_softc *, boolean_t); 200 static void bnx_check_intr_rxtx(void *); 201 static void bnx_check_intr_rx(void *); 202 static void bnx_check_intr_tx(void *); 203 static void bnx_rx_std_refill_ithread(void *); 204 static void bnx_rx_std_refill(void *, void *); 205 static void bnx_rx_std_refill_sched_ipi(void *); 206 static void bnx_rx_std_refill_stop(void *); 207 static void bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *, 208 struct bnx_rx_std_ring *); 209 210 static void bnx_start(struct ifnet *, struct ifaltq_subque *); 211 static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 212 static void bnx_init(void *); 213 static void bnx_stop(struct bnx_softc *); 214 static void bnx_watchdog(struct ifaltq_subque *); 215 static int bnx_ifmedia_upd(struct ifnet *); 216 static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *); 217 static void bnx_tick(void *); 218 static void bnx_serialize(struct ifnet *, enum ifnet_serialize); 219 static void bnx_deserialize(struct ifnet *, enum ifnet_serialize); 220 static int bnx_tryserialize(struct ifnet *, enum ifnet_serialize); 221 #ifdef INVARIANTS 222 static void bnx_serialize_assert(struct ifnet *, enum ifnet_serialize, 223 boolean_t); 224 #endif 225 static void bnx_serialize_skipmain(struct bnx_softc *); 226 static void bnx_deserialize_skipmain(struct bnx_softc *sc); 227 228 static int bnx_alloc_jumbo_mem(struct bnx_softc *); 229 static void bnx_free_jumbo_mem(struct bnx_softc *); 230 static struct bnx_jslot 231 *bnx_jalloc(struct bnx_softc *); 232 static void bnx_jfree(void *); 233 static void bnx_jref(void *); 234 static int bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int); 235 static int bnx_newbuf_jumbo(struct bnx_softc *, int, int); 236 static void 
bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int); 237 static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int); 238 static int bnx_init_rx_ring_std(struct bnx_rx_std_ring *); 239 static void bnx_free_rx_ring_std(struct bnx_rx_std_ring *); 240 static int bnx_init_rx_ring_jumbo(struct bnx_softc *); 241 static void bnx_free_rx_ring_jumbo(struct bnx_softc *); 242 static void bnx_free_tx_ring(struct bnx_tx_ring *); 243 static int bnx_init_tx_ring(struct bnx_tx_ring *); 244 static int bnx_create_tx_ring(struct bnx_tx_ring *); 245 static void bnx_destroy_tx_ring(struct bnx_tx_ring *); 246 static int bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *); 247 static void bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *); 248 static int bnx_dma_alloc(device_t); 249 static void bnx_dma_free(struct bnx_softc *); 250 static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t, 251 bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *); 252 static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *); 253 static struct mbuf * 254 bnx_defrag_shortdma(struct mbuf *); 255 static int bnx_encap(struct bnx_tx_ring *, struct mbuf **, 256 uint32_t *, int *); 257 static int bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **, 258 uint16_t *, uint16_t *); 259 static void bnx_setup_serialize(struct bnx_softc *); 260 static void bnx_set_tick_cpuid(struct bnx_softc *, boolean_t); 261 static void bnx_setup_ring_cnt(struct bnx_softc *); 262 263 static struct pktinfo *bnx_rss_info(struct pktinfo *, 264 const struct bge_rx_bd *); 265 static void bnx_init_rss(struct bnx_softc *); 266 static void bnx_reset(struct bnx_softc *); 267 static int bnx_chipinit(struct bnx_softc *); 268 static int bnx_blockinit(struct bnx_softc *); 269 static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t); 270 static void bnx_enable_msi(struct bnx_softc *, boolean_t); 271 static void bnx_setmulti(struct bnx_softc *); 272 static void bnx_setpromisc(struct bnx_softc *); 273 static void 
bnx_stats_update_regs(struct bnx_softc *); 274 static uint32_t bnx_dma_swap_options(struct bnx_softc *); 275 276 static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t); 277 static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t); 278 #ifdef notdef 279 static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t); 280 #endif 281 static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t); 282 static void bnx_writembx(struct bnx_softc *, int, int); 283 static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int); 284 static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *); 285 static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t); 286 287 static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t); 288 static void bnx_copper_link_upd(struct bnx_softc *, uint32_t); 289 static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t); 290 static void bnx_link_poll(struct bnx_softc *); 291 292 static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]); 293 static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]); 294 static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]); 295 static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]); 296 297 static void bnx_coal_change(struct bnx_softc *); 298 static int bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS); 299 static int bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS); 300 static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS); 301 static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS); 302 static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS); 303 static int bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS); 304 static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS); 305 static int bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS); 306 static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS); 307 static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS); 308 static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, 309 int, int, 
uint32_t); 310 #ifdef IFPOLL_ENABLE 311 static int bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS); 312 static int bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS); 313 static int bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS); 314 #endif 315 static int bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS); 316 317 static void bnx_sig_post_reset(struct bnx_softc *, int); 318 static void bnx_sig_pre_reset(struct bnx_softc *, int); 319 320 static int bnx_msi_enable = 1; 321 static int bnx_msix_enable = 1; 322 323 static int bnx_rx_rings = 0; /* auto */ 324 static int bnx_tx_rings = 0; /* auto */ 325 326 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable); 327 TUNABLE_INT("hw.bnx.msix.enable", &bnx_msix_enable); 328 TUNABLE_INT("hw.bnx.rx_rings", &bnx_rx_rings); 329 TUNABLE_INT("hw.bnx.tx_rings", &bnx_tx_rings); 330 331 static device_method_t bnx_methods[] = { 332 /* Device interface */ 333 DEVMETHOD(device_probe, bnx_probe), 334 DEVMETHOD(device_attach, bnx_attach), 335 DEVMETHOD(device_detach, bnx_detach), 336 DEVMETHOD(device_shutdown, bnx_shutdown), 337 DEVMETHOD(device_suspend, bnx_suspend), 338 DEVMETHOD(device_resume, bnx_resume), 339 340 /* bus interface */ 341 DEVMETHOD(bus_print_child, bus_generic_print_child), 342 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 343 344 /* MII interface */ 345 DEVMETHOD(miibus_readreg, bnx_miibus_readreg), 346 DEVMETHOD(miibus_writereg, bnx_miibus_writereg), 347 DEVMETHOD(miibus_statchg, bnx_miibus_statchg), 348 349 DEVMETHOD_END 350 }; 351 352 static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc)); 353 static devclass_t bnx_devclass; 354 355 DECLARE_DUMMY_MODULE(if_bnx); 356 DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL); 357 DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL); 358 359 static uint32_t 360 bnx_readmem_ind(struct bnx_softc *sc, uint32_t off) 361 { 362 device_t dev = sc->bnx_dev; 363 uint32_t val; 364 365 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 366 val 
= pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 367 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 368 return (val); 369 } 370 371 static void 372 bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val) 373 { 374 device_t dev = sc->bnx_dev; 375 376 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 377 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 378 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 379 } 380 381 static void 382 bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val) 383 { 384 CSR_WRITE_4(sc, off, val); 385 } 386 387 static void 388 bnx_writembx(struct bnx_softc *sc, int off, int val) 389 { 390 CSR_WRITE_4(sc, off, val); 391 } 392 393 /* 394 * Read a sequence of bytes from NVRAM. 395 */ 396 static int 397 bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt) 398 { 399 return (1); 400 } 401 402 /* 403 * Read a byte of data stored in the EEPROM at address 'addr.' The 404 * BCM570x supports both the traditional bitbang interface and an 405 * auto access interface for reading the EEPROM. We use the auto 406 * access method. 407 */ 408 static uint8_t 409 bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest) 410 { 411 int i; 412 uint32_t byte = 0; 413 414 /* 415 * Enable use of auto EEPROM access so we can avoid 416 * having to use the bitbang method. 417 */ 418 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 419 420 /* Reset the EEPROM, load the clock period. */ 421 CSR_WRITE_4(sc, BGE_EE_ADDR, 422 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 423 DELAY(20); 424 425 /* Issue the read EEPROM command. */ 426 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 427 428 /* Wait for completion */ 429 for(i = 0; i < BNX_TIMEOUT * 10; i++) { 430 DELAY(10); 431 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 432 break; 433 } 434 435 if (i == BNX_TIMEOUT) { 436 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n"); 437 return(1); 438 } 439 440 /* Get result. 
*/ 441 byte = CSR_READ_4(sc, BGE_EE_DATA); 442 443 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 444 445 return(0); 446 } 447 448 /* 449 * Read a sequence of bytes from the EEPROM. 450 */ 451 static int 452 bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len) 453 { 454 size_t i; 455 int err; 456 uint8_t byte; 457 458 for (byte = 0, err = 0, i = 0; i < len; i++) { 459 err = bnx_eeprom_getbyte(sc, off + i, &byte); 460 if (err) 461 break; 462 *(dest + i) = byte; 463 } 464 465 return(err ? 1 : 0); 466 } 467 468 static int 469 bnx_miibus_readreg(device_t dev, int phy, int reg) 470 { 471 struct bnx_softc *sc = device_get_softc(dev); 472 uint32_t val; 473 int i; 474 475 KASSERT(phy == sc->bnx_phyno, 476 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno)); 477 478 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 479 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 480 CSR_WRITE_4(sc, BGE_MI_MODE, 481 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 482 DELAY(80); 483 } 484 485 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 486 BGE_MIPHY(phy) | BGE_MIREG(reg)); 487 488 /* Poll for the PHY register access to complete. */ 489 for (i = 0; i < BNX_TIMEOUT; i++) { 490 DELAY(10); 491 val = CSR_READ_4(sc, BGE_MI_COMM); 492 if ((val & BGE_MICOMM_BUSY) == 0) { 493 DELAY(5); 494 val = CSR_READ_4(sc, BGE_MI_COMM); 495 break; 496 } 497 } 498 if (i == BNX_TIMEOUT) { 499 if_printf(&sc->arpcom.ac_if, "PHY read timed out " 500 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val); 501 val = 0; 502 } 503 504 /* Restore the autopoll bit if necessary. 
*/
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	/* NOTE(review): a READFAIL is reported as 0, not as an error code. */
	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

/*
 * Write a PHY register through the MI communication register.
 * Always returns 0; a poll timeout is only logged.
 */
static int
bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	/* Issue the write command; val occupies the low 16 bits. */
	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	return 0;
}

/*
 * MII status-change callback: cache link state in sc->bnx_link and,
 * when link is up, program the MAC port mode and duplex to match the
 * negotiated media.
 */
static void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc *sc;
	struct mii_data *mii;
	uint32_t mac_mode;

	sc = device_get_softc(dev);
	if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->bnx_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bnx_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			sc->bnx_link = 1;
			break;
		default:
			sc->bnx_link = 0;
			break;
		}
	} else {
		sc->bnx_link = 0;
	}
	if (sc->bnx_link == 0)
		return;

	/*
	 * APE firmware touches these registers to keep the MAC
	 * connected to the outside world. Try to keep the
	 * accesses atomic.
	 */

	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	if ((mii->mii_media_active & IFM_GMASK) != IFM_FDX)
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
}

/*
 * Memory management for jumbo frames.
 */
static int
bnx_alloc_jumbo_mem(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge.
We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
	    &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bnx_dma_block_alloc(sc, BNX_JMEM,
	    &sc->bnx_cdata.bnx_jumbo_tag,
	    &sc->bnx_cdata.bnx_jumbo_map,
	    (void **)&sc->bnx_ldata.bnx_jumbo_buf,
	    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bnx_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bnx_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
		entry = &sc->bnx_cdata.bnx_jslots[i];
		entry->bnx_sc = sc;
		entry->bnx_buf = ptr;
		entry->bnx_paddr = paddr;
		entry->bnx_inuse = 0;
		entry->bnx_slot = i;
		SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);

		ptr += BNX_JLEN;
		paddr += BNX_JLEN;
	}
	return 0;
}

/* Release the jumbo RX ring and the jumbo buffer block DMA memory. */
static void
bnx_free_jumbo_mem(struct bnx_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    sc->bnx_ldata.bnx_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
	    sc->bnx_cdata.bnx_jumbo_map,
	    sc->bnx_ldata.bnx_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.  Returns NULL (after logging) when the
 * free list is empty.
 */
static struct bnx_jslot *
bnx_jalloc(struct bnx_softc *sc)
{
	struct bnx_jslot *entry;

	lwkt_serialize_enter(&sc->bnx_jslot_serializer);
	entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
		entry->bnx_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.  Called by the mbuf code
 * via m_ext.ext_ref; the slot must already be in use.
 */
static void
bnx_jref(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jref: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bnx_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bnx_jfree(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jfree: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer. The atomic insn
		 * is still needed for races against bnx_jref().
		 */
		lwkt_serialize_enter(&sc->bnx_jslot_serializer);
		atomic_subtract_int(&entry->bnx_inuse, 1);
		if (entry->bnx_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	}
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;
	struct bnx_rx_buf *rb;

	rb = &ret->bnx_std->bnx_rx_std_buf[i];
	KASSERT(!rb->bnx_rx_refilled, ("RX buf %dth has been refilled", i));

	m_new = m_getcl(init ?
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL) {
		error = ENOBUFS;
		goto back;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	/* Shift the payload so the IP header lands on a 32-bit boundary. */
	m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag,
	    ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		goto back;
	}

	if (!init) {
		bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap);
	}

	/* Swap the freshly loaded temporary map with the slot's map. */
	map = ret->bnx_rx_tmpmap;
	ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap;

	rb->bnx_rx_dmamap = map;
	rb->bnx_rx_mbuf = m_new;
	rb->bnx_rx_paddr = seg.ds_addr;
	rb->bnx_rx_len = m_new->m_len;
back:
	/*
	 * Publish the buffer state before setting the refilled flag;
	 * paired with the cpu_mfence() in bnx_setup_rxdesc_std().
	 */
	cpu_sfence();
	rb->bnx_rx_refilled = 1;
	return error;
}

/*
 * Copy a refilled buffer's address/length into the hardware RX BD
 * and clear the refilled flag.
 */
static void
bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i)
{
	struct bnx_rx_buf *rb;
	struct bge_rx_bd *r;
	bus_addr_t paddr;
	int len;

	rb = &std->bnx_rx_std_buf[i];
	KASSERT(rb->bnx_rx_refilled, ("RX buf %dth is not refilled", i));

	paddr = rb->bnx_rx_paddr;
	len = rb->bnx_rx_len;

	/* Read the buffer state before clearing the refilled flag. */
	cpu_mfence();

	rb->bnx_rx_refilled = 0;

	r = &std->bnx_rx_std_ring[i];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
	r->bge_len = len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bnx_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bnx_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bnx_buf;
	m_new->m_ext.ext_free = bnx_jfree;
	m_new->m_ext.ext_ref = bnx_jref;
	m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bnx_paddr;
	/* Keep the DMA address in step with the m_adj() payload shift. */
	m_adj(m_new, ETHER_ALIGN);
	paddr += ETHER_ALIGN;

	/* Save necessary information */
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr;

	/* Set up the descriptor. */
	bnx_setup_rxdesc_jumbo(sc, i);
	return 0;
}

/* Write the saved jumbo buffer state into the hardware RX BD. */
static void
bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bnx_rx_buf *rc;

	r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
	rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr);
	r->bge_len = rc->bnx_rx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

/*
 * Populate the standard RX ring and hand the initial producer index
 * to the chip.
 */
static int
bnx_init_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		/* Use the first RX return ring's tmp RX mbuf DMA map */
		error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1);
		if (error)
			return error;
		bnx_setup_rxdesc_std(std, i);
	}

	std->bnx_rx_std_used = 0;
	std->bnx_rx_std_refill = 0;
	std->bnx_rx_std_running = 0;
	cpu_sfence();
	lwkt_serialize_handler_enable(&std->bnx_rx_std_serialize);

	std->bnx_rx_std =
BGE_STD_RX_RING_CNT - 1; 941 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std); 942 943 return(0); 944 } 945 946 static void 947 bnx_free_rx_ring_std(struct bnx_rx_std_ring *std) 948 { 949 int i; 950 951 lwkt_serialize_handler_disable(&std->bnx_rx_std_serialize); 952 953 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 954 struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i]; 955 956 rb->bnx_rx_refilled = 0; 957 if (rb->bnx_rx_mbuf != NULL) { 958 bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap); 959 m_freem(rb->bnx_rx_mbuf); 960 rb->bnx_rx_mbuf = NULL; 961 } 962 bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd)); 963 } 964 } 965 966 static int 967 bnx_init_rx_ring_jumbo(struct bnx_softc *sc) 968 { 969 struct bge_rcb *rcb; 970 int i, error; 971 972 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 973 error = bnx_newbuf_jumbo(sc, i, 1); 974 if (error) 975 return error; 976 } 977 978 sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1; 979 980 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb; 981 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); 982 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 983 984 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo); 985 986 return(0); 987 } 988 989 static void 990 bnx_free_rx_ring_jumbo(struct bnx_softc *sc) 991 { 992 int i; 993 994 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 995 struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; 996 997 if (rc->bnx_rx_mbuf != NULL) { 998 m_freem(rc->bnx_rx_mbuf); 999 rc->bnx_rx_mbuf = NULL; 1000 } 1001 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i], 1002 sizeof(struct bge_rx_bd)); 1003 } 1004 } 1005 1006 static void 1007 bnx_free_tx_ring(struct bnx_tx_ring *txr) 1008 { 1009 int i; 1010 1011 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1012 struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i]; 1013 1014 if (buf->bnx_tx_mbuf != NULL) { 1015 bus_dmamap_unload(txr->bnx_tx_mtag, 1016 buf->bnx_tx_dmamap); 1017 m_freem(buf->bnx_tx_mbuf); 1018 buf->bnx_tx_mbuf = NULL; 
1019 } 1020 bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd)); 1021 } 1022 txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET; 1023 } 1024 1025 static int 1026 bnx_init_tx_ring(struct bnx_tx_ring *txr) 1027 { 1028 txr->bnx_tx_cnt = 0; 1029 txr->bnx_tx_saved_considx = 0; 1030 txr->bnx_tx_prodidx = 0; 1031 1032 /* Initialize transmit producer index for host-memory send ring. */ 1033 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx); 1034 1035 return(0); 1036 } 1037 1038 static void 1039 bnx_setmulti(struct bnx_softc *sc) 1040 { 1041 struct ifnet *ifp; 1042 struct ifmultiaddr *ifma; 1043 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1044 int h, i; 1045 1046 ifp = &sc->arpcom.ac_if; 1047 1048 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 1049 for (i = 0; i < 4; i++) 1050 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 1051 return; 1052 } 1053 1054 /* First, zot all the existing filters. */ 1055 for (i = 0; i < 4; i++) 1056 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 1057 1058 /* Now program new ones. */ 1059 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1060 if (ifma->ifma_addr->sa_family != AF_LINK) 1061 continue; 1062 h = ether_crc32_le( 1063 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1064 ETHER_ADDR_LEN) & 0x7f; 1065 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1066 } 1067 1068 for (i = 0; i < 4; i++) 1069 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1070 } 1071 1072 /* 1073 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1074 * self-test results. 1075 */ 1076 static int 1077 bnx_chipinit(struct bnx_softc *sc) 1078 { 1079 uint32_t dma_rw_ctl, mode_ctl; 1080 int i; 1081 1082 /* Set endian type before we access any non-PCI registers. */ 1083 pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL, 1084 BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4); 1085 1086 /* 1087 * Clear the MAC statistics block in the NIC's 1088 * internal memory. 
1089 */ 1090 for (i = BGE_STATS_BLOCK; 1091 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 1092 BNX_MEMWIN_WRITE(sc, i, 0); 1093 1094 for (i = BGE_STATUS_BLOCK; 1095 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 1096 BNX_MEMWIN_WRITE(sc, i, 0); 1097 1098 if (BNX_IS_57765_FAMILY(sc)) { 1099 uint32_t val; 1100 1101 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) { 1102 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 1103 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS; 1104 1105 /* Access the lower 1K of PL PCI-E block registers. */ 1106 CSR_WRITE_4(sc, BGE_MODE_CTL, 1107 val | BGE_MODECTL_PCIE_PL_SEL); 1108 1109 val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5); 1110 val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ; 1111 CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val); 1112 1113 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1114 } 1115 if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) { 1116 /* Fix transmit hangs */ 1117 val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL); 1118 val |= BGE_CPMU_PADRNG_CTL_RDIV2; 1119 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val); 1120 1121 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 1122 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS; 1123 1124 /* Access the lower 1K of DL PCI-E block registers. */ 1125 CSR_WRITE_4(sc, BGE_MODE_CTL, 1126 val | BGE_MODECTL_PCIE_DL_SEL); 1127 1128 val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX); 1129 val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK; 1130 val |= BGE_PCIE_DL_LO_FTSMAX_VAL; 1131 CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val); 1132 1133 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1134 } 1135 1136 val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); 1137 val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK; 1138 val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; 1139 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val); 1140 } 1141 1142 /* 1143 * Set up the PCI DMA control register. 
1144 */ 1145 dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4); 1146 /* 1147 * Disable 32bytes cache alignment for DMA write to host memory 1148 * 1149 * NOTE: 1150 * 64bytes cache alignment for DMA write to host memory is still 1151 * enabled. 1152 */ 1153 dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; 1154 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) 1155 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; 1156 /* 1157 * Enable HW workaround for controllers that misinterpret 1158 * a status tag update and leave interrupts permanently 1159 * disabled. 1160 */ 1161 if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 && 1162 sc->bnx_asicrev != BGE_ASICREV_BCM5762 && 1163 !BNX_IS_57765_FAMILY(sc)) 1164 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; 1165 if (bootverbose) { 1166 if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n", 1167 dma_rw_ctl); 1168 } 1169 pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1170 1171 /* 1172 * Set up general mode register. 1173 */ 1174 mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR | 1175 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM; 1176 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1177 1178 /* 1179 * Disable memory write invalidate. Apparently it is not supported 1180 * properly by these devices. Also ensure that INTx isn't disabled, 1181 * as these chips need it even when using MSI. 1182 */ 1183 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD, 1184 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4); 1185 1186 /* Set the timer prescaler (always 66Mhz) */ 1187 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1188 1189 return(0); 1190 } 1191 1192 static int 1193 bnx_blockinit(struct bnx_softc *sc) 1194 { 1195 struct bnx_intr_data *intr; 1196 struct bge_rcb *rcb; 1197 bus_size_t vrcb; 1198 bge_hostaddr taddr; 1199 uint32_t val; 1200 int i, limit; 1201 1202 /* 1203 * Initialize the memory window pointer register so that 1204 * we can access the first 32K of internal NIC RAM. 
This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719.  When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
		val |= BGE_BMANMODE_LOMBUF_ATTN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "buffer manager failed to start\n");
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "flow-through queue init failed\n");
		return(ENXIO);
	}

	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system.  Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer that buffer is moved into a receive return
	 *   ring.  The driver is then responsible for passing the
	 *   buffer up to the stack.  BCM5718/BCM57785 families support
	 *   multiple receive return rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames.  BCM5719/BCM5720
	 *   support multiple send rings.
	 */

	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	if (BNX_IS_57765_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
	} else {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (BNX_IS_5717_PLUS(sc))
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	if (!BNX_IS_5717_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (BNX_IS_5717_PLUS(sc))
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	val = 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
	}

	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc))
		limit = 4;
	else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762)
		limit = 2;
	else
		limit = 1;
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Configure send ring RCBs
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc)) {
		/* Should be 17, use 16 until we get an SRAM map. */
		limit = 16;
	} else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		limit = 4;
	} else {
		limit = 1;
	}
	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return rings.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];

		BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	     sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	     sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	}
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists.  One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	sc->bnx_coal_chg = BNX_RX_COAL_TICKS_CHG |
	    BNX_TX_COAL_TICKS_CHG |
	    BNX_RX_COAL_BDS_CHG |
	    BNX_TX_COAL_BDS_CHG |
	    BNX_RX_COAL_BDS_INT_CHG |
	    BNX_TX_COAL_BDS_INT_CHG;
	bnx_coal_change(sc);

	/*
	 * Set up addresses of status blocks
	 */
	intr = &sc->bnx_intr_data[0];
	bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(intr->bnx_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	/* Additional MSI-X vectors get their own status blocks. */
	for (i = 1; i < sc->bnx_intr_cnt; ++i) {
		intr = &sc->bnx_intr_data[i];
		bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_HI + ((i - 1) * 8),
		    BGE_ADDR_HI(intr->bnx_status_block_paddr));
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_LO + ((i - 1) * 8),
		    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	}

	/* Set up status block partial update size. */
	val = BGE_STATBLKSZ_32BYTE;
#if 0
	/*
	 * Does not seem to have visible effect in both
	 * bulk data (1472B UDP datagram) and tiny data
	 * (18B UDP datagram) TX tests.
	 */
	val |= BGE_HCCMODE_CLRTICK_TX;
#endif
	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bnx_flags & BNX_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	/* Set misc. local control, enable interrupts on attentions */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSIX)
		bnx_enable_msi(sc, TRUE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	/* Enable host coalescing bug fix. */
	val |= BGE_WDMAMODE_STATUS_TAG_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
		/* Request larger DMA burst size to get better performance. */
		val |= BGE_WDMAMODE_BURST_ALL_DATA;
	}
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	if (BNX_IS_57765_PLUS(sc)) {
		uint32_t dmactl, dmactl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			dmactl_reg = BGE_RDMA_RSRVCTRL2;
		else
			dmactl_reg = BGE_RDMA_RSRVCTRL;

		dmactl = CSR_READ_4(sc, dmactl_reg);
		/*
		 * Adjust tx margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		CSR_WRITE_4(sc, dmactl_reg,
		    dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		uint32_t ctrl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2;
		else
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL;

		/*
		 * Allow 4KB burst length reads for non-LSO frames.
		 * Enable 512B burst length reads for buffer descriptors.
		 */
		CSR_WRITE_4(sc, ctrl_reg,
		    CSR_READ_4(sc, ctrl_reg) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
		    BGE_RDMAMODE_H2BNC_VLAN_DET;
		/*
		 * Allow multiple outstanding read requests from
		 * non-LSO read DMA engine.
		 */
		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM57766)
		val |= BGE_RDMAMODE_JMB_2K_MMRR;
	if (sc->bnx_flags & BNX_FLAG_TSO)
		val |= BGE_RDMAMODE_TSO4_ENABLE;
	val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	if (sc->bnx_flags & BNX_FLAG_TSO) {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
		    BGE_SDIMODE_HW_LSO_PRE_DMA);
	} else {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	}

	/* Turn on send BD initiator state machine */
	val = BGE_SBDIMODE_ENABLE;
	if (sc->bnx_tx_ringcnt > 1)
		val |= BGE_SBDIMODE_MULTI_TXR;
	CSR_WRITE_4(sc, BGE_SBDI_MODE, val);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/*
	 * Enable attention when the link has changed state for
	 * devices that use auto polling.
	 */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
			CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
			DELAY(80);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip.
Check the PCI vendor and device IDs 1775 * against our list and return its name if we find a match. Note 1776 * that since the Broadcom controller contains VPD support, we 1777 * can get the device name string from the controller itself instead 1778 * of the compiled-in string. This is a little slow, but it guarantees 1779 * we'll always announce the right product name. 1780 */ 1781 static int 1782 bnx_probe(device_t dev) 1783 { 1784 const struct bnx_type *t; 1785 uint16_t product, vendor; 1786 1787 if (!pci_is_pcie(dev)) 1788 return ENXIO; 1789 1790 product = pci_get_device(dev); 1791 vendor = pci_get_vendor(dev); 1792 1793 for (t = bnx_devs; t->bnx_name != NULL; t++) { 1794 if (vendor == t->bnx_vid && product == t->bnx_did) 1795 break; 1796 } 1797 if (t->bnx_name == NULL) 1798 return ENXIO; 1799 1800 device_set_desc(dev, t->bnx_name); 1801 return 0; 1802 } 1803 1804 static int 1805 bnx_attach(device_t dev) 1806 { 1807 struct ifnet *ifp; 1808 struct bnx_softc *sc; 1809 struct bnx_rx_std_ring *std; 1810 uint32_t hwcfg = 0; 1811 int error = 0, rid, capmask, i, std_cpuid, std_cpuid_def; 1812 uint8_t ether_addr[ETHER_ADDR_LEN]; 1813 uint16_t product; 1814 uintptr_t mii_priv = 0; 1815 #if defined(BNX_TSO_DEBUG) || defined(BNX_RSS_DEBUG) || defined(BNX_TSS_DEBUG) 1816 char desc[32]; 1817 #endif 1818 #ifdef IFPOLL_ENABLE 1819 int offset, offset_def; 1820 #endif 1821 1822 sc = device_get_softc(dev); 1823 sc->bnx_dev = dev; 1824 callout_init_mp(&sc->bnx_tick_timer); 1825 lwkt_serialize_init(&sc->bnx_jslot_serializer); 1826 lwkt_serialize_init(&sc->bnx_main_serialize); 1827 1828 /* Always setup interrupt mailboxes */ 1829 for (i = 0; i < BNX_INTR_MAX; ++i) { 1830 callout_init_mp(&sc->bnx_intr_data[i].bnx_intr_timer); 1831 sc->bnx_intr_data[i].bnx_sc = sc; 1832 sc->bnx_intr_data[i].bnx_intr_mbx = BGE_MBX_IRQ0_LO + (i * 8); 1833 sc->bnx_intr_data[i].bnx_intr_rid = -1; 1834 sc->bnx_intr_data[i].bnx_intr_cpuid = -1; 1835 } 1836 1837 product = pci_get_device(dev); 1838 1839 
#ifndef BURN_BRIDGES
	/*
	 * If the firmware left the chip in a low-power state, restore D0.
	 * The IRQ line and BAR0 settings are saved across the transition
	 * because the power state change clobbers them.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->bnx_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return ENXIO;
	}

	sc->bnx_btag = rman_get_bustag(sc->bnx_res);
	sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);

	/* Save various chip information */
	sc->bnx_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
	    BGE_PCIMISCCTL_ASICREV_SHIFT;
	if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
		/* All chips having dedicated ASICREV register have CPMU */
		sc->bnx_flags |= BNX_FLAG_CPMU;

		/*
		 * The real ASIC revision lives in a product-family specific
		 * config register; pick it by PCI product ID.
		 */
		switch (product) {
		case PCI_PRODUCT_BROADCOM_BCM5717:
		case PCI_PRODUCT_BROADCOM_BCM5717C:
		case PCI_PRODUCT_BROADCOM_BCM5718:
		case PCI_PRODUCT_BROADCOM_BCM5719:
		case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
		case PCI_PRODUCT_BROADCOM_BCM5725:
		case PCI_PRODUCT_BROADCOM_BCM5727:
		case PCI_PRODUCT_BROADCOM_BCM5762:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_GEN2_PRODID_ASICREV, 4);
			break;

		case PCI_PRODUCT_BROADCOM_BCM57761:
		case PCI_PRODUCT_BROADCOM_BCM57762:
		case PCI_PRODUCT_BROADCOM_BCM57765:
		case PCI_PRODUCT_BROADCOM_BCM57766:
		case PCI_PRODUCT_BROADCOM_BCM57781:
		case PCI_PRODUCT_BROADCOM_BCM57782:
		case PCI_PRODUCT_BROADCOM_BCM57785:
		case PCI_PRODUCT_BROADCOM_BCM57786:
		case PCI_PRODUCT_BROADCOM_BCM57791:
		case PCI_PRODUCT_BROADCOM_BCM57795:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_GEN15_PRODID_ASICREV, 4);
			break;

		default:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_PRODID_ASICREV, 4);
			break;
		}
	}
	/* BCM5717 C0 is treated as BCM5720 A0 throughout the driver. */
	if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0)
		sc->bnx_chipid = BGE_CHIPID_BCM5720_A0;

	sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
	sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);

	/* Classify the chip into its feature families. */
	switch (sc->bnx_asicrev) {
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
		sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
		break;

	case BGE_ASICREV_BCM5762:
		sc->bnx_flags |= BNX_FLAG_57765_PLUS;
		break;

	case BGE_ASICREV_BCM57765:
	case BGE_ASICREV_BCM57766:
		sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
		break;
	}

	/* These chips carry an APE (Application Processing Engine). */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762)
		sc->bnx_flags |= BNX_FLAG_APE;

	/* TSO is supported on all chips except BCM5719 A0. */
	sc->bnx_flags |= BNX_FLAG_TSO;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
		sc->bnx_flags &= ~BNX_FLAG_TSO;

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    BNX_IS_57765_FAMILY(sc)) {
		/*
		 * All BCM57785 and BCM5718 families chips have a bug that
		 * under certain situation interrupt will not be enabled
		 * even if status tag is written to interrupt mailbox.
		 *
		 * While BCM5719 and BCM5720 have a hardware workaround
		 * which could fix the above bug.
		 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
		 * bnx_chipinit().
		 *
		 * For the rest of the chips in these two families, we will
		 * have to poll the status block at high rate (10ms currently)
		 * to check whether the interrupt is hosed or not.
		 * See bnx_check_intr_*() for details.
		 */
		sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
	}

	sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
	/* 5719/5720 cap PCIe max read request at 2KB; others take 4KB. */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5720)
		pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
	else
		pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
	device_printf(dev, "CHIP ID 0x%08x; "
	    "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
	    sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);

	/*
	 * Set various PHY quirk flags.
	 */

	capmask = MII_CAPMASK_DEFAULT;
	if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
	    product == PCI_PRODUCT_BROADCOM_BCM57795) {
		/* 10/100 only */
		capmask &= ~BMSR_EXTSTAT;
	}

	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0)
		mii_priv |= BRGPHY_FLAG_5762_A0;

	/* Initialize if_name earlier, so if_printf could be used */
	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Try to reset the chip.
	 */
	bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN);
	bnx_reset(sc);
	bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN);

	if (bnx_chipinit(sc)) {
		device_printf(dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address
	 */
	error = bnx_get_eaddr(sc, ether_addr);
	if (error) {
		device_printf(dev, "failed to read station address\n");
		goto fail;
	}

	/* Setup RX/TX and interrupt count */
	bnx_setup_ring_cnt(sc);

	if ((sc->bnx_rx_retcnt == 1 && sc->bnx_tx_ringcnt == 1) ||
	    (sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt > 1)) {
		/*
		 * The RX ring and the corresponding TX ring processing
		 * should be on the same CPU, since they share the same
		 * status block.
		 */
		sc->bnx_flags |= BNX_FLAG_RXTX_BUNDLE;
		if (bootverbose)
			device_printf(dev, "RX/TX bundle\n");
		if (sc->bnx_tx_ringcnt > 1) {
			/*
			 * Multiple TX rings do not share status block
			 * with link status, so link status will have
			 * to save its own status_tag.
			 */
			sc->bnx_flags |= BNX_FLAG_STATUS_HASTAG;
			if (bootverbose)
				device_printf(dev, "status needs tag\n");
		}
	} else {
		KKASSERT(sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt == 1);
		if (bootverbose)
			device_printf(dev, "RX/TX not bundled\n");
	}

	error = bnx_dma_alloc(dev);
	if (error)
		goto fail;

#ifdef IFPOLL_ENABLE
	if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
		/*
		 * NPOLLING RX/TX CPU offset
		 */
		if (sc->bnx_rx_retcnt == ncpus2) {
			offset = 0;
		} else {
			offset_def =
			    (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2;
			offset = device_getenv_int(dev, "npoll.offset",
			    offset_def);
			/* Offset must align to the ring count. */
			if (offset >= ncpus2 ||
			    offset % sc->bnx_rx_retcnt != 0) {
				device_printf(dev, "invalid npoll.offset %d, "
				    "use %d\n", offset, offset_def);
				offset = offset_def;
			}
		}
		sc->bnx_npoll_rxoff = offset;
		sc->bnx_npoll_txoff = offset;
	} else {
		/*
		 * NPOLLING RX CPU offset
		 */
		if (sc->bnx_rx_retcnt == ncpus2) {
			offset = 0;
		} else {
			offset_def =
			    (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2;
			offset = device_getenv_int(dev, "npoll.rxoff",
			    offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->bnx_rx_retcnt != 0) {
				device_printf(dev, "invalid npoll.rxoff %d, "
				    "use %d\n", offset, offset_def);
				offset = offset_def;
			}
		}
		sc->bnx_npoll_rxoff = offset;

		/*
		 * NPOLLING TX CPU offset
		 */
		offset_def = device_get_unit(dev) % ncpus2;
		offset = device_getenv_int(dev, "npoll.txoff", offset_def);
		if (offset >= ncpus2) {
			device_printf(dev, "invalid npoll.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
		sc->bnx_npoll_txoff = offset;
	}
#endif	/* IFPOLL_ENABLE */

	/*
	 * Allocate interrupt
	 */
	error = bnx_alloc_intr(sc);
	if (error)
		goto fail;

	/* Setup
serializers */
	bnx_setup_serialize(sc);

	/* Set default tuneable values. */
	sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
	sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
	sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
	sc->bnx_rx_coal_bds_poll = sc->bnx_rx_ret_ring[0].bnx_rx_cntmax;
	sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
	sc->bnx_tx_coal_bds_poll = BNX_TX_COAL_BDS_POLL_DEF;
	sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF;
	sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF;

	/* Set up ifnet structure */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_start = bnx_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = bnx_npoll;
#endif
	ifp->if_init = bnx_init;
	ifp->if_serialize = bnx_serialize;
	ifp->if_deserialize = bnx_deserialize;
	ifp->if_tryserialize = bnx_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = bnx_serialize_assert;
#endif
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_hwassist = BNX_CSUM_FEATURES;
	if (sc->bnx_flags & BNX_FLAG_TSO) {
		ifp->if_capabilities |= IFCAP_TSO;
		ifp->if_hwassist |= CSUM_TSO;
	}
	if (BNX_RSS_ENABLED(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt);

	if (sc->bnx_tx_ringcnt > 1) {
		ifp->if_mapsubq = ifq_mapsubq_mask;
		ifq_set_subq_mask(&ifp->if_snd, sc->bnx_tx_ringcnt - 1);
	}

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bnx_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) {
		hwcfg = bnx_readmem_ind(sc, BGE_SRAM_DATA_CFG);
	} else {
		if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg))) {
			device_printf(dev, "failed to read EEPROM\n");
			error = ENXIO;
			goto fail;
		}
		hwcfg = ntohl(hwcfg);
	}

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bnx_flags |= BNX_FLAG_TBI;

	/* Setup MI MODE */
	if (sc->bnx_flags & BNX_FLAG_CPMU)
		sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
	else
		sc->bnx_mi_mode = BGE_MIMODE_BASE;

	/* Setup link status update stuffs */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		sc->bnx_link_upd = bnx_tbi_link_upd;
		sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
	} else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		sc->bnx_link_upd = bnx_autopoll_link_upd;
		sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
	} else {
		sc->bnx_link_upd = bnx_copper_link_upd;
		sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
	}

	/* Set default PHY address */
	sc->bnx_phyno = 1;

	/*
	 * PHY address mapping for various devices.
	 *
	 *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
	 * ---------+-------+-------+-------+-------+
	 * BCM57XX  |   1   |   X   |   X   |   X   |
	 * BCM5704  |   1   |   X   |   1   |   X   |
	 * BCM5717  |   1   |   8   |   2   |   9   |
	 * BCM5719  |   1   |   8   |   2   |   9   |
	 * BCM5720  |   1   |   8   |   2   |   9   |
	 *
	 * Other addresses may respond but they are not
	 * IEEE compliant PHYs and should be ignored.
	 */
	if (BNX_IS_5717_PLUS(sc)) {
		int f;

		f = pci_get_function(dev);
		if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
			/* A0 reads the SERDES strap from SGDIG status. */
			if (CSR_READ_4(sc, BGE_SGDIG_STS) &
			    BGE_SGDIGSTS_IS_SERDES)
				sc->bnx_phyno = f + 8;
			else
				sc->bnx_phyno = f + 1;
		} else {
			if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
			    BGE_CPMU_PHY_STRAP_IS_SERDES)
				sc->bnx_phyno = f + 8;
			else
				sc->bnx_phyno = f + 1;
		}
	}

	if (sc->bnx_flags & BNX_FLAG_TBI) {
		/* Fiber (TBI): no MII PHY; manage media directly. */
		ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
		    bnx_ifmedia_upd, bnx_ifmedia_sts);
		ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bnx_ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
		sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
	} else {
		struct mii_probe_args mii_args;

		mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
		mii_args.mii_probemask = 1 << sc->bnx_phyno;
		mii_args.mii_capmask = capmask;
		mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
		mii_args.mii_priv = mii_priv;

		error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
		if (error) {
			device_printf(dev, "MII without any PHY!\n");
			goto fail;
		}
	}

	/*
	 * Create sysctl nodes.
	 */
	sysctl_ctx_init(&sc->bnx_sysctl_ctx);
	sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO,
	    device_get_nameunit(dev),
	    CTLFLAG_RD, 0, "");
	if (sc->bnx_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "rx_rings", CTLFLAG_RD, &sc->bnx_rx_retcnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "tx_rings", CTLFLAG_RD, &sc->bnx_tx_ringcnt, 0, "# of TX rings");

	/* Interrupt coalescing tuneables. */
	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
	    OID_AUTO, "rx_coal_ticks",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_rx_coal_ticks, "I",
	    "Receive coalescing ticks (usec).");
	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
	    OID_AUTO, "tx_coal_ticks",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_tx_coal_ticks, "I",
	    "Transmit coalescing ticks (usec).");
	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
	    OID_AUTO, "rx_coal_bds",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_rx_coal_bds, "I",
	    "Receive max coalesced BD count.");
	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
	    OID_AUTO, "rx_coal_bds_poll",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_rx_coal_bds_poll, "I",
	    "Receive max coalesced BD count in polling.");
	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
	    OID_AUTO, "tx_coal_bds",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_tx_coal_bds, "I",
	    "Transmit max coalesced BD count.");
	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
	    OID_AUTO, "tx_coal_bds_poll",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_tx_coal_bds_poll, "I",
	    "Transmit max coalesced BD count in polling.");
	/*
	 * A common design characteristic for many Broadcom
	 * client controllers is that they only support a
	 * single outstanding DMA read operation on the PCIe
	 * bus. This means that it will take twice as long to
	 * fetch a TX frame that is split into header and
	 * payload buffers as it does to fetch a single,
	 * contiguous TX frame (2 reads vs. 1 read). For these
	 * controllers, coalescing buffers to reduce the number
	 * of memory reads is effective way to get maximum
	 * performance(about 940Mbps). Without collapsing TX
	 * buffers the maximum TCP bulk transfer performance
	 * is about 850Mbps. However forcing coalescing mbufs
	 * consumes a lot of CPU cycles, so leave it off by
	 * default.
	 */
	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "force_defrag", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_force_defrag, "I",
	    "Force defragment on TX path");

	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "tx_wreg", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_tx_wreg, "I",
	    "# of segments before writing to hardware register");

	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "std_refill", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_std_refill, "I",
	    "# of packets received before scheduling standard refilling");

	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
	    "Receive max coalesced BD count during interrupt.");
	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
	    "Transmit max coalesced BD count during interrupt.");

#ifdef IFPOLL_ENABLE
	if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
		SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
		    "npoll_offset", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, bnx_sysctl_npoll_offset, "I",
		    "NPOLLING cpu offset");
	} else {
		SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
		    "npoll_rxoff", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, bnx_sysctl_npoll_rxoff, "I",
		    "NPOLLING RX cpu offset");
		SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
		    "npoll_txoff", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, bnx_sysctl_npoll_txoff, "I",
		    "NPOLLING TX cpu offset");
	}
#endif

#ifdef BNX_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "std_refill_mask", CTLFLAG_RD,
	    &sc->bnx_rx_std_ring.bnx_rx_std_refill, 0, "");
	SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "std_used", CTLFLAG_RD,
	    &sc->bnx_rx_std_ring.bnx_rx_std_used, 0, "");
	SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->bnx_rss_debug, 0, "");
	/* Per-RX-ring packet/scheduling counters. */
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		ksnprintf(desc, sizeof(desc), "rx_pkt%d", i);
		SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
		    desc, CTLFLAG_RW, &sc->bnx_rx_ret_ring[i].bnx_rx_pkt, "");

		ksnprintf(desc, sizeof(desc), "rx_force_sched%d", i);
		SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
		    desc, CTLFLAG_RW,
		    &sc->bnx_rx_ret_ring[i].bnx_rx_force_sched, "");
	}
#endif
#ifdef BNX_TSS_DEBUG
	/* Per-TX-ring packet counters. */
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		ksnprintf(desc, sizeof(desc), "tx_pkt%d", i);
		SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
		    desc, CTLFLAG_RW, &sc->bnx_tx_ring[i].bnx_tx_pkt, "");
	}
#endif

	SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "norxbds", CTLFLAG_RW, &sc->bnx_norxbds, "");

	SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "errors", CTLFLAG_RW, &sc->bnx_errors, "");

#ifdef BNX_TSO_DEBUG
	for (i = 0; i < BNX_TSO_NSTATS; ++i) {
		ksnprintf(desc, sizeof(desc), "tso%d", i + 1);
		SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
		    desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], "");
	}
#endif

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ether_addr, NULL);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		ifsq_set_cpuid(ifsq, txr->bnx_tx_cpuid);
		ifsq_set_hw_serialize(ifsq, &txr->bnx_tx_serialize);
		ifsq_set_priv(ifsq, txr);
		txr->bnx_ifsq = ifsq;

		ifsq_watchdog_init(&txr->bnx_tx_watchdog, ifsq, bnx_watchdog);

		if (bootverbose) {
			device_printf(dev, "txr %d -> cpu%d\n", i,
			    txr->bnx_tx_cpuid);
		}
	}

	error = bnx_setup_intr(sc);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}
	bnx_set_tick_cpuid(sc, FALSE);

	/*
	 * Create RX standard ring refilling thread
	 */
	std_cpuid_def = device_get_unit(dev) % ncpus;
	std_cpuid = device_getenv_int(dev, "std.cpuid", std_cpuid_def);
	if (std_cpuid < 0 || std_cpuid >= ncpus) {
		device_printf(dev, "invalid std.cpuid %d, use %d\n",
		    std_cpuid, std_cpuid_def);
		std_cpuid = std_cpuid_def;
	}

	std = &sc->bnx_rx_std_ring;
	lwkt_create(bnx_rx_std_refill_ithread, std, NULL,
	    &std->bnx_rx_std_ithread, TDF_NOSTART | TDF_INTTHREAD, std_cpuid,
	    "%s std", device_get_nameunit(dev));
	lwkt_setpri(&std->bnx_rx_std_ithread, TDPRI_INT_MED);
	std->bnx_rx_std_ithread.td_preemptable = lwkt_preempt;
	sc->bnx_flags |= BNX_FLAG_STD_THREAD;

	return(0);
fail:
	/* bnx_detach() releases whatever was allocated before the failure. */
	bnx_detach(dev);
	return(error);
}

/*
 * Detach routine: stop the interface, kill the refill thread and
 * release all resources.  Also used as the error path of bnx_attach().
 */
static int
bnx_detach(device_t dev)
{
	struct bnx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		bnx_stop(sc);
		bnx_teardown_intr(sc, sc->bnx_intr_cnt);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->bnx_flags & BNX_FLAG_STD_THREAD) {
		struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;

		/*
		 * Ask the refill ithread to terminate on its own CPU and
		 * wait for it; the interlock avoids missing the wakeup.
		 */
		tsleep_interlock(std, 0);

		if (std->bnx_rx_std_ithread.td_gd == mycpu) {
			bnx_rx_std_refill_stop(std);
		} else {
			lwkt_send_ipiq(std->bnx_rx_std_ithread.td_gd,
			    bnx_rx_std_refill_stop, std);
		}

		tsleep(std, PINTERLOCKED, "bnx_detach", 0);
		if (bootverbose)
			device_printf(dev, "RX std ithread exited\n");

		lwkt_synchronize_ipiqs("bnx_detach_ipiq");
	}

	if (sc->bnx_flags & BNX_FLAG_TBI)
		ifmedia_removeall(&sc->bnx_ifmedia);
	if (sc->bnx_miibus)
		device_delete_child(dev, sc->bnx_miibus);
	bus_generic_detach(dev);

	bnx_free_intr(sc);

	if (sc->bnx_msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->bnx_msix_mem_rid,
		    sc->bnx_msix_mem_res);
	}
	if (sc->bnx_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bnx_res);
	}

	if (sc->bnx_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->bnx_sysctl_ctx);

	bnx_dma_free(sc);

	if (sc->bnx_serialize != NULL)
		kfree(sc->bnx_serialize, M_DEVBUF);

	return 0;
}

/*
 * Issue a global chip reset and restore the PCI/MAC state that the
 * reset clobbers, then wait for the bootcode firmware handshake.
 */
static void
bnx_reset(struct bnx_softc *sc)
{
	device_t dev = sc->bnx_dev;
	uint32_t cachesize, command, reset, mac_mode, mac_mode_mask;
	void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
	int i, val = 0;
	uint16_t devctl;

	/* Preserve the port mode/duplex bits across the reset. */
	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;

	write_op = bnx_writemem_direct;

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
	    BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/* Disable fastboot on controllers that support it. */
	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
	CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);

	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
	 */
	bnx_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);

	/* XXX: Broadcom Linux driver. */
	/* Force PCI-E 1.0a mode */
	if (!BNX_IS_57765_PLUS(sc) &&
	    CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
	    (BGE_PCIE_PHY_TSTCTL_PSCRAM |
	     BGE_PCIE_PHY_TSTCTL_PCIE10)) {
		CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
		    BGE_PCIE_PHY_TSTCTL_PSCRAM);
	}
	if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
		/* Prevent PCIE link training during global reset */
		CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
		reset |= (1<<29);
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	DELAY(100 * 1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		DELAY(500000); /* wait for link training to complete */
		v = pci_read_config(dev, 0xc4, 4);
		pci_write_config(dev, 0xc4, v | (1<<15), 4);
	}

	devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);

	/* Disable no snoop and disable relaxed ordering. */
	devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);

	/* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
	if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
		devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
		devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
	}

	pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
	    devctl, 2);

	/* Clear error status. */
	pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
	    PCIEM_DEVSTS_CORR_ERR |
	    PCIEM_DEVSTS_NFATAL_ERR |
	    PCIEM_DEVSTS_FATAL_ERR |
	    PCIEM_DEVSTS_UNSUPP_REQ, 2);

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
	    BGE_PCIMISCCTL_TAGGED_STATUS, 4);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);

	/* Enable memory arbiter */
	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));

	/* Restore the saved MAC port mode/duplex bits. */
	val = CSR_READ_4(sc, BGE_MAC_MODE);
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	/*
	 * Poll until we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization is complete.
	 */
	for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
		val = bnx_readmem_ind(sc, BGE_SRAM_FW_MB);
		if (val == ~BGE_SRAM_FW_MB_MAGIC)
			break;
		DELAY(10);
	}
	if (i == BNX_FIRMWARE_TIMEOUT) {
		/* Non-fatal: proceed anyway, just like the Linux driver. */
		if_printf(&sc->arpcom.ac_if, "firmware handshake "
		    "timed out, found 0x%08x\n", val);
	}

	/* BCM57765 A0 needs additional time before accessing. */
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		DELAY(10 * 1000);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to insure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
	    (sc->bnx_flags & BNX_FLAG_TBI)) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	CSR_WRITE_4(sc, BGE_MI_MODE,
	    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
	DELAY(80);

	/* XXX: Broadcom Linux driver. */
	if (!BNX_IS_57765_PLUS(sc)) {
		uint32_t v;

		/* Enable Data FIFO protection. */
		v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
		CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
	}

	DELAY(10000);

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

static void
bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count)
{
	struct bnx_softc *sc = ret->bnx_sc;
	struct bnx_rx_std_ring *std = ret->bnx_std;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int std_used = 0;	/* std ring buffers consumed in this call */

	/* Process at most 'count' descriptors up to the producer index. */
	while (ret->bnx_rx_saved_considx != rx_prod && count != 0) {
		struct pktinfo pi0, *pi = NULL;
		struct bge_rx_bd *cur_rx;
		struct bnx_rx_buf *rb;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

		--count;

		cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		KKASSERT(rxidx < BGE_STD_RX_RING_CNT);

		BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT);
#ifdef BNX_RSS_DEBUG
		ret->bnx_rx_pkt++;
#endif

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		/*
		 * Once enough buffers have been consumed, publish the
		 * count and kick the standard-ring refill thread.
		 */
		if (ret->bnx_rx_cnt >= ret->bnx_rx_cntmax) {
			atomic_add_int(&std->bnx_rx_std_used, std_used);
			std_used = 0;

			bnx_rx_std_refill_sched(ret, std);
		}
		ret->bnx_rx_cnt++;
		++std_used;

		rb = &std->bnx_rx_std_buf[rxidx];
		m = rb->bnx_rx_mbuf;
		if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
			/* Hardware flagged the frame bad; recycle the mbuf. */
			IFNET_STAT_INC(ifp, ierrors, 1);
			cpu_sfence();
			rb->bnx_rx_refilled = 1;
			continue;
		}
		if (bnx_newbuf_std(ret, rxidx, 0)) {
			/* Could not replace the mbuf; drop the frame. */
			IFNET_STAT_INC(ifp, ierrors, 1);
			continue;
		}

		IFNET_STAT_INC(ifp, ipackets, 1);
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		/* Propagate hardware checksum results (IPv4 only). */
		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_error_flag &
				     BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
			}
		}
		if (ifp->if_capenable & IFCAP_RSS) {
			pi = bnx_rss_info(&pi0, cur_rx);
			if (pi != NULL &&
			    (cur_rx->bge_flags & BGE_RXBDFLAG_RSS_HASH)) {
				m->m_flags |= M_HASH;
				m->m_pkthdr.hash =
				    toeplitz_hash(cur_rx->bge_hash);
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
2828 */ 2829 if (have_tag) { 2830 m->m_flags |= M_VLANTAG; 2831 m->m_pkthdr.ether_vlantag = vlan_tag; 2832 } 2833 ether_input_pkt(ifp, m, pi); 2834 } 2835 bnx_writembx(sc, ret->bnx_rx_mbx, ret->bnx_rx_saved_considx); 2836 2837 if (std_used > 0) { 2838 int cur_std_used; 2839 2840 cur_std_used = atomic_fetchadd_int(&std->bnx_rx_std_used, 2841 std_used); 2842 if (cur_std_used + std_used >= (BGE_STD_RX_RING_CNT / 2)) { 2843 #ifdef BNX_RSS_DEBUG 2844 ret->bnx_rx_force_sched++; 2845 #endif 2846 bnx_rx_std_refill_sched(ret, std); 2847 } 2848 } 2849 } 2850 2851 static void 2852 bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons) 2853 { 2854 struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if; 2855 2856 /* 2857 * Go through our tx ring and free mbufs for those 2858 * frames that have been sent. 2859 */ 2860 while (txr->bnx_tx_saved_considx != tx_cons) { 2861 struct bnx_tx_buf *buf; 2862 uint32_t idx = 0; 2863 2864 idx = txr->bnx_tx_saved_considx; 2865 buf = &txr->bnx_tx_buf[idx]; 2866 if (buf->bnx_tx_mbuf != NULL) { 2867 IFNET_STAT_INC(ifp, opackets, 1); 2868 #ifdef BNX_TSS_DEBUG 2869 txr->bnx_tx_pkt++; 2870 #endif 2871 bus_dmamap_unload(txr->bnx_tx_mtag, 2872 buf->bnx_tx_dmamap); 2873 m_freem(buf->bnx_tx_mbuf); 2874 buf->bnx_tx_mbuf = NULL; 2875 } 2876 txr->bnx_tx_cnt--; 2877 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT); 2878 } 2879 2880 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >= 2881 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) 2882 ifsq_clr_oactive(txr->bnx_ifsq); 2883 2884 if (txr->bnx_tx_cnt == 0) 2885 txr->bnx_tx_watchdog.wd_timer = 0; 2886 2887 if (!ifsq_is_empty(txr->bnx_ifsq)) 2888 ifsq_devstart(txr->bnx_ifsq); 2889 } 2890 2891 static int 2892 bnx_handle_status(struct bnx_softc *sc) 2893 { 2894 uint32_t status; 2895 int handle = 0; 2896 2897 status = *sc->bnx_hw_status; 2898 2899 if (status & BGE_STATFLAG_ERROR) { 2900 uint32_t val; 2901 int reset = 0; 2902 2903 sc->bnx_errors++; 2904 2905 val = CSR_READ_4(sc, BGE_FLOW_ATTN); 2906 if (val & ~BGE_FLOWATTN_MB_LOWAT) { 2907 
if_printf(&sc->arpcom.ac_if, 2908 "flow attn 0x%08x\n", val); 2909 reset = 1; 2910 } 2911 2912 val = CSR_READ_4(sc, BGE_MSI_STATUS); 2913 if (val & ~BGE_MSISTAT_MSI_PCI_REQ) { 2914 if_printf(&sc->arpcom.ac_if, 2915 "msi status 0x%08x\n", val); 2916 reset = 1; 2917 } 2918 2919 val = CSR_READ_4(sc, BGE_RDMA_STATUS); 2920 if (val) { 2921 if_printf(&sc->arpcom.ac_if, 2922 "rmda status 0x%08x\n", val); 2923 reset = 1; 2924 } 2925 2926 val = CSR_READ_4(sc, BGE_WDMA_STATUS); 2927 if (val) { 2928 if_printf(&sc->arpcom.ac_if, 2929 "wdma status 0x%08x\n", val); 2930 reset = 1; 2931 } 2932 2933 if (reset) { 2934 bnx_serialize_skipmain(sc); 2935 bnx_init(sc); 2936 bnx_deserialize_skipmain(sc); 2937 } 2938 handle = 1; 2939 } 2940 2941 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt) { 2942 if (bootverbose) { 2943 if_printf(&sc->arpcom.ac_if, "link change, " 2944 "link_evt %d\n", sc->bnx_link_evt); 2945 } 2946 bnx_link_poll(sc); 2947 handle = 1; 2948 } 2949 2950 return handle; 2951 } 2952 2953 #ifdef IFPOLL_ENABLE 2954 2955 static void 2956 bnx_npoll_rx(struct ifnet *ifp __unused, void *xret, int cycle) 2957 { 2958 struct bnx_rx_ret_ring *ret = xret; 2959 uint16_t rx_prod; 2960 2961 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 2962 2963 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 2964 cpu_lfence(); 2965 2966 rx_prod = *ret->bnx_rx_considx; 2967 if (ret->bnx_rx_saved_considx != rx_prod) 2968 bnx_rxeof(ret, rx_prod, cycle); 2969 } 2970 2971 static void 2972 bnx_npoll_tx_notag(struct ifnet *ifp __unused, void *xtxr, int cycle __unused) 2973 { 2974 struct bnx_tx_ring *txr = xtxr; 2975 uint16_t tx_cons; 2976 2977 ASSERT_SERIALIZED(&txr->bnx_tx_serialize); 2978 2979 tx_cons = *txr->bnx_tx_considx; 2980 if (txr->bnx_tx_saved_considx != tx_cons) 2981 bnx_txeof(txr, tx_cons); 2982 } 2983 2984 static void 2985 bnx_npoll_tx(struct ifnet *ifp, void *xtxr, int cycle) 2986 { 2987 struct bnx_tx_ring *txr = xtxr; 2988 2989 ASSERT_SERIALIZED(&txr->bnx_tx_serialize); 
2990 2991 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag; 2992 cpu_lfence(); 2993 bnx_npoll_tx_notag(ifp, txr, cycle); 2994 } 2995 2996 static void 2997 bnx_npoll_status_notag(struct ifnet *ifp) 2998 { 2999 struct bnx_softc *sc = ifp->if_softc; 3000 3001 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3002 3003 if (bnx_handle_status(sc)) { 3004 /* 3005 * Status changes are handled; force the chip to 3006 * update the status block to reflect whether there 3007 * are more status changes or not, else staled status 3008 * changes are always seen. 3009 */ 3010 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3011 } 3012 } 3013 3014 static void 3015 bnx_npoll_status(struct ifnet *ifp) 3016 { 3017 struct bnx_softc *sc = ifp->if_softc; 3018 3019 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3020 3021 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag; 3022 cpu_lfence(); 3023 bnx_npoll_status_notag(ifp); 3024 } 3025 3026 static void 3027 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3028 { 3029 struct bnx_softc *sc = ifp->if_softc; 3030 int i; 3031 3032 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3033 3034 if (info != NULL) { 3035 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) 3036 info->ifpi_status.status_func = bnx_npoll_status; 3037 else 3038 info->ifpi_status.status_func = bnx_npoll_status_notag; 3039 info->ifpi_status.serializer = &sc->bnx_main_serialize; 3040 3041 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3042 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3043 int idx = i + sc->bnx_npoll_txoff; 3044 3045 KKASSERT(idx < ncpus2); 3046 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 3047 info->ifpi_tx[idx].poll_func = 3048 bnx_npoll_tx_notag; 3049 } else { 3050 info->ifpi_tx[idx].poll_func = bnx_npoll_tx; 3051 } 3052 info->ifpi_tx[idx].arg = txr; 3053 info->ifpi_tx[idx].serializer = &txr->bnx_tx_serialize; 3054 ifsq_set_cpuid(txr->bnx_ifsq, idx); 3055 } 3056 3057 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 3058 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 3059 int idx 
= i + sc->bnx_npoll_rxoff; 3060 3061 KKASSERT(idx < ncpus2); 3062 info->ifpi_rx[idx].poll_func = bnx_npoll_rx; 3063 info->ifpi_rx[idx].arg = ret; 3064 info->ifpi_rx[idx].serializer = 3065 &ret->bnx_rx_ret_serialize; 3066 } 3067 3068 if (ifp->if_flags & IFF_RUNNING) { 3069 bnx_disable_intr(sc); 3070 bnx_set_tick_cpuid(sc, TRUE); 3071 3072 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG | 3073 BNX_RX_COAL_BDS_CHG; 3074 bnx_coal_change(sc); 3075 } 3076 } else { 3077 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3078 ifsq_set_cpuid(sc->bnx_tx_ring[i].bnx_ifsq, 3079 sc->bnx_tx_ring[i].bnx_tx_cpuid); 3080 } 3081 if (ifp->if_flags & IFF_RUNNING) { 3082 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG | 3083 BNX_RX_COAL_BDS_CHG; 3084 bnx_coal_change(sc); 3085 3086 bnx_enable_intr(sc); 3087 bnx_set_tick_cpuid(sc, FALSE); 3088 } 3089 } 3090 } 3091 3092 #endif /* IFPOLL_ENABLE */ 3093 3094 static void 3095 bnx_intr_legacy(void *xsc) 3096 { 3097 struct bnx_softc *sc = xsc; 3098 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 3099 3100 if (ret->bnx_saved_status_tag == *ret->bnx_hw_status_tag) { 3101 uint32_t val; 3102 3103 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4); 3104 if (val & BGE_PCISTAT_INTR_NOTACT) 3105 return; 3106 } 3107 3108 /* 3109 * NOTE: 3110 * Interrupt will have to be disabled if tagged status 3111 * is used, else interrupt will always be asserted on 3112 * certain chips (at least on BCM5750 AX/BX). 3113 */ 3114 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3115 3116 bnx_intr(sc); 3117 } 3118 3119 static void 3120 bnx_msi(void *xsc) 3121 { 3122 bnx_intr(xsc); 3123 } 3124 3125 static void 3126 bnx_intr(struct bnx_softc *sc) 3127 { 3128 struct ifnet *ifp = &sc->arpcom.ac_if; 3129 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 3130 3131 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3132 3133 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3134 /* 3135 * Use a load fence to ensure that status_tag is saved 3136 * before rx_prod, tx_cons and status. 
3137 */ 3138 cpu_lfence(); 3139 3140 bnx_handle_status(sc); 3141 3142 if (ifp->if_flags & IFF_RUNNING) { 3143 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 3144 uint16_t rx_prod, tx_cons; 3145 3146 lwkt_serialize_enter(&ret->bnx_rx_ret_serialize); 3147 rx_prod = *ret->bnx_rx_considx; 3148 if (ret->bnx_rx_saved_considx != rx_prod) 3149 bnx_rxeof(ret, rx_prod, -1); 3150 lwkt_serialize_exit(&ret->bnx_rx_ret_serialize); 3151 3152 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3153 tx_cons = *txr->bnx_tx_considx; 3154 if (txr->bnx_tx_saved_considx != tx_cons) 3155 bnx_txeof(txr, tx_cons); 3156 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3157 } 3158 3159 bnx_writembx(sc, BGE_MBX_IRQ0_LO, ret->bnx_saved_status_tag << 24); 3160 } 3161 3162 static void 3163 bnx_msix_tx_status(void *xtxr) 3164 { 3165 struct bnx_tx_ring *txr = xtxr; 3166 struct bnx_softc *sc = txr->bnx_sc; 3167 struct ifnet *ifp = &sc->arpcom.ac_if; 3168 3169 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3170 3171 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag; 3172 /* 3173 * Use a load fence to ensure that status_tag is saved 3174 * before tx_cons and status. 3175 */ 3176 cpu_lfence(); 3177 3178 bnx_handle_status(sc); 3179 3180 if (ifp->if_flags & IFF_RUNNING) { 3181 uint16_t tx_cons; 3182 3183 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3184 tx_cons = *txr->bnx_tx_considx; 3185 if (txr->bnx_tx_saved_considx != tx_cons) 3186 bnx_txeof(txr, tx_cons); 3187 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3188 } 3189 3190 bnx_writembx(sc, BGE_MBX_IRQ0_LO, txr->bnx_saved_status_tag << 24); 3191 } 3192 3193 static void 3194 bnx_msix_rx(void *xret) 3195 { 3196 struct bnx_rx_ret_ring *ret = xret; 3197 uint16_t rx_prod; 3198 3199 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3200 3201 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3202 /* 3203 * Use a load fence to ensure that status_tag is saved 3204 * before rx_prod. 
3205 */ 3206 cpu_lfence(); 3207 3208 rx_prod = *ret->bnx_rx_considx; 3209 if (ret->bnx_rx_saved_considx != rx_prod) 3210 bnx_rxeof(ret, rx_prod, -1); 3211 3212 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx, 3213 ret->bnx_saved_status_tag << 24); 3214 } 3215 3216 static void 3217 bnx_msix_rxtx(void *xret) 3218 { 3219 struct bnx_rx_ret_ring *ret = xret; 3220 struct bnx_tx_ring *txr = ret->bnx_txr; 3221 uint16_t rx_prod, tx_cons; 3222 3223 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3224 3225 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3226 /* 3227 * Use a load fence to ensure that status_tag is saved 3228 * before rx_prod and tx_cons. 3229 */ 3230 cpu_lfence(); 3231 3232 rx_prod = *ret->bnx_rx_considx; 3233 if (ret->bnx_rx_saved_considx != rx_prod) 3234 bnx_rxeof(ret, rx_prod, -1); 3235 3236 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3237 tx_cons = *txr->bnx_tx_considx; 3238 if (txr->bnx_tx_saved_considx != tx_cons) 3239 bnx_txeof(txr, tx_cons); 3240 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3241 3242 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx, 3243 ret->bnx_saved_status_tag << 24); 3244 } 3245 3246 static void 3247 bnx_msix_status(void *xsc) 3248 { 3249 struct bnx_softc *sc = xsc; 3250 3251 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3252 3253 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag; 3254 /* 3255 * Use a load fence to ensure that status_tag is saved 3256 * before status. 3257 */ 3258 cpu_lfence(); 3259 3260 bnx_handle_status(sc); 3261 3262 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_saved_status_tag << 24); 3263 } 3264 3265 static void 3266 bnx_tick(void *xsc) 3267 { 3268 struct bnx_softc *sc = xsc; 3269 3270 lwkt_serialize_enter(&sc->bnx_main_serialize); 3271 3272 bnx_stats_update_regs(sc); 3273 3274 if (sc->bnx_flags & BNX_FLAG_TBI) { 3275 /* 3276 * Since in TBI mode auto-polling can't be used we should poll 3277 * link status manually. Here we register pending link event 3278 * and trigger interrupt. 
3279 */ 3280 sc->bnx_link_evt++; 3281 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3282 } else if (!sc->bnx_link) { 3283 mii_tick(device_get_softc(sc->bnx_miibus)); 3284 } 3285 3286 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc, 3287 sc->bnx_tick_cpuid); 3288 3289 lwkt_serialize_exit(&sc->bnx_main_serialize); 3290 } 3291 3292 static void 3293 bnx_stats_update_regs(struct bnx_softc *sc) 3294 { 3295 struct ifnet *ifp = &sc->arpcom.ac_if; 3296 struct bge_mac_stats_regs stats; 3297 uint32_t *s, val; 3298 int i; 3299 3300 s = (uint32_t *)&stats; 3301 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 3302 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 3303 s++; 3304 } 3305 3306 IFNET_STAT_SET(ifp, collisions, 3307 (stats.dot3StatsSingleCollisionFrames + 3308 stats.dot3StatsMultipleCollisionFrames + 3309 stats.dot3StatsExcessiveCollisions + 3310 stats.dot3StatsLateCollisions)); 3311 3312 val = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 3313 sc->bnx_norxbds += val; 3314 } 3315 3316 /* 3317 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3318 * pointers to descriptors. 
 */
static int
bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
    int *segs_used)
{
	struct bge_tx_bd *d = NULL;
	uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
	bus_dma_segment_t segs[BNX_NSEG_NEW];
	bus_dmamap_t map;
	int error, maxsegs, nsegs, idx, i;
	struct mbuf *m_head = *m_head0, *m_new;

	/*
	 * Translate the mbuf's offload requests (TSO, IP/TCP/UDP csum,
	 * fragmentation, VLAN tag) into descriptor flag bits first.
	 * On error the mbuf chain is freed and *m_head0 set to NULL.
	 */
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
#ifdef BNX_TSO_DEBUG
		int tso_nsegs;
#endif

		error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
		if (error)
			return error;
		m_head = *m_head0;

#ifdef BNX_TSO_DEBUG
		tso_nsegs = (m_head->m_pkthdr.len /
		    m_head->m_pkthdr.tso_segsz) - 1;
		if (tso_nsegs > (BNX_TSO_NSTATS - 1))
			tso_nsegs = BNX_TSO_NSTATS - 1;
		else if (tso_nsegs < 0)
			tso_nsegs = 0;
		txr->bnx_sc->bnx_tsosegs[tso_nsegs]++;
#endif
	} else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}
	if (m_head->m_flags & M_VLANTAG) {
		csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
		vlan_tag = m_head->m_pkthdr.ether_vlantag;
	}

	idx = *txidx;
	map = txr->bnx_tx_buf[idx].bnx_tx_dmamap;

	/* Leave BNX_NSEG_RSVD descriptors in reserve for other frames. */
	maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD;
	KASSERT(maxsegs >= BNX_NSEG_SPARE,
	    ("not enough segments %d", maxsegs));

	if (maxsegs > BNX_NSEG_NEW)
		maxsegs = BNX_NSEG_NEW;

	/*
	 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
	 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
	 * but when such padded frames employ the bge IP/TCP checksum
	 * offload, the hardware checksum assist gives incorrect results
	 * (possibly from incorporating its own padding into the UDP/TCP
	 * checksum; who knows). If we pad such runts with zeros, the
	 * onboard checksum comes out correct.
	 */
	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
	    m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
		error = m_devpad(m_head, BNX_MIN_FRAMELEN);
		if (error)
			goto back;
	}

	/* Some chips cannot DMA short multi-mbuf chains; coalesce. */
	if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) &&
	    m_head->m_next != NULL) {
		m_new = bnx_defrag_shortdma(m_head);
		if (m_new == NULL) {
			error = ENOBUFS;
			goto back;
		}
		*m_head0 = m_head = m_new;
	}
	if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
	    (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) &&
	    m_head->m_next != NULL) {
		/*
		 * Forcefully defragment mbuf chain to overcome hardware
		 * limitation which only support a single outstanding
		 * DMA read operation. If it fails, keep moving on using
		 * the original mbuf chain.
		 */
		m_new = m_defrag(m_head, MB_DONTWAIT);
		if (m_new != NULL)
			*m_head0 = m_head = m_new;
	}

	error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
	    m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	*segs_used += nsegs;

	m_head = *m_head0;
	bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);

	/* Fill one descriptor per DMA segment. */
	for (i = 0; ; i++) {
		d = &txr->bnx_tx_ring[idx];

		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
		d->bge_len = segs[i].ds_len;
		d->bge_flags = csum_flags;
		d->bge_vlan_tag = vlan_tag;
		d->bge_mss = mss;

		if (i == nsegs - 1)
			break;
		BNX_INC(idx, BGE_TX_RING_CNT);
	}
	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/*
	 * Insure that the map for this transmission is placed at
	 * the array index of the last descriptor in this chain.
	 */
	txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
	txr->bnx_tx_buf[idx].bnx_tx_dmamap = map;
	txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head;
	txr->bnx_tx_cnt += nsegs;

	BNX_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;
back:
	if (error) {
		/* Caller must not touch the chain again on failure. */
		m_freem(*m_head0);
		*m_head0 = NULL;
	}
	return error;
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct bnx_tx_ring *txr = ifsq_get_priv(ifsq);
	struct mbuf *m_head = NULL;
	uint32_t prodidx;
	int nsegs = 0;

	KKASSERT(txr->bnx_ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->bnx_tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	prodidx = txr->bnx_tx_prodidx;

	/* Loop while the producer slot is free. */
	while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) {
		/*
		 * Sanity check: avoid coming within BGE_NSEG_RSVD
		 * descriptors of the end of the ring. Also make
		 * sure there are BGE_NSEG_SPARE descriptors for
		 * jumbo buffers' or TSO segments' defragmentation.
		 */
		if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) <
		    (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
			ifsq_set_oactive(ifsq);
			break;
		}

		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
			ifsq_set_oactive(ifsq);
			IFNET_STAT_INC(ifp, oerrors, 1);
			break;
		}

		/* Batch doorbell writes: kick only every bnx_tx_wreg segs. */
		if (nsegs >= txr->bnx_tx_wreg) {
			/* Transmit */
			bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
			nsegs = 0;
		}

		ETHER_BPF_MTAP(ifp, m_head);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		txr->bnx_tx_watchdog.wd_timer = 5;
	}

	if (nsegs > 0) {
		/* Transmit */
		bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
	}
	txr->bnx_tx_prodidx = prodidx;
}

/*
 * (Re)initialize the chip and bring the interface up.  Called with
 * all serializers held; also used as the SIOCSIFMTU/error-recovery
 * reinit path.
 */
static void
bnx_init(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t *m;
	uint32_t mode;
	int i;
	boolean_t polling;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/* Cancel pending I/O and flush buffers. */
	bnx_stop(sc);

	bnx_sig_pre_reset(sc, BNX_RESET_START);
	bnx_reset(sc);
	bnx_sig_post_reset(sc, BNX_RESET_START);

	bnx_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bnx_blockinit(sc)) {
		if_printf(ifp, "initialization failure\n");
		bnx_stop(sc);
		return;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);

	/* Load our MAC address. */
	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	bnx_setpromisc(sc);

	/* Program multicast filter. */
	bnx_setmulti(sc);

	/* Init RX ring. */
	if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) {
		if_printf(ifp, "RX ring initialization failed\n");
		bnx_stop(sc);
		return;
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
		if (bnx_init_rx_ring_jumbo(sc)) {
			if_printf(ifp, "Jumbo RX ring initialization failed\n");
			bnx_stop(sc);
			return;
		}
	}

	/* Init our RX return ring index */
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];

		ret->bnx_rx_saved_considx = 0;
		ret->bnx_rx_cnt = 0;
	}

	/* Init TX ring. */
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
		bnx_init_tx_ring(&sc->bnx_tx_ring[i]);

	/* Enable TX MAC state machine lockup fix. */
	mode = CSR_READ_4(sc, BGE_TX_MODE);
	mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		/* Preserve chip-set bits on 5720/5762. */
		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
	}
	/* Turn on transmitter */
	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
	DELAY(100);

	/* Initialize RSS */
	mode = BGE_RXMODE_ENABLE;
	if (BNX_RSS_ENABLED(sc)) {
		bnx_init_rss(sc);
		mode |= BGE_RXMODE_RSS_ENABLE |
		    BGE_RXMODE_RSS_HASH_MASK_BITS |
		    BGE_RXMODE_RSS_IPV4_HASH |
		    BGE_RXMODE_RSS_TCP_IPV4_HASH;
	}
	/* Turn on receiver */
	BNX_SETBIT(sc, BGE_RX_MODE, mode);
	DELAY(10);

	/*
	 * Set the number of good frames to receive after RX MBUF
	 * Low Watermark has been reached. After the RX MAC receives
	 * this number of frames, it will drop subsequent incoming
	 * frames until the MBUF High Watermark is reached.
	 */
	if (BNX_IS_57765_FAMILY(sc))
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
	else
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI ||
	    sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) {
		if (bootverbose) {
			if_printf(ifp, "MSI_MODE: %#x\n",
			    CSR_READ_4(sc, BGE_MSI_MODE));
		}
	}

	/* Tell firmware we're alive. */
	BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts if polling(4) is not enabled. */
	PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	if (polling)
		bnx_disable_intr(sc);
	else
		bnx_enable_intr(sc);
	bnx_set_tick_cpuid(sc, polling);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		ifsq_clr_oactive(txr->bnx_ifsq);
		ifsq_watchdog_start(&txr->bnx_tx_watchdog);
	}

	bnx_ifmedia_upd(ifp);

	callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
	    sc->bnx_tick_cpuid);
}

/*
 * Set media options.
 */
static int
bnx_ifmedia_upd(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		struct ifmedia *ifm = &sc->bnx_ifmedia;

		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return(EINVAL);

		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			break;

		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BNX_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BNX_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return(EINVAL);
		}
	} else {
		struct mii_data *mii = device_get_softc(sc->bnx_miibus);

		sc->bnx_link_evt++;
		sc->bnx_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);

		/*
		 * Force an interrupt so that we will call bnx_link_upd
		 * if needed and clear any pending link state attention.
		 * Without this we are not getting any further interrupts
		 * for link state changes and thus will not UP the link and
		 * not be able to send in bnx_start. The only way to get
		 * things working was to receive a packet and get an RX
		 * intr.
		 *
		 * bnx_tick should help for fiber cards and we might not
		 * need to do this here if BNX_FLAG_TBI is set but as
		 * we poll for fiber anyway it should not harm.
		 */
		BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
	}
	return(0);
}

/*
 * Report current media status.
 */
static void
bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bnx_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	if (sc->bnx_flags & BNX_FLAG_TBI) {
		/* Fiber (TBI): read link state straight from the MAC. */
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			ifmr->ifm_status |= IFM_ACTIVE;
		} else {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
	} else {
		/* Copper: defer to the PHY via the MII layer. */
		struct mii_data *mii = device_get_softc(sc->bnx_miibus);

		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
}

/*
 * Handle interface ioctls: MTU, flags, multicast, media and
 * capability changes.  Called with all serializers held.
 */
static int
bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
		    (BNX_IS_JUMBO_CAPABLE(sc) &&
		     ifr->ifr_mtu > BNX_JUMBO_MTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			/* MTU change requires a full reinit. */
			if (ifp->if_flags & IFF_RUNNING)
				bnx_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				mask = ifp->if_flags ^ sc->bnx_if_flags;

				/*
				 * If only the state of the PROMISC flag
				 * changed, then just use the 'set promisc
				 * mode' command instead of reinitializing
				 * the entire NIC. Doing a full re-init
				 * means reloading the firmware and waiting
				 * for it to start up, which may take a
				 * second or two.  Similarly for ALLMULTI.
				 */
				if (mask & IFF_PROMISC)
					bnx_setpromisc(sc);
				if (mask & IFF_ALLMULTI)
					bnx_setmulti(sc);
			} else {
				bnx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			bnx_stop(sc);
		}
		sc->bnx_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bnx_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bnx_flags & BNX_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bnx_ifmedia, command);
		} else {
			struct mii_data *mii;

			mii = device_get_softc(sc->bnx_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		/* Toggle offload capabilities and sync if_hwassist. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= BNX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= (mask & IFCAP_TSO);
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

/*
 * TX watchdog: the chip stopped making transmit progress; reset it
 * and restart all TX queues.
 */
static void
bnx_watchdog(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct bnx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if_printf(ifp, "watchdog timeout -- resetting\n");

	bnx_init(sc);

	IFNET_STAT_INC(ifp, oerrors, 1);

	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
		ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX
lists. 3890 */ 3891 static void 3892 bnx_stop(struct bnx_softc *sc) 3893 { 3894 struct ifnet *ifp = &sc->arpcom.ac_if; 3895 int i; 3896 3897 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3898 3899 callout_stop(&sc->bnx_tick_timer); 3900 3901 /* Disable host interrupts. */ 3902 bnx_disable_intr(sc); 3903 3904 /* 3905 * Tell firmware we're shutting down. 3906 */ 3907 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN); 3908 3909 /* 3910 * Disable all of the receiver blocks 3911 */ 3912 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3913 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3914 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3915 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3916 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3917 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3918 3919 /* 3920 * Disable all of the transmit blocks 3921 */ 3922 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3923 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3924 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3925 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3926 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3927 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3928 3929 /* 3930 * Shut down all of the memory managers and related 3931 * state machines. 3932 */ 3933 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3934 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3935 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3936 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3937 3938 bnx_reset(sc); 3939 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN); 3940 3941 /* 3942 * Tell firmware we're shutting down. 3943 */ 3944 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3945 3946 /* Free the RX lists. */ 3947 bnx_free_rx_ring_std(&sc->bnx_rx_std_ring); 3948 3949 /* Free jumbo RX list. */ 3950 if (BNX_IS_JUMBO_CAPABLE(sc)) 3951 bnx_free_rx_ring_jumbo(sc); 3952 3953 /* Free TX buffers. 
*/ 3954 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3955 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3956 3957 txr->bnx_saved_status_tag = 0; 3958 bnx_free_tx_ring(txr); 3959 } 3960 3961 /* Clear saved status tag */ 3962 for (i = 0; i < sc->bnx_rx_retcnt; ++i) 3963 sc->bnx_rx_ret_ring[i].bnx_saved_status_tag = 0; 3964 3965 sc->bnx_link = 0; 3966 sc->bnx_coal_chg = 0; 3967 3968 ifp->if_flags &= ~IFF_RUNNING; 3969 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3970 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3971 3972 ifsq_clr_oactive(txr->bnx_ifsq); 3973 ifsq_watchdog_stop(&txr->bnx_tx_watchdog); 3974 } 3975 } 3976 3977 /* 3978 * Stop all chip I/O so that the kernel's probe routines don't 3979 * get confused by errant DMAs when rebooting. 3980 */ 3981 static void 3982 bnx_shutdown(device_t dev) 3983 { 3984 struct bnx_softc *sc = device_get_softc(dev); 3985 struct ifnet *ifp = &sc->arpcom.ac_if; 3986 3987 ifnet_serialize_all(ifp); 3988 bnx_stop(sc); 3989 ifnet_deserialize_all(ifp); 3990 } 3991 3992 static int 3993 bnx_suspend(device_t dev) 3994 { 3995 struct bnx_softc *sc = device_get_softc(dev); 3996 struct ifnet *ifp = &sc->arpcom.ac_if; 3997 3998 ifnet_serialize_all(ifp); 3999 bnx_stop(sc); 4000 ifnet_deserialize_all(ifp); 4001 4002 return 0; 4003 } 4004 4005 static int 4006 bnx_resume(device_t dev) 4007 { 4008 struct bnx_softc *sc = device_get_softc(dev); 4009 struct ifnet *ifp = &sc->arpcom.ac_if; 4010 4011 ifnet_serialize_all(ifp); 4012 4013 if (ifp->if_flags & IFF_UP) { 4014 int i; 4015 4016 bnx_init(sc); 4017 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 4018 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq); 4019 } 4020 4021 ifnet_deserialize_all(ifp); 4022 4023 return 0; 4024 } 4025 4026 static void 4027 bnx_setpromisc(struct bnx_softc *sc) 4028 { 4029 struct ifnet *ifp = &sc->arpcom.ac_if; 4030 4031 if (ifp->if_flags & IFF_PROMISC) 4032 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4033 else 4034 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4035 } 
4036 4037 static void 4038 bnx_dma_free(struct bnx_softc *sc) 4039 { 4040 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring; 4041 int i; 4042 4043 /* Destroy RX return rings */ 4044 if (sc->bnx_rx_ret_ring != NULL) { 4045 for (i = 0; i < sc->bnx_rx_retcnt; ++i) 4046 bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]); 4047 kfree(sc->bnx_rx_ret_ring, M_DEVBUF); 4048 } 4049 4050 /* Destroy RX mbuf DMA stuffs. */ 4051 if (std->bnx_rx_mtag != NULL) { 4052 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 4053 KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL); 4054 bus_dmamap_destroy(std->bnx_rx_mtag, 4055 std->bnx_rx_std_buf[i].bnx_rx_dmamap); 4056 } 4057 bus_dma_tag_destroy(std->bnx_rx_mtag); 4058 } 4059 4060 /* Destroy standard RX ring */ 4061 bnx_dma_block_free(std->bnx_rx_std_ring_tag, 4062 std->bnx_rx_std_ring_map, std->bnx_rx_std_ring); 4063 4064 /* Destroy TX rings */ 4065 if (sc->bnx_tx_ring != NULL) { 4066 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 4067 bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]); 4068 kfree(sc->bnx_tx_ring, M_DEVBUF); 4069 } 4070 4071 if (BNX_IS_JUMBO_CAPABLE(sc)) 4072 bnx_free_jumbo_mem(sc); 4073 4074 /* Destroy status blocks */ 4075 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4076 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4077 4078 bnx_dma_block_free(intr->bnx_status_tag, 4079 intr->bnx_status_map, intr->bnx_status_block); 4080 } 4081 4082 /* Destroy the parent tag */ 4083 if (sc->bnx_cdata.bnx_parent_tag != NULL) 4084 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag); 4085 } 4086 4087 static int 4088 bnx_dma_alloc(device_t dev) 4089 { 4090 struct bnx_softc *sc = device_get_softc(dev); 4091 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring; 4092 int i, error, mbx; 4093 4094 /* 4095 * Allocate the parent bus DMA tag appropriate for PCI. 4096 * 4097 * All of the NetExtreme/NetLink controllers have 4GB boundary 4098 * DMA bug. 4099 * Whenever an address crosses a multiple of the 4GB boundary 4100 * (including 4GB, 8Gb, 12Gb, etc.) 
and makes the transition 4101 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA 4102 * state machine will lockup and cause the device to hang. 4103 */ 4104 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G, 4105 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 4106 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 4107 0, &sc->bnx_cdata.bnx_parent_tag); 4108 if (error) { 4109 device_printf(dev, "could not create parent DMA tag\n"); 4110 return error; 4111 } 4112 4113 /* 4114 * Create DMA stuffs for status blocks. 4115 */ 4116 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4117 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4118 4119 error = bnx_dma_block_alloc(sc, 4120 __VM_CACHELINE_ALIGN(BGE_STATUS_BLK_SZ), 4121 &intr->bnx_status_tag, &intr->bnx_status_map, 4122 (void *)&intr->bnx_status_block, 4123 &intr->bnx_status_block_paddr); 4124 if (error) { 4125 device_printf(dev, 4126 "could not create %dth status block\n", i); 4127 return error; 4128 } 4129 } 4130 sc->bnx_hw_status = &sc->bnx_intr_data[0].bnx_status_block->bge_status; 4131 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) { 4132 sc->bnx_hw_status_tag = 4133 &sc->bnx_intr_data[0].bnx_status_block->bge_status_tag; 4134 } 4135 4136 /* 4137 * Create DMA tag and maps for RX mbufs. 
4138 */ 4139 std->bnx_sc = sc; 4140 lwkt_serialize_init(&std->bnx_rx_std_serialize); 4141 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0, 4142 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4143 NULL, NULL, MCLBYTES, 1, MCLBYTES, 4144 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag); 4145 if (error) { 4146 device_printf(dev, "could not create RX mbuf DMA tag\n"); 4147 return error; 4148 } 4149 4150 for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) { 4151 error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK, 4152 &std->bnx_rx_std_buf[i].bnx_rx_dmamap); 4153 if (error) { 4154 int j; 4155 4156 for (j = 0; j < i; ++j) { 4157 bus_dmamap_destroy(std->bnx_rx_mtag, 4158 std->bnx_rx_std_buf[j].bnx_rx_dmamap); 4159 } 4160 bus_dma_tag_destroy(std->bnx_rx_mtag); 4161 std->bnx_rx_mtag = NULL; 4162 4163 device_printf(dev, 4164 "could not create %dth RX mbuf DMA map\n", i); 4165 return error; 4166 } 4167 } 4168 4169 /* 4170 * Create DMA stuffs for standard RX ring. 4171 */ 4172 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ, 4173 &std->bnx_rx_std_ring_tag, 4174 &std->bnx_rx_std_ring_map, 4175 (void *)&std->bnx_rx_std_ring, 4176 &std->bnx_rx_std_ring_paddr); 4177 if (error) { 4178 device_printf(dev, "could not create std RX ring\n"); 4179 return error; 4180 } 4181 4182 /* 4183 * Create RX return rings 4184 */ 4185 mbx = BGE_MBX_RX_CONS0_LO; 4186 sc->bnx_rx_ret_ring = kmalloc_cachealign( 4187 sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, M_DEVBUF, 4188 M_WAITOK | M_ZERO); 4189 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4190 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 4191 struct bnx_intr_data *intr; 4192 4193 ret->bnx_sc = sc; 4194 ret->bnx_std = std; 4195 ret->bnx_rx_mbx = mbx; 4196 ret->bnx_rx_cntmax = (BGE_STD_RX_RING_CNT / 4) / 4197 sc->bnx_rx_retcnt; 4198 ret->bnx_rx_mask = 1 << i; 4199 4200 if (!BNX_RSS_ENABLED(sc)) { 4201 intr = &sc->bnx_intr_data[0]; 4202 } else { 4203 KKASSERT(i + 1 < sc->bnx_intr_cnt); 4204 intr = &sc->bnx_intr_data[i + 1]; 
4205 } 4206 4207 if (i == 0) { 4208 ret->bnx_rx_considx = 4209 &intr->bnx_status_block->bge_idx[0].bge_rx_prod_idx; 4210 } else if (i == 1) { 4211 ret->bnx_rx_considx = 4212 &intr->bnx_status_block->bge_rx_jumbo_cons_idx; 4213 } else if (i == 2) { 4214 ret->bnx_rx_considx = 4215 &intr->bnx_status_block->bge_rsvd1; 4216 } else if (i == 3) { 4217 ret->bnx_rx_considx = 4218 &intr->bnx_status_block->bge_rx_mini_cons_idx; 4219 } else { 4220 panic("unknown RX return ring %d\n", i); 4221 } 4222 ret->bnx_hw_status_tag = 4223 &intr->bnx_status_block->bge_status_tag; 4224 4225 error = bnx_create_rx_ret_ring(ret); 4226 if (error) { 4227 device_printf(dev, 4228 "could not create %dth RX ret ring\n", i); 4229 return error; 4230 } 4231 mbx += 8; 4232 } 4233 4234 /* 4235 * Create TX rings 4236 */ 4237 sc->bnx_tx_ring = kmalloc_cachealign( 4238 sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF, 4239 M_WAITOK | M_ZERO); 4240 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4241 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 4242 struct bnx_intr_data *intr; 4243 4244 txr->bnx_sc = sc; 4245 txr->bnx_tx_mbx = bnx_tx_mailbox[i]; 4246 4247 if (sc->bnx_tx_ringcnt == 1) { 4248 intr = &sc->bnx_intr_data[0]; 4249 } else { 4250 KKASSERT(i + 1 < sc->bnx_intr_cnt); 4251 intr = &sc->bnx_intr_data[i + 1]; 4252 } 4253 4254 if ((sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) == 0) { 4255 txr->bnx_hw_status_tag = 4256 &intr->bnx_status_block->bge_status_tag; 4257 } 4258 txr->bnx_tx_considx = 4259 &intr->bnx_status_block->bge_idx[0].bge_tx_cons_idx; 4260 4261 error = bnx_create_tx_ring(txr); 4262 if (error) { 4263 device_printf(dev, 4264 "could not create %dth TX ring\n", i); 4265 return error; 4266 } 4267 } 4268 4269 /* 4270 * Create jumbo buffer pool. 
4271 */ 4272 if (BNX_IS_JUMBO_CAPABLE(sc)) { 4273 error = bnx_alloc_jumbo_mem(sc); 4274 if (error) { 4275 device_printf(dev, 4276 "could not create jumbo buffer pool\n"); 4277 return error; 4278 } 4279 } 4280 4281 return 0; 4282 } 4283 4284 static int 4285 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag, 4286 bus_dmamap_t *map, void **addr, bus_addr_t *paddr) 4287 { 4288 bus_dmamem_t dmem; 4289 int error; 4290 4291 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0, 4292 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4293 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 4294 if (error) 4295 return error; 4296 4297 *tag = dmem.dmem_tag; 4298 *map = dmem.dmem_map; 4299 *addr = dmem.dmem_addr; 4300 *paddr = dmem.dmem_busaddr; 4301 4302 return 0; 4303 } 4304 4305 static void 4306 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr) 4307 { 4308 if (tag != NULL) { 4309 bus_dmamap_unload(tag, map); 4310 bus_dmamem_free(tag, addr, map); 4311 bus_dma_tag_destroy(tag); 4312 } 4313 } 4314 4315 static void 4316 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status) 4317 { 4318 struct ifnet *ifp = &sc->arpcom.ac_if; 4319 4320 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE) 4321 4322 /* 4323 * Sometimes PCS encoding errors are detected in 4324 * TBI mode (on fiber NICs), and for some reason 4325 * the chip will signal them as link changes. 4326 * If we get a link change event, but the 'PCS 4327 * encoding error' bit in the MAC status register 4328 * is set, don't bother doing a link check. 4329 * This avoids spurious "gigabit link up" messages 4330 * that sometimes appear on fiber NICs during 4331 * periods of heavy traffic. 
4332 */ 4333 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4334 if (!sc->bnx_link) { 4335 sc->bnx_link++; 4336 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) { 4337 BNX_CLRBIT(sc, BGE_MAC_MODE, 4338 BGE_MACMODE_TBI_SEND_CFGS); 4339 DELAY(40); 4340 } 4341 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4342 4343 if (bootverbose) 4344 if_printf(ifp, "link UP\n"); 4345 4346 ifp->if_link_state = LINK_STATE_UP; 4347 if_link_state_change(ifp); 4348 } 4349 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) { 4350 if (sc->bnx_link) { 4351 sc->bnx_link = 0; 4352 4353 if (bootverbose) 4354 if_printf(ifp, "link DOWN\n"); 4355 4356 ifp->if_link_state = LINK_STATE_DOWN; 4357 if_link_state_change(ifp); 4358 } 4359 } 4360 4361 #undef PCS_ENCODE_ERR 4362 4363 /* Clear the attention. */ 4364 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4365 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4366 BGE_MACSTAT_LINK_CHANGED); 4367 } 4368 4369 static void 4370 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused) 4371 { 4372 struct ifnet *ifp = &sc->arpcom.ac_if; 4373 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 4374 4375 mii_pollstat(mii); 4376 bnx_miibus_statchg(sc->bnx_dev); 4377 4378 if (bootverbose) { 4379 if (sc->bnx_link) 4380 if_printf(ifp, "link UP\n"); 4381 else 4382 if_printf(ifp, "link DOWN\n"); 4383 } 4384 4385 /* Clear the attention. 
*/ 4386 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4387 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4388 BGE_MACSTAT_LINK_CHANGED); 4389 } 4390 4391 static void 4392 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused) 4393 { 4394 struct ifnet *ifp = &sc->arpcom.ac_if; 4395 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 4396 4397 mii_pollstat(mii); 4398 4399 if (!sc->bnx_link && 4400 (mii->mii_media_status & IFM_ACTIVE) && 4401 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4402 sc->bnx_link++; 4403 if (bootverbose) 4404 if_printf(ifp, "link UP\n"); 4405 } else if (sc->bnx_link && 4406 (!(mii->mii_media_status & IFM_ACTIVE) || 4407 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 4408 sc->bnx_link = 0; 4409 if (bootverbose) 4410 if_printf(ifp, "link DOWN\n"); 4411 } 4412 4413 /* Clear the attention. */ 4414 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4415 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4416 BGE_MACSTAT_LINK_CHANGED); 4417 } 4418 4419 static int 4420 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS) 4421 { 4422 struct bnx_softc *sc = arg1; 4423 4424 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4425 &sc->bnx_rx_coal_ticks, 4426 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX, 4427 BNX_RX_COAL_TICKS_CHG); 4428 } 4429 4430 static int 4431 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS) 4432 { 4433 struct bnx_softc *sc = arg1; 4434 4435 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4436 &sc->bnx_tx_coal_ticks, 4437 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX, 4438 BNX_TX_COAL_TICKS_CHG); 4439 } 4440 4441 static int 4442 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS) 4443 { 4444 struct bnx_softc *sc = arg1; 4445 4446 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4447 &sc->bnx_rx_coal_bds, 4448 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4449 BNX_RX_COAL_BDS_CHG); 4450 } 4451 4452 static int 4453 bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS) 4454 { 4455 struct bnx_softc *sc = arg1; 
4456 4457 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4458 &sc->bnx_rx_coal_bds_poll, 4459 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4460 BNX_RX_COAL_BDS_CHG); 4461 } 4462 4463 static int 4464 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS) 4465 { 4466 struct bnx_softc *sc = arg1; 4467 4468 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4469 &sc->bnx_tx_coal_bds, 4470 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4471 BNX_TX_COAL_BDS_CHG); 4472 } 4473 4474 static int 4475 bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS) 4476 { 4477 struct bnx_softc *sc = arg1; 4478 4479 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4480 &sc->bnx_tx_coal_bds_poll, 4481 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4482 BNX_TX_COAL_BDS_CHG); 4483 } 4484 4485 static int 4486 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4487 { 4488 struct bnx_softc *sc = arg1; 4489 4490 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4491 &sc->bnx_rx_coal_bds_int, 4492 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4493 BNX_RX_COAL_BDS_INT_CHG); 4494 } 4495 4496 static int 4497 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4498 { 4499 struct bnx_softc *sc = arg1; 4500 4501 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4502 &sc->bnx_tx_coal_bds_int, 4503 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4504 BNX_TX_COAL_BDS_INT_CHG); 4505 } 4506 4507 static int 4508 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal, 4509 int coal_min, int coal_max, uint32_t coal_chg_mask) 4510 { 4511 struct bnx_softc *sc = arg1; 4512 struct ifnet *ifp = &sc->arpcom.ac_if; 4513 int error = 0, v; 4514 4515 ifnet_serialize_all(ifp); 4516 4517 v = *coal; 4518 error = sysctl_handle_int(oidp, &v, 0, req); 4519 if (!error && req->newptr != NULL) { 4520 if (v < coal_min || v > coal_max) { 4521 error = EINVAL; 4522 } else { 4523 *coal = v; 4524 sc->bnx_coal_chg |= coal_chg_mask; 4525 4526 /* Commit changes */ 4527 bnx_coal_change(sc); 4528 } 4529 } 4530 4531 ifnet_deserialize_all(ifp); 4532 return error; 4533 } 4534 4535 
/*
 * Push the coalescing parameters flagged in sc->bnx_coal_chg into
 * the host coalescing engine.  With multiple RX return / TX rings
 * the per-vector BGE_VEC1_* register sets are programmed and the
 * base registers zeroed; any unused per-vector slots (up to
 * BNX_INTR_MAX - 1) are explicitly cleared.  Must be called with
 * the ifnet fully serialized.
 */
static void
bnx_coal_change(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
		if (sc->bnx_rx_retcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
			    sc->bnx_rx_coal_ticks);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 0);
			for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS +
				    (i * BGE_VEC_COALSET_SIZE),
				    sc->bnx_rx_coal_ticks);
			}
		}
		/* Zero the remaining, unused per-vector slots. */
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "rx_coal_ticks -> %u\n",
			    sc->bnx_rx_coal_ticks);
		}
	}

	if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
		if (sc->bnx_tx_ringcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
			    sc->bnx_tx_coal_ticks);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 0);
			for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS +
				    (i * BGE_VEC_COALSET_SIZE),
				    sc->bnx_tx_coal_ticks);
			}
		}
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "tx_coal_ticks -> %u\n",
			    sc->bnx_tx_coal_ticks);
		}
	}

	if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
		uint32_t rx_coal_bds;

		/* Polling mode uses its own (typically larger) BD budget. */
		if (ifp->if_flags & IFF_NPOLLING)
			rx_coal_bds = sc->bnx_rx_coal_bds_poll;
		else
			rx_coal_bds = sc->bnx_rx_coal_bds;

		if (sc->bnx_rx_retcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_coal_bds);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 0);
			for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS +
				    (i * BGE_VEC_COALSET_SIZE), rx_coal_bds);
			}
		}
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "%srx_coal_bds -> %u\n",
			    (ifp->if_flags & IFF_NPOLLING) ? "polling " : "",
			    rx_coal_bds);
		}
	}

	if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
		uint32_t tx_coal_bds;

		if (ifp->if_flags & IFF_NPOLLING)
			tx_coal_bds = sc->bnx_tx_coal_bds_poll;
		else
			tx_coal_bds = sc->bnx_tx_coal_bds;

		if (sc->bnx_tx_ringcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_coal_bds);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 0);
			for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS +
				    (i * BGE_VEC_COALSET_SIZE), tx_coal_bds);
			}
		}
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "%stx_coal_bds -> %u\n",
			    (ifp->if_flags & IFF_NPOLLING) ? "polling " : "",
			    tx_coal_bds);
		}
	}

	if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
		if (sc->bnx_rx_retcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
			    sc->bnx_rx_coal_bds_int);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
			for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT +
				    (i * BGE_VEC_COALSET_SIZE),
				    sc->bnx_rx_coal_bds_int);
			}
		}
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "rx_coal_bds_int -> %u\n",
			    sc->bnx_rx_coal_bds_int);
		}
	}

	if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
		if (sc->bnx_tx_ringcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
			    sc->bnx_tx_coal_bds_int);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
			for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT +
				    (i * BGE_VEC_COALSET_SIZE),
				    sc->bnx_tx_coal_bds_int);
			}
		}
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "tx_coal_bds_int -> %u\n",
			    sc->bnx_tx_coal_bds_int);
		}
	}

	sc->bnx_coal_chg = 0;
}

/*
 * Lost-interrupt watchdog for a vector that services both an RX
 * return ring and a TX ring (status tag bug workaround).  If the
 * hardware consumer indices have advanced but our saved indices
 * have not moved since the last check, the interrupt was probably
 * lost; give it one grace period, then invoke the handler by hand.
 * Reschedules itself every BNX_INTR_CKINTVL ticks.
 */
static void
bnx_check_intr_rxtx(void *xintr)
{
	struct bnx_intr_data *intr = xintr;
	struct bnx_rx_ret_ring *ret;
	struct bnx_tx_ring *txr;
	struct ifnet *ifp;

	lwkt_serialize_enter(intr->bnx_intr_serialize);

	KKASSERT(mycpuid == intr->bnx_intr_cpuid);

	ifp = &intr->bnx_sc->arpcom.ac_if;
	/* Nothing to do if the NIC is not up or is being polled. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
		lwkt_serialize_exit(intr->bnx_intr_serialize);
		return;
	}

	txr = intr->bnx_txr;
	ret = intr->bnx_ret;

	if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx ||
	    *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
		if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx &&
		    intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
			if (!intr->bnx_intr_maylose) {
				/* First miss: arm and wait one more interval. */
				intr->bnx_intr_maylose = TRUE;
				goto done;
			}
			if (bootverbose)
				if_printf(ifp, "lost interrupt\n");
			intr->bnx_intr_func(intr->bnx_intr_arg);
		}
	}
	intr->bnx_intr_maylose = FALSE;
	intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
	intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;

done:
	callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
	    intr->bnx_intr_check, intr);
	lwkt_serialize_exit(intr->bnx_intr_serialize);
}

/*
 * Lost-interrupt watchdog for a TX-only vector; same strategy as
 * bnx_check_intr_rxtx() but only the TX consumer index is watched.
 */
static void
bnx_check_intr_tx(void *xintr)
{
	struct bnx_intr_data *intr = xintr;
	struct bnx_tx_ring *txr;
	struct ifnet *ifp;

	lwkt_serialize_enter(intr->bnx_intr_serialize);

	KKASSERT(mycpuid == intr->bnx_intr_cpuid);

	ifp = &intr->bnx_sc->arpcom.ac_if;
	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
		lwkt_serialize_exit(intr->bnx_intr_serialize);
		return;
	}

	txr = intr->bnx_txr;

	if (*txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
		if (intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
			if (!intr->bnx_intr_maylose) {
				intr->bnx_intr_maylose = TRUE;
				goto done;
			}
			if (bootverbose)
				if_printf(ifp, "lost interrupt\n");
			intr->bnx_intr_func(intr->bnx_intr_arg);
		}
	}
	intr->bnx_intr_maylose = FALSE;
	intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;

done:
	callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
	    intr->bnx_intr_check, intr);
	lwkt_serialize_exit(intr->bnx_intr_serialize);
}

/*
 * Lost-interrupt watchdog for an RX-only vector; same strategy as
 * bnx_check_intr_rxtx() but only the RX consumer index is watched.
 */
static void
bnx_check_intr_rx(void *xintr)
{
	struct bnx_intr_data *intr = xintr;
	struct bnx_rx_ret_ring *ret;
	struct ifnet *ifp;

	lwkt_serialize_enter(intr->bnx_intr_serialize);

	KKASSERT(mycpuid == intr->bnx_intr_cpuid);

	ifp = &intr->bnx_sc->arpcom.ac_if;
	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
		lwkt_serialize_exit(intr->bnx_intr_serialize);
		return;
	}

	ret = intr->bnx_ret;

	if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx) {
		if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx) {
			if (!intr->bnx_intr_maylose) {
				intr->bnx_intr_maylose = TRUE;
				goto done;
			}
			if (bootverbose)
				if_printf(ifp, "lost interrupt\n");
			intr->bnx_intr_func(intr->bnx_intr_arg);
		}
	}
	intr->bnx_intr_maylose = FALSE;
	intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;

done:
	callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
	    intr->bnx_intr_check, intr);
	lwkt_serialize_exit(intr->bnx_intr_serialize);
}

/*
 * Re-enable interrupt delivery: unblock the serialized handlers,
 * acknowledge via each vector's mailbox with its saved status tag,
 * unmask the PCI interrupt line and force one interrupt so nothing
 * pending is lost.  Also (re)starts the lost-interrupt watchdog
 * callouts on chips with the status tag bug.
 */
static void
bnx_enable_intr(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		lwkt_serialize_handler_enable(
		    sc->bnx_intr_data[i].bnx_intr_serialize);
	}

	/*
	 * Enable interrupt.
	 */
	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		bnx_writembx(sc, intr->bnx_intr_mbx,
		    (*intr->bnx_saved_status_tag) << 24);
		/* XXX Linux driver */
		bnx_writembx(sc, intr->bnx_intr_mbx,
		    (*intr->bnx_saved_status_tag) << 24);
	}

	/*
	 * Unmask the interrupt when we stop polling.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Trigger another interrupt, since above writing
	 * to interrupt mailbox0 may acknowledge pending
	 * interrupt.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);

	if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
		if (bootverbose)
			if_printf(ifp, "status tag bug workaround\n");

		for (i = 0; i < sc->bnx_intr_cnt; ++i) {
			struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

			if (intr->bnx_intr_check == NULL)
				continue;
			intr->bnx_intr_maylose = FALSE;
			intr->bnx_rx_check_considx = 0;
			intr->bnx_tx_check_considx = 0;
			callout_reset_bycpu(&intr->bnx_intr_timer,
			    BNX_INTR_CKINTVL, intr->bnx_intr_check, intr,
			    intr->bnx_intr_cpuid);
		}
	}
}

/*
 * Mask interrupt delivery (used when entering polling mode):
 * stop the watchdog callouts, mask the PCI interrupt, acknowledge
 * every mailbox and finally block the serialized handlers.
 */
static void
bnx_disable_intr(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		callout_stop(&intr->bnx_intr_timer);
		intr->bnx_intr_maylose = FALSE;
		intr->bnx_rx_check_considx = 0;
		intr->bnx_tx_check_considx = 0;
	}

	/*
	 * Mask the interrupt when we start polling.
	 */
	PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Acknowledge possible asserted interrupt.
	 */
	for (i = 0; i < BNX_INTR_MAX; ++i)
		bnx_writembx(sc, sc->bnx_intr_data[i].bnx_intr_mbx, 1);

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		lwkt_serialize_handler_disable(
		    sc->bnx_intr_data[i].bnx_intr_serialize);
	}
}

/*
 * Try to read the station address out of NIC internal memory.
 * The word at 0x0c14 holds the 0x484b ("HK") signature in its
 * upper half when a valid MAC address is present.
 * Returns 0 on success, 1 if no valid address was found.
 */
static int
bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;
	int ret = 1;

	mac_addr = bnx_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bnx_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		ret = 0;
	}
	return ret;
}

/*
 * Try to read the station address from NVRAM.  On 5717+ parts the
 * per-function offset depends on the PCI function number.
 * Returns 0 on success, non-zero on failure.
 */
static int
bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (BNX_IS_5717_PLUS(sc)) {
		int f;

		f = pci_get_function(sc->bnx_dev);
		if (f & 1)
			mac_offset = BGE_EE_MAC_OFFSET_5717;
		if (f > 1)
			mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
	}

	return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
}

/*
 * Try to read the station address from the EEPROM; fails
 * immediately if the board has no EEPROM.
 * Returns 0 on success, non-zero on failure.
 */
static int
bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
{
	if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
		return 1;

	return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN);
}

/*
 * Obtain the station address, trying each source in order of
 * preference until one succeeds.  Returns 0 on success or ENXIO
 * if every source failed.
 */
static int
bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
{
	static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bnx_get_eaddr_mem,
		bnx_get_eaddr_nvram,
		bnx_get_eaddr_eeprom,
		NULL
	};
	const bnx_eaddr_fcn_t *func;

	for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}

/*
 * NOTE: 'm' is not freed upon failure
 */
struct mbuf *
bnx_defrag_shortdma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If device receive two back-to-back send BDs with less than
	 * or equal to 8 total bytes then the device may hang.  The two
	 * back-to-back send BDs must in the same frame for this failure
	 * to occur.  Scan mbuf chains and see whether two back-to-back
	 * send BDs are there.  If this is the case, allocate new mbuf
	 * and copy the frame to workaround the silicon bug.
	 */
	for (n = m, found = 0; n != NULL; n = n->m_next) {
		if (n->m_len < 8) {
			found++;
			if (found > 1)
				break;
			continue;
		}
		found = 0;
	}

	if (found > 1)
		n = m_defrag(m, MB_DONTWAIT);
	else
		n = m;
	return n;
}

/*
 * Clear 'bit' in register 'reg' and poll (up to BNX_TIMEOUT
 * iterations, 100us apart) for the hardware to confirm the block
 * has stopped.  Gives up silently on timeout.
 */
static void
bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
{
	int i;

	BNX_CLRBIT(sc, reg, bit);
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		DELAY(100);
	}
}

/*
 * Check the MAC status register for a link change and dispatch to
 * the media-specific link update handler if one occurred (or if a
 * link event was explicitly requested via bnx_link_evt).
 */
static void
bnx_link_poll(struct bnx_softc *sc)
{
	uint32_t status;

	status = CSR_READ_4(sc, BGE_MAC_STS);
	if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
		sc->bnx_link_evt = 0;
		sc->bnx_link_upd(sc, status);
	}
}

/*
 * Enable MSI (or MSI-X multi-vector mode when is_msix is TRUE)
 * in the chip's MSI mode register, using one-shot mode.
 */
static void
bnx_enable_msi(struct bnx_softc *sc, boolean_t is_msix)
{
	uint32_t msi_mode;

	msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
	msi_mode |= BGE_MSIMODE_ENABLE;
	/*
	 * NOTE:
	 * 5718-PG105-R says that "one shot" mode does not work
	 * if MSI is used, however, it obviously works.
	 */
	msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
	if (is_msix)
		msi_mode |= BGE_MSIMODE_MSIX_MULTIMODE;
	else
		msi_mode &= ~BGE_MSIMODE_MSIX_MULTIMODE;
	CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
}

/*
 * Compute the DMA byte/word swap option bits for the mode control
 * register; 5720/5762 need additional buffer-to-host swap settings.
 */
static uint32_t
bnx_dma_swap_options(struct bnx_softc *sc)
{
	uint32_t dma_options;

	dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
	    BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
#if BYTE_ORDER == BIG_ENDIAN
	dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
#endif
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
		    BGE_MODECTL_HTX2B_ENABLE;
	}
	return dma_options;
}

/*
 * Prepare an outgoing TSO mbuf: make the ether/IP/TCP headers
 * contiguous, pre-adjust ip_len and clear the TCP checksum, and
 * return the MSS and send-BD flags words with the header length
 * packed into their chip-defined bit positions.  Returns 0 on
 * success or ENOBUFS if m_pullup() fails (then *mp is NULL).
 */
static int
bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp,
    uint16_t *mss0, uint16_t *flags0)
{
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *th;
	int thoff, iphlen, hoff, hlen;
	uint16_t flags, mss;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	hoff = m->m_pkthdr.csum_lhlen;
	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;

	KASSERT(hoff > 0, ("invalid ether header len"));
	KASSERT(iphlen > 0, ("invalid ip header len"));
	KASSERT(thoff > 0, ("invalid tcp header len"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	th = mtodoff(m, struct tcphdr *, hoff + iphlen);

	mss = m->m_pkthdr.tso_segsz;
	flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;

	ip->ip_len = htons(mss + iphlen + thoff);
	th->th_sum = 0;

	/*
	 * Pack the IP+TCP header length (in 32-bit words) into the
	 * chip-specific bit fields of the mss and flags words.
	 */
	hlen = (iphlen + thoff) >> 2;
	mss |= ((hlen & 0x3) << 14);
	flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);

	*mss0 = mss;
	*flags0 = flags;

	return 0;
}

/*
 * Allocate the DMA tag, per-slot maps and descriptor memory for
 * one TX ring.  Returns 0 on success or a bus_dma error; partially
 * created per-slot maps are unwound on failure.
 */
static int
bnx_create_tx_ring(struct bnx_tx_ring *txr)
{
	bus_size_t txmaxsz, txmaxsegsz;
	int i, error;

	lwkt_serialize_init(&txr->bnx_tx_serialize);

	/*
	 * Create DMA tag and maps for TX mbufs.
	 */
	if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO)
		txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
	else
		txmaxsz = BNX_JUMBO_FRAMELEN;
	if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766)
		txmaxsegsz = MCLBYTES;
	else
		txmaxsegsz = PAGE_SIZE;
	error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag,
	    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    txmaxsz, BNX_NSEG_NEW, txmaxsegsz,
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
	    &txr->bnx_tx_mtag);
	if (error) {
		device_printf(txr->bnx_sc->bnx_dev,
		    "could not create TX mbuf DMA tag\n");
		return error;
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(txr->bnx_tx_mtag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txr->bnx_tx_buf[i].bnx_tx_dmamap);
		if (error) {
			int j;

			/* Unwind the maps created so far. */
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(txr->bnx_tx_mtag,
				    txr->bnx_tx_buf[j].bnx_tx_dmamap);
			}
			bus_dma_tag_destroy(txr->bnx_tx_mtag);
			txr->bnx_tx_mtag = NULL;

			device_printf(txr->bnx_sc->bnx_dev,
			    "could not create TX mbuf DMA map\n");
			return error;
		}
	}

	/*
	 * Create DMA stuffs for TX ring.
	 */
	error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ,
	    &txr->bnx_tx_ring_tag,
	    &txr->bnx_tx_ring_map,
	    (void *)&txr->bnx_tx_ring,
	    &txr->bnx_tx_ring_paddr);
	if (error) {
		device_printf(txr->bnx_sc->bnx_dev,
		    "could not create TX ring\n");
		return error;
	}

	txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA;
	txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS;

	return 0;
}

/*
 * Tear down one TX ring's per-slot DMA maps, mbuf tag and
 * descriptor memory; NULL-guarded, safe after a partial create.
 */
static void
bnx_destroy_tx_ring(struct bnx_tx_ring *txr)
{
	/* Destroy TX mbuf DMA stuffs. */
	if (txr->bnx_tx_mtag != NULL) {
		int i;

		for (i = 0; i < BGE_TX_RING_CNT; i++) {
			/* All TX mbufs must have been freed by now. */
			KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL);
			bus_dmamap_destroy(txr->bnx_tx_mtag,
			    txr->bnx_tx_buf[i].bnx_tx_dmamap);
		}
		bus_dma_tag_destroy(txr->bnx_tx_mtag);
	}

	/* Destroy TX ring */
	bnx_dma_block_free(txr->bnx_tx_ring_tag,
	    txr->bnx_tx_ring_map, txr->bnx_tx_ring);
}

/*
 * Sysctl handler toggling BNX_TX_FLAG_FORCE_DEFRAG on every TX
 * ring (ring 0's current setting is reported as the old value).
 */
static int
bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
	int error, defrag, i;

	if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG)
		defrag = 1;
	else
		defrag = 0;

	error = sysctl_handle_int(oidp, &defrag, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		txr = &sc->bnx_tx_ring[i];
		if (defrag)
			txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG;
		else
			txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG;
	}
	ifnet_deserialize_all(ifp);

	return 0;
}

/*
 * Sysctl handler setting the TX mailbox write threshold (number of
 * segments queued before the doorbell is rung) on every TX ring.
 */
static int
bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
	int error, tx_wreg, i;

	tx_wreg = txr->bnx_tx_wreg;
	error = sysctl_handle_int(oidp, &tx_wreg, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
		sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg;
	ifnet_deserialize_all(ifp);

	return 0;
}

/*
 * Allocate the descriptor memory and temporary mbuf DMA map for
 * one RX return ring; the mbuf DMA tag is shared with (shadowed
 * from) the standard RX ring.  Returns 0 or a bus_dma error.
 */
static int
bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret)
{
	int error;

	lwkt_serialize_init(&ret->bnx_rx_ret_serialize);

	/*
	 * Create DMA stuffs for RX return ring.
	 */
	error = bnx_dma_block_alloc(ret->bnx_sc,
	    BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT),
	    &ret->bnx_rx_ret_ring_tag,
	    &ret->bnx_rx_ret_ring_map,
	    (void *)&ret->bnx_rx_ret_ring,
	    &ret->bnx_rx_ret_ring_paddr);
	if (error) {
		device_printf(ret->bnx_sc->bnx_dev,
		    "could not create RX ret ring\n");
		return error;
	}

	/* Shadow standard ring's RX mbuf DMA tag */
	ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag;

	/*
	 * Create tmp DMA map for RX mbufs.
	 */
	error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK,
	    &ret->bnx_rx_tmpmap);
	if (error) {
		device_printf(ret->bnx_sc->bnx_dev,
		    "could not create tmp RX mbuf DMA map\n");
		/* NULL the shadowed tag so destroy won't touch it. */
		ret->bnx_rx_mtag = NULL;
		return error;
	}
	return 0;
}

/*
 * Tear down one RX return ring's tmp map and descriptor memory.
 * The mbuf DMA tag itself belongs to the standard ring and is
 * not destroyed here.
 */
static void
bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret)
{
	/* Destroy tmp RX mbuf DMA map */
	if (ret->bnx_rx_mtag != NULL)
		bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap);

	/* Destroy RX return ring */
	bnx_dma_block_free(ret->bnx_rx_ret_ring_tag,
	    ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring);
}

/*
 * Allocate interrupt resources.  Multi-vector configurations go
 * through bnx_alloc_msix(); the single-vector path below binds
 * vector 0 to RX return ring 0 and TX ring 0.
 * (NOTE(review): function continues beyond this file chunk.)
 */
static int
bnx_alloc_intr(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;
	u_int intr_flags;
	int error;

	if (sc->bnx_intr_cnt > 1) {
		error = bnx_alloc_msix(sc);
		if (error)
			return error;
		KKASSERT(sc->bnx_intr_type == PCI_INTR_TYPE_MSIX);
		return 0;
	}

	KKASSERT(sc->bnx_intr_cnt == 1);

	intr = &sc->bnx_intr_data[0];
	intr->bnx_ret = &sc->bnx_rx_ret_ring[0];
	intr->bnx_txr = &sc->bnx_tx_ring[0];
	intr->bnx_intr_serialize = &sc->bnx_main_serialize;
	intr->bnx_intr_check = bnx_check_intr_rxtx;
	intr->bnx_saved_status_tag = &intr->bnx_ret->bnx_saved_status_tag;

	sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable,
	    &intr->bnx_intr_rid, &intr_flags);

	intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ,
	    &intr->bnx_intr_rid, intr_flags);
	if (intr->bnx_intr_res == NULL) {
		device_printf(sc->bnx_dev, "could not alloc interrupt\n");
		return ENXIO;
	}

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
		bnx_enable_msi(sc, FALSE);
		intr->bnx_intr_func = bnx_msi;
		if (bootverbose)
			device_printf(sc->bnx_dev, "oneshot MSI\n");
	} else {
		intr->bnx_intr_func = bnx_intr_legacy;
	}
	intr->bnx_intr_arg = sc;
intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res); 5355 5356 intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid; 5357 5358 return 0; 5359 } 5360 5361 static int 5362 bnx_setup_intr(struct bnx_softc *sc) 5363 { 5364 int error, i; 5365 5366 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 5367 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 5368 5369 error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res, 5370 INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg, 5371 &intr->bnx_intr_hand, intr->bnx_intr_serialize, 5372 intr->bnx_intr_desc); 5373 if (error) { 5374 device_printf(sc->bnx_dev, 5375 "could not set up %dth intr\n", i); 5376 bnx_teardown_intr(sc, i); 5377 return error; 5378 } 5379 } 5380 return 0; 5381 } 5382 5383 static void 5384 bnx_teardown_intr(struct bnx_softc *sc, int cnt) 5385 { 5386 int i; 5387 5388 for (i = 0; i < cnt; ++i) { 5389 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 5390 5391 bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res, 5392 intr->bnx_intr_hand); 5393 } 5394 } 5395 5396 static void 5397 bnx_free_intr(struct bnx_softc *sc) 5398 { 5399 if (sc->bnx_intr_type != PCI_INTR_TYPE_MSIX) { 5400 struct bnx_intr_data *intr; 5401 5402 KKASSERT(sc->bnx_intr_cnt <= 1); 5403 intr = &sc->bnx_intr_data[0]; 5404 5405 if (intr->bnx_intr_res != NULL) { 5406 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ, 5407 intr->bnx_intr_rid, intr->bnx_intr_res); 5408 } 5409 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) 5410 pci_release_msi(sc->bnx_dev); 5411 } else { 5412 bnx_free_msix(sc, TRUE); 5413 } 5414 } 5415 5416 static void 5417 bnx_setup_serialize(struct bnx_softc *sc) 5418 { 5419 int i, j; 5420 5421 /* 5422 * Allocate serializer array 5423 */ 5424 5425 /* Main + RX STD + TX + RX RET */ 5426 sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt; 5427 5428 sc->bnx_serialize = 5429 kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *), 5430 M_DEVBUF, M_WAITOK | M_ZERO); 5431 5432 /* 5433 * Setup serializers 5434 * 5435 
* NOTE: Order is critical 5436 */ 5437 5438 i = 0; 5439 5440 KKASSERT(i < sc->bnx_serialize_cnt); 5441 sc->bnx_serialize[i++] = &sc->bnx_main_serialize; 5442 5443 KKASSERT(i < sc->bnx_serialize_cnt); 5444 sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize; 5445 5446 for (j = 0; j < sc->bnx_rx_retcnt; ++j) { 5447 KKASSERT(i < sc->bnx_serialize_cnt); 5448 sc->bnx_serialize[i++] = 5449 &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize; 5450 } 5451 5452 for (j = 0; j < sc->bnx_tx_ringcnt; ++j) { 5453 KKASSERT(i < sc->bnx_serialize_cnt); 5454 sc->bnx_serialize[i++] = 5455 &sc->bnx_tx_ring[j].bnx_tx_serialize; 5456 } 5457 5458 KKASSERT(i == sc->bnx_serialize_cnt); 5459 } 5460 5461 static void 5462 bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 5463 { 5464 struct bnx_softc *sc = ifp->if_softc; 5465 5466 ifnet_serialize_array_enter(sc->bnx_serialize, 5467 sc->bnx_serialize_cnt, slz); 5468 } 5469 5470 static void 5471 bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 5472 { 5473 struct bnx_softc *sc = ifp->if_softc; 5474 5475 ifnet_serialize_array_exit(sc->bnx_serialize, 5476 sc->bnx_serialize_cnt, slz); 5477 } 5478 5479 static int 5480 bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 5481 { 5482 struct bnx_softc *sc = ifp->if_softc; 5483 5484 return ifnet_serialize_array_try(sc->bnx_serialize, 5485 sc->bnx_serialize_cnt, slz); 5486 } 5487 5488 #ifdef INVARIANTS 5489 5490 static void 5491 bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 5492 boolean_t serialized) 5493 { 5494 struct bnx_softc *sc = ifp->if_softc; 5495 5496 ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt, 5497 slz, serialized); 5498 } 5499 5500 #endif /* INVARIANTS */ 5501 5502 #ifdef IFPOLL_ENABLE 5503 5504 static int 5505 bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS) 5506 { 5507 struct bnx_softc *sc = (void *)arg1; 5508 struct ifnet *ifp = &sc->arpcom.ac_if; 5509 int error, off; 5510 5511 off = sc->bnx_npoll_rxoff; 5512 
error = sysctl_handle_int(oidp, &off, 0, req); 5513 if (error || req->newptr == NULL) 5514 return error; 5515 if (off < 0) 5516 return EINVAL; 5517 5518 ifnet_serialize_all(ifp); 5519 if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) { 5520 error = EINVAL; 5521 } else { 5522 error = 0; 5523 sc->bnx_npoll_txoff = off; 5524 sc->bnx_npoll_rxoff = off; 5525 } 5526 ifnet_deserialize_all(ifp); 5527 5528 return error; 5529 } 5530 5531 static int 5532 bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 5533 { 5534 struct bnx_softc *sc = (void *)arg1; 5535 struct ifnet *ifp = &sc->arpcom.ac_if; 5536 int error, off; 5537 5538 off = sc->bnx_npoll_rxoff; 5539 error = sysctl_handle_int(oidp, &off, 0, req); 5540 if (error || req->newptr == NULL) 5541 return error; 5542 if (off < 0) 5543 return EINVAL; 5544 5545 ifnet_serialize_all(ifp); 5546 if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) { 5547 error = EINVAL; 5548 } else { 5549 error = 0; 5550 sc->bnx_npoll_rxoff = off; 5551 } 5552 ifnet_deserialize_all(ifp); 5553 5554 return error; 5555 } 5556 5557 static int 5558 bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS) 5559 { 5560 struct bnx_softc *sc = (void *)arg1; 5561 struct ifnet *ifp = &sc->arpcom.ac_if; 5562 int error, off; 5563 5564 off = sc->bnx_npoll_txoff; 5565 error = sysctl_handle_int(oidp, &off, 0, req); 5566 if (error || req->newptr == NULL) 5567 return error; 5568 if (off < 0) 5569 return EINVAL; 5570 5571 ifnet_serialize_all(ifp); 5572 if (off >= ncpus2) { 5573 error = EINVAL; 5574 } else { 5575 error = 0; 5576 sc->bnx_npoll_txoff = off; 5577 } 5578 ifnet_deserialize_all(ifp); 5579 5580 return error; 5581 } 5582 5583 #endif /* IFPOLL_ENABLE */ 5584 5585 static void 5586 bnx_set_tick_cpuid(struct bnx_softc *sc, boolean_t polling) 5587 { 5588 if (polling) 5589 sc->bnx_tick_cpuid = 0; /* XXX */ 5590 else 5591 sc->bnx_tick_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid; 5592 } 5593 5594 static void 5595 bnx_rx_std_refill_ithread(void *xstd) 5596 { 5597 struct bnx_rx_std_ring *std = 
xstd; 5598 struct globaldata *gd = mycpu; 5599 5600 crit_enter_gd(gd); 5601 5602 while (!std->bnx_rx_std_stop) { 5603 if (std->bnx_rx_std_refill) { 5604 lwkt_serialize_handler_call( 5605 &std->bnx_rx_std_serialize, 5606 bnx_rx_std_refill, std, NULL); 5607 } 5608 5609 crit_exit_gd(gd); 5610 crit_enter_gd(gd); 5611 5612 atomic_poll_release_int(&std->bnx_rx_std_running); 5613 cpu_mfence(); 5614 5615 if (!std->bnx_rx_std_refill && !std->bnx_rx_std_stop) { 5616 lwkt_deschedule_self(gd->gd_curthread); 5617 lwkt_switch(); 5618 } 5619 } 5620 5621 crit_exit_gd(gd); 5622 5623 wakeup(std); 5624 5625 lwkt_exit(); 5626 } 5627 5628 static void 5629 bnx_rx_std_refill(void *xstd, void *frame __unused) 5630 { 5631 struct bnx_rx_std_ring *std = xstd; 5632 int cnt, refill_mask; 5633 5634 again: 5635 cnt = 0; 5636 5637 cpu_lfence(); 5638 refill_mask = std->bnx_rx_std_refill; 5639 atomic_clear_int(&std->bnx_rx_std_refill, refill_mask); 5640 5641 while (refill_mask) { 5642 uint16_t check_idx = std->bnx_rx_std; 5643 int ret_idx; 5644 5645 ret_idx = bsfl(refill_mask); 5646 for (;;) { 5647 struct bnx_rx_buf *rb; 5648 int refilled; 5649 5650 BNX_INC(check_idx, BGE_STD_RX_RING_CNT); 5651 rb = &std->bnx_rx_std_buf[check_idx]; 5652 refilled = rb->bnx_rx_refilled; 5653 cpu_lfence(); 5654 if (refilled) { 5655 bnx_setup_rxdesc_std(std, check_idx); 5656 std->bnx_rx_std = check_idx; 5657 ++cnt; 5658 if (cnt >= 8) { 5659 atomic_subtract_int( 5660 &std->bnx_rx_std_used, cnt); 5661 bnx_writembx(std->bnx_sc, 5662 BGE_MBX_RX_STD_PROD_LO, 5663 std->bnx_rx_std); 5664 cnt = 0; 5665 } 5666 } else { 5667 break; 5668 } 5669 } 5670 refill_mask &= ~(1 << ret_idx); 5671 } 5672 5673 if (cnt) { 5674 atomic_subtract_int(&std->bnx_rx_std_used, cnt); 5675 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, 5676 std->bnx_rx_std); 5677 } 5678 5679 if (std->bnx_rx_std_refill) 5680 goto again; 5681 5682 atomic_poll_release_int(&std->bnx_rx_std_running); 5683 cpu_mfence(); 5684 5685 if (std->bnx_rx_std_refill) 5686 goto 
again; 5687 } 5688 5689 static int 5690 bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS) 5691 { 5692 struct bnx_softc *sc = (void *)arg1; 5693 struct ifnet *ifp = &sc->arpcom.ac_if; 5694 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 5695 int error, cntmax, i; 5696 5697 cntmax = ret->bnx_rx_cntmax; 5698 error = sysctl_handle_int(oidp, &cntmax, 0, req); 5699 if (error || req->newptr == NULL) 5700 return error; 5701 5702 ifnet_serialize_all(ifp); 5703 5704 if ((cntmax * sc->bnx_rx_retcnt) >= BGE_STD_RX_RING_CNT / 2) { 5705 error = EINVAL; 5706 goto back; 5707 } 5708 5709 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 5710 sc->bnx_rx_ret_ring[i].bnx_rx_cntmax = cntmax; 5711 error = 0; 5712 5713 back: 5714 ifnet_deserialize_all(ifp); 5715 5716 return error; 5717 } 5718 5719 static void 5720 bnx_init_rss(struct bnx_softc *sc) 5721 { 5722 uint8_t key[BGE_RSS_KEYREG_CNT * BGE_RSS_KEYREG_SIZE]; 5723 int i, j, r; 5724 5725 KKASSERT(BNX_RSS_ENABLED(sc)); 5726 5727 /* 5728 * Configure RSS redirect table in following fashion: 5729 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 5730 */ 5731 r = 0; 5732 for (j = 0; j < BGE_RSS_INDIR_TBL_CNT; ++j) { 5733 uint32_t tbl = 0; 5734 5735 for (i = 0; i < BGE_RSS_INDIR_TBLENT_CNT; ++i) { 5736 uint32_t q; 5737 5738 q = r % sc->bnx_rx_retcnt; 5739 tbl |= q << (BGE_RSS_INDIR_TBLENT_SHIFT * 5740 (BGE_RSS_INDIR_TBLENT_CNT - i - 1)); 5741 ++r; 5742 } 5743 5744 BNX_RSS_DPRINTF(sc, 1, "tbl%d %08x\n", j, tbl); 5745 CSR_WRITE_4(sc, BGE_RSS_INDIR_TBL(j), tbl); 5746 } 5747 5748 toeplitz_get_key(key, sizeof(key)); 5749 for (i = 0; i < BGE_RSS_KEYREG_CNT; ++i) { 5750 uint32_t keyreg; 5751 5752 keyreg = BGE_RSS_KEYREG_VAL(key, i); 5753 5754 BNX_RSS_DPRINTF(sc, 1, "key%d %08x\n", i, keyreg); 5755 CSR_WRITE_4(sc, BGE_RSS_KEYREG(i), keyreg); 5756 } 5757 } 5758 5759 static void 5760 bnx_setup_ring_cnt(struct bnx_softc *sc) 5761 { 5762 int msix_enable, i, msix_cnt, msix_cnt2, ring_max; 5763 5764 sc->bnx_tx_ringcnt = 1; 5765 sc->bnx_rx_retcnt = 
1; 5766 sc->bnx_intr_cnt = 1; 5767 5768 msix_enable = device_getenv_int(sc->bnx_dev, "msix.enable", 5769 bnx_msix_enable); 5770 if (!msix_enable) 5771 return; 5772 5773 if (ncpus2 == 1) 5774 return; 5775 5776 msix_cnt = pci_msix_count(sc->bnx_dev); 5777 if (msix_cnt <= 1) 5778 return; 5779 5780 i = 0; 5781 while ((1 << (i + 1)) <= msix_cnt) 5782 ++i; 5783 msix_cnt2 = 1 << i; 5784 5785 /* 5786 * One MSI-X vector is dedicated to status or single TX queue, 5787 * so make sure that there are enough MSI-X vectors. 5788 */ 5789 if (msix_cnt == msix_cnt2) { 5790 /* 5791 * XXX 5792 * This probably will not happen; 57785/5718 families 5793 * come with at least 5 MSI-X vectors. 5794 */ 5795 msix_cnt2 >>= 1; 5796 if (msix_cnt2 <= 1) { 5797 device_printf(sc->bnx_dev, 5798 "MSI-X count %d could not be used\n", msix_cnt); 5799 return; 5800 } 5801 device_printf(sc->bnx_dev, "MSI-X count %d is power of 2\n", 5802 msix_cnt); 5803 } 5804 5805 /* 5806 * Setup RX ring count 5807 */ 5808 ring_max = BNX_RX_RING_MAX; 5809 if (ring_max > msix_cnt2) 5810 ring_max = msix_cnt2; 5811 sc->bnx_rx_retcnt = device_getenv_int(sc->bnx_dev, "rx_rings", 5812 bnx_rx_rings); 5813 sc->bnx_rx_retcnt = if_ring_count2(sc->bnx_rx_retcnt, ring_max); 5814 5815 if (sc->bnx_rx_retcnt == 1) 5816 return; 5817 5818 /* 5819 * We need one extra MSI-X vector for link status or 5820 * TX ring (if only one TX ring is enabled). 5821 */ 5822 sc->bnx_intr_cnt = sc->bnx_rx_retcnt + 1; 5823 5824 /* 5825 * Setup TX ring count 5826 * 5827 * Currently only BCM5719 and BCM5720 support multiple TX rings 5828 * and the TX ring count must be less than the RX ring count. 
5829 */ 5830 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 5831 sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 5832 ring_max = BNX_TX_RING_MAX; 5833 if (ring_max > msix_cnt2) 5834 ring_max = msix_cnt2; 5835 if (ring_max > sc->bnx_rx_retcnt) 5836 ring_max = sc->bnx_rx_retcnt; 5837 sc->bnx_tx_ringcnt = device_getenv_int(sc->bnx_dev, "tx_rings", 5838 bnx_tx_rings); 5839 sc->bnx_tx_ringcnt = if_ring_count2(sc->bnx_tx_ringcnt, 5840 ring_max); 5841 } 5842 } 5843 5844 static int 5845 bnx_alloc_msix(struct bnx_softc *sc) 5846 { 5847 struct bnx_intr_data *intr; 5848 boolean_t setup = FALSE; 5849 int error, i, offset, offset_def; 5850 5851 KKASSERT(sc->bnx_intr_cnt > 1); 5852 KKASSERT(sc->bnx_intr_cnt == sc->bnx_rx_retcnt + 1); 5853 5854 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 5855 /* 5856 * Link status 5857 */ 5858 intr = &sc->bnx_intr_data[0]; 5859 5860 intr->bnx_intr_serialize = &sc->bnx_main_serialize; 5861 intr->bnx_saved_status_tag = &sc->bnx_saved_status_tag; 5862 5863 intr->bnx_intr_func = bnx_msix_status; 5864 intr->bnx_intr_arg = sc; 5865 intr->bnx_intr_cpuid = 0; /* XXX */ 5866 5867 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0), 5868 "%s sts", device_get_nameunit(sc->bnx_dev)); 5869 intr->bnx_intr_desc = intr->bnx_intr_desc0; 5870 5871 /* 5872 * RX/TX rings 5873 */ 5874 if (sc->bnx_rx_retcnt == ncpus2) { 5875 offset = 0; 5876 } else { 5877 offset_def = (sc->bnx_rx_retcnt * 5878 device_get_unit(sc->bnx_dev)) % ncpus2; 5879 5880 offset = device_getenv_int(sc->bnx_dev, 5881 "msix.offset", offset_def); 5882 if (offset >= ncpus2 || 5883 offset % sc->bnx_rx_retcnt != 0) { 5884 device_printf(sc->bnx_dev, 5885 "invalid msix.offset %d, use %d\n", 5886 offset, offset_def); 5887 offset = offset_def; 5888 } 5889 } 5890 5891 for (i = 1; i < sc->bnx_intr_cnt; ++i) { 5892 int idx = i - 1; 5893 5894 intr = &sc->bnx_intr_data[i]; 5895 5896 KKASSERT(idx < sc->bnx_rx_retcnt); 5897 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx]; 5898 if (idx < sc->bnx_tx_ringcnt) { 5899 
intr->bnx_txr = &sc->bnx_tx_ring[idx]; 5900 intr->bnx_ret->bnx_txr = intr->bnx_txr; 5901 } 5902 5903 intr->bnx_intr_serialize = 5904 &intr->bnx_ret->bnx_rx_ret_serialize; 5905 intr->bnx_saved_status_tag = 5906 &intr->bnx_ret->bnx_saved_status_tag; 5907 5908 intr->bnx_intr_arg = intr->bnx_ret; 5909 KKASSERT(idx + offset < ncpus2); 5910 intr->bnx_intr_cpuid = idx + offset; 5911 5912 if (intr->bnx_txr == NULL) { 5913 intr->bnx_intr_check = bnx_check_intr_rx; 5914 intr->bnx_intr_func = bnx_msix_rx; 5915 ksnprintf(intr->bnx_intr_desc0, 5916 sizeof(intr->bnx_intr_desc0), "%s rx%d", 5917 device_get_nameunit(sc->bnx_dev), idx); 5918 } else { 5919 intr->bnx_intr_check = bnx_check_intr_rxtx; 5920 intr->bnx_intr_func = bnx_msix_rxtx; 5921 ksnprintf(intr->bnx_intr_desc0, 5922 sizeof(intr->bnx_intr_desc0), "%s rxtx%d", 5923 device_get_nameunit(sc->bnx_dev), idx); 5924 5925 intr->bnx_txr->bnx_tx_cpuid = 5926 intr->bnx_intr_cpuid; 5927 } 5928 intr->bnx_intr_desc = intr->bnx_intr_desc0; 5929 5930 intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx; 5931 } 5932 } else { 5933 /* 5934 * TX ring and link status 5935 */ 5936 offset_def = device_get_unit(sc->bnx_dev) % ncpus2; 5937 offset = device_getenv_int(sc->bnx_dev, "msix.txoff", 5938 offset_def); 5939 if (offset >= ncpus2) { 5940 device_printf(sc->bnx_dev, 5941 "invalid msix.txoff %d, use %d\n", 5942 offset, offset_def); 5943 offset = offset_def; 5944 } 5945 5946 intr = &sc->bnx_intr_data[0]; 5947 5948 intr->bnx_txr = &sc->bnx_tx_ring[0]; 5949 intr->bnx_intr_serialize = &sc->bnx_main_serialize; 5950 intr->bnx_intr_check = bnx_check_intr_tx; 5951 intr->bnx_saved_status_tag = 5952 &intr->bnx_txr->bnx_saved_status_tag; 5953 5954 intr->bnx_intr_func = bnx_msix_tx_status; 5955 intr->bnx_intr_arg = intr->bnx_txr; 5956 intr->bnx_intr_cpuid = offset; 5957 5958 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0), 5959 "%s ststx", device_get_nameunit(sc->bnx_dev)); 5960 intr->bnx_intr_desc = intr->bnx_intr_desc0; 5961 5962 
intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid; 5963 5964 /* 5965 * RX rings 5966 */ 5967 if (sc->bnx_rx_retcnt == ncpus2) { 5968 offset = 0; 5969 } else { 5970 offset_def = (sc->bnx_rx_retcnt * 5971 device_get_unit(sc->bnx_dev)) % ncpus2; 5972 5973 offset = device_getenv_int(sc->bnx_dev, 5974 "msix.rxoff", offset_def); 5975 if (offset >= ncpus2 || 5976 offset % sc->bnx_rx_retcnt != 0) { 5977 device_printf(sc->bnx_dev, 5978 "invalid msix.rxoff %d, use %d\n", 5979 offset, offset_def); 5980 offset = offset_def; 5981 } 5982 } 5983 5984 for (i = 1; i < sc->bnx_intr_cnt; ++i) { 5985 int idx = i - 1; 5986 5987 intr = &sc->bnx_intr_data[i]; 5988 5989 KKASSERT(idx < sc->bnx_rx_retcnt); 5990 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx]; 5991 intr->bnx_intr_serialize = 5992 &intr->bnx_ret->bnx_rx_ret_serialize; 5993 intr->bnx_intr_check = bnx_check_intr_rx; 5994 intr->bnx_saved_status_tag = 5995 &intr->bnx_ret->bnx_saved_status_tag; 5996 5997 intr->bnx_intr_func = bnx_msix_rx; 5998 intr->bnx_intr_arg = intr->bnx_ret; 5999 KKASSERT(idx + offset < ncpus2); 6000 intr->bnx_intr_cpuid = idx + offset; 6001 6002 ksnprintf(intr->bnx_intr_desc0, 6003 sizeof(intr->bnx_intr_desc0), "%s rx%d", 6004 device_get_nameunit(sc->bnx_dev), idx); 6005 intr->bnx_intr_desc = intr->bnx_intr_desc0; 6006 6007 intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx; 6008 } 6009 } 6010 6011 if (BNX_IS_5717_PLUS(sc)) 6012 sc->bnx_msix_mem_rid = PCIR_BAR(4); 6013 else 6014 sc->bnx_msix_mem_rid = PCIR_BAR(2); 6015 sc->bnx_msix_mem_res = bus_alloc_resource_any(sc->bnx_dev, 6016 SYS_RES_MEMORY, &sc->bnx_msix_mem_rid, RF_ACTIVE); 6017 if (sc->bnx_msix_mem_res == NULL) { 6018 device_printf(sc->bnx_dev, "could not alloc MSI-X table\n"); 6019 return ENXIO; 6020 } 6021 6022 bnx_enable_msi(sc, TRUE); 6023 6024 error = pci_setup_msix(sc->bnx_dev); 6025 if (error) { 6026 device_printf(sc->bnx_dev, "could not setup MSI-X\n"); 6027 goto back; 6028 } 6029 setup = TRUE; 6030 6031 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 6032 
intr = &sc->bnx_intr_data[i]; 6033 6034 error = pci_alloc_msix_vector(sc->bnx_dev, i, 6035 &intr->bnx_intr_rid, intr->bnx_intr_cpuid); 6036 if (error) { 6037 device_printf(sc->bnx_dev, 6038 "could not alloc MSI-X %d on cpu%d\n", 6039 i, intr->bnx_intr_cpuid); 6040 goto back; 6041 } 6042 6043 intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, 6044 SYS_RES_IRQ, &intr->bnx_intr_rid, RF_ACTIVE); 6045 if (intr->bnx_intr_res == NULL) { 6046 device_printf(sc->bnx_dev, 6047 "could not alloc MSI-X %d resource\n", i); 6048 error = ENXIO; 6049 goto back; 6050 } 6051 } 6052 6053 pci_enable_msix(sc->bnx_dev); 6054 sc->bnx_intr_type = PCI_INTR_TYPE_MSIX; 6055 back: 6056 if (error) 6057 bnx_free_msix(sc, setup); 6058 return error; 6059 } 6060 6061 static void 6062 bnx_free_msix(struct bnx_softc *sc, boolean_t setup) 6063 { 6064 int i; 6065 6066 KKASSERT(sc->bnx_intr_cnt > 1); 6067 6068 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 6069 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 6070 6071 if (intr->bnx_intr_res != NULL) { 6072 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ, 6073 intr->bnx_intr_rid, intr->bnx_intr_res); 6074 } 6075 if (intr->bnx_intr_rid >= 0) { 6076 pci_release_msix_vector(sc->bnx_dev, 6077 intr->bnx_intr_rid); 6078 } 6079 } 6080 if (setup) 6081 pci_teardown_msix(sc->bnx_dev); 6082 } 6083 6084 static void 6085 bnx_rx_std_refill_sched_ipi(void *xret) 6086 { 6087 struct bnx_rx_ret_ring *ret = xret; 6088 struct bnx_rx_std_ring *std = ret->bnx_std; 6089 struct globaldata *gd = mycpu; 6090 6091 crit_enter_gd(gd); 6092 6093 atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask); 6094 cpu_sfence(); 6095 6096 KKASSERT(std->bnx_rx_std_ithread.td_gd == gd); 6097 lwkt_schedule(&std->bnx_rx_std_ithread); 6098 6099 crit_exit_gd(gd); 6100 } 6101 6102 static void 6103 bnx_rx_std_refill_stop(void *xstd) 6104 { 6105 struct bnx_rx_std_ring *std = xstd; 6106 struct globaldata *gd = mycpu; 6107 6108 crit_enter_gd(gd); 6109 6110 std->bnx_rx_std_stop = 1; 6111 cpu_sfence(); 
6112 6113 KKASSERT(std->bnx_rx_std_ithread.td_gd == gd); 6114 lwkt_schedule(&std->bnx_rx_std_ithread); 6115 6116 crit_exit_gd(gd); 6117 } 6118 6119 static void 6120 bnx_serialize_skipmain(struct bnx_softc *sc) 6121 { 6122 lwkt_serialize_array_enter(sc->bnx_serialize, 6123 sc->bnx_serialize_cnt, 1); 6124 } 6125 6126 static void 6127 bnx_deserialize_skipmain(struct bnx_softc *sc) 6128 { 6129 lwkt_serialize_array_exit(sc->bnx_serialize, 6130 sc->bnx_serialize_cnt, 1); 6131 } 6132 6133 static void 6134 bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *ret, 6135 struct bnx_rx_std_ring *std) 6136 { 6137 struct globaldata *gd = mycpu; 6138 6139 ret->bnx_rx_cnt = 0; 6140 cpu_sfence(); 6141 6142 crit_enter_gd(gd); 6143 6144 atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask); 6145 cpu_sfence(); 6146 if (atomic_poll_acquire_int(&std->bnx_rx_std_running)) { 6147 if (std->bnx_rx_std_ithread.td_gd == gd) { 6148 lwkt_schedule(&std->bnx_rx_std_ithread); 6149 } else { 6150 lwkt_send_ipiq( 6151 std->bnx_rx_std_ithread.td_gd, 6152 bnx_rx_std_refill_sched_ipi, ret); 6153 } 6154 } 6155 6156 crit_exit_gd(gd); 6157 } 6158 6159 static struct pktinfo * 6160 bnx_rss_info(struct pktinfo *pi, const struct bge_rx_bd *cur_rx) 6161 { 6162 /* Don't pick up IPv6 packet */ 6163 if (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) 6164 return NULL; 6165 6166 /* Don't pick up IP packet w/o IP checksum */ 6167 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) == 0 || 6168 (cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK)) 6169 return NULL; 6170 6171 /* Don't pick up IP packet w/o TCP/UDP checksum */ 6172 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) == 0) 6173 return NULL; 6174 6175 /* May be IP fragment */ 6176 if (cur_rx->bge_tcp_udp_csum != 0xffff) 6177 return NULL; 6178 6179 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_IS_TCP) 6180 pi->pi_l3proto = IPPROTO_TCP; 6181 else 6182 pi->pi_l3proto = IPPROTO_UDP; 6183 pi->pi_netisr = NETISR_IP; 6184 pi->pi_flags = 0; 6185 6186 return pi; 6187 } 6188 6189 
/*
 * Notify firmware before a chip reset.  The APE driver-state change
 * is currently compiled out (unported); kept as a placeholder so the
 * reset paths have stable pre/post hooks.
 */
static void
bnx_sig_pre_reset(struct bnx_softc *sc, int type)
{
#if 0
	if (type == BNX_RESET_START || type == BNX_RESET_SUSPEND)
		bnx_ape_driver_state_change(sc, type);
#endif
}

/*
 * Notify firmware after a chip reset.  Mirrors bnx_sig_pre_reset();
 * the APE driver-state change is compiled out.
 */
static void
bnx_sig_post_reset(struct bnx_softc *sc, int type)
{
#if 0
	if (type == BNX_RESET_SHUTDOWN)
		bnx_ape_driver_state_change(sc, type);
#endif
}